Example 1
    def _py_func_rgb_vp(rgb_path, label):

        rgb_path = rgb_path.decode()

        rgb_cap = cv2.VideoCapture(rgb_path)
        rgb_len = rgb_cap.get(cv2.CAP_PROP_FRAME_COUNT)

        # Keep sampling random frame indices until one decodes successfully.
        while True:
            index = np.random.randint(0, int(rgb_len))
            rgb_cap.set(cv2.CAP_PROP_POS_FRAMES, index)
            _, image = rgb_cap.read()
            if image is not None:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                image = np.float32(image)
                image = cv2.resize(image, (340, 256))
                rgb_file = DataAugmentation.center_crop(image, 224, 224)
                rgb_file = cv2.resize(rgb_file, (224, 224))
                if _preprocess_name == 'pytorch':
                    rgb_file = normalize(rgb_file)
                elif _preprocess_name == 'tf':
                    rgb_file = tf_preprocess(rgb_file)
                break

        rgb_cap.release()

        if label is not None:
            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return rgb_file, one_hot_label

        return rgb_file
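
These `_py_func_*` loaders appear to be written for TensorFlow's `tf.py_func` / `tf.numpy_function`, so the OpenCV and NumPy work can run inside a `tf.data` input pipeline. The following is a minimal sketch of that wiring, assuming `video_paths` and `labels` are Python lists of UCF-101 video paths and integer class ids; it is an illustration, not the project's actual pipeline code.

    import tensorflow as tf

    def make_train_dataset(video_paths, labels, batch_size=16):
        ds = tf.data.Dataset.from_tensor_slices((video_paths, labels))
        ds = ds.shuffle(len(video_paths))

        def _load(path, label):
            # The loader receives the path as bytes and returns float32 arrays.
            rgb, one_hot = tf.numpy_function(
                _py_func_rgb_vp, [path, label], [tf.float32, tf.float32])
            rgb.set_shape((224, 224, 3))   # single frame, as produced above
            one_hot.set_shape((101,))      # UCF-101 one-hot label
            return rgb, one_hot

        ds = ds.map(_load, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        return ds.batch(batch_size).prefetch(1)
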
Example 2
    def _py_func_rgb_val_vp(rgb_path, label):

        rgb_path = rgb_path.decode()

        num_segments = 3
        rgb_cap = cv2.VideoCapture(rgb_path)
        rgb_len = rgb_cap.get(cv2.CAP_PROP_FRAME_COUNT)
        # Split the frame range into num_segments chunks and sample one frame from each.
        segments_list = np.array_split(np.arange(0, rgb_len), num_segments)
        image_list = []
        while num_segments:
            # Segments are consumed from last to first.
            index = np.random.choice(segments_list[num_segments - 1])
            rgb_cap.set(cv2.CAP_PROP_POS_FRAMES, index)
            flag, image = rgb_cap.read()
            if image is None:
                continue
            image = np.float32(image)
            image = cv2.resize(image, (340, 256))
            image = DataAugmentation.center_crop(image, 224, 224)
            image = normalize(image)
            image_list.append(image)
            num_segments -= 1

        # num_segments is 0 here, so this only asserts that at least one frame was read.
        assert len(image_list) != num_segments

        rgb_cap.release()
        rgb_file = image_list

        if label is not None:
            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return rgb_file, one_hot_label

        return rgb_file
Example 3
    def _py_func_flow_vp(flow_path, label):

        f_upath, f_vpath = flow_path
        flow_u_path = f_upath.decode()
        flow_v_path = f_vpath.decode()
        flow_file = os.listdir(flow_u_path)
        flow_file = sorted(flow_file)
        index = np.random.randint(0, len(flow_file) - _frame_counts)

        # Read _frame_counts consecutive (u, v) flow pairs starting at the sampled index.
        img_list = []
        for j in range(index, index + _frame_counts):
            img_u_path = os.path.join(flow_u_path, flow_file[j])
            img_v_path = os.path.join(flow_v_path, flow_file[j])

            img_u = cv2.imread(img_u_path, 0)
            img_v = cv2.imread(img_v_path, 0)
            if img_u is None or img_v is None:
                continue
            img = np.stack([img_u, img_v], axis=-1)
            img = cv2.resize(img, (340, 256))
            img_list.append(img)

        img = np.stack(img_list, axis=0)
        img = DataAugmentation.center_crop(img, 224, 224)
        img = np.float32(img)
        img = (img / 255 - 0.5) / 0.226

        if label is not None:

            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return img, one_hot_label

        return img
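
`DataAugmentation.center_crop` is not shown in these snippets. From its call sites it is applied both to single frames of shape (H, W, C) and to stacked clips of shape (T, H, W, C), so it presumably crops the two spatial axes around the centre. A plausible stand-in, offered only as an assumption about the helper's behaviour:

    import numpy as np

    def center_crop(img, crop_h, crop_w):
        # Crop the spatial centre; works for (H, W, C) frames and
        # (T, H, W, C) clips because the last three axes are H, W, C.
        h, w = img.shape[-3], img.shape[-2]
        top = (h - crop_h) // 2
        left = (w - crop_w) // 2
        return img[..., top:top + crop_h, left:left + crop_w, :]
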
Example 4
    def _py_func_flow_test_vp(flow_path, label):
        
        
        _batch_size = 25
        flow_path = flow_path.decode()

        flow_cap = cv2.VideoCapture(flow_path)
        flow_len = flow_cap.get(cv2.CAP_PROP_FRAME_COUNT)

        total_flow_file = []
        # Pick _batch_size evenly spaced start frames; tile the indices if the video is too short.
        if (flow_len - 10) <= _batch_size:
            factor = int((_batch_size - 1) // (flow_len - 10) + 1)
            index_list = np.concatenate([np.arange(0, flow_len - 10)] * factor, axis=-1)[:_batch_size]
        else:
            index_list = np.arange(0, flow_len - 10, (flow_len - 10) // _batch_size)[:_batch_size]

        for index in index_list:
            image_list = []
            flow_cap.set(cv2.CAP_PROP_POS_FRAMES, index)
            # Read 10 consecutive flow frames and keep only the (u, v) channels.
            for i in range(10):
                _, img = flow_cap.read()
                if img is not None:
                    image_list.append(img[..., :2])

            image = np.concatenate(image_list, axis=-1)
            image = np.float32(image)
            image = cv2.resize(image, (340, 256))

            # Center crop plus its horizontal flip, normalized to roughly zero mean.
            image_ = DataAugmentation.center_crop(image, 224, 224)
            image_flip = image_[:, ::-1]
            image_ = (image_ / 255 - 0.5) / 0.226
            image_flip = (image_flip / 255 - 0.5) / 0.226
            total_flow_file.append(image_)
            total_flow_file.append(image_flip)

        total_flow_file = np.float32(total_flow_file)
        flow_cap.release()

        if label is not None:
            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return total_flow_file, one_hot_label
        
        return total_flow_file
Example 5
    def _py_func_rgb_test_vp(rgb_path, label):

        rgb_path = rgb_path.decode()

        rgb_cap = cv2.VideoCapture(rgb_path)
        rgb_len = rgb_cap.get(cv2.CAP_PROP_FRAME_COUNT)

        total_rgb_file = []
        try:
            # Evenly spaced frame indices; falls back to the first frames
            # when the video has fewer than _frame_counts frames.
            index_list = np.arange(0, rgb_len,
                                   rgb_len // _frame_counts)[:_frame_counts]
        except ZeroDivisionError:
            index_list = np.arange(0, rgb_len)[:_frame_counts]

        image_list = []
        for index in index_list:
            rgb_cap.set(cv2.CAP_PROP_POS_FRAMES, index)
            _, image = rgb_cap.read()
            if image is not None:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                image = cv2.resize(image, (340, 256))
                image_list.append(image)

        if len(image_list) != _frame_counts:
            image_list = np.concatenate([image_list] * _frame_counts,
                                        axis=0)[:_frame_counts]

        image = np.stack(image_list, axis=0)

        img = image
        if _test_crop == 'center':
            rgb_file = DataAugmentation.center_crop(img, 224, 224)
            rgb_file = np.float32(rgb_file)
            rgb_file = normalize(rgb_file)
            rgb_file_flip = rgb_file[:, :, ::-1, :]
            total_rgb_file.append(rgb_file)
            total_rgb_file.append(rgb_file_flip)
        else:
            for k in range(5):
                image = DataAugmentation.random_Crop(img, 1, k)
                image = np.float32(image)
                image = normalize(image)
                image_flip = image[:, :, ::-1, :]
                total_rgb_file.append(image)
                total_rgb_file.append(image_flip)

        rgb_cap.release()
        if label is not None:
            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return total_rgb_file, one_hot_label

        return total_rgb_file
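
The multi-crop branch above relies on `DataAugmentation.random_Crop(img, 1, k)`, which is also not shown. Given that `k` runs over `range(5)`, it most likely selects one of the four corners or the centre, in the usual TSN-style oversampling scheme. A speculative sketch of such a helper (here called `five_crop`, with the scale argument ignored), purely for illustration:

    import numpy as np

    def five_crop(img, k, crop_h=224, crop_w=224):
        # k = 0..3 -> corners, k = 4 -> centre; img is (..., H, W, C).
        h, w = img.shape[-3], img.shape[-2]
        offsets = [(0, 0), (0, w - crop_w), (h - crop_h, 0),
                   (h - crop_h, w - crop_w),
                   ((h - crop_h) // 2, (w - crop_w) // 2)]
        top, left = offsets[k]
        return img[..., top:top + crop_h, left:left + crop_w, :]
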
Example 6
    def _py_func_rgb_test_vp(rgb_path, label):

        rgb_path = rgb_path.decode()

        _batch_size = 25

        rgb_cap = cv2.VideoCapture(rgb_path)
        rgb_len = rgb_cap.get(cv2.CAP_PROP_FRAME_COUNT) - 10

        total_rgb_file = []
        # Tile the indices when the video has fewer usable frames than _batch_size.
        if rgb_len <= _batch_size:
            factor = int((_batch_size - 1) // rgb_len + 1)
            index_list = np.concatenate([np.arange(0, rgb_len)] * factor,
                                        axis=-1)[:_batch_size]
        else:
            index_list = np.arange(0, rgb_len,
                                   rgb_len // _batch_size)[:_batch_size]

        for index in index_list:
            rgb_cap.set(cv2.CAP_PROP_POS_FRAMES, index)
            _, image = rgb_cap.read()
            if image is not None:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                image = np.float32(image)
                image = cv2.resize(image, (340, 256))

                if test_crop == 'center':
                    image_ = DataAugmentation.center_crop(image, 224, 224)
                    if _preprocess_name == 'pytorch':
                        image_ = normalize(image_)
                    elif _preprocess_name == 'tf':
                        image_ = tf_preprocess(image_)
                    image_flip = np.fliplr(image_)
                    total_rgb_file.append(image_)
                    total_rgb_file.append(image_flip)
                else:
                    for i in range(5):
                        image_ = DataAugmentation.random_Crop(image, 1, i)
                        if _preprocess_name == 'pytorch':
                            image_ = normalize(image_)
                        elif _preprocess_name == 'tf':
                            image_ = tf_preprocess(image_)
                        image_flip = np.fliplr(image_)
                        total_rgb_file.append(image_)
                        total_rgb_file.append(image_flip)

        rgb_cap.release()
        if label is not None:
            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return total_rgb_file, one_hot_label

        return total_rgb_file
Example 7
    def _py_func_rgb_test_vp(rgb_path, label):

        rgb_path = rgb_path.decode()

        rgb_cap = cv2.VideoCapture(rgb_path)
        rgb_len = rgb_cap.get(cv2.CAP_PROP_FRAME_COUNT)

        total_rgb_file = []
        try:
            index_list = np.arange(0, rgb_len - _frame_counts,
                                   (rgb_len - _frame_counts) // _batch_size)[:_batch_size]
            np.random.shuffle(index_list)
        except:
            if rgb_len > _frame_counts:
                index_list = np.repeat(np.arange(0, rgb_len - _frame_counts),
                                       repeats=int(_batch_size // (rgb_len - _frame_counts) + 1),
                                       axis=0)[:_batch_size]
            else:
                index_list = [0] * _frame_counts

        for index in index_list:
            image_list = []
            while True:
                rgb_cap.set(cv2.CAP_PROP_POS_FRAMES, index)
                for i in range(_frame_counts):
                    flag, image = rgb_cap.read()
                    if image is not None:
                        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                        image = cv2.resize(image, (340, 256))
                        image_list.append(image)
                if len(image_list) == _frame_counts or flag is False:
                    break

            image = np.stack(image_list, axis=0)
            # Repeat frames if the clip came up short of _frame_counts.
            if image.shape[0] < _frame_counts:
                image = np.repeat(image, (_frame_counts // image.shape[0]) + 1,
                                  axis=0)[:_frame_counts, ...]
            rgb_file = DataAugmentation.center_crop(image, 224, 224)
            rgb_file = np.float32(rgb_file)
            rgb_file = normalize(rgb_file)
            rgb_file_flip = rgb_file[:, :, ::-1, :]
            assert rgb_file is not None
            total_rgb_file.append(rgb_file)
            total_rgb_file.append(rgb_file_flip)

        rgb_cap.release()
        if label is not None:
            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return total_rgb_file, one_hot_label

        return total_rgb_file
Example 8
    def _py_func_rgb_vp(rgb_path, label):

        rgb_path = rgb_path.decode()

        rgb_cap = cv2.VideoCapture(rgb_path)
        rgb_len = rgb_cap.get(cv2.CAP_PROP_FRAME_COUNT)

        image_list = []
        try:
            index = np.random.randint(0, rgb_len - _frame_counts)
        except ValueError:
            # Video shorter than _frame_counts frames: start at the beginning.
            index = 0

        rgb_cap.set(cv2.CAP_PROP_POS_FRAMES, index)
        while 1:
            flag, image = rgb_cap.read()
            if image is not None:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                image = cv2.resize(image, (340, 256))
                image_list.append(image)
            if len(image_list) == _frame_counts or flag is False:
                if len(image_list) == 0:
                    rgb_cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
                    continue
                break

        if len(image_list) != _frame_counts:
            image_list = np.concatenate([image_list] * _frame_counts,
                                        axis=0)[:_frame_counts]

        rgb_cap.release()
        image = np.stack(image_list, axis=0)

        rgb_file = DataAugmentation.center_crop(image, 224, 224)
        rgb_file = np.float32([cv2.resize(x, (224, 224)) for x in rgb_file])

        rgb_file = np.float32(rgb_file)
        rgb_file = normalize(rgb_file)

        assert rgb_file is not None

        if label is not None:
            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return rgb_file, one_hot_label

        return rgb_file
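
`normalize` is referenced throughout but not defined here; given the `_preprocess_name == 'pytorch'` branches, it is presumably ImageNet mean/std normalization in the PyTorch convention. A guess at its shape, not the project's actual code:

    import numpy as np

    # Assumed ImageNet statistics (PyTorch convention, RGB order).
    _MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32)
    _STD = np.array([0.229, 0.224, 0.225], dtype=np.float32)

    def normalize(img):
        # Scale pixels to [0, 1] and standardize per channel; broadcasting
        # handles both (H, W, 3) frames and (T, H, W, 3) clips.
        return (np.float32(img) / 255.0 - _MEAN) / _STD
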
Example 9
    def _py_func_flow_test_vp(flow_path, label):

        flow_path = flow_path.decode()

        flow_cap = cv2.VideoCapture(flow_path)
        flow_len = flow_cap.get(cv2.CAP_PROP_FRAME_COUNT)

        total_flow_file = []
        try:
            index_list = np.arange(0, flow_len - _frame_counts,
                                   (flow_len - _frame_counts) // _batch_size)[:_batch_size]
        except:
            if flow_len > _frame_counts:
                index_list = np.repeat(np.arange(0, flow_len - _frame_counts),
                                       repeats=int(_batch_size // (flow_len - _frame_counts) + 1),
                                       axis=0)[:_batch_size]
            else:
                index_list = [0] * _frame_counts

        flag = True
        for index in index_list:
            image_list = []
            flow_cap.set(cv2.CAP_PROP_POS_FRAMES, index)
            for i in range(_frame_counts):
                flag, image = flow_cap.read()
                if image is not None:
                    image = np.float32(image)
                    image = cv2.resize(image, (340, 256))
                    # Keep only the (u, v) flow channels.
                    image_list.append(image[..., :2])
                if len(image_list) == _frame_counts or flag is False:
                    break

            image = np.stack(image_list, axis=0)
            # Repeat frames if the clip came up short of _frame_counts.
            if image.shape[0] < _frame_counts:
                image = np.repeat(image, (_frame_counts // image.shape[0]) + 1,
                                  axis=0)[:_frame_counts, ...]
            flow_file = DataAugmentation.center_crop(image, 224, 224)
            # Scale flow values to [-1, 1].
            flow_file = (flow_file / 255) * 2 - 1
            total_flow_file.append(flow_file)

        flow_cap.release()

        if label is not None:
            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return total_flow_file, one_hot_label

        return total_flow_file
Example 10
    def _py_func_flow_vp(flow_path, label):

        f_upath, f_vpath = flow_path
        flow_u_path = f_upath.decode()
        flow_v_path = f_vpath.decode()
        flow_file = os.listdir(flow_u_path)
        flow_file = sorted(flow_file)
        flow_len = len(flow_file)

        index_list = hmdb_dataset.pick_index(flow_len - _new_length,
                                             _frame_counts)

        flow_img = []
        for j in index_list:
            img_list = []
            for i in range(j, j + _new_length):
                img_u_path = os.path.join(flow_u_path, flow_file[i])
                img_v_path = os.path.join(flow_v_path, flow_file[i])

                img_u = cv2.imread(img_u_path, 0)
                img_v = cv2.imread(img_v_path, 0)
                if img_u is None or img_v is None:
                    continue
                img = np.stack([img_u, img_v], axis=-1)
                img_list.append(img)

            img = np.concatenate(img_list, axis=-1)
            img = cv2.resize(img, (340, 256))
            img = DataAugmentation.center_crop(img, 224, 224)
            img = cv2.resize(img, (224, 224))
            img = np.float32(img)
            img = (img / 255 - 0.5) / 0.226
            flow_img.append(img)

        if label is not None:

            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return flow_img, one_hot_label

        return flow_img
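
`hmdb_dataset.pick_index` is not shown either. From how it is used (a frame count and `_frame_counts` go in, a list of indices comes out), it presumably does TSN-style segment sampling: split the valid range into `_frame_counts` segments and pick one index per segment. A speculative stand-in, not the project's actual implementation:

    import numpy as np

    def pick_index(num_frames, num_segments):
        # Split [0, num_frames) into num_segments chunks and sample one index
        # from each; fall back to index 0 when the video is too short.
        num_frames = int(num_frames)
        if num_frames <= 0:
            return [0] * num_segments
        segments = np.array_split(np.arange(num_frames), num_segments)
        return [int(np.random.choice(seg)) if len(seg) else 0 for seg in segments]
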
Example 11
    def _py_func_rgb_vp(rgb_path, label):

        rgb_path = rgb_path.decode()

        rgb_cap = cv2.VideoCapture(rgb_path)
        rgb_len = rgb_cap.get(cv2.CAP_PROP_FRAME_COUNT)

        image_list = []

        index_list = hmdb_dataset.pick_index(rgb_len, _frame_counts)

        for index in index_list:
            rgb_cap.set(cv2.CAP_PROP_POS_FRAMES, index)
            flag, image = rgb_cap.read()
            if image is not None:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                image = cv2.resize(image, (340, 256))
                image_list.append(image)

        if len(image_list) != _frame_counts:
            image_list = np.concatenate([image_list] * _frame_counts,
                                        axis=0)[:_frame_counts]

        assert len(image_list) > 0

        rgb_cap.release()
        image = np.stack(image_list, axis=0)
        rgb_file = DataAugmentation.center_crop(image, 224, 224)
        rgb_file = np.float32([cv2.resize(x, (224, 224)) for x in rgb_file])

        rgb_file = np.float32(rgb_file)
        rgb_file = normalize(rgb_file)

        assert rgb_file is not None

        if label is not None:
            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return rgb_file, one_hot_label

        return rgb_file
Example 12
    def _py_func_flow_val_vp(flow_path, label):

        flow_path = flow_path.decode()

        flow_cap = cv2.VideoCapture(flow_path)
        flow_len = flow_cap.get(cv2.CAP_PROP_FRAME_COUNT)
        num_segments = 3
        segments_list = np.array_split(np.arange(0, flow_len), num_segments)

        image_list = []
        while num_segments:
            # Sample a start frame from the current segment and read a 10-frame flow stack.
            index = np.random.choice(segments_list[num_segments - 1])
            flow_cap.set(cv2.CAP_PROP_POS_FRAMES, index)
            img_list = []
            for i in range(10):
                f, img = flow_cap.read()
                if img is not None:
                    img_list.append(img[..., :2])
            if len(img_list) != 10:
                continue

            image = np.concatenate(img_list, axis=-1)
            image = np.float32(image)
            image = cv2.resize(image, (340, 256))
            image = DataAugmentation.center_crop(image, 224, 224)
            image = cv2.resize(image, (224, 224))
            # Scale to [-1, 1].
            image = (image / 255) * 2 - 1
            image_list.append(image)
            num_segments -= 1

        flow_file = image_list

        flow_cap.release()
        if label is not None:
            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return flow_file, one_hot_label

        return flow_file
Example 13
    def _py_func_flow_vp(flow_path, label):

        f_upath, f_vpath = flow_path
        flow_u_path = f_upath.decode()
        flow_v_path = f_vpath.decode()
        flow_file = os.listdir(flow_u_path)
        flow_file = sorted(flow_file)
        index = np.random.randint(0, len(flow_file) - _new_length)
        img_list = []
        for i in range(index, index + _new_length):
            img_u_path = os.path.join(flow_u_path, flow_file[i])
            img_v_path = os.path.join(flow_v_path, flow_file[i])

            img_u = cv2.imread(img_u_path, 0)
            img_v = cv2.imread(img_v_path, 0)

            img = np.stack([img_u, img_v], axis=-1)
            img_list.append(img)

        img = np.concatenate(img_list, axis=-1)
        img = cv2.resize(img, (340, 256))
        img = DataAugmentation.center_crop(img, 224, 224)
        img = cv2.resize(img, (224, 224))
        img = np.float32(img)
        if _preprocess_name == 'pytorch':
            img = (img / 255 - 0.5) / 0.226
        elif _preprocess_name == 'tf':
            img = tf_preprocess(img)

        if label is not None:

            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return img, one_hot_label

        return img
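
For the flow stream, the training loader above returns a single (224, 224, 2 * _new_length) stack of u/v flow channels. Wiring it into `tf.data` mirrors the RGB case; the sketch below assumes `_new_length = 5` (ten flow channels) and hypothetical `flow_u_dirs` / `flow_v_dirs` / `labels` lists, and is only an illustration of the plumbing.

    import tensorflow as tf

    def make_flow_dataset(flow_u_dirs, flow_v_dirs, labels, new_length=5, batch_size=16):
        ds = tf.data.Dataset.from_tensor_slices((flow_u_dirs, flow_v_dirs, labels))

        def _load(u_dir, v_dir, label):
            # Pack the two directory paths the way _py_func_flow_vp expects to unpack them.
            flow, one_hot = tf.numpy_function(
                _py_func_flow_vp, [tf.stack([u_dir, v_dir]), label],
                [tf.float32, tf.float32])
            flow.set_shape((224, 224, 2 * new_length))  # stacked u/v channels
            one_hot.set_shape((101,))
            return flow, one_hot

        return ds.map(_load).batch(batch_size).prefetch(1)
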
Example 14
    def _py_func_flow_test_vp(flow_path, label):

        f_upath, f_vpath = flow_path
        flow_u_path = f_upath.decode()
        flow_v_path = f_vpath.decode()
        flow_file = os.listdir(flow_u_path)
        flow_file = sorted(flow_file)

        flow_len = len(flow_file)
        try:
            # Evenly spaced window starts; falls back to the first windows
            # when there are too few flow frames.
            index_list = np.arange(0, flow_len - _new_length,
                                   (flow_len - _new_length) //
                                   _frame_counts)[:_frame_counts]
        except ZeroDivisionError:
            index_list = np.arange(0, flow_len - _new_length)[:_frame_counts]

        total_img_list = []
        video_list = []
        for index in index_list:
            img_list = []
            for i in range(index, index + _new_length):
                img_u_path = os.path.join(flow_u_path, flow_file[i])
                img_v_path = os.path.join(flow_v_path, flow_file[i])

                img_u = cv2.imread(img_u_path, 0)
                img_v = cv2.imread(img_v_path, 0)

                if img_u is not None and img_v is not None:
                    img = np.stack([img_u, img_v], axis=-1)
                    img = cv2.resize(img, (340, 256))
                    img_list.append(img)

            img = np.concatenate(img_list, axis=-1)
            video_list.append(img)

        if len(video_list) != _frame_counts:
            video_list = np.concatenate([video_list] * _frame_counts,
                                        axis=0)[:_frame_counts]

        img = np.stack(video_list, axis=0)

        if _test_crop == 'center':
            image = DataAugmentation.center_crop(img, 224, 224)
            image = np.float32(image)
            image = (image / 255 - 0.5) / 0.226
            image_flip = image[:, :, ::-1, :]
            total_img_list.append(image)
            total_img_list.append(image_flip)
        else:
            for k in range(5):
                image = DataAugmentation.random_Crop(img, 1, k)
                image = np.float32(image)
                image = (image / 255 - 0.5) / 0.226
                image_flip = image[:, :, ::-1, :]
                total_img_list.append(image)
                total_img_list.append(image_flip)

        if label is not None:

            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return total_img_list, one_hot_label

        return total_img_list
Example 15
    def _py_func_flow_test_vp(flow_path, label):

        f_upath, f_vpath = flow_path
        flow_u_path = f_upath.decode()
        flow_v_path = f_vpath.decode()
        flow_file = os.listdir(flow_u_path)
        flow_file = sorted(flow_file)

        img_list = []
        flow_img = []
        # Read every available (u, v) flow pair.
        for fname in flow_file:
            img_u_path = os.path.join(flow_u_path, fname)
            img_v_path = os.path.join(flow_v_path, fname)

            img_u = cv2.imread(img_u_path, 0)
            img_v = cv2.imread(img_v_path, 0)
            if img_u is None or img_v is None:
                continue
            img = np.stack([img_u, img_v], axis=-1)
            img = cv2.resize(img, (340, 256))
            img_list.append(img)

        if len(img_list) != _frame_counts:
            img_list = np.concatenate([img_list] *
                                      (_frame_counts // len(img_list) + 1),
                                      axis=0)[:_frame_counts]

        img = np.stack(img_list, axis=0)
        img = DataAugmentation.center_crop(img, 224, 224)
        img = np.float32(img)
        img = (img / 255 - 0.5) / 0.226
        img_flip = img[:, :, ::-1, :]
        flow_img.append(img)
        flow_img.append(img_flip)

        if label is not None:

            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return flow_img, one_hot_label

        return flow_img
Example 16
    def _py_func_flow_test_vp(flow_path, label):

        f_upath, f_vpath = flow_path
        flow_u_path = f_upath.decode()
        flow_v_path = f_vpath.decode()
        flow_file = os.listdir(flow_u_path)
        flow_file = sorted(flow_file)

        _batch_size = 25
        if len(flow_file) - _new_length < _batch_size:
            index_list = np.arange(0, len(flow_file) - _new_length)
            index_list = np.concatenate([index_list] *
                                        (_batch_size //
                                         (len(flow_file) - _new_length) + 1),
                                        axis=0)[:_batch_size]
        else:
            index_list = np.arange(0,
                                   len(flow_file) - _new_length,
                                   (len(flow_file) - _new_length) //
                                   _batch_size)[:_batch_size]

        total_img_list = []
        for index in index_list:
            img_list = []
            for i in range(index, index + _new_length):
                img_u_path = os.path.join(flow_u_path, flow_file[i])
                img_v_path = os.path.join(flow_v_path, flow_file[i])

                img_u = cv2.imread(img_u_path, 0)
                img_v = cv2.imread(img_v_path, 0)

                # Skip frames that failed to load rather than stacking None.
                if img_u is not None and img_v is not None:
                    img = np.stack([img_u, img_v], axis=-1)
                    img_list.append(img)

            img = np.concatenate(img_list, axis=-1)
            img = cv2.resize(img, (340, 256))

            if test_crop == 'center':
                image = DataAugmentation.center_crop(img, 224, 224)
                image = np.float32(image)
                if _preprocess_name == 'pytorch':
                    image = (image / 255 - 0.5) / 0.226
                elif _preprocess_name == 'tf':
                    image = tf_preprocess(image)
                image_flip = np.fliplr(image)
                total_img_list.append(image)
                total_img_list.append(image_flip)
            else:
                for j in range(5):
                    image = DataAugmentation.random_Crop(img, 1, j)
                    image = np.float32(image)
                    if _preprocess_name == 'pytorch':
                        image = (image / 255 - 0.5) / 0.226
                    elif _preprocess_name == 'tf':
                        image = tf_preprocess(image)
                    image_flip = np.fliplr(image)
                    total_img_list.append(image)
                    total_img_list.append(image_flip)

        if label is not None:

            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return total_img_list, one_hot_label

        return total_img_list
Example 17
    def _py_func_flow_test_vp(flow_path, label):

        f_upath, f_vpath = flow_path
        flow_u_path = f_upath.decode()
        flow_v_path = f_vpath.decode()
        flow_file = os.listdir(flow_u_path)
        flow_file = sorted(flow_file)

        _batch_size = 25
        # Evenly spaced 10-frame windows; tile the indices if the video is too short.
        if len(flow_file) - 10 < _batch_size:
            index_list = np.arange(0, len(flow_file) - 10)
            index_list = np.concatenate([index_list] * (_batch_size // (len(flow_file) - 10) + 1),
                                        axis=0)[:_batch_size]
        else:
            index_list = np.arange(0, len(flow_file) - 10,
                                   (len(flow_file) - 10) // _batch_size)[:_batch_size]


        total_img_list = []
        for index in index_list:
            img_list = []
            for i in range(index, index + 10):
                img_u_path = os.path.join(flow_u_path, flow_file[i])
                img_v_path = os.path.join(flow_v_path, flow_file[i])

                img_u = cv2.imread(img_u_path, 0)
                img_v = cv2.imread(img_v_path, 0)

                if img_u is not None and img_v is not None:
                    img = np.stack([img_u, img_v], axis=-1)
                    img_list.append(img)

            img = np.concatenate(img_list, axis=-1)
            # Pad to the expected 20 flow channels if some frames failed to load.
            if img.shape[-1] != 20:
                img = np.concatenate([img] * (20 // img.shape[-1] + 1), axis=-1)[..., :20]

            img = cv2.resize(img, (340, 256))
            image = DataAugmentation.center_crop(img, 224, 224)
            image = cv2.resize(image, (224, 224))
            image = np.float32(image)
            image = (image / 255 - 0.5) / 0.226
            image_flip = np.fliplr(image)
            total_img_list.append(image)
            total_img_list.append(image_flip)

        if label is not None:

            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return total_img_list, one_hot_label

        return total_img_list
Example 18
    def _py_func_rgb_test_vp(rgb_path, label):
        # All-frame test: read every frame, then use one center crop plus its horizontal flip.
        rgb_path = rgb_path.decode()

        rgb_cap = cv2.VideoCapture(rgb_path)
        rgb_len = rgb_cap.get(cv2.CAP_PROP_FRAME_COUNT)

        total_rgb_file = []
        img_list = []
        for i in range(int(rgb_len)):
            flag, img = rgb_cap.read()
            if img is not None:
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                img = cv2.resize(img, (340, 256))
                img_list.append(img)

        if len(img_list) != _frame_counts:
            img_list = np.concatenate([img_list] *
                                      (_frame_counts // len(img_list) + 1),
                                      axis=0)[:_frame_counts]

        image = np.stack(img_list, axis=0)
        rgb_file = DataAugmentation.center_crop(image, 224, 224)
        rgb_file = np.float32(rgb_file)
        rgb_file = normalize(rgb_file)
        rgb_file_flip = rgb_file[:, :, ::-1, :]
        total_rgb_file.append(rgb_file)
        total_rgb_file.append(rgb_file_flip)

        rgb_cap.release()
        if label is not None:
            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return total_rgb_file, one_hot_label

        return total_rgb_file
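
Each of the `*_test_vp` loaders above returns a list of augmented views (a center crop plus its horizontal flip, or five crops and their flips) rather than a single tensor. At evaluation time such views are typically scored independently and their predictions averaged into one video-level prediction. A minimal sketch of that consensus step, assuming a `model` callable that maps a batch of views to per-class scores:

    import numpy as np

    def video_level_prediction(model, views):
        # views: list of per-view arrays as returned by a *_test_vp loader.
        scores = model(np.float32(np.stack(views, axis=0)))  # (num_views, 101)
        return int(np.argmax(np.mean(scores, axis=0)))       # average, then argmax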