Code example #1
 def __init__(self,
              video_path,
              patch_size,
              window_len,
              rotate=10,
              scale=1.2,
              full_size=640,
              is_train=True):
     super(VidListv2, self).__init__()
     self.data_dir = video_path
     self.window_len = window_len
     normalize = transforms.Normalize(mean=(128, 128, 128),
                                      std=(128, 128, 128))
     self.transforms1 = transforms.Compose([
         transforms.RandomRotate(rotate),
         transforms.ResizeandPad(full_size),
         transforms.RandomCrop(patch_size),
         transforms.ToTensor(), normalize
     ])
     self.transforms2 = transforms.Compose([
         transforms.ResizeandPad(full_size),
         transforms.ToTensor(), normalize
     ])
     self.is_train = is_train
     self.list = list_sequences(video_path, set_ids=list(
         range(12)))  # training sets: 0~11
Code example #2
File: loader.py Project: zmbhou/UVC
 def __init__(self,
              video_path,
              list_path,
              patch_size,
              window_len,
              rotate=10,
              scale=1.2,
              full_size=640,
              is_train=True):
     super(VidListv2, self).__init__()
     self.data_dir = video_path
     self.list_path = list_path
     self.window_len = window_len
     normalize = transforms.Normalize(mean=(128, 128, 128),
                                      std=(128, 128, 128))
     self.transforms1 = transforms.Compose([
         transforms.RandomRotate(rotate),
         # transforms.RandomScale(scale),
         transforms.ResizeandPad(full_size),
         transforms.RandomCrop(patch_size),
         transforms.ToTensor(),
         normalize
     ])
     self.transforms2 = transforms.Compose([
         transforms.ResizeandPad(full_size),
         transforms.ToTensor(), normalize
     ])
     self.is_train = is_train
     self.read_list()
Code example #3
File: loader.py Project: zmbhou/UVC
    def __init__(self,
                 video_path,
                 list_path,
                 patch_size,
                 rotate=10,
                 scale=1.2,
                 is_train=True,
                 moreaug=True):
        super(VidListv1, self).__init__()
        self.data_dir = video_path
        self.list_path = list_path
        normalize = transforms.Normalize(mean=(128, 128, 128),
                                         std=(128, 128, 128))

        t = []
        if rotate > 0:
            t.append(transforms.RandomRotate(rotate))
        if scale > 0:
            t.append(transforms.RandomScale(scale))
        t.extend([
            transforms.RandomCrop(patch_size, seperate=moreaug),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(), normalize
        ])

        self.transforms = transforms.Compose(t)

        self.is_train = is_train
        self.read_list()
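
Example #3 assembles its augmentation list conditionally and composes it once at the end (examples #4 and #7 below repeat the same pattern). A rough, self-contained sketch of the same idea using stock torchvision only; RandomRotation and RandomResizedCrop are assumed stand-ins for the repos' custom RandomRotate / RandomScale / RandomCrop, and Normalize with mean and std 0.5 approximates the (x - 128) / 128 normalization used above:

import torchvision.transforms as T

def build_train_transforms(patch_size, rotate=10, scale_jitter=True):
    # Conditionally assemble the augmentation list, then compose it once.
    t = []
    if rotate > 0:
        t.append(T.RandomRotation(rotate))
    if scale_jitter:
        # Stand-in for the custom RandomScale + RandomCrop pair above.
        t.append(T.RandomResizedCrop(patch_size, scale=(0.7, 1.0)))
    else:
        t.append(T.RandomCrop(patch_size))
    t.extend([
        T.RandomHorizontalFlip(),
        T.ToTensor(),  # maps 8-bit pixels to [0, 1]
        # Roughly matches the (x - 128) / 128 normalization used in these loaders.
        T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    ])
    return T.Compose(t)

With stock torchvision the composed pipeline is applied directly to a PIL image, for example build_train_transforms(256)(img).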
Code example #4
File: UAV_loader.py Project: xueluli/MuG
    def __init__(self,
                 video_path,
                 list_path,
                 patch_size,
                 rotate=10,
                 scale=1.2,
                 is_train=True,
                 moreaug=True):
        super(VidListv1, self).__init__()
        csv_path = "/raid/codes/CorrFlows/functional/feeder/dataset/oxuva.csv"
        filenames = open(csv_path).readlines()
        frame_all = [filename.split(',')[0].strip() for filename in filenames]
        nframes = [
            int(filename.split(',')[1].strip()) for filename in filenames
        ]
        self.data_dir = video_path
        self.list = frame_all
        self.nframes = nframes
        normalize = transforms.Normalize(mean=(128, 128, 128),
                                         std=(128, 128, 128))

        t = []
        if rotate > 0:
            t.append(transforms.RandomRotate(rotate))
        if scale > 0:
            t.append(transforms.RandomScale(scale))
        t.extend([
            transforms.RandomCrop(patch_size, seperate=moreaug),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(), normalize
        ])

        self.transforms = transforms.Compose(t)

        self.is_train = is_train
Code example #5
    def __init__(self,
                 video_path,
                 list_path,
                 patch_size,
                 window_len,
                 rotate=10,
                 scale=1.2,
                 full_size=640,
                 is_train=True):
        super(VidListv2, self).__init__()
        csv_path = "/raid/codes/UVC/libs/GOT-new.csv"
        filenames = open(csv_path).readlines()

        frame_all = [filename.split(',')[0].strip() for filename in filenames]

        nframes = [
            int(filename.split(',')[1].strip()) for filename in filenames
        ]
        self.data_dir = video_path
        self.list = frame_all
        self.window_len = window_len
        self.nframes = nframes
        normalize = transforms.Normalize(mean=(128, 128, 128),
                                         std=(128, 128, 128))
        self.transforms1 = transforms.Compose([
            transforms.RandomRotate(rotate),
            # transforms.RandomScale(scale),
            transforms.ResizeandPad_1(full_size),
            transforms.RandomCrop(patch_size),
            transforms.ToTensor(),
            normalize
        ])
        self.transforms2 = transforms.Compose([
            transforms.ResizeandPad_1(full_size),
            transforms.ToTensor(), normalize
        ])

        self.is_train = is_train
        self.video_list = []
        for filename in filenames:
            record = VideoRecord()
            record.path = os.path.join(video_path,
                                       filename.split(',')[0].strip())
            record.num_frames = int(filename.split(',')[1].strip(
            ))  #len(glob.glob(os.path.join(video_path, '*.jpg')))
            record.label = filename.split(',')[0].strip()
            self.video_list.append(record)
Code example #6
File: GOT_10k_loader.py Project: xueluli/MuG
    def __init__(self,
                 video_path,
                 list_path,
                 patch_size,
                 window_len,
                 rotate=10,
                 scale=1.2,
                 full_size=640,
                 is_train=True):
        super(VidListv2, self).__init__()
        csv_path = "/raid/codes/CorrFlows/GOT_10k.csv"
        filenames = open(csv_path).readlines()

        frame_all = [filename.split(',')[0].strip() for filename in filenames]
        nframes = [
            int(filename.split(',')[1].strip()) for filename in filenames
        ]
        self.data_dir = video_path
        self.list = frame_all
        self.window_len = window_len
        self.nframes = nframes
        normalize = transforms.Normalize(mean=(128, 128, 128),
                                         std=(128, 128, 128))
        self.transforms1 = transforms.Compose([
            transforms.RandomRotate(rotate),
            # transforms.RandomScale(scale),
            transforms.ResizeandPad(full_size),
            transforms.RandomCrop(patch_size),
            transforms.ToTensor(),
            normalize
        ])
        self.transforms2 = transforms.Compose([
            transforms.ResizeandPad(full_size),
            transforms.ToTensor(), normalize
        ])
        self.is_train = is_train
Code example #7
    def __init__(self,
                 video_path,
                 list_path,
                 patch_size,
                 rotate=10,
                 scale=1.2,
                 is_train=True,
                 moreaug=True):
        super(VidListv1, self).__init__()
        csv_path = "/raid/codes/UVC/libs/GOT-new.csv"
        filenames = open(csv_path).readlines()
        frame_all = [filename.split(',')[0].strip() for filename in filenames]
        nframes = [
            int(filename.split(',')[1].strip()) for filename in filenames
        ]
        self.data_dir = video_path
        self.list = frame_all
        self.nframes = nframes
        normalize = transforms.Normalize(mean=(128, 128, 128),
                                         std=(128, 128, 128))

        t = []
        if rotate > 0:
            t.append(transforms.RandomRotate(rotate))
        if scale > 0:
            t.append(transforms.RandomScale(scale))
        t.extend([
            transforms.RandomCrop(patch_size, seperate=moreaug),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(), normalize
        ])

        self.transforms = transforms.Compose(t)

        self.is_train = is_train
        self.video_list = []
        for filename in filenames:
            record = VideoRecord()
            record.path = os.path.join(video_path,
                                       filename.split(',')[0].strip())
            record.num_frames = int(filename.split(',')[1].strip(
            ))  # len(glob.glob(os.path.join(video_path, '*.jpg')))
            record.label = filename.split(',')[0].strip()
            self.video_list.append(record)
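
Examples #5 and #7 parse each CSV line of the form <sequence_name>,<num_frames> by splitting the raw string once per field. A minimal sketch of the same parse using the standard csv module; Record and read_video_csv are hypothetical names, not part of either repo:

import csv
import os
from collections import namedtuple

# Hypothetical counterpart of the repos' VideoRecord: (path, num_frames, label).
Record = namedtuple("Record", ["path", "num_frames", "label"])

def read_video_csv(csv_path, video_root):
    """Parse lines of the form '<sequence_name>,<num_frames>' into records."""
    records = []
    with open(csv_path, newline="") as f:
        for row in csv.reader(f):
            if not row:
                continue  # skip blank lines
            name = row[0].strip()
            records.append(Record(path=os.path.join(video_root, name),
                                  num_frames=int(row[1]),
                                  label=name))
    return records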
Code example #8
    def read_list(self):
        path = join(self.list_path)
        root = path.partition("Kinetices/")[0]
        if not exists(path):
            raise Exception(
                "{} does not exist in kinet_dataset.py.".format(path))
        self.list = [
            line.replace("/Data/", root).strip() for line in open(path, 'r')
        ]


if __name__ == '__main__':
    normalize = transforms.Normalize(mean=(128, 128, 128), std=(128, 128, 128))
    t = []
    t.extend([
        transforms.RandomCrop(256),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(), normalize
    ])
    dataset_train = VidList('/home/xtli/DATA/compress/train_256/',
                            '/home/xtli/DATA/compress/train.txt',
                            transforms.Compose(t),
                            window_len=2)

    train_loader = torch.utils.data.DataLoader(dataset_train,
                                               batch_size=16,
                                               shuffle=True,
                                               num_workers=8,
                                               drop_last=True)

    start_time = time.time()
    for i, (frames) in enumerate(train_loader):
        print(i)
Code example #9
File: utils.py Project: DuckerDuck/UVC
def create_transforms(crop_size):
    normalize = transforms.Normalize(mean=(128, 128, 128), std=(128, 128, 128))
    t = []
    t.extend([transforms.ToTensor(), normalize])
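    # crop_size is unused here; the returned pipeline only converts to tensor and normalizes.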
    return transforms.Compose(t)
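
All of the loaders above normalize with a mean and std of 128, which maps raw 8-bit pixel values from [0, 255] into roughly [-1, 1]. A tiny NumPy illustration of that arithmetic (NumPy stands in here for the repos' custom transforms module):

import numpy as np

frame = np.random.randint(0, 256, size=(4, 4, 3)).astype(np.float32)  # fake 8-bit frame
normalized = (frame - 128.0) / 128.0  # same effect as Normalize(mean=128, std=128)
print(normalized.min(), normalized.max())  # values stay within [-1.0, ~0.992]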
Code example #10
File: GOT_10k_loader.py Project: xueluli/MuG
    def read_list(self):
        path = join(self.list_path)
        root = path.partition("Kinetices/")[0]
        if not exists(path):
            raise Exception(
                "{} does not exist in kinet_dataset.py.".format(path))
        self.list = [
            line.replace("/Data/", root).strip() for line in open(path, 'r')
        ]


if __name__ == '__main__':
    normalize = transforms.Normalize(mean=(128, 128, 128), std=(128, 128, 128))
    t = []
    t.extend([
        transforms.RandomCrop(256),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(), normalize
    ])
    dataset_train = VidList('/home/xtli/DATA/compress/train_256/',
                            '/home/xtli/DATA/compress/train.txt',
                            transforms.Compose(t),
                            window_len=2)

    train_loader = torch.utils.data.DataLoader(dataset_train,
                                               batch_size=16,
                                               shuffle=True,
                                               num_workers=8,
                                               drop_last=True)

    start_time = time.time()
    for i, (frames) in enumerate(train_loader):
        print(i)
        if i >= 1000:
            break  # assumed completion: stop the timing pass after 1000 batches
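
For reference, a self-contained version of the timing loop from examples #8 and #10 that runs without any video data; DummyVidList is a made-up stand-in for VidList, and nothing below comes from the original files:

import time

import torch
from torch.utils.data import DataLoader, Dataset


class DummyVidList(Dataset):
    """Stand-in for VidList: yields window_len random frames per item."""

    def __init__(self, num_items=256, window_len=2, size=256):
        self.num_items = num_items
        self.window_len = window_len
        self.size = size

    def __len__(self):
        return self.num_items

    def __getitem__(self, idx):
        return [torch.rand(3, self.size, self.size) for _ in range(self.window_len)]


if __name__ == '__main__':
    train_loader = DataLoader(DummyVidList(),
                              batch_size=16,
                              shuffle=True,
                              num_workers=0,
                              drop_last=True)
    start_time = time.time()
    for i, frames in enumerate(train_loader):
        print(i)
    print("elapsed: {:.2f}s".format(time.time() - start_time))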