import h5py
import numpy as np

# NOTE: `transforms` below refers to this repo's video-transforms module
# (ClipToTensor, TenCropTensor, MultiScaleCrop, ...), not torchvision.


def __init__(self, h5_file, test_txt, segment_len, ten_crop=False,
             height=128, width=171, crop_size=112):
    self.h5_path = h5_file
    # Sorted video keys; use a context manager so the file handle is closed.
    with h5py.File(self.h5_path, 'r') as f:
        self.keys = sorted(f.keys())
        self.dataset_len = len(f[self.keys[0]])
    self.test_dict = read_testing_txt(test_txt)
    self.segment_len = segment_len
    self.ten_crop = ten_crop
    self.crop_size = crop_size
    self.height = height
    self.width = width
    self.test_dict_annotation()
    # Per-channel C3D pixel means; std of 1 means mean subtraction only.
    self.mean = [90.25, 97.66, 101.41]
    self.std = [1, 1, 1]
    if ten_crop:
        self.ten_crop_aug = transforms.Compose([
            transforms.Resize([self.height, self.width]),
            transforms.ClipToTensor(div_255=False),
            transforms.Normalize(mean=self.mean, std=self.std),
            transforms.TenCropTensor(self.crop_size),
        ])
    # Default single-crop test pipeline: resize, center-crop, normalize.
    self.transforms = transforms.Compose([
        transforms.Resize([self.height, self.width]),
        transforms.CenterCrop(self.crop_size),
        transforms.ClipToTensor(div_255=False),
        transforms.Normalize(mean=self.mean, std=self.std),
    ])
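# --- Illustrative sketch (not from this repo) ------------------------------
# TenCropTensor above presumably implements the standard ten-crop used at
# test time: the four corner crops plus the center crop, and a horizontal
# flip of each. A minimal NumPy sketch of that geometry, assuming clips
# shaped (T, H, W, C); the real transform operates on tensors produced by
# ClipToTensor.
def ten_crop(clip, size):
    t, h, w, c = clip.shape
    ch, cw = (h - size) // 2, (w - size) // 2
    offsets = [(0, 0), (0, w - size), (h - size, 0),
               (h - size, w - size), (ch, cw)]  # four corners + center
    crops = [clip[:, y:y + size, x:x + size, :] for y, x in offsets]
    crops += [crop[:, :, ::-1, :] for crop in crops]  # horizontal flips
    return crops  # 10 clips of shape (T, size, size, C)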
def __init__(self, h5_file, test_txt, test_mask_dir, segment_len=16,
             ten_crop=False, height=256, width=340, crop_size=224):
    self.h5_path = h5_file
    # Bug fix: `self.keys` was commented out but is still needed below to
    # compute `self.dataset_len`, so restore it here.
    with h5py.File(self.h5_path, 'r') as f:
        self.keys = sorted(f.keys())
        self.dataset_len = len(f[self.keys[0]])
    self.test_txt = test_txt
    self.segment_len = segment_len
    self.ten_crop = ten_crop
    self.crop_size = crop_size
    self.test_mask_dir = test_mask_dir
    # mean = std = 128 maps uint8 pixels from [0, 255] to roughly [-1, 1].
    self.mean = [128, 128, 128]
    self.std = [128, 128, 128]
    self.height = height
    self.width = width
    self.test_dict_annotation()
    if ten_crop:
        self.ten_crop_aug = transforms.Compose([
            transforms.Resize([self.height, self.width]),
            transforms.ClipToTensor(div_255=False),
            transforms.Normalize(mean=self.mean, std=self.std),
            transforms.TenCropTensor(self.crop_size),
        ])
    # Single-crop path: resize to a fixed 240x320 (no center crop here).
    self.transforms = transforms.Compose([
        transforms.Resize([240, 320]),
        transforms.ClipToTensor(div_255=False),
        transforms.Normalize(mean=self.mean, std=self.std),
    ])
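# --- Illustrative sketch (not from this repo) ------------------------------
# With div_255=False the frames reach Normalize still in [0, 255], so
# mean = std = 128 scales them to roughly [-1, 1]. A minimal NumPy
# equivalent of that normalization step:
def normalize_clip(clip_uint8):
    clip = clip_uint8.astype(np.float32)  # (T, H, W, 3) uint8 frames
    return (clip - 128.0) / 128.0         # values in ~[-1, 1]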
def __init__(self, h5_file, train_txt, pseudo_labels, clip_num=8,
             segment_len=16, type='Normal', rgb_diff=False,
             hard_label=False, score_segment_len=16,
             continuous_sampling=False):
    self.h5_path = h5_file
    self.pseudo_labels = np.load(pseudo_labels, allow_pickle=True).tolist()
    with h5py.File(self.h5_path, 'r') as f:
        self.keys = sorted(f.keys())
        self.dataset_len = len(f[self.keys[0]])
    self.clip_num = clip_num
    self.segment_len = segment_len
    self.rgb_diff = rgb_diff
    self.hard_label = hard_label
    self.score_segment_len = score_segment_len
    self.continuous_sampling = continuous_sampling
    self.train_txt = train_txt
    # Per-channel C3D pixel means; std of 1 means mean subtraction only.
    self.mean = [90.25, 97.66, 101.41]
    self.std = [1, 1, 1]
    self.get_vid_names_dict()
    # `type` selects which split this loader serves ('Normal' vs. abnormal
    # videos); note the parameter shadows the builtin `type`.
    self.type = type
    if self.type == 'Normal':
        self.selected_keys = list(self.norm_vid_names_dict.keys())
        self.selected_dict = self.norm_vid_names_dict
    else:
        self.selected_keys = list(self.abnorm_vid_names_dict.keys())
        self.selected_dict = self.abnorm_vid_names_dict
    # Training augmentation: multi-scale crop to 112x112 plus random
    # horizontal flip and random grayscale.
    self.transforms = transforms.Compose([
        transforms.Resize((128, 171)),
        transforms.MultiScaleCrop(112, [1.0, 0.8], max_distort=1,
                                  fix_crop=True),
        transforms.RandomHorizontalFlip(),
        transforms.RandomGrayScale(),
        transforms.ClipToTensor(div_255=False),
        transforms.Normalize(self.mean, self.std),
    ])
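# --- Illustrative sketch (not from this repo) ------------------------------
# A guess at what clip_num / segment_len / continuous_sampling control:
# uniform mode jitters one clip start inside each of `clip_num` equal
# windows across the video, while continuous mode takes `clip_num`
# back-to-back clips from a single random start. Assumes
# num_frames >= clip_num * segment_len.
def sample_clip_starts(num_frames, clip_num, segment_len, continuous=False):
    if continuous:
        start = np.random.randint(0, num_frames - clip_num * segment_len + 1)
        return [start + i * segment_len for i in range(clip_num)]
    edges = np.linspace(0, num_frames - segment_len + 1,
                        clip_num + 1).astype(int)
    return [int(np.random.randint(edges[i], max(edges[i] + 1, edges[i + 1])))
            for i in range(clip_num)]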