Esempio n. 1
0
    def __init__(self,
                 data_dir,
                 split_file=None,
                 load_depth=True,
                 load_normal=True,
                 load_albedo=False,
                 num_worker=8):
        """Index the dataset directory, optionally restricting to a split.

        The split JSON is assumed to nest the instance list two levels
        deep (dataset -> class -> [instances]) -- TODO confirm.
        """
        self.data_dir = data_dir
        self.home_dir = os.path.join(data_dir, '..', '..', '..')
        self.split_file = split_file
        self.instance_list = None
        if self.split_file is not None:
            with open(self.split_file) as f:
                split = json.load(f)
            # Drill down two levels: single dataset -> single class ->
            # list of instance names.
            for _ in range(2):
                split = split[next(iter(split.keys()))]
            self.instance_list = split

        self.load_depth = load_depth
        self.load_normal = load_normal
        self.load_albedo = load_albedo
        self.num_worker = num_worker

        self.image_dir = os.path.join(self.data_dir, 'final')
        self.image_list = self.get_image_file_list(self.image_dir)
Esempio n. 2
0
    def vis_transform(self, data):
        """Convert images/labels/predictions in *data* into
        visualisation-ready numpy arrays: labels and predictions are
        colorised with the class colormap and scaled to [0, 1]."""

        def as_numpy(t):
            # Accept numpy arrays as-is; move torch tensors to CPU numpy.
            if type(t).__module__ == np.__name__:
                return t
            return t.cpu().detach().numpy()

        cmap = self.get_colormap()

        if data.get('image') is not None:
            data['image'] = as_numpy(data['image'])

        label = data.get('label')
        if label is not None and label.shape[-1] != 0:
            colored = cmap[as_numpy(label)]
            data['label'] = colored.transpose((0, 3, 1, 2)) / 255

        pred = data.get('predict')
        if pred is not None:
            pred = as_numpy(pred)
            # Logits over classes -> hard predictions.
            if pred.shape[1] == self.num_classes:
                pred = pred.argmax(axis=1)
            colored = cmap[pred]
            data['predict'] = colored.transpose((0, 3, 1, 2)) / 255

        return data
Esempio n. 3
0
def inference_fer2013():
    """Run the saved FER2013 model over the 'Testing' split of the
    preprocessed HDF5 file, printing per-sample predictions and the
    final accuracy.

    Depends on module-level helpers ``load_checkpoint`` and
    ``get_clip_img`` defined elsewhere in this project, plus file-level
    ``torch`` / ``F`` imports.
    """
    import h5py
    import numpy as np
    import cv2
    # Class index -> emotion name for the 7 FER2013 categories.
    labels_dict = {
        '0': 'Anger',
        '1': 'Disgust',
        '2': 'Fear',
        '3': 'Happy',
        '4': 'Sad',
        '5': 'Surprise',
        '6': 'Neutral'
    }
    model = load_checkpoint(
        './models_fer2013_deep_short_3channels_48/model.pth')
    path = './data/fer2013/data/fer2013_new_data.h5'
    data = h5py.File(path, 'r')
    print(data.keys())
    for i in data.keys():
        print(i)
    Testing_label = data['Testing_label']
    print(Testing_label.shape)
    print(len(Testing_label))
    Testing_pixel = data['Testing_pixel']
    print(Testing_pixel.shape)

    if torch.cuda.is_available():
        model = model.cuda()
    equal_number = 0
    for i, label in enumerate(Testing_label):
        # 48x48 grayscale pixels -> 3-channel 96x96 image.
        img = Testing_pixel[i].reshape(48, 48, 1).astype(np.float32)
        s = np.concatenate((img, img, img), axis=-1)
        s = cv2.resize(s, (96, 96))
        # Sharpen by adding back the detail removed by a 3x3 Gaussian blur.
        b1 = cv2.GaussianBlur(s, (3, 3), 0)
        D1 = ((s - b1) + s)
        # img=np.clip(D1, 0, 255)
        # get_clip_img presumably produces a stack of crops for test-time
        # augmentation -- TODO confirm against its definition.
        img_all = get_clip_img(D1)
        img_all = torch.from_numpy(np.transpose(img_all, (0, 3, 1, 2)) / 255.)

        #     # img = tv_F.to_tensor(s/255.)
        print('img=', img.shape)
        if torch.cuda.is_available():
            # img = torch.unsqueeze(img, 0).cuda()
            img_all = img_all.cuda()
        out = model(img_all)
        pred = F.softmax(out, dim=1)
        # print('pred=',pred)
        # Average the softmax scores over the augmented crops.
        pred = torch.mean(pred, dim=0, keepdim=True)
        # print('pred=', pred)
        predicted = torch.argmax(pred, dim=1)

        print('label=', label)

        pred_np = predicted.cpu().numpy()[0]
        print('predicted.cpu().numpy()[0]', pred_np)
        if pred_np == label:
            equal_number += 1
        print('face_emotion=', labels_dict[str(pred_np)])
    print('acc=', equal_number / len(Testing_label))
Esempio n. 4
0
 def __init__(self, data):
     """Store every value of *data* as a float32 array, in key order;
     labels are simply the indices 0..len(data)-1."""
     self.labels = list(range(len(data.keys())))
     self.data = [data[key].astype(np.float32) for key in data.keys()]
Esempio n. 5
0
def custom_normalization(train, test):
    """Return normalized copies of *train* and *test*.

    Each column is divided by a module-level scale constant; several
    columns are additionally log10-transformed and shifted.  The inverse
    transform is ``custom_unnormalize``.
    """
    train_cp = train.copy()
    test_cp = test.copy()

    for data in [train_cp, test_cp]:
        # Plain linear scalings.
        data['DetectorEta'] = data['DetectorEta'] / eta_div
        data['ActiveArea4vec_eta'] = data['ActiveArea4vec_eta'] / eta_div
        data['EMFrac'] = data['EMFrac'] / emfrac_div
        data['NegativeE'] = np.log10(-data['NegativeE'] + 1) / negE_div
        data['eta'] = data['eta'] / eta_div
        data['phi'] = data['phi'] / phi_div
        data['ActiveArea4vec_phi'] = data['ActiveArea4vec_phi'] / phi_div
        if 'Width' in data.keys():
            data['Width'] = data['Width'] / width_div
        else:
            # Fixed message typo ('Wdith' -> 'Width'), matching
            # custom_unnormalize.
            print('Width not found when normalizing')
        if 'WidthPhi' in data.keys():
            data['WidthPhi'] = data['WidthPhi'] / width_div
        else:
            # Fixed message typo ('WdithPhi' -> 'WidthPhi').
            print('WidthPhi not found when normalizing')
        data['N90Constituents'] = data['N90Constituents'] / N90_div
        data['Timing'] = data['Timing'] / timing_div
        data['HECQuality'] = data['HECQuality'] / hecq_div
        data['ActiveArea'] = data['ActiveArea'] / area4vec_div
        data['ActiveArea4vec_m'] = data[
            'ActiveArea4vec_m'] / area4vecm_div - area4vecm_sub
        data['ActiveArea4vec_pt'] = data['ActiveArea4vec_pt'] / area4vecpt_div
        data['LArQuality'] = data['LArQuality'] / larq_div

        # Columns normalized in log10 space.
        data['m'] = np.log10(data['m'] + m_add) / m_div
        data['LeadingClusterCenterLambda'] = (
            np.log10(data['LeadingClusterCenterLambda'] + log_add) -
            log_sub) / centerlambda_div
        data['LeadingClusterSecondLambda'] = (
            np.log10(data['LeadingClusterSecondLambda'] + log_add) -
            log_sub) / secondlambda_div
        data['LeadingClusterSecondR'] = (
            np.log10(data['LeadingClusterSecondR'] + log_add) -
            log_sub) / secondR_div
        data['AverageLArQF'] = (np.log10(data['AverageLArQF'] + log_add) -
                                log_sub) / larqf_div
        data['pt'] = (np.log10(data['pt']) - pt_sub) / pt_div
        data['LeadingClusterPt'] = np.log10(data['LeadingClusterPt']) / pt_div
        data['CentroidR'] = (np.log10(data['CentroidR']) -
                             centroidR_sub) / centroidR_div
        data['OotFracClusters10'] = np.log10(data['OotFracClusters10'] +
                                             1) / Oot_div
        data['OotFracClusters5'] = np.log10(data['OotFracClusters5'] +
                                            1) / Oot_div

    return train_cp, test_cp
Esempio n. 6
0
def log_write(fid, data, step = 0):
    """Log the scalar metrics in *data* at *step*, trying three sinks:

    1. TensorFlow: *fid* is a tuple (writer, summary_op, session,
       placeholders) and the metrics are fed through the graph.
    2. tensorboardX-style writer exposing ``add_scalars``.
    3. A plain file object: metrics are written as a tab-separated line;
       non-numeric values are skipped.
    """
    try:
        import tensorflow as tf  # availability probe for the TF path
        fid, summary, sess, var = fid
        feed_dict = {}
        for k in var.keys():
            # Missing metrics are logged as 0 so the graph still runs.
            feed_dict[var[k]] = data[k] if k in data.keys() else 0
        summary = sess.run(summary, feed_dict=feed_dict)
        fid.add_summary(summary, step)
    except ModuleNotFoundError:
        try:
            fid.add_scalars('train', data, step)
        except AttributeError:
            s = 'Step: %d\t' % step
            for i, (k, v) in enumerate(data.items()):
                try:
                    v = float(v)
                except (TypeError, ValueError):
                    # Was a bare except; only conversion failures are
                    # expected here -- skip non-numeric values.
                    continue
                s += '%s:%3.5f' % (k, v)
                s += '\t' if i != len(data) - 1 else '\n'
            fid.write(s)
            fid.flush()
Esempio n. 7
0
    def __init__(self, path, split='train', transform=None):
        """Build the Visual Genome index.

        Flattens the nested JSON (category -> text query -> image/ROI
        entry) into three parallel lists: self.cat, self.txt, self.imgid.
        """
        super(VisualGenome, self).__init__()

        self.split = split
        self.transform = transform
        self.img_path = path + '/'

        with open(
                '/DATA/dataset/Visual_Genome/data/cat_attr_img_roi.json') as f:
            data = json.load(f)

        self.cat = []
        self.txt = []
        self.imgid = []
        for category, queries in data.items():
            for txt_query, img_objects in queries.items():
                for img_object in img_objects:
                    self.cat.append(category)
                    self.txt.append(txt_query)
                    self.imgid.append(img_object)
Esempio n. 8
0
 def get_size_dict(self):
     """Map each key of the first sample of every dataset to its tensor
     size; later datasets overwrite earlier ones on key clashes."""
     size_dict = {}
     for dataset in self.all_datasets:
         sample = dataset.getitem(0)
         for key, value in sample.items():
             size_dict[key] = value.size()
     return size_dict
Esempio n. 9
0
    def __getitem__(self, index):
        """Load one .mat point-cloud sample and return a dict with the
        (optionally rotation-augmented) points, per-point labels, file
        name, rotation matrix and rotation-anchor label."""
        data = sio.loadmat(self.all_data[index])

        if self.mode == 'train':
            # Resample to a fixed number of input points for training.
            _, pc = pctk.uniform_resample_np(data['pc'], self.opt.model.input_num)
        else:
            pc = data['pc']

        # Normalize in transposed (3, N) layout, then restore (N, 3).
        pc = p3dtk.normalize_np(pc.T)
        pc = pc.T

        # Defaults when augmentation is off: identity rotation and the
        # sentinel anchor label 29.
        R = np.eye(3)
        R_label = 29

        if not self.opt.no_augmentation:
            # Evaluation splits reuse the rotation stored in the file;
            # otherwise a random rotation is drawn.
            if 'R' in data.keys() and self.mode != 'train':
                pc, R = pctk.rotate_point_cloud(pc, data['R'])
            else:
                pc, R = pctk.rotate_point_cloud(pc)

            # Closest anchor rotation index and residual rotation R0.
            _, R_label, R0 = rotation_distance_np(R, self.anchors)

            if self.flag == 'rotation':
                R = R0

        return {'pc':torch.from_numpy(pc.astype(np.float32)),
                'label':torch.from_numpy(data['label'].flatten()).long(),
                'fn': data['name'][0],
                'R': R,
                'R_label': torch.Tensor([R_label]).long(),
               }
def create_tcn_dataset(split_txt_path,json_path, classes, mode):
    """Build a list of {video, class, class_idx} samples for the TCN.

    Reads the '*1.txt' (first split) files under *split_txt_path*: flag
    '1' marks training videos, '2' marks test/validation videos.  A video
    is kept when the per-class JSON under *json_path* has an entry for it.

    Returns the list of sample dicts (and prints its length).
    """
    videos = set()  # selected video names, accumulated across classes
    dataset = []
    txt_files = glob.glob(split_txt_path+'/*1.txt') # 1rst split
    for txt in txt_files:
        # '<ClassName>_test_split1.txt' -> '<ClassName>'
        # (os.path.basename instead of split('/') for portability)
        class_name = os.path.basename(txt)[:-16]
        class_idx = classes.index(class_name)
        with open(txt, 'r') as fp:
            lines = fp.readlines()
        for l in lines:
            spl = l.split()
            is_train = spl[1] == '1' and mode == 'train'
            is_eval = spl[1] == '2' and (mode == 'test' or mode == 'val')
            if is_train or is_eval:
                videos.add(spl[0][:-4])  # strip '.avi' extension

        with open(os.path.join(json_path,class_name+'.json'), 'r') as fp:
            data = json.load(fp)
        for feat in data.keys():
            # set membership is O(1) (was a linear scan over a list)
            if feat in videos:
                dataset.append({
                    'video' : feat,
                    'class' : class_name,
                    'class_idx' : class_idx
                })
    print(len(dataset))

    return dataset
Esempio n. 11
0
def get_PReID_video_names_and_labels(data):
    """Collect the training videos for person re-ID.

    An entry is kept when its "num" field falls in 1-6, 11-16 or 21-26.
    The class-name -> id mapping is populated only from the 21-26 range,
    mirroring the original selection logic.

    Returns (video_names, video_labels, num, day, classnametoids).
    """
    video_names, video_labels = [], []
    num, day = [], []
    classnametoids = {}
    next_id = 0

    for video_name, info in data.items():
        for lo, hi in ((1, 7), (11, 17), (21, 27)):
            if info["num"] in range(lo, hi):
                video_names.append(video_name)
                video_labels.append(info["label"])
                num.append(info["num"])
                day.append(info["day"])
                # Only the last range assigns class ids.
                if lo == 21 and info["label"] not in classnametoids:
                    classnametoids[info["label"]] = next_id
                    next_id += 1

    return video_names,video_labels,num,day,classnametoids
Esempio n. 12
0
def get_PReID_video_names_and_labels_test(data):
    """Collect the test videos for person re-ID.

    An entry is kept when its "num" field falls in 7-10, 17-20 or 27-30.
    The class-name -> id mapping is populated only from the 27-30 range,
    mirroring the original selection logic.

    Returns (video_names, video_labels, num, day, classnametoids).
    """
    video_names, video_labels = [], []
    num, day = [], []
    classnametoids = {}
    next_id = 0

    for video_name, info in data.items():
        for lo, hi in ((7, 11), (17, 21), (27, 31)):
            if info["num"] in range(lo, hi):
                video_names.append(video_name)
                video_labels.append(info["label"])
                num.append(info["num"])
                day.append(info["day"])
                # Only the last range assigns class ids.
                if lo == 27 and info["label"] not in classnametoids:
                    classnametoids[info["label"]] = next_id
                    next_id += 1

    return video_names,video_labels,num,day,classnametoids
Esempio n. 13
0
def generator(data, batch_size):
    """Yield shuffled batches (img, l1, l2, l3) from *data*.

    *data* maps image name -> record whose indices 1..3 hold the three
    label ids.  Images are read from DATA_PATH, resized to 64x64 and
    converted to tensors; the final batch may be smaller than
    *batch_size*.  (The dead, never-read ``flag`` variable was removed.)
    """
    data_len = len(data)
    image_names = list(data.keys())
    random.shuffle(image_names)
    for start in range(0, data_len, batch_size):
        end = min(data_len, start + batch_size)
        out_names = image_names[start:end]
        img_list = []
        l1_list = []
        l2_list = []
        l3_list = []
        for name in out_names:
            img_path = DATA_PATH + name + ".jpg"
            image = Image.open(img_path).convert('RGB')
            image = resize(image, (64, 64))
            image = image_to_tensor(image, 0, 255)
            img_list.append(image)
            l1_list.append(data[name][1])
            l2_list.append(data[name][2])
            l3_list.append(data[name][3])
        img = torch.stack(img_list, 0)
        l1 = torch.LongTensor(l1_list)
        l2 = torch.LongTensor(l2_list)
        l3 = torch.LongTensor(l3_list)
        yield img, l1, l2, l3
Esempio n. 14
0
 def __init__(self, file_path):
     """Record *file_path*, read the top-level key names of gt.h5 once,
     and shuffle them."""
     super(DatasetFromHDF5, self).__init__()
     self.file_path = file_path
     with h5py.File(os.path.join(self.file_path, 'gt.h5'), 'r') as gt:
         self.keys = list(gt.keys())
     random.shuffle(self.keys)
Esempio n. 15
0
 def __init__(self, data, set_type, config):
     """Build the keyword-spotting dataset.

     *data* maps audio file path -> label.  Background-noise wavs listed
     in config["bg_noise_files"] are filtered to real .wav entries and
     preloaded at 16 kHz; augmentation probabilities and cache sizes come
     from *config*.
     """
     super().__init__()
     self.audio_files = list(data.keys())
     self.set_type = set_type
     self.audio_labels = list(data.values())

     noise_files = [f for f in config.get("bg_noise_files", [])
                    if f.endswith("wav")]
     config["bg_noise_files"] = noise_files
     self.bg_noise_audio = [librosa.core.load(f, sr=16000)[0]
                            for f in noise_files]

     self.unknown_prob = config["unknown_prob"]
     self.silence_prob = config["silence_prob"]
     self.noise_prob = config["noise_prob"]
     self.input_length = config["input_length"]
     self.timeshift_ms = config["timeshift_ms"]
     self._audio_cache = SimpleCache(config["cache_size"])
     self._file_cache = SimpleCache(config["cache_size"])

     # Number of injected silence samples is proportional to the count of
     # non-"unknown" samples (label 1 marks "unknown").
     n_unknown = sum(1 for label in self.audio_labels if label == 1)
     self.n_silence = int(self.silence_prob *
                          (len(self.audio_labels) - n_unknown))
     self.audio_processor = AudioPreprocessor(
         n_mels=config["n_mels"],
         n_dct_filters=config["n_dct_filters"],
         hop_ms=10)
     self.audio_preprocess_type = config["audio_preprocess_type"]
Esempio n. 16
0
def custom_unnormalize(normalized_data):
    """Invert custom_normalization: rescale every column of a copy of
    *normalized_data* back to physical units using the module-level
    scale/offset constants."""
    data = normalized_data.copy()
    # Plain linear un-scalings.
    data['DetectorEta'] = data['DetectorEta'] * eta_div
    data['ActiveArea4vec_eta'] = data['ActiveArea4vec_eta'] * eta_div
    data['EMFrac'] = data['EMFrac'] * emfrac_div
    data['eta'] = data['eta'] * eta_div
    data['phi'] = data['phi'] * phi_div
    data['ActiveArea4vec_phi'] = data['ActiveArea4vec_phi'] * phi_div
    if 'Width' in data.keys():
        data['Width'] = data['Width'] * width_div
    else:
        print('Width not found when unnormalizing')
    if 'WidthPhi' in data.keys():
        data['WidthPhi'] = data['WidthPhi'] * width_div
    else:
        print('WidthPhi not found when unnormalizing')
    data['N90Constituents'] = data['N90Constituents'] * N90_div
    data['Timing'] = data['Timing'] * timing_div
    data['HECQuality'] = data['HECQuality'] * hecq_div
    data['ActiveArea'] = data['ActiveArea'] * area4vec_div
    data['ActiveArea4vec_m'] = (data['ActiveArea4vec_m'] +
                                area4vecm_sub) * area4vecm_div
    data['ActiveArea4vec_pt'] = data['ActiveArea4vec_pt'] * area4vecpt_div
    data['LArQuality'] = data['LArQuality'] * larq_div

    # Columns that were log10-transformed during normalization.
    data['NegativeE'] = 1 - np.power(10, negE_div * data['NegativeE'])
    data['m'] = np.power(10, m_div * data['m']) - m_add
    data['LeadingClusterCenterLambda'] = np.power(
        10, centerlambda_div * data['LeadingClusterCenterLambda'] +
        log_sub) - log_add
    data['LeadingClusterSecondLambda'] = np.power(
        10, secondlambda_div * data['LeadingClusterSecondLambda'] +
        log_sub) - log_add
    data['LeadingClusterSecondR'] = np.power(
        10, secondR_div * data['LeadingClusterSecondR'] + log_sub) - log_add
    data['AverageLArQF'] = np.power(
        10, larqf_div * data['AverageLArQF'] + log_sub) - log_add
    data['pt'] = np.power(10, pt_div * data['pt'] + pt_sub)
    data['LeadingClusterPt'] = np.power(10, pt_div * data['LeadingClusterPt'])
    data['CentroidR'] = np.power(
        10, centroidR_div * data['CentroidR'] + centroidR_sub)
    data['OotFracClusters10'] = np.power(
        10, Oot_div * data['OotFracClusters10']) - 1
    data['OotFracClusters5'] = np.power(10,
                                        Oot_div * data['OotFracClusters5']) - 1

    return data
Esempio n. 17
0
    def __getitem__(self, key):
        """Fetch record *key* from the on-disk reader and return it with
        every array converted to a torch tensor."""
        with Reader(self.dirpath) as db:
            record = db[key]

        return {name: torch.from_numpy(arr) for name, arr in record.items()}
Esempio n. 18
0
def repair_data(data, batchsize):
    """Pad every entry of the batch dict *data* up to *batchsize* by
    repeating its leading items: 'path' is a plain list extended in
    place, every other entry is a tensor concatenated along dim 0."""
    shortfall = batchsize - data['label'].size()[0]
    for key in data:
        if key == 'path':
            data[key] += data[key][:shortfall]
        else:
            data[key] = torch.cat([data[key], data[key][:shortfall]], 0)
    return data
Esempio n. 19
0
def getCurrent(data_path):
    """Load the pickled dict at *data_path*.

    Returns (data, number_of_keys).  A missing, unreadable or corrupt
    file yields an empty dict instead of raising.
    """
    try:
        with open(data_path, "rb") as f:
            data = pickle.load(f)
    except (OSError, pickle.PickleError, EOFError):
        # Was a bare except; only the errors a pickle load can
        # realistically raise are swallowed now -- e.g. KeyboardInterrupt
        # propagates.
        data = {}

    return data, len(data)
Esempio n. 20
0
    def __getitem__(self, i):
        """Return one mesh sample as (centers, corners, normals,
        neighbor_index, target) tensors."""
        path, type = self.data[i]
        #print('path:',path)
        data = np.load(path)
        # Field names differ between dataset versions; accept both.
        if 'face' in data.keys():
            face = data['face']
        else:
            face = data['faces']
        if 'neighbors' in data.keys():
            neighbor_index = data['neighbors']
        else:
            neighbor_index = data['neighbor_index']

        # data augmentation
        # Jitter only the geometric part (first 12 columns) with clipped
        # Gaussian noise; leave the remaining columns untouched.
        if self.augment_data and self.part == 'train':
            sigma, clip = 0.01, 0.05
            jittered_data = np.clip(
                sigma * np.random.randn(*face[:, :12].shape), -1 * clip, clip)
            face = np.concatenate((face[:, :12] + jittered_data, face[:, 12:]),
                                  1)

        # fill for n < 1024
        # NOTE(review): the comment above says 1024 but the code pads to
        # 4096 faces by duplicating randomly chosen existing faces.
        num_point = len(face)
        if num_point < 4096:
            fill_face = []
            fill_neighbor_index = []
            for i in range(4096 - num_point):
                index = np.random.randint(0, num_point)
                fill_face.append(face[index])
                fill_neighbor_index.append(neighbor_index[index])
            face = np.concatenate((face, np.array(fill_face)))
            neighbor_index = np.concatenate(
                (neighbor_index, np.array(fill_neighbor_index)))

        # to tensor
        face = torch.from_numpy(face).float()
        neighbor_index = torch.from_numpy(neighbor_index).long()
        target = torch.tensor(type, dtype=torch.long)

        # reorganize
        # Column layout after transpose: rows 0-2 centers, 3-11 corner
        # coordinates, 12+ normals.
        face = face.permute(1, 0).contiguous()
        centers, corners, normals = face[:3], face[3:12], face[12:]
        # Express corner coordinates relative to the face center.
        corners = corners - torch.cat([centers, centers, centers], 0)

        return centers, corners, normals, neighbor_index, target
Esempio n. 21
0
 def _to_device(self, data, device):
     """Recursively move the tensors in *data* to *device* with
     non-blocking transfers; nested lists/tuples become lists and dicts
     are rebuilt."""
     if isinstance(data, (list, tuple)):
         return [self._to_device(item, device) for item in data]
     if isinstance(data, dict):
         return {key: self._to_device(value, device)
                 for key, value in data.items()}
     return data.to(device, non_blocking=True)
Esempio n. 22
0
    def __init__(
        self,
        root,
        split='train',
        num_node=16,
        downsample=0.03,
        self_augment=False,
        augment_noise=0.005,
        augment_axis=1,
        augment_rotation=1.0,
        augment_translation=0.001,
        config=None,
    ):
        """Load the precomputed 3DMatch point/keypoint pickles for *split*.

        Populates self.points / self.ids_list from the points pickle and
        self.correspondences from the keypoints pickle, then builds the
        src -> [tgt, ...] fragment-pair map.  Returns early (containers
        left empty) when the pickle files are missing.
        """
        self.root = root
        self.split = split
        self.num_node = num_node
        self.downsample = downsample
        self.self_augment = self_augment
        self.augment_noise = augment_noise
        self.augment_axis = augment_axis
        self.augment_rotation = augment_rotation
        self.augment_translation = augment_translation
        self.config = config

        # Self-augmentation is not supported by this dataset variant.
        assert self_augment == False

        # containers
        self.ids = []
        self.points = []
        self.src_to_tgt = {}

        # load data
        pts_filename = join(
            self.root, f'3DMatch_{split}_{self.downsample:.3f}_points.pkl')
        keypts_filename = join(
            self.root, f'3DMatch_{split}_{self.downsample:.3f}_keypts.pkl')

        if exists(pts_filename) and exists(keypts_filename):
            with open(pts_filename, 'rb') as file:
                data = pickle.load(file)
                self.points = [*data.values()]
                self.ids_list = [*data.keys()]
            with open(keypts_filename, 'rb') as file:
                self.correspondences = pickle.load(file)
            print(f"Load PKL file from {pts_filename}")
        else:
            print("PKL file not found.")
            return

        # Correspondence keys look like 'src@tgt'; group targets by source.
        for idpair in self.correspondences.keys():
            src = idpair.split("@")[0]
            tgt = idpair.split("@")[1]
            # add (key -> value)  src -> tgt
            if src not in self.src_to_tgt.keys():
                self.src_to_tgt[src] = [tgt]
            else:
                self.src_to_tgt[src] += [tgt]
Esempio n. 23
0
 def __init__(self, cfg, mode='train'):
     """Load the annotation JSON from cfg.file and record basic settings.

     Fix: the annotation file handle was previously opened via
     json.load(open(...)) and never closed; a with-block closes it.
     """
     self.img_path = cfg.img_path
     self.cfg = cfg
     with open(cfg.file, 'r') as f:
         data = json.load(f)
     self.imgs = list(data.keys())
     self.annos = data
     self.mode = mode
     self.accm_batch = 0
     self.size = cfg.size
     self.aug = cfg.augment
Esempio n. 24
0
def inference_CK():
    """Evaluate the FER2013-trained model on the CK HDF5 dataset and
    print the overall accuracy.

    Depends on module-level helpers ``load_checkpoint`` and
    ``get_clip_img`` plus file-level imports (h5py, cv2, np, torch, F).
    """
    path = './data/CK/CK_data.h5'
    model = load_checkpoint('./models_fer2013_aug/model.pth')
    data = h5py.File(path, 'r')
    print(data.keys())
    for i in data.keys():
        print(i)
    Testing_pixel = data['data_pixel']
    print(Testing_pixel.shape)
    Testing_label = data['data_label']
    print(Testing_label.shape)

    if torch.cuda.is_available():
        print('cuda available')
        model = model.cuda()

    equal_number = 0
    for i, label in enumerate(Testing_label):
        # 48x48 grayscale pixels -> 3-channel 96x96 image.
        img = Testing_pixel[i].reshape(48, 48, 1).astype(np.float32)
        s = np.concatenate((img, img, img), axis=-1)
        s = cv2.resize(s, (96, 96))
        # Sharpen by adding back the detail removed by a 3x3 Gaussian blur.
        b1 = cv2.GaussianBlur(s, (3, 3), 0)
        D1 = ((s - b1) + s)
        # get_clip_img presumably yields a batch of crops for test-time
        # augmentation -- TODO confirm against its definition.
        img_all = get_clip_img(D1)
        img_all = torch.from_numpy(np.transpose(img_all, (0, 3, 1, 2)) / 255.)
        if torch.cuda.is_available():
            # img = torch.unsqueeze(img, 0).cuda()
            img_all = img_all.cuda()
        out = model(img_all)
        pred = F.softmax(out, dim=1)
        # print('pred=',pred)
        # Average the softmax scores over the augmented crops.
        pred = torch.mean(pred, dim=0, keepdim=True)
        # print('pred=', pred)
        predicted = torch.argmax(pred, dim=1)

        print('label=', label)

        pred_np = predicted.cpu().numpy()[0]
        print('predicted.cpu().numpy()[0]', pred_np)
        if pred_np == label:
            equal_number += 1

    print('acc=', equal_number / len(Testing_label))
def quantifyData(data):
    """Count the items held in *data*: a list value contributes its
    length, every other value counts as one.

    Uses isinstance (accepting list subclasses) instead of an exact
    type() check, and drops the redundant continue/else.
    """
    count = 0
    for value in data.values():
        if isinstance(value, list):
            count += len(value)
        else:
            count += 1

    return count
Esempio n. 26
0
    def __init__(self,
                 data_dir,
                 experiment_directory,
                 split_file,
                 checkpoint_num=2000,
                 mode=None,
                 load_mesh=False,
                 load_code=False):
        """Index a DeepSDF-style dataset split.

        Reads the split JSON (expected to hold exactly one dataset with
        exactly one class), drops instances whose NormalizationParameters
        file is missing on disk, and optionally loads the latent codes
        saved at *checkpoint_num*.
        """
        self.data_dir = data_dir
        self.experiment_directory = experiment_directory
        self.checkpoint_num = checkpoint_num
        self.load_mesh = load_mesh
        self.load_code = load_code
        # Infer the mode from the split file name, e.g.
        # 'sv2_chairs_train.json' -> 'train'.
        if mode == None:
            self.mode = split_file[:-5].split('_')[-1]
        else:
            self.mode = mode

        with open(split_file, 'r') as f:
            split = json.load(f)

        key_list = list(split.keys())
        assert (len(key_list) == 1)
        self.dataset = key_list[0]

        data = split[self.dataset]
        key_list = list(data.keys())
        assert (len(key_list) == 1)
        self.class_name = key_list[0]

        self.instance_list = split[self.dataset][self.class_name]

        # Keep only instances whose normalization parameters exist.
        instance_list_new = []
        for instance in self.instance_list:
            fname = os.path.join(os.path.expanduser('~/data'),
                                 'NormalizationParameters', 'ShapeNetV2',
                                 self.class_name, '{}.npz'.format(instance))
            if os.path.exists(fname):
                instance_list_new.append(instance)
            else:
                print('Instance {} does not exist!'.format(instance))
        print(self.class_name)
        self.instance_list = instance_list_new

        if self.load_code:
            if self.mode == 'train':
                # Prefer the npz export of the latent codes when present,
                # otherwise fall back to the torch checkpoint.
                latent_code_fname = os.path.join(
                    self.experiment_directory, 'LatentCodes',
                    '{}.pth'.format(checkpoint_num))
                npz_fname = os.path.splitext(latent_code_fname)[0] + '.npz'
                if os.path.isfile(npz_fname):
                    self.latent_codes = np.load(npz_fname)['data']
                else:
                    self.latent_codes = torch.load(latent_code_fname)[
                        'latent_codes'].detach().cpu().numpy()  # [N, 1, 256]
Esempio n. 27
0
 def __init__(self, data, transforms):
     """Split the annotation dict into parallel label lists l1/l2/l3
     (record indices 1..3); record index 0 is unused here."""
     super(ClassifySet, self).__init__()
     self.image_names = list(data.keys())
     self.transforms = transforms
     self.l1 = [data[name][1] for name in self.image_names]
     self.l2 = [data[name][2] for name in self.image_names]
     self.l3 = [data[name][3] for name in self.image_names]
Esempio n. 28
0
def create_dict(path):
    """Invert the ImageNet index JSON at *path*: return a dict mapping
    each entry's class_name to its key (later duplicates win)."""
    with open(path) as file:
        data = json.load(file)
    return {entry['class_name']: key for key, entry in data.items()}
Esempio n. 29
0
 def __init__(self, cfg, mode='train'):
     """Load annotations from cfg.file and pick a random size from
     cfg.sizes.

     Fix: the annotation file handle was previously opened via
     json.load(open(...)) and never closed; a with-block closes it.
     NOTE(review): self.aug_num is only defined when mode == 'train' --
     other modes must not read it.
     """
     self.img_path = cfg.img_path
     self.cfg = cfg
     with open(cfg.file, 'r') as f:
         data = json.load(f)
     self.imgs = list(data.keys())
     self.annos = data
     self.mode = mode
     self.accm_batch = 0
     self.size = random.choice(cfg.sizes)
     if mode == 'train':
         self.aug_num = int(cfg.aug_num)
Esempio n. 30
0
def get_filtered_data(filename_tuple, filters, pdgIDs):
    """Load every HDF5 file named in *filename_tuple*, apply each filter
    to it in place, and concatenate the per-key arrays across files."""
    merged = {}
    for filename in filename_tuple:
        file_data = load_hdf5(filename, pdgIDs)
        for filt in filters:
            filt.filter(file_data)
        for key, values in file_data.items():
            if key in merged:
                merged[key] = np.append(merged[key], values, axis=0)
            else:
                merged[key] = values
    return merged