def save_training(self):
    '''Save this object in HDF5 file format.

    The target path is taken from ``self.filename`` and must end in ``.hdf5``.
    '''
    if self.filename.endswith('.hdf5'):
        save_dict_to_hdf5(self.__dict__, self.filename)
    else:
        raise Exception("Filename not supported")
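# `save_dict_to_hdf5` is a project helper whose implementation is not shown in
# this section. For orientation only, a minimal sketch of what such a helper
# could look like with h5py, assuming a flat dict whose values are NumPy
# arrays, array-likes, or scalars (the real helper may handle nested dicts and
# other types differently):

import h5py
import numpy as np


def save_dict_to_hdf5(data, filename):
    """Write each key/value pair of a flat dict as one HDF5 dataset.

    Sketch only: values are assumed to be NumPy arrays, array-likes, or
    scalars; nested dicts and arbitrary Python objects are not handled.
    """
    with h5py.File(filename, 'w') as f:
        for key, value in data.items():
            f.create_dataset(key, data=np.asarray(value))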
def create_pos_dataset(pos, target_size, out_dir):
    """Build the positive-sample dataset and save it as an HDF5 file.

    Each line of `pos` is expected to hold an image name, an integer label,
    and four bounding-box values. `dealdata` is assumed to be a module-level
    path to the image directory.
    """
    ims = []
    landmarks = []
    labels = []
    for line in pos:
        words = line.strip().split()
        # Resolve the image path, appending '.jpg' if it is missing.
        if '.jpg' in words[0]:
            image_file_name = dealdata + '/' + words[0]
        else:
            image_file_name = dealdata + '/' + words[0] + '.jpg'
        im = cv2.imread(image_file_name)
        im = resize(im, target_size)
        im = im.astype('uint8')
        ims.append(im)
        labels.append(int(words[1]))
        # Bounding-box regression targets: four floats per sample.
        landmark = words[2:6]
        landmark = list(map(float, landmark))
        landmark = np.array(landmark, dtype='float32')
        landmarks.append(landmark)
        if len(ims) % 500 == 0:
            print('pos data doing, total: {}'.format(len(ims)))
    # Shuffle the samples jointly so labels, images, and boxes stay aligned.
    landmark_data = list(zip(labels, ims, landmarks))
    random.shuffle(landmark_data)
    labels, ims, landmarks = zip(*landmark_data)
    save_dict_to_hdf5({
        'labels': labels,
        'ims': ims,
        'bbox': landmarks
    }, out_dir)
    print('pos data done, total: {}'.format(len(ims)))
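# The `resize` helper used above is not defined in this section; it is
# presumably a thin wrapper around cv2.resize. A minimal sketch under that
# assumption (the real helper may choose a different interpolation or handle
# non-square targets):

import cv2


def resize(im, target_size):
    """Resize an image to a square of target_size x target_size pixels."""
    return cv2.resize(im, (target_size, target_size),
                      interpolation=cv2.INTER_LINEAR)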
def create_landmark_dataset(net_name, landmark_anno, target_size, out_dir):
    """Build the facial-landmark dataset and save it as an HDF5 file.

    Each line of `landmark_anno` is expected to hold an image path, an integer
    label, and ten landmark coordinate values (five points). `net_name` is not
    used in this function.
    """
    ims = []
    landmarks = []
    labels = []
    for line in landmark_anno:
        words = line.strip().split()
        # Resolve the image path, appending '.jpg' if it is missing.
        if '.jpg' in words[0]:
            image_file_name = words[0]
        else:
            image_file_name = words[0] + '.jpg'
        im = cv2.imread(image_file_name)
        im = resize(im, target_size)
        im = im.astype('uint8')
        ims.append(im)
        labels.append(int(words[1]))
        # Landmark regression targets: ten floats per sample.
        landmark = words[2:12]
        landmark = list(map(float, landmark))
        landmark = np.array(landmark, dtype='float32')
        landmarks.append(landmark)
        if len(ims) % 500 == 0:
            print('landmarks data doing, total: {}'.format(len(ims)))
    # Shuffle the samples jointly so labels, images, and landmarks stay aligned.
    landmark_data = list(zip(labels, ims, landmarks))
    random.shuffle(landmark_data)
    labels, ims, landmarks = zip(*landmark_data)
    landmark_data_filename = os.path.join(out_dir, 'landmarks_shuffle.h5')
    save_dict_to_hdf5({
        'labels': labels,
        'ims': ims,
        'landmarks': landmarks
    }, landmark_data_filename)
    print('landmarks data done, total: {}'.format(len(ims)))
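# As a quick sanity check after building a dataset, the saved file can be read
# back with h5py. A hypothetical snippet: the key names follow the dict passed
# to save_dict_to_hdf5 above, and the exact shapes depend on how that helper
# writes its values.

import h5py

with h5py.File('landmarks_shuffle.h5', 'r') as f:
    print(list(f.keys()))        # expected: ['ims', 'labels', 'landmarks']
    print(f['ims'].shape)        # e.g. (N, target_size, target_size, 3)
    print(f['labels'][:10])      # first ten labels
    print(f['landmarks'].shape)  # e.g. (N, 10)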
            frame = frame / float(255)  # normalize
            frame = frame.astype('float32')
            # cv2.imshow('frame', frame)
            if gray:
                frame = np.tile(frame, (1, 1, 1))  # give it the channel dim
            video.append(frame)
            # import time
            # time.sleep(0.5)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cap.release()
        cv2.destroyAllWindows()
        video = u.stack(video)
        action_vids[vid] = video  # video, subsampled, evenly spaced, consecutive video
    pbar.finish()
    u.save_dict_to_hdf5(dataset=action_vids,
                        dataset_name=action + '_subsamp=' + str(subsample),
                        dataset_folder=out)
    # action_vids = np.vstack(action_vids)  # consecutive video
    # ACTUALLY THIS MIGHT NOT BE TRUE. WE NEED THE VIDEOS TO BE SEPARATE!
    # randomly permute -- don't do this!
    # tm1s = np.random.permutation(range(0, len(action_vids) - 1, 2))
    # ts = np.array([i + 1 for i in tm1s])
    # shuffle_idxs = list(it.next() for it in itertools.cycle([iter(tm1s), iter(ts)]))  # groups of 2
    # action_vids = action_vids[np.array(shuffle_idxs), :, :]
    # action_data[action] = action_vids
    # save
    # u.save_dict_to_hdf5(action_data, 'actions_2_frame_subsample_' + str(subsample), root)
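# For context, the fragment above sits inside a frame-reading loop over a
# cv2.VideoCapture object. A minimal, self-contained sketch of that
# surrounding loop, assuming an even-stride subsample and a grayscale flag;
# the names read_video_frames, subsample, and gray are illustrative, and
# np.stack stands in for the project's u.stack utility.

import cv2
import numpy as np


def read_video_frames(video_path, subsample=1, gray=True):
    """Read a video, keeping every `subsample`-th frame, normalized to [0, 1]."""
    cap = cv2.VideoCapture(video_path)
    video = []
    frame_idx = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break  # end of stream
        if frame_idx % subsample == 0:
            if gray:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            frame = frame / float(255)             # normalize
            frame = frame.astype('float32')
            if gray:
                frame = np.tile(frame, (1, 1, 1))  # add a leading channel dim
            video.append(frame)
        frame_idx += 1
    cap.release()
    return np.stack(video)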