import json
import os
import random

import cv2
import numpy as np

# ClassUtils, ClassDescriptors and EnumDesc are project-level helpers and are
# assumed to be imported from the project's own modules.


def reprocess_images(list_folder_data):
    print('Init reprocess_images')

    for index, item in enumerate(list_folder_data):
        folder = item[0]
        min_score = item[1]

        print('Processing folder: {0}'.format(folder))

        list_files = os.listdir(folder)
        # 'seed' is expected to be defined at module level (same shuffle seed
        # used as cls.seed in load_pose_descriptors below)
        random.Random(seed).shuffle(list_files)

        for num_file, filename in enumerate(list_files):
            file = os.path.join(folder, filename)
            extension = ClassUtils.get_filename_extension(file)

            if extension == '.json':
                # Read the person vectors from the JSON file
                with open(file, 'r') as f:
                    data_str = f.read()
                data_json = json.loads(data_str)

                if 'vectors' in data_json:
                    print('Processing json file with new format: {0}'.format(file))
                    person_arr = data_json['vectors']
                else:
                    print('Processing json file: {0}'.format(file))
                    person_arr = data_json

                # Validate the pose vector before computing descriptors
                valid = ClassUtils.check_vector_integrity_pos(person_arr, min_score)
                only_pos = ClassUtils.check_vector_only_pos(person_arr, min_score)

                if not valid:
                    raise Exception('Vector integrity not valid for file: {0}'.format(file))
                if only_pos:
                    raise Exception('Invalid vector to perform detection')

                # Recompute descriptors and overwrite the JSON file in place
                descriptors = ClassDescriptors.get_person_descriptors(person_arr, min_score,
                                                                      cam_number=0,
                                                                      image=None,
                                                                      calib_params=None,
                                                                      decode_img=False,
                                                                      instance_nn_pose=None)

                with open(file, 'w') as f:
                    f.write(json.dumps(descriptors))

                transformed_points = descriptors['transformedPoints']

                # Save pose image for debugging purposes
                re_scale_factor = 100
                new_points = ClassDescriptors.re_scale_pose_factor(transformed_points,
                                                                   re_scale_factor, min_score)
                img_pose = ClassDescriptors.draw_pose_image(new_points, min_score, is_transformed=True)

                new_file_name = ClassUtils.get_filename_no_extension(file) + '_1.jpg'
                cv2.imwrite(new_file_name, img_pose)

    print('Done!')
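# --- Usage sketch (illustrative only; folder paths and min_score values are hypothetical) ---
# reprocess_images expects a list of (folder, min_score) pairs, matching the
# item[0] / item[1] accesses above. A call could look like:
#
#   reprocess_images([
#       ('/data/poses/class_a', 0.05),
#       ('/data/poses/class_b', 0.05),
#   ])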
def load_pose_descriptors(cls, type_desc: EnumDesc):
    training_data = list()
    training_labels = list()
    training_files = list()
    eval_data = list()
    eval_labels = list()
    eval_files = list()
    classes_number = 0

    # Select the folder list that matches the requested descriptor type
    if type_desc == EnumDesc.ANGLES or type_desc == EnumDesc.ANGLES_TRANSFORMED:
        data_folder = cls.list_folder_data_angles
    else:
        data_folder = cls.list_folder_data

    # Count the distinct classes declared in the folder list (labels 0, 1, 2, ...)
    cont = True
    while cont:
        cont = False
        for folder_data in data_folder:
            if folder_data[2] == classes_number:
                classes_number += 1
                cont = True
                break

    # Iterate folders
    for index, item in enumerate(data_folder):
        folder = item[0]
        min_score = item[1]
        label = item[2]

        list_files = os.listdir(folder)
        random.Random(cls.seed).shuffle(list_files)

        # 70/30 train/eval split over the shuffled file list
        total_train = int(len(list_files) * 70 / 100)

        for num_file, file in enumerate(list_files):
            full_path = os.path.join(folder, file)
            extension = ClassUtils.get_filename_extension(full_path)

            if extension != '.json':
                print('Ignoring file {0}'.format(full_path))
                continue

            with open(full_path, 'r') as text_file:
                arr_json = text_file.read()

            params = json.loads(arr_json)

            vectors = params['vectors']
            angles = params['angles']
            transformed_points = params['transformedPoints']

            # Validate the pose vector before building descriptors
            valid = ClassUtils.check_vector_integrity_pos(vectors, min_score)
            only_pos = ClassUtils.check_vector_only_pos(vectors, min_score)

            if not valid:
                raise Exception('Vector integrity not valid for file: {0}'.format(full_path))
            if only_pos:
                raise Exception('Invalid vector to perform detection')

            # Fill training and eval lists
            # Use angle and position information
            data_to_add = cls._get_descriptor_list(angles, transformed_points, type_desc)

            if num_file < total_train:
                training_data.append(data_to_add)
                training_labels.append(label)
                training_files.append(full_path)
            else:
                eval_data.append(data_to_add)
                eval_labels.append(label)
                eval_files.append(full_path)

    # Convert data to numpy arrays
    # (plain float/str dtypes: the np.float and np.str aliases were removed from NumPy)
    training_data_np = np.asanyarray(training_data, dtype=float)
    training_labels_np = np.asanyarray(training_labels, dtype=int)
    print(training_labels_np)

    eval_data_np = np.asanyarray(eval_data, dtype=float)
    eval_labels_np = np.asanyarray(eval_labels, dtype=int)
    print(eval_labels_np)

    training_files_np = np.asanyarray(training_files, dtype=str)
    eval_files_np = np.asanyarray(eval_files, dtype=str)

    # Derive label names from the last component of each folder path
    label_names = []
    for folder, _, label in data_folder:
        names = folder.split('/')
        label_name = names[-1]

        # If the path ends with '/', the last component is empty: use the one before it
        if len(label_name) == 0:
            label_name = names[-2]

        label_names.append((label_name, label))

    print('Total training: {0}'.format(len(training_labels)))
    print('Total eval: {0}'.format(len(eval_labels)))
    print('Shape training: {0}'.format(training_data_np.shape))
    print('Shape eval: {0}'.format(eval_data_np.shape))
    print('Classes number: {0}'.format(classes_number))

    results = {
        'trainingData': training_data_np,
        'trainingLabels': training_labels_np,
        'evalData': eval_data_np,
        'evalLabels': eval_labels_np,
        'trainingFiles': training_files_np,
        'evalFiles': eval_files_np,
        'labelNames': label_names,
        'classesNumber': classes_number
    }

    return results
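# --- Usage sketch (illustrative only) ---
# load_pose_descriptors takes cls as its first parameter, so it is meant to be
# called on its containing class (name not shown in this section; 'SomeDescriptorClass'
# below is hypothetical). The returned dict can be unpacked for a downstream training step:
#
#   results = SomeDescriptorClass.load_pose_descriptors(EnumDesc.ANGLES)
#   x_train, y_train = results['trainingData'], results['trainingLabels']
#   x_eval, y_eval = results['evalData'], results['evalLabels']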