def train(args):
    """Load the YAML training config, split data chunks, and launch training.

    Args:
        args: Parsed CLI namespace. ``args.cfg`` is passed straight to
            ``yaml.safe_load`` — NOTE(review): if ``cfg`` is a *path string*
            rather than an open file object, ``safe_load`` would parse the
            path text itself instead of the file contents; confirm the
            argparse ``type=`` used for this option.

    Side effects:
        Creates ``cfg.output`` on disk, prints the resolved config and the
        train/test split sizes, and constructs a ``TensorWorker`` (which
        presumably drives the training run — TODO confirm).
    """
    yaml_file = yaml.safe_load(args.cfg)
    # Echo the fully-resolved configuration so a run is reproducible from its logs.
    print(yaml.dump(yaml_file, default_flow_style=False))
    cfg = TrainingConfig(yaml_file)

    # exist_ok=True avoids the check-then-create race of the original
    # `if not os.path.exists(...): os.makedirs(...)` LBYL pattern.
    os.makedirs(cfg.output, exist_ok=True)

    chunks = get_all_chunks(cfg.input)
    print("Found {} files".format(len(chunks)))

    # Deterministic prefix/suffix split: the first train_ratio fraction of the
    # chunk list trains, the remainder tests (no shuffling is done here).
    num_train_chunks = int(len(chunks) * cfg.train_ratio)
    training_chunks = chunks[:num_train_chunks]
    test_chunks = chunks[num_train_chunks:]
    print("Chunks Training({}) Testing({})".format(len(training_chunks), len(test_chunks)))

    train_loader = BatchLoader(training_chunks, cfg)
    test_loader = BatchLoader(test_chunks, cfg)

    worker = TensorWorker(cfg, train_loader, test_loader)
    print()
t_class = tf.squeeze(t_class, axis=-1) # Compute map cal_map(p_bbox, p_labels, p_scores, np.zeros((138, 138, len(p_bbox))), np.array(t_bbox), np.array(t_class), np.zeros((138, 138, len(t_bbox))), ap_data, iou_thresholds) print(f"Computing map.....{it}", end="\r") it += 1 #if it > 100: # break # Compute the mAp over all thresholds calc_map(ap_data, iou_thresholds, class_names, print_result=True) if __name__ == "__main__": physical_devices = tf.config.list_physical_devices('GPU') tf.config.experimental.set_memory_growth(physical_devices[0], True) config = TrainingConfig() args = training_config_parser().parse_args() config.update_from_args(args) # Load the model with the new layers to finetune detr = build_model(config) valid_dt = load_coco("val", 1, config, augmentation=None) # Run training eval_model(detr, config, CLASS_NAME, valid_dt)