import os
import shutil
import tempfile

import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt


def evaluate_multi(database_path, user, target_ncolors, impaths, outdir):
    pngquant = PngQuant()
    scorer = NonLinearTransform(user, database_path=database_path)
    scorer.fit()
    specimen = SpecimenQuant(scorer, quantizer=pngquant)
    # compressed specimens go to a temporary directory; only the CSV and
    # plot are written to outdir
    tempdir = tempfile.mkdtemp()
    alphas = np.arange(0.0, 1.1, 0.1)
    acc = []
    for impath in impaths:
        for alpha in alphas:
            print("%s alpha = %f" % (impath, alpha))
            specimen_impath = specimen.compress(impath, target_ncolors, alpha, outdir=tempdir)
            res = evaluate_image(impath, specimen_impath)
            res['alpha'] = alpha
            res['image'] = impath
            acc.append(res)
    df = pd.DataFrame(acc)
    df.to_csv(os.path.join(outdir, 'multi_objective_eval.csv'))
    # normalize each file size against the alpha = 1.0 output for the same image
    size_at_1 = df[df['alpha'] == 1.0][['image', 'fs']].rename(columns={'fs': 'denom'})
    df = pd.merge(df, size_at_1, on=['image'])
    df['fs_decrease'] = df['fs'] / df['denom'] - 1
    df_summary = df.groupby('alpha')[['fs_decrease']].mean().reset_index()
    alpha_plot = sns.pointplot(data=df_summary, x='alpha', y='fs_decrease')
    alpha_plot.set_xlabel(r'$\alpha$ (Multi-Objective Weight)', size=15)
    alpha_plot.set_ylabel(r'Average File Size Decrease Relative to Output For $\alpha=1$', size=15)
    alpha_plot.tick_params(labelsize=15)
    plt.tight_layout()
    plot_path = os.path.join(outdir, 'multi_objective_eval.pdf')
    alpha_plot.get_figure().savefig(plot_path)
    # clean up the temporary directory created for images
    shutil.rmtree(tempdir)
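# A minimal driver sketch for evaluate_multi. The database path, user id,
# palette size, and image glob below are placeholders (assumptions), not
# values from the original project.
import glob

if __name__ == '__main__':
    evaluate_multi(
        database_path='scores.db',   # assumed scorer database
        user='user0',                # assumed user identifier
        target_ncolors=64,           # palette size handed to pngquant
        impaths=sorted(glob.glob('images/*.png')),
        outdir='results',
    )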
def evaluate_review(env, image, nms_params, review):
    model = env.best.model.to(env.device)
    encoder = env.encoder.to(env.device)
    scale = env.args.scale
    detections = evaluate.evaluate_image(model, image, encoder, device=env.device,
                                         nms_params=nms_params).detections
    if detections._size == 0:
        return make_detections(env, [])
    review = tensors_to(review, device=env.device)
    ious = box.iou_matrix(detections.bbox, review.bbox * scale)
    # suppress pairs below the IoU threshold; masked_fill_ modifies ious in
    # place (indexing with a boolean mask and calling fill_ would only fill
    # a copy)
    ious.masked_fill_(ious < nms_params.nms, -1)
    scores = ious.mul(detections.confidence.unsqueeze(1))
    review_inds = scores.max(0).values.argsort(descending=True)
    detections = table_list(detections)
    # greedily assign each review box to its best-scoring detection
    for i in review_inds.tolist():
        score, ind = scores[:, i].max(0)
        if score > 0:
            detections[ind].match = i
            scores[ind].fill_(0)
    return make_detections(env, detections)
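# The matching loop above, restated as a self-contained sketch: review
# boxes are greedily assigned to detections by confidence-weighted IoU,
# and each detection may claim at most one review box. Pure PyTorch; the
# function name and signature are illustrative only.
import torch

def greedy_match(ious: torch.Tensor, confidence: torch.Tensor, threshold: float) -> dict:
    # ious: (n_detections, n_reviews); confidence: (n_detections,)
    scores = ious.masked_fill(ious < threshold, -1.0) * confidence.unsqueeze(1)
    matches = {}
    # visit review columns in order of their best score, highest first
    for j in scores.max(0).values.argsort(descending=True).tolist():
        score, i = scores[:, j].max(0)
        if score > 0:
            matches[int(i)] = j  # detection i claims review box j
            scores[i] = 0.0      # remove detection i from further matching
    return matches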
def evaluate_vis(model, encoder, data, nms_params, classes, args, debug_key=None, iou=0.5):
    with torch.no_grad():
        result = evaluate.evaluate_image(model, data.image, encoder, device=device,
                                         nms_params=nms_params)
        print(shape(result))  # debug: show the structure of the result
        target = data.target._map(Tensor.to, result.detections._device)
        matches = match_boxes(result.detections, target, threshold=iou)
        scores = mAP_matches(matches, target.label.size(0))
        debug = encoder.debug(data.image, target, result.prediction, classes)
        return struct(
            image=data.image, file=data.file, id=data.id,
            image_size=data.image_size, matches=matches,
            target=data.target, detections=result.detections,
            stats=image_stats(data.image), mAP=scores.mAP,
            debug=debug[debug_key] if debug_key else None)
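# mAP_matches is project code; as a reference point, here is a generic
# sketch of average precision from matched detections (area under the
# precision-recall curve). `matched` marks true positives among the
# detections. An illustration, not the project's implementation.
import torch

def average_precision(matched: torch.Tensor, confidence: torch.Tensor, n_targets: int) -> float:
    order = confidence.argsort(descending=True)
    tp = matched[order].float().cumsum(0)
    fp = (~matched[order]).float().cumsum(0)
    recall = tp / max(n_targets, 1)
    precision = tp / (tp + fp)
    # integrate precision over recall
    return torch.trapz(precision, recall).item()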
def evaluate_detections(env, image, nms_params):
    model = env.best.model
    detections = evaluate.evaluate_image(model.to(env.device), image,
                                         env.encoder.to(env.device),
                                         nms_params=nms_params, device=env.device).detections
    return make_detections(list(detections._sequence()))
def f(image, nms_params=detection_table.nms_defaults):
    return evaluate_image(model, image, encoder, nms_params=nms_params, device=device).detections
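# The closure binds model, encoder, and device, then unwraps .detections.
# A functools.partial variant would bind the same arguments (assuming
# those names are in scope and the keyword parameters are named as
# below) but returns the full result, so the caller unwraps .detections:
from functools import partial

g = partial(evaluate_image, model, encoder=encoder, device=device)
detections = g(image, nms_params=detection_table.nms_defaults).detections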
print("model parameters:") pprint_struct(model_args) classes = model_args.dataset.classes model.to(device) encoder.to(device) frame = cv.imread_color(args.input) nms_params = detection_table.nms_defaults._extend(nms=args.threshold) pprint_struct(nms_params) detections = evaluate_image(model, frame, encoder, nms_params=nms_params, device=device, crop_boxes=True) for prediction in detections._sequence(): if prediction.confidence > 0.7: label_class = classes[prediction.label].name display.draw_box(frame, prediction.bbox, confidence=prediction.confidence, name=label_class.name, color=display.to_rgb(label_class.colour)) frame = cv.resize(frame, (frame.size(1) // 2, frame.size(0) // 2)) cv.display(frame)
print_timer("load", len(images), start) if args.tensorrt: print("compiling with tensorrt...") from torch2trt import torch2trt x = torch.ones(1, 3, int(size[1]), int(size[0])).to(device) model = torch2trt(model, [x], fp16_mode=True) print("done") dummy = torch.ones(1, 3, int(size[1]), int(size[0])).to(device) model(dummy) start = time() for i in range(len(images)): dummy = torch.ones(1, 3, int(size[1]), int(size[0])).to(device) model(dummy) print_timer("model only", len(images), start) start = time() for image in images: detections = evaluate_image(model, image, encoder, nms_params=nms_params, device=device).detections print_timer("evaluate_image", len(images), start)
import csv
import os
import sys
import time

import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter


def train_ensemble(total_epochs, idx, last_epoch, test_every, model, device,
                   crit_reg, optimizer, scheduler, output_path):
    from train import train_image_reg
    from inference import inference_image_reg

    # training results are saved to output_path/<timestamp>-image-training.csv
    fconv = open(os.path.join(output_path, '{}-image-training.csv'.format(now)), 'w')
    fconv.write('epoch,image_reg_loss\n')
    fconv.close()

    if test_every <= args.epochs:
        # validation results are saved to output_path/<timestamp>-image-validation.csv
        fconv = open(os.path.join(output_path, '{}-image-validation.csv'.format(now)), 'w')
        fconv.write('epoch,mse,qwk\n')
        fconv.close()

    validate = lambda epoch, test_every: (epoch + 1) % test_every == 0
    start = int(time.time())

    with SummaryWriter(comment=output_path.rsplit('/', maxsplit=1)[-1]) as writer:
        print("PT.I - image regression training ...")
        for epoch in range(1 + last_epoch, total_epochs + 1):
            try:
                if device.type == 'cuda':
                    torch.cuda.manual_seed(epoch)
                else:
                    torch.manual_seed(epoch)

                data.setmode(True, 5)
                train_loader = data.get_loader(True, idx, batch_size=args.image_batch_size,
                                               num_workers=args.workers, collate_fn=collate_fn)
                loss = train_image_reg(train_loader, epoch, total_epochs, model, device,
                                       crit_reg, optimizer, scheduler)
                print("image reg loss: {:.4f}".format(loss))
                fconv = open(os.path.join(output_path, '{}-image-training.csv'.format(now)), 'a')
                fconv.write('{},{}\n'.format(epoch, loss))
                fconv.close()
                writer.add_scalar("image reg loss", loss, epoch)

                # validation step
                if validate(epoch, test_every):
                    print('Validating ...')
                    data.setmode(False, 4)
                    val_loader = data.get_loader(False, idx, batch_size=args.image_batch_size,
                                                 num_workers=args.workers)
                    counts = inference_image_reg(val_loader, model, device, epoch, total_epochs)

                    # per-image counts and absolute errors
                    regconv = open(os.path.join(output_path, '{}-count-e{}.csv'.format(now, epoch)),
                                   'w', newline="")
                    w = csv.writer(regconv, delimiter=',')
                    w.writerow(['id', 'organ', 'label', 'count', 'category label', 'loss'])
                    for i, count in enumerate(np.round(counts).astype(int)):
                        w.writerow([i + 1, data.validating_set.organs[i],
                                    data.validating_set.labels[i], count,
                                    data.validating_set.cls_labels[i],
                                    np.abs(count - data.validating_set.labels[i])])
                    regconv.close()

                    metrics_i = evaluate_image(data.validating_set, [], counts)
                    print('image categories mAP: {} | MSE: {} | QWK: {}\n'.format(*metrics_i))
                    fconv = open(os.path.join(output_path, '{}-image-validation.csv'.format(now)), 'a')
                    fconv.write('{},{},{}\n'.format(epoch, *metrics_i[1:]))
                    fconv.close()
                    add_scalar_metrics(writer, epoch, metrics_i)

                save_model(epoch, model, optimizer, scheduler, output_path,
                           prefix='reg_pt1_{}'.format(idx))
            except KeyboardInterrupt:
                save_model(epoch, model, optimizer, scheduler, output_path,
                           prefix='reg_pt1_{}'.format(idx))
                print("\nTraining interrupted at epoch {}. Model saved in '{}'.".format(epoch, output_path))
                sys.exit(0)

    end = int(time.time())
    print("\nTrained for {} epochs. Model saved in '{}'. Runtime: {}s".format(
        total_epochs, output_path, end - start))
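# evaluate_image here returns (mAP, mse, qwk) for the counting task, per
# the log line above. A sketch of the MSE / QWK pair using standard
# scikit-learn calls (the function and argument names are illustrative,
# not the project's):
import numpy as np
from sklearn.metrics import cohen_kappa_score, mean_squared_error

def count_metrics(labels, counts, cls_labels, cls_preds):
    mse = mean_squared_error(labels, np.round(counts))
    # quadratic weighted kappa over the ordinal category labels
    qwk = cohen_kappa_score(cls_labels, cls_preds, weights='quadratic')
    return mse, qwk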