def predict_one(inputs, dir_save, threshold, threshold2, overlap):
    viz = Visualizer('lib/colors.json')
    i, target_name, numpy_batch_num = inputs

    # run suppression on the raw network output for this sample
    list_predicted_img, pred_coords = suppression(
        numpy_batch_num, dbox_params, threshold, threshold2, overlap)

    # build the per-sample submission rows
    sub_i = pd.DataFrame()
    sub_i['token'] = [target_name] * len(pred_coords)
    sub_i_coord = pd.DataFrame(
        pred_coords,
        columns=['x', 'y', 'length', 'width', 'rotate', 'z', 'height', 'class', 'confidence'])
    sub_i = pd.concat([sub_i, sub_i_coord], axis=1)
    sub_i['name'] = [viz.names[c] for c in sub_i['class']]

    # save debug visualizations for the first 50 samples only
    if i < 50:
        img_predicted = viz.draw_predicted_boxes(
            numpy_batch_num, dbox_params, rate=1.0, img_size=2048)
        numpy2pil(img_predicted).save(
            dir_save / '{}_predicted_{}.png'.format(i, target_name[:12]))

        img_submit = viz.draw_mask_from_coords(sub_i_coord, img_size=2048)
        numpy2pil(img_submit).save(
            dir_save / '{}_submit_{}_{}.png'.format(i, target_name[:12], len(list_predicted_img)))

    return sub_i
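# --- Hypothetical usage sketch (not part of the original code) ---
# `predict_one` returns one submission DataFrame per sample, so it can be
# mapped over per-sample network outputs. `run_test_prediction`, the
# `list_inputs` layout (tuples of (index, sample_token, numpy_batch)) and the
# threshold values below are assumptions for illustration only.
from functools import partial
from multiprocessing import Pool

def run_test_prediction(list_inputs, dir_save):
    fn = partial(predict_one, dir_save=dir_save,
                 threshold=0.5, threshold2=0.3, overlap=0.1)
    with Pool(4) as pool:  # suppression is CPU/numpy work, safe to parallelize
        subs = pool.map(fn, list_inputs)
    return pd.concat(subs, ignore_index=True)  # one row per predicted box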
viz = Visualizer('colors.json')

# 768 x 768
in_arr1 = np.zeros((2, 3, 768, 768), dtype=np.float32)
in_tensor1 = torch.from_numpy(in_arr1)
out_vars1 = model.forward(in_tensor1.cuda())
for out_var in out_vars1:
    print(out_var.shape)

out_var_numpy1 = [tensor.cpu().data.numpy() for tensor in out_vars1]
out_var_numpy_batch1 = [[tensor[b, :, :, :] for tensor in out_var_numpy1] for b in range(2)]

img = viz.draw_predicted_boxes(out_var_numpy_batch1[0], dbox_params, img_size=in_arr1.shape[-1])
numpy2pil(img).save(dir_debug / 'sample_1-0.png')
img = viz.draw_predicted_boxes(out_var_numpy_batch1[1], dbox_params, img_size=in_arr1.shape[-1])
numpy2pil(img).save(dir_debug / 'sample_1-1.png')

# 1024 x 1024
in_arr2 = np.zeros((2, 3, 1024, 1024), dtype=np.float32)
in_tensor2 = torch.from_numpy(in_arr2)
out_vars2 = model.forward(in_tensor2.cuda())
for out_var in out_vars2:
    print(out_var.shape)

out_var_numpy2 = [tensor.cpu().data.numpy() for tensor in out_vars2]
out_var_numpy_batch2 = [[tensor[b, :, :, :] for tensor in out_var_numpy2] for b in range(2)]
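# --- Hypothetical continuation (assumption, mirrors the 768 x 768 step above) ---
# The original excerpt stops after converting the 1024 x 1024 outputs to numpy;
# rendering them would follow the same pattern. The file names are placeholders.
img = viz.draw_predicted_boxes(out_var_numpy_batch2[0], dbox_params, img_size=in_arr2.shape[-1])
numpy2pil(img).save(dir_debug / 'sample_2-0.png')  # placeholder file name
img = viz.draw_predicted_boxes(out_var_numpy_batch2[1], dbox_params, img_size=in_arr2.shape[-1])
numpy2pil(img).save(dir_debug / 'sample_2-1.png')  # placeholder file name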
def validate(model, dataset, list_valid_img, target_classes, epoch):
    dir_save = Path('./_valid/ep{}'.format(epoch))
    dir_save.parent.mkdir(exist_ok=True)
    dir_save.mkdir(exist_ok=True)

    model.eval()

    dataset = SampleDataset(
        dir_img=f'../../input/{dataset}/train',
        coord_path=f'../../input/{dataset}/coordinates.csv',
        use_augmentation=epoch < 20,
        list_imgs=list_valid_img,
        crop_type=0,
        target_classes=target_classes)
    viz = Visualizer('lib/colors.json')

    for target_name in list_valid_img[:16]:
        img_input, found_coord, original = dataset[target_name]
        writer.add_image('original/{}'.format(target_name[:12]),
                         np.asarray(original), epoch, dataformats='HWC')
        original.save(dir_save / '{}_original.png'.format(target_name[:12]))

        input_tensor = torch.unsqueeze(img_input, 0)
        # input_tensor = augment_input(img_input)

        with torch.no_grad():
            net_out = model.forward(input_tensor.float().cuda())

        net_out_numpy = [tensor.cpu().data.numpy() for tensor in net_out]
        net_out_numpy_batch = [tensor[0, :, :, :] for tensor in net_out_numpy]
        # net_out_numpy_batch = aggregate_output(net_out_numpy)

        # predicted boxes drawn directly from the raw network output
        img_predicted = viz.draw_predicted_boxes(
            net_out_numpy_batch, dbox_params, rate=1.0, img_size=original.height)
        writer.add_image('predicted/{}'.format(target_name[:12]), img_predicted, epoch)
        numpy2pil(img_predicted).save(
            dir_save / '{}_predicted.png'.format(target_name[:12]))

        # ground-truth mask from the annotated coordinates
        mask = viz.draw_mask_from_coords(found_coord, img_size=original.height)
        writer.add_image('mask/{}'.format(target_name[:12]), mask, epoch)
        numpy2pil(mask).save(dir_save / '{}_mask.png'.format(target_name[:12]))

        # suppressed predictions, rendered the same way as a submission would be
        list_predicted_img, list_pred_coords = suppression(
            net_out_numpy_batch, dbox_params, 0.5, 0.3, 0.1)
        pred_coords_df = pd.DataFrame(list_pred_coords, columns=[
            'x', 'y', 'length', 'width', 'rotate', 'z', 'height', 'class', 'confidence'
        ])
        img_submit = viz.draw_mask_from_coords(pred_coords_df, img_size=original.height)
        writer.add_image('submit/{}'.format(target_name[:12]), img_submit, epoch)
        numpy2pil(img_submit).save(dir_save / '{}_submit_{}.png'.format(
            target_name[:12], len(list_predicted_img)))
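# --- Hypothetical call site (assumption, not part of the original module) ---
# `validate` writes its images to ./_valid/ep{epoch} and to TensorBoard, so it
# is naturally called once per epoch. `run_validation` and `dataset_name` are
# placeholder names for illustration only; `dataset_name` is the directory
# under ../../input/ that holds train/ and coordinates.csv.
def run_validation(model, dataset_name, list_valid_img, target_classes, num_epochs):
    for epoch in range(num_epochs):
        validate(model, dataset_name, list_valid_img, target_classes, epoch)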