def main():
    """CLI entry point: train a GAN-based unsupervised segmentation model.

    Parses command-line arguments (optionally merged with a JSON file given
    via --args), seeds torch, builds the BigGAN generator and a UNet,
    runs training and writes scores/artifacts under --out.
    """
    parser = argparse.ArgumentParser(description='GAN-based unsupervised segmentation train')
    parser.add_argument('--args', type=str, default=None, help='json with all arguments')

    parser.add_argument('--out', type=str, required=True)
    parser.add_argument('--gan_weights', type=str, default=BIGBIGAN_WEIGHTS)
    parser.add_argument('--bg_direction', type=str, required=True)
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--gen_devices', type=int, nargs='+', default=[1,])
    parser.add_argument('--seed', type=int, default=2)

    parser.add_argument('--z', type=str, default=None)
    parser.add_argument('--z_noise', type=float, default=0.0)
    parser.add_argument('--val_images_dirs', nargs='*', type=str, default=[None])
    parser.add_argument('--val_masks_dirs', nargs='*', type=str, default=[None])

    # Expose every SegmentationTrainParams field as a CLI flag;
    # 'synthezing' is parsed as a raw string rather than its native type.
    for key, val in SegmentationTrainParams().__dict__.items():
        val_type = type(val) if key != 'synthezing' else str
        parser.add_argument('--{}'.format(key), type=val_type, default=None)

    args = parser.parse_args()

    # BUGFIX: merge the JSON overrides BEFORE consuming seed/device.
    # Previously the seed and CUDA device were set from the CLI defaults
    # first, so 'seed'/'device' values in the --args JSON were silently
    # ignored.
    if args.args is not None:
        with open(args.args) as args_json:
            args_dict = json.load(args_json)
            args.__dict__.update(**args_dict)

    torch.random.manual_seed(args.seed)
    torch.cuda.set_device(args.device)

    if (args.z is not None) and (not os.path.isfile(args.z)):
        print('No valid z-embeddings file is provided.')
        args.z = None

    # Persist the effective run configuration and the launch command
    # so the experiment can be reproduced later.
    if not os.path.isdir(args.out):
        os.makedirs(args.out)
    with open(os.path.join(args.out, 'args.json'), 'w') as args_file:
        json.dump(args.__dict__, args_file)
    with open(os.path.join(args.out, 'command.sh'), 'w') as command_file:
        command_file.write(' '.join(sys.argv))
        command_file.write('\n')

    # Frozen generator; the UNet is the only trainable model.
    G = make_big_gan(args.gan_weights).eval().cuda()
    bg_direction = torch.load(args.bg_direction)

    model = UNet().train().cuda()
    train_params = SegmentationTrainParams(**args.__dict__)
    print('run train with p: {}'.format(train_params.__dict__))

    synthetic_score = train_segmentation(
        G, bg_direction, model, train_params, args.out,
        args.gen_devices, val_dirs=[args.val_images_dirs[0], args.val_masks_dirs[0]],
        zs=args.z, z_noise=args.z_noise)

    score_json = os.path.join(args.out, 'score.json')
    update_out_json({'synthetic': synthetic_score}, score_json)
    print('Synthetic data score: {}'.format(synthetic_score))

    # NOTE(review): default val_images_dirs is [None], so this condition is
    # always true even when no validation dirs were supplied — presumably
    # evaluate_all_wrappers tolerates [None]; confirm before tightening.
    if len(args.val_images_dirs) > 0:
        evaluate_all_wrappers(model, score_json,
                              args.val_images_dirs, args.val_masks_dirs)
def main():
    """CLI entry point: evaluate a pre-trained UNet on validation sets.

    Loads UNet weights from --unet_weights and runs evaluate_all_wrappers
    over the given validation image/mask directories.

    NOTE(review): this redefines main() from the training snippet above —
    an artifact of concatenated examples; kept as-is to preserve the
    entry-point name.
    """
    parser = argparse.ArgumentParser(
        description='GAN-based unsupervised segmentation train')
    parser.add_argument('--unet_weights', type=str, default="")
    parser.add_argument('--seed', type=int, default=2)
    parser.add_argument('--val_images_dirs',
                        nargs='*',
                        type=str,
                        default=[None])
    parser.add_argument('--val_masks_dirs',
                        nargs='*',
                        type=str,
                        default=[None])

    args = parser.parse_args()

    # BUGFIX: --seed was parsed but never applied; seed torch so any
    # stochastic parts of evaluation are reproducible.
    torch.random.manual_seed(args.seed)

    model = UNet().train().cuda()
    model.load_state_dict(torch.load(args.unet_weights))
    evaluate_all_wrappers(model, args.val_images_dirs, args.val_masks_dirs)
# Exemplo n.º 3  (scraped-example separator; original listing score: 0)
    # NOTE(review): fragment of a third example — the parser construction and
    # the --dataset_name/--device/--unet_ckpt/--yolo_ckpt arguments are defined
    # above this chunk, and the function continues past it.
    parser.add_argument("--output_dir",
                        type=str,
                        default="output",
                        help="directory saving prediction results")
    opt = parser.parse_args()

    # Redirect stdout into a per-run log file named after dataset and device.
    logfile = 'logs/predict/' + opt.dataset_name + '_' + opt.device + '.log'
    sys.stdout = Logger(logfile)
    print(opt)

    device = torch.device(opt.device)

    # UNet segmentation model: single-channel input/output, checkpoint stores
    # weights under a 'state_dict' key.
    output_dir_unet = opt.output_dir + '/unet_segmentation/' + opt.dataset_name
    os.makedirs(output_dir_unet, exist_ok=True)
    unet_path = 'UNet/checkpoints/' + opt.unet_ckpt
    model_unet = UNet(n_channels=1, n_classes=1).to(device=device)
    model_unet.load_state_dict(torch.load(unet_path)['state_dict'])
    model_unet.eval()

    # YOLOv3 detector with a single 'vein' class; its checkpoint is a raw
    # state dict (no wrapper key, unlike the UNet checkpoint above).
    output_dir_yolo = opt.output_dir + '/yolo_detection/' + opt.dataset_name
    os.makedirs(output_dir_yolo, exist_ok=True)
    classes = ['vein']
    yolo_path = 'YOLOv3/checkpoints/' + opt.yolo_ckpt
    model_yolo = Darknet('YOLOv3/config/yolov3-custom.cfg',
                         img_size=416).to(device=device)
    model_yolo.load_state_dict(torch.load(yolo_path))
    model_yolo.eval()

    image_folder = 'DATA/' + opt.dataset_name + '/imgs'
    image_files = [x for x in os.listdir(image_folder)
                   if x.endswith('.jpg')]  # only jpg files