Example #1
    # NOTE(review): fragment begins mid-loop — the `for ... in dataset:` header
    # and the `if opt.data_type == 16:` branch precede this view.
    elif opt.data_type == 8:
        # NOTE(review): torch.Tensor exposes .byte()/.type(torch.uint8), not
        # .uint8() — this branch likely raises AttributeError; verify.
        data['label'] = data['label'].uint8()
        data['inst'] = data['inst'].uint8()
    # Optional one-shot export of the model to ONNX, then terminate the run.
    if opt.export_onnx:
        print("Exporting to ONNX: ", opt.export_onnx)
        assert opt.export_onnx.endswith(
            "onnx"), "Export model file should end with .onnx"
        torch.onnx.export(model, [data['label'], data['inst']],
                          opt.export_onnx,
                          verbose=True)
        exit(0)
    minibatch = 1
    # Pick the inference backend: TensorRT engine, ONNX runtime, or PyTorch.
    if opt.engine:
        generated = run_trt_engine(opt.engine, minibatch,
                                   [data['label'], data['inst']])
    elif opt.onnx:
        generated = run_onnx(opt.onnx, opt.data_type, minibatch,
                             [data['label'], data['inst']])
    else:
        generated = model.inference(data['label'], data['inst'])

    # Convert tensors to displayable images and save them via the visualizer.
    visuals = OrderedDict([
        ('input_label', util.tensor2label(data['label'][0], opt.label_nc)),
        ('synthesized_image', util.tensor2im(generated.data[0]))
    ])
    img_path = data['path']
    print('process image... %s' % img_path)
    visualizer.save_images(webpage, visuals, img_path)

webpage.save()
Example #2
# Iterate the test set, run inference on each sample, and save the results.
for i, data in enumerate(dataset):
    if i >= opt.how_many:
        break
    # Cast inputs to match the model's precision (fp16 or uint8).
    if opt.data_type == 16:
        data['label'] = data['label'].half()
        data['inst'] = data['inst'].half()
    elif opt.data_type == 8:
        # Fix: torch.Tensor has no .uint8() method (AttributeError at runtime);
        # use .type(torch.uint8), consistent with model.type(torch.uint8).
        data['label'] = data['label'].type(torch.uint8)
        data['inst'] = data['inst'].type(torch.uint8)
    # Optional one-shot export of the model to ONNX, then terminate the run.
    if opt.export_onnx:
        print("Exporting to ONNX: ", opt.export_onnx)
        assert opt.export_onnx.endswith("onnx"), \
            "Export model file should end with .onnx"
        torch.onnx.export(model, [data['label'], data['inst']],
                          opt.export_onnx, verbose=True)
        exit(0)
    minibatch = 1
    # Pick the inference backend: TensorRT engine, ONNX runtime, or PyTorch.
    if opt.engine:
        generated = run_trt_engine(opt.engine, minibatch,
                                   [data['label'], data['inst']])
    elif opt.onnx:
        generated = run_onnx(opt.onnx, opt.data_type, minibatch,
                             [data['label'], data['inst']])
    else:
        generated = model.inference(data['label'], data['inst'])

    # Convert tensors to displayable images and save them via the visualizer.
    visuals = OrderedDict([
        ('input_label', util.tensor2label(data['label'][0], opt.label_nc)),
        ('synthesized_image', util.tensor2im(generated.data[0])),
    ])
    img_path = data['path']
    print('process image... %s' % img_path)
    visualizer.save_images(webpage, visuals, img_path)

webpage.save()
Example #3
def infer(n, image_label_path, image_inst_path):
    """Run label-to-image inference on a single (label map, instance map) pair.

    Args:
        n: index used by the visualizer to name the saved output image.
        image_label_path: path to the semantic label map image.
        image_inst_path: path to the instance boundary map image.
    """
    opt = TestOptions().parse(save=False)
    opt.nThreads = 1   # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip
    opt.name = "label2city_1024p"
    opt.netG = "local"
    opt.ngf = 32
    opt.resize_or_crop = "none"

    data_loader = CreateOneDataLoader(opt)
    dataset = data_loader.load_data(image_label_path, image_inst_path)
    visualizer = Visualizer(opt)

    # Build the PyTorch model unless inference is delegated to TRT/ONNX.
    if not opt.engine and not opt.onnx:
        model = create_model(opt)
        if opt.data_type == 16:
            model.half()
        elif opt.data_type == 8:
            model.type(torch.uint8)

        if opt.verbose:
            print(model)
    else:
        from run_engine import run_trt_engine, run_onnx

    for i, data in enumerate(dataset):
        if i >= opt.how_many:
            break
        # Cast inputs to match the model's precision (fp16 or uint8).
        if opt.data_type == 16:
            data['label'] = data['label'].half()
            data['inst'] = data['inst'].half()
        elif opt.data_type == 8:
            # Fix: torch.Tensor has no .uint8() method (AttributeError at
            # runtime); use .type(torch.uint8) to match the model cast above.
            data['label'] = data['label'].type(torch.uint8)
            data['inst'] = data['inst'].type(torch.uint8)
        # Optional one-shot export of the model to ONNX, then terminate.
        if opt.export_onnx:
            print("Exporting to ONNX: ", opt.export_onnx)
            assert opt.export_onnx.endswith("onnx"), \
                "Export model file should end with .onnx"
            torch.onnx.export(model, [data['label'], data['inst']],
                              opt.export_onnx, verbose=True)
            exit(0)
        minibatch = 1
        # Pick the inference backend: TensorRT engine, ONNX runtime, PyTorch.
        if opt.engine:
            generated = run_trt_engine(opt.engine, minibatch,
                                       [data['label'], data['inst']])
        elif opt.onnx:
            generated = run_onnx(opt.onnx, opt.data_type, minibatch,
                                 [data['label'], data['inst']])
        else:
            generated = model.inference(data['label'], data['inst'])

        # Convert tensors to displayable images and save under index n.
        visuals = OrderedDict([
            ('input_label', util.tensor2label(data['label'][0], opt.label_nc)),
            ('synthesized_image', util.tensor2im(generated.data[0]))
        ])
        img_path = data['path']
        print('process image... %s' % img_path)
        visualizer.save_image(visuals, n)
Example #4
        # NOTE(review): fragment begins mid-block — the enclosing loop and the
        # `if opt.export_onnx:` guard for this export branch precede this view.
        assert opt.export_onnx.endswith(
            "onnx"), "Export model file should end with .onnx"
        torch.onnx.export(
            model, [data['sketch'], data['sketch_deform'], data['photo']],
            opt.export_onnx,
            verbose=True)
        exit(0)

    minibatch = 1
    # Pick the inference backend: TensorRT engine, ONNX runtime, or PyTorch.
    if opt.engine:

        generated = run_trt_engine(opt.engine, minibatch,
                                   [data['sketch'], data['photo']])
    elif opt.onnx:

        generated = run_onnx(opt.onnx, opt.data_type, minibatch,
                             [data['sketch'], data['photo']])
    else:
        model_output = model.inference(data['sketch'], data['photo'])

    # NOTE(review): model_output is bound only in the final else branch; the
    # engine/onnx branches bind `generated` instead, so the visuals dict below
    # raises NameError on those paths — confirm and unify the variable name.
    visuals = OrderedDict([
        ('sketch', util.tensor2label(data['sketch'][0], opt.label_nc)),
        ('synthesized', util.tensor2im(model_output['fake_image'].data[0])),
    ])

    img_path = data['path']
    print('process image... %s' % img_path)
    # Recover the human-readable race label from the encoded tensor value.
    race_str = data_loader.dataset.getLabelEncoder().inverse_transform(
        Variable(data['race']))[0]
    img_num = str(data['img_num'][0].item())
    visualizer.save_images(webpage, visuals, img_path, race_str, img_num)