Example #1
def reference():
    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.num_threads = 0   # test code only supports num_threads = 0
    opt.batch_size = 1    # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True    # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1   # no visdom display; the test code saves the results to a HTML file.
    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    model = create_model(opt)      # create a model given opt.model and other options
    model.setup(opt)               # regular setup: load and print networks; create schedulers
    # create a website
    web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.epoch))  # define the website directory
    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
    # test with eval mode. This only affects layers like batchnorm and dropout.
    # For [pix2pix]: we use batchnorm and dropout in the original pix2pix. You can experiment with and without eval() mode.
    # For [CycleGAN]: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
    if opt.eval:
        model.eval()
    for i, data in enumerate(dataset):
        if i >= opt.num_test:  # only apply our model to opt.num_test images.
            break
        model.set_input(data)  # unpack data from data loader
        model.test()           # run inference
        visuals = model.get_current_visuals()  # get image results
        img_path = model.get_image_paths()     # get image paths
        if i % 5 == 0:  # save images to an HTML file
            print('processing (%04d)-th image... %s' % (i, img_path))
        save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)
    webpage.save()  # save the HTML
Example #2
def load_cyclegan_with_path(path='cyclegan_baddies_c_1_a'):
    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.num_threads = 0  # test code only supports num_threads = 0
    opt.batch_size = 1  # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True  # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1  # no visdom display; the test code saves the results to a HTML file.
    opt.dataroot = "./test_image"
    opt.load_size = 256
    opt.crop_size = 256
    opt.gpu_ids = '0'
    # 	opt.gpu_ids = ''
    opt.name = path
    opt.eval = True

    torch.cuda.set_device(0)
    model = create_model(opt)  # create a model given opt.model and other options
    model.setup(opt)

    model.eval()

    cyclegan_dict[path] = model
    cyclegan_opts[path] = opt
Example #3
def run_example(output=None, script=True):
    args = "--dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout"
    opt = TestOptions().parse(args.split(' '))
    opt.num_threads = 0  # test code only supports num_threads = 0
    opt.batch_size = 1  # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True  # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1  # no visdom display; the test code saves the results to a HTML file.
    model = create_model(
        opt)  # create a model given opt.model and other options
    data = torch.load('example_input.pt')
    if script:
        print("running scripted...")
        m = torch.jit.script(model.netG.module)
        ip = data['A'].cuda()
        output_tensor = m(ip)
    else:
        print("running eager...")
        model.set_input(data)
        model.test()
        visuals = model.get_current_visuals()  # get image results
        output_tensor = visuals['fake']

    if output is not None:
        torch.save(output_tensor, output)
Example #4
def config_models_and_datasets(model_names, folder):
    opt = None
    models = dict()
    for name in model_names:
        sys.argv = sys.argv[:1]
        sys.argv.append("--dataroot")
        sys.argv.append(folder)
        sys.argv.append("--name")
        sys.argv.append(name)
        sys.argv.append("--model")
        sys.argv.append("test")
        sys.argv.append("--no_dropout")
        opt = TestOptions().parse()
        # hard-code some parameters for test
        opt.num_threads = 0  # test code only supports num_threads = 0
        opt.batch_size = 1  # test code only supports batch_size = 1
        opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
        opt.no_flip = True  # no flip; comment this line if results on flipped images are needed.
        opt.display_id = -1  # no visdom display; the test code saves the results to a HTML file.

        model = create_model(opt)
        model.setup(opt)
        models[name] = model
        print(f"model {name} instantiated")
    dataset = create_dataset(opt)
    return dataset, models
Example #5
def Predict():
    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.num_threads = 0  # test code only supports num_threads = 0
    opt.batch_size = 1  # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True  # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1  # no visdom display; the test code saves the results to a HTML file.
    model = create_model(opt)  # create a model given opt.model and other options
    model.setup(opt)  # regular setup: load and print networks; create schedulers
    pickle_in = open("emb.pickle", "rb")
    emb = pickle.load(pickle_in)
    for data in emb:
        if data['B_paths'][0].split('/')[-1] != e.get().split('/')[-1]: continue # compare with the user selected file to get the embedding

        model.set_input(data)  # unpack data from data loader
        model.test()  # run inference
        visuals = model.get_current_visuals()  # get image results

        im = save_images(visuals)

        img = Image.fromarray(im)
        img = ImageTk.PhotoImage(img)
        canvas.image = img  # keep a reference to the image so Tkinter does not garbage-collect it
        canvas.create_image(2, 2, anchor='nw', image=img)
Example #6
def run_test(epoch=-1, is_val=True):
    print('Running Test')
    opt = TestOptions().parse()
    # No shuffling for test set
    opt.serial_batches = True
    opt.which_epoch = epoch

    # Set batch_size to 1
    opt.batch_size = 1
    # If we are running on the test set change the folder path to where the test meshes are stored
    if not is_val:
        opt.phase = "test"

    dataset = DataLoader(opt)
    if opt.verbose:
        print("DEBUG testpath: ", opt.dataroot)
        print("DEBUG dataset length ", len(dataset))
    model = create_model(opt)
    writer = Writer(opt)
    writer.reset_counter()
    for i, data in enumerate(dataset):
        model.set_input(data)
        ncorrect, nexamples = model.test(epoch, is_val)
        if opt.verbose:
            print("DEBUG test ncorrect, nexamples ", ncorrect, nexamples)
        writer.update_counter(ncorrect, nexamples)
    writer.print_acc(epoch, writer.acc)
    return writer.acc
Example #7
def main():
    cfg = TestOptions().parse()   # get test options
    cfg.NUM_GPUS = torch.cuda.device_count()
    cfg.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    cfg.no_flip = True    # no flip; comment this line if results on flipped images are needed.
    cfg.display_id = -1   # no visdom display; the test code saves the results to a HTML file.

    cfg.phase = 'test'
    cfg.batch_size = int(cfg.batch_size / max(1, cfg.NUM_GPUS))
    launch_job(cfg=cfg, init_method=cfg.init_method, func=test)
Example #8
def get_test_opt():
    opt = TestOptions().parse()

    opt.num_threads = 1  # test code only supports num_threads = 1
    opt.batch_size = 1   # test code only supports batch_size = 1.
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip
    opt.display_id = -1  # no visdom display
    result_root = os.path.join(opt.results_dir, opt.name)
    if not os.path.exists(result_root):
        os.makedirs(result_root)
    return opt
Example #9
def uploadFile():
    f = request.files['file']
    global fileName
    fileName = f.filename[0:-4]
    f.save(os.path.join(app.config['UPLOAD_FOLDER'], f.filename))
    opt = TestOptions().parse()
    opt.num_threads = 0
    opt.batch_size = 1
    opt.serial_batches = True
    opt.no_flip = True
    opt.display_id = -1
    return opt
Example #10
    def inferViability(self, BrightfieldPath, GeneratedPath, results_dir, checkpoint_dir, experiment_name):
        print(BrightfieldPath)
        opt = TestOptions().parse()  # get test options
        # hard-code some parameters for test
        opt.num_threads = 0   # test code only supports num_threads = 0
        opt.batch_size = 1    # test code only supports batch_size = 1
        opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
        opt.no_flip = True    # no flip; comment this line if results on flipped images are needed.
        opt.num_test = len(list(BrightfieldPath.glob('**/*')))
        opt.dataroot = BrightfieldPath
        opt.checkpoints_dir = checkpoint_dir
        opt.name = experiment_name
        opt.results_dir = results_dir
        
        dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
        model = create_model(opt)      # create a model given opt.model and other options
        model.setup(opt)               # regular setup: load and print networks; create schedulers
         
        # if opt.eval:
        #     print('yes')
        #     model.eval()
        storeCellCount = {}
        for i, data in enumerate(dataset):
            
            if i >= opt.num_test:  # only apply our model to opt.num_test images.
                break
            model.set_input(data)  # unpack data from data loader
            #model.test()           # run inference
            try:
                model.testCell() #runs generator (forward) and discriminator(forwardD)
                fakeCellCount = model.fake_pred_cellCount.item()
                
                #visuals = model.get_current_visuals()  # get image results
                img_path = model.get_image_paths()     # get image paths
                
                fileName = Path(img_path[0]).stem
                fake_img = model.fake_B
                self.saveImages(fake_img,GeneratedPath,fileName)
                if fileName in storeCellCount:
                    storeCellCount[fileName].append(fakeCellCount)
                else:
                    storeCellCount[fileName] = [fakeCellCount]
            except KeyError:
                pass
                
            if i % 5 == 0:  
                print('processing (%04d)-th image... %s' % (i, img_path))

        self._toJson(opt.results_dir,storeCellCount,'viability')
Example #11
def config_datasets():
    sys.argv = sys.argv[:1]
    sys.argv.append("--dataroot")
    sys.argv.append(temp_folder)
    sys.argv.append("--model")
    sys.argv.append("test")
    sys.argv.append("--no_dropout")
    opt = TestOptions().parse()
    opt.num_threads = 0  # test code only supports num_threads = 0
    opt.batch_size = 1  # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True  # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1  # no visdom display; the test code saves the results to a HTML file.
    return create_dataset(opt)
Example #12
    def __init__(self, options, verbose=False):
        opt = TestOptions(options).parse(verbose=verbose)  # get test options
        # hard-code some parameters for test
        opt.num_threads = 0  # test code only supports num_threads = 0
        opt.batch_size = 1  # test code only supports batch_size = 1
        opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
        opt.no_flip = True  # no flip; comment this line if results on flipped images are needed.
        opt.display_id = -1  # no visdom display; the test code saves the results to a HTML file.
        model = create_model(opt)  # create a model given opt.model and other options
        model.setup(opt)  # regular setup: load and print networks; create schedulers
        model.eval()
        self.opt, self.model = opt, model
        self.totensor = ToTensor()
        self.normalize = Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
Example #13
def test_red_seal():
    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.num_threads = 0  # test code only supports num_threads = 0
    opt.batch_size = 1  # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True  # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1  # no visdom display; the test code saves the results to a HTML file.
    opt.name = "document_pix2pix"
    opt.model = "pix2pix"
    opt.netG = "unet_256"
    opt.direction = "AtoB"
    opt.norm = "batch"
    model = create_model(
        opt)  # create a model given opt.model and other options
    model.setup(
        opt)  # regular setup: load and print networks; create schedulers
    if opt.eval:
        model.eval()
    # img_path = "test/input/343abe6f-9e28-4813-b87a-cdb098f8c17f_page1.jpg"
    # img_path = "test/input/保利文化集团股份有限公司2019年第三季度财务报表-3.jpg"
    # img_path = "test/input/常州天宁建设发展集团有限公司2019年三季度合并及母公司财务报表-0.jpg"
    # img_path = "test/input/常州市晋陵投资集团有限公司2019年三季度合并及母公司财务报表-0.jpg"
    # img_path = "test/input/重庆市万盛经济技术开发区开发投资集团有限公司2019年三季度财务报表-1.jpg"
    img_path = "test/input/重庆市万盛经济技术开发区开发投资集团有限公司2019年三季度财务报表-2.jpg"
    res_img_dir = "test/output"
    signet_offsets = [(200, 100, 712, 612), (500, 100, 1012, 612),
                      (200, 100, 712, 612), (200, 100, 712, 612),
                      (1000, 200, 1512, 712), (1500, 100, 2012, 612)]
    os.makedirs(res_img_dir, exist_ok=True)
    img_path_list = [img_path]
    import time
    for img_path in img_path_list:
        img = Image.open(img_path).convert("RGB")
        a_img = img.crop(signet_offsets[5])
        t0 = time.time()
        transform = default_transform()
        A = transform(a_img)
        A = torch.unsqueeze(A, dim=0)
        A = A.to(torch.device("cuda:0"))
        fake_B = model.netG(A)
        res_B = tensor2im(fake_B)
        image_pil = Image.fromarray(res_B)
        print("cost:{}".format(time.time() - t0))
        out_img_path = os.path.join(res_img_dir, os.path.basename(img_path))
        image_pil.save(out_img_path)
Example #14
def cycle_gan_model(model):
    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.num_threads = 0  # test code only supports num_threads = 0
    opt.batch_size = 1  # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True  # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1  # no visdom display; the test code saves the results to a HTML file.
    opt.name = model
    model = create_model(
        opt)  # create a model given opt.model and other options
    model.setup(
        opt)  # regular setup: load and print networks; create schedulers
    if opt.eval:
        model.eval()

    return model
Example #15
def load_model_with_options():
    """
    Load the model details
    """
    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.num_threads = 0  # test code only supports num_threads = 0
    opt.batch_size = 1  # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True  # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1  # no visdom display; the test code saves the results to a HTML file.
    # dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    model = create_model(
        opt)  # create a model given opt.model and other options
    model.setup(
        opt)  # regular setup: load and print networks; create schedulers

    return opt, model
Example #16
def get_model(script):
    args = f"--dataroot {os.path.dirname(__file__)}/datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout"
    opt = TestOptions().parse(args.split(' '))
    opt.num_threads = 0  # test code only supports num_threads = 0
    opt.batch_size = 1  # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True  # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1  # no visdom display; the test code saves the results to a HTML file.
    model = create_model(
        opt)  # create a model given opt.model and other options
    model = model.netG.module
    data = torch.load('example_input.pt')
    input = data['A'].cuda()

    if script:
        model = torch.jit.script(model)

    return model, (input, )
Example #17
def my_create_model():
    opt = TestOptions().parse()
    # hard-code some parameters for test
    opt.num_threads = 1  # test code only supports num_threads = 1
    opt.batch_size = 1  # test code only supports batch_size = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip
    opt.display_id = -1  # no visdom display

    opt.name = "shape02shape1_noflip_cyclegan"  # model checkpoint saved
    opt.model = "cycle_gan"
    opt.phase = "test"
    opt.no_dropout = True
    opt.dataset_mode = "unaligned"
    opt.dataroot = "./tmp"

    model = create_model(opt)
    model.setup(opt)
    return opt, model
Example #18
def setup(opts):
    generator_checkpoint_path = opts['generator_checkpoint']
    try:
        os.makedirs('checkpoints/pretrained/')
    except OSError:
        pass
    shutil.copy(generator_checkpoint_path, 'checkpoints/pretrained/latest_net_G.pth')

    opt = TestOptions(args=['--dataroot', '',
                            '--name', 'pretrained',
                            '--model', 'test',
                            '--no_dropout']).parse()
    opt.num_threads = 0  # test code only supports num_threads = 0
    opt.batch_size = 1  # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True  # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1  # no visdom display; the test code saves the results to a HTML file.
    opt.preprocess = 'none'  # Don't resize to a square
    model = create_model(opt)
    model.setup(opt)
    return {'model': model, 'opt': opt}
Example #19
def get_cycled_data(video_id,label_id):
    data_input_path = '../../../dataset/rendered_video/output_img/{}'.format(video_id)
    data_output_path = '../../../dataset/cycled_video/cycled_img'
    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.dataroot = data_input_path
    opt.results_dir = data_output_path
    opt.checkpoints_dir = '/scr1/system/alpha-robot/script/video_cycle/pytorch-CycleGAN-and-pix2pix/checkpoints/{}'.format(video_id)
    # opt.checkpoints_dir = '/scr1/system/alpha-robot/script/video_cycle/pytorch-CycleGAN-and-pix2pix/checkpoints/{}'.format('merge')
    opt.num_threads = 0   # test code only supports num_threads = 0
    opt.batch_size = 1    # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True    # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1   # no visdom display; the test code saves the results to a HTML file.
    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    model = create_model(opt)      # create a model given opt.model and other options
    model.setup(opt)               # regular setup: load and print networks; create schedulers
    # create a website
    web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.epoch))  # define the website directory
    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
    # test with eval mode. This only affects layers like batchnorm and dropout.
    # For [pix2pix]: we use batchnorm and dropout in the original pix2pix. You can experiment with and without eval() mode.
    # For [CycleGAN]: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
    if opt.eval:
        model.eval()
    for i, data in enumerate(dataset):
        if i >= opt.num_test:  # only apply our model to opt.num_test images.
            break
        model.set_input(data)  # unpack data from data loader
        model.test()           # run inference
        visuals = model.get_current_visuals()  # get image results
        img_path = model.get_image_paths()     # get image paths
        if i % 5 == 0:  # save images to an HTML file
            print('processing (%04d)-th image... %s' % (i, img_path))
        save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)
    webpage.save()  # save the HTML
    video_id,data_num = select_only_B(data_output_path)
    with open(os.path.join(data_output_path,'video_list.txt'),'w') as file:
        file.write('{} {} {}'.format(video_id,data_num,label_id))
Example #20
def loadmodel():
    opt = TestOptions().parse()
    # hard-code some parameters for test
    opt.num_threads = 1  # test code only supports num_threads = 1
    opt.batch_size = 1  # test code only supports batch_size = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip
    opt.display_id = -1  # no visdom display
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    model = create_model(opt)
    model.setup(opt)
    # create a website
    web_dir = os.path.join(opt.results_dir, opt.name,
                           '%s_%s' % (opt.phase, opt.epoch))
    # webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
    # test with eval mode. This only affects layers like batchnorm and dropout.
    # pix2pix: we use batchnorm and dropout in the original pix2pix. You can experiment with and without eval() mode.
    # CycleGAN: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
    if opt.eval:
        model.eval()
    return model
Example #21
def test_real_dataset():
    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.num_threads = 0  # test code only supports num_threads = 0
    opt.batch_size = 1  # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True  # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1  # no visdom display; the test code saves the results to a HTML file.
    opt.name = "document_pix2pix_best"
    opt.model = "pix2pix"
    opt.netG = "unet_256"
    opt.direction = "AtoB"
    opt.norm = "batch"
    model = create_model(
        opt)  # create a model given opt.model and other options
    model.setup(
        opt)  # regular setup: load and print networks; create schedulers
    if opt.eval:
        model.eval()
    img_path = "/data/zhoubingcheng/signet_testdata/testimgs/39b5e538-18c0-11eb-b96f-02420a019fc9_page10.jpeg"
    res_img_dir = "test/output2"
    os.makedirs(res_img_dir, exist_ok=True)
    img_path_list = [img_path]
    import time
    for img_path in img_path_list:
        img = Image.open(img_path).convert("RGB")
        w, h = img.size
        a_img = img.crop((0, 0, 1024, 1024))
        t0 = time.time()
        transform = default_transform()
        A = transform(a_img)
        A = torch.unsqueeze(A, dim=0)
        A = A.to(torch.device("cuda:0"))
        fake_B = model.netG(A)
        res_B = tensor2im(fake_B)
        image_pil = Image.fromarray(res_B)
        print("cost:{}".format(time.time() - t0))
        out_img_path = os.path.join(res_img_dir, os.path.basename(img_path))
        image_pil.save(out_img_path)
Example #22
def test_discriminator():

    opt = TestOptions().parse()  # get test options

    # hard-code some parameters for test
    opt.num_threads = 0   # test code only supports num_threads = 0
    opt.batch_size = 1    # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True    # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1   # no visdom display; the test code saves the results to a HTML file.

    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    
    opt.isTrain = True
    opt.gan_mode = "lsgan"
    model = create_model(opt)      # create a model given opt.model and other options
    model.setup(opt)               # regular setup: load and print networks; create schedulers

    # test with eval mode. This only affects layers like batchnorm and dropout.
    # For [pix2pix]: we use batchnorm and dropout in the original pix2pix. You can experiment with and without eval() mode.
    # For [CycleGAN]: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
    
    if opt.eval:
        model.eval()

    for i, data in enumerate(dataset):

        if i >= opt.num_test:  # only apply our model to opt.num_test images.
            break

        model.set_input(data)  # unpack data from data loader

        real_AB = torch.cat((model.real_A, model.real_B), 1)  # we use conditional GANs; we need to feed both input and output to the discriminator
        pred = model.netD(real_AB.detach())

        print("Image {}: discriminator: {}".format(i, pred.mean()))
Example #23
def main():
    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.num_threads = 0  # test code only supports num_threads = 0
    opt.batch_size = 1  # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True  # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1  # no visdom display; the test code saves the results to a HTML file.
    dataset = create_dataset(
        opt)  # create a dataset given opt.dataset_mode and other options
    sample_rate = opt.sample_rate
    model = create_model(
        opt)  # create a model given opt.model and other options
    model.setup(
        opt)  # regular setup: load and print networks; create schedulers
    mkdir(opt.results_dir)
    # create a website
    # web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.epoch))  # define the website directory
    # webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
    # test with eval mode. This only affects layers like batchnorm and dropout.
    # For [pix2pix]: we use batchnorm and dropout in the original pix2pix. You can experiment with and without eval() mode.
    # For [CycleGAN]: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
    if opt.eval:
        model.eval()
    for i, data in enumerate(dataset):
        if i >= opt.num_test:  # only apply our model to opt.num_test images.
            break
        model.set_input(data)  # unpack data from data loader
        model.test()  # run inference
        clips = model.get_current_audio()  # get image results
        img_path = model.get_clip_paths()  # get image paths
        iter_dir = os.path.join(opt.results_dir, "{:03d}".format(i))
        mkdir(iter_dir)
        for name, y in clips.items():
            librosa.output.write_wav(
                os.path.join(iter_dir, '{}.wav'.format(name)), y, sample_rate)
Example #24
from flask import Flask, request, Response, send_from_directory
from flask_cors import CORS
import time
import random

from options.test_options import TestOptions
from data import create_dataset
from models import create_model
from util.visualizer import save_images
from util import htmls
from util import util

opt = TestOptions().parse(infer=True)
# hard-code some parameters for test
opt.num_threads = 0  # test code only supports num_threads = 0
opt.batch_size = 1  # test code only supports batch_size = 1
opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
opt.no_flip = True  # no flip; comment this line if results on flipped images are needed.
opt.display_id = -1  # no visdom display; the test code saves the results to an HTML file.

model = create_model(opt)  # create a model given opt.model and other options
model.setup(opt)  # regular setup: load and print networks; create schedulers
model.eval()

static_folder = 'static'
app = Flask(__name__, static_folder=static_folder)
CORS(app)


@app.route('/', methods=['GET'])
def index():
    # output = torch.argmax(output, dim=1)
    fake_ED_M = fake_ED_M.data.to('cpu').numpy()
    fake_ES_M = fake_ES_M.data.to('cpu').numpy()
    fake_ED_2 = fake_ED_2.data.to('cpu').numpy()
    fake_ES_2 = fake_ES_2.data.to('cpu').numpy()
    flow_2 = flow_2.data.to('cpu').numpy()
    warp_img = warp_img.data.to('cpu').numpy()
    warped_mask = warped_mask.data.to('cpu').numpy()
    return np.concatenate([fake_ED_M, fake_ES_M, fake_ED_2, fake_ES_2, flow_2, warp_img, warped_mask], axis=1)


if __name__ == '__main__':
    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.num_threads = 0   # test code only supports num_threads = 0
    opt.batch_size = 1    # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True    # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1   # no visdom display; the test code saves the results to a HTML file.
    # dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options

    # create a website
    web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.epoch))  # define the website directory
    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
    # test with eval mode. This only affects layers like batchnorm and dropout.
    # For [pix2pix]: we use batchnorm and dropout in the original pix2pix. You can experiment with and without eval() mode.
    # For [CycleGAN]: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
    output_dir = './output'
    spacing_target = (10, 1.25, 1.25)
    window_size = 256
    stride = 128
Example #26
                            (1, 2, 3)).tolist()
        else:
            g += code.view(code.size(0), -1).cpu().numpy().tolist()
            tlbl += (lbl == cname).numpy().tolist()
            tloss += np.mean(((out.cpu() - data).numpy())**2,
                             (1, 2, 3)).tolist()

        #visuals = model.get_current_visuals()
        #img_path = model.get_image_paths()
        #save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)
    # save the website
    #webpage.save()
    output['test'] = g
    output['lbl'] = tlbl
    io.savemat('feat.mat', output)
    fpr, tpr, _ = roc_curve(tlbl, tloss, 0)
    roc_auc1 = auc(fpr, tpr)
    return (roc_auc1)


if __name__ == '__main__':
    opt = TestOptions().parse()
    opt.dataroot = 'nol'
    opt.batch_size = 256
    opt.fineSize = 32
    opt.input_nc = 3
    opt.output_nc = 3
    opt.ngf = 64
    opt.name = 'cifar_AE8'
    roc = run(opt)
Example #27
    for label, im_data in visuals.items():
        im = util.tensor2im(im_data)
        save_path = os.path.join(output_dir, file_name)
        util.save_image(im, save_path, aspect_ratio=aspect_ratio)


if __name__ == '__main__':
    opt = TestOptions().parse()
    models_list = [
        'style_monet_pretrained', 'style_vangogh_pretrained',
        'style_ukiyoe_pretrained', 'style_cezanne_pretrained'
    ]
    for style_id, premodel in enumerate(models_list):
        opt.name = premodel
        opt.num_threads = 0
        opt.batch_size = 1
        opt.serial_batches = True
        opt.no_flip = True
        opt.display_id = -1

        file_name = str(style_id + 1) + '.png'
        results_dir = opt.results_dir
        if not os.path.exists(results_dir):
            os.makedirs(results_dir)

        dataset = create_dataset(opt)
        model = create_model(opt)
        model.setup(opt)

        if opt.eval:
            model.eval()
from util.visualizer import Visualizer
import os
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
import scipy.io as sio
import models.channel as chan
import shutil
from pytorch_msssim import ssim, ms_ssim, SSIM, MS_SSIM
import math

# Extract the options
opt = TestOptions().parse()

opt.batch_size = 1  # batch size

if opt.dataset_mode == 'CIFAR10':
    opt.dataroot = './data'
    opt.size = 32
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    testset = torchvision.datasets.CIFAR10(root='./data',
                                           train=False,
                                           download=True,
                                           transform=transform)
    dataset = torch.utils.data.DataLoader(testset,
                                          batch_size=opt.batch_size,
def main():

    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.num_threads = 0  # test code only supports num_threads = 0
    opt.batch_size = 1  # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True  # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1  # no visdom display; the test code saves the results to a HTML file.
    model = create_model(
        opt)  # create a model given opt.model and other options
    model.setup(
        opt)  # regular setup: load and print networks; create schedulers
    dataset = create_dataset(
        opt)  # create a dataset given opt.dataset_mode and other options
    pix2pix_image = np.zeros((model_image_size, model_image_size, 3), np.uint8)

    start_time = time.time()
    x = 5  # displays the frame rate every 1 second
    counter = 0

    while True:
        # ZMQ Subscribe convert incoming bytes to PIL and then to CVImage
        frame = socketSubscribe.recv()

        if len(frame) > 10:
            pil_image = Image.frombytes('RGBA',
                                        (model_image_size, model_image_size),
                                        frame, 'raw').convert('RGB')

            # learntest
            #model.update_learning_rate()

            for i, data in enumerate(dataset):
                if i >= opt.num_test:  # only apply our model to opt.num_test images.
                    break
                model.set_input(data,
                                pil_image)  # unpack data from data loader

                # learntest
                #model.optimize_parameters()

                model.test()  # run inference

                visuals = model.get_current_visuals()  # get image results
                for label, im_data in visuals.items():
                    im = util.tensor2im(im_data)
                    open_cv_image = numpy.array(im)
                    open_cv_image = open_cv_image[:, :, ::-1].copy()
                    pix2pix_image = open_cv_image
        #dim = (256,256)
        #blank_image = cv2.resize(blank_image, dim, interpolation = cv2.INTER_AREA)
        socketPublish.send(pix2pix_image)
        cv2.imshow("image", pix2pix_image)
        cv2.waitKey(1)

        counter += 1
        if (time.time() - start_time) > x:
            print("FPS: ", counter / (time.time() - start_time))
            counter = 0
            start_time = time.time()
def main():


    opt = TestOptions().parse()  # get test options
    opt.netG = 'unet_128'
    gen_jison = False
    if gen_jison:
        vis = False
        opt.num_test=50000
    else:
        vis = True
        opt.num_test = 300
    # hard-code some parameters for test
    opt.num_threads = 0  # test code only supports num_threads = 0
    opt.batch_size = 1  # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True  # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1  # no visdom display; the test code saves the results to a HTML file.
    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    model = create_model(opt)  # create a model given opt.model and other options
    print(model.__dict__)
    model.setup(opt)  # regular setup: load and print networks; create schedulers

    kwargs = {
        "thred_dyn": 50,
        "ksize_dyn": 100,
        "ksize_close": 30,
        "ksize_open": 3,
    }
    detector = DefectDetector(**kwargs)

    if opt.eval:
        model.eval()
    for i, data in enumerate(dataset):
        if i >= opt.num_test:  # only apply our model to opt.num_test images.
            break
        # print(i)
        paths = data["A_paths"]
        print(paths)
        # print(len(paths))
        model.set_input(data)  # unpack data from data loader
        model.test()  # run inference
        visuals = model.get_current_visuals()  # get image results
        imgee_show=cv2.imread(paths[0])
        # print(visuals["pre_score"])
        cv2.imshow('image',imgee_show)
        cv2.waitKey()
        image_batch = visuals["real_A"]
        reconst_batch = visuals["fake_B"]
        # B_mask=data["B_mask"]

        image_batch = image_batch.detach().cpu().numpy()
        # label_batch = B_mask.detach().cpu().numpy()
        reconst_batch = reconst_batch.detach().cpu().numpy()
        # batchs=detector.apply(image_batch,label_batch,reconst_batch)
        batchs = detector.ztb_rangd2_apply(image_batch, image_batch, reconst_batch)



        image_high=128
        image_width=128
        image_name=paths[0].split('/')[-1].split('.')[0]
        image=batchs[-1].squeeze()
        # print(image.shape)
        # cv2.imshow('image',image)
        # cv2.waitKey(3)
        submit(image_high,image_width,image_name,image)
        if vis:
            for idx, path in enumerate(paths):
                visual_imgs = []
                for batch in batchs:
                    visual_imgs.append(batch[idx].squeeze())
                img_visual = detector.concatImage(visual_imgs, offset=None)
                # print(img_visual.size)
                visualization_dir = opt.checkpoints_dir + "/infer_epoch{}/".format(opt.load_iter)
                if not os.path.exists(visualization_dir):
                    os.makedirs(visualization_dir)
                img_visual.save(visualization_dir + "_".join(path.split("/")[-2:]))
    print(i)