Example #1
def my_inference(model, opt, img_path):
    print(img_path)
    assert (os.path.exists(img_path) and os.path.isfile(img_path))
    A_dir = "./tmp/testA"
    B_dir = "./tmp/testB"

    if os.path.exists(A_dir):
        shutil.rmtree(A_dir)
    if os.path.exists(B_dir):
        shutil.rmtree(B_dir)
    os.makedirs(A_dir)
    os.makedirs(B_dir)

    img_name = os.path.basename(img_path)
    des1 = os.path.join(A_dir, img_name)
    des2 = os.path.join(B_dir, img_name)
    shutil.copyfile(img_path, des1)
    shutil.copyfile(img_path, des2)

    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()

    for i, data in enumerate(dataset):
        model.set_input(data)
        model.test()
        visuals = model.get_current_visuals()
        img_path = model.get_image_paths()
        save_path = my_save_images(visuals,
                                   img_path,
                                   aspect_ratio=opt.aspect_ratio,
                                   width=opt.display_winsize)
        break  # only test one image
    return save_path
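A minimal usage sketch for my_inference, assuming the pix2pix-style TestOptions and create_model helpers imported in the later examples on this page; pointing dataroot at ./tmp is an assumption based on the staging directories the function creates:

from options.test_options import TestOptions
from models import create_model

opt = TestOptions().parse()
opt.dataroot = "./tmp"     # assumed: my_inference stages the image under ./tmp/testA and ./tmp/testB
opt.nThreads = 1           # the test path in these examples only supports one worker
opt.batchSize = 1          # and a batch size of 1
opt.serial_batches = True  # no shuffling

model = create_model(opt)
model.setup(opt)
result_path = my_inference(model, opt, "./inputs/example.jpg")  # hypothetical input path
print("saved to", result_path)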
Example #2
    def generate_img(self, data_folder):
        # test
        self.opt.dataroot = data_folder
        data_loader = CreateDataLoader(self.opt)
        dataset = data_loader.load_data()

        d_loss = []
        g_loss = []

        for i, data in enumerate(dataset):
            #if i >= self.opt.how_many:
            #    break
            self.model.set_input(data)
            self.model.no_optimisation_run_through()
            d = self.model.get_D_loss()
            g = self.model.get_G_loss()

            d_loss.append(d)
            g_loss.append(g)

            visuals = self.model.get_current_visuals()
            img_path = self.model.get_image_paths()
            #if i % 5 == 0:
            #    print('processing (%04d)-th image... %s' % (i, img_path))
            save_images(self.webpage,
                        visuals,
                        img_path,
                        aspect_ratio=self.opt.aspect_ratio,
                        width=self.opt.display_winsize)

        self.webpage.save()

        return g_loss, d_loss
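A hedged sketch of driving generate_img, where runner stands in for an instance of the (unnamed) class the method belongs to:

# "runner" is a hypothetical instance of the class that owns generate_img above.
g_loss, d_loss = runner.generate_img("./datasets/val")
print("mean G loss: %.4f, mean D loss: %.4f" %
      (sum(g_loss) / len(g_loss), sum(d_loss) / len(d_loss)))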
Example #3
def main():
    # step1: opt
    opt = TestOptions().parse()
    # step2: data
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    # step3: model
    model = create_model(opt)
    model.setup(opt)
    # step4: web; at test time, use an HTML page instead of visdom
    web_dir = os.path.join(opt.results_dir, opt.name,
                           '{}_{}'.format(opt.phase, opt.epoch))
    title = 'Experiment = {}, Phase = {}, Epoch = {}'.format(
        opt.name, opt.phase, opt.epoch)
    webpage = HTML(web_dir, title=title)
    # test with eval mode. This only affects layers like batchnorm and dropout.
    # pix2pix: we use batchnorm and dropout in the original pix2pix. You can experiment it with and without eval() mode.
    # CycleGAN: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
    model.eval()
    for i, data in enumerate(dataset):
        if i >= opt.num_test:
            break
        model.set_input(data)
        model.test()
        visuals = model.get_current_visuals()
        img_path = model.get_image_paths()
        if i % 5 == 0:
            print('processing {:0>4d}-th image...{}'.format(i, img_path))
        save_images(webpage,
                    visuals,
                    img_path,
                    aspect_ratio=opt.aspect_ratio,
                    width=opt.display_winsize)

    webpage.save()
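The eval-mode comment above describes standard PyTorch semantics; a tiny self-contained illustration of what model.eval() actually toggles:

import torch
import torch.nn as nn

net = nn.Sequential(nn.Linear(8, 8), nn.Dropout(0.5), nn.BatchNorm1d(8))
net.train()  # Dropout is active and BatchNorm updates its running statistics
net.eval()   # Dropout becomes a no-op and BatchNorm uses the stored statistics
with torch.no_grad():
    out = net(torch.randn(4, 8))  # deterministic for a fixed input in eval mode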
Example #4
def my_batch_inference(model, opt, img_path):
    # input: an M x N x 3 x 256 batch stored under the 'batch' key of a .mat file
    print(img_path)

    assert (os.path.exists(img_path) and os.path.isfile(img_path))
    A_dir = "./tmp/testA"
    B_dir = "./tmp/testB"

    if os.path.exists(A_dir):
        shutil.rmtree(A_dir)
    if os.path.exists(B_dir):
        shutil.rmtree(B_dir)
    os.makedirs(A_dir)
    os.makedirs(B_dir)
    batchdata = sio.loadmat(img_path)['batch']
    orisize = batchdata.shape[0]
    for i in range(0, batchdata.shape[3]):
        img = batchdata[:, :, :, i]
        cv2.imwrite(os.path.join(A_dir, str(i) + ".jpg"), img)
        cv2.imwrite(os.path.join(B_dir, str(i) + ".jpg"), img)

    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()

    imgnamelist = []
    for i, data in enumerate(dataset):
        model.set_input(data)
        model.test()
        visuals = model.get_current_visuals()
        img_path = model.get_image_paths()
        save_path = my_save_images(visuals,
                                   img_path,
                                   aspect_ratio=opt.aspect_ratio,
                                   width=opt.display_winsize)
        imgnamelist.append(save_path)
        # imgdict['name']=save_path
        # break  # only test one image
    x_data = []
    for img in imgnamelist:
        image = cv2.imread(img)
        image = cv2.resize(image, (orisize, orisize))
        x_data.append(image)
    x_data = numpy.array(x_data)
    x_data = x_data.transpose([1, 2, 3, 0])
    save_path = 'temp/batch.mat'
    sio.savemat(save_path, {'batch': x_data})

    return save_path
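A small round-trip sketch of the 'batch' .mat convention this function consumes; the 256x256x3x4 shape is illustrative:

import numpy as np
import scipy.io as sio

batch = np.zeros((256, 256, 3, 4), dtype=np.uint8)  # M x N x 3 x K, as read above
sio.savemat("tmp_batch.mat", {"batch": batch})
loaded = sio.loadmat("tmp_batch.mat")["batch"]
assert loaded.shape == (256, 256, 3, 4)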
Example #5
    def on_created(self, event):  # when file is created
        # do something, eg. call your function to process the image
        print("Got E event for file %s" % event.src_path)

        try:
            go = os.path.abspath(os.path.join(event.src_path, os.pardir, "go"))

            if not os.path.isfile(go):
                return

            with open(go) as f:
            name = f.readlines()[0].strip()

            print("starting to process %s" % self.opt.name)

            self.model.opt = self.opt

            data_loader = CreateDataLoader(self.opt)
            dataset = data_loader.load_data()

            for i, data in enumerate(dataset):

                basename = os.path.basename(data['A_paths'][0])[:-4]

                self.model.set_input(data)

                z = self.model.encode_real_B()

                img_path = self.model.get_image_paths()
                print('%04d: process image... %s' % (i, img_path))

                outfile = "./output/%s/%s/%s@%s" % (self.directory, name,
                                                    basename, "_".join(
                                                        [str(s)
                                                         for s in z[0]]))
                os.makedirs(os.path.dirname(outfile), exist_ok=True)

                touch(outfile)

        except Exception as e:
            traceback.print_exc()
            print(e)

        try:
            rmrf('./input/%s/val/*' % self.directory)

            if os.path.isfile(go):
                os.remove(go)
        except Exception as e:
            traceback.print_exc()
            print(e)
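The on_created handler above matches the watchdog API; a minimal sketch of registering such a handler, where GoHandler is a placeholder name for the class that defines it (assumed to subclass watchdog's FileSystemEventHandler):

import time
from watchdog.observers import Observer

handler = GoHandler()  # placeholder; the real class also needs opt, model and directory wired in
observer = Observer()
observer.schedule(handler, path="./input", recursive=True)
observer.start()
try:
    while True:
        time.sleep(1)
finally:
    observer.stop()
    observer.join()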
Example #6
def eval_test(model, epoch, isTest=False, testOpt=None):
    if not isTest:
        opt = get_val_opt()
        # save the image results of the validation set in the final epochs; otherwise compute SSIM and PSNR only
        if epoch >= opt.saveimg_epoch:
            opt.saveimg = True
    else:
        opt = testOpt
    set_name = opt.setname
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    print("Current test set is " + set_name + ", size: " + str(len(dataset)))
    psnr, ssim = model_test_val(opt, dataset, model, epoch)

    return psnr,ssim
Example #7
def test():
    import sys
    sys.argv = args
    import os
    from options.test_options import TestOptions
    from data import CreateDataLoader
    from models import create_model
    from util.visualizer import Visualizer
    from util import html

    opt = TestOptions().parse()
    opt.nThreads = 1  # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle

    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    model = create_model(opt)
    visualizer = Visualizer(opt)
    # create website
    web_dir = os.path.join(opt.results_dir, opt.name,
                           '%s_%s' % (opt.phase, opt.which_epoch))
    webpage = html.HTML(
        web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
        (opt.name, opt.phase, opt.which_epoch))
    # test
    for i, data in enumerate(dataset):
        if i >= opt.how_many:
            break
        model.set_input(data)
        model.test()
        visuals = model.get_current_visuals()
        img_path = model.get_image_paths()
        img_path[0] = img_path[0] + str(i)
        print('%04d: process image... %s' % (i, img_path))
        visualizer.save_images(webpage,
                               visuals,
                               img_path,
                               aspect_ratio=opt.aspect_ratio)
    webpage.save()
Example #8
def loadmodel():
    opt = TestOptions().parse()
    # hard-code some parameters for test
    opt.num_threads = 1  # test code only supports num_threads = 1
    opt.batch_size = 1  # test code only supports batch_size = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip
    opt.display_id = -1  # no visdom display
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    model = create_model(opt)
    model.setup(opt)
    # create a website
    web_dir = os.path.join(opt.results_dir, opt.name,
                           '%s_%s' % (opt.phase, opt.epoch))
    # webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
    # test with eval mode. This only affects layers like batchnorm and dropout.
    # pix2pix: we use batchnorm and dropout in the original pix2pix. You can experiment it with and without eval() mode.
    # CycleGAN: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
    if opt.eval:
        model.eval()
    return model
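loadmodel returns a ready model; a hedged sketch of a single forward pass with it. The data dict layout ('A' tensor plus 'A_paths' list) is assumed from the single-image datasets used elsewhere on this page:

import torch

model = loadmodel()
data = {'A': torch.randn(1, 3, 256, 256),     # placeholder input image tensor
        'A_paths': ['./inputs/example.jpg']}  # hypothetical source path
model.set_input(data)
model.test()
visuals = model.get_current_visuals()  # dict of output images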
Example #9
    def __init__(self, gpu_ids=[]):
        opt = TestOptions()  #.parse()
        opt.nThreads = 1  # test code only supports nThreads = 1
        opt.batchSize = 1  # test code only supports batchSize = 1
        opt.serial_batches = True  # no shuffle
        opt.no_flip = True  # no flip
        opt.display_id = -1  # no visdom display
        opt.dataset_mode = "single"
        opt.dataroot = "."
        opt.phase = "test"
        opt.loadSize = 256
        opt.fineSize = 256
        opt.isTrain = False
        opt.input_nc = 3
        opt.output_nc = 3
        opt.gpu_ids = gpu_ids
        opt.name = "NU_SEG"
        opt.model_suffix = ""
        opt.checkpoints_dir = "../../NucleiSegmentation/checkpoints/"
        opt.model = "test"
        opt.ngf = 64
        opt.norm = "instance"
        opt.which_model_netG = "unet_256"
        opt.resize_or_crop = "resize_and_crop"
        opt.which_epoch = "latest"
        opt.no_dropout = True  # the argparse flag is store_true; the value itself should be a bool
        opt.init_type = "normal"
        opt.init_gain = 0.02
        opt.verbose = False
        opt.which_direction = "BtoA"
        data_loader = CreateDataLoader(opt)
        dataset = data_loader.load_data()
        model = create_model(opt)
        model.setup(opt)

        self.model = model
Example #10
import time
from options.train_options import TrainOptions
from options.val_options import ValOptions
from data import CreateDataLoader
from models import create_model
from util.visualizer import Visualizer
import numpy as np
import math
from skimage.measure import compare_ssim

if __name__ == '__main__':
    opt = TrainOptions().parse()
    val = ValOptions().parse()
    val.nThreads = 1   # the validation loader only supports nThreads = 1,
    val.batchSize = 1  # so set these before the loader is built
    data_loader = CreateDataLoader(opt)
    data_loader_val = CreateDataLoader(val)
    dataset = data_loader.load_data()
    dataset_val = data_loader_val.load_data()
    dataset_size = len(data_loader)
    dataset_val_size = len(data_loader_val)
    print('#training images = %d' % dataset_size)
    print('#validation images = %d' % dataset_val_size)

    model = create_model(opt)
    model_val = create_model(val)
    model.setup(opt)
    visualizer = Visualizer(opt)
    total_steps = 0

    for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
Example #11
    config.num_test = 50
    config.phase = 'test'
    config.model = 'test'
    config.loadSize = config.fineSize

    config.num_threads = 1  # test code only supports num_threads = 1
    config.batch_size = 1  # test code only supports batch_size = 1
    config.serial_batches = True  # no shuffle
    config.no_flip = True  # no flip
    config.display_id = -1  # no visdom display

    model = CycleGANModel()
    model.initialize(config)
    model.setup(config)
    print("Network Model Created")
    data_loader = CreateDataLoader(config)
    dataset = data_loader.load_data()

    # create a website
    web_dir = os.path.join(config.results_dir, config.name,
                           '%s_%s' % (config.phase, config.epoch))
    webpage = html.HTML(
        web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
        (config.name, config.phase, config.epoch))
    # test with eval mode. This only affects layers like batchnorm and dropout.
    # CycleGAN: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
    # model.eval()
    for i, data in enumerate(dataset):
        if i >= config.num_test:
            break
        model.set_input(data)
Example #12
        opt.gpu_ids) > 0 else "cpu"
    if os.path.exists(abort_file):
        os.remove(abort_file)
        exit("Abort using file: " + abort_file)

    if opt.sanity_check:
        sanity_check(opt)

    batch_size = opt.batch_size
    parallell_batch_size = opt.parallell_batch_size if opt.parallell_batch_size > 0 else opt.batch_size
    opt.batch_size = parallell_batch_size
    n_acc_batches = batch_size // parallell_batch_size
    opt.n_acc_batches = n_acc_batches
    assert batch_size % parallell_batch_size == 0, "Batch size should be divisible by parallell batch size"

    data_loader = CreateDataLoader(copy.deepcopy(opt))

    validation_size = 0
    if opt.validation_freq > 0:
        opt_val = copy.deepcopy(opt)
        opt_val.phase = opt.validation_set
        opt_val.max_dataset_size = opt.max_val_dataset_size
        if opt.validation_set == "split":
            torch.manual_seed(42)
            validation_loader, data_loader = SplitDataLoader(
                data_loader,
                copy.deepcopy(opt),
                opt_val,
                length_first=opt.max_val_dataset_size)
        else:
            validation_loader = CreateDataLoader(opt_val)
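The bookkeeping above splits one logical batch into several smaller forward passes (gradient accumulation); plugging in hypothetical numbers makes the invariant explicit:

batch_size = 32             # illustrative: logical batch size requested by the user
parallell_batch_size = 8    # what fits in memory per forward pass
n_acc_batches = batch_size // parallell_batch_size  # 4 accumulation steps per optimizer step
assert batch_size % parallell_batch_size == 0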
Example #13
import time
from options.train_options import TrainOptions
from data import CreateDataLoader
from models import create_model
from util.visualizer import Visualizer

if __name__ == '__main__':
    opt = TrainOptions().parse()
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    dataset_size = len(data_loader)
    print('#training images = %d' % dataset_size)

    model = create_model(opt)
    model.setup(opt)
    visualizer = Visualizer(opt)
    total_steps = 0

    for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
        iter_data_time = time.time()
        epoch_iter = 0

        for i, data in enumerate(dataset):
            iter_start_time = time.time()
            if total_steps % opt.print_freq == 0:
                t_data = iter_start_time - iter_data_time
            visualizer.reset()
            total_steps += opt.batchSize
            epoch_iter += opt.batchSize
            model.set_input(data)
Example #14
import os
from options.test_options import TestOptions
from data import CreateDataLoader
from models import create_model
from util.visualizer import save_images
from util import html


if __name__ == '__main__':
    opt = TestOptions().parse()
    opt.nThreads = 1   # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip
    opt.display_id = -1  # no visdom display
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    model = create_model(opt)
    model.setup(opt)
    # create website
    web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))
    # test
    for i, data in enumerate(dataset):
        if i >= opt.how_many:
            break
        model.set_input(data)
        model.test()
        visuals = model.get_current_visuals()
        img_path = model.get_image_paths()
        if i % 5 == 0:
Example #15
def main():

    # parse options
    parser = TestOptions()
    opts = parser.parse()
    orig_dir = opts.orig_dir
    blur_dir = opts.dataroot

    saver = Saver(opts)

    # data loader
    print('\n--- load dataset ---')
    dataset_domain = 'A' if opts.a2b else 'B'
    #     dataset = dataset_single(opts, 'A', opts.input_dim_a)
    # else:
    #     dataset = dataset_single(opts, 'B', opts.input_dim_b)
    # loader = torch.utils.data.DataLoader(dataset, batch_size=1, num_workers=opts.nThreads)
    loader = CreateDataLoader(opts)

    # model
    print('\n--- load model ---')
    model = UID(opts)
    model.setgpu(opts.gpu)  ## comment for cpu mode
    model.resume(opts.resume, train=False)
    model.eval()

    # test
    print('\n--- testing ---')
    for idx1, data in enumerate(loader):
        # img1, img_name_list = data[dataset_domain], data[dataset_domain+'_paths']
        # img1 = img1.cuda(opts.gpu).detach()
        images_b = data['B']
        images_a = images_b  # should be the same shape (this only holds for resize_or_crop="none")
        img_name_list = data['B_paths']  # B is the fluorescence image
        center_crop_shape = data['B_size_WH'][::-1]  # B is the fluorescence image
        if len(img_name_list) > 1:
            print("Warning: there is more than one sample in the test batch.")
        images_a = images_a.cuda(opts.gpu).detach()  ## comment for cpu mode
        images_b = images_b.cuda(opts.gpu).detach()  ## comment for cpu mode
        images_a = torch.cat(
            [images_a] * 2,
            dim=0)  # because half of the batch is used as real_A_random
        images_b = torch.cat(
            [images_b] * 2,
            dim=0)  # because half of the batch is used as real_B_random
        print('{}/{}'.format(idx1, len(loader)))
        with torch.no_grad():
            model.inference(images_a, images_b)
            # img = model.test_forward(img1, a2b=opts.a2b)
        img_name = img_name_list[0].split('/')[-1]
        saver.write_img(idx1,
                        model,
                        img_name=img_name,
                        inference_mode=True,
                        mask_path='../input/testB_mask/' + img_name)  # True
        # saver.save_img(img=model.fake_I_encoded[[np.s_[:]]*2 + return_center_crop_slices(input_shapes=images_b.shape[-2:],
        #                                                                                  output_shapes=center_crop_shape,
        #                                                                                  input_scale=1.0,
        #                                                                                  output_scale=opts.fineSize*1.0/opts.loadSize)],
        #                img_name=img_name,
        #                subfolder_name="fake_A") #'gen_%05d.png' % (idx1),

    return
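The torch.cat([x] * 2, dim=0) calls above simply duplicate the batch so that half of it can serve as the *_random samples; in isolation:

import torch

x = torch.randn(1, 3, 256, 256)      # a single test image
doubled = torch.cat([x] * 2, dim=0)  # batch of 2: the image and its copy
assert doubled.shape == (2, 3, 256, 256)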
Example #16
def main():
  # parse options
  parser = TrainOptions()
  opts = parser.parse()

  # visualizer
  visualizer = Visualizer(opts)

  # data loader
  print('\n--- load dataset ---')
  # dataset = dataset_unpair(opts)    
  # train_loader = torch.utils.data.DataLoader(dataset, batch_size=opts.batch_size, shuffle=True, num_workers=opts.nThreads)
  data_loader = CreateDataLoader(opts)

  # model
  print('\n--- load model ---')
  model = UID(opts)
  model.setgpu(opts.gpu)
  if opts.resume is None:
    model.initialize()
    ep0 = -1
    total_it = 0
  else:
    ep0, total_it = model.resume(opts.resume)
  model.set_scheduler(opts, last_ep=ep0)
  ep0 += 1
  print('start the training at epoch %d' % ep0)

  # saver for display and output
  saver = Saver(opts)

  # train
  print('\n--- train ---')
  max_it = 500000
  for ep in range(ep0, opts.n_ep):
    print("Epoch: {}".format(ep))

    for it, data in enumerate(data_loader):
      images_a, images_b = data['A'], data['B']
      if images_a.size(0) != opts.batch_size or images_b.size(0) != opts.batch_size:
        continue
      images_a = images_a.cuda(opts.gpu).detach()
      images_b = images_b.cuda(opts.gpu).detach()

      # update model
      # model.update_D_content(images_a, images_b)  # uncomment for GAN_content, discriminator of z_content
      model.update_D(images_a, images_b)
      if (it + 1) % 2 != 0 and it != len(data_loader)-1:
        continue
      model.update_EG()

      losses_dic = model.get_current_losses()
      visualizer.plot_current_losses(ep, float(it)/len(data_loader), opts, losses_dic)

      # save to display file
      if (it+1) % 48 == 0:
        print('total_it: %d (ep %d, it %d), lr %08f' % (total_it+1, ep, it+1, model.gen_opt.param_groups[0]['lr']))
        print('Dis_I_loss: %04f, Dis_B_loss %04f, GAN_loss_I %04f, GAN_loss_B %04f' % (model.disA_loss, model.disB_loss, model.gan_loss_i,model.gan_loss_b))
        print('B_percp_loss %04f, Recon_II_loss %04f' % (model.B_percp_loss, model.l1_recon_II_loss))
      if (it+1) % 200 == 0:
        saver.write_img(ep*len(data_loader) + (it+1), model)
        
      total_it += 1
      if total_it >= max_it:
        saver.write_img(-1, model)
        saver.write_model(-1, model)
        break

    # decay learning rate
    if opts.n_ep_decay > -1:
      model.update_lr()

    saver.write_img(ep, model)
    # Save network weights
    saver.write_model(ep, total_it+1, model)

  return
Example #17
def train():
    import time
    from options.train_options import TrainOptions
    from data import CreateDataLoader
    from models import create_model
    from util.visualizer import Visualizer
    opt = TrainOptions().parse()
    model = create_model(opt)
    #Loading data
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    dataset_size = len(data_loader)
    print('Training images = %d' % dataset_size)
    visualizer = Visualizer(opt)
    total_steps = 0
    #Starts training
    for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
        iter_data_time = time.time()
        epoch_iter = 0
        for i, data in enumerate(dataset):
            iter_start_time = time.time()
            if total_steps % opt.print_freq == 0:
                t_data = iter_start_time - iter_data_time
            visualizer.reset()
            total_steps += opt.batchSize
            epoch_iter += opt.batchSize
            model.set_input(data)
            model.optimize_parameters()
            #Save current images (real_A, real_B, fake_A, fake_B, rec_A, rec_B)
            if epoch_iter % opt.display_freq == 0:
                save_result = total_steps % opt.update_html_freq == 0
                visualizer.display_current_results(model.get_current_visuals(),
                                                   epoch, epoch_iter,
                                                   save_result)
            #Save current errors
            if total_steps % opt.print_freq == 0:
                errors = model.get_current_errors()
                t = (time.time() - iter_start_time) / opt.batchSize
                visualizer.print_current_errors(epoch, epoch_iter, errors, t,
                                                t_data)
                if opt.display_id > 0:
                    visualizer.plot_current_errors(
                        epoch,
                        float(epoch_iter) / dataset_size, opt, errors)
            #Save model based on the number of iterations
            if total_steps % opt.save_latest_freq == 0:
                print('saving the latest model (epoch %d, total_steps %d)' %
                      (epoch, total_steps))
                model.save('latest')

            iter_data_time = time.time()
        #Save model based on the number of epochs
        print(opt.dataset_mode)
        if epoch % opt.save_epoch_freq == 0:
            print('saving the model at the end of epoch %d, iters %d' %
                  (epoch, total_steps))
            model.save('latest')
            model.save(epoch)

        print('End of epoch %d / %d \t Time Taken: %d sec' %
              (epoch, opt.niter + opt.niter_decay,
               time.time() - epoch_start_time))
        model.update_learning_rate()
Example #18
    opt = TrainOptions().parse()
    """
    opt.dataroot = './traindata'
    opt.name = 'DANN_miter1step1'
    opt.batchSize = 64
    opt.lr = 0.00001
    opt.model = 'DANN_m_iter'
    opt.which_epochs_DA = 1
    opt.which_usename_DA = 'DANN_mstep1without'
    opt.which_epochs_Di = 10
    opt.which_usename_Di = 'DANN_mv3step2'
    opt.gpu_ids = [0]
    opt.save_epoch_freq = 100
    """

    mnist_data_loader, mnistm_data_loader, eval_data_loader = CreateDataLoader(opt)
    mnist_dataset = mnist_data_loader.load_data()
    mnistm_dataset = mnistm_data_loader.load_data()
    eval_dataset = eval_data_loader.load_data()
    mnist_dataset_size = len(mnist_data_loader)
    mnistm_dataset_size = len(mnistm_data_loader)
    eval_dataset_size = len(eval_data_loader)

    print('#mnist training images = %d' % mnist_dataset_size)
    print('#mnistm training images = %d' % mnistm_dataset_size)
    print('#eval training images = %d' % eval_dataset_size)
    print('#eval training images = %d' % len(eval_dataset))

    model = create_model(opt)
    best_acc = 0
    total_steps = 0
    i = 0
Example #19
import time
import copy

from data import CreateDataLoader
from models import create_model
from options.train_options import TrainOptions
from util.visualizer import Visualizer, save_images

if __name__ == '__main__':
    opt = TrainOptions().parse()
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    dataset_size = len(data_loader)
    print('#training images = %d' % dataset_size)

    if opt.validate_freq > 0:
        validate_opt = copy.deepcopy(opt)
        validate_opt.phase = 'val'
        validate_opt.serial_batches = True  # no shuffle
        val_data_loader = CreateDataLoader(validate_opt)
        val_dataset = val_data_loader.load_data()
        val_dataset_size = len(val_data_loader)
        print('#validation images = %d' % val_dataset_size)

    model = create_model(opt)
    model.setup(opt)
    visualizer = Visualizer(opt)
    total_steps = 0

    for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
Example #20
def main():

    args = get_arguments()
    print('Called with args:')
    print(args)


    cfg_from_file(cfg, args.yml)

    if cfg.EXP_NAME == '':
        cfg.EXP_NAME = 'exp_1'

    # create the experiment folders
    if cfg.EXP_ROOT_SNAPSHOT == '':
        cfg.EXP_ROOT_SNAPSHOT = osp.join(cfg.EXP_ROOT, 'snapshots')
    if cfg.EXP_ROOT_LOGS == '':
        cfg.EXP_ROOT_LOGS = osp.join(cfg.EXP_ROOT, 'logs')
    if not osp.exists(cfg.EXP_ROOT_SNAPSHOT):
        os.makedirs(cfg.EXP_ROOT_SNAPSHOT)
    if not osp.exists(cfg.EXP_ROOT_LOGS):
        os.makedirs(cfg.EXP_ROOT_LOGS)

    print('Using config:')
    pprint.pprint(cfg)

    os.environ["CUDA_VISIBLE_DEVICES"] = cfg.visGPU


    # INIT
    _init_fn = None
    if not cfg.RANDOM_TRAIN:
        torch.manual_seed(cfg.TRAIN.RANDOM_SEED)
        torch.cuda.manual_seed(cfg.TRAIN.RANDOM_SEED)
        np.random.seed(cfg.TRAIN.RANDOM_SEED)
        random.seed(cfg.TRAIN.RANDOM_SEED)

        def _init_fn(worker_id):
            np.random.seed(cfg.TRAIN.RANDOM_SEED + worker_id)


    model_dict = {}

    # LOAD  NET
    assert osp.exists(cfg.INIT_FROM), f'Missing init model {cfg.INIT_FROM}'
    if cfg.TRAIN.MODEL == 'RESNET':
        if cfg.METHOD == 'baseline':
            model_dict['model'] = get_resnet()
        else:
            raise NotImplementedError
    elif cfg.TRAIN.MODEL == 'VGG':
        if cfg.METHOD == 'baseline':
            model_dict['model'] = get_vgg()
        else:
            raise NotImplementedError
    else:
        raise NotImplementedError(f"Not yet supported {cfg.TRAIN.MODEL}")
    print('Model loaded')


    if cfg.RESTOR_FROM != '':
        state_dict = torch.load(cfg.RESTOR_FROM, map_location=lambda storage, loc: storage)
        if cfg.METHOD == 'baseline':
            model_dict['model'].load_state_dict(state_dict['model_state_dict'])
            print(f'model restore from {cfg.RESTOR_FROM}')
        else:
            raise NotImplementedError
    # DATA LOADER
    print('preparing dataloaders ...')
    dataloader = CreateDataLoader(cfg)
    dataloader_iter = enumerate(dataloader)

    with open(osp.join(cfg.EXP_ROOT_LOGS, 'train_cfg.yml'), 'w') as yaml_file:
        yaml.dump(cfg, yaml_file, default_flow_style=False)

    # TRAINING
    if cfg.METHOD == 'baseline':
        train_baseline(model_dict, dataloader_iter, cfg)
Example #21
import time
from options.train_options import TrainOptions
from data import CreateDataLoader
from models import create_model
from util.visualizer import Visualizer

if __name__ == '__main__':
    opt = TrainOptions().parse()         # opt handles the command-line arguments
    data_loader = CreateDataLoader(opt)     # data_loader loads the data
    dataset = data_loader.load_data()       # load the dataset
    dataset_size = len(data_loader)         # size of the dataset
    print('#training images = %d' % dataset_size)     # training data: 1096 images (train, trainA and trainB each hold 1096)

    model = create_model(opt)           # create the model; opt.model defaults to cycle_gan
    model.setup(opt)                    # read the parameters from opt and run the related initialization
    visualizer = Visualizer(opt)        # for visualizing the output
    total_steps = 0

    # train for 200 epochs
    for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
        iter_data_time = time.time()
        epoch_iter = 0

        for i, data in enumerate(dataset):
            iter_start_time = time.time()
            if total_steps % opt.print_freq == 0:   # print_freq = 100
                t_data = iter_start_time - iter_data_time
            visualizer.reset()
            total_steps += opt.batch_size      # default batch_size = 1
            epoch_iter += opt.batch_size
Example #22
from util.util import post_process

if __name__ == '__main__':
    opt = TrainOptions().parse()
    if opt.no_normalize:
        transform = tr.ToTensor()
    else:
        transform = tr.Compose([
            tr.ToTensor(),
            tr.Normalize(mean=opt.transform_mean, std=opt.transform_std)
        ])
    #mix train---syn data
    if opt.train_type == 'mix':
        opt.batch_size = opt.batch_size // 2
        train_loader = CreateDataLoader(opt, dataroot=opt.dataroot,
                                        image_dir=opt.train_img_dir_syn,
                                        label_dir=opt.train_label_dir_syn,
                                        record_txt=opt.train_img_list_syn,
                                        transform=transform, is_aug=False)
        train_dataset = train_loader.load_data()
        dataset_size = len(train_loader)
        print('#Synthetic training images = %d, batchsize = %d' %
              (dataset_size, opt.batch_size))

    #train---real data
    train_loader_real = CreateDataLoader(opt, dataroot=opt.dataroot,
                                         image_dir=opt.train_img_dir_real,
                                         label_dir=opt.train_label_dir_real,
                                         record_txt=opt.train_img_list_real,
                                         transform=transform, is_aug=False)
    train_dataset_real = train_loader_real.load_data()
    dataset_size_real = len(train_loader_real)
    print('#Real training images = %d, batchsize = %d' %
          (dataset_size_real, opt.batch_size))
Example #23
import os
from arguments import Arguments
from data import CreateDataLoader
from models import create_model
from util import save_images


if __name__ == '__main__':
    args = Arguments().parse()

    data_loader = CreateDataLoader(args)
    dataset = data_loader.load_data()
    model = create_model(args)

    for i, data in enumerate(dataset):
        if i >= args.how_many:
            break
        model.set_input(data)
        model.test()
        visuals = model.get_current_visuals()
        img_path = model.get_image_paths()
        img_size = model.get_image_sizes()
        print('%04d: processing image... %s' % (i, img_path))
        save_images(args.results_dir, visuals, img_path, size=img_size)
Example #24
def main(style):

    opt = TestOptions().parse()

    opt.dataroot = "datasets/own_data/testA"

    # four styles
    # opt.name = "style_ink_pretrained"
    # opt.name = "style_monet_pretrained"
    # opt.name = "style_cezanne_pretrained"
    # opt.name = "style_ukiyoe_pretrained"
    # opt.name = "style_vangogh_pretrained"


    # set original img size
    original_img = cv2.imread(opt.dataroot + "/temp.jpg")
    original_img_shape = original_img.shape[:2][::-1]  # (width, height) for cv2.resize

    opt.name = "style_%s_pretrained" % style
    # do not change this
    opt.model = "test"

    cv2.imread("temp.jpg")

    opt.nThreads = 1   # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip
    opt.display_id = -1  # no visdom display

    # need to overwrite (8-27); this part can be left out
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()

    # create model
    model = create_model(opt)
    model.setup(opt)

    # create website
    # the website itself is of little use, but the author saves the output images into web_dir, so this is left unchanged.

    web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
    print("web_dir", web_dir)
    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))
    print("webpage", webpage)
    # exit()

    # test
    for i, data in enumerate(dataset):
        # i is the index generated by enumerate
        # type of data is dict
        # one key is 'A', a tensor of size [1, 3, 256, 256]; the other is 'A_path', a str holding the source path (including the file name)
        # e.g. datasets/own_data/testA/2test.jpg
        # default how_many is 50: at most 50 images of a dataset are processed

        # need to overwrite "data"
        # data keeps the same shape, so wiring a listener around it externally should be enough
        if i >= opt.how_many:
            break
        model.set_input(data)

        model.test()
        visuals = model.get_current_visuals()
        img_path = model.get_image_paths()
        if i % 5 == 0:
            print('processing (%04d)-th image... %s' % (i, img_path))
        save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)

        generate_img = cv2.imread("results/generate_images/" + "temp.png")
        reshape_generate_img = cv2.resize(generate_img, original_img_shape, interpolation=cv2.INTER_CUBIC)

        cv2.imwrite("results/generate_images/" + "temp.png", reshape_generate_img)
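The shape reversal near the top of this example exists because NumPy stores images as (height, width, channels) while cv2.resize expects a (width, height) size; a minimal check:

import numpy as np
import cv2

img = np.zeros((480, 640, 3), dtype=np.uint8)  # shape is (height, width, channels)
size_wh = img.shape[:2][::-1]                  # -> (640, 480), i.e. (width, height)
resized = cv2.resize(img, size_wh)
assert resized.shape[:2] == (480, 640)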
Example #25
from options.train_options import TrainOptions
from data import CreateDataLoader
from models import create_model
from util.visualizer import Visualizer

# import random
# random.seed(666)

### landmark+bbox <-> face
### aligned data, implemented via the UnalignedDataset class
if __name__ == '__main__':
    print('start...')
    ### data
    opt = TrainOptions().parse()
    print('opt parse: success!')
    data_loader = CreateDataLoader(opt)
    print('initialize data_loader: success!')
    dataset = data_loader.load_data()  ### returns self
    dataset_size = len(data_loader)
    print('#training images = %d' % dataset_size)

    ### model
    model = create_model(opt)
    model.setup(opt)
    visualizer = Visualizer(opt)
    total_steps = 0

    ### train
    for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
        iter_data_time = time.time()
Example #26
    opt = TestOptions().parse()
    # hard-code some parameters for test
    opt.num_threads = 1   # test code only supports num_threads = 1
    opt.batch_size = 1    # test code only supports batch_size = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True    # no flip
    
    if opt.no_normalize:
        transform = tr.ToTensor()
    else:
        transform = tr.Compose([
            tr.ToTensor(),
            tr.Normalize(mean=opt.transform_mean, std=opt.transform_std)
        ])

    data_loader = CreateDataLoader(opt, dataroot=opt.dataroot,
                                   image_dir=opt.test_img_dir,
                                   label_dir=opt.test_label_dir,
                                   record_txt=opt.test_img_list,
                                   transform=transform, is_aug=False)
    dataset = data_loader.load_data()
    datasize = len(data_loader)
    print('#test images = %d, batchsize = %d' % (datasize, opt.batch_size))
 
    model = create_model(opt)
    model.setup(opt)
    
    img_dir = os.path.join(opt.results_dir, opt.name, '%s' % opt.epoch)
    mkdir(img_dir)
    
    eval_results = {}
    count = 0
    with open(img_dir + '_eval.txt', 'w') as log:
        now = time.strftime('%c')
        log.write('=============Evaluation (%s)=============\n' % now)
Example #27
# -*- coding:utf-8 -*-
import time
from options.train_options import TrainOptions
from data import CreateDataLoader
from models import create_model

if __name__ == '__main__':
    opt = TrainOptions().parse()
    data_loader = CreateDataLoader(opt)

    tmp = 0
Example #28
import math
from sklearn.metrics import balanced_accuracy_score, mean_squared_error
from skimage.color import rgb2lab
import numpy as np
import torch
import torch.nn as nn
import os
import shutil

import logging

opt = TrainOptions().parse()

opt.phase = 'train/train_'
opt.serial_batches = False
train_data_loader = CreateDataLoader(opt)
train_dataset = train_data_loader.load_data()
train_dataset_size = len(train_data_loader)

opt.phase = 'test/test_'
opt.batch_size = 1
opt.serial_batches = True
test_data_loader = CreateDataLoader(opt)
test_dataset = test_data_loader.load_data()
test_dataset_size = len(test_data_loader)

model = create_model(opt)
model.setup(opt)

# Set logger
msg = []
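This snippet mutates a single shared opt in place (phase, batch_size, serial_batches) between the two CreateDataLoader calls, which is safe only because each loader is built immediately after the fields are set. Examples #12, #19 and #31 copy the options first; a minimal sketch of that variant using the values above:

import copy

test_opt = copy.deepcopy(opt)  # leave the training options untouched
test_opt.phase = 'test/test_'
test_opt.batch_size = 1
test_opt.serial_batches = True
test_data_loader = CreateDataLoader(test_opt)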
Example #29
def convert():
    if chkGpuVar.get() == 0:
        opt.gpu_ids.clear()
    #opt.remove_images = chkDelVar.get()
    opt.resize_or_crop = drpResizeOp.get()
    try:
        opt.epoch = txtEpoch.get()
    except Exception as e:
        print(e)

    if 'scale' in opt.resize_or_crop:
        for i in range(len(validSizes) - 2):
            if validSizes[i] <= sclFineVar.get() < validSizes[i + 1]:
                opt.fineSize = validSizes[i]

    print(testOptions.return_options(opt))
    try:
        data_loader = CreateDataLoader(opt)
        dataset = data_loader.load_data()
        model = create_model(opt)
        model.setup(opt)

        # test with eval mode. This only affects layers like batchnorm and dropout.
        # pix2pix: we use batchnorm and dropout in the original pix2pix. You can experiment it with and without eval() mode.
        # CycleGAN: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.

        progressbar.configure(maximum=len(dataset))
        #progressbar.start(len(dataset))
        for i, data in enumerate(dataset):
            if i >= opt.num_test or not running:
                break
            model.set_input(data)
            model.test()
            visuals = model.get_current_visuals()
            img_path = model.get_image_paths()
            mess = 'processing (%04d)-th of %04d image... %s' % (
                i + 1, len(dataset), img_path[0])
            print(mess)

            # append the progress message to the log file
            with open('conversion_progress.txt', 'a') as file_object:
                file_object.write(mess + '\n')
            save_images(opt.results_dir,
                        visuals,
                        img_path,
                        save_both=opt.save_both,
                        aspect_ratio=opt.aspect_ratio)
            progress_var.set(i + 1)
            if opt.remove_images:
                os.remove(img_path[0])
                print('removed image', img_path[0])
    except KeyboardInterrupt:
        progress_var.set(0)
        print("==============Cancelled==============")
        raise
    except Exception as e:
        print(e)
        raise
Example #30
import time
from options.train_options import TrainOptions
from data import CreateDataLoader
from models import create_model
from util.visualizer import Visualizer

if __name__ == '__main__':
    opt = TrainOptions().parse()  # training options
    data_loader = CreateDataLoader(
        opt)  # Create a dataset given opt.dataset_mode and other options
    dataset = data_loader.load_data()  # Get dataset
    dataset_size = len(data_loader)
    print('#training images = %d' % dataset_size)

    model = create_model(
        opt)  # create a model given opt.model and other options
    model.setup(opt)
    visualizer = Visualizer(opt)
    total_steps = 0

    for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
        iter_data_time = time.time()
        epoch_iter = 0

        for i, data in enumerate(dataset):
            iter_start_time = time.time()
            if total_steps % opt.print_freq == 0:
                t_data = iter_start_time - iter_data_time
            visualizer.reset()
            total_steps += opt.batchSize
Example #31
import time
import copy
import torch
from options.train_options import TrainOptions
from data import CreateDataLoader
from models import create_model
from util.visualizer import Visualizer

if __name__ == '__main__':
    # training dataset
    opt = TrainOptions().parse()
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    dataset_size = len(data_loader)
    print('#training images = %d' % dataset_size)
    print('#training objects = %d' % opt.nTrainObjects)

    ## validation dataset
    if opt.compute_val:
        opt_validation = copy.copy(opt)  # create a clone
        opt_validation.phase = 'val'
        opt_validation.serial_batches = True
        opt_validation.isTrain = False
        data_loader_validation = CreateDataLoader(opt_validation)
        dataset_validation = data_loader_validation.load_data()
        dataset_size_validation = len(data_loader_validation)
        print('#validation images = %d' % dataset_size_validation)
        print('#validation objects = %d' % opt_validation.nValObjects)

    # model
    model = create_model(opt)