def __init__(self, gpu=0):
    """Load the 128px and 512px colorization U-Nets plus the line-art net.

    Args:
        gpu: CUDA device id; a negative value selects CPU-only mode.
    """
    print("start")
    self.root = "./static/images/"
    self.batchsize = 1
    self.outdir = self.root + "out/"
    self.outdir_min = self.root + "out_min/"
    self.gpu = gpu

    print("load model")
    # BUG FIX: the original called cuda.get_device() unconditionally, which
    # fails when gpu < 0; guard all CUDA work like the sibling __init__
    # variants do.
    if self.gpu >= 0:
        cuda.get_device(self.gpu).use()

    self.cnn_128 = unet.UNET()
    self.cnn = unet.UNET()
    if self.gpu >= 0:
        self.cnn_128.to_gpu()
        self.cnn.to_gpu()

    # BUG FIX: the line-art net was loaded into a plain local and discarded
    # right after __init__ returned; keep it on the instance (matches the
    # newer __init__ variant that stores self.lnn).
    self.lnn = lnet.LNET()

    serializers.load_npz(
        "./cgi-bin/paint_x2_unet/models/model_cnn_128_dfl2_9", self.cnn_128)
    serializers.load_npz(
        "./cgi-bin/paint_x2_unet/models/model_m_02", self.cnn)
    serializers.load_npz(
        "./cgi-bin/paint_x2_unet/models/liner_f", self.lnn)
def __init__(self, gpu=0):
    """Load the standard 128px and 512px colorization U-Nets.

    Args:
        gpu: CUDA device id; a negative value selects CPU-only mode.
    """
    print("start")
    self.root = "./images/"
    self.batchsize = 1
    self.outdir = self.root + "out/"
    self.outdir_min = self.root + "out_min/"
    self.gpu = gpu

    print("load model")
    if self.gpu >= 0:
        cuda.get_device(self.gpu).use()
        # Cap cuDNN workspace and skip type checks for faster inference.
        cuda.set_max_workspace_size(64 * 1024 * 1024)  # 64MB
        chainer.Function.type_check_enable = False

    self.cnn_128 = unet.UNET()
    self.cnn = unet.UNET()
    if self.gpu >= 0:
        self.cnn_128.to_gpu()
        self.cnn.to_gpu()

    # NOTE(review): the original also constructed an lnet.LNET() into a
    # local that was never loaded, stored, or used — that dead construction
    # was removed here.
    serializers.load_npz(
        "./cgi-bin/paint_x2_unet/models/unet_128_standard", self.cnn_128)
    serializers.load_npz(
        "./cgi-bin/paint_x2_unet/models/unet_512_standard", self.cnn)
def liner(self, id_str):
    """Run the line-art network and save the result as root/line/<id>.jpg.

    Uses the legacy Chainer inference API (volatile variables + test flag).

    Args:
        id_str: request id used to name the output image.
    """
    if self.gpu >= 0:
        cuda.get_device(self.gpu).use()

    # NOTE(review): `path1` is not defined in this scope — unless it exists
    # as a module-level global this raises NameError. Confirm where the
    # input path is supposed to come from.
    gray = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
    gray = np.asarray(gray, self._dtype)
    if gray.ndim == 2:
        gray = gray[:, :, np.newaxis]
    chw = gray.transpose(2, 0, 1)

    # NOTE(review): only the *shape* of the loaded image is used here; its
    # pixel data is never copied into the zero batch — looks like a dropped
    # assignment. Preserved as-is.
    batch = np.zeros((1, 3, chw.shape[1], chw.shape[2]), dtype='f')
    if self.gpu >= 0:
        batch = cuda.to_gpu(batch)

    lnn = lnet.LNET()
    y = lnn.calc(Variable(batch, volatile='on'), test=True)

    self.save_as_img(y.data[0], self.root + "line/" + id_str + ".jpg")
def liner(self, id_str):
    """Run the line-art network and save the result as root/line/<id>.jpg.

    Uses the modern Chainer inference API (no_backprop_mode + train=False).

    Args:
        id_str: request id used to name the output image.
    """
    if self.gpu >= 0:
        cuda.get_device(self.gpu).use()

    # NOTE(review): `path1` is not defined in this scope — unless it exists
    # as a module-level global this raises NameError. Confirm where the
    # input path is supposed to come from.
    gray = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
    gray = np.asarray(gray, self._dtype)
    if gray.ndim == 2:
        gray = gray[:, :, np.newaxis]
    chw = gray.transpose(2, 0, 1)

    # NOTE(review): only the *shape* of the loaded image is used here; its
    # pixel data is never copied into the zero batch — looks like a dropped
    # assignment. Preserved as-is.
    batch = np.zeros((1, 3, chw.shape[1], chw.shape[2]), dtype='f')
    if self.gpu >= 0:
        batch = cuda.to_gpu(batch)

    lnn = lnet.LNET()
    with chainer.no_backprop_mode():
        with chainer.using_config('train', False):
            y = lnn.calc(Variable(batch))

    self.save_as_img(y.data[0], self.root + "line/" + id_str + ".jpg")
def __init__(self, gpu=0):
    """Load the line-art net plus the 128px and 512px colorization U-Nets.

    Warns on stdout when the pretrained weight files are missing before
    attempting to load them.

    Args:
        gpu: CUDA device id; a negative value selects CPU-only mode.
    """
    print("start")
    self.root = "./images/"
    self.batchsize = 1
    self.outdir = self.root + "out/"
    self.outdir_min = self.root + "out_min/"
    self.gpu = gpu
    self._dtype = np.float32

    # Warn early so the user sees a download hint rather than a bare
    # load_npz failure.
    if not os.path.isfile("../../models/unet_128_standard"):
        print(
            "../../models/unet_128_standard not found. Please download them from http://paintschainer.preferred.tech/downloads/"
        )
    # BUG FIX: this check previously tested "./models/unet_512_standard"
    # while the message and the load_npz below both use "../../models/...",
    # so the warning could never fire for the path actually loaded.
    if not os.path.isfile("../../models/unet_512_standard"):
        print(
            "../../models/unet_512_standard not found. Please download them from http://paintschainer.preferred.tech/downloads/"
        )

    print("load model")
    if self.gpu >= 0:
        cuda.get_device(self.gpu).use()
        # Cap cuDNN workspace and skip type checks for faster inference.
        cuda.set_max_workspace_size(64 * 1024 * 1024)  # 64MB
        chainer.Function.type_check_enable = False

    self.cnn_128 = unet.UNET()
    self.cnn_512 = unet.UNET()
    self.lnn = lnet.LNET()
    if self.gpu >= 0:
        self.cnn_128.to_gpu()
        self.cnn_512.to_gpu()
        self.lnn.to_gpu()

    serializers.load_npz("../../models/liner_f", self.lnn)
    serializers.load_npz("../../models/unet_128_standard", self.cnn_128)
    serializers.load_npz("../../models/unet_512_standard", self.cnn_512)
def main():
    """CLI entry point: train the colorization U-Net with a GAN objective.

    Parses command-line options, builds the generator (cnn), discriminator
    (dis) and frozen line net (l), wires them into a chainer Trainer, and
    saves the final model/optimizer under the output directory.
    """
    parser = argparse.ArgumentParser(description='chainer line drawing colorization')
    parser.add_argument('--batchsize', '-b', type=int, default=16,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--dataset', '-i', default='./images/',
                        help='Directory of image files.')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--seed', type=int, default=0,
                        help='Random seed')
    parser.add_argument('--snapshot_interval', type=int, default=10000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval', type=int, default=100,
                        help='Interval of displaying log to console')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    root = args.dataset

    cnn = unet.UNET()
    dis = unet.DIS()
    # Pretrained line net used as a fixed feature extractor for the loss.
    l = lnet.LNET()
    serializers.load_npz("models/liner_f", l)

    dataset = Image2ImageDataset(
        "dat/images_color_train.dat", root + "line/", root + "color/", train=True)
    train_iter = chainer.iterators.SerialIterator(dataset, args.batchsize)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        cnn.to_gpu()  # Copy the model to the GPU
        dis.to_gpu()  # Copy the model to the GPU
        l.to_gpu()

    # Setup optimizer parameters.
    opt = optimizers.Adam(alpha=0.0001)
    opt.setup(cnn)
    opt.add_hook(chainer.optimizer.WeightDecay(1e-5), 'hook_cnn')
    opt_d = chainer.optimizers.Adam(alpha=0.0001)
    opt_d.setup(dis)
    opt_d.add_hook(chainer.optimizer.WeightDecay(1e-5), 'hook_dec')

    # Set up a trainer
    updater = ganUpdater(
        models=(cnn, dis, l),
        iterator={
            'main': train_iter,
        },
        optimizer={
            'cnn': opt,
            'dis': opt_d},
        device=args.gpu)

    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    snapshot_interval = (args.snapshot_interval, 'iteration')
    snapshot_interval2 = (args.snapshot_interval * 2, 'iteration')
    trainer.extend(extensions.dump_graph('cnn/loss'))
    trainer.extend(extensions.snapshot(), trigger=snapshot_interval2)
    trainer.extend(extensions.snapshot_object(
        cnn, 'cnn_128_iter_{.updater.iteration}'), trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dis, 'cnn_128_dis_iter_{.updater.iteration}'), trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        opt, 'optimizer_'), trigger=snapshot_interval)
    trainer.extend(extensions.LogReport(trigger=(10, 'iteration'), ))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'cnn/loss', 'cnn/loss_rec', 'cnn/loss_adv',
         'cnn/loss_tag', 'cnn/loss_l', 'dis/loss']))
    trainer.extend(extensions.ProgressBar(update_interval=20))

    # BUG FIX: a resume snapshot must be restored *before* training starts;
    # the original only loaded it after trainer.run() had already finished.
    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()

    # Save the trained model.
    # BUG FIX: the original referenced an undefined `out_dir`; the output
    # directory comes from the --out CLI argument.
    chainer.serializers.save_npz(os.path.join(args.out, 'model_final'), cnn)
    chainer.serializers.save_npz(os.path.join(args.out, 'optimizer_final'), opt)