Exemplo n.º 1
0
    def __init__(self, gpu=0):
        """Load the colorization U-Nets and the line-art network on a GPU.

        Args:
            gpu: CUDA device id to make current before moving the models.
        """
        print("start")
        self.root = "./static/images/"
        self.batchsize = 1
        self.outdir = self.root + "out/"
        self.outdir_min = self.root + "out_min/"
        self.gpu = gpu

        print("load model")
        cuda.get_device(self.gpu).use()
        self.cnn_128 = unet.UNET()
        self.cnn = unet.UNET()
        self.cnn_128.to_gpu()
        self.cnn.to_gpu()
        # BUG FIX: the original bound the line-art network to a local
        # variable, so the freshly loaded weights were discarded as soon as
        # __init__ returned; keep it on the instance instead.
        self.lnn = lnet.LNET()
        serializers.load_npz(
            "./cgi-bin/paint_x2_unet/models/model_cnn_128_dfl2_9",
            self.cnn_128)
        serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_m_02",
                             self.cnn)
        serializers.load_npz("./cgi-bin/paint_x2_unet/models/liner_f",
                             self.lnn)
Exemplo n.º 2
0
    def __init__(self, gpu=0):
        """Load the 128- and 512-pixel colorization U-Nets.

        Args:
            gpu: CUDA device id; a negative value keeps everything on CPU.
        """
        current_path = os.path.dirname(__file__)

        print("start")
        # Repository root: three directories above this file.
        two_up = os.path.abspath(os.path.join(__file__, "../../../"))
        # FIX: the original wrapped these pre-concatenated strings in a
        # single-argument os.path.join(), which is a no-op and misleading;
        # plain concatenation yields the identical path (trailing slash kept
        # because outdir/outdir_min are built by concatenation below).
        self.root = two_up + "/static/paintschainer/images/"
        self.batchsize = 1
        self.outdir = self.root + "out/"
        self.outdir_min = self.root + "out_min/"
        self.gpu = gpu
        self._dtype = np.float32

        print("load model")
        if self.gpu >= 0:
            cuda.get_device(self.gpu).use()
            cuda.set_max_workspace_size(64 * 1024 * 1024)  # 64 MB
            chainer.Function.type_check_enable = False
        self.cnn_128 = unet.UNET()
        self.cnn_512 = unet.UNET()
        if self.gpu >= 0:
            self.cnn_128.to_gpu()
            self.cnn_512.to_gpu()
        serializers.load_npz(current_path + "/models/unet_128_standard",
                             self.cnn_128)
        serializers.load_npz(current_path + "/models/unet_512_standard",
                             self.cnn_512)
Exemplo n.º 3
0
    def __init__(self, gpu=0):
        """Set up output directories and load the pretrained U-Nets."""
        print("start")

        # Image locations used by the painting pipeline.
        self.root = "./images/"
        self.outdir = self.root + "out/"
        self.outdir_min = self.root + "out_min/"
        self.batchsize = 1
        self.gpu = gpu
        self._dtype = np.float32

        print("load model")
        use_gpu = self.gpu >= 0
        if use_gpu:
            cuda.get_device(self.gpu).use()
            cuda.set_max_workspace_size(64 * 1024 * 1024)  # 64MB
            chainer.Function.type_check_enable = False

        self.cnn_128 = unet.UNET()
        self.cnn_512 = unet.UNET()
        if use_gpu:
            self.cnn_128.to_gpu()
            self.cnn_512.to_gpu()

        serializers.load_npz(
            "./cgi-bin/paint_x2_unet/models/unet_128_standard", self.cnn_128)
        serializers.load_npz(
            "./cgi-bin/paint_x2_unet/models/unet_512_standard", self.cnn_512)
Exemplo n.º 4
0
    def __init__(self, gpu=0, colormode='LAB', normalized=False):
        """Load the 128-px colorization U-Net for the requested color mode.

        Args:
            gpu: CUDA device id; negative keeps the model on CPU.
            colormode: 'YUV' or 'LAB'; selects network shape and weights.
            normalized: flag stored on the instance (consumed elsewhere).

        Raises:
            ValueError: if colormode is neither 'YUV' nor 'LAB'.
        """
        print("start")
        self.root = "./images/"
        self.batchsize = 1
        self.outdir = self.root + "out/"
        self.outdir_min = self.root + "out_min/"
        self.gpu = gpu
        self._dtype = np.float32
        self._colormode = colormode
        self._norm = normalized

        print("load model")
        if self.gpu >= 0:
            cuda.get_device(self.gpu).use()
            cuda.set_max_workspace_size(64 * 1024 * 1024)  # 64MB
            chainer.Function.type_check_enable = False
        if self._colormode == 'YUV':
            self.cnn_128 = unet.UNET()
        elif self._colormode == 'LAB':
            # LAB mode: 3 input channels, 2 output channels (presumably the
            # a/b chroma planes — confirm against the dataset code).
            self.cnn_128 = unet.UNET(inputChannel=3, outputChannel=2)
        else:
            # BUG FIX: the original fell through with self.cnn_128 unset,
            # which surfaced later as a confusing AttributeError.
            raise ValueError(
                "unsupported colormode: {!r}".format(colormode))

        if self.gpu >= 0:
            self.cnn_128.to_gpu()
        if self._colormode == 'YUV':
            serializers.load_npz("./src/colorize/models/unet_128_standard-YUV", self.cnn_128)
        elif self._colormode == 'LAB':
            serializers.load_npz("./src/colorize/models/cnn_128_iter_370000", self.cnn_128)
Exemplo n.º 5
0
def main():
    """Build the data generator and U-Net model, then train in a session."""
    generator = data.DataGenerator(FLAGS.train_list, FLAGS.test_list,
                                   FLAGS.train_mask_dir)
    net = unet.UNET(generator,
                    out_mask_dir=FLAGS.out_mask_dir,
                    model_dir=FLAGS.model_dir)
    with tf.Session() as sess:
        net.train(sess)
Exemplo n.º 6
0
def initialize_parameters():
    """Create the CANDLE U-Net benchmark object and return its parameters."""
    benchmark = unet.UNET(unet.file_path,
                          'unet_params.txt',
                          'keras',
                          prog='unet_example',
                          desc='UNET example')
    # Let CANDLE merge defaults, file settings and command-line overrides.
    return candle.initialize_parameters(benchmark)
Exemplo n.º 7
0
    def __init__(self, gpu=0):
        """Load the 128/512 colorization networks, optionally onto a GPU."""
        print("start")
        self.root = "./images/"
        self.gpu = gpu

        print("load model")
        on_gpu = self.gpu >= 0
        if on_gpu:
            cuda.get_device(self.gpu).use()
            cuda.set_max_workspace_size(64 * 1024 * 1024)  # 64MB
            chainer.Function.type_check_enable = False

        self.cnn_128 = unet.UNET()
        self.cnn = unet.UNET()
        if on_gpu:
            self.cnn_128.to_gpu()
            self.cnn.to_gpu()

        serializers.load_npz(
            "./cgi-bin/paint_x2_unet/models/unet_128_standard", self.cnn_128)
        serializers.load_npz(
            "./cgi-bin/paint_x2_unet/models/unet_512_standard", self.cnn)
Exemplo n.º 8
0
    def __init__(self, gpu=-1):
        """Load the 128- and 512-pixel U-Nets from ./db (CPU by default)."""
        print("start")
        self.root = "./"
        self.batchsize = 1
        self.outdir = self.root
        self.outdir_min = self.root + "out_min/"
        self.gpu = gpu
        self._dtype = np.float32

        print("load model")
        if self.gpu >= 0:
            cuda.get_device(self.gpu).use()
            # NOTE(review): 1024**3 bytes is 1 GB — the previous "64MB"
            # comment was stale (other variants of this class use 64*1024*1024).
            cuda.set_max_workspace_size(1024 * 1024 * 1024)  # 1 GB
            chainer.Function.type_check_enable = False
        self.cnn_128 = unet.UNET()
        self.cnn_512 = unet.UNET()
        if self.gpu >= 0:
            self.cnn_128.to_gpu()
            self.cnn_512.to_gpu()
        serializers.load_npz("./db/unet_128_standard", self.cnn_128)
        serializers.load_npz("./db/unet_512_standard", self.cnn_512)
Exemplo n.º 9
0
    def __init__(self, gpu=0):
        """Load the liner network and the 128/512 colorization U-Nets,
        warning early if the pretrained weight files are missing.

        Args:
            gpu: CUDA device id; negative keeps everything on CPU.
        """
        print("start")
        self.root = "./images/"
        self.batchsize = 1
        self.outdir = self.root + "out/"
        self.outdir_min = self.root + "out_min/"
        self.gpu = gpu
        self._dtype = np.float32

        # Warn before loading: serializers.load_npz below would otherwise
        # fail with a far less helpful error.
        if not os.path.isfile("../../models/unet_128_standard"):
            print(
                "../../models/unet_128_standard not found. Please download them from http://paintschainer.preferred.tech/downloads/"
            )
        # BUG FIX: the original tested "./models/unet_512_standard" while
        # both the message and the actual load below use
        # "../../models/unet_512_standard".
        if not os.path.isfile("../../models/unet_512_standard"):
            print(
                "../../models/unet_512_standard not found. Please download them from http://paintschainer.preferred.tech/downloads/"
            )

        print("load model")
        if self.gpu >= 0:
            cuda.get_device(self.gpu).use()
            cuda.set_max_workspace_size(64 * 1024 * 1024)  # 64MB
            chainer.Function.type_check_enable = False
        self.cnn_128 = unet.UNET()
        self.cnn_512 = unet.UNET()
        self.lnn = lnet.LNET()
        if self.gpu >= 0:
            self.cnn_128.to_gpu()
            self.cnn_512.to_gpu()
            self.lnn.to_gpu()

        serializers.load_npz("../../models/liner_f", self.lnn)
        serializers.load_npz("../../models/unet_128_standard", self.cnn_128)
        serializers.load_npz("../../models/unet_512_standard", self.cnn_512)
Exemplo n.º 10
0
image_x, image_t = batch.shuffle_image(image_x, image_t)

# Delete later (debug output).
print(len(image_x))
num_data = settings["num_data"]  # number of training samples
num_test = settings["num_test"]
# Split the shuffled images/targets into train and test slices.
train_x = image_x[:num_data]
test_x = image_x[num_data:num_data + num_test]
train_t = image_t[:num_data]
test_t = image_t[num_data:num_data + num_test]

# Initialize the UNET.
# NOTE(review): this rebinds the name `unet` from the module to the model
# instance, so the module itself is unreachable from here on.
unet = unet.UNET(settings["input_sizex"],
                 settings["input_sizey"],
                 settings["num_class"],
                 depth=settings["depth"],
                 layers_default=settings["layers_default"])

Batch_x = batch.Batch(train_x)
Batch_t = batch.Batch(train_t)
Batch_num = settings["Batch_num"]  # mini-batch size

i = 0
for _ in range(settings["learning_times"]):  ##学習回数
    i += 1
    batch_x = Batch_x.next_batch(Batch_num)
    batch_t = Batch_t.next_batch(Batch_num)
    unet.sess.run(unet.train_step,
                  feed_dict={
                      unet.x: batch_x,
Exemplo n.º 11
0
def main():
    """Train the Chainer line-drawing colorization GAN.

    Builds the U-Net generator, the discriminator and the pretrained
    line-art network, wires them into a ganUpdater/Trainer pair, optionally
    resumes from a snapshot, trains, and saves the final model/optimizer.
    """
    parser = argparse.ArgumentParser(description='chainer line drawing colorization')
    parser.add_argument('--batchsize', '-b', type=int, default=16,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--dataset', '-i', default='./images/',
                        help='Directory of image files.')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--seed', type=int, default=0,
                        help='Random seed')
    parser.add_argument('--snapshot_interval', type=int, default=10000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval', type=int, default=100,
                        help='Interval of displaying log to console')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    root = args.dataset

    cnn = unet.UNET()
    dis = unet.DIS()

    # Pretrained line-art network, loaded fixed weights.
    l = lnet.LNET()
    serializers.load_npz("models/liner_f", l)

    dataset = Image2ImageDataset("dat/images_color_train.dat", root + "line/",
                                 root + "color/", train=True)
    train_iter = chainer.iterators.SerialIterator(dataset, args.batchsize)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        cnn.to_gpu()  # Copy the model to the GPU
        dis.to_gpu()  # Copy the model to the GPU
        l.to_gpu()

    # Setup optimizer parameters.
    opt = optimizers.Adam(alpha=0.0001)
    opt.setup(cnn)
    opt.add_hook(chainer.optimizer.WeightDecay(1e-5), 'hook_cnn')

    opt_d = chainer.optimizers.Adam(alpha=0.0001)
    opt_d.setup(dis)
    opt_d.add_hook(chainer.optimizer.WeightDecay(1e-5), 'hook_dec')

    # Set up a trainer.
    updater = ganUpdater(
        models=(cnn, dis, l),
        iterator={
            'main': train_iter,
        },
        optimizer={
            'cnn': opt,
            'dis': opt_d},
        device=args.gpu)

    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    snapshot_interval = (args.snapshot_interval, 'iteration')
    snapshot_interval2 = (args.snapshot_interval * 2, 'iteration')
    trainer.extend(extensions.dump_graph('cnn/loss'))
    trainer.extend(extensions.snapshot(), trigger=snapshot_interval2)
    trainer.extend(extensions.snapshot_object(
        cnn, 'cnn_128_iter_{.updater.iteration}'), trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dis, 'cnn_128_dis_iter_{.updater.iteration}'), trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        opt, 'optimizer_'), trigger=snapshot_interval)
    trainer.extend(extensions.LogReport(trigger=(10, 'iteration'), ))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'cnn/loss', 'cnn/loss_rec', 'cnn/loss_adv',
         'cnn/loss_tag', 'cnn/loss_l', 'dis/loss']))
    trainer.extend(extensions.ProgressBar(update_interval=20))

    # BUG FIX: a resume snapshot must be restored *before* training starts;
    # the original called trainer.run() first, so --resume had no effect.
    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()

    # Save the trained model.
    # BUG FIX: the original referenced an undefined name `out_dir` here.
    chainer.serializers.save_npz(os.path.join(args.out, 'model_final'), cnn)
    chainer.serializers.save_npz(os.path.join(args.out, 'optimizer_final'), opt)
Exemplo n.º 12
0
# Map of selectable model ids to display names.
ID_MODELNAME = {1: 'I3d', 2: 'Unet'}

# Dummy inputs for a single forward pass (2-D for U-Net, 3-D for I3D).
inputs_2d = tf.ones([1, 224, 224, 3], dtype=tf.float32)

inputs_3d = tf.ones([1, 64, 224, 224, 3], dtype=tf.float32)

parser = argparse.ArgumentParser(
    description='params of running the experiment')
parser.add_argument('--model_id',
                    type=int,
                    default=1,
                    help='To specify which model to test run')

args = parser.parse_args()
model_id = args.model_id

if model_id == 1:
    network = i3d.InceptionI3d()
    inputs = inputs_3d

elif model_id == 2:
    network = unet.UNET()
    inputs = inputs_2d

else:
    # BUG FIX: the original only printed a message and then crashed below
    # with a NameError on `network`; fail fast with a clear error instead.
    raise ValueError('Model id {} not valid'.format(model_id))

print('Model : {}'.format(ID_MODELNAME[model_id]))
logits, end_points = network(inputs, is_training=True)
Exemplo n.º 13
0
def main():
    """Colorize one greyscale test image with a pretrained LAB-mode U-Net.

    Loads a 3-channel-in / 2-channel-out U-Net snapshot, feeds it a single
    512x512 greyscale image padded with constant chroma planes, and writes
    the recombined result to test/lab.jpg.
    """
    parser = argparse.ArgumentParser(
        description='chainer line drawing colorization')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=16,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=200,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument(
        '--dataset',
        '-i',
        default='/home/ljw/deep_learning/intercolorize/data/farm/',
        help='Directory of image files.')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--seed', type=int, default=0, help='Random seed')
    parser.add_argument('--snapshot_interval',
                        type=int,
                        default=1000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval',
                        type=int,
                        default=100,
                        help='Interval of displaying log to console')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    root = args.dataset
    #model = "./model_paint"

    # dataset = Image2ImageDataset(
    #     "/home/ljw/deep_learning/intercolorize/data/farm/color_512.txt", root + "gray/", root + "color/", train=True)  # the class of dataset
    # # dataset.set_img_dict(img_dict)
    # train_iter = chainer.iterators.SerialIterator(dataset, args.batchsize)

    print("load model")
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        # cnn.to_gpu()  # Copy the model to the GPU

    # cnn_128 = unet.UNET()
    # 3 input channels (grey + two constant planes), 2 output channels.
    cnn_128 = unet.UNET(inputChannel=3, outputChannel=2)
    # cnn_512 = unet.UNET()
    if args.gpu >= 0:
        cnn_128.to_gpu()
        # cnn_512.to_gpu()
    serializers.load_npz("result/cnn_128_iter_235000",
                         cnn_128)  #test4_LAB/cnn_128_iter_62000
    # serializers.load_npz(
    #     "./cgi-bin/paint_x2_unet/models/unet_512_standard", self.cnn_512)

    # Read the test image as greyscale and resize to the network input size.
    path1 = 'test/2.jpg'
    image1 = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
    image1 = cv2.resize(image1, (512, 512), interpolation=cv2.INTER_AREA)
    image1 = np.asarray(image1, np.float32)
    l = image1.copy()  # keep the grey plane for recombination below
    # image1 -= 128

    if image1.ndim == 2:
        image1 = image1[:, :, np.newaxis]

    # Pad the single grey plane with two constant-128 planes to form the
    # 3-channel input, then go HWC -> CHW.
    image1 = np.insert(image1, 1, 128, axis=2)
    image1 = np.insert(image1, 2, 128, axis=2)
    img = image1.transpose(2, 0, 1)

    x = np.zeros((1, 3, img.shape[1], img.shape[2]), dtype='f')
    x[0, :] = img

    if args.gpu >= 0:
        x = cuda.to_gpu(x)

    # lnn = lnet.LNET()
    # Old-style Chainer inference call (volatile='on' / test=True).
    y = cnn_128.calc(Variable(x, volatile='on'), test=True)

    # Network output transposed back to HxWx2.
    ab = y.data[0].transpose(1, 2, 0)
    ab = ab.clip(0, 255).astype(np.uint8)
    if args.gpu >= 0:
        ab = cuda.to_cpu(ab)

    # Recombine the original grey plane with the two predicted planes.
    lab = np.zeros((ab.shape[0], ab.shape[1], 3), dtype='f')
    lab[:, :, 0] = l
    lab[:, :, 1:] = ab
    lab = lab.astype(np.uint8)

    # NOTE(review): cv2.imwrite expects BGR ordering; converting LAB->RGB
    # here likely swaps red/blue in the saved file — confirm intended.
    res = cv2.cvtColor(lab, cv2.COLOR_LAB2RGB)
    cv2.imwrite('test/lab.jpg', res)
Exemplo n.º 14
0
def main():
    """Train the LAB/YUV colorization GAN on the Places2 outdoor subset.

    Builds a U-Net generator and a discriminator for the chosen color mode,
    trains them with a ganUpdater/Trainer pair, and saves the final model
    and optimizer state under --out.
    """
    parser = argparse.ArgumentParser(
        description='chainer line drawing colorization')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=16,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument(
        '--dataset',
        '-i',
        default='/media/ljw/Research/research/Deep_Learning/data/Places2/',
        help='Directory of image files.')
    # parser.add_argument('--dataset', '-i', default='/home/ljw/deep_learning/intercolorize/data/farm/',
    #                     help='Directory of image files.')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--seed', type=int, default=0, help='Random seed')
    parser.add_argument('--snapshot_interval',
                        type=int,
                        default=5000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval',
                        type=int,
                        default=100,
                        help='Interval of displaying log to console')
    parser.add_argument('--colormode', default='LAB', help='Color mode')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    root = args.dataset
    #model = "./model_paint"

    # Network shapes depend on the color mode: in LAB mode the generator
    # emits 2 channels and the discriminator takes 2 input channels.
    if args.colormode == 'YUV':
        cnn = unet.UNET()
        dis = unet.DIS()
    elif args.colormode == 'LAB':
        cnn = unet.UNET(inputChannel=3, outputChannel=2)
        dis = unet.DIS(inputChannel=2)
    else:
        # NOTE(review): execution continues with cnn/dis unbound after this
        # print and raises NameError below — consider raising instead.
        print('ERROR! Unexpected color mode!!!')

    # l = lnet.LNET()
    # serializers.load_npz("../models/liner_f", l)   # load pre-trained model to l

    dataset = Image2ImageDataset(
        "/media/ljw/Research/research/Deep_Learning/data/Places2/filelist_places365-standard/places365_train_outdoor_color512-all.txt",
        root + "/",
        root + "data_large",
        train=True,
        colormode=args.colormode)  # the class of dataset
    # dataset = Image2ImageDataset(
    #     "/home/ljw/deep_learning/intercolorize/data/farm/color_512.txt",
    #     root + "gray/", root + "color/", train=True, colormode=args.colormode)  # the class of dataset
    train_iter = chainer.iterators.SerialIterator(dataset, args.batchsize)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        cnn.to_gpu()  # Copy the model to the GPU
        dis.to_gpu()  # Copy the model to the GPU
        # l.to_gpu()

    # Setup optimizer parameters.
    opt = optimizers.Adam(alpha=0.0001)  # use the Adam
    opt.setup(cnn)
    opt.add_hook(chainer.optimizer.WeightDecay(1e-5),
                 'hook_cnn')  # L2 weight-decay regularization on the generator
    opt_d = chainer.optimizers.Adam(alpha=0.0001)
    opt_d.setup(dis)
    opt_d.add_hook(chainer.optimizer.WeightDecay(1e-5), 'hook_dec')

    # Set up a trainer
    updater = ganUpdater(
        colormode=args.colormode,
        models=(cnn, dis),
        iterator={
            'main': train_iter,
            #'test': test_iter
        },
        optimizer={
            'cnn': opt,
            'dis': opt_d
        },
        device=args.gpu)

    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    # Periodic snapshots of trainer state, models and optimizer.
    snapshot_interval = (args.snapshot_interval, 'iteration')
    snapshot_interval2 = (args.snapshot_interval * 2, 'iteration')
    trainer.extend(extensions.dump_graph('cnn/loss'))
    trainer.extend(extensions.snapshot(), trigger=snapshot_interval2)
    trainer.extend(extensions.snapshot_object(
        cnn, 'cnn_128_iter_{.updater.iteration}'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        dis, 'cnn_128_dis_iter_{.updater.iteration}'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(opt, 'optimizer_'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.LogReport(trigger=(10, 'iteration'), ))
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'cnn/loss', 'cnn/loss_rec', 'cnn/loss_adv',
            'cnn/loss_tag', 'cnn/loss_l', 'dis/loss'
        ]))
    trainer.extend(extensions.ProgressBar(update_interval=10))

    if args.resume:
        # Resume from a snapshot (restored before training starts).
        chainer.serializers.load_npz(os.path.join(args.out, args.resume),
                                     trainer)

    trainer.run()

    # Save the trained model
    chainer.serializers.save_npz(os.path.join(args.out, 'model_final'), cnn)
    chainer.serializers.save_npz(os.path.join(args.out, 'optimizer_final'),
                                 opt)
Exemplo n.º 15
0
import chainer
from PIL import Image
from PIL import ImageOps
from chainer import cuda, serializers, Variable  # , optimizers, training
import cv2
import os.path
import time

from mldraw_adaptor import canvas_message_handler, start

from img2imgDataset import ImageAndRefDataset

import unet
import lnet

# BUG FIX: select the CUDA device *before* moving the models; the original
# called cuda.get_device(0).use() only after to_gpu(), which silently relied
# on device 0 already being current.
cuda.get_device(0).use()

# The two colorization stages: a 128-px draft net and a 512-px refiner.
cnn_128 = unet.UNET()
cnn_512 = unet.UNET()

cnn_128.to_gpu()
cnn_512.to_gpu()

serializers.load_npz("./models/unet_128_standard", cnn_128)

serializers.load_npz("./models/unet_512_standard", cnn_512)


def cvt2YUV(img):
    (major, minor, _) = cv2.__version__.split(".")
    if major == '3':
Exemplo n.º 16
0
# Select the cached dataset variant from the command line: the TOULOUSE
# datasets (or a missing/unknown mode argument) use the default colour copy;
# otherwise "grey" and "normalize" pick the greyscale variants.
if sys.argv[1] in ["TOULOUSE", "TOULOUSE_lod0"] or len(
        sys.argv) == 2 or sys.argv[2] not in ["grey", "normalize"]:
    data = data.copyTOcache(outputresolution=50)
elif sys.argv[2] == "grey":
    data = data.copyTOcache(outputresolution=50, color=False)
else:
    data = data.copyTOcache(outputresolution=50,
                            color=False,
                            normalize=True)

nbclasses = len(data.setofcolors)
cm = np.zeros((nbclasses, nbclasses), dtype=int)  # confusion matrix

print("load unet")
import unet
net = unet.UNET(nbclasses, pretrained="/data/vgg16-00b39a1b.pth")
net = net.to(device)

print("train setting")
import torch.nn as nn
import collections
import torch.optim as optim
import random
from sklearn.metrics import confusion_matrix

# Class-frequency weights for the loss, supplied by the dataset.
weights = torch.Tensor(data.getCriterionWeight()).to(device)
criterion = nn.CrossEntropyLoss(weight=weights)
optimizer = optim.Adam(net.parameters(), lr=0.0001)
meanloss = collections.deque(maxlen=200)
nbepoch = 120
Exemplo n.º 17
0
    model = googlenet.GoogLeNet()
elif args.arch == 'vgga':
    import vgga
    model = vgga.vgga()
elif args.arch == 'overfeat':
    import overfeat
    model = overfeat.overfeat()
elif args.arch == 'vgg16':
    import vgg16
    model = vgg16.VGG16()
elif args.arch == 'vgg19':
    import vgg19
    model = vgg19.VGG19()
elif args.arch == 'unet':
    import unet
    model = unet.UNET()
elif args.arch == 'resnet50':
    import resnet
    model = resnet.ResNet([3, 4, 6, 3])
elif args.arch == 'resnet101':
    import resnet
    model = resnet.ResNet([3, 4, 23, 3])
elif args.arch == 'resnet152':
    import resnet
    model = resnet.ResNet([3, 8, 36, 3])
else:
    raise ValueError('Invalid architecture name')

if 'resnet' in args.arch:
    model.insize = args.insize