Example #1
def setup_model(phrase_net, image_net):
    if image_net == 'vgg':
        vis_cnn = L.VGG16Layers()
    elif image_net == 'resnet':
        vis_cnn = L.ResNet50Layers()
    elif image_net is None:
        vis_cnn = None  # phrase-only models do not use a visual CNN
    else:
        raise ValueError('unknown image_net: {}'.format(image_net))

    wo_image = (image_net is None)

    if phrase_net in ['rnn', 'avr']:
        w_vec = np.load('data/entity/word_vec.npy')

    if phrase_net in ['fv', 'fv+cca', 'fv+pca']:
        model = PNetFV() if wo_image else IPNetFV(vis_cnn)
    elif phrase_net == 'rnn':
        model = PNetGRU(w_vec) if wo_image else IPNetGRU(vis_cnn, w_vec)
    elif phrase_net == 'avr':
        model = PNetAvr(w_vec) if wo_image else IPNetAvr(vis_cnn, w_vec)
    else:
        raise NotImplementedError(phrase_net)

    return model
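A minimal usage sketch for the factory above (the PNet/IPNet classes and the word-vector file come from the surrounding repository and are assumed to be importable):

model = setup_model(phrase_net='rnn', image_net=None)    # phrase-only GRU encoder
model = setup_model(phrase_net='rnn', image_net='vgg')   # joint model with a VGG16 backbone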
Example #2
    def __init__(self, num_classes, **kwargs):
        super(VGG16, self).__init__()
        with self.init_scope():
            self.base = L.VGG16Layers()
            self.fc_6 = L.Linear(512 * 7 * 7, 2048)
            self.fc_7 = L.Linear(2048, 1024)
            self.fc_8 = L.Linear(1024, num_classes)
Example #3
    def __init__(self, opt):
        super().__init__()
        with self.init_scope():
            self.detecter = L.VGG16Layers().to_gpu(0)
            self.layer_names = ['conv1_2', 'conv2_2', 'conv3_3', 'conv4_3', 'conv5_3']

            if opt.perceptual_model == 'VGG19':
                self.detecter = L.VGG19Layers().to_gpu(0)
                self.layer_names = ['conv1_2', 'conv2_2', 'conv3_4', 'conv4_4', 'conv5_4']

        self.weight = [32 ** -1,
                       16 ** -1,
                       8 ** -1,
                       4 ** -1,
                       1]

        self.coef = opt.perceptual_coef
        self.criterion = F.mean_absolute_error

        if opt.perceptual_mode == 'MAE':
            self.criterion = F.mean_absolute_error

        if opt.perceptual_mode == 'MSE':
            self.criterion = F.mean_squared_error
            self.coef *= 0.5
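The constructor above only stores the layer names, per-layer weights, and criterion; a sketch of the matching loss computation, assuming the usual weighted per-layer comparison of VGG activations (the method name and the exact reduction are assumptions, not part of the original):

    def __call__(self, x, t):
        # features of the generated image x (with gradients) ...
        feats_x = self.detecter(x, layers=self.layer_names)
        # ... and of the target t (no gradients needed)
        with chainer.no_backprop_mode():
            feats_t = self.detecter(t, layers=self.layer_names)
        # weighted sum of the per-layer criterion (MAE or MSE)
        loss = 0
        for name, w in zip(self.layer_names, self.weight):
            loss += w * self.criterion(feats_x[name], feats_t[name].data)
        return self.coef * loss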
Example #4
def main():
    parser = argparse.ArgumentParser(description='vgg16')
    parser.add_argument('--input',
                        '-i',
                        type=str,
                        default='./images/cat.jpg',
                        help='image file to classify')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('Input: {}'.format(args.input))
    print('')

    # import VGG model
    print('load network')
    vgg16 = L.VGG16Layers()
    print('load network, done.')

    # prediction test
    img = Image.open(args.input)
    x = L.model.vision.vgg.prepare(img)
    x = x[np.newaxis]  # batch size
    print('predict')
    starttime = time.time()
    result = vgg16(x)
    predict = F.argmax(F.softmax(result['prob'], axis=1), axis=1)
    endtime = time.time()
    print(predict, (endtime - starttime),
          'sec')  # variable([281]) 47.0666120052 sec
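Note that the script parses --gpu but never transfers anything to the device; a minimal sketch of the missing step, using Chainer's standard transfer API (where exactly to insert it, and that chainer itself is imported, are assumptions):

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        vgg16.to_gpu()              # move model parameters to the GPU
        x = chainer.cuda.to_gpu(x)  # move the input batch as well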
Example #5
    def __init__(self, n_class, aspect_ratios):
        init = {
            'initialW': initializers.GlorotUniform(),
            'initial_bias': initializers.constant.Zero(),
        }
        super().__init__(
            base=L.VGG16Layers(pretrained_model=None),
            conv5_1=L.DilatedConvolution2D(None, 512, 3, pad=1, **init),
            conv5_2=L.DilatedConvolution2D(None, 512, 3, pad=1, **init),
            conv5_3=L.DilatedConvolution2D(None, 512, 3, pad=1, **init),
            conv6=L.DilatedConvolution2D(None, 1024, 3, pad=6, dilate=6,
                                         **init),
            conv7=L.Convolution2D(None, 1024, 1, **init),
            conv8_1=L.Convolution2D(None, 256, 1, **init),
            conv8_2=L.Convolution2D(None, 512, 3, stride=2, pad=1, **init),
            conv9_1=L.Convolution2D(None, 128, 1, **init),
            conv9_2=L.Convolution2D(None, 256, 3, stride=2, pad=1, **init),
            conv10_1=L.Convolution2D(None, 128, 1, **init),
            conv10_2=L.Convolution2D(None, 256, 3, **init),
            conv11_1=L.Convolution2D(None, 128, 1, **init),
            conv11_2=L.Convolution2D(None, 256, 3, **init),
            multibox=MultiBox(n_class, aspect_ratios=aspect_ratios, init=init),
        )
        self.n_class = n_class
        self.aspect_ratios = aspect_ratios
        self.train = False
Example #6
def main():
    parser = argparse.ArgumentParser(
        description='generate 2D processing operator output')
    parser.add_argument('--with-ideep',
                        action='store_true',
                        help='enable ideep')
    parser.add_argument('--input', type=str, help='input file path')
    args = parser.parse_args()
    with open(args.input, "r") as f:
        dims_num = int(f.readline())
        shape = tuple(int(d) for d in f.readline().strip().split(" "))
        raw_data = [np.float32(d) for d in f.readline().strip().split(" ")]
        x = np.array(raw_data).reshape(shape)

    chainer.config.train = False

    model = L.VGG16Layers()
    if args.with_ideep:
        chainer.config.use_ideep = "auto"
        model.to_intel64()

    start = time.process_time()
    y = model(x)
    end = time.process_time()
    print((end - start) * 1000)
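The input file read above is three lines: the number of dimensions, the space-separated shape, and the space-separated flattened values. A tiny writer sketch that produces a compatible file (the file name is arbitrary):

import numpy as np

x = np.random.uniform(size=(1, 3, 224, 224)).astype(np.float32)
with open('input.txt', 'w') as f:
    f.write('{}\n'.format(x.ndim))
    f.write(' '.join(str(d) for d in x.shape) + '\n')
    f.write(' '.join(str(v) for v in x.ravel()))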
Example #7
    def __init__(self, class_labels=2):
        super(sideVGG, self).__init__()

        with self.init_scope():
            self.base = L.VGG16Layers()
            self.fc6 = L.Linear(6144, 4096)
            self.fc7 = L.Linear(4096, 256)
            self.fc8 = L.Linear(256, class_labels)
Example #8
    def __init__(self, n_out=2):
        super().__init__()

        with self.init_scope():
            self.base = L.VGG16Layers()
            self.fc6 = L.Linear(None, 4096)
            self.fc7 = L.Linear(None, 512)
            self.fc8 = L.Linear(None, n_out)
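None of these fine-tuning heads show a forward pass; a representative sketch for this one, assuming pool5 features from the pretrained base feed the new fully connected stack and that chainer.functions is imported as F:

    def forward(self, x):
        # pool5 activations of the pretrained VGG16 base
        h = self.base(x, layers=['pool5'])['pool5']
        h = F.relu(self.fc6(h))
        h = F.relu(self.fc7(h))
        return self.fc8(h)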
Example #9
    def __init__(self, n_units, n_out):
        super(net, self).__init__()
        with self.init_scope():
            self.pretrained = L.VGG16Layers()
            # the size of the inputs to each layer will be inferred
            self.l1 = L.Linear(None, n_units)  # n_in -> n_units
            self.l2 = L.Linear(None, n_units)  # n_units -> n_units
            self.l3 = L.Linear(None, n_out)  # n_units -> n_out
Example #10
    def __init__(self, out_size):
        super(Model, self).__init__(
            # vgg = L.VGG16Layers(chainermodel),
            vgg=L.VGG16Layers(),
            fc1=L.Linear(None, 1000),
            fc2=L.Linear(None, 1000),
            fc3=L.Linear(None, 1000),
            fc4=L.Linear(None, 500))
Example #11
    def __init__(self, n_out, lossfunc=0):
        self.lossfunc = lossfunc
        initializer = chainer.initializers.HeNormal()
        super(ResNet, self).__init__()
        with self.init_scope():
            self.base = L.VGG16Layers()
            self.l1 = L.Linear(4096, initialW=initializer)
            self.l2 = L.Linear(4096, initialW=initializer)
            self.l3 = L.Linear(n_out, initialW=initializer)
Example #12
    def __init__(self, n_class):
        super(VGG16_Finetune2, self).__init__()
        with self.init_scope():
            self.m1 = L.VGG16Layers()
            self.m2 = L.Classifier(VGG16_Finetune(n_class))
            chainer.serializers.load_npz("./result/ft_cub/model.npz", self.m2)
            # chainer.serializers.load_npz("./result/lwf_cub/modelLWF.npz", self.m2)
            self.m4 = self.m2.predictor
            self.m4.fc8 = self.m1.fc8
Example #13
def get_vgg16(batchsize):
    model = L.VGG16Layers(pretrained_model=None)
    model = Wrapper(model, 'fc8')
    x = np.random.uniform(size=(batchsize, 3, 224, 224)).astype('f')
    x = chainer.as_variable(x)
    t = np.random.randint(size=(batchsize, ), low=0,
                          high=1000).astype(np.int32)
    t = chainer.as_variable(t)

    return [x, t], model
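A usage sketch, assuming Wrapper (not shown in this example) turns the backbone into a loss-computing chain over the named layer:

(x, t), model = get_vgg16(batchsize=4)
loss = model(x, t)  # hypothetical: Wrapper is assumed to compute a classification loss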
Example #14
    def __init__(self,
                 n_class=5,
                 lossfun=F.softmax_cross_entropy,
                 accfun=F.accuracy):
        super(PretrainedVGG16, self).__init__()
        with self.init_scope():
            self.base = L.VGG16Layers()
            self.new_fc8 = L.Linear(None, n_class)
        self.lossfun = lossfun
        self.accfun = accfun
Example #15
    def __init__(self, n_out=7):
        super(VGG_double, self).__init__(
            conv=L.Convolution2D(6, 3, ksize=3, stride=1, pad=1,
                                 initialW=HeNormal()),
            model=L.VGG16Layers(),
            fc=L.Linear(4096, n_out))
Example #16
    def __init__(
        self,
        filename_mesh,
        filename_style,
        texture_size=4,
        camera_distance=2.732,
        camera_distance_noise=0.1,
        elevation_min=20,
        elevation_max=40,
        lr_vertices=0.01,
        lr_textures=1.0,
        lambda_style=1,
        lambda_content=2e9,
        lambda_tv=1e7,
        image_size=224,
    ):
        super(StyleTransferModel, self).__init__()
        self.image_size = image_size
        self.camera_distance = camera_distance
        self.camera_distance_noise = camera_distance_noise
        self.elevation_min = elevation_min
        self.elevation_max = elevation_max
        self.lambda_style = lambda_style
        self.lambda_content = lambda_content
        self.lambda_tv = lambda_tv

        # load feature extractor
        self.vgg16 = cl.VGG16Layers()

        # load reference image
        reference_image = scipy.misc.imread(filename_style)
        reference_image = scipy.misc.imresize(reference_image,
                                              (image_size, image_size))
        reference_image = reference_image.astype('float32') / 255.
        reference_image = reference_image[:, :, :3].transpose(
            (2, 0, 1))[None, :, :, :]
        reference_image = self.xp.array(reference_image)
        with chainer.no_backprop_mode():
            features_ref = [
                f.data for f in self.extract_style_feature(reference_image)
            ]
        self.features_ref = features_ref
        self.background_color = reference_image.mean((0, 2, 3))

        with self.init_scope():
            # load .obj
            self.mesh = neural_renderer.Mesh(filename_mesh, texture_size)
            self.mesh.set_lr(lr_vertices, lr_textures)
            self.vertices_original = self.xp.copy(self.mesh.vertices.data)

            # setup renderer
            renderer = neural_renderer.Renderer()
            renderer.image_size = image_size
            renderer.background_color = self.background_color
            self.renderer = renderer
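extract_style_feature is referenced but not defined in this snippet; a plausible sketch, assuming the standard Gram-matrix style representation over VGG16 conv activations (the layer list and normalization are assumptions) and chainer.functions imported as F:

    def extract_style_feature(self, images):
        layers = ['conv1_2', 'conv2_2', 'conv3_3', 'conv4_3']
        feats = self.vgg16(images, layers=layers)
        grams = []
        for name in layers:
            f = feats[name]
            b, c, h, w = f.shape
            f = F.reshape(f, (b, c, h * w))
            # Gram matrix, normalized by the feature map size
            grams.append(F.batch_matmul(f, f, transb=True) / (c * h * w))
        return grams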
Example #17
    def __init__(self, out_channel):
        self.out_channel = out_channel
        super(CNN, self).__init__(
            resnet=L.VGG16Layers(),  # ToDo: use resnet
            fc_class=L.Linear(512, 2),
            fc_box=L.Linear(512, 4),
            mask_1=L.Deconvolution2D(512, 256, 2, stride=2, pad=0),
            conv1_bn=L.BatchNormalization(256),
            mask_2=L.Convolution2D(256, out_channel, 3, stride=1, pad=1),
        )
Example #18
    def __init__(self):
        super(VGG, self).__init__()

        with self.init_scope():
            self.base = L.VGG16Layers()
            self.upsample3 = L.Deconvolution2D(
                256, 2, ksize=1, stride=1, pad=0)
            self.upsample4 = L.Deconvolution2D(
                512, 2, ksize=4, stride=2, pad=1)
            self.upsample5 = L.Deconvolution2D(
                512, 2, ksize=8, stride=4, pad=2)
            self.upsample = L.Deconvolution2D(
                2, 1, ksize=16, stride=8, pad=4)
Example #19
    @classmethod
    def set_model(cls, model_name, uses_device=0):
        """
        Set model and device.
          uses_device = -1 : CPU
          uses_device >= 0 : GPU (default 0)
        """
        # use gpu or cpu
        cls.uses_device = uses_device
        
        if uses_device >= 0:
            chainer.cuda.get_device_from_id(uses_device).use()
            chainer.cuda.check_cuda_available()
            import cupy as xp
        else:
            xp = np

        cls.xp = xp

        # set model
        cls.model_name = model_name
        
        if model_name == "VGG16":
            cls.model = L.VGG16Layers()
            cls.last_layer = 'fc8'
            cls.size = (224, 224)
            cls.mean = [103.939, 116.779, 123.68]
            
        elif model_name == "GoogLeNet":
            cls.model = L.GoogLeNet()
            cls.last_layer = 'loss3_fc'
            cls.size = (224, 224)
            cls.mean = [104.0, 117.0, 123.0]
        
        elif model_name == "ResNet152":
            cls.model = L.ResNet152Layers()
            cls.last_layer = 'fc6'
            cls.size = (224, 224)
            cls.mean = [103.063, 115.903, 123.152]
            
        else:
            raise Exception("Invalid model")
            
        if uses_device >= 0:
            cls.model.to_gpu()

        #for memory saving
        for param in cls.model.params():
            param._requires_grad = False
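A usage sketch for the classmethod above; the owning class is not shown, so Predictor below is a hypothetical name:

Predictor.set_model('VGG16', uses_device=-1)  # CPU
print(Predictor.last_layer, Predictor.size)   # fc8 (224, 224)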
Example #20
    def __init__(self, last_only=False):
        super(VGG, self).__init__()
        self.last_only = last_only
        with self.init_scope():
            self.base = L.VGG16Layers()
Example #21
    def __init__(self):
        super(VGG, self).__init__()

        with self.init_scope():
            self.base = L.VGG16Layers()
Example #22
    def setUp(self):

        self.model = L.VGG16Layers(None)
        self.x = np.zeros((1, 3, 224, 224), dtype=np.float32)
Example #23
    def __init__(self):
        super(VGG16FeatureExtractor, self).__init__()
        with self.init_scope():
            self.cnn = L.VGG16Layers()
        self.cnn_layer_name = 'fc7'
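A sketch of how this extractor might be called (the method body is an assumption; only cnn and cnn_layer_name come from the original):

    def __call__(self, imgs):
        # fc7 activations for a batch of prepared images
        activations = self.cnn(imgs, layers=[self.cnn_layer_name])
        return activations[self.cnn_layer_name]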
Example #24
    def __init__(self, out_size=2):
        super(CXR16, self).__init__()
        with self.init_scope():
            self.base = L.VGG16Layers()
            self.fc8 = L.Linear(4096, out_size)
Example #25
    def __init__(self, n_out=7):
        super(VGG_double, self).__init__(
            model=L.VGG16Layers(),
            fc=L.Linear(8192, n_out))
Example #26
    iterator = chainer.iterators.SerialIterator(dataset,
                                                args.batch_size,
                                                repeat=False,
                                                shuffle=False)

    if args.ch != len(dataset[0][0]):
        print("number of input channels is different during training.")
    print("Input channels {}, Output channels {}".format(args.ch, args.out_ch))

    ## load generator models
    if "enc" in args.model_gen:
        if (args.gen_pretrained_encoder and args.gen_pretrained_lr_ratio == 0):
            if "resnet" in args.gen_pretrained_encoder:
                pretrained = L.ResNet50Layers()
                print("Pretrained ResNet model loaded.")
            else:
                pretrained = L.VGG16Layers()
                print("Pretrained VGG model loaded.")
            if args.gpu >= 0:
                pretrained.to_gpu()
            enc = net.Encoder(args, pretrained)
        else:
            enc = net.Encoder(args)
        print('Loading {:s}..'.format(args.model_gen))
        serializers.load_npz(args.model_gen, enc)
        dec = net.Decoder(args)
        modelfn = args.model_gen.replace('enc_x', 'dec_y')
        modelfn = modelfn.replace('enc_y', 'dec_x')
        print('Loading {:s}..'.format(modelfn))
        serializers.load_npz(modelfn, dec)
        if args.gpu >= 0:
            enc.to_gpu()
Example #27
def main():
    args = arguments()
    outdir = os.path.join(args.out, dt.now().strftime('%m%d_%H%M') + "_cgan")

    #    chainer.config.type_check = False
    chainer.config.autotune = True
    chainer.config.dtype = dtypes[args.dtype]
    chainer.print_runtime_info()
    #print('Chainer version: ', chainer.__version__)
    #print('GPU availability:', chainer.cuda.available)
    #print('cuDNN availability:', chainer.cuda.cudnn_enabled)
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()

    ## dataset preparation
    train_d = Dataset(args.train,
                      args.root,
                      args.from_col,
                      args.to_col,
                      clipA=args.clipA,
                      clipB=args.clipB,
                      class_num=args.class_num,
                      crop=(args.crop_height, args.crop_width),
                      imgtype=args.imgtype,
                      random=args.random_translate,
                      grey=args.grey,
                      BtoA=args.btoa)
    test_d = Dataset(args.val,
                     args.root,
                     args.from_col,
                     args.to_col,
                     clipA=args.clipA,
                     clipB=args.clipB,
                     class_num=args.class_num,
                     crop=(args.crop_height, args.crop_width),
                     imgtype=args.imgtype,
                     random=args.random_translate,
                     grey=args.grey,
                     BtoA=args.btoa)
    args.crop_height, args.crop_width = train_d.crop
    if (len(train_d) == 0):
        print("No images found!")
        exit()

    # setup training/validation data iterators
    train_iter = chainer.iterators.SerialIterator(train_d, args.batch_size)
    test_iter = chainer.iterators.SerialIterator(test_d,
                                                 args.nvis,
                                                 shuffle=False)
    test_iter_gt = chainer.iterators.SerialIterator(
        train_d, args.nvis,
        shuffle=False)  ## same as training data; used for validation

    args.ch = len(train_d[0][0])
    args.out_ch = len(train_d[0][1])
    print("Input channels {}, Output channels {}".format(args.ch, args.out_ch))
    if (len(train_d) * len(test_d) == 0):
        print("No images found!")
        exit()

    ## Set up models
    # shared pretrained layer
    if (args.gen_pretrained_encoder and args.gen_pretrained_lr_ratio == 0):
        if "resnet" in args.gen_pretrained_encoder:
            pretrained = L.ResNet50Layers()
            print("Pretrained ResNet model loaded.")
        else:
            pretrained = L.VGG16Layers()
            print("Pretrained VGG model loaded.")
        if args.gpu >= 0:
            pretrained.to_gpu()
        enc_x = net.Encoder(args, pretrained)
    else:
        enc_x = net.Encoder(args)


#    gen = net.Generator(args)
    dec_y = net.Decoder(args)

    if args.lambda_dis > 0:
        dis = net.Discriminator(args)
        models = {'enc_x': enc_x, 'dec_y': dec_y, 'dis': dis}
    else:
        dis = L.Linear(1, 1)
        models = {'enc_x': enc_x, 'dec_y': dec_y}

    ## load learnt models
    optimiser_files = []
    if args.model_gen:
        serializers.load_npz(args.model_gen, enc_x)
        serializers.load_npz(args.model_gen.replace('enc_x', 'dec_y'), dec_y)
        print('model loaded: {}, {}'.format(
            args.model_gen, args.model_gen.replace('enc_x', 'dec_y')))
        optimiser_files.append(args.model_gen.replace('enc_x', 'opt_enc_x'))
        optimiser_files.append(args.model_gen.replace('enc_x', 'opt_dec_y'))
    if args.model_dis:
        serializers.load_npz(args.model_dis, dis)
        print('model loaded: {}'.format(args.model_dis))
        optimiser_files.append(args.model_dis.replace('dis', 'opt_dis'))

    ## send models to GPU
    if args.gpu >= 0:
        enc_x.to_gpu()
        dec_y.to_gpu()
        dis.to_gpu()

    # Setup optimisers
    def make_optimizer(model, lr, opttype='Adam', pretrained_lr_ratio=1.0):
        #        eps = 1e-5 if args.dtype==np.float16 else 1e-8
        optimizer = optim[opttype](lr)
        optimizer.setup(model)
        if args.weight_decay > 0:
            if opttype in ['Adam', 'AdaBound', 'Eve']:
                optimizer.weight_decay_rate = args.weight_decay
            else:
                if args.weight_decay_norm == 'l2':
                    optimizer.add_hook(
                        chainer.optimizer.WeightDecay(args.weight_decay))
                else:
                    optimizer.add_hook(
                        chainer.optimizer_hooks.Lasso(args.weight_decay))
        return optimizer

    opt_enc_x = make_optimizer(enc_x, args.learning_rate_gen, args.optimizer)
    opt_dec_y = make_optimizer(dec_y, args.learning_rate_gen, args.optimizer)
    opt_dis = make_optimizer(dis, args.learning_rate_dis, args.optimizer)

    optimizers = {'enc_x': opt_enc_x, 'dec_y': opt_dec_y, 'dis': opt_dis}

    ## resume optimisers from file
    if args.load_optimizer:
        for (m, e) in zip(optimiser_files, optimizers):
            if m:
                try:
                    serializers.load_npz(m, optimizers[e])
                    print('optimiser loaded: {}'.format(m))
                except:
                    print("couldn't load {}".format(m))
                    pass

    # finetuning
    if args.gen_pretrained_encoder:
        if args.gen_pretrained_lr_ratio == 0:
            enc_x.base.disable_update()
        else:
            for func_name in enc_x.encoder.base._children:
                for param in enc_x.encoder.base[func_name].params():
                    param.update_rule.hyperparam.eta *= args.gen_pretrained_lr_ratio

    # Set up trainer
    updater = Updater(
        models=(enc_x, dec_y, dis),
        iterator={'main': train_iter},
        optimizer=optimizers,
        #        converter=convert.ConcatWithAsyncTransfer(),
        params={'args': args},
        device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=outdir)

    ## save learnt results at a specified interval or at the end of training
    if args.snapinterval < 0:
        args.snapinterval = args.epoch
    snapshot_interval = (args.snapinterval, 'epoch')
    display_interval = (args.display_interval, 'iteration')

    for e in models:
        trainer.extend(extensions.snapshot_object(models[e],
                                                  e + '{.updater.epoch}.npz'),
                       trigger=snapshot_interval)
        if args.parameter_statistics:
            trainer.extend(extensions.ParameterStatistics(
                models[e]))  ## very slow
    for e in optimizers:
        trainer.extend(extensions.snapshot_object(
            optimizers[e], 'opt_' + e + '{.updater.epoch}.npz'),
                       trigger=snapshot_interval)

    ## plot NN graph
    if args.lambda_rec_l1 > 0:
        trainer.extend(
            extensions.dump_graph('dec_y/loss_L1', out_name='enc.dot'))
    elif args.lambda_rec_l2 > 0:
        trainer.extend(
            extensions.dump_graph('dec_y/loss_L2', out_name='gen.dot'))
    elif args.lambda_rec_ce > 0:
        trainer.extend(
            extensions.dump_graph('dec_y/loss_CE', out_name='gen.dot'))
    if args.lambda_dis > 0:
        trainer.extend(
            extensions.dump_graph('dis/loss_real', out_name='dis.dot'))

    ## log outputs
    log_keys = ['epoch', 'iteration', 'lr']
    log_keys_gen = ['myval/loss_L1', 'myval/loss_L2']
    log_keys_dis = []
    if args.lambda_rec_l1 > 0:
        log_keys_gen.append('dec_y/loss_L1')
    if args.lambda_rec_l2 > 0:
        log_keys_gen.append('dec_y/loss_L2')
    if args.lambda_rec_ce > 0:
        log_keys_gen.extend(['dec_y/loss_CE', 'myval/loss_CE'])
    if args.lambda_reg > 0:
        log_keys.extend(['enc_x/loss_reg'])
    if args.lambda_tv > 0:
        log_keys_gen.append('dec_y/loss_tv')
    if args.lambda_dis > 0:
        log_keys_dis.extend(
            ['dec_y/loss_dis', 'dis/loss_real', 'dis/loss_fake'])
    if args.lambda_mispair > 0:
        log_keys_dis.append('dis/loss_mispair')
    if args.dis_wgan:
        log_keys_dis.extend(['dis/loss_gp'])
    trainer.extend(extensions.LogReport(trigger=display_interval))
    trainer.extend(extensions.PrintReport(log_keys + log_keys_gen +
                                          log_keys_dis),
                   trigger=display_interval)
    if extensions.PlotReport.available():
        #        trainer.extend(extensions.PlotReport(['lr'], 'iteration',trigger=display_interval, file_name='lr.png'))
        trainer.extend(
            extensions.PlotReport(log_keys_gen,
                                  'iteration',
                                  trigger=display_interval,
                                  file_name='loss_gen.png',
                                  postprocess=plot_log))
        trainer.extend(
            extensions.PlotReport(log_keys_dis,
                                  'iteration',
                                  trigger=display_interval,
                                  file_name='loss_dis.png'))
    trainer.extend(extensions.ProgressBar(update_interval=10))

    # learning rate scheduling
    trainer.extend(extensions.observe_lr(optimizer_name='enc_x'),
                   trigger=display_interval)
    if args.optimizer in ['Adam', 'AdaBound', 'Eve']:
        lr_target = 'eta'
    else:
        lr_target = 'lr'
    if args.lr_drop > 0:  ## cosine annealing
        for e in [opt_enc_x, opt_dec_y, opt_dis]:
            trainer.extend(CosineShift(lr_target,
                                       args.epoch // args.lr_drop,
                                       optimizer=e),
                           trigger=(1, 'epoch'))
    else:
        for e in [opt_enc_x, opt_dec_y, opt_dis]:
            #trainer.extend(extensions.LinearShift('eta', (1.0,0.0), (decay_start_iter,decay_end_iter), optimizer=e))
            trainer.extend(extensions.ExponentialShift(lr_target, 0.33,
                                                       optimizer=e),
                           trigger=(args.epoch // args.lr_drop, 'epoch'))

    # evaluation
    vis_folder = os.path.join(outdir, "vis")
    os.makedirs(vis_folder, exist_ok=True)
    if not args.vis_freq:
        args.vis_freq = max(len(train_d) // 2, 50)
    trainer.extend(VisEvaluator({
        "test": test_iter,
        "train": test_iter_gt
    }, {
        "enc_x": enc_x,
        "dec_y": dec_y
    },
                                params={
                                    'vis_out': vis_folder,
                                    'args': args
                                },
                                device=args.gpu),
                   trigger=(args.vis_freq, 'iteration'))

    # ChainerUI command extension (requires a ChainerUI version compatible with Chainer 6.0)
    trainer.extend(CommandsExtension())

    # Run the training
    print("\nresults are saved under: ", outdir)
    save_args(args, outdir)
    trainer.run()
Example #28
    def __init__(self):
        super(VGG16, self).__init__(
            vgg16=L.VGG16Layers(),
            fc8=L.Linear(4096, 1),
        )
Example #29
import argparse

import chainer
from chainer import links as L
import onnx_chainer
import numpy as np

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--onnx_path', type=str, default='models/vgg.onnx')

    args = parser.parse_args()

    model = L.VGG16Layers()

    img = np.zeros((3, 300, 300), dtype=np.float32)
    img = L.model.vision.vgg.prepare(img)
    img = img[np.newaxis, ...]

    chainer.config.train = False
    onnx_model = onnx_chainer.export(model, img, filename=args.onnx_path)
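To sanity-check the exported file, the onnx package's loader and checker can be run over it; a minimal sketch, assuming onnx is installed:

import onnx

onnx_model = onnx.load(args.onnx_path)
onnx.checker.check_model(onnx_model)  # raises if the graph is malformed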
Example #30
    def __call__(self, layers=['fc7']):
        out = self.prepare(L.model.vision.vgg.prepare)
        vgg16 = L.VGG16Layers()
        return vgg16(out, layers=layers)