Example #1
def def_bfgs(model_G, layer='conv4', npx=64, alpha=0.002):
    """Compile the Theano objective used for BFGS-based GAN inversion.

    Builds a symbolic graph that, given a latent code ``z``, generates an
    image via ``model_G`` and measures its distance to a target image ``x``
    both in pixel space and in a feature space (AlexNet activations at
    ``layer``, or HOG descriptors when ``layer == 'hog'``).

    Parameters
    ----------
    model_G : callable
        Generator network; called on ``tanh(z)`` to produce images.
    layer : str
        Feature layer name ('hog' selects HOG features instead of AlexNet).
    npx : int
        Image side length used for the AlexNet input shape.
    alpha : float
        Weight of the feature-reconstruction term relative to pixel loss.

    Returns
    -------
    (_invert, z)
        ``_invert(z, x, x_f)`` returns ``[cost, grad, gx]``: total loss,
        gradient of the loss w.r.t. ``z``, and the generated images.
    """
    print('COMPILING...')
    t = time()

    # Symbolic inputs
    x_f = T.tensor4()  # target feature map
    x = T.tensor4()    # target image
    z = T.matrix()     # latent code (random seed)
    tanh = activations.Tanh()
    gx = model_G(tanh(z))  # generated image

    # BUG FIX: original used `layer is 'hog'` — identity comparison with a
    # string literal is implementation-dependent; use equality instead.
    if layer == 'hog':
        gx_f = HOGNet.get_hog(gx, use_bin=True, BS=4)
    else:
        # Convert generated image to AlexNet's expected input format
        gx_t = AlexNet.transform_im(gx)
        gx_net = AlexNet.build_model(gx_t,
                                     layer=layer,
                                     shape=(None, 3, npx, npx))
        AlexNet.load_model(gx_net, layer=layer)
        # Take AlexNet's activations at `layer` as the feature representation
        gx_f = lasagne.layers.get_output(gx_net[layer], deterministic=True)

    # Feature-space reconstruction loss (weighted by alpha) + pixel loss
    f_rec = T.mean(T.sqr(x_f - gx_f), axis=(1, 2, 3)) * sharedX(alpha)
    x_rec = T.mean(T.sqr(x - gx), axis=(1, 2, 3))
    cost = T.sum(f_rec) + T.sum(x_rec)
    grad = T.grad(cost, z)
    output = [cost, grad, gx]
    _invert = theano.function(inputs=[z, x, x_f], outputs=output)

    print('%.2f seconds to compile _bfgs function' % (time() - t))
    return _invert, z
Example #2
def def_feature(layer='conv4', up_scale=4):
    """Compile a Theano function that extracts AlexNet features.

    Parameters
    ----------
    layer : str
        Name of the AlexNet layer whose activations are returned.
    up_scale : int
        Upscale factor forwarded to ``AlexNet.build_model``.

    Returns
    -------
    callable
        ``_ftr(images)`` -> deterministic activations of ``layer`` for a
        batch of 64x64 RGB images.
    """
    print('COMPILING...')
    start = time()

    images = T.tensor4()
    # Normalize/transform input to AlexNet's expected format, then build
    # the network up to (and including) `layer` with pretrained weights.
    transformed = AlexNet.transform_im(images)
    net = AlexNet.build_model(transformed, layer=layer,
                              shape=(None, 3, 64, 64), up_scale=up_scale)
    AlexNet.load_model(net, layer=layer)
    features = lasagne.layers.get_output(net[layer], deterministic=True)

    _ftr = theano.function(inputs=[images], outputs=features)
    print('%.2f seconds to compile _feature function' % (time() - start))
    return _ftr
def def_bfgs(net, layer='conv4', npx=64, alpha=0.002):
    """Compile the BFGS-inversion objective for a network wrapper `net`.

    Variant of ``def_bfgs`` that generates images via ``net.G.eval`` (a
    generator with a labels input) instead of a plain callable, then
    average-pools the output before computing pixel and feature losses.

    Parameters
    ----------
    net : object
        Wrapper exposing ``G.eval(z, labels, ...)``; ``net.labels_var`` is
        (re)assigned here as a symbolic placeholder.
    layer : str
        Feature layer name ('hog' selects HOG features instead of AlexNet).
    npx : int
        Image side length used for the AlexNet input shape.
    alpha : float
        Weight of the feature-reconstruction term relative to pixel loss.

    Returns
    -------
    (_invert, z)
        ``_invert(z, x, x_f)`` returns ``[cost, grad, gx]``.
    """
    print('COMPILING...')
    t = time()

    x_f = T.tensor4()  # target feature map
    x = T.tensor4()    # target image
    z = T.matrix()     # latent code

    # NOTE: debug op — prints z's value every time the compiled function
    # runs; kept because removing it would change runtime output.
    z = theano.printing.Print('this is z')(z)
    tanh = activations.Tanh()
    tz = tanh(z)
    # tz = printing_op(tz)

    # tz = z_scale * tz
    # Symbolic labels placeholder consumed (but unused) by the generator.
    net.labels_var = T.TensorType('float32', [False] * 512)('labels_var')
    gx = net.G.eval(z, net.labels_var, ignore_unused_inputs=True)
    # gx = printing_op(gx)
    # gx = misc.adjust_dynamic_range(gx, [-1,1], [0,1])
    # Downsample the (high-resolution) generator output by average pooling
    # so it matches the npx-sized targets.
    scale_factor = 16
    gx = theano.tensor.signal.pool.pool_2d(gx, ds=(scale_factor, scale_factor), mode='average_exc_pad', ignore_border=True)
    # gx = printing_op(gx)

    # BUG FIX: `layer is 'hog'` compared object identity, not string
    # equality; replaced with `==`.
    if layer == 'hog':
        gx_f = HOGNet.get_hog(gx, use_bin=True, BS=4)
    else:
        gx_t = AlexNet.transform_im(gx)
        gx_net = AlexNet.build_model(gx_t, layer=layer, shape=(None, 3, npx, npx))
        AlexNet.load_model(gx_net, layer=layer)
        gx_f = lasagne.layers.get_output(gx_net[layer], deterministic=True)

    # Feature loss (weighted) + pixel loss, summed over the batch
    f_rec = T.mean(T.sqr(x_f - gx_f), axis=(1, 2, 3)) * sharedX(alpha)
    x_rec = T.mean(T.sqr(x - gx), axis=(1, 2, 3))
    cost = T.sum(f_rec) + T.sum(x_rec)
    grad = T.grad(cost, z)
    output = [cost, grad, gx]
    _invert = theano.function(inputs=[z, x, x_f], outputs=output)

    print('%.2f seconds to compile _bfgs function' % (time() - t))
    return _invert, z
Example #4
# define the model
# Encoder-decoder round trip: predict a latent code z from image x, then
# regenerate gx from z with the (batch-normalized) test-mode generator.
t = time()
x = T.tensor4()
z = train_dcgan_utils.predict(x, predict_params, n_layers=n_layers)
gx = train_dcgan_utils.gen_test(z,
                                gen_params,
                                gen_batchnorm,
                                n_layers=n_layers,
                                n_f=n_f)

# define pixel loss
pixel_loss = costs.L2Loss(gx, x)

# define feature loss
# AlexNet features of the real image x ...
x_t = AlexNet.transform_im(x, npx=npx, nc=nc)
x_net = AlexNet.build_model(x_t, layer=args.layer, shape=(None, 3, npx, npx))
AlexNet.load_model(x_net, layer=args.layer)
x_f = lasagne.layers.get_output(x_net[args.layer], deterministic=True)
# ... and of the regenerated image gx, through a second network instance.
gx_t = AlexNet.transform_im(gx, npx=npx, nc=nc)
gx_net = AlexNet.build_model(gx_t, layer=args.layer, shape=(None, 3, npx, npx))
AlexNet.load_model(gx_net, layer=args.layer)
gx_f = lasagne.layers.get_output(gx_net[args.layer], deterministic=True)
ftr_loss = costs.L2Loss(gx_f, x_f)

# add two losses together
cost = pixel_loss + ftr_loss * sharedX(args.alpha)
output = [cost, z]
lrt = sharedX(args.lr)
b1t = sharedX(args.b1)
# NOTE(review): the Adam constructor call below is truncated in this
# snippet — the remaining arguments are outside the visible source.
p_updater = updates.Adam(lr=lrt,
Example #5
# load modelG
# Initialize generator parameters and overwrite them with weights and
# batch-norm statistics loaded from model_dir.
gen_params = train_dcgan_utils.init_gen_params(nz=nz, n_f=n_f, n_layers=n_layers, nc=nc)
train_dcgan_utils.load_model(gen_params, os.path.join(model_dir, 'gen_params'))
gen_batchnorm = train_dcgan_utils.load_batchnorm(os.path.join(model_dir, 'gen_batchnorm'))

# define the model
# Encoder-decoder round trip: predict a latent code z from image x, then
# regenerate gx from z with the test-mode generator.
t= time()
x = T.tensor4()
z = train_dcgan_utils.predict(x, predict_params, n_layers=n_layers)
gx = train_dcgan_utils.gen_test(z, gen_params, gen_batchnorm, n_layers=n_layers, n_f=n_f)

# define pixel loss
pixel_loss = costs.L2Loss(gx, x)

# define feature loss
# AlexNet features of the real image x ...
x_t = AlexNet.transform_im(x, npx=npx, nc=nc)
x_net = AlexNet.build_model(x_t, layer=args.layer, shape=(None, 3, npx, npx))
AlexNet.load_model(x_net, layer=args.layer)
x_f = lasagne.layers.get_output(x_net[args.layer], deterministic=True)
# ... and of the regenerated image gx, through a second network instance.
gx_t = AlexNet.transform_im(gx, npx=npx, nc=nc)
gx_net = AlexNet.build_model(gx_t, layer=args.layer, shape=(None, 3, npx, npx))
AlexNet.load_model(gx_net, layer=args.layer)
gx_f = lasagne.layers.get_output(gx_net[args.layer], deterministic=True)
ftr_loss = costs.L2Loss(gx_f, x_f)

# add two losses together
# Total cost = pixel L2 + alpha-weighted feature L2; optimizer updates
# the predictor with Adam + L2 weight decay.
cost = pixel_loss + ftr_loss * sharedX(args.alpha)
output = [cost, z]
lrt = sharedX(args.lr)
b1t = sharedX(args.b1)
p_updater = updates.Adam(lr=lrt, b1=b1t, regularizer=updates.Regularizer(l2=args.weight_decay))