Example #1
 def _init_params(self):
     """
     Initialize parameters for the layers in this generator module.
     """
     if self.use_rand:
         # random values will be stacked on exogenous input
         self.w1 = self.init_func(
             (self.out_chans, (self.in_chans + self.rand_chans),
              self.filt_dim, self.filt_dim), "{}_w1".format(self.mod_name))
     else:
         # random values won't be stacked on exogenous input
         self.w1 = self.init_func(
             (self.out_chans, self.in_chans, self.filt_dim, self.filt_dim),
             "{}_w1".format(self.mod_name))
     self.w2 = self.init_func(
         (self.out_chans, self.out_chans, self.filt_dim, self.filt_dim),
         "{}_w2".format(self.mod_name))
     self.params = [self.w1, self.w2]
     # make gains and biases for transforms that will get batch normed
     if self.apply_bn_1:
         gain_ifn = inits.Normal(loc=1., scale=0.02)
         bias_ifn = inits.Constant(c=0.)
         self.g1 = gain_ifn((self.out_chans), "{}_g1".format(self.mod_name))
         self.b1 = bias_ifn((self.out_chans), "{}_b1".format(self.mod_name))
         self.params.extend([self.g1, self.b1])
     if self.apply_bn_2:
         gain_ifn = inits.Normal(loc=1., scale=0.02)
         bias_ifn = inits.Constant(c=0.)
         self.g2 = gain_ifn((self.out_chans), "{}_g2".format(self.mod_name))
         self.b2 = bias_ifn((self.out_chans), "{}_b2".format(self.mod_name))
         self.params.extend([self.g2, self.b2])
     return
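The gain/bias pattern above assumes an inits API that takes a shape and a parameter name. Below is a minimal numpy stand-in of that API, an assumption for illustration only; the real library presumably returns Theano shared variables rather than plain arrays.

import numpy as np

class Normal(object):
    """Stand-in for the assumed inits.Normal: samples from N(loc, scale)."""
    def __init__(self, loc=0., scale=0.02):
        self.loc, self.scale = loc, scale
    def __call__(self, shape, name=None):
        return np.random.normal(self.loc, self.scale, size=shape).astype('float32')

class Constant(object):
    """Stand-in for the assumed inits.Constant: fills with the constant c."""
    def __init__(self, c=0.):
        self.c = c
    def __call__(self, shape, name=None):
        return np.full(shape, self.c, dtype='float32')

gain_ifn = Normal(loc=1., scale=0.02)  # batch-norm gains start near 1
bias_ifn = Constant(c=0.)              # batch-norm biases start at 0
print(gain_ifn(64, 'g1').shape, bias_ifn(64, 'b1').shape)  # (64,) (64,)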
Example #2
 def _init_params(self):
     """
     Initialize parameters for the layers in this discriminator module.
     """
     self.w1 = self.init_func(
         (self.out_chans, self.in_chans, self.filt_dim, self.filt_dim),
         "{}_w1".format(self.mod_name))
     self.w2 = self.init_func(
         (self.out_chans, self.out_chans, self.filt_dim, self.filt_dim),
         "{}_w2".format(self.mod_name))
     self.wd = self.init_func(
         (1, self.out_chans, self.filt_dim, self.filt_dim),
         "{}_wd".format(self.mod_name))
     self.params = [self.w1, self.w2, self.wd]
     # make gains and biases for transforms that will get batch normed
     if self.apply_bn_1:
         gain_ifn = inits.Normal(loc=1., scale=0.02)
         bias_ifn = inits.Constant(c=0.)
         self.g1 = gain_ifn((self.out_chans), "{}_g1".format(self.mod_name))
         self.b1 = bias_ifn((self.out_chans), "{}_b1".format(self.mod_name))
         self.params.extend([self.g1, self.b1])
     if self.apply_bn_2:
         gain_ifn = inits.Normal(loc=1., scale=0.02)
         bias_ifn = inits.Constant(c=0.)
         self.g2 = gain_ifn((self.out_chans), "{}_g2".format(self.mod_name))
         self.b2 = bias_ifn((self.out_chans), "{}_b2".format(self.mod_name))
         self.params.extend([self.g2, self.b2])
     return
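The extra weight wd, with shape (1, out_chans, filt_dim, filt_dim), suggests a convolution that collapses the feature maps into a single real/fake score map. A hedged Theano sketch of that reading; the variable names, sizes, and border mode here are assumptions, not taken from the source:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv2d

out_chans, filt_dim = 64, 3
floatX = theano.config.floatX
X = T.tensor4('X')  # (batch, out_chans, H, W) feature maps from the layer above
wd = theano.shared(
    (0.02 * np.random.randn(1, out_chans, filt_dim, filt_dim)).astype(floatX),
    name='wd')
score_map = conv2d(X, wd, border_mode='half')  # (batch, 1, H, W)
f = theano.function([X], score_map)
print(f(np.zeros((2, out_chans, 8, 8), dtype=floatX)).shape)  # (2, 1, 8, 8)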
Example #3
 def _init_params(self):
     """
     Initialize parameters for the layers in this generator module.
     """
     self.w1 = self.init_func((self.rand_dim, self.out_dim),
                              "{}_w1".format(self.mod_name))
     self.params = [self.w1]
     # make gains and biases for transforms that will get batch normed
     if self.apply_bn:
         gain_ifn = inits.Normal(loc=1., scale=0.02)
         bias_ifn = inits.Constant(c=0.)
         self.g1 = gain_ifn((self.out_dim), "{}_g1".format(self.mod_name))
         self.b1 = bias_ifn((self.out_dim), "{}_b1".format(self.mod_name))
         self.params.extend([self.g1, self.b1])
     return
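For context, here is how a gain g1 and bias b1 are typically consumed by batch normalization in this family of code; a minimal numpy sketch, assuming the standard normalize-then-rescale form rather than the source's exact implementation:

import numpy as np

def batchnorm(x, g, b, eps=1e-8):
    # Normalize each feature over the batch, then rescale by the learned
    # gain g and shift by the learned bias b -- the role of g1/b1 above.
    mu = x.mean(axis=0)
    sigma = x.std(axis=0)
    return g * (x - mu) / (sigma + eps) + b

x = np.random.randn(100, 256).astype('float32')        # (batch, out_dim)
g = np.random.normal(1., 0.02, 256).astype('float32')  # like g1
b = np.zeros(256, dtype='float32')                     # like b1
y = batchnorm(x, g, b)
print(round(float(y.mean()), 3), round(float(y.std()), 3))  # ~0.0, ~1.0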
Example #4
    vaY, labels = tr_data.get_data(tr_handle, slice(10000, min(ntrain, 20000)))
    vaY = transform(vaY)

    va_nnd_1k = nnd_score(vaY.reshape((len(vaY), -1)),
                          vaX.reshape((len(vaX), -1)),
                          metric='euclidean')
    print('va_nnd_1k = %.2f' % va_nnd_1k)
    means = labels.mean(axis=0)
    print('labels ', labels.shape, means, means[0] / means[1])
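nnd_score is not defined in this snippet; below is a stand-in consistent with the call site, assuming it computes the mean distance from each validation sample to its nearest training sample. This is an assumption, not the source's implementation:

import numpy as np
from sklearn.neighbors import NearestNeighbors

def nnd_score(query, ref, metric='euclidean'):
    # Mean nearest-neighbour distance from each query row to the reference set.
    nn = NearestNeighbors(n_neighbors=1, metric=metric).fit(ref)
    dists, _ = nn.kneighbors(query)
    return float(dists.mean())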

#####################################
# shared variables
gifn = inits.Normal(scale=0.02)
difn = inits.Normal(scale=0.02)
gain_ifn = inits.Normal(loc=1., scale=0.02)
bias_ifn = inits.Constant(c=0.)

gw = gifn((nz, ngf * 8 * 4 * 4), 'gw')
gg = gain_ifn((ngf * 8 * 4 * 4), 'gg')
gb = bias_ifn((ngf * 8 * 4 * 4), 'gb')
gw2 = gifn((ngf * 8, ngf * 4, 5, 5), 'gw2')
gg2 = gain_ifn((ngf * 4), 'gg2')
gb2 = bias_ifn((ngf * 4), 'gb2')
gw3 = gifn((ngf * 4, ngf * 2, 5, 5), 'gw3')
gg3 = gain_ifn((ngf * 2), 'gg3')
gb3 = bias_ifn((ngf * 2), 'gb3')
gw4 = gifn((ngf * 2, ngf, 5, 5), 'gw4')
gg4 = gain_ifn((ngf), 'gg4')
gb4 = bias_ifn((ngf), 'gb4')
gwx = gifn((ngf, nc, 5, 5), 'gwx')
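These shapes follow the usual DCGAN generator layout: a dense projection of Z reshaped to (ngf * 8, 4, 4), then stride-2 5x5 deconvolutions that double the spatial size at each step. A shape walkthrough under that assumption; the values of nz, ngf, nc and the 64x64 output are illustrative, not given in the snippet:

nz, ngf, nc = 100, 64, 3  # assumed values for illustration
layers = [
    ('gw  (dense)',      (nz,),             (ngf * 8 * 4 * 4,)),  # reshape to (ngf*8, 4, 4)
    ('gw2 (deconv 5x5)', (ngf * 8, 4, 4),   (ngf * 4, 8, 8)),
    ('gw3 (deconv 5x5)', (ngf * 4, 8, 8),   (ngf * 2, 16, 16)),
    ('gw4 (deconv 5x5)', (ngf * 2, 16, 16), (ngf, 32, 32)),
    ('gwx (deconv 5x5)', (ngf, 32, 32),     (nc, 64, 64)),
]
for name, i, o in layers:
    print('%-18s %s -> %s' % (name, i, o))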
Example #5
nbatch = 100  # number of examples per batch
npx = 28  # width/height of images in pixels
nz = 100  # dimensionality of Z
ngfc = 1024  # generator units in the fully connected layers
ndfc = 1024  # discriminator units in the fully connected layers
ngf = 64  # generator filters in the first conv layer
ndf = 64  # discriminator filters in the first conv layer
nx = npx * npx * nc  # number of dimensions in X (nc = number of channels)
niter = 100  # iterations at the starting learning rate
niter_decay = 100  # iterations to linearly decay the learning rate to zero
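Taken together, niter and niter_decay describe a hold-then-decay schedule: keep the starting learning rate for niter iterations, then decay it linearly to zero over niter_decay more. A minimal sketch of that schedule; the base rate lr0 is illustrative:

def lr_at(epoch, lr0=0.0002, niter=100, niter_decay=100):
    # Hold lr0 for niter epochs, then decay linearly to zero over niter_decay.
    if epoch < niter:
        return lr0
    return lr0 * max(0.0, 1.0 - float(epoch - niter) / niter_decay)

print(lr_at(0), lr_at(100), lr_at(150), lr_at(200))  # lr0, lr0, lr0/2, 0.0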

n_hidden = 10
n_observe = trX.shape[1]

r_gifn = inits.Uniform(scale=4 * np.sqrt(6. / (n_observe + n_hidden)))
r_bias_fn = inits.Constant()

gB = r_gifn((n_observe, n_hidden), 'gB')
gb = r_bias_fn((n_observe, ), 'gb')
gc = r_bias_fn((n_hidden, ), 'gc')
rbm_params = [gB, gb, gc]

relu = activations.Rectify()
sigmoid = activations.Sigmoid()
tanh = activations.Tanh()
lrelu = activations.LeakyRectify()
bce = T.nnet.binary_crossentropy

gifn = inits.Normal(scale=0.02)

gw = gifn((nz, ngfc), 'gw')
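gB, gb and gc look like the weight matrix, visible bias and hidden bias of a standard binary RBM. Under that assumption, one mean-field Gibbs half-step in numpy; sizes are illustrative:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

n_observe, n_hidden = 784, 10  # assumed sizes for illustration
B = np.random.uniform(-0.1, 0.1, (n_observe, n_hidden))
b = np.zeros(n_observe)  # visible bias
c = np.zeros(n_hidden)   # hidden bias

v = (np.random.rand(5, n_observe) > 0.5).astype('float64')  # batch of visibles
h = sigmoid(v.dot(B) + c)          # P(h = 1 | v)
v_recon = sigmoid(h.dot(B.T) + b)  # P(v = 1 | h)
print(h.shape, v_recon.shape)      # (5, 10) (5, 784)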
Example #6
b1 = 0.95  # first moment decay (likely Adam's beta1)
b2 = 0.999  # second moment decay (likely Adam's beta2)
learning_rate = 0.0001  # initial setting was 0.001

######### init settings
relu = activations.Rectify()
sigmoid = activations.Sigmoid()
lrelu = activations.LeakyRectify()
tanh = activations.Tanh()

orfn = inits.Orthogonal(scale=1)
gifn = inits.Normal(scale=0.01)
#gifn_CPU = inits.Normal_CPU(scale=0.01)

gain_ifn = inits.Normal(loc=1., scale=0.01)
bias_ifn = inits.Constant(c=0.)

startword_ifn = inits.Constant(c=-7.)

#bias_ifn_CPU = inits.Constant_CPU(c=0.)
###################################
First = True
if First:
    select_epochs = 0

    word_start = startword_ifn((1, 1, n_word_dim), 'word_start')

    # alternative tried: T._shared(sorted_vecs, borrow=True)  # force on CPU
    shared_Word_vecs = sharedX(sorted_vecs)
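The constant -7 start embedding and the shared word-vector table suggest an embedding lookup with a learned start-of-sequence marker prepended to each sequence. A numpy sketch under that assumption; all sizes are illustrative:

import numpy as np

n_word_dim, vocab, seq_len, batch = 300, 1000, 5, 2
sorted_vecs = np.random.randn(vocab, n_word_dim).astype('float32')
word_start = np.full((1, 1, n_word_dim), -7., dtype='float32')

idx = np.random.randint(0, vocab, (batch, seq_len))
embedded = sorted_vecs[idx]                    # (batch, seq_len, dim) lookup
starts = np.repeat(word_start, batch, axis=0)  # (batch, 1, dim) start marker
seq = np.concatenate([starts, embedded], axis=1)  # (batch, seq_len + 1, dim)
print(seq.shape)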