Example No. 1
    def __init__(self):
        super(AlexNet_flic_BN_PReLU, self).__init__(
            # five convolution stages; the first two are batch-normalized
            conv1=F.Convolution2D(3, 96, 11, stride=4, pad=1),
            bn1=F.BatchNormalization(96),
            prelu1=F.PReLU(),
            conv2=F.Convolution2D(96, 256, 5, stride=1, pad=2),
            bn2=F.BatchNormalization(256),
            prelu2=F.PReLU(),
            conv3=F.Convolution2D(256, 384, 3, stride=1, pad=1),
            prelu3=F.PReLU(),
            conv4=F.Convolution2D(384, 384, 3, stride=1, pad=1),
            prelu4=F.PReLU(),
            conv5=F.Convolution2D(384, 256, 3, stride=1, pad=1),
            prelu5=F.PReLU(),
            # fully connected head: 9216 = 256 * 6 * 6 flattened features
            fc6=F.Linear(9216, 4096),
            prelu6=F.PReLU(),
            fc7=F.Linear(4096, 4096),
            prelu7=F.PReLU(),
            fc8=F.Linear(4096, 14))
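
The constructor above only registers parameters; in the FunctionSet style of Chainer v1 the forward pass lives in a separate method. A minimal sketch of such a forward pass follows; the method name, the pooling placement (the usual AlexNet layout), and the mean-squared-error loss are assumptions, not part of the original example.

    # Hypothetical forward pass for the model defined above (Chainer v1
    # style). Pooling sizes follow the standard AlexNet layout; this is
    # an assumption, not taken from the original source.
    def forward(self, x_data, y_data, train=True):
        x = Variable(x_data)
        t = Variable(y_data)

        h = F.max_pooling_2d(self.prelu1(self.bn1(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(self.prelu2(self.bn2(self.conv2(h))), 3, stride=2)
        h = self.prelu3(self.conv3(h))
        h = self.prelu4(self.conv4(h))
        h = F.max_pooling_2d(self.prelu5(self.conv5(h)), 3, stride=2)
        h = F.dropout(self.prelu6(self.fc6(h)), train=train)
        h = F.dropout(self.prelu7(self.fc7(h)), train=train)
        y = self.fc8(h)

        # regression over the 14 outputs (e.g. joint coordinates)
        return F.mean_squared_error(y, t), y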
Example No. 2
    def setUp(self):
        self.func = functions.PReLU(shape=(3, ))
        self.func.W = numpy.random.uniform(-1, 1, self.func.W.shape).astype(
            numpy.float32)
        self.func.gW.fill(0)

        self.W = self.func.W.copy()  # fixed on CPU

        # Avoid instability of the numerical gradient around the PReLU
        # kink at zero: sample |x| in [0.5, 1) and apply a random sign
        self.x = numpy.random.uniform(.5, 1, (4, 3, 2)).astype(numpy.float32)
        self.x *= numpy.random.randint(2, size=(4, 3, 2)) * 2 - 1
        self.gy = numpy.random.uniform(-1, 1, (4, 3, 2)).astype(numpy.float32)
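
The fixture above is typically consumed by a backward test that compares analytic gradients against numerical ones. A sketch in the Chainer v1 test idiom follows; the method name check_backward and the exact gradient_check calls are assumptions based on the usual test layout of that era.

    # Hypothetical companion test for the setUp above (Chainer v1 idiom);
    # the method name and gradient_check usage are assumptions.
    def check_backward(self, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = self.func(x)
        y.grad = y_grad
        y.backward()

        # numerically differentiate the forward pass w.r.t. x and W
        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, gW = gradient_check.numerical_grad(
            f, (x.data, func.W), (y.grad,))

        gradient_check.assert_allclose(gx, x.grad)
        gradient_check.assert_allclose(gW, func.gW)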
Example No. 3
    def setUp(self):
        self.func = functions.PReLU()
        self.func.W = numpy.random.uniform(-1, 1, self.func.W.shape).astype(
            numpy.float32)
        self.func.gW.fill(0)

        self.W = self.func.W.copy()  # fixed on CPU

        # Avoid instability of the numerical gradient around the PReLU
        # kink at zero: push any value in (-0.01, 0.01) away from it
        self.x = numpy.random.uniform(-1, 1, (4, 3, 2)).astype(numpy.float32)
        for i in range(self.x.size):
            if -0.01 < self.x.flat[i] < 0.01:
                self.x.flat[i] = 0.5
        self.gy = numpy.random.uniform(-1, 1, (4, 3, 2)).astype(numpy.float32)
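
The replacement loop above guards the numerical gradient: a central difference with step eps straddles PReLU's kink at zero whenever |x| < eps, so the estimate disagrees with the analytic slope. A small standalone illustration (plain numpy, not from the original):

    import numpy

    def prelu(x, w=0.25):
        # elementwise PReLU: x where x >= 0, w * x elsewhere
        return numpy.where(x >= 0, x, w * x)

    eps = 1e-2   # typical step of a central-difference gradient check
    x = 0.003    # inside the (-0.01, 0.01) band that setUp rewrites
    num = (prelu(x + eps) - prelu(x - eps)) / (2 * eps)
    print(num)   # ~0.74, far from the analytic slope 1.0 at x = 0.003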
Example No. 4
    def nn_vae_tuning(self, x_data, y_data, nn_n_layers, vae_n_layers_recog,
                      nonlinear_q='softplus', gpu=-1, train=True):
        
        inputs = Variable(x_data)
        y = Variable(y_data)

        nonlinear_out = 'relu'

        # set non-linear functions (PReLU is a parameterized function,
        # so an instance is stored rather than a bare function)
        nonlinear_dict = {'sigmoid': F.sigmoid, 'tanh': F.tanh,
                          'softplus': F.softplus, 'relu': F.relu,
                          'clipped_relu': F.clipped_relu,
                          'leaky_relu': F.leaky_relu, 'PReLU': F.PReLU()}
        nonlinear_f_q = nonlinear_dict[nonlinear_q]
        nonlinear_f_out = nonlinear_dict[nonlinear_out]

        chain = [inputs]

        # compute q(z|x, y)
        for i in range(nn_n_layers):
            chain.append(F.dropout(
                nonlinear_f_q(getattr(self, 'nn_layer_%i' % i)(chain[-1])),
                train=train))
        nn_out = getattr(self, 'nn_layer_%i' % nn_n_layers)(chain[-1])
        chain += [nn_out]

        for i in range(vae_n_layers_recog):
            chain.append(F.dropout(
                nonlinear_f_q(getattr(self, 'vae_recog_%i' % i)(chain[-1])),
                train=train))
        recog_out = getattr(self, 'vae_recog_%i' % vae_n_layers_recog)(chain[-1])
        log_sigma_out = 0.5 * getattr(self, 'log_sigma')(chain[-1])
        
        # np.random.seed(123)

        # reparameterization trick: z = mu + sigma * eps with eps ~ N(0, 1);
        # log_sigma_out holds 0.5 * log-variance, so exp() yields sigma
        eps = np.random.normal(
            0, 1, (inputs.data.shape[0], log_sigma_out.data.shape[1])
        ).astype('float32')
        if gpu >= 0:
            eps = cuda.to_gpu(eps)
        eps = Variable(eps)
        z = recog_out + F.exp(log_sigma_out) * eps
        predict_score = nonlinear_f_out(getattr(self, 'output')(z))

        mean_error = F.mean_squared_error(predict_score, y)

        return mean_error, predict_score
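
For context, here is a hypothetical training step that could drive nn_vae_tuning with the Chainer v1 optimizer API; the model object, the iterate_minibatches helper, and the layer counts are assumed names, not part of the original code.

    from chainer import optimizers

    # Hypothetical driver loop; model, iterate_minibatches, train_x, and
    # train_y are assumed names, not taken from the original source.
    optimizer = optimizers.Adam()
    optimizer.setup(model)

    for x_batch, y_batch in iterate_minibatches(train_x, train_y):
        optimizer.zero_grads()
        loss, score = model.nn_vae_tuning(
            x_batch, y_batch, nn_n_layers=2, vae_n_layers_recog=2,
            nonlinear_q='softplus', gpu=-1, train=True)
        loss.backward()
        optimizer.update()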