Example #1
import theano.tensor as T
from theano.tensor.nnet import binary_crossentropy

# HiddenLayer and layers2params are project-specific helpers assumed to be
# available from the surrounding codebase.


def Discriminator(real, fake):
    # Layer sizes: 784 inputs (flattened 28x28 images), 512 hidden units
    in_len = 784
    m_disc = 512

    # Score the real and generated minibatches in a single pass by stacking
    # them along the batch axis
    pair = T.concatenate([real, fake], axis=0)

    h1 = HiddenLayer(in_len, m_disc)
    h2 = HiddenLayer(m_disc, m_disc)
    h3 = HiddenLayer(m_disc, 1, activation='sigmoid')

    pc1 = h1.output(pair)
    pc2 = h2.output(pc1)
    pc3 = h3.output(pc2)

    # Split the stacked predictions back into the real and generated halves
    p_real = pc3[:real.shape[0], :].flatten()
    p_gen = pc3[real.shape[0]:, :].flatten()

    # Discriminator cost on real data (target 1); the term is zeroed out once
    # its mean loss reaches 0.9
    d_cost_real = binary_crossentropy(p_real, T.ones(p_real.shape)).mean()
    d_cost_real = d_cost_real * (d_cost_real < 0.9)
    # Discriminator cost on generated data (target 0); the term is zeroed out
    # once its mean loss drops to 0.1 or below
    d_cost_gen = binary_crossentropy(p_gen, T.zeros(p_gen.shape)).mean()
    d_cost_gen = d_cost_gen * (d_cost_gen > 0.1)
    # Generator cost: make the discriminator label generated samples as real
    g_cost_d = binary_crossentropy(p_gen, T.ones(p_gen.shape)).mean()

    d_cost = (d_cost_real + d_cost_gen) / 2.0
    g_cost = g_cost_d

    layers = [h1, h2, h3]
    params = layers2params(layers)

    return d_cost, g_cost, params
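A minimal sketch of how the returned costs and parameters could be wired into a Theano training step, assuming plain gradient descent. The symbolic inputs X_real and X_fake, the learning rate, and the update rule are illustrative, and the generator's own parameters (needed to actually use g_cost) live outside this snippet:

import theano
import theano.tensor as T

X_real = T.matrix('X_real')  # minibatch of real images (assumed)
X_fake = T.matrix('X_fake')  # minibatch of generated images (assumed)

d_cost, g_cost, d_params = Discriminator(X_real, X_fake)

# One SGD step for the discriminator; g_cost would be paired with the
# generator's parameters in the same way.
lr = 0.001
d_updates = [(p, p - lr * theano.grad(d_cost, p)) for p in d_params]
train_d = theano.function([X_real, X_fake], d_cost, updates=d_updates)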
Example #2
    def __init__(self,
                 rng,
                 input,
                 input_dim,
                 n_in,
                 n_hidden,
                 n_out,
                 test=False,
                 classifier=None,
                 dropout_p=0.5):  # dropout_p is accepted but not used in this snippet

        if not test:
            # Training-time graph: build fresh layers that own the parameters
            input = input.reshape(input_dim)

            # Initialize hidden layer
            self.hiddenLayer = HiddenLayer(
                rng=rng,
                input=input,
                n_in=n_in,
                n_out=n_hidden,
            )

            # Initialize output layer
            self.last_layer = HiddenLayer(rng=rng,
                                          input=self.hiddenLayer.output,
                                          n_in=n_hidden,
                                          n_out=n_out)

            # Save the parameters of both layers for gradient updates
            self.params = self.hiddenLayer.params + self.last_layer.params

        else:
            # Test-time graph: rebuild both layers from a trained `classifier`
            # so that they share its weights
            input = input.reshape(input_dim)
            self.hiddenLayer = classifier.hiddenLayer.TestVersion(
                rng, input, n_in, n_hidden)  # mirror the training-time sizes
            self.last_layer = classifier.last_layer.TestVersion(
                rng, input=self.hiddenLayer.output, n_in=n_hidden, n_out=n_out)
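A hedged usage sketch of the train/test construction above, assuming the enclosing class is called MLP (the real class name is not shown in the snippet) and that HiddenLayer.TestVersion returns a copy that shares the trained weights:

import numpy
import theano.tensor as T

rng = numpy.random.RandomState(1234)
x = T.matrix('x')

# Training-time instance: owns the parameters updated during learning.
train_net = MLP(rng, x, input_dim=(64, 784), n_in=784, n_hidden=500, n_out=10)

# Test-time instance: rebuilt from the trained classifier, reusing its weights.
test_net = MLP(rng, x, input_dim=(64, 784), n_in=784, n_hidden=500, n_out=10,
               test=True, classifier=train_net)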
Example #3
    def __init__(self, rng, input, n_in, n_hidden, n_out):
        """Initialize the parameters for the multilayer perceptron

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.TensorType
        :param input: symbolic variable that describes the input of the
        architecture (one minibatch)

        :type n_in: int
        :param n_in: number of input units, the dimension of the space in
        which the datapoints lie

        :type n_hidden: int
        :param n_hidden: number of hidden units

        :type n_out: int
        :param n_out: number of output units, the dimension of the space in
        which the labels lie

        """

        # Since this is a one-hidden-layer MLP, it translates into a
        # HiddenLayer with a tanh activation function followed by a softmax
        # output layer (the equivalent of a LogisticRegression layer); the
        # activation function can be replaced by sigmoid or any other
        # nonlinear function
        self.hiddenLayer = HiddenLayer(rng=rng,
                                       input=input,
                                       n_in=n_in,
                                       n_out=n_hidden,
                                       activation=T.tanh)

        # Output-layer weight matrix of shape (n_hidden, n_out)
        self.W = theano.shared(name='W',
                               value=numpy.random.uniform(
                                   -0.2, 0.2, (n_hidden, n_out)).astype(
                                       theano.config.floatX))

        # Output-layer bias vector of shape (n_out,)
        self.b = theano.shared(name='b',
                               value=numpy.zeros(n_out).astype(
                                   theano.config.floatX))

        self.outputLayer = T.nnet.softmax(
            T.dot(self.hiddenLayer.output, self.W) + self.b)

        # L1 norm ; one regularization option is to enforce L1 norm to
        # be small
        self.L1 = (abs(self.hiddenLayer.W).sum() + abs(self.W).sum())

        # square of L2 norm ; one regularization option is to enforce
        # square of L2 norm to be small
        self.L2_sqr = ((self.hiddenLayer.W**2).sum() + (self.W**2).sum())

        # compiled prediction functions: class probabilities and the most
        # likely class label, both read off the softmax output layer
        self.predProb = theano.function([input],
                                        self.outputLayer,
                                        name='f_pred_prob')
        self.pred = theano.function([input],
                                    self.outputLayer.argmax(axis=1),
                                    name='f_pred')

        # the parameters of the model are the parameters of the two layers it
        # is made out of
        self.params = self.hiddenLayer.params + [self.W, self.b]
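The class exposes L1 and L2 regularizers but never defines the training cost itself. A minimal sketch of a negative log-likelihood objective and one SGD step, assuming integer class labels y, an instance mlp of this class built on the symbolic input x, and an illustrative learning rate:

import theano
import theano.tensor as T

y = T.ivector('y')  # integer class labels (assumed)

# Mean negative log-likelihood of the correct class under the softmax output
nll = -T.mean(T.log(mlp.outputLayer)[T.arange(y.shape[0]), y])

# Regularized cost and one SGD update per parameter
cost = nll + 1e-4 * mlp.L2_sqr
updates = [(p, p - 0.01 * theano.grad(cost, p)) for p in mlp.params]
train = theano.function([x, y], cost, updates=updates)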