Example #1
    def _get_primary_net(self):

        t = np.int32(0)  # integer offset into the sampled weight vector
        if self.dataset == 'mnist':
            p_net = lasagne.layers.InputLayer([None, 1, 28, 28])
        elif self.dataset == 'cifar10':
            p_net = lasagne.layers.InputLayer([None, 3, 32, 32])
        print(p_net.output_shape)
        inputs = {p_net: self.input_var}
        for ws, args in zip(self.weight_shapes, self.args):

            num_filters = ws[0]

            # TO-DO: generalize to have multiple samples?
            weight = self.weights[0, t:t + num_filters].dimshuffle(
                0, 'x', 'x', 'x')

            num_filters, filter_size, stride, pad, nonl = args[:5]
            p_net = lasagne.layers.Conv2DLayer(p_net,
                                               num_filters,
                                               filter_size,
                                               stride,
                                               pad,
                                               nonlinearity=nonl)
            p_net = stochastic_weight_norm(p_net, weight)

            if args[5] == 'max':
                p_net = lasagne.layers.MaxPool2DLayer(p_net, 2)
            # print(p_net.output_shape)
            t += num_filters

        for layer in range(self.num_mlp_layers):
            weight = self.weights[:, t:t + self.num_hids].reshape(
                (self.wd1, self.num_hids))
            p_net = lasagne.layers.DenseLayer(p_net,
                                              self.num_hids,
                                              nonlinearity=rectify)
            p_net = stochastic_weight_norm(p_net, weight)
            t += self.num_hids

        weight = self.weights[:, t:t + self.num_classes].reshape(
            (self.wd1, self.num_classes))

        p_net = lasagne.layers.DenseLayer(p_net,
                                          self.num_classes,
                                          nonlinearity=nonlinearities.softmax)
        p_net = stochastic_weight_norm(p_net, weight)

        y = T.clip(get_output(p_net, inputs), 0.001, 0.999)  # stability

        self.p_net = p_net
        self.y = y
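All of these examples rescale each layer with stochastic_weight_norm, whose implementation is not shown here. Below is a minimal sketch of the reparameterization it is assumed to apply: weight normalization in which the direction of each weight column is deterministic and only the per-unit scale is sampled (the slice of self.weights passed in as weight above). The helper name weight_norm_reparam is hypothetical, not the repo's API.

# A minimal sketch, assuming stochastic_weight_norm applies the usual
# weight-normalization reparameterization with an externally sampled scale.
# `weight_norm_reparam` is a hypothetical name, not the repo's function.
import theano.tensor as T

def weight_norm_reparam(V, g, epsilon=1e-8):
    # V: (num_inputs, num_units) deterministic direction parameters
    # g: (num_units,) per-unit scales sampled by the hypernetwork
    # For a conv layer the same idea rescales each filter instead of each column.
    norms = T.sqrt(T.sum(T.sqr(V), axis=0, keepdims=True) + epsilon)
    return g.dimshuffle('x', 0) * V / norms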
Example #2
    def _get_primary_net(self):
        t = 0
        p_net = lasagne.layers.InputLayer([None, self.input_dim])
        inputs = {p_net: self.input_var}
        for ws in self.weight_shapes:
            # using weightnorm reparameterization
            # only need ws[1] parameters (for rescaling of the weight matrix)
            num_param = ws[1]
            weight = self.weights[:, t:t + num_param].reshape(
                (self.wd1, ws[1]))
            p_net = lasagne.layers.DenseLayer(p_net, ws[1])
            p_net = stochastic_weight_norm(p_net, weight)
            print(p_net.output_shape)
            t += num_param

        if self.output_type == 'categorical':
            p_net.nonlinearity = nonlinearities.softmax
            y = T.clip(get_output(p_net, inputs), 0.001, 0.999)  # stability
            self.p_net = p_net
            self.y = y
        elif self.output_type == 'real':
            p_net.nonlinearity = nonlinearities.linear
            y = get_output(p_net, inputs)
            self.p_net = p_net
            self.y = y
        else:
            assert False, 'unrecognized output_type: {}'.format(self.output_type)
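A hedged usage sketch of how the symbolic output set up above might be consumed for training. The model handle, target_var, and the choice of optimizer are assumptions, not part of the original code.

# A hedged usage sketch; `model` is assumed to be the object whose
# _get_primary_net has already run, so model.y, model.p_net and
# model.input_var exist.
import theano
import theano.tensor as T
import lasagne

target_var = T.ivector('targets')  # integer class labels ('categorical' case)
loss = lasagne.objectives.categorical_crossentropy(model.y, target_var).mean()
# A real hypernetwork model would also add the hypernetwork's own parameters here.
params = lasagne.layers.get_all_params(model.p_net, trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate=1e-3)
train_fn = theano.function([model.input_var, target_var], loss, updates=updates)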
Example #3
    def _get_primary_net(self):
        t = np.int32(0)  # integer offset into the sampled weight vector
        p_net = lasagne.layers.InputLayer([None, 784])
        inputs = {p_net: self.input_var}
        self.hiddens = list()  # for projection
        for ws in self.weight_shapes:
            # using weightnorm reparameterization
            # only need ws[1] parameters (for rescaling of the weight matrix)
            num_param = ws[1]
            weight = self.weights[:, t:t + num_param].reshape(
                (self.wd1, ws[1]))
            p_net = lasagne.layers.DenseLayer(p_net, ws[1])
            p_net = stochastic_weight_norm(p_net, weight)
            print(p_net.output_shape)
            t += num_param

            self.hiddens.append(p_net)

        # replace the last layer's nonlinearity with softmax for classification
        p_net.nonlinearity = nonlinearities.softmax

        y = T.clip(get_output(p_net, inputs), 0.001, 0.999)  # stability
        self.hs = get_output(self.hiddens, self.input_var)
        self.p_net = p_net
        self.y = y
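This variant additionally records every hidden layer in self.hiddens and their symbolic outputs in self.hs, for the projection mentioned in the comment. A hedged sketch of how those activations might be pulled out at run time; the model and x_batch names are assumptions.

# A hedged sketch; `model` and `x_batch` are assumed names.
import theano

hidden_fn = theano.function([model.input_var], model.hs)
activations = hidden_fn(x_batch)  # one array per layer in model.hiddens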
Example #4
    def _get_primary_net(self):
        # TODO: figure out why I can't run at school anymore (DK)  >:(
        t = 0  # np.cast['int32'](0)  # TODO: what's wrong with np.cast
        p_net = lasagne.layers.InputLayer([None, self.n_inputs])
        inputs = {p_net: self.input_var}
        for ws in self.weight_shapes:
            # using weightnorm reparameterization
            # only need ws[1] parameters (for rescaling of the weight matrix)
            num_param = ws[1]
            weight = self.weights[:, t:t + num_param].reshape(
                (self.wd1, ws[1]))
            p_net = lasagne.layers.DenseLayer(p_net, ws[1])
            p_net = stochastic_weight_norm(p_net, weight)
            print(p_net.output_shape)
            t += num_param

        if self.output_type == 'categorical':
            p_net.nonlinearity = nonlinearities.softmax
            y_unclipped = get_output(p_net, inputs)
            y = T.clip(y_unclipped, 0.001, 0.999)  # stability
            self.p_net = p_net
            self.y = y
            self.y_unclipped = y_unclipped
        elif self.output_type == 'real':
            p_net.nonlinearity = nonlinearities.linear
            y = get_output(p_net, inputs)
            self.p_net = p_net
            self.y = y
            self.y_unclipped = y
        else:
            assert False, 'unrecognized output_type: {}'.format(self.output_type)
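A hedged sketch of why both the clipped and unclipped outputs are exposed in the categorical case (again, model and target_var are assumed names): the clipped y keeps the log terms of the cross-entropy finite, while predictions and accuracy can be read from the raw y_unclipped.

# A hedged sketch; `model` and `target_var` are assumed names.
import theano.tensor as T
import lasagne

target_var = T.ivector('targets')
loss = lasagne.objectives.categorical_crossentropy(model.y, target_var).mean()
acc = T.mean(T.eq(T.argmax(model.y_unclipped, axis=1), target_var))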