Example #1
# NOTE: the snippets on this page assume module-level imports and settings
# from their source repositories, e.g. `import lasagne.layers as l`,
# `from lasagne import init, nonlinearities`, plus constants such as
# IM_DIM, IM_SIZE, INIT_GAIN, NONLINEARITY, MULTI_LABEL and NUM_CLASSES.
def buildModel(mtype=1):

    print("BUILDING MODEL TYPE", mtype, "...")

    #default settings (Model 1)
    filters = 64
    first_stride = 2
    last_filter_multiplier = 16

    #specific model type settings (see working notes for details)
    if mtype == 2:
        first_stride = 1
    elif mtype == 3:
        filters = 32
        last_filter_multiplier = 8

    #input layer
    net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))

    #conv layers
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    if mtype == 2:
        net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
        net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * last_filter_multiplier, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net) 

    #dense layers
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))

    #Classification Layer
    if MULTI_LABEL:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
    else:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))

    print "...DONE!"

    #model stats
    print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
    print "MODEL HAS", l.count_params(net), "PARAMS"

    return net
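For reference, a minimal sketch of the module-level setup this snippet relies on. The constant values below are illustrative guesses, not the original repository's configuration:

import lasagne.layers as l
from lasagne import init, nonlinearities

IM_SIZE = (512, 256)                  # (width, height) of the input images
IM_DIM = 1                            # number of input channels
INIT_GAIN = 1.0                       # gain passed to init.HeNormal
NONLINEARITY = nonlinearities.elu     # activation used in all hidden layers
MULTI_LABEL = False
NUM_CLASSES = 10

net = buildModel(mtype=2)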
Example #2
def buildModel():

    print("BUILDING MODEL...")

    #default settings
    filters = 32
    first_stride = 2
    last_filter_multiplier = 4

    #input layer
    net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))

    #conv layers
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 16, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    #net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 32 , filter_size=7, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    #net = l.MaxPool2DLayer(net, pool_size=2)


    #print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net)

    #dense layers
    net = l.batch_norm(l.DenseLayer(net, 256, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)
    net = l.batch_norm(l.DenseLayer(net, 256, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)

    #Classification Layer
    if MULTI_LABEL:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
    else:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))

    print "...DONE!"

    #model stats
    print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
    print "MODEL HAS", l.count_params(net), "PARAMS"

    return net
Example #3
    def __init__(self, incoming, n_units, svi=True,
                 mW_init=linit.HeNormal(), mb_init=linit.Constant([0.]),
                 sW_init=linit.Constant([-5.]), sb_init=linit.Constant([-5.]),
                 actfun=lnl.softmax, **kwargs):
        """Mixture weights layer with optional weight uncertainty

        If n_units > 1, this becomes a fully-connected layer. Else, no
        parameters are added, and the output defaults to weight 1.

        See ``delfi.neuralnet.layers.FullyConnected`` for docstring
        """
        self.n_units = n_units

        if n_units > 1:
            super(MixtureWeightsLayer, self).__init__(
                incoming,
                n_units,
                svi=svi,
                mW_init=mW_init,
                mb_init=mb_init,
                sW_init=sW_init,
                sb_init=sb_init,
                actfun=actfun,
                **kwargs)
        else:
            # n_units == 1: skip FullyConnectedLayer.__init__ and call
            # lasagne.layers.Layer.__init__ directly, adding no parameters
            super(FullyConnectedLayer, self).__init__(incoming, **kwargs)
Example #4
def initialization(name):

    # maps an activation name to a HeNormal initializer with a matching gain
    # (assumes `import math` and `from lasagne import init`, as above)
    initializations = {
        'sigmoid': init.HeNormal(gain=1.0),
        'softmax': init.HeNormal(gain=1.0),
        'elu': init.HeNormal(gain=1.0),
        'relu': init.HeNormal(gain=math.sqrt(2)),
        'lrelu': init.HeNormal(gain=math.sqrt(2 / (1 + 0.01**2))),
        'vlrelu': init.HeNormal(gain=math.sqrt(2 / (1 + 0.33**2))),
        'rectify': init.HeNormal(gain=math.sqrt(2)),
        'identity': init.HeNormal(gain=math.sqrt(2))
    }

    return initializations[name]
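The gains follow the He initialisation rule: for a (leaky) rectifier with negative slope alpha, the variance-preserving gain is sqrt(2 / (1 + alpha**2)), which reduces to sqrt(2) for plain ReLU (alpha = 0). A usage sketch follows; the Conv2DLayer call is an assumption consistent with the snippets above, and 0.01 is the default slope of lasagne's leaky_rectify:

import math
import lasagne.layers as l
from lasagne import init, nonlinearities

layer = l.Conv2DLayer(
    l.InputLayer((None, 1, 64, 64)),
    num_filters=32, filter_size=3, pad='same',
    W=initialization('lrelu'),   # HeNormal with gain=sqrt(2 / (1 + 0.01**2))
    nonlinearity=nonlinearities.leaky_rectify)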
Example #5
    def __init__(self, incoming, n_units, svi=True,
                 mW_init=linit.HeNormal(), mb_init=linit.Constant([0.]),
                 sW_init=linit.Constant([-5.]), sb_init=linit.Constant([-5.]),
                 actfun=lnl.tanh, **kwargs):
        """Fully connected layer with optional weight uncertainty

        Parameters
        ----------
        incoming : lasagne.layers.Layer instance
            Incoming layer
        n_units : int
            Number of units
        svi : bool
            Weight uncertainty
        mW_init : function
            Function to initialise weights for mean of weight (multiplicative)
        mb_init : function
            Function to initialise weights for mean of weight (bias)
        sW_init : function
            Function to initialise weights for log std of weight (multiplicative)
        sb_init : function
            Function to initialise weights for log std of weight (bias)
        actfun : function
            Activation function
        """
        super(FullyConnectedLayer, self).__init__(incoming, **kwargs)
        self.n_units = n_units
        self.actfun = actfun
        self.svi = svi

        self.mW = self.add_param(mW_init,
                                 (self.input_shape[1], self.n_units),
                                 name='mW', mp=True, wp=True)
        self.mb = self.add_param(mb_init,
                                 (self.n_units,),
                                 name='mb', mp=True, bp=True)

        if self.svi:
            self._srng = RandomStreams(
                lasagne.random.get_rng().randint(
                    1, 2147462579))
            self.sW = self.add_param(sW_init,
                                     (self.input_shape[1], self.n_units),
                                     name='sW', sp=True, wp=True)
            self.sb = self.add_param(sb_init,
                                     (self.n_units,),
                                     name='sb', sp=True, bp=True)
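The mW/sW split above is the standard mean plus log-standard-deviation parameterization of a Gaussian weight posterior. A NumPy sketch of the sampling it implies (an illustration of the technique, not delfi's actual forward pass):

import numpy as np

rng = np.random.default_rng(0)
n_in, n_units = 8, 4
mW = np.zeros((n_in, n_units))       # mean of weights (mW)
sW = np.full((n_in, n_units), -5.0)  # log std of weights (sW), init at -5

eps = rng.standard_normal((n_in, n_units))
W = mW + np.exp(sW) * eps            # reparameterized weight sample
print(W.std())                       # ~exp(-5): a nearly deterministic start

Initialising sW at a constant -5 keeps exp(sW) around 0.007, so the layer starts out close to a deterministic network and the weight uncertainty is learned from there.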
Example #6
    def __init__(self,
                 incoming,
                 n_components,
                 n_dim,
                 svi=True,
                 rank=None,
                 homoscedastic=False,
                 mWs_init=linit.HeNormal(),
                 mbs_init=linit.Constant([0.]),
                 sWs_init=linit.Constant([-5.]),
                 sbs_init=linit.Constant([-5.]),
                 min_precisions=None,
                 **kwargs):
        """Fully connected layer for mixture precisions, optional weight uncertainty

        Parameters
        ----------
        incoming : lasagne.layers.Layer instance
            Incoming layer
        n_components : int
            Number of components
        n_dim : int
            Dimensionality of output vector
        svi : bool
            Weight uncertainty
        mWs_init : function
            Function to initialise weights for mean of weight (multiplicative);
            applied per component
        mbs_init : function
            Function to initialise weights for mean of weight (bias);
            applied per component
        sWs_init : function
            Function to initialise weights for log std of weight (multiplicative);
            applied per component
        sbs_init : function
            Function to initialise weights for log std of weight (bias);
            applied per component
        min_precisions: 1D numpy array of float32 or None
            Minimum values for the diagonal elements of the precision matrix,
            for all components
        """
        super(MixturePrecisionsLayer, self).__init__(incoming, **kwargs)
        self.n_components = n_components
        self.rank = rank
        assert not homoscedastic
        self.homoscedastic = homoscedastic
        self.n_dim = n_dim
        self.svi = svi

        self.mWs = [
            self.add_param(mWs_init, (self.input_shape[1], self.n_dim**2),
                           name='mW' + str(c),
                           mp=True,
                           wp=True) for c in range(n_components)
        ]
        self.mbs = [
            self.add_param(mbs_init, (self.n_dim**2, ),
                           name='mb' + str(c),
                           mp=True,
                           bp=True) for c in range(n_components)
        ]

        if self.svi:
            self._srng = RandomStreams(lasagne.random.get_rng().randint(
                1, 2147462579))
            self.sWs = [
                self.add_param(sWs_init, (self.input_shape[1], self.n_dim**2),
                               name='sW' + str(c),
                               sp=True,
                               wp=True) for c in range(n_components)
            ]
            self.sbs = [
                self.add_param(sbs_init, (self.n_dim**2, ),
                               name='sb' + str(c),
                               sp=True,
                               bp=True) for c in range(n_components)
            ]

        if min_precisions is not None:
            assert min_precisions.ndim == 1 and \
                   min_precisions.size == self.n_dim, "invalid min precisions"
            min_precisions = min_precisions.astype(dtype)
            self.min_U_column_norms = np.sqrt(min_precisions)
        else:
            self.min_U_column_norms = None
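The square root in min_U_column_norms follows from the factored parameterization the attribute name suggests: assuming the precision matrix is built as P = U^T U, diag(P)_i equals the squared norm of the i-th column of U, so a floor of m on the diagonal precisions is equivalent to a floor of sqrt(m) on the column norms. A quick NumPy check of that identity:

import numpy as np

U = np.triu(np.random.default_rng(1).standard_normal((3, 3)))
P = U.T @ U
assert np.allclose(np.diag(P), (U ** 2).sum(axis=0))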
Example #7
    def __init__(self,
                 incoming,
                 n_components,
                 n_dim,
                 svi=True,
                 mWs_init=linit.HeNormal(),
                 mbs_init=linit.Normal(1.),
                 sWs_init=linit.Constant([-5.]),
                 sbs_init=linit.Constant([-5.]),
                 **kwargs):
        """Fully connected layer for mixture means, optional weight uncertainty

        Parameters
        ----------
        incoming : lasagne.layers.Layer instance
            Incoming layer
        n_components : int
            Number of components
        n_dim : int
            Dimensionality of output vector
        svi : bool
            Weight uncertainty
        mWs_init : function
            Function to initialise weights for mean of weight (multiplicative);
            applied per component
        mbs_init : function
            Function to initialise weights for mean of weight (bias);
            applied per component
        sWs_init : function
            Function to initialise weights for log std of weight (multiplicative);
            applied per component
        sbs_init : function
            Function to initialise weights for log std of weight (bias);
            applied per component
        """
        super(MixtureMeansLayer, self).__init__(incoming, **kwargs)
        self.n_components = n_components
        self.n_dim = n_dim
        self.svi = svi

        self.mWs = [self.add_param(mWs_init,
                                   (self.input_shape[1], self.n_dim),
                                   name='mW' + str(c), mp=True, wp=True)
                    for c in range(n_components)]
        self.mbs = [self.add_param(mbs_init,
                                   (self.n_dim,),
                                   name='mb' + str(c), mp=True, bp=True)
                    for c in range(n_components)]

        if self.svi:
            self._srng = RandomStreams(
                lasagne.random.get_rng().randint(
                    1, 2147462579))
            self.sWs = [self.add_param(sWs_init,
                                       (self.input_shape[1], self.n_dim),
                                       name='sW' + str(c), sp=True, wp=True)
                        for c in range(n_components)]
            self.sbs = [self.add_param(sbs_init,
                                       (self.n_dim,),
                                       name='sb' + str(c), sp=True, bp=True)
                        for c in range(n_components)]
Example #8
    def run(self, shape):
        return LI.HeNormal(gain=self.gain).sample(shape)
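Lasagne initializers expose a sample(shape) method, which is all run wraps here. A usage sketch with a hypothetical enclosing class (the gain attribute and the LI alias for lasagne.init are taken from the snippet):

import lasagne.init as LI

class HeNormalSampler(object):
    """Hypothetical wrapper mirroring the run method above."""
    def __init__(self, gain=1.0):
        self.gain = gain

    def run(self, shape):
        return LI.HeNormal(gain=self.gain).sample(shape)

w = HeNormalSampler(gain=1.0).run((128, 64))
print(w.shape, w.std())  # std is roughly gain * sqrt(1 / fan_in)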