Example #1
    def __init__(self, inpt,
                 n_inpt, n_hiddens, n_output,
                 hidden_transfers, out_transfer_mean='identity',
                 out_transfer_var=var_transfer,
                 pooling=None,
                 declare=None, name=None, rng=None):
        self.inpt = inpt
        self.n_inpt = n_inpt
        self.n_hiddens = n_hiddens
        self.n_output = n_output
        self.hidden_transfers = hidden_transfers
        self.out_transfer_mean = out_transfer_mean
        self.out_transfer_var = out_transfer_var
        self.pooling = pooling

        self.rnn = Rnn(
            self.inpt, self.n_inpt, self.n_hiddens, self.n_output * 2,
            self.hidden_transfers,
            'identity',
            pooling=pooling,
            declare=declare)

        f_mean_transfer = lookup(self.out_transfer_mean, _transfer)
        f_var_transfer = lookup(self.out_transfer_var, _transfer)

        super(RnnDiagGauss, self).__init__(
            f_mean_transfer(self.rnn.output[:, :self.n_output]),
            f_var_transfer(self.rnn.output[:, self.n_output:]),
            rng)
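
The constructor above asks the inner Rnn for 2 * n_output units and then splits them: the first half goes through the mean transfer, the second half through the variance transfer (Example 3 shows the same split as a callable). A minimal numpy sketch of that split, with softplus standing in as an assumed positivity-enforcing variance transfer; breze's actual var_transfer may differ:

import numpy as np

def split_mean_var(output, n_output):
    # First half of the last axis is the mean, second half the raw variance.
    mean = output[..., :n_output]                          # identity transfer
    var = np.log1p(np.exp(output[..., n_output:])) + 1e-8  # softplus keeps it positive
    return mean, var

rnn_output = np.random.randn(7, 5, 4)  # (time, batch, 2 * n_output), n_output == 2
mean, var = split_mean_var(rnn_output, 2)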
Example #2
    def _forward(self):
        transfers = self.hidden_transfers
        transfers = [lookup(i, _transfer) for i in transfers]
        transfer_insizes = [getattr(i, 'in_size', 1) for i in transfers]
        transfer_outsizes = [1] + [
            getattr(i, 'out_size', 1) for i in transfers
        ]

        n_incoming = [self.n_inpt] + self.n_hiddens[:-1]
        n_outgoing = self.n_hiddens

        n_time_steps, _, _ = self.inpt.shape

        self.layers = []
        x = self.inpt
        for n, m, t, tis, tos in zip(n_incoming, n_outgoing, transfers,
                                     transfer_insizes, transfer_outsizes):
            x_flat = x.reshape((-1, n))

            affine = simple.AffineNonlinear(x_flat,
                                            n * tos,
                                            m * tis,
                                            lambda x: x,
                                            declare=self.declare)
            pre_recurrent_flat = affine.output

            pre_recurrent = pre_recurrent_flat.reshape(
                (n_time_steps, -1, m * tis))

            tout = getattr(t, 'out_size', 1)
            recurrent = sequential.Recurrent(pre_recurrent,
                                             m * tout,
                                             t,
                                             declare=self.declare)
            x = recurrent.output

            self.layers += [affine, recurrent]

        x_flat = x.reshape((-1, m * tout))
        out_transfer = lookup(self.out_transfer, _transfer)
        out_in_size = getattr(out_transfer, 'in_size', 1)
        output_affine = simple.AffineNonlinear(x_flat,
                                               m,
                                               self.n_output * out_in_size,
                                               out_transfer,
                                               declare=self.declare)

        self.layers.append(output_affine)

        output = output_affine.output.reshape(
            (n_time_steps, -1, self.n_output))

        if self.pooling:
            self.pre_pooling = output
            self.output = sequential.Pooling(output, self.pooling).output
        else:
            self.output = output
Example #3
    def __call__(self, inpt):
        f_mean_transfer = lookup(self.mean_transfer, _transfer)
        f_var_transfer = lookup(self.var_transfer, _transfer)

        half = inpt.shape[-1] // 2

        if inpt.ndim == 3:
            mean, var = inpt[:, :, :half], inpt[:, :, half:]
            res = T.concatenate([f_mean_transfer(mean), f_var_transfer(var)], axis=2)
        else:
            mean, var = inpt[:, :half], inpt[:, half:]
            res = T.concatenate([f_mean_transfer(mean), f_var_transfer(var)], axis=1)
        return res
Example #4
File: base.py Project: Wiebke/breze
    def _forward(self):
        transfers = self.hidden_transfers
        transfers = [lookup(i, _transfer) for i in transfers]
        transfer_insizes = [getattr(i, 'in_size', 1) for i in transfers]
        transfer_outsizes = [1] + [getattr(i, 'out_size', 1) for i in transfers]

        n_incoming = [self.n_inpt] + self.n_hiddens[:-1]
        n_outgoing = self.n_hiddens

        n_time_steps, _, _ = self.inpt.shape

        self.layers = []
        x = self.inpt
        for n, m, t, tis, tos in zip(n_incoming, n_outgoing, transfers,
                                     transfer_insizes, transfer_outsizes):
            x_flat = x.reshape((-1, n))

            affine = simple.AffineNonlinear(
                x_flat, n * tos, m * tis, lambda x: x, declare=self.declare)
            pre_recurrent_flat = affine.output

            pre_recurrent = pre_recurrent_flat.reshape(
                (n_time_steps, -1, m * tis))

            tout = getattr(t, 'out_size', 1)
            recurrent = sequential.Recurrent(
                pre_recurrent, m * tout, t, declare=self.declare)
            x = recurrent.output

            self.layers += [affine, recurrent]

        x_flat = x.reshape((-1, m * tout))
        out_transfer = lookup(self.out_transfer, _transfer)
        out_in_size = getattr(out_transfer, 'in_size', 1)
        output_affine = simple.AffineNonlinear(
            x_flat, m, self.n_output * out_in_size, out_transfer,
            declare=self.declare
            )

        self.layers.append(output_affine)

        output = output_affine.output.reshape(
            (n_time_steps, -1, self.n_output))

        if self.pooling:
            self.pre_pooling = output
            self.output = sequential.Pooling(output, self.pooling).output
        else:
            self.output = output
Example #5
    def _forward(self):
        f_loss = lookup(self.loss_ident, _loss)

        self.coord_wise_multi = [
            f_loss(self.target, self.transfer(pred))
            for pred in self.predictions
        ]
        if self.imp_weight is not None:
            self.coord_wise_multi = [
                coord_wise * self.imp_weight
                for coord_wise in self.coord_wise_multi
            ]

        self.sample_wise_multi = [
            coord_wise.sum(self.comp_dim)
            for coord_wise in self.coord_wise_multi
        ]
        self.total_multi = [
            sample_wise.mean() for sample_wise in self.sample_wise_multi
        ]

        self.total = T.zeros(self.total_multi[0].shape)
        for tot, pw in zip(self.total_multi, self.p_weights):
            self.total += tot * pw

        if self.mode == 'mean':
            self.total /= len(self.predictions)
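
The combination step above is a plain p_weights-weighted sum of the per-prediction totals, divided by the number of predictions in 'mean' mode. A small standalone sketch with made-up numbers:

def combine_losses(totals, p_weights, mode='mean'):
    # Weighted sum of per-prediction scalar losses, optionally averaged.
    total = sum(t * w for t, w in zip(totals, p_weights))
    if mode == 'mean':
        total /= len(totals)
    return total

combine_losses([0.5, 0.8, 0.3], [1., 1., 1.])  # -> 0.5333...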
Example #6
def fawn_recurrent(inpt_mean, inpt_var, weights_mean, weights_var, f,
                   initial_mean, initial_var):

    f_transfer = lookup(f, transfer_)

    def step(inpt_mean, inpt_var, him_m1, hiv_m1, hom_m1, hov_m1):
        wm, wv = weights_mean, weights_var

        pres_mean = T.dot(inpt_mean, wm)
        pres_var = (T.dot(inpt_mean**2, wv) + T.dot(inpt_var, wm**2) +
                    T.dot(inpt_var, wv))

        post_mean, post_var = f_transfer(pres_mean, pres_var)
        return pres_mean, pres_var, post_mean, post_var

    if initial_mean.ndim == 1:
        initial_mean = repeat(initial_mean.dimshuffle('x', 0),
                              inpt_mean.shape[1],
                              axis=0)
    if initial_var.ndim == 1:
        initial_var = repeat(initial_var.dimshuffle('x', 0),
                             inpt_mean.shape[1],
                             axis=0)

    (hidden_in_mean_rec, hidden_in_var_rec, hidden_mean_rec,
     hidden_var_rec), _ = theano.scan(step,
                                      sequences=[inpt_mean, inpt_var],
                                      outputs_info=[
                                          T.zeros_like(inpt_mean[0]),
                                          T.zeros_like(inpt_mean[0]),
                                          initial_mean, initial_var
                                      ])

    return (hidden_in_mean_rec, hidden_in_var_rec, hidden_mean_rec,
            hidden_var_rec)
Example #7
    def _forward(self):
        self.d_cnn = Cnn3dFlex(
            inpt=self.inpt, image_height=self.image_height,
            image_width=self.image_width, image_depth=self.image_depth,
            n_channel=self.n_channel, n_hiddens=self.n_hiddens_conv,
            filter_shapes=self.d_filter_shapes, pool_shapes=self.down_pools,
            hidden_transfers=self.hidden_transfers_conv,
            declare=self.declare, name=self.name, border_modes=self.b_modes_down,
            implementation=self.implementation, strides=self.strides_down
        )

        d_out_height = self.d_cnn.layers[-1].output_height
        d_out_width = self.d_cnn.layers[-1].output_width
        d_out_depth = self.d_cnn.layers[-1].output_depth

        self.u_cnn = UpSampleNetwork3d(
            inpt=self.d_cnn.output, image_height=d_out_height,
            image_width=d_out_width, image_depth=d_out_depth,
            n_channel=self.n_hiddens_conv[-1], n_hiddens=self.n_hiddens_upconv,
            filter_shapes=self.u_filter_shapes, pool_shapes=self.up_pools,
            hidden_transfers=self.hidden_transfers_upconv,
            border_modes=self.b_modes_up, declare=self.declare,
            name=self.name, implementation=self.implementation,
            up_factors=self.up_factors
        )

        output = self.u_cnn.output.dimshuffle(0, 3, 4, 1, 2)
        output = T.reshape(output, (-1, self.n_hiddens_upconv[-1]))

        f = lookup(self.out_transfer, _transfer)
        self.output = T.reshape(f(output), (1, -1, self.n_hiddens_upconv[-1]))
Example #8
File: rnn.py Project: Wiebke/breze
    def _init_exprs(self):
        inpt, target, imp_weight = self._init_inpts()

        parameters = ParameterSet()

        self.rnn = neural.FastDropoutRnn(
            inpt,
            self.n_inpt, self.n_hiddens, self.n_output,
            self.hidden_transfers, self.out_transfer,
            p_dropout_inpt=self.p_dropout_inpt,
            p_dropout_hiddens=self.p_dropout_hiddens,
            p_dropout_hidden_to_out=self.p_dropout_hidden_to_out,
            pooling=self.pooling,
            declare=parameters.declare)

        f_loss = lookup(self.loss_ident, vp_loss)
        output = T.concatenate(self.rnn.outputs, 2)
        self.loss_layer = simple.SupervisedLoss(
            target, output, loss=f_loss,
            imp_weight=imp_weight,
            declare=parameters.declare,
            comp_dim=2)

        SupervisedModel.__init__(
            self, inpt=inpt, target=target, output=output,
            loss=self.loss_layer.total,
            parameters=parameters)

        if self.imp_weight:
            self.exprs['imp_weight'] = imp_weight
Example #9
    def _forward(self):
        f = lookup(self.transfer, _transfer)
        n, m = self.n_inpt, self.n_inpt

        # If a transfer function has differing dimensionalities in its domain
        # and co-domain, it can specify them via ``in_size`` and ``out_size``
        # attributes; both default to 1.
        n *= getattr(f, 'in_size', 1)
        m *= getattr(f, 'out_size', 1)

        self.initial_mean = self.declare((m, ))
        self.initial_std = self.declare((m, ))
        self.weights = self.declare((m, n))

        if getattr(f, 'stateful', False):
            res = recurrent_layer_stateful(self.inpt_mean, self.inpt_var,
                                           self.weights, f, self.initial_mean,
                                           self.initial_std**2 + 1e-8,
                                           self.p_dropout)
            (self.state_mean, self.state_var, self.output_in_mean,
             self.output_in_var, self.output_mean, self.output_var) = res
        else:
            res = recurrent_layer(self.inpt_mean, self.inpt_var, self.weights,
                                  f, self.initial_mean,
                                  self.initial_std**2 + 1e-8, self.p_dropout)
            (self.output_in_mean, self.output_in_var, self.output_mean,
             self.output_var) = res

        self.outputs = self.output_mean, self.output_var
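
The in_size/out_size convention read via getattr above lets a transfer consume or produce a different number of blocks than the layer has units, which is why the weight shapes are scaled by those factors. A hypothetical mean/variance transfer illustrating the convention (the function itself is an assumption for illustration, not part of breze): it averages two pre-synaptic blocks per unit, so its in_size is 2.

def pairwise_mean_transfer(mean, var):
    # Average two blocks per output unit; for independent inputs,
    # Var((a + b) / 2) = (Var(a) + Var(b)) / 4.
    half = mean.shape[-1] // 2
    return ((mean[..., :half] + mean[..., half:]) / 2.,
            (var[..., :half] + var[..., half:]) / 4.)

pairwise_mean_transfer.in_size = 2   # domain is twice the layer width
pairwise_mean_transfer.out_size = 1  # co-domain matches the layer width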
Example #10
def tensor_softmax(inpt, n_classes=2):
    output = inpt.dimshuffle(0, 3, 4, 1, 2)
    output = T.reshape(output, (-1, n_classes))

    f = lookup('softmax', _transfer)
    output = T.reshape(f(output), (1, -1, n_classes))
    return output
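
tensor_softmax flattens a 5D volume so that the class (channel) axis comes last, applies a softmax per voxel, and reshapes to (1, -1, n_classes). A rough numpy analogue, assuming the (samples, depth, channels, height, width) layout that Example 11 uses for its input:

import numpy as np

def np_tensor_softmax(inpt, n_classes=2):
    # Move the class axis last, flatten every voxel, softmax over classes.
    output = inpt.transpose(0, 3, 4, 1, 2).reshape(-1, n_classes)
    e = np.exp(output - output.max(axis=1, keepdims=True))  # stable softmax
    return (e / e.sum(axis=1, keepdims=True)).reshape(1, -1, n_classes)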
Example #11
    def _init_exprs(self):
        inpt = tensor5('inpt')
        inpt.tag.test_value = np.zeros((
            2, self.image_depth, self.n_channel,
            self.image_height, self.image_width
        ))

        target = T.matrix('target')
        target.tag.test_value = np.zeros((
            2, self.n_output
        ))

        parameters = ParameterSet()

        if self.dropout:
            self.p_dropout_inpt = .2
            self.p_dropout_hiddens = [.5] * len(self.n_hiddens_full)
        else:
            self.p_dropout_inpt = None
            self.p_dropout_hiddens = None

        self.conv_net = cnn3d.ConvNet3d(
            inpt=inpt, image_height=self.image_height,
            image_width=self.image_width, image_depth=self.image_depth,
            n_channel=self.n_channel, n_hiddens_conv=self.n_hiddens_conv,
            filter_shapes=self.filter_shapes, pool_shapes=self.pool_shapes,
            n_hiddens_full=self.n_hiddens_full,
            hidden_transfers_conv=self.hidden_transfers_conv,
            hidden_transfers_full=self.hidden_transfers_full, n_output=self.n_output,
            out_transfer=self.out_transfer,
            border_modes=self.border_modes,
            declare=parameters.declare,
            implementation=self.implementation,
            dropout=self.dropout, p_dropout_inpt=self.p_dropout_inpt,
            p_dropout_hiddens=self.p_dropout_hiddens
        )

        output = self.conv_net.output

        if self.imp_weight:
            imp_weight = T.matrix('imp_weight')
        else:
            imp_weight = None

        if not self.dropout:
            loss_id = self.loss_ident
        else:
            loss_id = lookup(self.loss_ident, vp_loss)

        self.loss_layer = SupervisedLoss(
            target, output, loss=loss_id,
            imp_weight=imp_weight, declare=parameters.declare
        )

        SupervisedModel.__init__(self, inpt=inpt, target=target,
                                 output=output,
                                 loss=self.loss_layer.total,
                                 parameters=parameters)

        self.exprs['imp_weight'] = imp_weight
Example #12
    def _forward(self):
        self.output_in = downsample.max_pool_2d(
            input=self.inpt, ds=(self.pool_height, self.pool_width),
            ignore_border=True)

        f = lookup(self.transfer, _transfer)
        self.output = f(self.output_in)
Example #13
    def _forward(self):
        f = lookup(self.transfer, _transfer)
        n, m = self.n_inpt, self.n_inpt

        # If a transfer function has differing dimensionalities in its domain
        # and co-domain, it can specify them via ``in_size`` and ``out_size``
        # attributes; both default to 1.
        n *= getattr(f, 'in_size', 1)
        m *= getattr(f, 'out_size', 1)

        self.initial_mean = self.declare((m,))
        self.initial_std = self.declare((m,))
        self.weights = self.declare((m, n))

        if getattr(f, 'stateful', False):
            res = recurrent_layer_stateful(
                self.inpt_mean, self.inpt_var,
                self.weights,
                f,
                self.initial_mean, self.initial_std ** 2 + 1e-8,
                self.p_dropout)
            (self.state_mean, self.state_var,
             self.output_in_mean, self.output_in_var,
             self.output_mean, self.output_var) = res
        else:
            res = recurrent_layer(
                self.inpt_mean, self.inpt_var,
                self.weights,
                f,
                self.initial_mean, self.initial_std ** 2 + 1e-8,
                self.p_dropout)
            (self.output_in_mean, self.output_in_var,
             self.output_mean, self.output_var) = res

        self.outputs = self.output_mean, self.output_var
Exemplo n.º 14
0
def tensor_softmax(inpt, n_classes=2):
    output = inpt.dimshuffle(0, 3, 4, 1, 2)
    output = T.reshape(output, (-1, n_classes))

    f = lookup('softmax', _transfer)
    output = T.reshape(f(output), (1, -1, n_classes))
    return output
Example #14
    def __call__(self, inpt):
        f_mean_transfer = lookup(self.mean_transfer, _transfer)
        f_var_transfer = lookup(self.var_transfer, _transfer)

        half = inpt.shape[-1] // 2

        if inpt.ndim == 3:
            mean, var = inpt[:, :, :half], inpt[:, :, half:]
            res = T.concatenate([f_mean_transfer(mean),
                                 f_var_transfer(var)],
                                axis=2)
        else:
            mean, var = inpt[:, :half], inpt[:, half:]
            res = T.concatenate([f_mean_transfer(mean),
                                 f_var_transfer(var)],
                                axis=1)
        return res
Example #15
    def _init_exprs(self):
        super(SparseAutoEncoder, self)._init_exprs()
        f_sparsity_loss = lookup(self.sparsity_loss, loss_)
        sparsity_loss = f_sparsity_loss(
            self.sparsity_target, self.exprs['feature'].mean(axis=0)).sum()
        loss = self.exprs['loss'] + self.c_sparsity * sparsity_loss

        self.exprs.update(get_named_variables(locals(), overwrite=True))
Example #16
    def _init_exprs(self):
        super(SparseAutoEncoder, self)._init_exprs()
        f_sparsity_loss = lookup(self.sparsity_loss, loss_)
        sparsity_loss = f_sparsity_loss(
            self.sparsity_target, self.exprs['feature'].mean(axis=0)).sum()
        loss = self.exprs['loss'] + self.c_sparsity * sparsity_loss

        self.exprs.update(get_named_variables(locals(), overwrite=True))
Example #17
    def _forward(self):
        if not self.prelu:
            if self.transfer == 't_softmax':
                self.output = tensor_softmax(self.inpt, self.n_output)
            else:
                f = lookup(self.transfer, _transfer)
                self.output = f(self.inpt)
        else:
            self.a = self.declare((1, 1, self.n_output, 1, 1))
            self.output = prelu(self.inpt, self.a)
Example #18
    def _forward(self):
        f_density = lookup(self.density, _transfer)
        output = f_density(self.inpt)

        col_normalized = T.sqrt(
            norm.normalize(output, lambda x: x ** 2, axis=0) + 1E-8)
        row_normalized = T.sqrt(
            norm.normalize(col_normalized, lambda x: x ** 2, axis=1) + 1E-8)

        loss_sample_wise = row_normalized.sum(axis=1)
        self.total = loss_sample_wise.mean()
Example #19
    def _forward(self):
        f_loss = lookup(self.loss_ident, _loss)

        self.coord_wise = f_loss(self.target, self.prediction)

        if self.imp_weight is not None:
            self.coord_wise *= self.imp_weight

        self.sample_wise = self.coord_wise.sum(self.comp_dim)

        self.total = self.sample_wise.mean()
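
The reduction above goes element-wise loss -> optional importance weighting -> sum over the component axis -> mean over samples. A numpy sketch of the same pipeline, with squared error standing in for f_loss and made-up values:

import numpy as np

target = np.array([[0., 1.], [1., 0.]])
prediction = np.array([[.1, .8], [.9, .3]])
coord_wise = (target - prediction) ** 2  # per-element losses
sample_wise = coord_wise.sum(axis=1)     # comp_dim == 1 here
total = sample_wise.mean()               # scalar training loss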
Example #20
    def _forward(self):
        self.weights = self.declare((self.n_inpt, self.n_output))

        self.output_in = T.dot(self.inpt, self.weights)

        if self.use_bias:
            self.bias = self.declare(self.n_output)
            self.output_in += self.bias

        f = lookup(self.transfer, _transfer)

        self.output = f(self.output_in)
Example #21
    def _forward(self):
        if not self.prelu:
            if self.transfer == 't_softmax':
                self.output = tensor_softmax(self.inpt, self.n_output)
            else:
                f = lookup(self.transfer, _transfer)
                self.output = f(self.inpt)
        else:
            self.a = self.declare(
                (1, 1, self.n_output, 1, 1)
            )
            self.output = prelu(self.inpt, self.a)
Example #22
    def _forward(self):
        # A branch that reused flipped convolution filters for the
        # deconvolution was removed: the model currently does not share
        # filters between the conv and deconv paths, so the weights and bias
        # are declared fresh below. (Deconvolution is correlation, which is
        # why the disabled branch reversed the filter axes.)

        self.weights = self.declare((
            self.n_output, self.n_inpt,
            self.filter_height, self.filter_width))

        self.bias = self.declare((self.n_output,))

        # (An alternative cuDNN path via ``dnn_conv`` with half padding was
        # disabled in favour of the generic ``conv.conv2d`` call below.)
        self.output_in = conv.conv2d(
            self.inpt,
            self.weights,
            image_shape=(
                self.n_samples,
                self.n_inpt,
                self.inpt_height,
                self.inpt_width
            ),
            filter_shape=(self.n_output,
                          self.n_inpt,
                          self.filter_height,
                          self.filter_width),
            subsample=self.subsample,
            border_mode=self.border_mode,
        )

        # Center-crop the convolution output back to the target size
        # (integer division keeps the slice indices integral under Python 3).
        self.output_in = self.output_in[
            :,
            :,
            self.output_in_height // 2 - self.output_height // 2:
            self.output_in_height // 2 + self.output_height // 2,
            self.output_in_width // 2 - self.output_width // 2:
            self.output_in_width // 2 + self.output_width // 2
        ]

        f = lookup(self.transfer, _transfer)
        self.output = f(self.output_in)
Example #23
    def _forward(self):
        w = self.weights = self.declare((self.n_inpt, self.n_output))
        b = self.bias = self.declare(self.n_output)

        self.pres_mean = T.dot(self.inpt_mean, w)
        if self.use_bias:
            self.pres_mean += b

        self.pres_var = T.dot(self.inpt_var, w**2)

        f_transfer = lookup(self.transfer, transfer)
        self.post_mean, self.post_var = f_transfer(self.pres_mean,
                                                   self.pres_var)

        self.outputs = self.post_mean, self.post_var
Example #24
    def _forward(self):
        w = self.weights = self.declare((self.n_inpt, self.n_output))
        b = self.bias = self.declare(self.n_output)

        self.pres_mean = T.dot(self.inpt_mean, w)
        if self.use_bias:
            self.pres_mean += b

        self.pres_var = T.dot(self.inpt_var, w ** 2)

        f_transfer = lookup(self.transfer, transfer)
        self.post_mean, self.post_var = f_transfer(
            self.pres_mean, self.pres_var)

        self.outputs = self.post_mean, self.post_var
Example #25
    def _forward(self):
        # Upsample by zero-interleaving: allocate a zero tensor of the target
        # shape and write the input into every second position. Note that the
        # ::2 strides below hard-code an upsampling factor of 2.
        inpt_shape = self.inpt.shape
        output_shape = (inpt_shape[0], inpt_shape[1],
                        inpt_shape[2] * self.upsample_height,
                        inpt_shape[3] * self.upsample_width)

        new_matrix = T.alloc(0., output_shape[0], output_shape[1],
                             output_shape[2], output_shape[3])
        new_matrix = T.set_subtensor(new_matrix[:, :, ::2, ::2], self.inpt)

        if self.padding_left == 0 and self.padding_top == 0:
            self.output_in = new_matrix
        else:
            self.output_in = T.alloc(
                0., self.inpt.shape[0], self.inpt.shape[1],
                self.output_height, self.output_width)

            self.output_in = T.set_subtensor(
                self.output_in[
                    :,
                    :,
                    self.padding_top:self.padding_top + self.inpt_height * self.upsample_height,
                    self.padding_left:self.padding_left + self.inpt_width * self.upsample_width
                ],
                new_matrix
            )

        f = lookup(self.transfer, _transfer)
        self.output = f(self.output_in)
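
A numpy sketch of the zero-interleaving step above for a factor of 2: the input lands at every second row and column, everything else stays zero.

import numpy as np

def upsample_2x(inpt):
    b, c, h, w = inpt.shape
    out = np.zeros((b, c, 2 * h, 2 * w), dtype=inpt.dtype)
    out[:, :, ::2, ::2] = inpt
    return out

upsample_2x(np.ones((1, 1, 2, 2)))  # -> 4x4 map with ones on the even grid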
Example #26
    def _forward(self):
        self.weights = self.declare((
            self.n_output, self.n_inpt,
            self.filter_height, self.filter_width))
        self.bias = self.declare((self.n_output,))

        self.output_in = conv.conv2d(
            self.inpt, self.weights,
            image_shape=(
                self.n_samples, self.n_inpt, self.inpt_height, self.inpt_width),
            subsample=self.subsample,
            border_mode='valid',
            )

        f = lookup(self.transfer, _transfer)
        self.output = f(self.output_in)
Example #27
    def _forward(self):
        f_loss = lookup(self.loss_ident, _loss)

        self.coord_wise_multi = [
            f_loss(self.target, self.transfer(pred))
            for pred in self.predictions
        ]
        if self.imp_weight is not None:
            self.coord_wise_multi = [
                coord_wise * self.imp_weight
                for coord_wise in self.coord_wise_multi
            ]

        self.sample_wise_multi = [
            coord_wise.sum(self.comp_dim)
            for coord_wise in self.coord_wise_multi
        ]
        self.total_multi = [
            sample_wise.mean() for sample_wise in self.sample_wise_multi
        ]

        self.total = T.zeros(self.total_multi[0].shape)
        for tot, pw in zip(self.total_multi, self.p_weights):
            self.total += tot * pw

        if self.mode == 'mean':
            self.total /= len(self.predictions)
Example #28
    def _init_exprs(self):
        inpt = T.matrix('inpt')
        target = T.matrix('target')
        parameters = ParameterSet()

        if theano.config.compute_test_value:
            inpt.tag.test_value = np.empty((2, self.n_inpt))
            target.tag.test_value = np.empty((2, self.n_output))

        self.mlp = neural.FastDropoutMlp(inpt,
                                         self.n_inpt,
                                         self.n_hiddens,
                                         self.n_output,
                                         self.hidden_transfers,
                                         self.out_transfer,
                                         self.p_dropout_inpt,
                                         self.p_dropout_hiddens,
                                         declare=parameters.declare)

        if self.imp_weight:
            imp_weight = T.matrix('imp_weight')
            if theano.config.compute_test_value:
                imp_weight.tag.test_value = np.empty((2, self.n_output))
        else:
            imp_weight = None

        output = T.concatenate(self.mlp.outputs, 1)

        self.loss_layer = SupervisedLoss(
            target,
            output,
            loss=lookup(self.loss_ident, vp_loss),
            imp_weight=imp_weight,
            declare=parameters.declare,
        )

        SupervisedModel.__init__(self,
                                 inpt=inpt,
                                 target=target,
                                 output=output,
                                 loss=self.loss_layer.total,
                                 parameters=parameters)
        self.exprs['imp_weight'] = imp_weight
Example #29
    def _forward(self):
        self.d_cnn = Cnn3dFlex(inpt=self.inpt,
                               image_height=self.image_height,
                               image_width=self.image_width,
                               image_depth=self.image_depth,
                               n_channel=self.n_channel,
                               n_hiddens=self.n_hiddens_conv,
                               filter_shapes=self.d_filter_shapes,
                               pool_shapes=self.down_pools,
                               hidden_transfers=self.hidden_transfers_conv,
                               declare=self.declare,
                               name=self.name,
                               border_modes=self.b_modes_down,
                               implementation=self.implementation,
                               strides=self.strides_down)

        d_out_height = self.d_cnn.layers[-1].output_height
        d_out_width = self.d_cnn.layers[-1].output_width
        d_out_depth = self.d_cnn.layers[-1].output_depth

        self.u_cnn = UpSampleNetwork3d(
            inpt=self.d_cnn.output,
            image_height=d_out_height,
            image_width=d_out_width,
            image_depth=d_out_depth,
            n_channel=self.n_hiddens_conv[-1],
            n_hiddens=self.n_hiddens_upconv,
            filter_shapes=self.u_filter_shapes,
            pool_shapes=self.up_pools,
            hidden_transfers=self.hidden_transfers_upconv,
            border_modes=self.b_modes_up,
            declare=self.declare,
            name=self.name,
            implementation=self.implementation,
            up_factors=self.up_factors)

        output = self.u_cnn.output.dimshuffle(0, 3, 4, 1, 2)
        output = T.reshape(output, (-1, self.n_hiddens_upconv[-1]))

        f = lookup(self.out_transfer, _transfer)
        self.output = T.reshape(f(output), (1, -1, self.n_hiddens_upconv[-1]))
Example #30
    def _forward(self):
        self.weights = self.declare((
            self.n_output, self.n_inpt,
            self.filter_height, self.filter_width))
        self.bias = self.declare((self.n_output,))
        # (A cuDNN ``dnn_conv`` path was disabled in favour of ``conv.conv2d``.)
        self.output_in = conv.conv2d(
            self.inpt,
            self.weights,
            image_shape=(
                self.n_samples,
                self.n_inpt,
                self.inpt_height,
                self.inpt_width
            ),
            filter_shape=(self.n_output,
                          self.n_inpt,
                          self.filter_height,
                          self.filter_width),
            subsample=self.subsample,
            border_mode=self.border_mode,
        )

        if self.border_mode == "full":
            # Center-crop the 'full' convolution output back to the target
            # size (integer division keeps the slice indices integral).
            self.output_in = self.output_in[
                :,
                :,
                self.output_in_height // 2 - self.output_height // 2:
                self.output_in_height // 2 + self.output_height // 2,
                self.output_in_width // 2 - self.output_width // 2:
                self.output_in_width // 2 + self.output_width // 2
            ]

        f = lookup(self.transfer, _transfer)
        self.output = f(self.output_in)
Example #31
    def _init_exprs(self):
        inpt = T.matrix("inpt")
        target = T.matrix("target")
        parameters = ParameterSet()

        if theano.config.compute_test_value:
            inpt.tag.test_value = np.empty((2, self.n_inpt))
            target.tag.test_value = np.empty((2, self.n_output))

        self.mlp = neural.FastDropoutMlp(
            inpt,
            self.n_inpt,
            self.n_hiddens,
            self.n_output,
            self.hidden_transfers,
            self.out_transfer,
            self.p_dropout_inpt,
            self.p_dropout_hiddens,
            declare=parameters.declare,
        )

        if self.imp_weight:
            imp_weight = T.matrix("imp_weight")
            if theano.config.compute_test_value:
                imp_weight.tag.test_value = np.empty((2, self.n_output))
        else:
            imp_weight = None

        output = T.concatenate(self.mlp.outputs, 1)

        self.loss_layer = SupervisedLoss(
            target, output, loss=lookup(self.loss_ident, vp_loss), imp_weight=imp_weight, declare=parameters.declare
        )

        SupervisedModel.__init__(
            self, inpt=inpt, target=target, output=output, loss=self.loss_layer.total, parameters=parameters
        )
        self.exprs["imp_weight"] = imp_weight
Example #32
def fawn_recurrent(
    inpt_mean, inpt_var, weights_mean, weights_var,
    f,
    initial_mean, initial_var):

    f_transfer = lookup(f, transfer_)
    def step(inpt_mean, inpt_var, him_m1, hiv_m1, hom_m1, hov_m1):
        wm, wv = weights_mean, weights_var

        pres_mean = T.dot(inpt_mean, wm)
        pres_var = (T.dot(inpt_mean ** 2, wv)
                    + T.dot(inpt_var, wm ** 2)
                    + T.dot(inpt_var, wv)
                    )

        post_mean, post_var = f_transfer(pres_mean, pres_var)
        return pres_mean, pres_var, post_mean, post_var


    if initial_mean.ndim == 1:
        initial_mean = repeat(
            initial_mean.dimshuffle('x', 0), inpt_mean.shape[1], axis=0)
    if initial_var.ndim == 1:
        initial_var = repeat(
            initial_var.dimshuffle('x', 0), inpt_mean.shape[1], axis=0)

    (hidden_in_mean_rec, hidden_in_var_rec, hidden_mean_rec, hidden_var_rec), _ = theano.scan(
        step,
        sequences=[inpt_mean, inpt_var],
        outputs_info=[T.zeros_like(inpt_mean[0]),
                      T.zeros_like(inpt_mean[0]),
                      initial_mean,
                      initial_var])

    return (hidden_in_mean_rec, hidden_in_var_rec,
            hidden_mean_rec, hidden_var_rec)
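
The pres_var expression in step uses the variance of a product of independent variables: Var(x*w) = E[x]^2 Var(w) + Var(x) E[w]^2 + Var(x) Var(w). A quick Monte Carlo sanity check of that identity (the moments are made up):

import numpy as np

rng = np.random.RandomState(0)
x = rng.normal(1.5, 0.7, size=1000000)
w = rng.normal(-0.5, 0.3, size=1000000)

empirical = (x * w).var()
analytic = 1.5 ** 2 * 0.3 ** 2 + 0.7 ** 2 * (-0.5) ** 2 + 0.7 ** 2 * 0.3 ** 2
# empirical and analytic agree to roughly three decimal places (~0.369)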
Example #33
    def _init_exprs(self):
        inpt, target, imp_weight = self._init_inpts()

        parameters = ParameterSet()

        self.rnn = neural.FastDropoutRnn(
            inpt,
            self.n_inpt,
            self.n_hiddens,
            self.n_output,
            self.hidden_transfers,
            self.out_transfer,
            p_dropout_inpt=self.p_dropout_inpt,
            p_dropout_hiddens=self.p_dropout_hiddens,
            p_dropout_hidden_to_out=self.p_dropout_hidden_to_out,
            pooling=self.pooling,
            declare=parameters.declare)

        f_loss = lookup(self.loss_ident, vp_loss)
        output = T.concatenate(self.rnn.outputs, 2)
        self.loss_layer = simple.SupervisedLoss(target,
                                                output,
                                                loss=f_loss,
                                                imp_weight=imp_weight,
                                                declare=parameters.declare,
                                                comp_dim=2)

        SupervisedModel.__init__(self,
                                 inpt=inpt,
                                 target=target,
                                 output=output,
                                 loss=self.loss_layer.total,
                                 parameters=parameters)

        if self.imp_weight:
            self.exprs['imp_weight'] = imp_weight
Example #34
File: base.py Project: Wiebke/breze
    def _forward(self):
        transfers = self.hidden_transfers
        transfers = [lookup(i, vp_transfer) for i in transfers]
        transfer_insizes = [getattr(i, 'in_size', 1) for i in transfers]
        transfer_outsizes = [1] + [getattr(i, 'out_size', 1) for i in transfers]

        n_incoming = [self.n_inpt] + self.n_hiddens[:-1]
        n_outgoing = self.n_hiddens

        p_dropouts = self.p_dropout_hiddens

        n_time_steps, _, _ = self.inpt.shape

        self.layers = []
        inpt_var = T.zeros_like(self.inpt)

        if self.p_dropout_inpt == 'parameterized':
            p_dropout_inpt = self.declare((1,))
            p_dropout_inpt = T.nnet.sigmoid(p_dropout_inpt) * 0.49 + 0.01
        else:
            p_dropout_inpt = self.p_dropout_inpt

        fd_layer = vp_simple.FastDropout(
            self.inpt, inpt_var, p_dropout_inpt)
        self.layers.append(fd_layer)
        x_mean, x_var = fd_layer.outputs

        for m, n, t, d, tis, tos in zip(n_incoming, n_outgoing, transfers,
                                        p_dropouts,
                                        transfer_insizes, transfer_outsizes):
            x_mean, x_var = self._make_rec_layer(
                x_mean, x_var, m, n, t, tos, tis, d)

        x_mean_flat = wild_reshape(x_mean, (-1, n))
        x_var_flat = wild_reshape(x_var, (-1, n))
        if self.p_dropout_hidden_to_out == 'parameterized':
            p_dropout_hidden_to_out = self.declare((1,))
            p_dropout_hidden_to_out = T.nnet.sigmoid(
                p_dropout_hidden_to_out) * 0.49 + 0.01
        else:
            p_dropout_hidden_to_out = self.p_dropout_hidden_to_out

        fd = vp_simple.FastDropout(
            x_mean_flat, x_var_flat, p_dropout_hidden_to_out)
        x_mean_flat, x_var_flat = fd.outputs
        affine = vp_simple.AffineNonlinear(
            x_mean_flat, x_var_flat, n, self.n_output, self.out_transfer,
            declare=self.declare)
        output_mean_flat, output_var_flat = affine.outputs
        self.layers += [fd, affine]

        output_mean = wild_reshape(
            output_mean_flat, (n_time_steps, -1, self.n_output))
        output_var = wild_reshape(
            output_var_flat, (n_time_steps, -1, self.n_output))

        if self.pooling:
            raise NotImplementedError()

        self.output = T.concatenate([output_mean, output_var], 2)
        self.outputs = output_mean, output_var
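
When a dropout rate is 'parameterized', the snippet above learns it as a free parameter squashed by a sigmoid and rescaled into the open interval (0.01, 0.50). A plain numpy sketch of that mapping:

import numpy as np

def squash_dropout(raw):
    # Sigmoid maps the raw parameter into (0, 1); the affine rescaling
    # pins the learnable rate to (0.01, 0.50).
    return 1. / (1. + np.exp(-raw)) * 0.49 + 0.01

squash_dropout(-10.), squash_dropout(0.), squash_dropout(10.)
# -> (~0.01, 0.255, ~0.5)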
Example #35
    def _forward(self):
        transfers = self.hidden_transfers
        transfers = [lookup(i, vp_transfer) for i in transfers]
        transfer_insizes = [getattr(i, 'in_size', 1) for i in transfers]
        transfer_outsizes = [1] + [
            getattr(i, 'out_size', 1) for i in transfers
        ]

        n_incoming = [self.n_inpt] + self.n_hiddens[:-1]
        n_outgoing = self.n_hiddens

        p_dropouts = self.p_dropout_hiddens

        n_time_steps, _, _ = self.inpt.shape

        self.layers = []
        inpt_var = T.zeros_like(self.inpt)

        if self.p_dropout_inpt == 'parameterized':
            p_dropout_inpt = self.declare((1, ))
            p_dropout_inpt = T.nnet.sigmoid(p_dropout_inpt) * 0.49 + 0.01
        else:
            p_dropout_inpt = self.p_dropout_inpt

        fd_layer = vp_simple.FastDropout(self.inpt, inpt_var, p_dropout_inpt)
        self.layers.append(fd_layer)
        x_mean, x_var = fd_layer.outputs

        for m, n, t, d, tis, tos in zip(n_incoming, n_outgoing, transfers,
                                        p_dropouts, transfer_insizes,
                                        transfer_outsizes):
            x_mean, x_var = self._make_rec_layer(x_mean, x_var, m, n, t, tos,
                                                 tis, d)

        x_mean_flat = wild_reshape(x_mean, (-1, n))
        x_var_flat = wild_reshape(x_var, (-1, n))
        if self.p_dropout_hidden_to_out == 'parameterized':
            p_dropout_hidden_to_out = self.declare((1, ))
            p_dropout_hidden_to_out = T.nnet.sigmoid(
                p_dropout_hidden_to_out) * 0.49 + 0.01
        else:
            p_dropout_hidden_to_out = self.p_dropout_hidden_to_out

        fd = vp_simple.FastDropout(x_mean_flat, x_var_flat,
                                   p_dropout_hidden_to_out)
        x_mean_flat, x_var_flat = fd.outputs
        affine = vp_simple.AffineNonlinear(x_mean_flat,
                                           x_var_flat,
                                           n,
                                           self.n_output,
                                           self.out_transfer,
                                           declare=self.declare)
        output_mean_flat, output_var_flat = affine.outputs
        self.layers += [fd, affine]

        output_mean = wild_reshape(output_mean_flat,
                                   (n_time_steps, -1, self.n_output))
        output_var = wild_reshape(output_var_flat,
                                  (n_time_steps, -1, self.n_output))

        if self.pooling:
            raise NotImplementedError()

        self.output = T.concatenate([output_mean, output_var], 2)
        self.outputs = output_mean, output_var