Example #1
    def forward(self, conv_out, labels_scaled=None, segSize=None):
        conv5 = conv_out[-1]

        # Pyramid Pooling Module: pool conv5 at several scales, project each
        # pooled map, and upsample back to conv5's spatial size.
        input_size = conv5.size()
        ppm_out = [conv5]
        for pool_scale, pool_conv in zip(self.ppm_pooling, self.ppm_conv):
            ppm_out.append(
                nn.functional.upsample(pool_conv(pool_scale(conv5)),
                                       (input_size[2], input_size[3]),
                                       mode='bilinear'))
        ppm_out = torch.cat(ppm_out, 1)
        f = self.ppm_last_conv(ppm_out)

        # Lateral projections of the lower-level feature maps, collected
        # from the deepest level (f) to the shallowest.
        quad_ins = [f]
        for i in reversed(range(len(conv_out) - 1)):
            quad_ins.append(self.quad_in[i](conv_out[i]))

        # Fuse each level with its coarser (top-down) and finer (bottom-up)
        # neighbours, then predict from the fused features.
        quad_preds = [self.quad_out[0](f)]
        for i in range(1, len(conv_out) - 1):
            conv_eq = quad_ins[i]

            conv_minus = quad_ins[i - 1]
            conv_minus = nn.functional.upsample(
                conv_minus, size=conv_eq.size()[2:],
                mode='bilinear')  # top-down branch

            conv_plus = quad_ins[i + 1]
            conv_plus = gather(conv_plus)  # bottom-up branch

            gcn_in = torch.cat([conv_eq, conv_minus, conv_plus], 1)
            quad_ins[i] = self.quad_gcn(gcn_in)

            quad_preds.append(self.quad_out[i](quad_ins[i]))

        x = quad_preds[-1]

        if self.use_softmax:  # is True during inference
            x = nn.functional.upsample(x, size=segSize, mode='bilinear')
            x = nn.functional.softmax(x[:, 1:, :, :], dim=1)
            return x

        x = nn.functional.log_softmax(x, dim=1)

        # Log-probabilities of the intermediate per-level predictions
        y = []
        for i in reversed(range(len(quad_preds) - 1)):
            y.append(nn.functional.log_softmax(quad_preds[i], dim=1))

        return x, y
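
Note that nn.functional.upsample, used throughout this example, is deprecated in current PyTorch in favour of nn.functional.interpolate. As a minimal sketch, the calls above can be written equivalently as below (align_corners=False matches the post-0.4 default; the tensor is only a dummy feature map):

import torch
import torch.nn.functional as F

x = torch.randn(1, 8, 16, 16)          # dummy feature map (N, C, H, W)
out = F.interpolate(x, size=(32, 32), mode='bilinear', align_corners=False)
print(out.shape)                        # torch.Size([1, 8, 32, 32])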

Example #2
        def grad(x, f, g):
            ''' Evaluates the gradient for the control values.
            f is the associated functional value and g are the values
            of the constraints. '''

            fail = False
            if not ignore_model_errors:
                dj = self.derivative(x, forget=False)
            else:
                try:
                    dj = self.derivative(x, forget=False)
                except Exception:
                    fail = True

            if constraints is not None:
                gJac = np.concatenate([gather(c.jacobian(x)) for c in constraints])
            else:
                gJac = np.zeros(len(x))  # SNOPT fails if no constraints are given, hence add a dummy constraint

            info("j = %f\t\t|dJ| = %f" % (f[0], np.linalg.norm(dj)))
            return np.array([dj]), gJac, fail
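
Both this example and the next one call a gather() helper that is not shown. As a rough, serial-only stand-in, it can be thought of as collecting the entries of a (possibly MPI-distributed) vector-like object into one flat NumPy array; the get_local() call and the fallback below are assumptions for illustration, not the library's actual implementation:

import numpy as np

def gather(vec):
    # Serial stand-in: return all entries of a vector-like object as one
    # flat array of doubles. A real implementation would also collect the
    # entries held on other MPI ranks.
    if hasattr(vec, "get_local"):       # dolfin-style vector (assumption)
        return np.asarray(vec.get_local(), dtype="d")
    return np.asarray(vec, dtype="d")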

Example #3
import numpy as np


def get_global(m_list):
    ''' Takes a list of distributed objects and returns one np array containing their (serialised) values '''
    if not isinstance(m_list, (list, tuple)):
        m_list = [m_list]

    m_global = []
    for m in m_list:

        # Parameters of type float
        if m is None or isinstance(m, float):
            m_global.append(m)

        elif hasattr(m, "tolist"):
            m_global += m.tolist()

        # Control of type Function
        elif hasattr(m, "vector") or hasattr(m, "gather"):
            if not hasattr(m, "gather"):
                m_v = m.vector()
            else:
                m_v = m
            m_a = gather(m_v)

            m_global += m_a.tolist()

        # Parameters of type Constant
        elif hasattr(m, "value_size"):
            a = np.zeros(m.value_size())
            p = np.zeros(m.value_size())
            m.eval(a, p)
            m_global += a.tolist()

        else:
            raise TypeError('Unknown control type %s.' % str(type(m)))

    return np.array(m_global, dtype='d')
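
A quick usage sketch (with numpy imported as above): a plain float and a NumPy array are flattened into one array of doubles:

controls = [3.0, np.array([1.0, 2.0])]
print(get_global(controls))   # prints [3. 1. 2.]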