Example #1
0
    def forward(self, x, t):
        """Return the total training loss: final prediction loss plus one
        auxiliary softmax cross-entropy per hierarchy level.

        Also reports every per-level loss, the final-resolution loss, and
        the combined loss through the chainer reporter.

        NOTE(review): assumes ``self.model(x, save_hierarchy=True)`` returns
        the final logits plus a dict mapping level index -> logit volume —
        confirm against the model definition.
        """
        per_level_losses = {}

        y, hy = self.model(x, save_hierarchy=True)
        final_loss = F.softmax_cross_entropy(y, t)
        total = final_loss

        # Optionally keep the raw predictions for later inspection.
        if self.keep_inference:
            self.y = y
            self.hy = hy

        # Walk the hierarchy from the largest level key downward, halving the
        # label volume once per level so it matches that level's resolution.
        shrunk_labels = F.expand_dims(
            chainer.Variable(t.astype(cp.float32)), axis=1)
        for level, volume in sorted(
                hy.items(), key=lambda item: item[0], reverse=True):
            shrunk_labels = F.max_pooling_3d(shrunk_labels, ksize=2, stride=2)
            level_loss = F.softmax_cross_entropy(
                volume,
                chainer.Variable(shrunk_labels[:, 0, ...].data.astype(cp.int32)))
            per_level_losses[level] = level_loss
            total += level_loss

        # Report every component loss under a distinct key.
        metrics = {
            'loss_l{}'.format(k): v for k, v in per_level_losses.items()
        }
        metrics['loss_l{}'.format(max(per_level_losses) + 1)] = final_loss
        metrics['loss'] = total
        reporter.report(metrics, self.observer)

        return total
Example #2
0
    def predict(self, loading, fnc, net_type, spatial_map, **kwargs):
        """Run the full forward pass and return the network output.

        Encodes each spatial map channel with a shared 3-D CNN, combines the
        per-channel codes with ``net_type`` into node features, runs
        ``num_layer`` rounds of edge/node message passing over the FNC graph,
        then pools nodes and classifies together with ``loading``.

        NOTE(review): assumes spatial_map is an (xp) ndarray shaped
        (batch, channel, x, y, z) — the unpack below requires exactly rank 5.
        """
        batch_size, channel, x, y, z = spatial_map.shape

        xp = chainer.cuda.get_array_module(spatial_map)
        # Scale to roughly [-2, 2] and clip outliers.
        spatial_map = spatial_map.astype(xp.float32) / 3000.0
        spatial_map[spatial_map > 2.0] = 2.0
        spatial_map[spatial_map < -2.0] = -2.0

        # Fold channels into the batch axis so one 3-D CNN encodes each map.
        sm = spatial_map.reshape(batch_size * channel, 1, x, y, z)

        # Pad spatial dims so the pooling pyramid divides evenly.
        sm = F.pad(sm, ((0, 0), (0, 0), (6, 5), (1, 0), (6, 6)),
                   'constant', constant_values=0)

        sm = F.max_pooling_3d(F.relu(self.bn1(self.conv_3d_1(sm))), ksize=2)  # 64 x 64
        sm = F.max_pooling_3d(F.relu(self.bn2(self.conv_3d_2(sm))), ksize=2)  # 32 x 32
        sm = F.max_pooling_3d(F.relu(self.bn3(self.conv_3d_3(sm))), ksize=2)  # 16 x 16
        sm = F.max_pooling_3d(F.relu(self.bn4(self.conv_3d_4(sm))), ksize=4)  # 8 x 8

        # Back to (batch, channel, feature) node features.
        sm = sm.reshape(batch_size, channel, 32)

        h = self.gn1(F.concat([net_type, sm], axis=2))
        e = F.expand_dims(fnc, 3)

        # adj = (xp.abs(fnc) > 1e-5).astype(np.float32)
        # adj_mask = F.tile(F.expand_dims(adj, 3), (1, 1, 1, self.edge_dim))

        # Alternate edge updates and node (interaction) updates.
        for layer in range(self.num_layer):
            e = self['eup{}'.format(layer)](e, h)
            h = self['int{}'.format(layer)](h, e)

        h_mean = F.mean(h, axis=1)
        # BUG FIX: was ``F.mean(h, axis=1)`` — a copy-paste of the line above,
        # which made h_max identical to h_mean. The variable name and the
        # mean+max readout pattern indicate max pooling was intended.
        h_max = F.max(h, axis=1)
        h = F.concat([loading * 10.0, h_mean, h_max], 1)

        # h = self.gn2(h)
        # h = F.concat([loading * 10.0, h.reshape(batch_size, -1)], 1)

        h = F.relu(self.n1(h))
        out = self.n2(h)
        return out
Example #3
0
    def __call__(self, x):
        """Forward pass: two conv layers, global max pooling, then a 3-layer
        MLP head; returns features shaped (batch_size, -1, 128).

        The conv layout is selected by ``CONFIG.conv_type``:
        "3D" treats the sequence axis as depth, "2D" folds the sequence into
        the batch, "k" groups frames in pairs before 3-D convolution.
        """
        dropout_ratio = CONFIG.fc_dropout_rate

        batch_size, sequence_len, ch, w, h_ = x.shape

        if CONFIG.conv_type == "3D":
            x = x.transpose(0, 2, 1, 3, 4)
        elif CONFIG.conv_type == "2D":
            x = x.reshape(-1, ch, w, h_)
        elif CONFIG.conv_type == "k":
            x = x.reshape(-1, 2, ch, w, h_)
            x = x.transpose(0, 2, 1, 3, 4)

        h = self.conv5_1(x)
        if self.use_bn:
            h = self.bn5_1(h)
        h = F.relu(h)

        # BUG FIX: was ``self.conv5_2(x)`` — feeding the raw input to the
        # second conv and silently discarding the entire conv5_1/bn5_1/relu
        # result computed above. The layers are meant to be chained.
        h = self.conv5_2(h)
        if self.use_bn:
            h = self.bn5_2(h)
        h = F.relu(h)

        if CONFIG.conv_type == "3D":
            # batch_size, ch, sequence_length, w, h  >> batch_size, sequence_length, ch, w, h
            h = h.transpose(0, 2, 1, 3, 4)
            # batch_size, sequence_length, ch, w, h >> batch_size*sequence_len, ch, w, h
            h = h.reshape(batch_size * sequence_len, ch, w, h_)
            h = F.max_pooling_2d(h, ksize=h.shape[-1], stride=1, pad=0)

        elif CONFIG.conv_type == "2D":
            h = F.max_pooling_2d(h, ksize=h.shape[-1], stride=1, pad=0)

        elif CONFIG.conv_type == "k":
            # Global max pool over (sequence, w, h) at once.
            h = F.max_pooling_3d(h,
                                 ksize=(h.shape[2], h.shape[3], h.shape[4]),
                                 stride=1,
                                 pad=0)

        h = self.fc1(h)
        h = F.relu(h)
        if dropout_ratio > 0:
            h = F.dropout(h, dropout_ratio)
        h = self.fc2(h)
        h = F.relu(h)

        h = self.fc3(h)

        h = h.reshape(batch_size, -1, 128)
        return h
Example #4
0
 def test_max_pooling_3d_invalid(self):
     """max_pooling_3d must reject data of the wrong dimensionality."""
     data, kernel = self._get_data(2)
     with self.assertRaises(ValueError):
         functions.max_pooling_3d(data, kernel)
Example #5
0
 def test_max_pooling_3d(self):
     """max_pooling_3d must agree with the generic N-d implementation."""
     data, kernel = self._get_data(3)
     expected = functions.max_pooling_nd(data, kernel)
     actual = functions.max_pooling_3d(data, kernel)
     testing.assert_allclose(expected.data, actual.data)
Example #6
0
 def test_max_pooling_3d_invalid(self):
     """A non-3-d input must raise ValueError from max_pooling_3d."""
     x, ksize = self._get_data(2)
     with self.assertRaises(ValueError):
         functions.max_pooling_3d(x, ksize)
Example #7
0
 def test_max_pooling_3d(self):
     """The 3-d specialization matches max_pooling_nd on 3-d data."""
     x, ksize = self._get_data(3)
     testing.assert_allclose(
         functions.max_pooling_nd(x, ksize).data,
         functions.max_pooling_3d(x, ksize).data,
     )