Code Example #1
File: test_copy.py Project: zwh930712/chainer
    def _check_forward_internal(self, dst_device_spec, src_device, dst_device,
                                x_mode):
        x = src_device.send(self.x)

        if x_mode == 'array':
            pass
        elif x_mode == 'non_requires_grad':
            x = chainer.Variable(x, requires_grad=False)
        elif x_mode == 'requires_grad':
            x = chainer.Variable(x, requires_grad=True)
        else:
            assert False, x_mode

        error_expected = ((src_device.xp is chainerx) !=
                          (dst_device.xp is chainerx)
                          and x_mode == 'requires_grad')
        if error_expected:
            with pytest.raises(RuntimeError):
                functions.copy(x, dst_device_spec)
            return

        y = functions.copy(x, dst_device_spec)

        assert y.device == dst_device
        assert backend.get_device_from_array(y.array) == dst_device
        assert y.dtype == self.dtype
        numpy.testing.assert_array_equal(_numpy_device.send(y.array), self.x)
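A minimal, CPU-only sketch of what this test exercises (the shapes and dtype here are illustrative, not taken from the test class): F.copy inserts a device-transfer node into the computational graph, so the copy is differentiable and the gradient arrives back on the source device.

import numpy as np
import chainer
import chainer.functions as F

x = chainer.Variable(np.random.uniform(-1, 1, (10, 5)).astype(np.float32))
y = F.copy(x, -1)               # -1 is the CPU device id; a no-op transfer
                                # here, but still a real node in the graph
y.grad = np.ones_like(y.array)  # seed the backward pass
y.backward()
assert np.array_equal(x.grad, np.ones_like(x.array))  # gradient flowed back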
Code Example #2
    def forward(self, x):
        if self.device0 != self.device1:
            # assume x is on device0
            x1 = F.copy(x, self.device1)

            z0 = self.first0(x)
            z1 = self.first1(x1)

            # synchronize
            h0 = z0 + F.copy(z1, self.device0)
            h1 = z1 + F.copy(z0, self.device1)

            y0 = self.second0(F.relu(h0))
            y1 = self.second1(F.relu(h1))

            y = y0 + F.copy(y1, self.device0)
            return y  # output is on device0
        else:
            z0 = self.first0(x)
            z1 = self.first1(x)
            h = z0 + z1

            y0 = self.second0(F.relu(h))
            y1 = self.second1(F.relu(h))
            y = y0 + y1

            return y
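The forward pass above assumes each link already lives on its own device. A hedged sketch of how such a chain could be constructed; the link names follow the forward pass, but the layer sizes and the to_gpu() placement are assumptions, not taken from the original project.

import chainer
import chainer.links as L

class ParallelMLP(chainer.Chain):
    def __init__(self, n_in, n_mid, n_out, device0=0, device1=1):
        super(ParallelMLP, self).__init__()
        self.device0, self.device1 = device0, device1
        with self.init_scope():
            self.first0 = L.Linear(n_in, n_mid)    # placed on device0
            self.first1 = L.Linear(n_in, n_mid)    # placed on device1
            self.second0 = L.Linear(n_mid, n_out)  # placed on device0
            self.second1 = L.Linear(n_mid, n_out)  # placed on device1
        if device0 != device1:
            self.first0.to_gpu(device0)
            self.second0.to_gpu(device0)
            self.first1.to_gpu(device1)
            self.second1.to_gpu(device1)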
Code Example #3
def forward(x_data, y_data, train=True):
    # Neural net architecture
    x_0 = chainer.Variable(cuda.to_gpu(x_data, 0), volatile=not train)
    x_1 = chainer.Variable(cuda.to_gpu(x_data, 1), volatile=not train)
    t = chainer.Variable(cuda.to_gpu(y_data, 0), volatile=not train)

    h1_0 = F.dropout(F.relu(model.gpu0.l1(x_0)), train=train)
    h1_1 = F.dropout(F.relu(model.gpu1.l1(x_1)), train=train)

    h2_0 = F.dropout(F.relu(model.gpu0.l2(h1_0)), train=train)
    h2_1 = F.dropout(F.relu(model.gpu1.l2(h1_1)), train=train)

    h3_0 = F.dropout(F.relu(model.gpu0.l3(h2_0)), train=train)
    h3_1 = F.dropout(F.relu(model.gpu1.l3(h2_1)), train=train)

    # Synchronize
    h3_0 += F.copy(h3_1, 0)
    h3_1 = F.copy(h3_0, 1)

    h4_0 = F.dropout(F.relu(model.gpu0.l4(h3_0)), train=train)
    h4_1 = F.dropout(F.relu(model.gpu1.l4(h3_1)), train=train)

    h5_0 = F.dropout(F.relu(model.gpu0.l5(h4_0)), train=train)
    h5_1 = F.dropout(F.relu(model.gpu1.l5(h4_1)), train=train)

    h6_0 = F.relu(model.gpu0.l6(h5_0))
    h6_1 = F.relu(model.gpu1.l6(h5_1))

    # Synchronize
    y = h6_0 + F.copy(h6_1, 0)
    return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
Code Example #4
 def __call__(self, x):
     if self._device_id is not None and self.gpu_for_nn_only:
         x = F.copy(x, self._device_id)
     y = self.l(x)
     if self._device_id is not None and self.gpu_for_nn_only:
         y = F.copy(y, -1)
     return y
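A hedged sketch (hypothetical class name and layer size) of how the wrapper above could be set up: the data pipeline stays on the CPU, and F.copy hops onto the GPU only around the linear layer.

import chainer
import chainer.links as L

class GpuForNNOnly(chainer.Chain):
    def __init__(self, n_in, n_out, device_id=None, gpu_for_nn_only=True):
        super(GpuForNNOnly, self).__init__()
        self._device_id = device_id
        self.gpu_for_nn_only = gpu_for_nn_only
        with self.init_scope():
            self.l = L.Linear(n_in, n_out)
        if device_id is not None:
            self.l.to_gpu(device_id)  # only the link moves to the GPU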
Code Example #5
File: net.py Project: KojiOchiai/Parallel-PredNet
    def __call__(self, x):
        A = [None] * self.layers
        R = [None] * (self.layers - 1)

        # update layers
        for nth in range(self.layers):
            if nth == 0 and nth == self.layers - 1:
                (_, _) = self.Layer0(x)
            elif nth == 0:
                (A[1], _) = self.Layer0(x, self.R0)
            elif nth == self.layers - 1:
                (_, R[nth - 1]) = getattr(self, 'Layer' + str(nth))(
                                    getattr(self, 'A' + str(nth)))
            else:
                (A[nth + 1], R[nth - 1]) = getattr(self, 'Layer' + str(nth))(
                                          getattr(self, 'A' + str(nth)),
                                          getattr(self, 'R' + str(nth)))

        # copy data to device
        # nth still holds self.layers - 1 from the update loop above
        if isinstance(x.data, numpy.ndarray) or self.devices[nth] is None:
            for nth in range(self.layers - 1):
                setattr(self, 'A' + str(nth + 1), A[nth + 1])
                setattr(self, 'R' + str(nth), R[nth])
        else:
            for nth in range(self.layers - 1):
                setattr(self, 'A' + str(nth + 1),
                        F.copy(A[nth + 1], self.devices[nth + 1]))
                setattr(self, 'R' + str(nth),
                        F.copy(R[nth], self.devices[nth]))

        return self.Layer0.P
Code Example #6
def forward(x_data, y_data, train=True):
    x_0 = chainer.Variable(cuda.to_gpu(x_data, 0), volatile=not train)
    x_1 = chainer.Variable(cuda.to_gpu(x_data, 1), volatile=not train)
    t = chainer.Variable(cuda.to_gpu(y_data, 0), volatile=not train)

    h1_0 = F.dropout(F.relu(model.gpu0.l1(x_0)),  train=train)
    h1_1 = F.dropout(F.relu(model.gpu1.l1(x_1)),  train=train)

    h2_0 = F.dropout(F.relu(model.gpu0.l2(h1_0)), train=train)
    h2_1 = F.dropout(F.relu(model.gpu1.l2(h1_1)), train=train)

    h3_0 = F.dropout(F.relu(model.gpu0.l3(h2_0)), train=train)
    h3_1 = F.dropout(F.relu(model.gpu1.l3(h2_1)), train=train)

    # Synchronize
    h3_0 += F.copy(h3_1, 0)
    h3_1 = F.copy(h3_0, 1)

    h4_0 = F.dropout(F.relu(model.gpu0.l4(h3_0)), train=train)
    h4_1 = F.dropout(F.relu(model.gpu1.l4(h3_1)), train=train)

    h5_0 = F.dropout(F.relu(model.gpu0.l5(h4_0)),  train=train)
    h5_1 = F.dropout(F.relu(model.gpu1.l5(h4_1)),  train=train)

    h6_0 = F.relu(model.gpu0.l6(h5_0))
    h6_1 = F.relu(model.gpu1.l6(h5_1))

    # Synchronize
    y = h6_0 + F.copy(h6_1, 0)
    return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
Code Example #7
    def forward(self, x):
        if self.gpu0 != self.gpu1:
            # assume x is on gpu0
            x1 = F.copy(x, self.gpu1)

            z0 = self.first0(x)
            z1 = self.first1(x1)

            # synchronize
            h0 = z0 + F.copy(z1, self.gpu0)
            h1 = z1 + F.copy(z0, self.gpu1)

            y0 = self.second0(F.relu(h0))
            y1 = self.second1(F.relu(h1))

            y = y0 + F.copy(y1, self.gpu0)
            return y  # output is on gpu0
        else:
            z0 = self.first0(x)
            z1 = self.first1(x)
            h = z0 + z1

            y0 = self.second0(F.relu(h))
            y1 = self.second1(F.relu(h))
            y = y0 + y1

            return y
Code Example #8
File: test_copy.py Project: zwh930712/chainer
    def check_invalid(self, src_device, dst_device_spec):
        x = src_device.send(
            numpy.random.uniform(-1, 1, (10, 5)).astype(self.dtype))

        x_var = chainer.Variable(x)

        with pytest.raises(RuntimeError):
            functions.copy(x_var, dst_device_spec)
Code Example #9
    def update_core(self):
        optimizer = self.get_optimizer('main')
        # the optimizer target is the main wrapper class: au_rcnn_train_chain, space_time_rnn
        model_main = optimizer.target
        loss_head_module = model_main.loss_head_module

        models_others = {k: v for k, v in self._models.items()
                         if v != model_main.au_rcnn_train_chain}

        batch = self.get_iterator('main').next()
        in_arrays = self.converter(batch, -1)
        images, bboxes, labels = in_arrays
        batch_size, T, channel, height, width = images.shape
        images = images.reshape(batch_size * T, channel, height, width)  # B*T, C, H, W
        bboxes = bboxes.reshape(batch_size * T, config.BOX_NUM[self.database], 4)  # B*T, 9, 4
        labels = chainer.cuda.to_gpu(labels, device=self._devices["main"])
        # labels = labels.reshape(batch_size * T, config.BOX_NUM[self.database], -1)  # B*T, 9, 12/22

        # For reducing memory
        for model in six.itervalues(models_others):
            model.cleargrads()
        model_main.cleargrads()
        #
        # Split the batch into sub-batches.
        #
        n = len(self._models)
        in_arrays_list = {}
        sub_index = self.split_list(list(range(batch_size * T)), n)
        for i, key in enumerate(sorted(self._models.keys(), key=lambda e: str(e))):  # self._models holds au_rcnn_train_chain instances, including the main GPU's
            in_arrays_list[key] = (F.copy(images[sub_index[i]], self._devices.get(key, self._devices["main"])),
                                   F.copy(bboxes[sub_index[i]], self._devices.get(key, self._devices["main"])))

        # self._models holds all au_rcnn_train_chain instances, including the one on the main GPU
        with function.force_backprop_mode():
            roi_feature_multi_gpu = []
            for model_key, au_rcnn_train_chain in sorted(self._models.items(), key=lambda e:str(e[0])):
                images, bboxes = in_arrays_list[model_key]
                assert int(images.data.device) == au_rcnn_train_chain._device_id
                roi_feature = au_rcnn_train_chain(images, bboxes)  # shape =(B*T//n, F, D)
                roi_feature_multi_gpu.append(F.copy(roi_feature, self._devices["main"]))
            roi_feature = F.concat(roi_feature_multi_gpu, axis=0)  # multiple batch combine
            roi_feature = roi_feature.reshape(batch_size, T, config.BOX_NUM[self.database], roi_feature.shape[-1])
            loss = loss_head_module(roi_feature, labels)

        model_main.cleargrads()
        for model in six.itervalues(self._models):
            model.cleargrads()
        loss.backward()
        for model in six.itervalues(models_others):
            model_main.au_rcnn_train_chain.addgrads(model)
        optimizer.update()
        for model in six.itervalues(models_others):
            model.copyparams(model_main.au_rcnn_train_chain)  # only the main model updates parameters, so copy them back to the other models
Code Example #10
    def __call__(self, x):
        z0 = self.mlp1_gpu0(x)
        z1 = self.mlp1_gpu1(F.copy(x, 1))

        h0 = F.relu(z0 + F.copy(z1, 0))
        h1 = F.relu(z1 + F.copy(z0, 1))

        y0 = self.mlp2_gpu0(h0)
        y1 = self.mlp2_gpu1(h1)

        y = y0 + F.copy(y1, 0)
        return y
Code Example #11
    def prepare_images(self, images):
        if self.xp != np:
            device = images.data.device
            images = F.copy(images, -1)

        converted_images = [
            resnet.prepare(image.data, size=None)
            for image in F.separate(images, axis=0)
        ]
        converted_images = F.stack(converted_images, axis=0)

        if self.xp != np:
            converted_images = F.copy(converted_images, device.id)
        return converted_images
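A generic sketch of the round-trip idiom above (the helper name on_cpu is hypothetical, and it assumes Chainer 6+ device specifiers, which F.copy accepts as in the test examples below). Both hops use F.copy, so the transfer stays inside the computational graph.

import chainer
import chainer.functions as F
from chainer import backend

def on_cpu(fn, x):
    """Apply fn to x on the CPU, returning the result on x's device."""
    device = backend.get_device_from_array(
        x.array if isinstance(x, chainer.Variable) else x)
    h = F.copy(x, -1)         # hop to the CPU
    h = fn(h)                 # NumPy-backed computation
    return F.copy(h, device)  # hop back to the original device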
Code Example #12
File: test_copy.py Project: zwh930712/chainer
    def test_double_backward(self, src_backend_config, dst_backend_config):
        src_device = src_backend_config.device
        dst_device = dst_backend_config.device
        if (src_device.xp is chainerx) is not (dst_device.xp is chainerx):
            raise unittest.SkipTest(
                'ChainerX to non-ChainerX does not support backward.')

        x = src_backend_config.get_array(self.x)
        gy = dst_backend_config.get_array(self.gy)
        ggx = src_backend_config.get_array(self.ggx)

        x_var = chainer.Variable(x, requires_grad=True)

        y_var = functions.copy(x_var, dst_device)

        y_var.grad = gy

        gy_var = y_var.grad_var
        y_var.backward(enable_double_backprop=True)

        assert x_var.grad_var.requires_grad is True

        x_var.grad_var.grad = ggx
        x_var.grad_var.backward()

        assert gy_var.grad_var.device == dst_device
        assert (backend.get_device_from_array(
            gy_var.grad_var.array) == dst_device)
        numpy.testing.assert_array_equal(
            _numpy_device.send(gy_var.grad_var.array), self.ggx)
Code Example #13
    def __call__(self, atom_array, adj, wle_array=None, is_real_node=None):
        self.reset_state()

        if atom_array.dtype == self.xp.int32:
            h = self.embed(atom_array)
        else:
            # TODO: GraphLinear or GraphMLP can be used.
            h = atom_array

        h0 = functions.copy(h, cuda.get_device_from_array(h.data).id)

        # all combined WLE processing is done here.
        if self.with_wle:
            h_s = self.embed_wle(wle_array)

            # gated sum of the atom embedding and the WLE embedding
            gate_input = self.gate_W1(h) + self.gate_W2(h_s)
            gate_coeff = functions.sigmoid(gate_input)
            h = (1.0 - gate_coeff) * h + gate_coeff * h_s

        additional_kwargs = self.preprocess_addtional_kwargs(
            atom_array, adj, wle_array=wle_array, is_real_node=is_real_node)

        if self.scale_adj:
            adj = rescale_adj(adj)

        g_list = []
        for step in range(self.n_update_layers):
            update_layer_index = 0 if self.weight_tying else step
            h = self.update_layers[update_layer_index](h=h,
                                                       adj=adj,
                                                       **additional_kwargs)

            if self.use_batchnorm:
                h = self.bnorms[update_layer_index](h)

            if self.dropout_ratio > 0.:
                h = functions.dropout(h, ratio=self.dropout_ratio)

            if self.activation is not None and step < self.n_activation:
                h = self.activation(h)

            if self.concat_hidden or self.sum_hidden:
                g = self.readout_layers[step](h=h,
                                              h0=h0,
                                              is_real_node=is_real_node,
                                              **additional_kwargs)
                g_list.append(g)

        if self.concat_hidden:
            return functions.concat(g_list, axis=1)
        else:
            if self.sum_hidden:
                g = functions.sum(functions.stack(g_list), axis=0)
            else:
                g = self.readout_layers[0](h=h,
                                           h0=h0,
                                           is_real_node=is_real_node)

            return g
Code Example #14
    def test_double_backward(self, src_backend_config, dst_backend_config):
        x = src_backend_config.get_array(self.x)
        gy = dst_backend_config.get_array(self.gy)
        ggx = src_backend_config.get_array(self.ggx)
        dst_device = dst_backend_config.device

        x_var = chainer.Variable(x, requires_grad=True)

        y_var = functions.copy(x_var, dst_device)

        y_var.grad = gy

        gy_var = y_var.grad_var
        y_var.backward(enable_double_backprop=True)

        assert x_var.grad_var.requires_grad is True

        x_var.grad_var.grad = ggx
        x_var.grad_var.backward()

        assert gy_var.grad_var.device == dst_device
        assert (backend.get_device_from_array(
            gy_var.grad_var.array) == dst_device)
        numpy.testing.assert_array_equal(
            _numpy_device.send(gy_var.grad_var.array), self.ggx)
Code Example #15
    def test_double_backward(self, src_backend_config, dst_backend_config):
        x = src_backend_config.get_array(self.x)
        gy = dst_backend_config.get_array(self.gy)
        ggx = src_backend_config.get_array(self.ggx)
        dst_device = dst_backend_config.device

        x_var = chainer.Variable(x, requires_grad=True)

        y_var = functions.copy(x_var, dst_device)

        # TODO(niboshi): Remove this workaround after Variable.grad.setter is
        # fixed so that it calls gy.require_grad() internally.
        if dst_backend_config.xp is chainerx:
            gy.require_grad()

        y_var.grad = gy

        gy_var = y_var.grad_var
        y_var.backward(enable_double_backprop=True)

        assert x_var.grad_var.requires_grad is True

        x_var.grad_var.grad = ggx
        x_var.grad_var.backward()

        assert gy_var.grad_var.device == dst_device
        assert (backend.get_device_from_array(
            gy_var.grad_var.array) == dst_device)
        numpy.testing.assert_array_equal(
            _numpy_device.send(gy_var.grad_var.array), self.ggx)
Code Example #16
    def __call__(self, atom_array, adj):
        """Forward propagation

        Args:
            atom_array (numpy.ndarray): minibatch of molecules represented
                with atom IDs (representing C, O, S, ...);
                `atom_array[mol_index, atom_index]` is the `mol_index`-th
                molecule's `atom_index`-th atomic number
            adj (numpy.ndarray): minibatch of adjacency matrices with
                edge-type information

        Returns:
            ~chainer.Variable: minibatch of fingerprint
        """
        # reset state
        self.update_layer.reset_state()
        if atom_array.dtype == self.xp.int32:
            raise "use vector expression"
        else:
            h = atom_array

        h0 = functions.copy(h, self.gpu_device)

        g_list = []
        for step in range(self.n_layers):
            h = self.update(h, adj, step)
            if self.concat_hidden:
                g = self.readout(h, h0, step)
                g_list.append(g)

        if self.concat_hidden:
            return functions.concat(g_list, axis=1)
        else:
            g = self.readout(h, h0, 0)
            return g
Code Example #17
    def check_forward(self, src_id, dst_id):
        x_data = _to_gpu(self.x_data, src_id)
        x = chainer.Variable(x_data)
        y = functions.copy(x, dst_id)

        self.assertEqual(self.x_data.dtype, self.dtype)
        numpy.testing.assert_array_equal(self.x_data, cuda.to_cpu(y.data))
Code Example #18
File: ggnn_dev_jknet.py Project: Minys233/GCN-BMP
    def __call__(self, atom_array, adj):
        # reset state
        # self.update_layer.reset_state()
        # [layer.reset_state() for layer in self.update_layer]
        if atom_array.dtype == self.xp.int32:
            h = self.embed(atom_array)  # (minibatch, max_num_atoms)
        else:
            h = atom_array
        h0 = functions.copy(h, cuda.get_device_from_array(h.data).id)
        g_list = []
        h_list = []
        for step in range(self.n_layers):
            h = self.update(h, adj, step)

            if self.dropout_rate != 0.0:
                h = functions.dropout(h, ratio=self.dropout_rate)

            if self.concat_hidden:
                g = self.readout(h, h0, step)
                g_list.append(g)

            if self.layer_aggr:
                h_list.append(h)

        if self.concat_hidden:
            return functions.concat(g_list, axis=1)
        elif self.layer_aggr:
            output = self.aggr(h_list)

            return self.readout(output, h0, 0)
        else:
            g = self.readout(h, h0, 0)
            return g
Code Example #19
File: ggnn_chin.py Project: Minys233/GCN-BMP
    def __call__(self, atom_array, adj, is_real_node=None):
        """Forward propagation
        Args:
            atom_array (numpy.ndarray): minibatch of molecules represented
                with atom IDs (representing C, O, S, ...);
                `atom_array[mol_index, atom_index]` is the `mol_index`-th
                molecule's `atom_index`-th atomic number
            adj (numpy.ndarray): minibatch of adjacency matrices with
                edge-type information
            is_real_node (numpy.ndarray): 2-dim array (minibatch, num_nodes);
                1 for a real node, 0 for a virtual node.
                If `None`, all nodes are treated as real nodes.
        Returns:
            ~chainer.Variable: minibatch of fingerprint
        """
        # reset state
        self.reset_state()
        if atom_array.dtype == self.xp.int32:
            h = self.embed(atom_array)  # (minibatch, max_num_atoms)
        else:
            h = atom_array
        h0 = functions.copy(h, cuda.get_device_from_array(h.data).id)
        g_list = []
        for step in range(self.n_layers):
            message_layer_index = 0 if self.weight_tying else step
            h = self.update_layers[message_layer_index](h, adj)
            if self.concat_hidden:
                g = self.readout_layers[step](h, h0, is_real_node)
                g_list.append(g)

        if self.concat_hidden:
            return functions.concat(g_list, axis=1)
        else:
            g = self.readout_layers[0](h, h0, is_real_node)
            return g
Code Example #20
File: test_copy.py Project: 2php/chainer
    def check_forward(self, src_id, dst_id):
        x_data = _to_gpu(self.x_data, src_id)
        x = chainer.Variable(x_data)
        y = functions.copy(x, dst_id)

        self.assertEqual(self.x_data.dtype, self.dtype)
        numpy.testing.assert_array_equal(self.x_data, cuda.to_cpu(y.data))
Code Example #21
File: s_rnn_plus.py Project: zhangxujinsh/AU_R-CNN
 def predict(self,
             x: np.ndarray,
             crf_pact_structure: CRFPackageStructure,
             is_bin=False):
     '''
     :param x:
     :param crf_pact_structure:
     :return: bin array for multi-label, shape = B x N x D
     '''
     with chainer.no_backprop_mode():
         if not isinstance(x, chainer.Variable):
             x = chainer.Variable(x)
         xp = chainer.cuda.get_array_module(x)
         # return shape = B * N * D , B is batch_size(=1 only), N is one video all nodes count, D is each node output vector
          if self.with_crf:  # obsolete: this branch is never entered
             xs = F.expand_dims(x, axis=0)
             crf_pact_structures = [crf_pact_structure]
             hs = self.structural_rnn(
                 xs, crf_pact_structures
             )  # hs shape = B x N x D, B is batch_size
             hs = F.copy(hs, -1)  # data transfer to cpu
             h = hs.data[0]
             pred_labels = self.open_crf.predict(
                 h, crf_pact_structures[0],
                 is_bin=is_bin)  # shape = N x D or N x 1
             return np.asarray(
                 pred_labels,
                 dtype=xp.int32)  # shape =N x D, where D = AU_squeeze_size
         else:
             return self.structural_rnn.predict(
                 x, crf_pact_structure,
                  is_bin=is_bin)  # binary-format labels, N x D
Code Example #22
    def __call__(self, atom_array, adj):
        # reset state
        self.reset_state()
        if atom_array.dtype == self.xp.int32:
            h = self.embed(atom_array)
        else:
            h = atom_array
        if self.readout_func == 'ggnn':
            h0 = functions.copy(h, cuda.get_device_from_array(h.data).id)
            readout_layers = [
                partial(readout_layer, h0=h0)
                for readout_layer in self.readout_layers
            ]
        else:
            readout_layers = self.readout_layers
        g_list = []
        for step in range(self.n_layers):
            message_layer_index = 0 if self.weight_tying else step
            h = self.update_layers[message_layer_index](h, adj)
            if self.concat_hidden:
                g = readout_layers[step](h)
                g_list.append(g)

        if self.concat_hidden:
            return functions.concat(g_list, axis=1)
        else:
            g = readout_layers[0](h)
            return g
Code Example #23
    def __call__(self, atom_array, adj):
        """Forward propagation

        Args:
            atom_array (numpy.ndarray): minibatch of molecules represented
                with atom IDs (representing C, O, S, ...);
                `atom_array[mol_index, atom_index]` is the `mol_index`-th
                molecule's `atom_index`-th atomic number
            adj (numpy.ndarray): minibatch of adjacency matrices with
                edge-type information

        Returns:
            ~chainer.Variable: minibatch of fingerprint
        """
        # reset state
        self.update_layer.reset_state()
        if atom_array.dtype == numpy.int32 \
                or atom_array.dtype == cuda.cupy.int32:
            h = self.embed(atom_array)  # (minibatch, max_num_atoms)
        else:
            h = atom_array
        h0 = functions.copy(h, cuda.get_device_from_array(h.data).id)
        g_list = []
        for step in range(self.n_layers):
            h = self.update(h, adj, step)
            if self.concat_hidden:
                g = self.readout(h, h0, step)
                g_list.append(g)

        if self.concat_hidden:
            return functions.concat(g_list, axis=2)
        else:
            g = self.readout(h, h0, 0)
            return g
Code Example #24
    def __call__(self, sparse_batch, is_real_node=None):
        if sparse_batch.x.dtype == self.xp.int32:
            h = self.embed(sparse_batch.x)  # (minibatch, max_num_atoms)
        else:
            h = self.first_mlp(sparse_batch.x)

        h0 = functions.copy(h, cuda.get_device_from_array(h.data).id)

        g_list = []
        for step in range(self.n_message_layers):
            message_layer_index = 0 if self.weight_tying else step
            h = self.update_layers[message_layer_index](
                h, sparse_batch.edge_index)
            if step != self.n_message_layers - 1:
                h = functions.relu(h)
            if self.concat_hidden:
                g = self.readout_layers[step](h, h0, is_real_node)
                g_list.append(g)

        if self.node_embedding:
            return h

        if self.concat_hidden:
            return functions.concat(g_list, axis=1)
        else:
            g = self.readout_layers[0](h, sparse_batch.batch, h0, is_real_node)
            return g
Code Example #25
File: relgat.py Project: ir5/chainer-chemistry
    def __call__(self, atom_array, adj):
        """Forward propagation

        Args:
            atom_array (numpy.ndarray): minibatch of molecules represented
                with atom IDs (representing C, O, S, ...);
                `atom_array[mol_index, atom_index]` is the `mol_index`-th
                molecule's `atom_index`-th atomic number
            adj (numpy.ndarray): minibatch of adjacency matrices with
                edge-type information

        Returns:
            ~chainer.Variable: minibatch of fingerprint
        """
        # reset state
        if atom_array.dtype == self.xp.int32:
            h = self.embed(atom_array)  # (minibatch, max_num_atoms)
        else:
            h = atom_array
        h0 = functions.copy(h, cuda.get_device_from_array(h.data).id)
        g_list = []
        for step in range(self.n_layers):
            message_layer_index = 0 if self.weight_tying else step
            h = self.update_layers[message_layer_index](h, adj)
            if self.concat_hidden:
                g = self.readout_layers[step](h, h0)
                g_list.append(g)

        if self.concat_hidden:
            return functions.concat(g_list, axis=1)
        else:
            g = self.readout_layers[0](h, h0)
            return g
Code Example #26
File: s_rnn_plus.py Project: zhangxujinsh/AU_R-CNN
    def __call__(self, xs: chainer.Variable, crf_pact_structures
                 ):  # crf_pact_structure is batch of CRFPackageStructure
        xp = chainer.cuda.get_array_module(xs.data)  # xs is a batch
        # return shape = B * N * D , B is batch_size, N is one video all nodes count, D is each node output vector dimension
        h = self.structural_rnn(xs, crf_pact_structures)

        if self.with_crf:
            # open_crf only supports CPU mode
            # convert_xs = self.bn(self.convert_dim_fc(xs.reshape(-1, xs.shape[-1])))  # note that we remove batch = 1 dimension
            # h = F.relu(h.reshape(-1, h.shape[-1]) + convert_xs) # just like ResNet
            # h = F.expand_dims(h, 0)  # add one batch dimension
            h = F.copy(h, -1)
            # the ground-truth labels are hidden inside crf_pact_structure's samples; this step computes the loss directly
            loss = self.open_crf(h, crf_pact_structures)
        else:  # only structural_rnn
            ts = self.get_gt_labels(
                xp, crf_pact_structures,
                is_bin=False)  # B x N x 1, and B = 1 forever
            ts = chainer.Variable(
                ts.reshape(-1)
            )  # labels run over 0~L, one more than the ground-truth classes; 0 represents the all-zero label
            h = h.reshape(
                -1, h.shape[-1]
            )  # h covers classes 0~L (L+1 in total), including non_AU = 0, the background class
            assert ts.shape[0] == h.shape[0]
            loss = F.hinge(h, ts, norm='L2', reduce='mean')

            accuracy = F.accuracy(h, ts)

        report_dict = {'loss': loss}
        if not self.with_crf:
            report_dict["accuracy"] = accuracy
        chainer.reporter.report(report_dict, self)
        return loss
Code Example #27
def extract_images(data):
    """Extract image data from array or chainer.Variable"""

    if isinstance(data, list):
        data = [extract_images(d) for d in data]

    if isinstance(data, chainer.Variable):
        data = F.copy(data, -1)
        data = data.array

    if data.ndim > 5:
        raise ValueError("invalid data: data.ndim > 5")
    elif data.ndim == 5:
        data = data[0]  # NCHW

    channels = data.shape[1]
    if channels == 1:  # mono -> replicate to 3 channels
        out_shape = list(data.shape)
        out_shape[1] = 3
        # copy, since np.broadcast_to returns a read-only view
        data = np.broadcast_to(data, out_shape).copy()

    data.flags.writeable = True

    # clip elements to [0, 1]
    data[data <= 0.0] = 0.0
    data[data >= 1.0] = 1.0
    return data
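A hypothetical usage sketch for extract_images above: a batch of 1-channel NCHW images (values roughly in [0, 1]) comes back clipped and replicated to 3 channels, ready for an image writer.

import numpy as np

batch = np.random.uniform(-0.1, 1.1, (4, 1, 32, 32)).astype(np.float32)
images = extract_images(batch)
assert images.shape == (4, 3, 32, 32)
assert images.min() >= 0.0 and images.max() <= 1.0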
Code Example #28
File: ggnn_dev.py Project: Minys233/GCN-BMP
    def __call__(self, atom_array, adj):
        # reset state
        self.atoms_list = []
        self.g_vec_list = []
        self.update_layer.reset_state()
        if atom_array.dtype == self.xp.int32:
            h = self.embed(atom_array)  # (minibatch, max_num_atoms)
        else:
            h = atom_array
        h0 = functions.copy(h, cuda.get_device_from_array(h.data).id)
        g_list = []
        h_list = []
        for step in range(self.n_layers):
            h = self.update(h, adj, step)

            if self.dropout_rate != 0.0:
                h = functions.dropout(h, ratio=self.dropout_rate)

            if self.concat_hidden:
                g = self.readout(h, h0, step)
                g_list.append(g)

            h_list.append(h)
            self.atoms_list.append(h)
            g_vec = self.readout(h, h0, step)
            self.g_vec_list.append(g_vec)

        if self.concat_hidden:
            return functions.concat(g_list, axis=1)
        else:
            g = self.readout(h, h0, 0)
            # g = self.att_readout(h, h_list, 0)
            g = functions.sum(h, axis=1)
            return g
Code Example #29
    def __call__(self, atom_array, adj, super_node=None, is_real_node=None):
        self.reset_state()

        if atom_array.dtype == self.xp.int32:
            h = self.embed(atom_array)
        else:
            # TODO: GraphLinear or GraphMLP can be used.
            h = atom_array

        h0 = functions.copy(h, cuda.get_device_from_array(h.data).id)
        if self.with_gwm:
            h_s = self.embed_super(super_node)

        additional_kwargs = self.preprocess_addtional_kwargs(
            atom_array, adj, super_node=super_node, is_real_node=is_real_node)

        if self.scale_adj:
            adj = rescale_adj(adj)

        g_list = []
        for step in range(self.n_update_layers):
            update_layer_index = 0 if self.weight_tying else step
            h_new = self.update_layers[update_layer_index](h=h,
                                                           adj=adj,
                                                           **additional_kwargs)

            if self.with_gwm:
                h_new, h_s = self.gwm(h, h_new, h_s, update_layer_index)
            h = h_new

            if self.use_batchnorm:
                h = self.bnorms[update_layer_index](h)

            if self.dropout_ratio > 0.:
                h = functions.dropout(h, ratio=self.dropout_ratio)

            if self.activation is not None and step < self.n_activation:
                h = self.activation(h)

            if self.concat_hidden or self.sum_hidden:
                g = self.readout_layers[step](h=h,
                                              h0=h0,
                                              is_real_node=is_real_node,
                                              **additional_kwargs)
                g_list.append(g)

        if self.concat_hidden:
            return functions.concat(g_list, axis=1)
        else:
            if self.sum_hidden:
                g = functions.sum(functions.stack(g_list), axis=0)
            else:
                g = self.readout_layers[0](h=h,
                                           h0=h0,
                                           is_real_node=is_real_node)
            if self.with_gwm:
                g = functions.concat((g, h_s), axis=1)
                g = functions.relu(self.linear_for_concat_super(g))
            return g
Code Example #30
File: _Model.py Project: hukuda222/Chat-Yojo-Bot
 def getDecoderInputEmbeddings(self,
                               xs):  # xs: [arr(l1), ...], still on cpu
     x_len = [len(x) for x in xs]
     x_section = np.cumsum(x_len[:-1])
     vxs = [F.copy(chainer.Variable(x), self.gpu) for x in xs]  # to gpu
     ex = self.model.decoderEmbed(F.concat(tuple(vxs), axis=0))
     exs = F.split_axis(ex, x_section, 0)
     return list(exs)
Code Example #31
    def __call__(self, x):
        # assume x is on gpu0
        x1 = F.copy(x, self.gpu1)

        z0 = self.first0(x)
        z1 = self.first1(x1)

        # synchronize
        h0 = z0 + F.copy(z1, self.gpu0)
        h1 = z1 + F.copy(z0, self.gpu1)

        y0 = self.second0(F.relu(h0))
        y1 = self.second1(F.relu(h1))

        # synchronize
        y = y0 + F.copy(y1, self.gpu0)
        return y  # output is on gpu0
Code Example #32
File: net.py Project: tsubone/test2
    def __call__(self, x):
        # assume x is on GPU 0
        x1 = F.copy(x, 1)

        z0 = self.first0(x)
        z1 = self.first1(x1)

        # sync
        h0 = z0 + F.copy(z1, 0)
        h1 = z1 + F.copy(z0, 1)

        y0 = self.second0(F.relu(h0))
        y1 = self.second1(F.relu(h1))

        # sync
        y = y0 + F.copy(y1, 0)
        return y
Code Example #33
File: net.py Project: butyutyumpa/Autoencoder
    def __call__(self, x):
        # assume x is on GPU 0
        x1 = F.copy(x, 1)

        z0 = self.first0(x)
        z1 = self.first1(x1)

        # sync
        h0 = z0 + F.copy(z1, 0)
        h1 = z1 + F.copy(z0, 1)

        y0 = self.second0(F.relu(h0))
        y1 = self.second1(F.relu(h1))

        # sync
        y = y0 + F.copy(y1, 0)
        return y
Code Example #34
File: autocopy.py Project: xuweidongkobe/kiss
def recurse_copy(obj, device):
    # Recursively move nested tuples/lists of arrays or Variables to `device`.
    if isinstance(obj, (tuple, list)):
        obj = [recurse_copy(sub_obj, device) for sub_obj in obj]
    elif isinstance(obj, (chainer.Variable, cuda.cupy.ndarray, numpy.ndarray)):
        obj = F.copy(obj, device)

    return obj
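A hypothetical usage sketch for recurse_copy above (it assumes the imports used by the function resolve, i.e. a Chainer installation with CuPy available). Nested containers are walked element-wise; device id -1 means "copy to the CPU" in Chainer.

import numpy
import chainer

nested = ([numpy.zeros((2, 3), dtype=numpy.float32)],
          numpy.ones((4,), dtype=numpy.float32))
on_cpu = recurse_copy(nested, -1)
# each leaf is now a chainer.Variable backed by a NumPy array
assert isinstance(on_cpu[0][0], chainer.Variable)
assert isinstance(on_cpu[1], chainer.Variable)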
Code Example #35
def copy():
    x = rand((1, 2, 3, 4))
    y = F.copy(x, -1) - 0
    # A dummy `- 0` layer is inserted because our implementation of the copy
    # plugin reuses the input tensor as the layer output, and a tensor cannot
    # be both an input and an output of a network. When copy is used inside a
    # network with multiple layers, no dummy operation is needed.
    return {'input': x}, {'out': y}
Code Example #36
    def forward(self, x):
        # FIXME: Adding with constant and F.resize_images() using 0th device

        # Forward FCN
        score = super().forward(x)

        # Convert score to probability
        prob = F.softmax(score, axis=1)

        # Increase gradient of the probability
        prob = prob - 0.5
        prob = self.prob_scale(prob)
        prob = F.clip(prob, -0.5, 0.5)
        prob = prob + 0.5

        # Down sampling
        h, w = x.shape[2:4]
        down_shape = (h // self.mat_scale, w // self.mat_scale)
        prob = F.resize_images(prob, down_shape)
        x = F.resize_images(x, down_shape)

        # Split into foreground, background and unknown scores
        prob_b, _, prob_f = F.split_axis(prob, 3, axis=1)   # (n, 1, h, w)

        # Copy to CPU
        x = F.copy(x, -1)
        prob_b = F.copy(prob_b, -1)
        prob_f = F.copy(prob_f, -1)

        # Compute laplacian
        laplacian = _compute_laplacians(x, prob_b, prob_f)

        # Matting
        alpha = self.matting_link(prob_b, prob_f, laplacian)  # (n, 1, h, w)

        # Up sampling
        alpha = F.resize_images(alpha, (h, w))

        # Remove extra channel (n, 1, h, w) -> (n, h, w)
        alpha_shape = (alpha.shape[0], alpha.shape[2], alpha.shape[3])
        alpha = F.reshape(alpha, alpha_shape)

        self.alpha = alpha
        return alpha
Code Example #37
File: test_copy.py Project: 2php/chainer
    def check_backward(self, src_id, dst_id):
        x_data = _to_gpu(self.x_data, src_id)
        x = chainer.Variable(x_data)

        y = functions.copy(x, dst_id)
        gy = _to_gpu(self.gy, dst_id)
        y.grad = gy

        y.backward()

        x_grad = x.grad
        numpy.testing.assert_array_equal(
            cuda.to_cpu(x_grad), self.gy)
Code Example #38
File: gin_gwm.py Project: ir5/chainer-chemistry
    def __call__(self, atom_array, adj, super_node, is_real_node=None):
        """
        Forward propagation

        Args:
            atom_array (numpy.ndarray): (minibatch, node) array;
                minibatch of molecules represented with atom IDs
                (C, O, S, ...). atom_array[m, i] = a means the m-th
                molecule's i-th node has atomic number a
            adj (numpy.ndarray): (minibatch, relation-type, node, node)
                array; minibatch of multi-relational adjacency matrices
                with edge-type information. adj[m, r, i, j] = b means the
                m-th molecule has an edge of relation type r and value b
                from node i to node j
            super_node (numpy.ndarray): 1D array, the supernode hidden state
            is_real_node:

        Returns:
            numpy.ndarray: final molecule representation
        """
        if atom_array.dtype == self.xp.int32:
            h = self.embed(atom_array)  # (minibatch, max_num_atoms)
        else:
            h = atom_array
        # end if-else
        h0 = functions.copy(h, cuda.get_device_from_array(h.data).id)

        self.gwm.GRU_local.reset_state()
        self.gwm.GRU_super.reset_state()

        # embed the super node
        h_s = self.embed_super(super_node)

        g_list = []
        for step in range(self.n_message_layers):
            message_layer_index = 0 if self.weight_tying else step
            h2 = self.update_layers[message_layer_index](h, adj)
            h, h_s = self.gwm(h, h2, h_s, message_layer_index)
            if self.concat_hidden:
                g = self.readout_layers[step](h, h0, is_real_node)
                g_list.append(g)

        if self.concat_hidden:
            return functions.concat(g_list, axis=1)
        else:
            g = self.readout_layers[0](h, h0, is_real_node)
            g2 = functions.concat((g, h_s), axis=1)
            out_g = functions.relu(self.linear_for_concat_super(g2))
            return out_g
Code Example #39
File: gin.py Project: ir5/chainer-chemistry
    def __call__(self, atom_array, adj, is_real_node=None):
        """
        Forward propagation through the whole network

        Args:
            atom_array (numpy.ndarray): (minibatch, node) array;
                minibatch of molecules represented with atom IDs
                (C, O, S, ...). atom_array[m, i] = a means the m-th
                molecule's i-th node has atomic number a
            adj (numpy.ndarray): (minibatch, relation-type, node, node)
                array; minibatch of multi-relational adjacency matrices
                with edge-type information. adj[m, r, i, j] = b means the
                m-th molecule has an edge of relation type r and value b
                from node i to node j
            is_real_node:

        Returns:
            numpy.ndarray: final molecule representation
        """

        if atom_array.dtype == self.xp.int32:
            h = self.embed(atom_array)  # (minibatch, max_num_atoms)
        else:
            h = atom_array

        h0 = functions.copy(h, cuda.get_device_from_array(h.data).id)

        g_list = []
        for step in range(self.n_message_layers):
            message_layer_index = 0 if self.weight_tying else step
            h = self.update_layers[message_layer_index](h, adj)
            if self.concat_hidden:
                g = self.readout_layers[step](h, h0, is_real_node)
                g_list.append(g)

        if self.concat_hidden:
            return functions.concat(g_list, axis=1)
        else:
            g = self.readout_layers[0](h, h0, is_real_node)
            return g
Code Example #40
File: test_copy.py Project: asi1024/chainer
 def test_call_forward_with_device(self):
     functions.copy(self.x_data, cuda.DummyDevice)
Code Example #41
File: test_copy.py Project: asi1024/chainer
 def f(x):
     return functions.copy(x, -1)
Code Example #42
File: test_copy.py Project: skallumadi/chainer
 def test_check_backward_cpu(self):
     x = chainer.Variable(self.x_data)
     y = functions.copy(x, -1)
     y.grad = self.gy
     y.backward()
     gradient_check.assert_allclose(x.grad, self.gy, atol=0, rtol=0)
Code Example #43
File: test_copy.py Project: Fhrozen/chainer
 def f(x):
     y = functions.copy(x, -1)
     return y * y
Code Example #44
File: test_copy.py Project: skallumadi/chainer
 def test_check_forward_cpu(self):
     x = chainer.Variable(self.x_data)
     y = functions.copy(x, -1)
     gradient_check.assert_allclose(self.x_data, y.data, atol=0, rtol=0)