Example #1
 def spatial_pyramid_pooling(self, x):
     padding = Variable(np.zeros((1, x.shape[2]), dtype=np.float32))
     h = [F.expand_dims(F.flatten(F.max(x, axis=3)), axis=0)]
     length = x.shape[3]
     for i in range(1, self.spp_level):
         division = 2**i
         window_size = length // division
         if window_size > 0:
             for j in range(division - 1):  # all windows except the last
                 h.append(
                     F.expand_dims(F.flatten(
                         F.max(x[:, :, :,
                                 (window_size * j):(window_size * (j + 1))],
                               axis=3)),
                                   axis=0))
             h.append(
                 F.expand_dims(F.flatten(
                     F.max(x[:, :, :, (window_size * (division - 1)):], axis=3)),
                               axis=0))
         else:
             for j in range(length):
                 h.append(F.expand_dims(F.flatten(x[:, :, :, j]), axis=0))
             extend = division - length
             for j in range(extend):
                 h.append(padding)
     return h
Example #2
    def fwd(self, x, train):
        n = 3
        _, _, x_h, x_w = x.shape
        h = F.relu(self.conv1(x))
        h = F.relu(self.conv2(h))
        h_b, h_c, h_h, h_w = h.shape
        if x_h != h_h or x_w != h_w:  # pad if either spatial dimension shrank
            pad = Variable(np.zeros(
                (h_b, h_c, x_h - h_h, h_w)).astype(np.float32),
                           volatile=x.volatile)
            if not isinstance(h.data, np.ndarray):  # data lives on the GPU
                pad.to_gpu()
            h = F.concat((h, pad), axis=2)
            pad = Variable(np.zeros(
                (h_b, h_c, x_h, x_w - h_w)).astype(np.float32),
                           volatile=x.volatile)
            if not isinstance(h.data, np.ndarray):
                pad.to_gpu()
            h = F.concat((h, pad), axis=3)

        h_b, h_c, h_h, h_w = h.shape
        h_flat = F.reshape(F.flatten(h), (1, 3072))
        x_flat = F.reshape(F.flatten(x), (1, 3072))
        x = self.alpha(x_flat)
        h_flat += x
        h = F.reshape(h_flat, h.shape)
        x = F.reshape(x_flat, h.shape)
        h = self.fc(h)
        self.alpha_log.add(float(self.alpha.W.data[0][0]))
        self.alpha_log.save('./results/alpha_log')
        return h
Example #3
        def _construct_relation_embedding(trig_embedding,
                                          relation,
                                          batch_i,
                                          bilstm_i,
                                          structures_above_threshold=None):

            role = relation[0][0]
            arg = relation[0][1]
            io = relation[1]

            role_type_embedding = self.embed_roletype(
                np.array([role]).astype("i"))
            triggers = batch_i[const.IDS_TRIGGERS_IDX]
            is_trigger = arg in triggers

            if is_trigger:
                arg_embedding = _represent_type_and_argument(
                    batch_i, const.IDS_TRIGGERS_IDX, bilstm_i, arg,
                    structures_above_threshold)
            else:
                arg_embedding = _represent_type_and_argument(
                    batch_i, const.IDS_ENTITIES_IDX, bilstm_i, arg)

            io_embedding = self.embed_io(np.array([io]).astype('i'))
            relation_embedding = []
            if len(arg_embedding) != 0:
                for i in range(len(arg_embedding)):
                    a = F.flatten(trig_embedding)
                    b = F.flatten(role_type_embedding)
                    c = F.flatten(arg_embedding[i])
                    d = F.flatten(io_embedding)
                    z = F.hstack([a, b, c, d])
                    emb = F.reshape(z, (1, self.len_relation))
                    relation_embedding.append(emb)
            return relation_embedding
Example #4
    def update_q_func(self, batch):
        """Compute loss for a given Q-function."""

        batch_next_state = batch['next_state']
        batch_rewards = batch['reward']
        batch_terminal = batch['is_state_terminal']
        batch_state = batch['state']
        batch_actions = batch['action']
        batch_discount = batch['discount']

        with chainer.no_backprop_mode(), chainer.using_config('train', False):
            next_actions = self.target_policy_smoothing_func(
                self.target_policy(batch_next_state).sample().array)
            next_q1 = self.target_q_func1(batch_next_state, next_actions)
            next_q2 = self.target_q_func2(batch_next_state, next_actions)
            next_q = F.minimum(next_q1, next_q2)

            target_q = batch_rewards + batch_discount * \
                (1.0 - batch_terminal) * F.flatten(next_q)

        predict_q1 = F.flatten(self.q_func1(batch_state, batch_actions))
        predict_q2 = F.flatten(self.q_func2(batch_state, batch_actions))

        loss1 = F.mean_squared_error(target_q, predict_q1)
        loss2 = F.mean_squared_error(target_q, predict_q2)

        # Update stats
        self.q1_record.extend(cuda.to_cpu(predict_q1.array))
        self.q2_record.extend(cuda.to_cpu(predict_q2.array))
        self.q_func1_loss_record.append(float(loss1.array))
        self.q_func2_loss_record.append(float(loss2.array))

        self.q_func1_optimizer.update(lambda: loss1)
        self.q_func2_optimizer.update(lambda: loss2)
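Note: the F.flatten calls in this example do shape alignment, not learning logic. Chainer Q-networks conventionally return a (batch_size, 1) array while rewards and discounts are stored as (batch_size,), so both the target and the predictions are flattened to 1-D before the mean squared error. A minimal sketch of that alignment (the values are made up):

import numpy as np
import chainer.functions as F

q_out = np.array([[1.0], [2.0], [3.0]], dtype=np.float32)  # (3, 1) network output
rewards = np.array([0.5, 0.1, 0.9], dtype=np.float32)      # (3,) batch of rewards

# Without flatten, broadcasting (3,) + (3, 1) would give a (3, 3) array.
target = rewards + 0.99 * F.flatten(q_out)
print(target.shape)  # (3,)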
Example #5
        def _represent_type_and_argument(arg_ids,
                                         type_index,
                                         bilstm_i,
                                         type_label,
                                         structures_above_threshold=None):

            embedding_list = []
            if structures_above_threshold is not None:
                embedding_list = structures_above_threshold[type_label]
            else:
                defn = arg_ids[type_label]
                type_id = defn[const.IDS_ARG_TYPE]
                type_embedding = None
                if type_index == const.IDS_TRIGGERS_IDX:
                    type_embedding = self.embed_trigtype(
                        self.xp.array([type_id]).astype("i"))
                elif type_index == const.IDS_ENTITIES_IDX:
                    type_embedding = self.embed_enttype(
                        self.xp.array([type_id]).astype("i"))

                mention = defn[const.IDS_ARG_MENTION]
                mention_ids = _get_word_ids(instance, mention)  # `instance` and the helpers come from the enclosing scope
                mention_embedding = _represent_mentions(mention_ids, bilstm_i)

                flattened_type_embedding = F.flatten(type_embedding)
                flattened_mention_embedding = F.flatten(mention_embedding)
                type_and_argument_embedding = F.hstack(
                    [flattened_type_embedding, flattened_mention_embedding])

                reshaped_type_and_argument_embedding = F.reshape(
                    type_and_argument_embedding, (1, self.len_type_and_arg))
                embedding_list.append(reshaped_type_and_argument_embedding)
            return embedding_list
Example #6
        def _represent_type_and_argument(batch_i,
                                         type_index,
                                         bilstm_i,
                                         type_label,
                                         structures_above_threshold=None):
            def _get_word_ids(xsi, mention):
                word_ind = []
                sentence_ids = xsi[const.IDS_SENTENCE_INFO_IDX][
                    const.IDS_SENTENCE_IDX]
                for i in mention:
                    if i in sentence_ids:
                        ind = sentence_ids.index(i)
                        word_ind.append(ind)
                return word_ind

            embedding_list = []

            if structures_above_threshold is not None:
                embedding_list = structures_above_threshold[type_label]
            else:
                defn = batch_i[type_index][type_label]

                type_id = defn[const.IDS_ARG_TYPE]
                type_embedding = None
                if type_index == const.IDS_TRIGGERS_IDX:
                    if self.REPLACE_TYPE:
                        trig_word = self.id2triggertype[type_id]
                        new_arg = Util.extract_category(
                            trig_word, self.GENERALISATION,
                            const.TYPE_GENERALISATION)
                        assert new_arg != '', "ERROR: new_arg is '' "
                        type_id = self.trigger_type2id[new_arg]
                    type_embedding = self.embed_trigtype(
                        np.array([type_id]).astype("i"))
                elif type_index == const.IDS_ENTITIES_IDX:
                    if self.REPLACE_TYPE:
                        ent_word = self.id2entitytype[type_id]
                        new_arg = Util.extract_category(
                            ent_word, self.GENERALISATION,
                            const.TYPE_GENERALISATION)
                        assert new_arg != '', "ERROR: new_arg is '' "
                        type_id = self.entity_type2id[new_arg]
                    type_embedding = self.embed_argtype(
                        np.array([type_id]).astype("i"))

                mention = defn[const.IDS_ARG_MENTION]
                mention_ids = _get_word_ids(batch_i, mention)
                mention_embedding = _represent_mentions(mention_ids, bilstm_i)

                flattened_type_embedding = F.flatten(type_embedding)
                flattened_mention_embedding = F.flatten(mention_embedding)
                type_and_argument_embedding = F.hstack(
                    [flattened_type_embedding, flattened_mention_embedding])

                reshaped_type_and_argument_embedding = F.reshape(
                    type_and_argument_embedding, (1, self.len_type_and_arg))
                embedding_list.append(reshaped_type_and_argument_embedding)

            return embedding_list
Example #7
        def _construct_relation_embedding(trig_embedding, pair, entities_ids,
                                          triggers_ids, bilstm_i,
                                          structures_above_threshold=None):
            relation = pair[0]
            action = pair[1]
            if action == const.ACTION_NONE:
                action_embedding = Variable(self.xp.zeros((self.action_dim), dtype=self.xp.float32))
            else:
                action_embedding = self.embed_action(self.xp.array([action]).astype("i"))

            if relation[1] == const.NONE_ROLE_TYPE:
                role = 0
                type_id = 0
                role_type_embedding = self.embed_roletype(
                    self.xp.array([role]).astype("i"))
                type_embedding = self.embed_enttype(self.xp.array([type_id]).astype("i"))

                mention = [0]  # TODO: how to represent this better?
                mention_ids = _get_word_ids(instance, mention)
                mention_embedding = _represent_mentions(mention_ids, bilstm_i)

                flattened_type_embedding = F.flatten(type_embedding)
                flattened_mention_embedding = F.flatten(mention_embedding)
                type_and_argument_embedding = F.hstack([flattened_type_embedding, flattened_mention_embedding])

                arg_embedding = F.reshape(type_and_argument_embedding, (1, self.len_type_and_arg))
                relation_embedding = []
                a = F.flatten(trig_embedding)
                b = F.flatten(role_type_embedding)
                c = F.flatten(arg_embedding)
                d = F.flatten(action_embedding)
                z = F.hstack([a, b, c, d])
                emb = F.reshape(z, (1, self.len_relation + self.action_dim))
                relation_embedding.append(emb)
            else:
                role = relation[0]
                arg = relation[1]
                role_type_embedding = self.embed_roletype(self.xp.array([role]).astype("i"))
                is_trigger = arg in triggers_ids

                if is_trigger:
                    arg_embedding = _represent_type_and_argument(triggers_ids, const.IDS_TRIGGERS_IDX, bilstm_i, arg,
                                                                 structures_above_threshold)
                else:
                    arg_embedding = _represent_type_and_argument(entities_ids, const.IDS_ENTITIES_IDX, bilstm_i, arg)
                relation_embedding = []
                if len(arg_embedding) != 0:
                    for i in range(len(arg_embedding)):
                        a = F.flatten(trig_embedding)
                        b = F.flatten(role_type_embedding)
                        c = F.flatten(arg_embedding[i])
                        d = F.flatten(action_embedding)
                        z = F.hstack([a, b, c, d])
                        emb = F.reshape(z, (1, self.len_relation + self.action_dim))
                        relation_embedding.append(emb)
            return relation_embedding
Example #8
File: sqil.py Project: toy101/DSAC
 def _calc_absorb_value(self, batch_demo):
     absorb_state = self.xp.zeros((1, batch_demo['state'].shape[1]),
                                  dtype=batch_demo['state'].dtype)
     is_absorb = self.xp.ones((1, 1), dtype=self.xp.float32)
     absorb_action = self.xp.zeros((1, 1), dtype=self.xp.float32)
     # absorb_reward = self.reward_func(absorb_state, is_absorb, absorb_action)
     absorb_q1 = self.q_func1(absorb_state, is_absorb, absorb_action)
     absorb_q2 = self.q_func2(absorb_state, is_absorb, absorb_action)
     # assert absorb_q1.shape == absorb_reward.shape
     # return F.flatten(absorb_reward + self.gamma * F.minimum(absorb_q1, absorb_q2))
     return F.flatten(absorb_q1), F.flatten(absorb_q2)
Example #9
def _elementwise_softmax_cross_entropy(x, t, two_class):
    assert x.shape[:-1] == t.shape
    shape = t.shape
    t = F.flatten(t)
    if two_class:
        x = F.flatten(x)
        return F.reshape(
            F.sigmoid_cross_entropy(x, t, reduce='no'), shape)
    else:
        x = F.reshape(x, (-1, x.shape[-1]))
        return F.reshape(
            F.softmax_cross_entropy(x, t, reduce='no'), shape)
Example #10
def dice_coefficient(self, predict, ground_truth):
    dice_numerator = 0.0
    dice_denominator = 0.0
    eps = 1e-16

    predict = F.flatten(predict)
    ground_truth = F.flatten(ground_truth.astype(np.float32))

    dice_numerator = F.sum(2 * (predict * ground_truth))
    dice_denominator = F.sum(predict + ground_truth)
    loss = dice_numerator / (dice_denominator + eps)

    return -loss
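Note: the quantity above is the Sørensen-Dice coefficient, 2·|P∩G| / (|P| + |G|), negated so that maximizing overlap minimizes the loss. A tiny hand-checkable run (the values are made up):

import numpy as np
import chainer.functions as F

predict = np.array([[1.0, 0.0], [1.0, 0.0]], dtype=np.float32)
truth = np.array([[1.0, 0.0], [0.0, 1.0]], dtype=np.float32)

p = F.flatten(predict)
g = F.flatten(truth)
dice = F.sum(2 * (p * g)) / (F.sum(p + g) + 1e-16)
print(dice.array)  # 0.5: overlap 1 against total mass 4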
Example #11
File: sqil.py Project: toy101/DSAC
    def _compute_bc_loss(self, batch):

        # behavioral cloning
        batch_state = batch['state']
        batch_actions = batch['action']
        batch_absorb = batch['is_state_absorb']

        action_distrib = self.policy(batch_state, batch_absorb)
        action = action_distrib.sample()
        loss = F.mean_squared_error(F.flatten(batch_actions),
                                    F.flatten(action))

        return loss
Example #12
    def update_q_func(self, batch):
        """Compute loss for a given Q-function."""

        batch_next_state = batch['next_state']
        batch_rewards = batch['reward']
        batch_terminal = batch['is_state_terminal']
        batch_state = batch['state']
        batch_actions = batch['action']
        batch_discount = batch['discount']

        with chainer.no_backprop_mode(), chainer.using_config('train', False):
            next_action_distrib = self.policy(batch_next_state)
            next_actions, next_log_prob =\
                next_action_distrib.sample_with_log_prob()
            next_q1 = self.target_q_func1(batch_next_state, next_actions)
            next_q2 = self.target_q_func2(batch_next_state, next_actions)
            next_q = F.minimum(next_q1, next_q2)
            entropy_term = self.temperature * next_log_prob[..., None]
            assert next_q.shape == entropy_term.shape

            target_q = batch_rewards + batch_discount * \
                (1.0 - batch_terminal) * F.flatten(next_q - entropy_term)

        predict_q1 = F.flatten(self.q_func1(batch_state, batch_actions))
        predict_q2 = F.flatten(self.q_func2(batch_state, batch_actions))

        loss1 = 0.5 * F.mean_squared_error(target_q, predict_q1)
        loss2 = 0.5 * F.mean_squared_error(target_q, predict_q2)

        if self.use_mutual_learning:
            for idx, agent in enumerate(self.all_agents):
                if idx != self.assigned_idx:
                    #self.logger.info('Mutual learn Q')
                    other_predict_q1 = F.flatten(
                        agent.q_func1(batch_state, batch_actions))
                    other_predict_q2 = F.flatten(
                        agent.q_func2(batch_state, batch_actions))
                    loss1 += 0.5 * F.mean_squared_error(
                        predict_q1, other_predict_q1)
                    loss2 += 0.5 * F.mean_squared_error(
                        predict_q2, other_predict_q2)

        # Update stats
        self.q1_record.extend(cuda.to_cpu(predict_q1.array))
        self.q2_record.extend(cuda.to_cpu(predict_q2.array))
        self.q_func1_loss_record.append(float(loss1.array))
        self.q_func2_loss_record.append(float(loss2.array))

        self.q_func1_optimizer.update(lambda: loss1)
        self.q_func2_optimizer.update(lambda: loss2)
Example #13
File: sac.py Project: toy101/DSAC
    def update_q_func(self, batch):
        """Compute loss for a given Q-function."""

        batch_next_state = batch['next_state']
        batch_rewards = batch['reward']
        batch_terminal = batch['is_state_terminal']
        batch_state = batch['state']
        batch_actions = batch['action']
        batch_discount = batch['discount']

        with chainer.no_backprop_mode(), chainer.using_config('train', False):
            next_action_distrib = self.policy(batch_next_state)
            next_actions, next_log_prob =\
                next_action_distrib.sample_with_log_prob()
            entropy_term = self.temperature * next_log_prob
            if self.is_discrete:
                next_q1 = F.select_item(self.target_q_func1(batch_next_state),
                                        next_actions)
                next_q2 = F.select_item(self.target_q_func2(batch_next_state),
                                        next_actions)
            else:
                next_q1 = self.target_q_func1(batch_next_state, next_actions)
                next_q2 = self.target_q_func2(batch_next_state, next_actions)
                entropy_term = entropy_term[..., None]
            next_q = F.minimum(next_q1, next_q2)
            assert next_q.shape == entropy_term.shape

            target_q = batch_rewards + batch_discount * \
                (1.0 - batch_terminal) * F.flatten(next_q - entropy_term)

        if self.is_discrete:
            predict_q1 = F.flatten(
                F.select_item(self.q_func1(batch_state), batch_actions))
            predict_q2 = F.flatten(
                F.select_item(self.q_func2(batch_state), batch_actions))
        else:
            predict_q1 = F.flatten(self.q_func1(batch_state, batch_actions))
            predict_q2 = F.flatten(self.q_func2(batch_state, batch_actions))

        loss1 = 0.5 * F.mean_squared_error(target_q, predict_q1)
        loss2 = 0.5 * F.mean_squared_error(target_q, predict_q2)

        # Update stats
        self.q1_record.extend(cuda.to_cpu(predict_q1.array))
        self.q2_record.extend(cuda.to_cpu(predict_q2.array))
        self.q_func1_loss_record.append(float(loss1.array))
        self.q_func2_loss_record.append(float(loss2.array))

        self.q_func1_optimizer.update(lambda: loss1)
        self.q_func2_optimizer.update(lambda: loss2)
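Note: in the discrete branch, F.select_item(x, t) picks one entry per row, x[i, t[i]], which is exactly the Q-value of the action taken. A small standalone demonstration (the values are made up):

import numpy as np
import chainer.functions as F

q_values = np.array([[0.1, 0.9],
                     [0.8, 0.2]], dtype=np.float32)  # (batch, n_actions)
actions = np.array([1, 0], dtype=np.int32)

chosen = F.select_item(q_values, actions)
print(chosen.array)  # [0.9 0.8]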
Example #14
 def __call__(self, *args):
     x = args[:-1]
     t = args[-1]
     self.y = None
     self.loss = None
     self.accuracy = None
     self.y = self.predictor(*x)
     if self.output_size == 1:
         self.y = F.flatten(self.y)
         t = F.flatten(t)
     self.loss = F.mean_absolute_error(self.y, t)
     reporter.report({'loss': self.loss}, self)
     reporter.report({'mae': self.loss}, self)
     return self.loss
Example #15
    def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        y = functions.flatten(x)

        self.assertEqual(y.shape, self.g_shape)
        self.assertEqual(y.dtype, self.dtype)
        testing.assert_allclose(self.x.flatten(), y.data)
Example #16
    def __call__(self, obs, conditional_input=None):
        o_c, o_a1, o_a2 = obs

        if self.conditional:
            # concat image obs and conditional image input
            o_c = F.concat((o_c, conditional_input), axis=1)

        # image encoding part
        h_a1 = self.f(self.e1_l1_a1(o_a1))
        h_a2 = self.f(self.e1_l1_a1(o_a2))
        h_a = F.concat((h_a1, h_a2), axis=1)
        h_c = self.f(self.e1_c1(o_c))

        # reshape
        imshape = h_c.shape[2:]
        h = h_c + F.reshape(F.tile(h_a, imshape), (1, 32) + imshape)
        h = self.f(self.e2_c1(h))
        h = self.f(self.e2_c2(h))
        h = self.f(self.e2_c3(h))
        h = F.expand_dims(F.flatten(h), 0)
        h = self.f(self.e2_l1(h))

        # lstm
        h = self.lstm(h)

        # output by the decoder
        return self.decoder(h)
Example #17
    def __call__(self, data, face, eyes_grid):
        # the network that uses data as input
        pool1 = F.max_pooling_2d(F.relu(self.conv1(data)), ksize=3, stride=2)
        norm1 = F.local_response_normalization(pool1,
                                               n=5,
                                               alpha=0.0001,
                                               beta=0.75)
        pool2 = F.max_pooling_2d(F.relu(self.conv2(norm1)), ksize=3, stride=2)
        norm2 = F.local_response_normalization(pool2,
                                               n=5,
                                               alpha=0.0001,
                                               beta=0.75)
        conv3 = F.relu(self.conv3(norm2))
        conv4 = F.relu(self.conv4(conv3))
        conv5 = F.relu(self.conv5(conv4))
        conv5_red = F.relu(self.conv5_red(conv5))

        # the network that uses face as input
        pool1_face = F.max_pooling_2d(F.relu(self.conv1_face(face)),
                                      ksize=3,
                                      stride=2)
        norm1_face = F.local_response_normalization(pool1_face,
                                                    n=5,
                                                    alpha=0.0001,
                                                    beta=0.75)
        pool2_face = F.max_pooling_2d(F.relu(self.conv2_face(norm1_face)),
                                      ksize=3,
                                      stride=2)
        norm2_face = F.local_response_normalization(pool2_face,
                                                    n=5,
                                                    alpha=0.0001,
                                                    beta=0.75)
        conv3_face = F.relu(self.conv3_face(norm2_face))
        conv4_face = F.relu(self.conv4_face(conv3_face))
        pool5_face = F.max_pooling_2d(F.relu(self.conv5_face(conv4_face)),
                                      ksize=3,
                                      stride=2)
        fc6_face = F.relu(self.fc6_face(pool5_face))

        # now the eyes
        eyes_grid_flat = F.flatten(eyes_grid)
        eyes_grid_mult = 24 * eyes_grid_flat
        eyes_grid_reshaped = F.reshape(
            eyes_grid_mult,
            (1, eyes_grid_mult.size))  # give it same ndim as fc6

        # now bring everything together
        face_input = F.concat((fc6_face, eyes_grid_reshaped), axis=1)
        fc7_face = F.relu(self.fc7_face(face_input))
        fc8_face = F.relu(self.fc8_face(fc7_face))
        importance_map_reshape = F.reshape(
            F.sigmoid(self.importance_no_sigmoid(fc8_face)), (1, 1, 13, 13))
        fc_7 = conv5_red * self.importance_map(importance_map_reshape)
        fc_0_0 = self.fc_0_0(fc_7)
        fc_1_0 = self.fc_1_0(fc_7)
        fc_0_1 = self.fc_0_1(fc_7)
        fc_m1_0 = self.fc_m1_0(fc_7)
        fc_0_m1 = self.fc_0_m1(fc_7)

        return fc_0_0, fc_1_0, fc_0_1, fc_0_m1, fc_m1_0
Example #18
 def position2onehot(self, inds, dim):
     inds = chaFunc.flatten(inds)
     inds = inds.data.astype('float32') % self.max_n_spans
     inds = inds.astype('int32')
     eye = self.xp.identity(dim).astype(self.xp.float32)
     onehot = chaFunc.embed_id(inds, eye)
     return onehot
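Note: the identity-matrix trick above is a common way to build one-hot vectors in Chainer: F.embed_id with an identity matrix as the embedding table maps index i to row i, i.e. to its one-hot encoding. A standalone sketch:

import numpy as np
import chainer.functions as F

inds = np.array([0, 2, 1], dtype=np.int32)
eye = np.identity(4).astype(np.float32)  # rows are 4-dim one-hot vectors

onehot = F.embed_id(inds, eye)
print(onehot.array)
# [[1. 0. 0. 0.]
#  [0. 0. 1. 0.]
#  [0. 1. 0. 0.]]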
Example #19
    def __call__(self, z):
        # decode location
        z1 = z
        h = F.reshape(z1, (1, 64, 2, 2))
        h = self.f(self.l1_c1(h))
        h = self.f(self.l1_c2(h))
        h = self.f(self.l1_c3(h))
        h = self.f(self.l1_c4(h))
        h = self.l1_c5(h)
        h = F.expand_dims(F.flatten(h), 0)
        p1 = SoftmaxDistribution(h)
        a1 = p1.sample()

        # decode prob
        h_a1 = self.f(self.l1_l1(
            np.expand_dims(a1.data, 0).astype(np.float32)))
        h_a1 = F.concat((z1, h_a1), axis=1)

        z2 = self.f(self.l1_l2(h_a1))
        h_a2 = self.l2_l1(z2)
        p2 = SoftmaxDistribution(h_a2)
        a2 = p2.sample()

        probs = [p1, p2]
        acts = [a1, a2]

        return probs, acts
Example #20
 def __call__(self, xs, ys, train):
     decoder_logits = self.predictor.predict(xs, ys, train)
     labels = F.flatten(F.transpose(ys))
     loss = F.softmax_cross_entropy(decoder_logits, labels)
     accuracy = F.accuracy(decoder_logits, labels)
     reporter.report({"loss": loss, "accuracy": accuracy}, self)
     return loss
Example #21
def _elementwise_softmax_cross_entropy(x, t):
    assert x.shape[:-1] == t.shape
    shape = t.shape
    x = F.reshape(x, (-1, x.shape[-1]))
    t = F.flatten(t)
    return F.reshape(
        F.softmax_cross_entropy(x, t, reduce='no'), shape)
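Note: the reshape/flatten round trip exists because F.softmax_cross_entropy wants class scores along the second axis, so the helper collapses everything to (N, n_classes) logits and (N,) labels, and reduce='no' keeps one loss value per element before the original label shape is restored. The same round trip in isolation (shapes and values are made up):

import numpy as np
import chainer.functions as F

logits = np.random.randn(2, 4, 3).astype(np.float32)  # (batch, positions, classes)
labels = np.random.randint(0, 3, size=(2, 4)).astype(np.int32)

flat_loss = F.softmax_cross_entropy(
    F.reshape(logits, (-1, 3)), F.flatten(labels), reduce='no')
loss = F.reshape(flat_loss, labels.shape)
print(loss.shape)  # (2, 4): one loss per position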
Example #22
def inverse_select_items_per_row(values2d, idx2d):
    """
    Selects items from 2-dimensional tensor (matrix) per row that are not in the
    given indices

    :param values2d: The values to choose from
    :type values2d: chainer.Variable

    :param idx2d: The indices to select
    :type idx2d: chainer.Variable

    :return: A matrix with the other elements selected
    :rtype: chainer.Variable
    """
    xp = cuda.get_array_module(values2d, idx2d)
    rows = idx2d.shape[0]
    cols_idx = idx2d.shape[1]
    cols_values = values2d.shape[1]

    flattened_idx = xp.ravel(idx2d.data) + xp.repeat(
        xp.arange(0, rows * cols_values, cols_values), cols_idx)

    all_idx = xp.arange(0, rows * cols_values, dtype=idx2d.data.dtype)
    all_idx[flattened_idx] = -1
    remaining_idx = all_idx[all_idx >= 0]

    flattened_values = F.flatten(values2d)
    flattened_values = flattened_values[remaining_idx]
    return F.reshape(flattened_values, (rows, cols_values - cols_idx))
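Note: a quick sanity check for this helper, assuming the function above is in scope (the toy values are made up): excluding columns 0 and 2 of each row should return the remaining columns 1 and 3.

import numpy as np
import chainer

values = chainer.Variable(np.arange(8, dtype=np.float32).reshape(2, 4))
idx = chainer.Variable(np.array([[0, 2], [0, 2]], dtype=np.int32))

rest = inverse_select_items_per_row(values, idx)
print(rest.array)
# [[1. 3.]
#  [5. 7.]]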
Example #23
    def __call__(self, x, c, alpha=1.0):
        if self.depth > 0 and alpha < 1:
            h1 = self['b%d' % (7 - self.depth)](x, c, True)
            x2 = F.average_pooling_2d(x, 2, 2)
            h2 = F.leaky_relu(self['b%d' % (7 - self.depth + 1)].fromRGB(x2))
            h = h2 * (1 - alpha) + h1 * alpha
        else:
            h = self['b%d' % (7 - self.depth)](x, c, True)

        for i in range(self.depth):
            h = self['b%d' % (7 - self.depth + 1 + i)](h, c)

        if c is not None:
            d = self.l(h)
            d = F.flatten(d)
            return d
        else:
            d = F.relu(self.l1(h))
            #d = self.l2(d)
            male = self.male(d)
            blond = self.blond(d)
            eye = self.eye(d)
            no_beard = self.no_beard(d)
            smiling = self.smiling(d)
            return male, blond, eye, no_beard, smiling
Example #24
	def __call__(self, x, c=None):
		if c is not None:
			embedded = self.embedder(c)
			normalized = embedded / sqrt(mean(embedded ** 2, axis=1, keepdims=True) + 1e-08)
			c1 = self.mapper(normalized)
		h = self.main(x)
		return flatten(h) if c is None else sum(h * c1, axis=1) / root(h.shape[1])
Example #25
    def __call__(self, xs, ys):
        """Calculate loss between outputs and ys.

        Args:
            xs: Source sentences.
            ys: Target sentences.

        Returns:
            loss: Cross-entropy loss between outputs and ys.

        """
        batch_size = len(xs)

        hxs = self.encoder(xs)
        os = self.decoder(ys, hxs)

        concatenated_os = F.concat(os, axis=0)
        concatenated_ys = F.flatten(ys.T)
        n_words = len(self.xp.where(concatenated_ys.data != PAD)[0])

        loss = F.sum(
            F.softmax_cross_entropy(concatenated_os,
                                    concatenated_ys,
                                    reduce='no',
                                    ignore_label=PAD))
        loss = loss / n_words
        chainer.report({'loss': loss.data}, self)
        perp = self.xp.exp(loss.data * batch_size / n_words)
        chainer.report({'perp': perp}, self)

        return loss
Example #26
    def dice_coefficient(self, predict, ground_truth):
        '''
        Assume label 0 is background
        '''
        dice_numerator = 0.0
        dice_denominator = 0.0
        eps = 1e-16

        predict = F.flatten(predict[:, 1:self._max_label, :, :, :])
        ground_truth = F.flatten(
            ground_truth[:, 1:self._max_label, :, :, :].astype(np.float32))

        dice_numerator = F.sum(predict * ground_truth)
        dice_denominator = F.sum(predict + ground_truth)
        dice = 2 * dice_numerator / (dice_denominator + eps)

        return dice
Example #27
 def forward(self, x):
     x = F.relu(F.max_pooling_2d(self.conv1(x), 2))
     x = F.relu(F.max_pooling_2d(F.dropout(self.conv2(x)), 2))
     x = F.reshape(F.flatten(x), (-1, 320))
     x = F.relu(self.fc1(x))
     x = F.relu(self.fc2(x))
     return x
Example #28
def GeneralizedDiceLossFunction(y, t, w):

    dice_numerator = 0.0
    dice_denominator = 0.0
    eps = 0.0001
    div = cp.float32(y.shape[0] * y.shape[1])

    y = F.softmax(y, axis=1)
    for i in range(y.shape[0]):  # batch size
        soft = y[i]
        tb = cp.array(t[i].flatten()).astype(cp.float32)
        for j in range(y.shape[1]):  # class size
            wb = cp.array(w[i][j].flatten()).astype(cp.float32)
            V_in = cp.where(tb == j, 1, 0).astype(cp.float32)

            t_temp = chainer.Variable(V_in)
            w_temp = chainer.Variable(wb)
            soft_temp = F.flatten(soft[j])

            dice_numerator += F.sum(w_temp * soft_temp * t_temp)
            dice_denominator += F.sum(w_temp * (soft_temp + t_temp))

    loss = 2.0 * dice_numerator / (dice_denominator + eps)

    return -loss
Example #29
 def __call__(self, x_p, x_v):
     h_l = F.tanh(self.bn_l0(self.l_phr(x_p)))
     h_v = F.tanh(self.bn_v0(self.l_img(x_v)))
     h = h_l * 0.5 + h_v * 0.5
     h = F.relu(self.bn_1(self.l_1(h)))
     h = self.cls(h)
     h = F.flatten(h)
     return h
Example #30
    def forward(self, xs):
        hs_0 = F.leaky_relu(self.l0_conv(xs), slope=0.2)
        hs_1 = F.leaky_relu(self.l1_bn(self.l1_conv(hs_0)), slope=0.2)
        hs_2 = F.leaky_relu(self.l2_bn(self.l2_conv(hs_1)), slope=0.2)
        hs_3 = F.leaky_relu(self.l3_bn(self.l3_conv(hs_2)), slope=0.2)
        hs_4 = self.l4_dense(F.flatten(hs_3).reshape(len(xs), -1))

        return hs_4
Example #31
 def _calc_rpn_loss_bbox(self, rpn_bbox_pred, bbox_reg_targets, inds_inside):
     # rpn_bbox_pred has the shape of (1, 4 x n_anchors, feat_h, feat_w)
     n_anchors = self.proposal_layer._num_anchors
     # Reshape it into (4, A, K)
     rpn_bbox_pred = rpn_bbox_pred.reshape(4, n_anchors, -1)
     # Transpose it into (K, A, 4)
     rpn_bbox_pred = rpn_bbox_pred.transpose(2, 1, 0)
     # Reshape it into (K x A, 4)
     rpn_bbox_pred = rpn_bbox_pred.reshape(-1, 4)
     # Keep the number of bbox
     n_bbox = rpn_bbox_pred.shape[0]
     # Select bbox and ravel it
     rpn_bbox_pred = F.flatten(rpn_bbox_pred[inds_inside])
     # Create batch dimension
     rpn_bbox_pred = F.expand_dims(rpn_bbox_pred, 0)
     # Ravel the targets and create batch dimension
     bbox_reg_targets = bbox_reg_targets.ravel()[None, :]
     # Calc Smooth L1 Loss (When delta=1, huber loss is SmoothL1Loss)
     rpn_loss_bbox = F.huber_loss(rpn_bbox_pred, bbox_reg_targets,
                                  self._delta)
     rpn_loss_bbox /= n_bbox
     return rpn_loss_bbox.reshape(())
Example #32
 def forward(self, inputs, device):
     x, = inputs
     y = functions.flatten(x)
     return y,
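Note: every example above relies on the same small contract, which is easy to verify directly: chainer.functions.flatten reshapes an input of any shape into a 1-D Variable in row-major order, and the operation stays differentiable. A minimal check:

import numpy as np
import chainer.functions as F

x = np.arange(6, dtype=np.float32).reshape(2, 3)
y = F.flatten(x)
print(y.shape)  # (6,)
print(y.array)  # [0. 1. 2. 3. 4. 5.]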