Example #1
 def test_maximum_inconsistent_shapes(self):
     x1_data = numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype)
     x2_data = numpy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
     x1 = chainer.Variable(x1_data)
     x2 = chainer.Variable(x2_data)
     with self.assertRaises(type_check.InvalidType):
         functions.maximum(x1, x2)
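A note on why this test expects type_check.InvalidType: functions.maximum performs no broadcasting, so both inputs must share the same shape (and a floating-point dtype). A minimal sketch of the successful case, assuming same-shaped float32 inputs:

import numpy as np
import chainer.functions as F

a = np.array([[1., -2.], [3., 4.]], dtype=np.float32)
b = np.array([[0., 5.], [2., 2.]], dtype=np.float32)
# Elementwise maximum of two same-shaped float arrays.
print(F.maximum(a, b).array)  # [[1. 5.] [3. 4.]]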
Example #2
    def calculate_gaussian_loss(self, y, t):
        xp = chainer.cuda.get_array_module(t)
        if xp != numpy:
            xp.cuda.Device(t.device).use()
        nr_mix = y.shape[1] // 3

        logits = y[:, :nr_mix]
        means = y[:, nr_mix:2 * nr_mix]
        log_scales = y[:, 2 * nr_mix:3 * nr_mix]
        log_scales = F.maximum(
            log_scales, self.scalar_to_tensor(log_scales, self.log_scale_min))

        t = F.broadcast_to(t, means.shape)

        distribution = chainer.distributions.Normal(
            means, log_scale=log_scales)
        cdf_plus = distribution.cdf(t + 1 / (self.quantize - 1))
        cdf_min = distribution.cdf(t - 1 / (self.quantize - 1))
        probs = cdf_plus - cdf_min
        probs = F.maximum(probs, self.scalar_to_tensor(probs, 1e-12))
        if nr_mix == 1:
            loss = -F.mean(F.log(probs))
        else:
            log_probs = F.log_softmax(logits) + F.log(probs)
            loss = -F.mean(F.logsumexp(log_probs, axis=1))
        return loss
Example #3
def intersection(bbox0, bbox1):
    x0, y0, w0, h0 = bbox0
    x1, y1, w1, h1 = bbox1

    w = F.relu(F.minimum(x0 + w0 / 2, x1 + w1 / 2) - F.maximum(x0 - w0 / 2, x1 - w1 / 2))
    h = F.relu(F.minimum(y0 + h0 / 2, y1 + h1 / 2) - F.maximum(y0 - h0 / 2, y1 - h1 / 2))

    return w * h
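A hypothetical companion to the intersection helper above: intersection over union, assuming each bbox is a (center_x, center_y, width, height) tuple of same-shaped Variables and eps is a small constant added to avoid division by zero.

def iou(bbox0, bbox1, eps=1e-6):
    # Union = sum of the two areas minus the intersection.
    x0, y0, w0, h0 = bbox0
    x1, y1, w1, h1 = bbox1
    inter = intersection(bbox0, bbox1)
    union = w0 * h0 + w1 * h1 - inter
    return inter / (union + eps)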
Example #4
    def calc_loss(self, grids, image_size):
        width, height = self.get_bbox_side_lengths(grids, image_size)

        # penalize aspect ratios that are taller than wide, and penalize aspect ratios that are too wide
        aspect_ratio = height / F.maximum(width, self.xp.ones_like(width))
        # no extra penalty once the width of the bbox is at least 2x its height
        aspect_loss = F.maximum(aspect_ratio - 0.5, self.xp.zeros_like(aspect_ratio))

        return F.mean(aspect_loss)
Example #5
 def loss(self, z):
     if self.ls == 'sq':
         return ((z - 1) ** 2) / 4
     elif self.ls == 'dh':
         zeros = self.xp.zeros(z.shape, dtype=self.xp.float32)
         return F.maximum(-z, F.maximum(zeros, (1-z)/2))
     elif self.ls == 'lg':
         return F.log(1 + F.exp(-z)) / self.xp.log(2)
     elif self.ls == 'exp':
         return F.exp(-z)
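These branches correspond to the squared, double-hinge, logistic, and exponential surrogate losses. A quick numeric check of the double-hinge branch (a sketch, assuming float32 inputs):

import numpy as np
import chainer.functions as F

z = np.array([-2., 0., 0.5, 2.], dtype=np.float32)
zeros = np.zeros_like(z)
# max(-z, max(0, (1 - z) / 2)): linear for z < -1, hinge in between, zero for z >= 1
print(F.maximum(-z, F.maximum(zeros, (1 - z) / 2)).array)  # [2. 0.5 0.25 0.]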
Example #6
def emb_crits(emb_flows, margin, vlamda=1, llamda=1):
    xp = cuda.get_array_module(emb_flows['vis'][0])
    batch_size = emb_flows['vis'][0].shape[0]

    zeros = Variable(xp.zeros(batch_size, dtype=xp.float32))
    vis_loss = F.mean(
        F.maximum(zeros, margin + emb_flows['vis'][1] - emb_flows['vis'][0]))
    lang_loss = F.mean(
        F.maximum(zeros, margin + emb_flows['lang'][1] - emb_flows['lang'][0]))
    return vlamda * vis_loss + llamda * lang_loss
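Both terms are standard max-margin ranking losses: the paired score (index 0 of each flow) should exceed the unpaired score (index 1) by at least margin. A minimal numeric sketch, assuming float32 scores:

import numpy as np
import chainer.functions as F

pos = np.array([0.9, 0.2], dtype=np.float32)
neg = np.array([0.1, 0.4], dtype=np.float32)
margin = 0.5
# Per-pair hinge: max(0, margin + neg - pos)
print(F.maximum(np.zeros_like(pos), margin + neg - pos).array)  # [0.  0.7]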
Example #7
def multi_box_intersection(a, b):
    w = multi_overlap(a.x, a.w, b.x, b.w)
    h = multi_overlap(a.y, a.h, b.y, b.h)
    zeros = Variable(np.zeros(w.shape, dtype=w.data.dtype))
    zeros.to_gpu()

    w = F.maximum(w, zeros)
    h = F.maximum(h, zeros)

    area = w * h
    return area
Example #8
    def forward(self, img, text, w_img, w_text):
        x = self.rnn_encoder(text)
        w_x = self.rnn_encoder(w_text)
        v = self.cnn_encoder(img)
        w_v = self.cnn_encoder(w_img)

        zeros = cuda.to_gpu(xp.array(0., dtype="float32"), self._gpu_id)
        loss = F.mean(F.maximum(zeros, self.alpha - cosine_similarity(x, v) + cosine_similarity(x, w_v))) +\
                    F.mean(F.maximum(zeros, self.alpha - cosine_similarity(x, v) + cosine_similarity(w_x, v)))

        return x, w_x, loss
Example #9
    def calc_loss(self, grids, image_size):
        top_left_x, top_right_x, _, top_left_y, _, bottom_left_y = self.get_corners(grids, image_size)

        grid_widths = top_right_x - top_left_x
        grid_heights = bottom_left_y - top_left_y
        expected_width = self.xp.full_like(grid_widths, image_size.width, dtype=grid_widths.dtype)
        expected_height = self.xp.full_like(grid_heights, image_size.height, dtype=grid_heights.dtype)

        width_loss = F.maximum(self.xp.zeros_like(grid_widths), grid_widths - expected_width)
        height_loss = F.maximum(self.xp.zeros_like(grid_heights), grid_heights - expected_height)

        return sum(width_loss) + sum(height_loss)
Example #10
def mixture_of_discretized_logistics_nll(x, y):
    """
    Args:
        x: (b, c, n, n)
        y: (b, 10*n_mix, n, n)
    """
    xp = get_array_module(x)
    n_mix = y.shape[1] // 10
    logit_prob = y[:, :n_mix, :, :]
    y = F.reshape(y[:, n_mix:, :, :], x.shape + (n_mix * 3, ))
    mean = y[:, :, :, :, 0:n_mix]
    log_scale = y[:, :, :, :, n_mix:2 * n_mix]
    log_scale = F.maximum(log_scale, -7 * xp.ones(log_scale.shape, dtype='f'))
    coeff = F.tanh(y[:, :, :, :, 2 * n_mix:3 * n_mix])

    x = xp.repeat(xp.expand_dims(x, 4), n_mix, 4)
    m1 = F.expand_dims(mean[:, 0, :, :, :], 1)
    m2 = F.expand_dims(
        mean[:, 1, :, :, :] + coeff[:, 0, :, :, :] * x[:, 0, :, :, :], 1)
    m3 = F.expand_dims(
        (mean[:, 2, :, :, :] + coeff[:, 1, :, :, :] * x[:, 0, :, :, :] +
         coeff[:, 2, :, :, :] * x[:, 1, :, :, :]), 1)
    mean = F.concat([m1, m2, m3])
    centered_x = x - mean
    inv_std = F.exp(-log_scale)
    max_in = inv_std * (centered_x + 1. / 255.)
    cdf_max = F.sigmoid(max_in)
    min_in = inv_std * (centered_x - 1. / 255.)
    cdf_min = F.sigmoid(min_in)
    log_cdf_max = max_in - F.softplus(max_in)  # log CDF at the left edge (pixel value 0)
    log_one_minus_cdf_min = -F.softplus(min_in)  # log survival at the right edge (pixel value 255)
    cdf_delta = cdf_max - cdf_min  # probability mass of the in-between bins
    mid_in = inv_std * centered_x
    log_pdf_mid = mid_in - log_scale - 2. * F.softplus(mid_in)  # log pdf at the bin center

    log_prob = F.where(
        x < -0.999, log_cdf_max,
        F.where(
            x > 0.999, log_one_minus_cdf_min,
            F.where(
                cdf_delta.array > 1e-5,
                F.log(
                    F.maximum(cdf_delta,
                              xp.ones(cdf_delta.shape, dtype='f') * 1e-12)),
                log_pdf_mid - xp.log(127.5))))

    log_prob = F.transpose(F.sum(log_prob, 1), (0, 3, 1, 2))
    log_prob = log_prob + log_prob_from_logit(logit_prob)

    loss = F.logsumexp(log_prob, 1)
    loss = F.sum(loss, axis=(1, 2))
    return -F.mean(loss)
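The three-way F.where handles the edge cases of the discretized logistic: x near -1 takes the closed-form log CDF at the left edge, x near 1 takes the log survival at the right edge, and the F.maximum against 1e-12 keeps the logarithm finite when the CDF difference underflows. A toy sketch of that clamp, assuming float32:

import numpy as np
import chainer.functions as F

cdf_delta = np.array([0.3, 1e-9, 0.0], dtype=np.float32)
floor = np.full_like(cdf_delta, 1e-12)
# Without the clamp, log(0) would produce -inf and poison the mean.
print(F.log(F.maximum(cdf_delta, floor)).array)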
Example #11
    def calc_loss(self, grids, image_size):
        top_left_x, top_right_x, _, top_left_y, _, bottom_left_y = self.get_corners(grids, image_size)

        # penalize upside down images
        distance = top_left_y - bottom_left_y
        loss_values = F.maximum(distance, self.xp.zeros_like(distance))
        up_down_loss = F.average(loss_values)

        # penalize images that are mirrored along the vertical axis (left/right flipped)
        distance = top_left_x - top_right_x
        loss_values = F.maximum(distance, self.xp.zeros_like(distance))
        left_right_loss = F.average(loss_values)

        return up_down_loss + left_right_loss
Example #12
    def calc_intersection(self, top_left_x_1, width_1, top_left_x_2, width_2,
                          top_left_y_1, height_1, top_left_y_2, height_2):
        width_overlap = self.calc_overlap(top_left_x_1, width_1, top_left_x_2,
                                          width_2)

        height_overlap = self.calc_overlap(top_left_y_1, height_1,
                                           top_left_y_2, height_2)

        width_overlap = F.maximum(width_overlap,
                                  self.xp.zeros_like(width_overlap))
        height_overlap = F.maximum(height_overlap,
                                   self.xp.zeros_like(height_overlap))

        return width_overlap * height_overlap
Example #13
    def __call__(self, t, condition):
        # t(timesteps): 1-T

        distribution = chainer.distributions.Normal(
            self.xp.array(0, dtype='f'), self.xp.array(1, dtype='f'))
        z = distribution.sample(t.shape)
        # z(timesteps): 1-T

        condition = self.encoder(condition)
        # condition(timesteps): 1-T

        s_means, s_scales = self.student(z, condition)
        s_clipped_scales = F.maximum(
            s_scales, self.scalar_to_tensor(s_scales, -7))
        # s_means, s_scales(timesteps): 2-(T+1)

        x = z[:, :, 1:] * F.exp(s_scales[:, :, :-1]) + s_means[:, :, :-1]
        # x(timesteps): 2-T

        with chainer.using_config('train', False):
            y = self.teacher(x, condition[:, :, 1:])
        t_means, t_scales = y[:, 1:2], y[:, 2:3]
        t_clipped_scales = F.maximum(
            t_scales, self.scalar_to_tensor(t_scales, -7))
        # t_means, t_scales(timesteps): 3-(T+1)

        s_distribution = chainer.distributions.Normal(
            s_means[:, :, 1:], log_scale=s_clipped_scales[:, :, 1:])
        t_distribution = chainer.distributions.Normal(
            t_means, log_scale=t_clipped_scales)
        # s_distribution, t_distribution(timesteps): 3-(T+1)

        kl = chainer.kl_divergence(s_distribution, t_distribution)
        kl = F.minimum(
            kl, self.scalar_to_tensor(kl, 100))
        kl = F.average(kl)

        regularization = F.mean_squared_error(
            t_scales, s_scales[:, :, 1:])

        spectrogram_frame_loss = F.mean_squared_error(
            self.stft.magnitude(t[:, :, 1:]), self.stft.magnitude(x))

        loss = kl + self.lmd * regularization + spectrogram_frame_loss
        chainer.reporter.report({
            'loss': loss, 'kl_divergence': kl,
            'regularization': regularization,
            'spectrogram_frame_loss': spectrogram_frame_loss}, self)
        return loss
Example #14
    def calc_loss(self, grids, image_size):
        """
            Calculate a loss based on the expected grid size. Penalize all predicted grids whose area
            is smaller than the area of the crop region.
        """
        top_left_x, top_right_x, _, top_left_y, _, bottom_left_y = self.get_corners(grids, image_size)

        grid_widths = top_right_x - top_left_x
        grid_heights = bottom_left_y - top_left_y
        expected_width = self.xp.full_like(grid_widths, grids.shape[-1], dtype=grid_widths.dtype)
        expected_height = self.xp.full_like(grid_heights, grids.shape[2], dtype=grid_heights.dtype)

        width_loss = F.maximum(self.xp.zeros_like(grid_widths), expected_width - grid_widths)
        height_loss = F.maximum(self.xp.zeros_like(grid_heights), expected_height - grid_heights)

        return sum(width_loss) + sum(height_loss)
Example #15
 def lf(frames):
     mu, ln_var = self.encode(frames)
     z = F.gaussian(mu, ln_var)
     frames_flat = F.reshape(
         frames,
         (-1, frames.shape[1] * frames.shape[2] * frames.shape[3]))
     variational_flat = F.reshape(
         self.decode(z),
         (-1, frames.shape[1] * frames.shape[2] * frames.shape[3]))
     rec_loss = F.sum(F.square(frames_flat - variational_flat),
                      axis=1)  # l2 reconstruction loss
     rec_loss = F.mean(rec_loss)
     kl_loss = F.sum(F.gaussian_kl_divergence(mu, ln_var, reduce="no"),
                     axis=1)
     if self._cpu:
         kl_tolerance = np.asarray(self.kl_tolerance *
                                   self.n_latent).astype(np.float32)
     else:
         kl_tolerance = cp.asarray(self.kl_tolerance *
                                   self.n_latent).astype(cp.float32)
     kl_loss = F.maximum(kl_loss,
                         F.broadcast_to(kl_tolerance, kl_loss.shape))
     kl_loss = F.mean(kl_loss)
     loss = rec_loss + kl_loss
     chainer.report({'loss': loss}, observer=self)
     chainer.report({'kl_loss': kl_loss}, observer=self)
     chainer.report({'rec_loss': rec_loss}, observer=self)
     return loss
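The F.maximum against kl_tolerance * n_latent is the usual KL floor ("free bits") trick: once the KL term drops below the floor it is replaced by a constant and stops pushing the encoder further. A toy sketch, assuming float32:

import numpy as np
import chainer.functions as F

kl = np.array([0.5, 3.0], dtype=np.float32)
floor = np.full_like(kl, 2.0)  # stands in for kl_tolerance * n_latent
# Entries below the floor become the constant floor and contribute no gradient.
print(F.maximum(kl, floor).array)  # [2. 3.]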
Example #16
 def maximum(self, a, b):
     assert a.dtype == b.dtype
     if a.dtype.name.startswith('float'):
         x = F.maximum(a, b)
     else:
         x = Variable(np.maximum(a.data, b.data))
     return x
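F.maximum is only defined for floating-point inputs (its type check requires a float dtype), which is why this helper falls back to plain numpy for integer arrays. A quick sketch of the two paths, assuming CPU data:

import numpy as np
import chainer.functions as F
from chainer import Variable

# Float dtypes go through the differentiable op...
f = F.maximum(np.array([1., 5.], dtype=np.float32), np.array([4., 2.], dtype=np.float32))
# ...while integer dtypes would fail type checking, hence the numpy fallback.
i = Variable(np.maximum(np.array([1, 5]), np.array([4, 2])))
print(f.array, i.array)  # [4. 5.] [4 5]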
Example #17
 def check_forward(self, x1_data, x2_data, y_expected):
     x1 = chainer.Variable(x1_data)
     x2 = chainer.Variable(x2_data)
     y = functions.maximum(x1, x2)
     self.assertEqual(y.data.dtype, self.dtype)
     testing.assert_allclose(
         y_expected, y.data, **self.check_forward_options)
Example #18
    def _lossfun(self, entropy, vs_pred, log_probs, vs_pred_old, log_probs_old,
                 advs, vs_teacher):

        prob_ratio = F.exp(log_probs - log_probs_old)

        loss_policy = -F.mean(
            F.minimum(
                prob_ratio * advs,
                F.clip(prob_ratio, 1 - self.clip_eps, 1 + self.clip_eps) *
                advs))

        if self.clip_eps_vf is None:
            loss_value_func = F.mean_squared_error(vs_pred, vs_teacher)
        else:
            loss_value_func = F.mean(
                F.maximum(
                    F.square(vs_pred - vs_teacher),
                    F.square(
                        _elementwise_clip(vs_pred, vs_pred_old -
                                          self.clip_eps_vf, vs_pred_old +
                                          self.clip_eps_vf) - vs_teacher)))
        loss_entropy = -F.mean(entropy)

        self.value_loss_record.append(float(loss_value_func.array))
        self.policy_loss_record.append(float(loss_policy.array))

        loss = (loss_policy + self.value_func_coef * loss_value_func +
                self.entropy_coef * loss_entropy)

        return loss
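_elementwise_clip is not shown in this snippet; a plausible sketch, assuming it clamps a Variable between elementwise tensor bounds (F.clip only accepts scalar bounds):

import chainer.functions as F

def _elementwise_clip(x, x_min, x_max):
    # Clamp each element of x to the corresponding [x_min, x_max] interval.
    return F.minimum(F.maximum(x, x_min), x_max)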
Example #19
 def __call__(self, x):
     x = F.log_softmax(x)
     h = x + x * F.broadcast_to(self.W, x.shape) + F.broadcast_to(self.b, x.shape)
     mx = F.maximum(h, F.broadcast_to(self.lb, x.shape))
     mn = F.minimum(h, F.broadcast_to(self.lb, x.shape))
     y = mx + F.log(1.0 + F.exp(mn - mx))
     return y
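The last two lines are the numerically stable logaddexp identity, log(e^a + e^b) = max(a, b) + log(1 + e^(min(a, b) - max(a, b))), evaluated here with a = h and b = the lower bound self.lb. A standalone sketch, assuming float32 inputs:

import numpy as np
import chainer.functions as F

def stable_logaddexp(a, b):
    # log(exp(a) + exp(b)) without overflowing exp for large inputs.
    mx = F.maximum(a, b)
    mn = F.minimum(a, b)
    return mx + F.log(1.0 + F.exp(mn - mx))

a = np.array([1000., -3.], dtype=np.float32)
b = np.array([999., 4.], dtype=np.float32)
print(stable_logaddexp(a, b).array)  # matches np.logaddexp(a, b)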
Example #20
def multi_overlap(x1, len1, x2, len2):
    len1_half = len1 / 2
    len2_half = len2 / 2

    left = F.maximum(x1 - len1_half, x2 - len2_half)
    right = F.minimum(x1 + len1_half, x2 + len2_half)

    return right - left
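multi_overlap returns a negative length when the two intervals are disjoint, which is why the multi_box_intersection callers above clamp it against zeros (F.relu would do the same). A quick check, assuming float32 inputs:

import numpy as np
import chainer.functions as F

x1, len1 = np.array([0.], dtype=np.float32), np.array([2.], dtype=np.float32)
x2, len2 = np.array([5.], dtype=np.float32), np.array([2.], dtype=np.float32)
overlap = multi_overlap(x1, len1, x2, len2)
print(overlap.array)          # [-3.]: the intervals are disjoint
print(F.relu(overlap).array)  # [0.]: clamped, as in the callers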
Example #21
    def calc_aspect_ratio_loss(self, width, height, label_lengths=None):
        # penalize aspect ratios that are taller than wide, and penalize aspect ratios that are too wide
        aspect_ratio = height / F.maximum(width, self.xp.ones_like(width))
        # no extra penalty once the width of the bbox is at least 2x its height
        aspect_loss = F.maximum(aspect_ratio - 0.5,
                                self.xp.zeros_like(aspect_ratio))

        # penalize very long bboxes (based on the underlying word) by assuming that a single letter
        # is at most as wide as it is high; if the bbox is wider than that allows, it is penalized
        if label_lengths is not None:
            max_width = label_lengths * height
            width_ratio = width - max_width
            width_threshold = F.maximum(width_ratio,
                                        self.xp.zeros_like(width_ratio))
            aspect_loss = aspect_ratio + width_threshold

        return sum(aspect_loss) / len(aspect_loss)
Example #22
    def _compute_y_and_t(self, exp_batch):

        batch_state = exp_batch['state']
        batch_size = len(exp_batch['reward'])

        if self.recurrent:
            qout, _ = self.model.n_step_forward(batch_state,
                                                exp_batch['recurrent_state'],
                                                output_mode='concat')
        else:
            qout = self.model(batch_state)

        batch_actions = exp_batch['action']
        batch_q = qout.evaluate_actions(batch_actions)

        # Compute target values

        with chainer.no_backprop_mode():
            batch_next_state = exp_batch['next_state']
            if self.recurrent:
                next_qout, _ = self.model.n_step_forward(
                    batch_next_state,
                    exp_batch['next_recurrent_state'],
                    output_mode='concat')
                target_qout, _ = self.target_model.n_step_forward(
                    batch_state,
                    exp_batch['recurrent_state'],
                    output_mode='concat')
                target_next_qout, _ = self.target_model.n_step_forward(
                    batch_next_state,
                    exp_batch['next_recurrent_state'],
                    output_mode='concat')
            else:
                next_qout = self.model(batch_next_state)
                target_qout = self.target_model(batch_state)
                target_next_qout = self.target_model(batch_next_state)

            next_q_max = F.reshape(
                target_next_qout.evaluate_actions(next_qout.greedy_actions),
                (batch_size, ))

            batch_rewards = exp_batch['reward']
            batch_terminal = exp_batch['is_state_terminal']

            # T Q: Bellman operator
            t_q = batch_rewards + exp_batch['discount'] * \
                (1.0 - batch_terminal) * next_q_max

            # T_PAL Q: persistent advantage learning operator
            cur_advantage = F.reshape(
                target_qout.compute_advantage(batch_actions), (batch_size, ))
            next_advantage = F.reshape(
                target_next_qout.compute_advantage(batch_actions),
                (batch_size, ))
            tpal_q = t_q + self.alpha * \
                F.maximum(cur_advantage, next_advantage)

        return batch_q, tpal_q
Example #23
 def greedy_actions(self):
     a = self.mu
     if self.min_action is not None:
         a = F.maximum(
             self.xp.broadcast_to(self.min_action, a.data.shape), a)
     if self.max_action is not None:
         a = F.minimum(
             self.xp.broadcast_to(self.max_action, a.data.shape), a)
     return a
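Since F.clip takes scalar bounds, clamping against per-dimension action bounds is done here with broadcast_to plus maximum/minimum. A standalone sketch, assuming a batch of 2-D actions and float32 bounds:

import numpy as np
import chainer.functions as F

mu = np.array([[1.5, -2.0], [0.3, 0.9]], dtype=np.float32)
min_action = np.array([-1.0, -1.0], dtype=np.float32)
max_action = np.array([1.0, 1.0], dtype=np.float32)

a = F.maximum(np.broadcast_to(min_action, mu.shape), mu)
a = F.minimum(np.broadcast_to(max_action, mu.shape), a)
print(a.array)  # [[ 1.  -1. ] [ 0.3  0.9]]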
Example #24
    def __call__(self, x0, x1, cs_map=False):
        xp = cuda.get_array_module(x0.data)

        h0 = x0
        h1 = x1
        msssim = 1

        for i in range(self.level - 1):
            cs = super(MSSSIM, self).__call__(h0, h1, cs_map=True)
            cs = F.maximum(cs, xp.zeros_like(cs.data))
            msssim *= cs**self.weight[i]
            h0 = F.average_pooling_2d(h0, 2)
            h1 = F.average_pooling_2d(h1, 2)

        ssim = super(MSSSIM, self).__call__(h0, h1)
        ssim = F.maximum(ssim, xp.zeros_like(ssim.data))
        msssim *= ssim**self.weight[-1]
        return msssim
Example #25
 def triplet_loss(flow, num_label):
     pairGenP = flow[0]
     unpairGenP = flow[1]
     zeros = Variable(xp.zeros(pairGenP.shape[1], dtype=xp.float32))
     pairSentProbs = F.sum(pairGenP, axis=0) / (num_label + 1)
     unpairSentProbs = F.sum(unpairGenP, axis=0) / (num_label + 1)
     trip_loss = F.mean(
         F.maximum(zeros, margin + unpairSentProbs - pairSentProbs))
     return trip_loss
Example #26
    def calc_bboxes(self, predicted_bboxes, image_size, out_size):
        predicted_bboxes = (predicted_bboxes + 1) / 2
        x_points = predicted_bboxes[:, 0, ...] * image_size.width
        y_points = predicted_bboxes[:, 1, ...] * image_size.height
        top_left_x = F.get_item(x_points, [..., 0, 0])
        top_left_y = F.get_item(y_points, [..., 0, 0])
        bottom_right_x = F.get_item(x_points, [..., out_size.height - 1, out_size.width - 1])
        bottom_right_y = F.get_item(y_points, [..., out_size.height - 1, out_size.width - 1])

        bboxes = F.stack(
            [
                F.minimum(top_left_x, bottom_right_x),
                F.minimum(top_left_y, bottom_right_y),
                F.maximum(top_left_x, bottom_right_x),
                F.maximum(top_left_y, bottom_right_y),
            ],
            axis=1
        )
        return bboxes
Example #27
 def greedy_actions(self):
     with chainer.force_backprop_mode():
         a = self.mu
         if self.min_action is not None:
             a = F.maximum(
                 self.xp.broadcast_to(self.min_action, a.array.shape), a)
         if self.max_action is not None:
             a = F.minimum(
                 self.xp.broadcast_to(self.max_action, a.array.shape), a)
         return a
Example #28
    def calc_loss(self, grids, image_size, **kwargs):
        normalize = kwargs.get('normalize', True)
        top_left_x, top_right_x, _, _, top_left_y, _, bottom_left_y, _ = self.get_corners(
            grids, image_size)

        # penalize upside down images
        distance = top_left_y - bottom_left_y
        up_down_loss = F.maximum(distance, self.xp.zeros_like(distance.array))
        if normalize:
            up_down_loss = F.sum(up_down_loss)

        # penalize images that are mirrored along the vertical axis (left/right flipped)
        distance = top_left_x - top_right_x
        left_right_loss = F.maximum(distance,
                                    self.xp.zeros_like(distance.array))
        if normalize:
            left_right_loss = F.sum(left_right_loss)

        return up_down_loss + left_right_loss
Example #29
    def calc_overlap(self, left_1, width_1, left_2, width_2):
        radius_1 = width_1 / 2
        center_1 = left_1 + radius_1
        radius_2 = width_2 / 2
        center_2 = left_2 + radius_2

        center_distance = center_2 - center_1
        center_distance = F.maximum(center_distance, center_distance * -1)
        min_distance_for_no_overlap = radius_1 + radius_2
        return min_distance_for_no_overlap - center_distance
Example #30
    def __call__(self, distances, points1, points2):
        """

        Args:
            distances (numpy.ndarray or cupy.ndarray):
                3-dim array (bs, num_point2, num_point1)
            points1 (Variable): 3-dim (batch_size, num_point1, ch1)
            points2 (Variable): 3-dim (batch_size, num_point2, ch2)
                points2 is deeper, rich feature. num_point1 > num_point2

        Returns (Variable): 3-dim (batch_size, num_point1, ch1+ch2)

        """
        # batch_size, num_point1, ch1 = points1.shape
        # batch_size2, num_point2, ch2 = points2.shape
        if distances is None:
            print('[WARNING] distances is None')
            # calculate distances by feature vector (not coord vector)
            distances = self.xp(self.metric(points1, points2))
            # Better in this form
            # distances = self.xp(self.metric(coord1, coord2))
        batch_size, num_point2, num_point1 = distances.shape
        # assert batch_size == batch_size2

        # --- weight calculation ---
        # k-nearest neighbor with k=self.num_fp_point
        # sorted_indices (bs, num_fp_point, num_point1)
        sorted_indices = self.xp.argsort(distances,
                                         axis=1)[:, :self.num_fp_point, :]
        # sorted_dists (bs, num_fp_point, num_point1)
        sorted_dists = distances[self.xp.arange(batch_size)[:, None, None],
                                 sorted_indices,
                                 self.xp.arange(num_point1)[None, None, :]]

        eps_array = self.xp.ones(sorted_dists.shape,
                                 dtype=sorted_dists.dtype) * self.eps
        sorted_dists = functions.maximum(sorted_dists, eps_array)
        inv_dist = 1.0 / sorted_dists
        norm = functions.sum(inv_dist, axis=1, keepdims=True)
        norm = functions.broadcast_to(norm, sorted_dists.shape)
        # weight (bs, num_fp_point, num_point1)
        weight = inv_dist / norm
        # --- weight calculation end ---
        # point2_selected (bs, num_fp_point, num_point1, ch2)
        points2_selected = points2[self.xp.arange(batch_size)[:, None, None],
                                   sorted_indices, :]
        # print('debug', weight.shape, points2_selected.shape)
        weight = functions.broadcast_to(weight[:, :, :, None],
                                        points2_selected.shape)
        # interpolated_points (bs, num_point1, ch2)
        interpolated_points = functions.sum(weight * points2_selected, axis=1)
        if points1 is None:
            return interpolated_points
        else:
            return functions.concat([interpolated_points, points1], axis=2)
Example #31
File: losses.py  Project: kzky/works
    def __call__(self, y):
        bs = y.data.shape[0]
        d = np.prod(y.data.shape[1:])

        y_normalized = F.softmax(y)
        y_log_softmax = F.log_softmax(y)
        negentropy = F.sum(y_normalized * y_log_softmax, axis=1) / d

        #zeros = to_device(np.zeros(bs).astype(np.float32), 2)
        ones = to_device(-np.ones(bs).astype(np.float32), 2)
        self.loss = F.sum(F.maximum(Variable(ones), -negentropy)) / bs

        return self.loss
Example #32
    def __call__(self, *xs):
        operation = self.operation

        if operation == 0:      # PROD
            return six.moves.reduce(lambda x, y: x * y, xs),

        elif operation == 1:    # SUM
            coeffs = self.coeffs
            if coeffs is not None:
                assert len(xs) == len(coeffs)
                xs = [x * coeff for x, coeff in zip(xs, coeffs)]
            return six.moves.reduce(lambda x, y: x + y, xs),

        elif operation == 2:    # MAX
            return six.moves.reduce(lambda x, y: functions.maximum(x, y), xs),

        else:
            raise ValueError('Invalid EltwiseParameter.EltwiseOp value.')
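The MAX branch folds functions.maximum pairwise over the inputs. A quick sketch of that reduction, assuming same-shaped float32 arrays:

import numpy as np
import six
import chainer.functions as F

xs = [np.array([1., 7., 3.], dtype=np.float32),
      np.array([4., 2., 9.], dtype=np.float32),
      np.array([5., 5., 5.], dtype=np.float32)]
y = six.moves.reduce(lambda x, z: F.maximum(x, z), xs)
print(y.array)  # [5. 7. 9.]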
Example #33
 def forward(self, x_list):
     cl_list = []
     for x in x_list:
         wvec = self.embed(x)
         cl_list.append(wvec)
     cr_list = []
     for x in reversed(x_list):
         wvec = self.embed(x)
         cr_list.append(wvec)
     xi_list = []
     for cl, cr in zip(cl_list, cr_list):
         xi_list.append(F.concat((cl, cr)))
     yi_list = []
     for xi in xi_list:
         yi_list.append(F.tanh(self.fc1(xi)))
     y3 = yi_list[0]
     for yi in yi_list[1:]:
         y3 = F.maximum(yi, y3)
     y4 = self.fc2(y3)
     return y4
Example #34
 def func(x1, x2):
     y = functions.maximum(x1, x2)
     return y * y
Example #35
 def check_forward(self, x1_data, x2_data, y_expected):
     x1 = chainer.Variable(x1_data)
     x2 = chainer.Variable(x2_data)
     y = functions.maximum(x1, x2)
     gradient_check.assert_allclose(
         y_expected, y.data, **self.check_forward_options)
Example #36
 def forward(self, inputs, devices):
     x1, x2 = inputs
     return functions.maximum(x1, x2),