Example #1
    def write(self, z, time):
        # Kr, N_mem, Z_DIM, GAMMA and USE_RETROACTIVE are module-level
        # constants; np is numpy, F is chainer.functions and Variable is
        # chainer.Variable.
        # update the usage indicator of the memory slots
        self.u += F.matmul(Variable(np.ones((1, Kr), dtype=np.float32)),
                           self.W_predictor)

        # update writing weights
        prev_v_wr = self.v_wr
        v_wr = np.zeros((N_mem, 1), dtype=np.float32)
        if time < N_mem:
            # while the memory is not yet full, write to slot `time`
            v_wr[time][0] = 1
        else:
            # memory is full: overwrite the least-used slot
            waste_index = int(F.argmin(self.u).data)
            v_wr[waste_index][0] = 1
        self.v_wr = Variable(v_wr)

        # writing
        # z: (1, Z_DIM)
        if USE_RETROACTIVE:
            # retroactive weights: exponential moving average of past
            # writing weights
            self.v_ret = GAMMA * self.v_ret + (1 - GAMMA) * prev_v_wr
            # z goes into the first Z_DIM columns via v_wr and into the
            # last Z_DIM columns via v_ret
            z_wr = F.concat((z, Variable(np.zeros((1, Z_DIM),
                                                  dtype=np.float32))))
            z_ret = F.concat((Variable(np.zeros((1, Z_DIM),
                                                dtype=np.float32)), z))
            self.M += F.matmul(self.v_wr, z_wr) + F.matmul(self.v_ret, z_ret)
        else:
            self.M += F.matmul(self.v_wr, z)
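A minimal sketch of the slot-selection step above, assuming only numpy and chainer.functions: F.argmin over the usage indicator returns the index of the least-used slot, which becomes a one-hot writing weight.

import numpy as np
import chainer.functions as F

u = np.array([[0.7], [0.2], [0.9]], dtype=np.float32)  # usage per slot
waste_index = int(F.argmin(u).data)                    # 1, the least-used slot
v_wr = np.zeros((3, 1), dtype=np.float32)
v_wr[waste_index][0] = 1                               # one-hot writing weight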
Example #2
import chainer
import chainer.functions as F


def tgsm(model,
         images,
         target=None,
         eps=0.01,
         iterations=1,
         clip_min=0.,
         clip_max=1.):
    """ Computing adversarial images based on Target class Gradient Sign Method.

    Args:
        model (chainer.Link): Predictor network excluding softmax.
        images (numpy.ndarray or cupy.ndarray): Initial images.
        target (None or int or list of int): Target class.
            If target is None, this implements the least-likely class method.
            If target is an int or a list of ints, the original images are
            modified towards the target label.
        eps (float): Attack step size.
        iterations (int): Number of attack iterations.
        clip_min (float): Minimum input component value.
        clip_max (float): Maximum input component value.

    Returns:
        adv_images (numpy.ndarray or cupy.ndarray):
            Generated adversarial images.

    Reference:
        Adversarial examples in the physical world,
        Kurakin et al., ICLR2017, https://arxiv.org/abs/1607.02533

    """
    n_batch = images.shape[0]
    adv_images = images
    xp = chainer.cuda.get_array_module(adv_images)
    if target is None:
        # least-likely class method: attack towards the class with the
        # lowest predicted score
        targets = F.argmin(model(images), axis=1)
    else:
        if isinstance(target, int):
            targets = xp.full(n_batch, target).astype(xp.int32)
        elif isinstance(target, list):
            assert len(target) == n_batch
            targets = xp.array(target).astype(xp.int32)
        else:
            raise NotImplementedError

    # negative step size: descend the loss so the prediction moves
    # towards the target class
    eps = -xp.abs(eps)

    for _ in range(iterations):
        adv_images = chainer.Variable(adv_images)
        loss = F.softmax_cross_entropy(model(adv_images), targets)
        loss.backward()
        adv_images = adv_images.data + eps * xp.sign(adv_images.grad)
        adv_images = xp.clip(adv_images, a_min=clip_min, a_max=clip_max)
    return adv_images.astype(xp.float32)
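A hedged usage sketch for tgsm; the single-layer predictor and the input shapes here are made up purely for illustration.

import numpy as np
import chainer.links as L

model = L.Linear(None, 10)  # hypothetical predictor: 10-class logits, no softmax
images = np.random.uniform(0., 1., (4, 784)).astype(np.float32)

# Push every image towards class 3; omit target for the least-likely class method.
adv = tgsm(model, images, target=3, eps=0.01, iterations=5)
assert adv.shape == images.shape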
Example #3
    def _sample(self, log_p):
        """
        Samples an index with probabilities p. This is a modification to the
        reservoir sampling algorithm made efficient on GPUs and numerically
        stable by operating on log-probabilities

        :param log_p: The log probabilities per row
        :type log_p: chainer.Variable

        :return: For each row, one index, sampled proportional to p
        :rtype: chainer.Variable
        """
        xp = cuda.get_array_module(log_p)

        # Gumbel-max trick: argmin(log(-log(u)) - log_p) draws an index with
        # probability proportional to p
        u = xp.random.uniform(0.0, 1.0, log_p.shape).astype(dtype=log_p.dtype)
        r = F.log(-F.log(u)) - log_p
        return F.argmin(r, axis=1)
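A standalone numerical check of the trick (plain numpy/chainer, outside the original class): repeated sampling should reproduce the probabilities p.

import numpy as np
import chainer.functions as F

p = np.array([[0.1, 0.6, 0.3]], dtype=np.float32)
log_p = np.log(p)

counts = np.zeros(3)
for _ in range(10000):
    u = np.random.uniform(0.0, 1.0, log_p.shape).astype(log_p.dtype)
    r = F.log(-F.log(u)) - log_p
    counts[int(F.argmin(r, axis=1).data[0])] += 1
print(counts / counts.sum())  # roughly [0.1, 0.6, 0.3]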
Example #4
    def forward(self, source, target):
        # source: (M, 3) points from the CAD model
        # target: (N, 3) points from the depth image

        source = morefusion.functions.transform_points(source, self.T[None])[0]

        # pairwise squared distances: dists[i, j] = ||source[j] - target[i]||^2
        dists = F.sum((source[None, :, :] - target[:, None, :]) ** 2,
                      axis=2).array
        # for each target point, the index of its nearest source point
        correspondence = F.argmin(dists, axis=1).array
        dists = dists[np.arange(dists.shape[0]), correspondence]

        # keep only matches with squared distance below the threshold
        keep = dists < 0.02
        target_match = target[keep]
        correspondence = correspondence[keep]
        source_match = source[correspondence]

        loss = F.sum(F.sum((source_match - target_match)**2, axis=1), axis=0)
        return loss
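The correspondence step in isolation, with toy points: F.argmin over the (N, M) pairwise squared distances gives, for each target point, the index of its nearest source point (the 0.02 threshold above then discards matches farther than that).

import numpy as np
import chainer.functions as F

source = np.array([[0., 0., 0.], [1., 0., 0.]], dtype=np.float32)    # (M, 3)
target = np.array([[0.9, 0., 0.], [0.1, 0., 0.]], dtype=np.float32)  # (N, 3)

dists = F.sum((source[None, :, :] - target[:, None, :]) ** 2, axis=2).array
correspondence = F.argmin(dists, axis=1).array
print(correspondence)  # [1 0]: target[0] matches source[1], target[1] source[0]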
Example #5
import chainer
import chainer.functions as F
from chainer import optimizers


def find_closest_latent_state(real_o, generator, transition, classifier, args):
    trials = 400
    target = OptimizableLatentState(s_shape=(trials, 7), z_shape=(trials, 4))
    if args.gpu >= 0:
        target.to_gpu()

    _, channels, height, width = real_o.shape
    real_o = real_o.reshape((channels, height, width))
    real_o = F.broadcast_to(real_o, (trials, ) + real_o.shape)
    print('real_o shape: ', real_o.shape)

    optimizer = optimizers.Adam(alpha=1e-2)
    optimizer.setup(target)

    iterations = 1000

    def compute_loss(real_o, o_current):
        concat_image = F.concat((real_o, o_current), axis=1)
        classification_loss = classifier(concat_image)
        classification_loss = F.squeeze(classification_loss)
        l2_loss = F.batch_l2_norm_squared(real_o - o_current)
        assert classification_loss.shape == l2_loss.shape
        # lower is better: small reconstruction error and a high
        # classifier score
        loss = l2_loss - classification_loss
        return loss

    s_current, z = target()
    for i in range(iterations):
        optimizer.target.cleargrads()

        s_next, _ = transition(s_current)
        # print('s_current shape: ', s_current.shape, 's_next shape: ', s_next.shape)
        x = F.concat((z, s_current, s_next), axis=1)
        x = F.reshape(x, shape=x.shape + (1, 1))
        o = generator(x)
        o_current, _ = F.split_axis(o, 2, axis=1, force_tuple=True)
        # print('o shape: ', o_current.shape)
        # print('real_o shape: ', real_o.shape)

        loss = compute_loss(real_o, o_current)
        mean_loss = F.mean(loss)
        mean_loss.backward()
        optimizer.update()
        mean_loss.unchain_backward()

        if i % 100 == 0:
            index = F.argmin(loss).data
            print('loss at: ', i, ' min index: ', index, ' min loss: ',
                  loss[index])

    # Select s and z with min loss
    s_current, z = target()
    s_next, _ = transition(s_current)
    x = F.concat((z, s_current, s_next), axis=1)
    x = F.reshape(x, shape=x.shape + (1, 1))
    o = generator(x)
    o_current, _ = F.split_axis(o, 2, axis=1, force_tuple=True)
    loss = compute_loss(real_o, o_current)

    index = F.argmin(loss).data
    print('min index: ', index, ' min loss: ', loss[index])

    s_min = s_current.data[index]
    print('s min: ', s_min)
    z_min = z.data[index]
    print('z min: ', z_min)
    return chainer.Variable(s_min), chainer.Variable(z_min)
Example #6
    def argmin(self, x, axis=-1):
        return F.argmin(x, axis=axis)
Example #7
    def forward(self, v1):
        return F.argmin(v1)
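Both wrappers defer directly to F.argmin. For reference, axis=-1 returns per-row indices, while the default axis=None (the Example #7 case) flattens the input and returns the index of the global minimum, as an integer chainer.Variable.

import numpy as np
import chainer.functions as F

x = np.array([[3., 1., 2.],
              [0., 5., 4.]], dtype=np.float32)
print(F.argmin(x, axis=-1).data)  # [1 0], the per-row minima
print(F.argmin(x).data)           # 3, flat index of the global minimum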