Example #1
    def correct_cov(self, u_odo_fog, x_cor, compute_G):
        G = torch.zeros(u_odo_fog.shape[1], 15, 9)
        if compute_G:
            for i in range(u_odo_fog.shape[1]):
                G[i, :6, :u_odo_fog.shape[2]] = jacobian(
                    u_odo_fog[0, i], x_cor)
        return G
Example #2
def solve_tool(theta, option):
    print("Começo dos resultados para opção: " + str(option) + "-----------------")
    j = utils.jacobian(l1, l2, theta[option,])
    print("Jacobiano: \n" + str(j))

    vl = j @ (np.array([20, -10, 12]) * np.pi / 180)

    omega = np.array([0, 0, 20 - 10 + 12])

    vw = np.concatenate((vl, omega))

    print("Velocidade W,W: \n" + str(vw))

    t = np.array([[np.sqrt(3)/2, 1/2, 0, 0.1],
                  [-1/2, np.sqrt(3)/2, 0, 0.2],
                  [0, 0, 1, 0],
                  [0, 0, 0, 1]])

    vT = vt.veltrans(t, vw)
    print("Velocidade T,T: \n" + str(vT))
    print("Fim dos resultados para opção: " + str(option) + "-----------------")
    #Modo 1
    b = np.array([0, 0])
    bIy = l1 * np.sin(theta[option,0])
    bIx = l1 * np.cos(theta[option,0])
    bI = np.array([bIx, bIy])
    bw = np.array([0.45, -0.47])
    bt = np.array([0.6, -0.3])

    x = np.array([b[0], bI[0], bw[0], bt[0]])
    y = np.array([b[1], bI[1], bw[1], bt[1]])

    pyplot.subplot(121 + option)
    utils.plot_robot(x, y, "Robô disposição " + str(option + 1))
Example #3
    def _net_build(self):
        self.input = tf.placeholder(shape=[None, 784], dtype=tf.float32)
        self.target = tf.placeholder(shape=[None, 10], dtype=tf.float32)
        self.fc1 = tf.contrib.layers.fully_connected(self.input,
                                                     500,
                                                     activation_fn=tf.nn.relu)
        self.fc2 = tf.contrib.layers.fully_connected(self.fc1,
                                                     10,
                                                     activation_fn=None)

        self.loss_vector = tf.reshape(
            tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.target,
                                                       logits=self.fc2), [-1])
        self.real_loss = tf.reduce_mean(self.loss_vector)

        self.var = tf.trainable_variables()
        self.grad = tf.gradients(self.real_loss, self.var)

        # embed()

        self.jacobian = jacobian(self.loss_vector, self.var, False)

        flat_j = tf.concat([
            tf.reshape(self.jacobian[0], shape=[-1, 784 * 500]),
            self.jacobian[1],
            tf.reshape(self.jacobian[2], shape=[-1, 500 * 10]),
            self.jacobian[3]
        ], axis=1)
        root_j = tf.sqrt(tf.reduce_mean(tf.square(flat_j), axis=0)) + self.epsilon

        cur_g = tf.concat([tf.reshape(g, [-1]) for g in self.grad], axis=0)

        self.batch_var = tf.reduce_mean(
            tf.reduce_sum(tf.square(flat_j - cur_g), axis=1))

        [g1, g2, g3, g4] = tf.split(tf.divide(cur_g, root_j),
                                    [784 * 500, 500, 500 * 10, 10])
        adjust_g = [
            tf.reshape(g1, [784, 500]), g2,
            tf.reshape(g3, [500, 10]), g4
        ]

        # embed()

        self.minimizer = []
        for v, g in zip(self.var, adjust_g):
            self.minimizer.append(tf.assign_sub(v, g * self.lr))
        self.minimizer = tf.group(*self.minimizer)

        tf.identity(self.real_loss, name='loss')
        tf.identity(self.batch_var, name='batch_variance')
        tf.summary.scalar('loss', self.real_loss)
        tf.summary.scalar('batch_variance', self.batch_var)
        self.merge_summary = tf.summary.merge_all()
Example #4
def test_grad():
    x0 = torch.tensor([-1.0, 1.0], requires_grad=True)
    fx = rosenbrock(*x0)

    auto_dx = U.jacobian(fx, x0)
    analytical_dx = grad_rosenbrock(*x0)

    assert (torch.norm(auto_dx - analytical_dx.T[0]) == 0)

    print("grad test success")
Example #5
    def apply_step(self, *args):
        loss_g, loss_h = args[:2]

        for x in self.params:
            g = jacobian(loss_g, x)
            h = hessian(loss_h, x)

            with torch.no_grad():
                g = g.reshape((-1, 1))
                h = h.reshape((g.shape[0], g.shape[0]))
                dx = conjugate_gradient(h,
                                        g,
                                        n_iterations=self.n_cg,
                                        tol=self.tol).reshape(x.shape)
                x.add_(dx, alpha=-self.lr)
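The conjugate_gradient call above is resolved elsewhere in that module; a minimal sketch of a plain conjugate-gradient solver matching the assumed interface (approximately solves h @ dx = g for a symmetric positive-definite h) is shown below, illustrative only.

import torch

def conjugate_gradient(A, b, n_iterations=10, tol=1e-10):
    # Hypothetical stand-in for the solver used above: iteratively solves A x = b.
    x = torch.zeros_like(b)
    r = b - A @ x          # residual
    p = r.clone()          # search direction
    rs_old = (r * r).sum()
    for _ in range(n_iterations):
        Ap = A @ p
        alpha = rs_old / (p * Ap).sum()
        x = x + alpha * p
        r = r - alpha * Ap
        rs_new = (r * r).sum()
        if torch.sqrt(rs_new) < tol:
            break
        p = r + (rs_new / rs_old) * p
        rs_old = rs_new
    return x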
Example #6
    def find_perturb(perturbation):
        logits_os = model(inputs + (1 + over_shoot) * perturbation)
        y_pred = T.argmax(logits_os, axis=1)
        is_mistake = T.neq(y_pred, labels)
        current_ind = batch_indices[(1 - is_mistake).nonzero()]
        should_stop = T.all(is_mistake)

        # continue generating perturbation only for correctly classified
        inputs_subset = inputs[current_ind]
        perturbation_subset = perturbation[current_ind]
        labels_subset = labels[current_ind]
        batch_subset = T.arange(inputs_subset.shape[0])

        x_adv = inputs_subset + perturbation_subset
        logits = model(x_adv)
        corrects = logits[batch_subset, labels_subset]
        jac = jacobian(logits, x_adv, num_classes)

        # deepfool
        f = logits - T.shape_padright(corrects)
        w = jac - T.shape_padaxis(jac[batch_subset, labels_subset], axis=1)
        reduce_ind = range(2, inputs.ndim + 1)
        if norm == 'l2':
            dist = T.abs_(f) / w.norm(2, axis=reduce_ind)
        else:
            dist = T.abs_(f) / T.sum(T.abs_(w), axis=reduce_ind)
        # remove correct targets
        dist = T.set_subtensor(dist[batch_subset, labels_subset],
                               T.constant(np.inf))
        l = T.argmin(dist, axis=1)
        dist_l = dist[batch_subset, l].dimshuffle(0, 'x', 'x', 'x')
        # avoid numerical instability and clip max value
        if clip_dist is not None:
            dist_l = T.clip(dist_l, 0, clip_dist)
        w_l = w[batch_subset, l]
        if norm == 'l2':
            reduce_ind = range(1, inputs.ndim)
            perturbation_upd = dist_l * w_l / w_l.norm(
                2, reduce_ind, keepdims=True)
        else:
            perturbation_upd = dist_l * T.sgn(w_l)
        perturbation = ifelse(
            should_stop, perturbation,
            T.inc_subtensor(perturbation[current_ind], perturbation_upd))
        return perturbation, scan_module.until(should_stop)
Example #7
def margin_sensitivity(inputs, logits, labels, num_outputs, ord=2):
    """Compute margin sensitivity (proposed regularization).
    """
    assert ord in [2, np.inf]

    batch_size = inputs.shape[0]
    batch_indices = T.arange(batch_size)

    # shape: labels, batch, channels, height, width
    jac = jacobian(logits, inputs, num_outputs=num_outputs, pack_dim=0)

    # basically jac_labels = jac[labels, batch_indices]
    jac_flt = jac.reshape(
        (-1, inputs.shape[1], inputs.shape[2], inputs.shape[3]))
    jac_labels_flt = jac_flt[labels * batch_size + batch_indices]
    jac_labels = jac_labels_flt.reshape(inputs.shape)

    w = jac - T.shape_padaxis(jac_labels, axis=0)
    reduce_ind = range(2, inputs.ndim + 1)
    if ord == 2:
        dist = T.sum(w**2, axis=reduce_ind)
    elif ord == np.inf:
        dist = T.sum(T.abs_(w), axis=reduce_ind)
    else:
        raise ValueError

    l = T.argmax(dist, axis=0)
    l = gradient.disconnected_grad(l)

    corrects = logits[batch_indices, labels]
    others = logits[batch_indices, l]

    corrects_grad = T.grad(corrects.sum(), inputs)
    others_grad = T.grad(others.sum(), inputs)
    reduce_ind = range(1, inputs.ndim)
    if ord == 2:
        return T.sum((corrects_grad - others_grad)**2, axis=reduce_ind)
    elif ord == np.inf:
        return T.sum(T.abs_(corrects_grad - others_grad), axis=reduce_ind)
    else:
        raise ValueError
Example #8
    def correct_cov(self, u_imu, y_cor):
        J = torch.zeros(u_imu.shape[0], 9, 6)
        for i in range(u_imu.shape[0]):
            J[i] = jacobian(u_imu[i], y_cor)
        return J
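The jacobian helper used in the PyTorch snippets above is not shown; the following is a rough sketch of how a dense Jacobian of a vector output with respect to a tensor input can be assembled row by row with torch.autograd.grad. The name and signature here are an assumption for illustration, not the original helper.

import torch

def jacobian_rows(y, x):
    # Illustrative only: returns the (y.numel(), x.numel()) Jacobian of y w.r.t. x.
    # Both tensors must be connected through an autograd graph.
    y_flat = y.reshape(-1)
    rows = []
    for i in range(y_flat.numel()):
        (g,) = torch.autograd.grad(y_flat[i], x, retain_graph=True, allow_unused=True)
        rows.append(g.reshape(-1) if g is not None else torch.zeros_like(x).reshape(-1))
    return torch.stack(rows)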
Example #9
import numpy as np
import utils

theta1 = 20
theta2 = -10
theta3 = 12

j = utils.jacobian(0.5, 0.5, np.array([theta1, theta2, theta3]) * np.pi / 180)
print(j)
Example #10
def train_network(network, datagen, epochs, metric_loss, reconstruction_loss, 
                  optimizer, mu=1, contractive=False, device=None):
    """[summary]

    Args:
        network ([type]): [description]
        datagen ([type]): [description]
        epochs ([type]): [description]
        metric_loss ([type]): [description]
        reconstruction_loss ([type]): [description]
        optimizer ([type]): [description]
        mu (int, optional): [description]. Defaults to 1.
        contractive (bool, optional): [description]. Defaults to False.
        device ([type], optional): [description]. Defaults to None.

    Returns:
        [type]: [description]
    """
    
    recon_losses = []
    metric_losses = []
    total_losses = []

    print("using %s" % device)

    network = network.to(device)
    network.train() 

    for epoch in range(epochs):

        # generate the batch
        batch, labels = datagen.generate_batch()
        batch = tuple(x.to(device) for x in batch) 
        labels = labels.to(device)

        # zero gradients
        optimizer.zero_grad()

        # forward step
        outputs = network(*batch)

        # separate into encoded and reconstructed 
        encoded, reconstructed = list(zip(*outputs))
        # encoded = [outputs[0] for output in outputs]
        # reconstructed = [i[1] for i in outputs]
        m_loss = metric_loss(*encoded, labels) * mu 

        j_norm = 0

        if contractive:
            for i in range(len(encoded)):
                J = jacobian(batch[i], encoded[i])
                j_norm += torch.norm(J)
            print("j_norm: ", j_norm)

        recon_loss = reconstruction_loss(batch, reconstructed, additional_losses=j_norm) * (1-mu)

        # add the losses together
        loss = recon_loss + m_loss 

        # append losses to history. 
        total_losses.append(loss.item())
        recon_losses.append(recon_loss.item())
        metric_losses.append(m_loss.item())

        # print("Iteration %s. Metric Loss: %s | Reconstruction Loss: %s" % (epoch, round(m_loss.item(), 3), round(recon_loss.item(), 3)))
        if epoch % (epochs/100) == 0 and epoch != 0:
            print("Iteration %s. Metric Loss: %s | Reconstruction Loss: %s" % (epoch, round(m_loss.item(), 3), round(recon_loss.item(), 3)))
            
        # backward step
        loss.backward() 
        optimizer.step() 

    return network, total_losses, recon_losses, metric_losses
Example #11
def find_next_target(inputs,
                     logits,
                     labels,
                     random=False,
                     uniform=False,
                     label_smoothing=0.0,
                     attack_topk=None,
                     ord=2):
    """Find closest decision boundary as in Deepfool algorithm"""
    ndims = inputs.get_shape().ndims
    batch_size = tf.shape(logits)[0]
    num_classes = tf.shape(logits)[1]
    batch_indices = tf.range(batch_size)
    labels_idx = batch_indices * num_classes + labels
    if not random:
        logits_flt = tf.reshape(logits, (-1, ))
        logits_labels = tf.expand_dims(tf.gather(logits_flt, labels_idx), 1)
        grad_labels = tf.gradients(logits_labels, inputs)[0]

        if attack_topk is not None:
            topk_logits, topk_indices = tf.nn.top_k(logits, k=attack_topk)
            topk_jac = jacobian(topk_logits, inputs)

            f = topk_logits - logits_labels
            w = topk_jac - tf.expand_dims(grad_labels, 1)
        else:
            jac = jacobian(logits, inputs)
            f = logits - logits_labels
            w = jac - tf.expand_dims(grad_labels, 1)

        reduce_ind = list(range(2, ndims + 1))
        if ord == 2:
            dist = tf.abs(f) / tf.sqrt(tf.reduce_sum(w**2, axis=reduce_ind))
        else:
            dist = tf.abs(f) / tf.reduce_sum(tf.abs(w), axis=reduce_ind)
        if attack_topk is not None:
            labels_tile = tf.expand_dims(labels, 1)
            topk_labels_onehot = tf.equal(
                topk_indices, tf.tile(labels_tile, [1, attack_topk]))
            dist = tf.where(topk_labels_onehot, np.inf * tf.ones_like(dist),
                            dist)
        else:
            labels_onehot = tf.cast(
                tf.one_hot(labels, num_classes, dtype=tf.int32), tf.bool)
            dist = tf.where(labels_onehot, np.inf * tf.ones_like(dist), dist)

        l = tf.cast(tf.argmin(dist, axis=1), batch_indices.dtype)
        assert 0 <= label_smoothing < 1
        if label_smoothing > 0.0:
            l_onehot = tf.one_hot(l, num_classes, dtype=tf.float32)
            labels_onehot = tf.one_hot(labels, num_classes, dtype=tf.float32)
            p = (1 - label_smoothing) * l_onehot + (
                1.0 - l_onehot - labels_onehot) * label_smoothing / tf.cast(
                    (num_classes - 2), tf.float32)
            log_p = tf.log(p)
            l = tf.cast(tf.multinomial(log_p, 1), batch_indices.dtype)
            l = tf.reshape(l, (-1, ))

        if attack_topk is not None:
            topk_indices_flt = tf.reshape(topk_indices, (-1, ))
            l_indices = batch_indices * attack_topk + l
            targets = tf.gather(topk_indices_flt, l_indices)
        else:
            targets = l
    else:
        targets = random_targets(logits, labels, uniform=uniform)
    return targets
Example #12
    def log_p(self, reward):
        r = self.total(reward)
        g = utils.grad(r, self.u)
        H = utils.jacobian(g, self.u)
        return 0.5 * tt.dot(g, tt.dot(tn.matrix_inverse(H), g)) + 0.5 * tt.log(
            abs(tn.det(-H)))
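In matrix form, the value returned above is 0.5 * gᵀ H⁻¹ g + 0.5 * log|det(−H)|, where g is the gradient and H the Hessian (the Jacobian of the gradient) of the total reward with respect to the controls self.u; this has the form of a Laplace approximation of the log-likelihood around the optimum of r.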
Example #13
ul = renormFn(ul)

for no in reversed(range(1)):

    ur = UR[no]
    dl = DL[no]
    dr = DR[no]

    upper = torch.cat([ul, ur], -1)
    down = torch.cat([dl, dr], -1)
    ul = torch.cat([upper, down], -2)

y = ul[:1, 0, 0, :]

grad = utils.jacobian(y, img)

H = grad[0, :targetSize[-1], :targetSize[-1]]

plt.imshow(H.detach())
plt.axis('off')
plt.savefig(rootFolder + 'pic/grad.png', bbox_inches="tight", pad_inches=0)

plt.rc('font', size=14)
plt.axis('on')

colormap = plt.cm.Spectral
colormap = plt.cm.nipy_spectral

from cycler import cycler