Example #1
def acc(output, label):
    """Return the worst per-sample accuracy in a batch.

    Per-sample accuracy is 1 - (L1 distance to the label) / 96, where 96
    is the number of elements in each label vector.
    """
    min_acc = None
    for i in range(output.shape[0]):
        # L1 distance between prediction and label for sample i
        x = nd.sum(nd.abs(nd.subtract(output[i], label[i])), axis=0)
        sample_acc = 1.0 - x.asscalar() / 96
        if min_acc is None or sample_acc < min_acc:
            min_acc = sample_acc
    return min_acc
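
A minimal smoke test for acc, assuming nd is mxnet.ndarray and 96-element
label vectors (the divisor used above):

    import mxnet.ndarray as nd

    output = nd.zeros((4, 96))
    label = nd.ones((4, 96)) * 0.5
    print(acc(output, label))  # each row gives 1 - 48/96 = 0.5, so min is 0.5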
Example #2
    def where(self, mask, tensor_in_1, tensor_in_2):
        """
        Apply a boolean selection mask to the elements of the input tensors.

        Example:

            >>> import pyhf
            >>> pyhf.set_backend(pyhf.tensor.mxnet_backend())
            >>> pyhf.tensorlib.where(
            ...   pyhf.tensorlib.astensor([1, 0, 1]),
            ...   pyhf.tensorlib.astensor([1, 1, 1]),
            ...   pyhf.tensorlib.astensor([2, 2, 2]))
            ...
            <BLANKLINE>
            [1. 2. 1.]
            <NDArray 3 @cpu(0)>

        Args:
            mask (bool): Boolean mask (boolean or tensor object of booleans)
            tensor_in_1 (Tensor): Tensor object
            tensor_in_2 (Tensor): Tensor object

        Returns:
            MXNet NDArray: The result of the mask being applied to the tensors.
        """
        mask = self.astensor(mask)
        tensor_in_1 = self.astensor(tensor_in_1)
        tensor_in_2 = self.astensor(tensor_in_2)
        return nd.add(
            nd.multiply(mask, tensor_in_1),
            nd.multiply(nd.subtract(1, mask), tensor_in_2),
        )
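
For reference, MXNet also ships a native nd.where with the same selection
semantics as the arithmetic fallback above; a minimal sketch:

    import mxnet.ndarray as nd

    mask = nd.array([1, 0, 1])
    a = nd.array([1, 1, 1])
    b = nd.array([2, 2, 2])
    print(nd.where(mask, a, b))  # [1. 2. 1.]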
Example #3
    def sample_neighbours(self, data, query_network):
        """Return the k stored samples whose keys are nearest to each query."""
        num_stored_samples = self.key_memory.shape[0]
        batch_size = data[0].shape[0]

        query = query_network(*data).as_in_context(mx.cpu())

        # Pairwise Euclidean distances between every query row and every
        # stored key: repeat each query once per key, tile the key memory
        # once per query, then reduce over the feature axis.
        vec1 = nd.repeat(query, repeats=num_stored_samples, axis=0)
        vec2 = nd.tile(self.key_memory, reps=(batch_size, 1))
        diff = nd.subtract(vec1, vec2)
        sq = nd.square(diff)
        batch_sum = nd.sum(sq, exclude=1, axis=0)
        sqrt = nd.sqrt(batch_sum)

        dist = nd.reshape(sqrt, shape=(batch_size, num_stored_samples))

        # Neighbours are the k smallest distances, so sort ascending;
        # topk's default would return the k farthest samples instead.
        sample_ind = nd.topk(dist, k=self.k, axis=1, ret_typ="indices",
                             is_ascend=True)
        num_outputs = len(self.label_memory)

        sample_labels = [
            self.label_memory[i][sample_ind] for i in range(num_outputs)
        ]
        sample_batches = [[
            self.value_memory[j][sample_ind]
            for j in range(len(self.value_memory))
        ], sample_labels]

        return sample_batches
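
The repeat/tile pattern above computes all pairwise distances without an
explicit loop; a standalone sketch with made-up shapes, assuming only mxnet:

    import mxnet.ndarray as nd

    batch, stored, dim = 2, 3, 4
    query = nd.random.uniform(shape=(batch, dim))
    keys = nd.random.uniform(shape=(stored, dim))

    v1 = nd.repeat(query, repeats=stored, axis=0)  # (batch*stored, dim)
    v2 = nd.tile(keys, reps=(batch, 1))            # (batch*stored, dim)
    dist = nd.sqrt(nd.sum(nd.square(v1 - v2), axis=1))
    dist = dist.reshape((batch, stored))           # one row of distances per query
    print(dist.shape)  # (2, 3)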
Example #4
    def dense_bw(self, input_layer, input_error):
        """Fully connected layer backward pass with an SGD update."""
        assert self.batch_size == input_error.shape[0]

        # Error signal at this layer: incoming error times the activation
        # derivative evaluated at the pre-activation z.
        self.d_act_z = self.d_act(self.z)
        self.delta_b = nd.multiply(input_error, self.d_act_z)

        # Weight gradient: x^T . delta
        x = nd.transpose(input_layer)
        self.delta_W = nd.dot(x, self.delta_b)

        # Error propagated to the previous layer: delta . W^T
        output_bp = nd.dot(self.delta_b, nd.transpose(self.W))

        # Bias gradient: sum the error signal over the batch.
        self.delta_b = nd.sum(self.delta_b, axis=0)

        # SGD step, averaging the gradients over the batch.
        self.W = nd.subtract(
            self.W, self.delta_W * (self.learning_rate / self.batch_size))
        self.b = nd.subtract(
            self.b, self.delta_b * (self.learning_rate / self.batch_size))

        return output_bp
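
For reference, the gradients dense_bw computes are the standard dense-layer
backprop identities, restated here as NumPy-style pseudocode (not the class
API above):

    delta = input_error * d_act(z)  # error signal, shape (batch, out)
    dW    = x.T @ delta             # weight gradient, shape (in, out)
    db    = delta.sum(axis=0)       # bias gradient, shape (out,)
    dx    = delta @ W.T             # error for the previous layer, shape (batch, in)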
Example #6
def partial_trim(epoch, v, net, f):
    """Partial-knowledge trimmed-mean attack: craft the first f gradients."""
    vi_shape = v[0].shape

    # Estimate the distribution of the compromised workers' gradients.
    all_grads = nd.concat(*v, dim=1)
    adv_grads = all_grads[:, :f]
    e_mu = nd.mean(adv_grads, axis=1)  # coordinate-wise mean
    e_sigma = nd.sqrt(
        nd.sum(nd.square(nd.subtract(adv_grads, e_mu.reshape(-1, 1))), axis=1)
        / f)  # coordinate-wise standard deviation

    for i in range(f):
        # Push each compromised gradient 3-4 standard deviations away from
        # the mean, against the sign of the mean, with some randomness.
        v[i] = (
            e_mu - nd.multiply(e_sigma, nd.sign(e_mu)) *
            (3. + nd.random.uniform(shape=e_sigma.shape))).reshape(vi_shape)

    return v
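
A minimal smoke test for partial_trim, assuming nd is mxnet.ndarray and each
worker gradient is a (d, 1) column; epoch and net are unused here, so
placeholders suffice:

    import mxnet.ndarray as nd

    d, m, f = 5, 4, 2  # gradient size, total workers, compromised workers
    v = [nd.random.normal(shape=(d, 1)) for _ in range(m)]
    v = partial_trim(epoch=0, v=v, net=None, f=f)
    print(v[0].shape)  # (5, 1): the first f gradients are now crafted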
Example #7
def eval_model():
    ctx = [mx.gpu(i) for i in range(1)]
    test_batch_size = 1

    net = get_model()
    net.initialize(ctx=ctx)
    net.collect_params().reset_ctx(ctx)
    net.load_params('ckp/CSRNet-5.params', ctx=ctx)
    loss = FocalLoss()

    mae_arr = []
    for X, y in data_iter(test_batch_size, data_test_index, data_test_im, data_train_gt, ctx):
        X = X.copyto(mx.gpu(0))
        y = y.copyto(mx.gpu(0))
        predict = net(X)
        # Per-image MAE: absolute difference between predicted and labelled counts.
        mae = nd.subtract(nd.sum(predict), nd.sum(y))
        mae_arr.append(abs(mae.asscalar()))
        ls = loss(predict, y)
        print('label car num is : {}, predict car num is : {}'.format(
            nd.sum(y).asscalar(), nd.sum(predict).asscalar()))
        print("predict loss: %f  mae: %f" % (ls.asscalar(), abs(mae.asscalar())))
    print('average mae is : {}'.format(np.mean(np.array(mae_arr))))
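
The count-based MAE used above can be checked in isolation; a minimal sketch,
assuming nd is mxnet.ndarray:

    import mxnet.ndarray as nd

    predict = nd.array([[0.4, 0.6], [1.0, 1.0]])  # toy predicted density map
    y = nd.array([[0.0, 1.0], [1.0, 2.0]])        # toy ground-truth density map
    mae = nd.subtract(nd.sum(predict), nd.sum(y))
    print(abs(mae.asscalar()))  # |3.0 - 4.0| = 1.0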
def mse(o, y):
    """Mean squared error between output o and target y."""
    return nd.mean(nd.square(nd.subtract(o, y)))
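
A quick numeric check of mse, assuming nd is mxnet.ndarray:

    import mxnet.ndarray as nd

    o = nd.array([1.0, 2.0])
    y = nd.array([0.0, 2.0])
    print(mse(o, y).asscalar())  # (1^2 + 0^2) / 2 = 0.5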