Example #1
import tensorflow as tf  # TensorFlow 1.x API (tf.log, tf.parallel_stack)


def _get_interp_idxs_weights_2d(x, xp, y, yp, x_log_spacing=False):
    """Return neighbor indices and normalized weights for 2-D linear interpolation on a regular grid."""
    with tf.name_scope('get_interp_idxs_weights_2d'):
        if x_log_spacing:
            x = tf.log(x)
            xp = tf.log(xp)

        with tf.control_dependencies([yp]):
            xp = tf.tile(xp, tf.shape(yp))  # tile xp to yp's (dynamic) length
        xyp = tf.expand_dims(tf.parallel_stack([xp, yp]), 1)
        xy0 = tf.reshape(tf.parallel_stack([x[0], y[0]]), [2, 1, 1])
        xy1 = tf.reshape(tf.parallel_stack([x[1], y[1]]), [2, 1, 1])

        spacing = xy1 - xy0
        ind_grid = (xyp - xy0) / spacing
        ind = tf.cast(ind_grid, tf.int32) + [[[0], [1]]]

        max_ind = [[[x.shape[0].value - 1]], [[y.shape[0].value - 1]]]
        #max_ind = [[[x.shape[0] - 1]], [[y.shape[0] - 1]]]
        ind = tf.minimum(ind, max_ind)
        ind_float = tf.cast(ind, tf.float64)

        xy_grid = ind_float * spacing + xy0

        weight = tf.abs(xyp - xy_grid) / spacing
        if x_log_spacing:
            weight = tf.parallel_stack([tf.exp(weight[0]), weight[1]])
        weight = 1. - weight

        weight_sum = tf.reduce_sum(weight, axis=1, keepdims=True)
        weight /= weight_sum

        return ind, weight
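
A minimal usage sketch of the helper above (the grids and query points are made up; xp is a single query value that gets tiled to yp's length, and in TensorFlow 1.x the graph has to be evaluated in a session):

    import numpy as np
    import tensorflow as tf

    x = tf.constant(np.linspace(1.0, 10.0, 10))     # regular grid along the first axis
    y = tf.constant(np.linspace(0.0, 1.0, 5))       # regular grid along the second axis
    xp = tf.constant(np.array([2.5]))                # single query x, tiled to match yp
    yp = tf.constant(np.array([0.3, 0.7, 0.9]))      # query y values

    ind, weight = _get_interp_idxs_weights_2d(x, xp, y, yp)

    with tf.Session() as sess:
        idxs, w = sess.run([ind, weight])
        print(idxs.shape, w.shape)                   # both (2, 2, 3): axis, neighbor, query
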
Example #2
 def aggregate(self, gradients):
     # Assertion
     assert len(gradients) > 0, "Empty list of gradients to aggregate"
     # Computation: flatten every gradient so the native op receives rank-1 tensors
     reshape_gradients = list(gradients)  # copy, so the caller's list is not mutated
     shape = gradients[0].shape
     if len(shape) == 2 and shape[1] == 10:
         for i in range(len(gradients)):
             # the [5408, 10] output-layer gradient is flattened to [54080]
             reshape_gradients[i] = tf.reshape(gradients[i], [54080])
     else:
         for i in range(len(gradients)):
             reshape_gradients[i] = tf.reshape(gradients[i], [-1])
     grad_avg = native.instantiate_op(type(self).co_name,
                                      tf.parallel_stack(reshape_gradients),
                                      f=self.__nbbyzwrks,
                                      m=self.__nbselected)
     # lib = tf.load_op_library('/home/starly/Desktop/Fed/autodist/autodist/native/op_krum.so')
     # grad_avg = lib.Krum(gradients=tf.parallel_stack(gradients), f=self.__nbbyzwrks, m=self.__nbselected)
     if len(shape) == 2 and shape[1] == 10:
         #x = tf.placeholder(tf.float32, shape=[10,None], name="tmp")
         #grad_avg = tf.reshape(grad_avg, shape=[tf.shape(x)[0],10])
         grad_avg = tf.reshape(grad_avg, shape=[5408, 10])
     else:
         grad_avg = tf.reshape(grad_avg, shape)
     return grad_avg
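
The hard-coded sizes above are just the flattened and original shapes of the last layer's gradient (5408 * 10 = 54080). A stand-alone sketch of that flatten/aggregate/restore round trip, with the native Krum op replaced by a plain mean purely for illustration:

    import numpy as np
    import tensorflow as tf

    grads = [tf.constant(np.random.randn(5408, 10).astype(np.float32)) for _ in range(3)]
    flat = [tf.reshape(g, [54080]) for g in grads]   # 5408 * 10 = 54080
    stacked = tf.parallel_stack(flat)                # [3, 54080], one row per worker
    agg = tf.reduce_mean(stacked, axis=0)            # stand-in for the native GAR op
    agg = tf.reshape(agg, [5408, 10])                # restore the original 2-D shape

    with tf.Session() as sess:
        print(sess.run(agg).shape)                   # (5408, 10)
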
Example #3
 def aggregate(self, gradients):
     # Assertion
     assert len(gradients) > 0, "Empty list of gradients to aggregate"
     # Computation
     gradients = tf.parallel_stack(gradients)
     return tf.py_func(self._aggregate, [gradients],
                       gradients.dtype,
                       stateful=False,
                       name="GAR_bulyan")
Example #4
 def aggregate(self, gradients):
     with tf.name_scope("GAR_krum_tf"):
         # Assertion
         assert len(gradients) > 0, "Empty list of gradients to aggregate"
         # Distance computations
         distances = []
         for i in range(self.__nbworkers - 1):
             dists = list()
             for j in range(i + 1, self.__nbworkers):
                 sqr_dst = tf.reduce_sum(
                     tf.squared_difference(gradients[i], gradients[j]))
                 dists.append(
                     tf.negative(
                         tf.where(tf.is_finite(sqr_dst), sqr_dst,
                                  tf.constant(np.inf, dtype=sqr_dst.dtype)))
                 )  # Use of 'negative' to get the smallest distances and score indexes in 'nn.top_k'
             distances.append(dists)
         # Score computations
         scores = []
         for i in range(self.__nbworkers):
             dists = []
             for j in range(self.__nbworkers):
                 if j == i:
                     continue
                 if j < i:
                     dists.append(distances[j][i - j - 1])
                 else:
                     dists.append(distances[i][j - i - 1])
             dists = tf.parallel_stack(dists)
             dists, _ = tf.nn.top_k(dists,
                                    k=(self.__nbworkers - self.__nbbyzwrks - 2),
                                    sorted=False)
             scores.append(tf.reduce_sum(dists))
         # Average of the 'nbselected' smallest scoring gradients
         gradients = tf.parallel_stack(gradients)
         scores = tf.parallel_stack(scores)
         _, indexes = tf.nn.top_k(scores, k=self.__nbselected, sorted=False)
         return tf.reduce_mean(tf.gather(gradients, indexes), axis=0)
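
The triangular indexing into 'distances' above can be checked with a small pure-Python sketch (tuples stand in for the squared distances; this is only an illustration, not part of the original class). distances[i] holds d(i, j) for j > i, so d(i, j) with j < i is looked up as distances[j][i - j - 1]:

    n = 4
    distances = [[(i, j) for j in range(i + 1, n)] for i in range(n - 1)]
    for i in range(n):
        for j in range(n):
            if j == i:
                continue
            pair = distances[j][i - j - 1] if j < i else distances[i][j - i - 1]
            assert pair == (min(i, j), max(i, j))
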
Example #5
 def aggregate(self, gradients):
     # Assertion
     assert len(gradients) > 0, "Empty list of gradients to aggregate"
     # Computation: flatten every gradient so the native op receives rank-1 tensors
     reshape_gradients = list(gradients)  # copy, so the caller's list is not mutated
     shape = gradients[0].shape
     if len(shape) == 2 and shape[1] == 10:
         for i in range(len(gradients)):
             # the [5408, 10] output-layer gradient is flattened to [54080]
             reshape_gradients[i] = tf.reshape(gradients[i], [54080])
     else:
         for i in range(len(gradients)):
             reshape_gradients[i] = tf.reshape(gradients[i], [-1])
     grad_avg = native.instantiate_op(type(self).co_name,
                                      tf.parallel_stack(reshape_gradients),
                                      f=self.__nbbyzwrks,
                                      m=self.__multikrum)
     if len(shape) == 2 and shape[1] == 10:
         grad_avg = tf.reshape(grad_avg, shape=[5408, 10])
     else:
         grad_avg = tf.reshape(grad_avg, shape)
     return grad_avg
Example #6
    def k_full(self,
               input1,
               w_input1,
               input2=None,
               w_input2=None):  #k_type: data_data, data_test, test_test
        input1 = self._input_layer_normalization(input1)  #by row

        if input2 is None:
            input2 = input1
            w_input2 = w_input1
        else:
            input2 = self._input_layer_normalization(input2)
            #w_input2 = self._input_layer_normalization(w_input2)

        with tf.name_scope("k_full"):
            cov_init = tf.matmul(
                input1, input2, transpose_b=True
            ) / input1.shape[1].value  #100,100; den is a tensor
            #w_cov = tf.matmul(w_input1, w_input2, transpose_b=True)/ w_input1.shape[1].value

            self.k_diag(input1, w_input1)  #110 vars
            q_aa_init = self.layer_qaa_dict[0]  # L entries, one per layer

            q_ab = cov_init
            q_ab = self.weight_var * q_ab + self.bias_var  #100, 100
            #corr = q_ab / q_aa_init[0]
            corr = tf.cast(q_ab, tf.float32) / tf.cast(q_aa_init[0],
                                                       tf.float32)  #100, 100

            if 32 > 1:  # always true in this snippet, so the batched path is always taken
                batch_size, batch_count = self._get_batch_size_and_count(
                    input1, input2)

                with tf.name_scope("q_ab"):
                    q_ab_all = []
                    for b_x in range(batch_count):
                        #tf.logging.info('computing kernel for batch:{}'.format(b_x))
                        with tf.name_scope("batch_%d" % b_x):
                            corr_flat_batch = corr[batch_size * b_x:batch_size * (b_x + 1), :]  # [batch_size, 100]
                            corr_flat_batch = tf.reshape(corr_flat_batch, [-1])  # [batch_size * 100]
                            #w_cov_flat_batch = tf.reshape(w_cov[batch_size * b_x : batch_size * (b_x + 1), :], [-1])
                            for l in range(self.depth):
                                with tf.name_scope("layer_%d" % l):
                                    q_aa = self.layer_qaa_dict[l]
                                    q_ab = interp1.interp_lin_2d(
                                        x=self.var_aa_grid,
                                        y=self.corr_ab_grid,
                                        z=self.qab_grid,
                                        xp=q_aa,
                                        yp=tf.cast(corr_flat_batch,
                                                   tf.float64))  #10000

                                    q_ab = self.weight_var * q_ab + self.bias_var
                                    corr_flat_batch = q_ab / self.layer_qaa_dict[l + 1][0]

                            q_ab_all.append(q_ab)

                    q_ab_all = tf.parallel_stack(q_ab_all)
            else:
                with tf.name_scope("q_ab"):
                    corr_flat = tf.reshape(corr, [-1])  #10000
                    #w_cov_flat = tf.reshape(w_cov, [-1])  # w_cov is only defined if the commented-out matmul above is enabled
                    for l in range(self.depth):
                        with tf.name_scope("layer_%d" % l):
                            q_aa = self.layer_qaa_dict[l]
                            q_ab = interp1.interp_lin_2d(x=self.var_aa_grid,
                                                         y=self.corr_ab_grid,
                                                         z=self.qab_grid,
                                                         xp=q_aa,
                                                         yp=tf.cast(
                                                             corr_flat,
                                                             tf.float64))
                            if l == self.depth - 1:
                                q_ab = self.weight_var * q_ab + self.bias_var  #+ self.weight_var * w_cov_flat)
                            else:
                                q_ab = self.weight_var * q_ab + self.bias_var
                                corr_flat = q_ab / self.layer_qaa_dict[l + 1][0]
                        q_ab_all = q_ab

        return tf.reshape(q_ab_all, cov_init.shape, "qab")