Example #1
import networkx as nx
import numpy as np
from networkx import is_weighted


def efficiency(G, wei_loc=None):
    """Efficiency of a network
    https://groups.google.com/forum/#!topic/networkx-discuss/ycxtVuEqePQ"""
    avg = 0.0
    graph = G.copy()
    n = len(graph)

    if is_weighted(graph):  # efficiency_wei
        for (u, v, d) in graph.edges(data=True):
            # Build the connection-length matrix: length = 1 / weight
            d['weight'] = 1.0 / d['weight']
        if (wei_loc is None):
            for node in graph:
                path_length = nx.single_source_dijkstra_path_length(
                    graph, node)
                avg += sum(1.0 / v for v in path_length.values() if v != 0)
            avg *= 1.0 / (n * (n - 1))
        else:
            # dist_inv_wei() is a helper defined elsewhere in the source
            # module; it returns the inverse shortest-path distance matrix
            mat = dist_inv_wei(graph)
            e = np.multiply(mat, wei_loc)**(1 / 3.0)
            avg = float(np.sum(e))
            avg *= 1.0 / (n * (n - 1))  # local efficiency
    else:  # efficiency_bin
        for node in graph:
            path_length = nx.single_source_shortest_path_length(graph, node)
            avg += sum(1.0 / v for v in path_length.values() if v != 0)
        avg *= 1.0 / (n * (n - 1))
    return avg
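
A minimal usage sketch (not part of the original; the graph below is hypothetical): build a small weighted graph and compute its global efficiency.

G = nx.Graph()
G.add_edge(0, 1, weight=2.0)
G.add_edge(1, 2, weight=1.0)
G.add_edge(0, 2, weight=4.0)
print(efficiency(G))  # average inverse shortest-path length over all node pairs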
Example #2
    def shrink_layer(self, i, data, factor=1, difference=0):
        if i == len(self.layers) - 1 or i == -1:
            raise IndexError('Cannot shrink output layer')
        out_size, _ = utils.get_layer_input_output_size(self.layers[i])
        new_layer, new_size = utils.change_layer_output(self.layers[i],
                                                        factor=factor,
                                                        difference=difference)
        if new_layer is None and self.is_last_conv(i):
            return False
        self.replace_layer(i, new_layer)
        if self.layers[i] is None:
            # The layer was removed entirely: delete the following
            # parameter-free layers, then rewire the next weighted
            # layer's input to the new size
            while not utils.is_weighted(self.layers[i]):
                print('deleting', self.layers[i])
                del self.layers[i]
            new_layer = utils.change_layer_input(self.layers[i], new_size)
            self.replace_layer(i, new_layer)
            return False
        else:
            # Propagate the new output size to downstream layers whose input
            # is modifiable, stopping after the first non-BatchNorm layer
            while i < len(self.layers) - 1:
                i += 1
                if utils.is_input_modifiable(self.layers[i]):
                    _, in_size = utils.get_layer_input_output_size(
                        self.layers[i])
                    scale = in_size // out_size
                    new_layer = utils.change_layer_input(
                        self.layers[i], new_size * scale)
                    self.replace_layer(i, new_layer)
                    if not isinstance(self.layers[i], nn.BatchNorm2d):
                        break
            return True
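
The utils.is_weighted helper is not shown in these examples. A minimal sketch of what such a predicate might look like for PyTorch modules (an assumption, not the project's actual implementation):

import torch.nn as nn

def is_weighted(layer):
    # Hypothetical: treat a module as weighted if it is a layer type whose
    # output neurons carry learnable weights worth pruning
    return isinstance(layer, (nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d))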
Example #3
import networkx as nx
import numpy as np
from networkx import is_weighted


def local_efficiency(graph):
    """Local efficiency vector
    efficiency_vector = local_efficiency(G)

    This function computes, for each node, the efficiency of its
    immediate neighborhood; it is related to the clustering coefficient.

    Inputs: G,      the graph on which to compute the local efficiency

    Output: efficiency_vector,  one local efficiency value per node
    in the graph

    Algorithm: algebraic path count

    Reference: Latora and Marchiori (2001) Phys Rev Lett 87:198701.
    Mika Rubinov, UNSW, 2008-2010
    Jean-Christophe KETZINGER, INSERM UMRS678 PARIS, 2013

    Modification history:
    2010: Original version from BCT (Matlab)
    Python Conversion Jean-Christophe KETZINGER, INSERM UMRS678, 2013
    """
    assert isinstance(graph, nx.Graph)
    efficiency_vector = []
    for node in graph:
        # Get the neighbors of the node of interest; the list conversion is
        # required in networkx 2.x, where neighbors() returns an iterator
        neighbors = sorted(graph.neighbors(node))
        # Create the subgraph composed exclusively with neighbors
        SG = nx.subgraph(graph, neighbors)
        # make sure the neighborhood is larger than a single edge
        if len(neighbors) > 2:
            if is_weighted(SG):
                GuV = []
                GVu = []
                # to_numpy_array replaces to_numpy_matrix (removed in
                # networkx 3.0); node labels are assumed to be integer
                # indices into this matrix
                GWDegree = nx.to_numpy_array(graph)
                for neighbor in neighbors:
                    GuV.append(GWDegree[node, neighbor])
                    GVu.append(GWDegree[neighbor, node])
                GVuGuV = np.outer(GVu, GuV)
                node_efficiency = efficiency(SG, GVuGuV)
                # compute the global efficiency of this subgraph
                efficiency_vector.append(node_efficiency)
            else:
                efficiency_vector.append(efficiency(SG))
        else:
            efficiency_vector.append(0.0)  # too few neighbors: set its efficiency to 0
    return efficiency_vector
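
A hedged usage sketch, assuming the efficiency() helper from Example #1 is in scope and node labels are consecutive integers:

G = nx.Graph()
G.add_edges_from([(0, 1), (1, 2), (2, 0), (2, 3)])
print(local_efficiency(G))  # one value per node; 0.0 for nodes with fewer than 3 neighbors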
Example #4
    def get_Y(self, data, i):
        # Advance i to the next weighted layer and truncate the model there
        i += 1
        while i < len(self.layers) and not utils.is_weighted(self.layers[i]):
            i += 1
        trunc_model = self.layers[:i + 1]
        trunc_model = trunc_model.eval()

        print(trunc_model)

        # Run the truncated model over the whole dataset and stack the outputs
        Ys = []
        for batch in data:
            x = batch[0]
            x = x.to(self.device)
            Y = trunc_model(x)
            Ys.append(Y.detach().cpu())
        Y = torch.cat(Ys, dim=0).numpy()

        return Y
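
Since only detached outputs are collected, the forward passes could also run under torch.no_grad() to avoid building autograd graphs; a hedged variant of the loop above (same attribute names assumed):

        Ys = []
        with torch.no_grad():  # no gradients needed for the collected outputs
            for batch in data:
                x = batch[0].to(self.device)
                Ys.append(trunc_model(x).cpu())
        Y = torch.cat(Ys, dim=0).numpy()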
Example #5
    def shrink_layer(self,
                     i,
                     data,
                     factor=1,
                     difference=0,
                     pruned_neurons=None,
                     A=None,
                     mean_Z=None):
        if i == len(self.layers) - 1 or i == -1:
            raise IndexError('Cannot shrink output layer')

        outsize, _ = utils.get_layer_input_output_size(self.layers[i])
        if pruned_neurons is None:
            new_size = int(outsize * factor - difference)
            if self.args.readjust_weights:
                A, salience_scores, mean_Z = self.compute_prune_probability(
                    i, data)
            else:
                _, salience_scores, _ = self.compute_prune_probability(i, data)

            if salience_scores.shape[0] <= 1:
                return False
            # Select the (outsize - new_size) neurons with the highest
            # salience scores as candidates for pruning
            pruned_neurons = sorted(range(len(salience_scores)),
                                    key=lambda k: salience_scores[k],
                                    reverse=True)[:(outsize - new_size)]
            print('mean error in removed neurons: ',
                  np.mean(salience_scores[pruned_neurons]))
            self.logger.info('mean error in removed neurons: %f' %
                             np.mean(salience_scores[pruned_neurons]))
            pruned_neurons = sorted(pruned_neurons)
        else:
            if self.args.readjust_weights and (A is None or mean_Z is None):
                A, salience_scores, mean_Z = self.compute_prune_probability(
                    i, data)

        # Find the next weighted layer after i; its input must shrink to match
        k = i + 1
        while k < len(self.layers) and not utils.is_weighted(self.layers[k]):
            k += 1
        _, old_insize = utils.get_layer_input_output_size(self.layers[k])
        nrepeats = old_insize // outsize

        pruned_input_neurons_ = pruned_input_neurons = pruned_neurons

        init_w_norm = torch.norm(self.layers[k].weight.data, dim=-1).mean()
        if self.args.readjust_weights:
            if self.is_last_conv(i):
                self.layers[k], _ = self.remove_input_neurons(
                    k, pruned_input_neurons, A, mean_Z, nrepeats)
                torch.cuda.ipc_collect()
            else:
                self.layers[k], _ = self.remove_input_neurons(
                    k, pruned_input_neurons, A, mean_Z)
        else:
            if self.is_last_conv(i):
                num_last_conv_filts = self.layers[i].weight.data.shape[0]
                idxs = np.arange(old_insize).reshape(num_last_conv_filts, -1)
                pruned_input_neurons_ = pruned_input_neurons = idxs[
                    pruned_neurons].flatten()
            self.layers[k], _ = self.remove_input_neurons(
                k, pruned_input_neurons)
        pruned_input_neurons = pruned_input_neurons_

        w_norm = torch.norm(self.layers[k].weight.data, dim=-1).mean()
        print('change in weight norm: %.4f -> %.4f' % (init_w_norm, w_norm))

        # Locate the BatchNorm layer (if any) that follows layer i
        bn_idx = i + 1
        while bn_idx < len(self.layers) and not isinstance(
                self.layers[bn_idx], nn.BatchNorm2d):
            bn_idx += 1

        _, new_insize = utils.get_layer_input_output_size(self.layers[k])
        print(self.layers[k].weight.data.shape, old_insize, new_insize)

        if self.args.readjust_weights:
            if self.is_last_conv(i):
                num_removed_neurons = (old_insize - new_insize) // nrepeats
            else:
                num_removed_neurons = old_insize - new_insize
            self.layers[i] = self.remove_output_neurons(
                i, pruned_neurons[:num_removed_neurons])
            print('layer[i].shape:', self.layers[i].weight.shape)
            if bn_idx < len(self.layers) and isinstance(
                    self.layers[bn_idx], nn.BatchNorm2d):
                self.layers[bn_idx], _ = self.remove_input_neurons(
                    bn_idx, pruned_neurons[:num_removed_neurons])
        else:
            self.layers[i] = self.remove_output_neurons(i, pruned_neurons)

            if bn_idx < len(self.layers) and isinstance(
                    self.layers[bn_idx], nn.BatchNorm2d):
                self.layers[bn_idx], _ = self.remove_input_neurons(
                    bn_idx, pruned_neurons)

        new_outsize, insize = utils.get_layer_input_output_size(self.layers[i])
        return new_outsize < outsize
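
A hedged sketch of how shrink_layer might be driven (the model object and data loader names are assumptions, not shown in the original):

# Hypothetical driver: shrink layer 0 by 10% per call until it stops shrinking
layer_idx = 0
while model.shrink_layer(layer_idx, loader, factor=0.9):
    pass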
Example #6
    def compute_prune_probability(self,
                                  i,
                                  data,
                                  flatten_conv_map=False,
                                  init_A1=None,
                                  init_A2=None,
                                  normalize=True):
        k = i
        # Extend the truncation point past parameter-free layers, stopping
        # at the next weighted, Flatten, or Dropout layer
        while i + 1 < len(self.layers) and not utils.is_weighted(
                self.layers[i + 1]) and not isinstance(
                    self.layers[i + 1], Flatten) and not isinstance(
                        self.layers[i + 1], nn.Dropout):
            i += 1
        trunc_model = self.layers[:i + 1]
        trunc_model = trunc_model.eval()

        print('---------------------truncated_layers-----------------------')
        print(self.layers[k:i + 1])
        print('------------------------------------------------------------')
        # Run the truncated model over the data, collecting activations (Z),
        # labels (Y), and inputs (X)
        Zs = []
        Ys = []
        Xs = []
        for x, y, _ in data:
            x = x.to(self.device)
            Z = trunc_model(x)
            Zs.append(Z.detach().cpu())
            Ys.append(y.detach().cpu())
            Xs.append(x.detach().cpu())
        Z = torch.cat(Zs, dim=0)
        Y = torch.cat(Ys, dim=0)
        X = torch.cat(Xs, dim=0)
        if self.args.scale_by_grad != 'none':
            scale = self.compute_neuron_derivatives(Z,
                                                    Y,
                                                    self.layers[i + 1:],
                                                    normalize=normalize)
        else:
            scale = 1

        # Move the channel dimension to the end, e.g. (N, C, H, W) -> (N, H, W, C)
        Z = Z.permute([j for j in range(len(Z.shape)) if j != 1] + [1])
        if flatten_conv_map:
            Z = Z.contiguous().view(Z.shape[0], -1)
            init_A = init_A2
        else:
            Z = Z.contiguous().view(-1, Z.shape[-1])
            init_A = init_A1

        if self.args.scale_by_mi == 'features' or self.args.score_by_mi == 'features':
            if self.args.mizx_weight > 0:
                mi_zx = self.compute_neuron_mutual_info(
                    Z, X.view(X.shape[0], -1))
                print('mi_zx - min: %f mean: %f max: %f' %
                      (mi_zx.min(), mi_zx.mean(), mi_zx.max()))
            else:
                mi_zx = 0
            mi_zy = self.compute_neuron_mutual_info(
                Z,
                Y.view(Y.shape[0], 1).float())
            mi_ratio = mi_zy - self.args.mizx_weight * mi_zx
            if self.args.scale_by_mi == 'features':
                scale *= mi_ratio
            else:
                # score_by_mi == 'features': remember the MI-based scores and
                # apply them after score_neurons(), which reassigns `scores`
                feature_mi_scores = mi_ratio

            print('mi_zy - min: %f mean: %f max: %f' %
                  (mi_zy.min(), mi_zy.mean(), mi_zy.max()))
            print('mi_ratio - min: %f mean: %f max: %f' %
                  (mi_ratio.min(), mi_ratio.mean(), mi_ratio.max()))

        # Append a constant-ones column to Z to act as a bias term
        ones = torch.ones((Z.shape[0], 1), device=Z.device)
        Z = torch.cat((Z, ones), dim=1)
        print('Z.shape =', Z.shape)
        A, scores, residuals = self.score_neurons(Z,
                                                  init_A=init_A,
                                                  normalize=normalize)
        if self.args.score_by_mi == 'features':
            scores = feature_mi_scores

        if self.args.scale_by_grad == 'loss':
            scores = residuals.mean(0)
        if self.args.scale_by_mi == 'residual' or self.args.score_by_mi == 'residual':
            delta = torch.from_numpy(residuals[:, :-1])
            if self.args.mizx_weight > 0:
                mi_zx = self.compute_neuron_mutual_info(
                    delta, X.view(X.shape[0], -1))
                print('mi_zx - min: %f mean: %f max: %f' %
                      (mi_zx.min(), mi_zx.mean(), mi_zx.max()))
            else:
                mi_zx = 0
            mi_zy = self.compute_neuron_mutual_info(
                delta,
                Y.view(Y.shape[0], 1).float())
            mi_ratio = mi_zy - self.args.mizx_weight * mi_zx
            if self.args.scale_by_mi == 'residual':
                scale *= mi_ratio
            else:
                scores = mi_ratio
            print('mi_zy - min: %f mean: %f max: %f' %
                  (mi_zy.min(), mi_zy.mean(), mi_zy.max()))
            print('mi_ratio - min: %f mean: %f max: %f' %
                  (mi_ratio.min(), mi_ratio.mean(), mi_ratio.max()))

        if self.args.score_by_mi == 'none':
            scores = scores[:-1]  # drop the score of the appended bias column
            scores = scores * scale

        Z = Z.detach().cpu().numpy()
        torch.cuda.ipc_collect()

        print('L2: max=%f median=%f min=%f' %
              (np.max(scores), np.median(scores), np.min(scores)))
        return A, np.array(scores), np.mean(np.abs(Z), axis=0)
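
The permute/reshape step above turns a convolutional activation map into a matrix with one row per spatial location and one column per channel; a small standalone sketch of that transformation (shapes are illustrative):

import torch

Z = torch.randn(8, 16, 4, 4)  # (batch, channels, H, W)
Z = Z.permute(0, 2, 3, 1)  # channels last: (8, 4, 4, 16)
Z = Z.contiguous().view(-1, Z.shape[-1])  # (8*4*4, 16): one row per location
print(Z.shape)  # torch.Size([128, 16])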