Example No. 1
 def test_directed(self):
     """Tests that each directed edge is counted once in the cut."""
     G = nx.barbell_graph(3, 0).to_directed()
     S = set([0, 1, 2])
     T = set([3, 4, 5])
     assert_equal(nx.cut_size(G, S, T), 2)
     assert_equal(nx.cut_size(G, T, S), 2)
Example No. 2
 def test_single_edge(self):
     """Tests for a cut of a single edge."""
     G = nx.barbell_graph(3, 0)
     S = set([0, 1, 2])
     T = set([3, 4, 5])
     assert_equal(nx.cut_size(G, S, T), 1)
     assert_equal(nx.cut_size(G, T, S), 1)
Example No. 3
 def test_symmetric(self):
     """Tests that the cut size is symmetric."""
     G = nx.barbell_graph(3, 0)
     S = set([0, 1, 4])
     T = set([2, 3, 5])
     assert_equal(nx.cut_size(G, S, T), 4)
     assert_equal(nx.cut_size(G, T, S), 4)
Example No. 4
 def test_directed(self):
     """Tests that each directed edge is counted once in the cut."""
     G = nx.barbell_graph(3, 0).to_directed()
     S = {0, 1, 2}
     T = {3, 4, 5}
     assert nx.cut_size(G, S, T) == 2
     assert nx.cut_size(G, T, S) == 2
Example No. 5
 def test_directed_symmetric(self):
     """Tests that a cut in a directed graph is symmetric."""
     G = nx.barbell_graph(3, 0).to_directed()
     S = set([0, 1, 4])
     T = set([2, 3, 5])
     assert_equal(nx.cut_size(G, S, T), 8)
     assert_equal(nx.cut_size(G, T, S), 8)
Example No. 6
 def test_single_edge(self):
     """Tests for a cut of a single edge."""
     G = nx.barbell_graph(3, 0)
     S = {0, 1, 2}
     T = {3, 4, 5}
     assert_equal(nx.cut_size(G, S, T), 1)
     assert_equal(nx.cut_size(G, T, S), 1)
Example No. 7
 def test_directed_symmetric(self):
     """Tests that a cut in a directed graph is symmetric."""
     G = nx.barbell_graph(3, 0).to_directed()
     S = {0, 1, 4}
     T = {2, 3, 5}
     assert_equal(nx.cut_size(G, S, T), 8)
     assert_equal(nx.cut_size(G, T, S), 8)
Example No. 8
 def test_directed_symmetric(self):
     """Tests that a cut in a directed graph is symmetric."""
     G = nx.barbell_graph(3, 0).to_directed()
     S = {0, 1, 4}
     T = {2, 3, 5}
     assert nx.cut_size(G, S, T) == 8
     assert nx.cut_size(G, T, S) == 8
Example No. 9
 def test_symmetric(self):
     """Tests that the cut size is symmetric."""
     G = nx.barbell_graph(3, 0)
     S = {0, 1, 4}
     T = {2, 3, 5}
     assert_equal(nx.cut_size(G, S, T), 4)
     assert_equal(nx.cut_size(G, T, S), 4)
Example No. 10
 def test_symmetric(self):
     """Tests that the cut size is symmetric."""
     G = nx.barbell_graph(3, 0)
     S = {0, 1, 4}
     T = {2, 3, 5}
     assert nx.cut_size(G, S, T) == 4
     assert nx.cut_size(G, T, S) == 4
Example No. 11
 def test_single_edge(self):
     """Tests for a cut of a single edge."""
     G = nx.barbell_graph(3, 0)
     S = {0, 1, 2}
     T = {3, 4, 5}
     assert nx.cut_size(G, S, T) == 1
     assert nx.cut_size(G, T, S) == 1
Example No. 12
 def test_directed(self):
     """Tests that each directed edge is counted once in the cut."""
     G = nx.barbell_graph(3, 0).to_directed()
     S = {0, 1, 2}
     T = {3, 4, 5}
     assert_equal(nx.cut_size(G, S, T), 2)
     assert_equal(nx.cut_size(G, T, S), 2)
Example No. 13
def evaluate_conductance(graph: nx.Graph, subgraphs, tau):
    """
    :param graph: the graph being evaluated
    :param subgraphs: K clusters (subsets of the nodes of the main graph)
    :param tau: tuning parameter; tau = 0 gives vanilla conductance
    :return: core_cut, vanilla_conductance
    """

    vanilla_conductances = []
    core_cuts = []
    for _, nodes in subgraphs.items():
        subgraph = graph.subgraph(nodes).copy()
        subgraph_complement = set(graph) - set(subgraph)
        cut = nx.cut_size(graph, subgraph, subgraph_complement)
        volume_subgraph = nx.volume(graph, subgraph)
        volume_subgraph_complement = nx.volume(graph, subgraph_complement)
        volume_div = min(volume_subgraph, volume_subgraph_complement)
        vanilla_conductances.append((cut / volume_div))
        core_cuts.append((cut + ((tau / len(graph)) * len(subgraph) * len(subgraph_complement))) / (
                volume_div + (tau * len(subgraph))))
    vanilla_conductance = min(vanilla_conductances)
    core_cut = min(core_cuts)
    logging.debug('Vanilla graph conductance: %f', vanilla_conductance)
    logging.debug('CoreCut graph conductance: %f', core_cut)

    return core_cut, vanilla_conductance
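
A minimal way to exercise this function (a sketch: the karate-club graph and the two-cluster dict are hypothetical inputs, and logging is assumed to be imported in the function's module):

import logging

import networkx as nx

# Hypothetical input: two clusters over the karate club graph, in the
# {cluster_id: iterable_of_nodes} shape that evaluate_conductance expects.
G = nx.karate_club_graph()
clusters = {0: [n for n in G if n < 17], 1: [n for n in G if n >= 17]}

logging.basicConfig(level=logging.DEBUG)
core_cut, vanilla = evaluate_conductance(G, clusters, tau=0.5)
print(core_cut, vanilla)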
Example No. 14
def print_summary_statistics(G, partitions):
    sizes = []
    cuts = []
    interior = []
    TATBcut = []
    for x in partitions:
        sizes += [len(y.nodes()) for y in x]
        cut_size = nx.cut_size(G, x[0], x[1])
        cuts.append(cut_size)
        #boundary = nx.node_boundary(G,x[0],x[1])
        # Count "interior" nodes: nodes with no neighbor on the other side of the cut.
        interior_count = 0
        for a in x[0].nodes():
            y = 1
            for z in G.neighbors(a):
                if z in x[1]:
                    y = 0
            interior_count += y
        for a in x[1].nodes():
            y = 1
            for z in G.neighbors(a):
                if z in x[0]:
                    y = 0
            interior_count += y

        interior.append(interior_count)
        logTA = log_number_trees(x[0])
        logTB = log_number_trees(x[1])
        TATBcut.append([logTA + logTB + np.log(cut_size)])
    print([np.mean(TATBcut), np.var(TATBcut)],
          [np.mean(cuts), np.var(cuts)],
          [np.mean(interior), np.var(interior)])
Example No. 15
def score_tree_edge_pair(G, T, e):
    partition = R(G, T, e)
    G_A = partition[0]
    G_B = partition[1]
    cut_size = nx.cut_size(G, G_A, G_B)
    return -1 * (log_number_trees(G_A) + log_number_trees(G_B) +
                 np.log(cut_size))
Example No. 16
def expansion(G, S1, S2, weight=None):
    """Return the edge expansion for G with node sets S1 and S2.

    Expansion is the cut size between two sets of nodes divided
    by the minimum of the sizes of S1 and S2 [1]_.

    Parameters
    ----------
    G : NetworkX Graph
    S1, S2 : container
      Container of nodes (e.g. set)
    weight : key
      Edge data key to use as weight.  If None, edge weights are set to 1.

    Returns
    -------
    expansion : float

    See Also
    --------
    cut_size
    volume
    conductance
    normalized_cut_size

    References
    ----------
    .. [1] Fan Chung, Spectral Graph Theory 
       (CBMS Regional Conference Series in Mathematics, No. 92), 
       American Mathematical Society, 1997, ISBN 0-8218-0315-8
       http://www.math.ucsd.edu/~fan/research/revised.html
    """
    return float(nx.cut_size(G, S1, S2, weight)) / min(len(S1), len(S2))
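
A quick sanity check of this function against NetworkX's built-in edge_expansion, which computes the same quantity (a sketch, assuming the expansion function above is in scope):

import networkx as nx

G = nx.barbell_graph(3, 0)
S1, S2 = {0, 1, 2}, {3, 4, 5}
# One edge crosses the cut and both sides have 3 nodes, so expansion is 1/3.
print(expansion(G, S1, S2))          # 0.3333...
print(nx.edge_expansion(G, S1, S2))  # built-in equivalent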
Example No. 17
def normalized_cut_size(G, c1, c2, weight=None):
    """Returns the normalized cut size between two containers of nodes
    c1 and c2.

    The normalized cut size is defined as the cut size times the sum
    of the reciprocals of the volumes of the two cuts. [1]

    Parameters
    ----------
    G : NetworkX Graph
    c1, c2 : container
      containers of nodes
    weight : keyword, optional default=None
      keyword for weight on edges

    Returns
    -------
    normalized_cut_size : float

    See Also
    --------
    cut_size
    volume
    conductance
    expansion

    References
    ----------
    .. [1] David Gleich. 'Hierarchical Directed Spectral Graph Partitioning'.
          Website report. http://www.stanford.edu/~dgleich/publications/directed-spectral.pdf
    """
    return nx.cut_size(G, c1, c2, weight) * (1. / nx.volume(G, c1, weight) +
                                             1. / nx.volume(G, c2, weight))
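
A sketch of the formula on a barbell graph, checked against the built-in nx.normalized_cut_size (assumes the corrected function above is in scope):

import networkx as nx

G = nx.barbell_graph(3, 0)
c1, c2 = {0, 1, 2}, {3, 4, 5}
# cut = 1 and vol(c1) = vol(c2) = 7, so the result is 1/7 + 1/7 = 2/7.
print(normalized_cut_size(G, c1, c2))     # ~0.2857
print(nx.normalized_cut_size(G, c1, c2))  # built-in equivalent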
Example No. 18
def self_entropy(G, T, alpha):
    """
    self_entropy is a function that estimates the entropy of a node (with id alpha) in a codetree T of graph G
    param G: the given graph
    param T: a codetree of given graph G
    param alpha: the id of a node in T
    return: the self entropy of the node that with id alpha
    """
    if T.get_node(alpha).is_root():
        print("Invalid parameter in self_entropy: alpha must not be the root.")
        return 0

    parent_id = T.get_node(alpha).bpointer
    leaf_ids_of_node = []
    leaf_ids_of_parent = []
    for node in T.leaves(alpha):
        leaf_ids_of_node.append(node.identifier)
    for node in T.leaves(parent_id):
        leaf_ids_of_parent.append(node.identifier)

    g_node = nx.cut_size(
        G, leaf_ids_of_node,
        list(set(nx.nodes(G)).difference(set(leaf_ids_of_node))))
    v_G = nx.volume(G, nx.nodes(G))
    v_node = nx.volume(G, leaf_ids_of_node)
    v_parent = nx.volume(G, leaf_ids_of_parent)

    entropy = -g_node / v_G * math.log2(v_node / v_parent)
    return entropy
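
A usage sketch, assuming the codetree T is a treelib.Tree (whose nodes expose the bpointer attribute this function reads); the 4-node path graph and the tree shape are hypothetical:

import networkx as nx
from treelib import Tree

# Hypothetical codetree over a 4-node path: root -> {a, b}, with graph
# vertices 0,1 as leaves under "a" and 2,3 as leaves under "b".
G = nx.path_graph(4)
T = Tree()
T.create_node(identifier="root")
T.create_node(identifier="a", parent="root")
T.create_node(identifier="b", parent="root")
for v in (0, 1):
    T.create_node(identifier=v, parent="a")
for v in (2, 3):
    T.create_node(identifier=v, parent="b")

# cut = 1, vol(G) = 6, vol(a) = 3, vol(parent) = 6 -> -1/6 * log2(1/2) = 1/6
print(self_entropy(G, T, "a"))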
Example No. 19
def greedy_cut(G, prefiltered_true_variants=()):
    G.remove_nodes_from(prefiltered_true_variants)
    all_nodes = set(G.nodes)
    all_edges = list(G.edges)

    # 1. Heuristic to find a cut in which the highly negative-weight edges do not cross the cut
    S1 = set()
    S2 = set()
    u, v = random.choice(all_edges)
    S1.add(u)
    S2.add(v)

    while True:
        S = S1.union(S2)
        C = all_nodes.difference(S)
        if len(C) == 0:
            break
        A = {}
        for w in C:
            A[w] = nx.cut_size(G, {w}, S1, weight="weight") - nx.cut_size(
                G, {w}, S2, weight="weight")
        maxkey = max(A, key=lambda x: abs(A[x]))
        rn = random.uniform(0, 1)
        if (A[maxkey] < 0) or ((A[maxkey] == 0) and (rn < 0.5)):
            S1.add(maxkey)
        else:
            S2.add(maxkey)

    # 2. Greedy max-cut algorithm
    while True:
        A1 = {}
        A2 = {}
        old_cut = nx.cut_size(G, S1, S2, weight="weight")
        for w in S1:
            A1[w] = nx.cut_size(G, {w}, S1, weight="weight") - nx.cut_size(
                G, {w}, S2, weight="weight")
        for w in S2:
            A2[w] = nx.cut_size(G, {w}, S1, weight="weight") - nx.cut_size(
                G, {w}, S2, weight="weight")

        V1 = [k for k, v in A1.items() if v > 0]
        V2 = [k for k, v in A2.items() if v < 0]

        for w in V1:
            S2.add(w)
            S1.remove(w)

        for w in V2:
            S1.add(w)
            S2.remove(w)

        new_cut = nx.cut_size(G, S1, S2, weight="weight")
        if new_cut <= old_cut:
            break
    S = S1 if len(S1) < len(S2) else S2
    return S
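
A toy run (a sketch; the signed weights below are hypothetical, chosen so the strongly negative edges should stay inside one side of the cut):

import random

import networkx as nx

random.seed(0)
G = nx.Graph()
G.add_weighted_edges_from([
    ("a", "b", -5.0),  # strongly attractive pair
    ("a", "c", 2.0),
    ("b", "d", 2.0),
    ("c", "d", -5.0),  # strongly attractive pair
])
print(greedy_cut(G))  # the smaller side of the cut that was found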
Example No. 20
def cone_properties(G, cone, num_edges):
    cone_subgraph_size = G.subgraph(cone).size()
    cone_cut_size = nx.cut_size(G, cone)
    volume = cone_subgraph_size + cone_cut_size

    if cone_subgraph_size == 0: mu = (volume + 1) * log(num_edges + 1, 2)
    else: mu = (volume) * log(num_edges / cone_subgraph_size)
    return mu, cone_cut_size
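
A sketch of calling it on a barbell graph (assuming log was imported from math, as the function's calls suggest):

import networkx as nx

G = nx.barbell_graph(3, 0)  # 7 edges in total
cone = {0, 1, 2}            # one of the triangles
# subgraph size 3, cut size 1, volume 4 -> mu = 4 * log(7/3)
print(cone_properties(G, cone, num_edges=G.number_of_edges()))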
Example No. 21
    def partitionCheck(self, partition):
        """Checks the partition for local-search optimality.

        Returns a node set that increases the cut.
        """
        algResult = nx.cut_size(self.g, set(partition), T=None, weight='w')
        result = True
        nodeSet = []
        for v in self.g:
            p = copy.deepcopy(partition)
            if v in partition:
                p.remove(v)
            else:
                p.append(v)
            p = set(p)
            neighborCut = nx.cut_size(self.g, p, T=None, weight='w')

            if neighborCut > algResult:
                result = False
                nodeSet.append((v, neighborCut))
        return (result, nodeSet)
Example No. 22
def ball_cut(G, dist_from_center, rho, delta, num_edges, source):
    radius = rho * delta
    c = log(num_edges + 1, 2) / ((1 - 2 * delta) * rho)

    if radius >= 1:
        ball = get_ball(dist_from_center, radius)
        cut_size = nx.cut_size(G, ball)
        volume = len(G.edges(ball))
    else:
        ball = [source]
        cut_size = G.degree[source]  #UNWEIGHTED
        volume = cut_size

    while cut_size > c * (volume + 1):
        radius += 1
        ball += dist_from_center[floor(radius)]
        cut_size = nx.cut_size(G, ball)
        volume = len(G.edges(ball))
    return radius, ball
Example No. 23
def _basic_partitioning(G, n1, n2):
    """
    Generates the 2 classes composed of n1 and n2 nodes starting from G.
    The split is performed with the spectral algorithm.
    To be used inside spectral_partitioning(G, class_nodes).
    :param G: simple connected graph
    :param n1: nodes of the first class
    :param n2: nodes of the second class
    :return: a subgraph, a view of G. The structure of the subgraph cannot be modified
    """

    # List of nodes ordered according to the Fiedler vector
    ordered_nodes = nx.spectral_ordering(
        G, method="lanczos")  # Returns a list, not a numpy array

    group_test_1 = set(ordered_nodes[:n1])  # first n1
    group_test_2 = set(ordered_nodes[:n2])  # first n2

    cut_size_1 = nx.cut_size(G, group_test_1)
    cut_size_2 = nx.cut_size(G, group_test_2)

    # Choose the component that will split the graph based on the weight of its cut set
    if cut_size_1 < cut_size_2:
        final_group = group_test_1
        remaining_group = set(ordered_nodes[n1:])
        G_1 = G.subgraph(final_group)
        G_2 = G.subgraph(remaining_group)
    else:
        final_group = group_test_2  # n2
        remaining_group = set(ordered_nodes[n2:])  # n1
        G_1 = G.subgraph(remaining_group)
        G_2 = G.subgraph(final_group)

    # Outside the if this does not work for the case spectral_partitioning([9, (5, 4)]
    # G_1 = G.subgraph(final_group)
    # G_2 = G.subgraph(remaining_group)
    # G_1 will always get big_class1_nodes and G_2 will always get big_class2_nodes
    # print('basic part. first group has nodes: ', nx.number_of_nodes(G_1))
    # print('basic part. second group has nodes: ', nx.number_of_nodes(G_2))
    yield from (G_1, G_2)
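
A usage sketch: the function is a generator, so the two subgraph views are unpacked from it (scipy must be installed for nx.spectral_ordering):

import networkx as nx

G = nx.barbell_graph(5, 0)  # 10 nodes with a natural 5/5 split
G_1, G_2 = _basic_partitioning(G, 5, 5)
print(sorted(G_1), sorted(G_2))  # the two classes found by the spectral split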
Example No. 24
def get_corecut(G, S, tau, n):
    vol = nx.volume(G, S)
    cut = nx.cut_size(G, S)
    s_size = len(S)
    sc_size = n - s_size
    up = cut + (tau / n) * s_size * sc_size
    down = vol + tau * s_size
    if down == 0:
        print("cut: {cut} ,   up: {up},      vol: {vol},      down: {down} ".
              format(cut=cut, up=up, vol=vol, down=down))
        return 1
    else:
        return up / down
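
A quick sketch on a barbell graph; with tau = 0 the CoreCut score reduces to the plain cut/volume ratio:

import networkx as nx

G = nx.barbell_graph(3, 0)
S = {0, 1, 2}
print(get_corecut(G, S, tau=0.0, n=len(G)))  # 1/7: plain cut/volume
print(get_corecut(G, S, tau=1.0, n=len(G)))  # 0.25: regularized score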
Example No. 25
def findM(G, LC):

    #cut
    cut = nx.cut_size(G, LC, weight='weight')
    #print("cut =", cut)

    #volume
    vol = nx.cuts.volume(G, LC, weight='weight')
    #print("vol =", vol)

    M = (vol - cut) / (2 * cut)

    return M
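
A usage sketch on a small weighted graph (hypothetical uniform weights; LC is the candidate community):

import networkx as nx

G = nx.barbell_graph(3, 0)
nx.set_edge_attributes(G, 1.0, "weight")
LC = {0, 1, 2}
# vol(LC) = 7 and cut = 1, so M = (7 - 1) / (2 * 1) = 3.0
print(findM(G, LC))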
Example No. 26
def calc_sum_ncut(column_name):
    sum_ncut = 0
    unique_cluster_names = list(set(df_with_clusters_classified[column_name]))
    for cluster_name in unique_cluster_names:
        S = set(df_with_clusters_classified[
            df_with_clusters_classified[column_name] == cluster_name]
                ['node ID'].values)
        try:
            ncut = (nx.cut_size(G, S) / nx.volume(G, S))
        except ZeroDivisionError:
            continue
        sum_ncut += ncut
    return sum_ncut
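
A sketch of the globals this function reads (the graph G and the df_with_clusters_classified table are hypothetical stand-ins, assumed to live in the same module as the function):

import networkx as nx
import pandas as pd

G = nx.barbell_graph(3, 0)
df_with_clusters_classified = pd.DataFrame({
    "node ID": list(G),
    "kmeans": [0, 0, 0, 1, 1, 1],  # hypothetical cluster column
})
print(calc_sum_ncut("kmeans"))  # 1/7 + 1/7 on this split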
Example No. 27
def discrete_pp(G,H,boundary_nodes):
    # Adding a supernode for proper perimeter calculation
    # Name of the supernode is -1
    boundary_edges = [(-1, node) for node in G.nodes if boundary_nodes[node] is True]
    G.add_edges_from(boundary_edges)

    nodes_H = {node for node in H.nodes}
    perimeter = nx.cut_size(G, nodes_H)
    area = len(H.nodes())
    # Remove supernode as to not alter the graph G
    G.remove_node(-1)
    
    return (perimeter**2)/area
Example No. 28
def calc_sum_conductance(column_name):
    sum_conductance = 0
    unique_cluster_names = list(set(df_with_clusters_classified[column_name]))
    for cluster_name in unique_cluster_names:
        S = set(df_with_clusters_classified[
            df_with_clusters_classified[column_name] == cluster_name]
                ['node ID'].values)
        try:
            conductance = (nx.cut_size(G, S) /
                           min(nx.volume(G, S), nx.volume(G, nodes_G - S)))
        except ZeroDivisionError:
            continue
        sum_conductance += conductance
    return sum_conductance
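
Reusing the hypothetical setup from the calc_sum_ncut sketch above, this function additionally needs the nodes_G global:

nodes_G = set(G)
print(calc_sum_conductance("kmeans"))  # also 1/7 + 1/7 on this balanced split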
Example No. 29
def CONDUCTANCE(G, C):

    #cut
    cut = nx.cut_size(G, C, weight='weight')
    #print("cut =", cut)

    # adds the weight between 3 and 11 one extra time
    vol = nx.cuts.volume(G, C, weight='weight')
    #print("vol =", vol)

    conductance = cut / vol
    #print("Conductance =", conductance)

    return conductance
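
A sketch with hypothetical uniform weights; scaling all weights by the same factor leaves the ratio unchanged:

import networkx as nx

G = nx.barbell_graph(3, 0)
nx.set_edge_attributes(G, 2.0, "weight")
print(CONDUCTANCE(G, {0, 1, 2}))  # 2/14 = 1/7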
Example No. 30
def print_next_results(y):
    max1 = max(y)
    x_max1 = y.index(max1)
    x_max1_bin = "{0:b}".format(x_max1).zfill(node_count)
    y.remove(max1)
    S = set()
    for i in range(len(x_max1_bin)):
        if x_max1_bin[i] == "1":
            S.add(i)

    T = set(G.nodes) - S
    cut_size = nx.cut_size(G, S, T, weight="weight")
    print("Subgraph string: %s" % x_max1_bin)
    print("Cut size: %s" % cut_size)
    draw_cut_edges(S, T, cut_size)
Example No. 31
def inverse_conductance(G, S):
    weight = "weight"
    T = set(G) - set(S)
    num_cut_edges = nx.cut_size(G, S, T, weight=weight)
    volume_S = nx.volume(G, S, weight=weight)

    if len(T) == 0:  # if all nodes are in the community, bad conductance (avoid /0)
        return 0
    volume_T = nx.volume(G, T, weight=weight)
    # If only a few nodes are outside the community, poor score (trivial solution),
    # but if many nodes are outside the community, return a good score. Also avoids /0.
    volume_T = volume_T + len(T)

    return 1 - num_cut_edges / min(volume_T, volume_S)
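
A sketch on an unweighted graph (cut_size and volume fall back to weight 1 when the edge attribute is missing):

import networkx as nx

G = nx.barbell_graph(3, 0)
print(inverse_conductance(G, {0, 1, 2}))  # tight community: score near 1
print(inverse_conductance(G, set(G)))     # whole graph: 0 via the len(T) guard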
Example No. 32
def cut(pentomino):
    block = { covering_map(p, pentomino.torus.graph["size"]) for p in pentomino.nodes}
    return nx.cut_size(pentomino.torus, block)**pentomino.power
Example No. 33
 def test_multigraph(self):
     """Tests that parallel edges are each counted for a cut."""
     G = nx.MultiGraph(["ab", "ab"])
     assert nx.cut_size(G, {"a"}, {"b"}) == 2
Example No. 34
 def test_multigraph(self):
     """Tests that parallel edges are each counted for a cut."""
     G = nx.MultiGraph(['ab', 'ab'])
     assert_equal(nx.cut_size(G, {'a'}, {'b'}), 2)
Example No. 35
for i in range(0, pathLength-1):
	G.add_node(i)

file = open("/home/yinhuan/mapModel/yq_500/greedy/DP/pose_graph.txt")
# file = open("/home/yh/mapModel/2018/11.07/pose_graph_yq_vis200.txt")

print('file open, adding edges')

for line in file.readlines():
	line = line.strip('\n')
	nums = line.split(" ")
	nums = [int(x) for x in nums ]
	G.add_weighted_edges_from([tuple(nums)])

file.close()

print('start eval cut costs')


saveFile = open("/home/yinhuan/mapModel/yq_500/greedy/DP/cut_costs.txt", 'w')
setA = []
for i in range(0, pathLength-2):  # the last point is not in
	print(i)
	setA.append(i)
	# setB is the complement
	cut_value = nx.cut_size(G, setA, weight='weight')
	saveFile.write(str(cut_value) + "\n")

saveFile.close()
Example No. 37
    def analysis(self, G, a, b):
        n = G.number_of_nodes()

        # Threshold from the theoretical draft
        threshold = 2 * G.b * interval_u(2 * G.b) * np.log(n)
        # print(threshold)

        # Form a list of neighbours for each vertex in the form of a set: that works faster
        list_neighbrs = {}
        for v in G.nodes:
            list_neighbrs.update({v: set(nx.neighbors(G, v))})
        self.list_neighbrs = list_neighbrs

        # print("The graph...")
        Gc = nx.Graph()
        Gc.add_nodes_from(G)

        A = nx.adjacency_matrix(G).asfptype()
        A_2 = A.dot(A)
        A_2 = A_2.multiply(1 / threshold)
        A_2 = A_2.astype(np.float64)
        A_2.setdiag(0)
        A_2 = A_2.floor()
        A_2 = A_2.sign()
        A_2 = A_2.astype(int)
        A_2.eliminate_zeros()

        Gc = nx.from_scipy_sparse_matrix(A_2, parallel_edges=False)
        # # # print(edges)
        # Gc.add_edges_from(edges)

        # print(Gc.number_of_nodes())
        # print(Gc.number_of_edges())
        # print(list(nx.neighbors(Gc, 0)))

        # fig, ax1 = plt.subplots(1, 1, sharey = True, figsize=(14, 7))
        # pos = nx.spring_layout(Gc)
        # nx.draw(Gc, pos, ax1, with_labels=False, node_color='black', edge_color = 'gray', node_size = 20)
        # plt.show()
        # plt.close()

        # print("Components...")
        cc = sorted(nx.connected_components(Gc), key=len, reverse=True)[:50]
        if len(cc) <= 1:
            A = nx.adjacency_matrix(Gc)
            clustering = SpectralClustering(n_clusters=2,
                                            assign_labels="discretize",
                                            affinity='precomputed',
                                            random_state=0).fit(A)
            labels_pred = clustering.labels_

            self.labels = labels_pred
            self.accuracy = G.GetAccuracy(self.labels)
            return

        cc_labels = []
        c0_edges = []
        c1_edges = []
        cluster0 = cc[0]
        cluster1 = cc[1]
        for c in cc:
            c_labels = np.array([G.nodes[v]['ground_label'] for v in c])
            cc_labels += [np.mean(c_labels)]
            edges_to_0 = nx.cut_size(G, cc[0], c)
            edges_to_1 = nx.cut_size(G, cc[1], c)
            c0_edges += [edges_to_0]
            c1_edges += [edges_to_1]
            if edges_to_0 > 0 and edges_to_1 == 0:
                # print("I'm here")
                cluster1 = cluster1.union(c)
            if edges_to_0 == 0 and edges_to_1 > 0:
                # print("I'm here")
                cluster0 = cluster0.union(c)
            if edges_to_0 > 0 and edges_to_1 > 0 and c not in (cc[0], cc[1]):
                if edges_to_0 > edges_to_1:
                    cluster1 = cluster1.union(c)
                else:
                    cluster0 = cluster0.union(c)
        cc_lens = [len(c) for c in cc]
        # print("Connected components:")
        # print(cc_lens[:20])
        # print("Average labels in connected components:")
        # print(cc_labels[:20])
        # print("Number of edges to 0 community:")
        # print(c0_edges[:20])
        # print("Number of edges to 1 community:")
        # print(c1_edges[:20])
        # print(len(cluster0), len(cluster1))

        # Start with a random node
        zero_node = choice(list(G))

        # Run an iteration of the algorithm
        # iter_labels = self.Iteration(G, threshold, zero_node)
        # print(len(iter_labels))

        labels_pred = [random.randint(0, 1) for i in range(n)]
        # labels_pred = np.ones(n)
        for v in cluster0:
            labels_pred[v] = 0
        for v in cluster1:
            labels_pred[v] = 1

        self.labels = labels_pred
        self.accuracy = G.GetAccuracy(labels_pred)