Code example #1
0
def group_settings_clique_removal(experiments: Experiment) -> Experiment:
    """
    Merge experiment settings that are diagonal in a shared tensor product basis (TPB)
    into groups, so the experiment needs fewer QPU runs.

    Grouping is done by repeatedly removing cliques from the TPB compatibility graph.

    :param experiments: a tomography experiment
    :return: a tomography experiment with exactly the same settings, grouped by shared TPB
    """
    graph = construct_tpb_graph(experiments)
    _, cliques = clique_removal(graph)

    grouped: List[List[ExperimentSetting]] = []
    for compatible in cliques:
        # each setting node carries a `count`; replicate the setting that many times
        members: List[ExperimentSetting] = [
            setting
            for setting in compatible
            for _ in range(graph.nodes[setting]["count"])
        ]
        grouped.append(members)

    return Experiment(
        grouped,
        program=experiments.program,
        symmetrization=experiments.symmetrization,
    )
Code example #2
0
File: utils.py  Project: mikh3x4/forest-benchmarking
def determine_simultaneous_grouping(experiments: Sequence[DataFrame],
                                    equivalent_column_label: str = None) -> List[Set[int]]:
    """
    Determine which experiments act on disjoint sets of qubits and therefore can be
    run simultaneously.

    :param experiments: experiments to group; each exposes its qubit set in the
        first entry of its "Qubits" column
    :param equivalent_column_label: if given, two experiments are only grouped when
        their values in this column are identical
    :return: a list of simultaneous groups, each a set of indices into ``experiments``
    """
    graph = nx.Graph()
    indices = np.arange(len(experiments))
    graph.add_nodes_from(indices)
    qubit_sets = [expt["Qubits"].values[0] for expt in experiments]

    required_equal = None
    if equivalent_column_label is not None:
        required_equal = [expt[equivalent_column_label].values for expt in experiments]

    for i in indices:
        for j in indices[i + 1:]:
            if len(qubit_sets[i].intersection(qubit_sets[j])) != 0:
                # shared qubits: these two cannot run at the same time
                continue
            if required_equal is not None and \
                    not np.array_equal(required_equal[i], required_equal[j]):
                # requested columns differ, so do not group
                continue
            graph.add_edge(i, j)

    # the largest cliques of mutually-compatible experiments can each run simultaneously
    _, cliques = clique_removal(graph)

    return cliques
def immSimCliqs(dg):
    """Return the immediate similarity for every unordered pair of maximal cliques of dg.

    Cliques come from the clique-removal decomposition of the graph; the result is
    one similarity value per pair, in itertools.combinations order.
    """
    _, max_cliques = clique.clique_removal(dg)
    return [
        get_immediate_similarity(dg, first, second)
        for first, second in itertools.combinations(max_cliques, 2)
    ]
def dgWithCliqs(num_cliq):
    """
    Build a random directed graph whose clique-removal decomposition yields exactly
    ``num_cliq`` cliques.

    :param num_cliq: the desired clique count
    :return: a graph produced by makeDirectedGraph with the requested clique count
    """
    # Retry in a loop rather than recursing: the original recursive retry could
    # exceed Python's recursion limit when the target count is rarely hit by chance.
    while True:
        # create DAG: random size between num_cliq and 3*num_cliq, ~1.5x as many edges
        rand_num_nodes = random.randint(num_cliq, num_cliq * 3)
        dg = makeDirectedGraph(rand_num_nodes, int(rand_num_nodes * 1.5))

        # count the cliques found by clique removal; keep the graph only on a match
        cliq_rem = clique.clique_removal(dg)
        if len(cliq_rem[1]) == num_cliq:
            return dg
Code example #5
0
def generate_nodes(graph, num_seeds):
    """
    Pick seed nodes round-robin: the highest-degree node of each large clique first,
    then the second-highest of each, and so on.

    :param graph: a networkx graph
    :param num_seeds: number of seeds requested
    :return: a list of at most num_seeds node identifiers
    """
    seeds = []
    max_ind, cliques = clique.clique_removal(graph)
    # sort such that the largest cliques are in front
    cliques = [list(cliq) for cliq in sorted(cliques, key=len, reverse=True)]
    # per clique: [(node, degree), ...] sorted by degree, largest first.
    # (Rewritten with comprehensions: the original used Python-2-only
    # tuple-unpacking lambdas and reusable map/filter lists.)
    clique_degree = [
        sorted(((node, nx.degree(graph, node)) for node in cliq),
               key=lambda nd: nd[1], reverse=True)
        for cliq in cliques
    ]

    degree_rank_within_clique = 0
    while len(seeds) < num_seeds:
        # keep only cliques deep enough to supply a node at the current rank
        clique_degree = [cliq for cliq in clique_degree
                         if len(cliq) > degree_rank_within_clique]
        if not clique_degree:
            break  # no clique has this many members; avoid looping forever
        seeds.extend(cliq[degree_rank_within_clique][0] for cliq in clique_degree
                     if cliq[degree_rank_within_clique][0] not in seeds)
        # BUG FIX: the original never advanced the rank, so any pass that failed
        # to reach num_seeds repeated forever with no new nodes to add.
        degree_rank_within_clique += 1

    return seeds[:num_seeds]
def getCliqueData(gm):
    """
    Compute clique / independent-set statistics for graph gm, replicated into
    columns with one entry per ordered pair of distinct nodes.

    :param gm: a networkx graph
    :return: dict of column name -> list of length 2 * C(n, 2)
    """
    node_list = list(gm.nodes)
    # one row per ordered pair of distinct nodes: 2 * C(n, 2)
    num_rows = len(list(it.combinations(node_list, 2))) * 2

    longest_max_indep_set = len(independent_set.maximum_independent_set(gm))
    # BUG FIX: the clique list itself was previously multiplied by num_rows
    # (`list(...) * num_rows`), which inflated "Number of Maximum Cliques"
    # by a factor of num_rows. Keep the actual clique list instead.
    max_cliques = list(clique.clique_removal(gm)[1])
    num_max_cliques = len(max_cliques)
    longest_max_clique = len(max_cliques[0])

    clique_data = {
        "Longest Maximum Independent Set": [longest_max_indep_set] * num_rows,
        "Number of Maximum Cliques": [num_max_cliques] * num_rows,
        "Longest Maximum Clique": [longest_max_clique] * num_rows
    }

    return clique_data
Code example #7
0
def group_experiments_clique_removal(experiments: TomographyExperiment) -> TomographyExperiment:
    """
    Collect experiments diagonal in a shared tensor product basis (TPB) into groups,
    minimizing the number of QPU runs, by removing cliques from the TPB graph.

    :param experiments: a tomography experiment
    :return: a tomography experiment with identical settings, grouped by shared TPB
    """
    graph = construct_tpb_graph(experiments)
    _, cliques = clique_removal(graph)
    # expand every clique member `count` times, as recorded on its graph node
    grouped = [
        [expt for expt in cliq for _ in range(graph.nodes[expt]['count'])]
        for cliq in cliques
    ]
    return TomographyExperiment(grouped, program=experiments.program, qubits=experiments.qubits)
Code example #8
0
def group_settings_clique_removal(experiment: ObservablesExperiment) -> ObservablesExperiment:
    """
    Group settings that share a tensor product basis (TPB) to reduce the number of
    QPU runs, using clique removal over the TPB compatibility graph.

    :param experiment: an ObservablesExperiment
    :return: an ObservablesExperiment with the same settings, grouped by shared TPB
    """
    graph = construct_tpb_graph(experiment)
    _, cliques = clique_removal(graph)

    grouped = []
    for compatible in cliques:
        members = []
        for sett in compatible:
            # each setting node carries a `count`; replicate it that many times
            members.extend([sett] * graph.nodes[sett]['count'])
        grouped.append(members)

    return ObservablesExperiment(grouped, program=experiment.program)
Code example #9
0
def generate_nodes(graph, num_seeds):
    """
    Pick seed nodes round-robin across the largest cliques: each clique's
    highest-degree node first, then the second-highest of each, and so on.

    :param graph: a networkx graph
    :param num_seeds: number of seeds requested
    :return: a list of at most num_seeds node identifiers
    """
    seeds = []
    max_ind, cliques = clique.clique_removal(graph)
    # sort such that the largest cliques are in front
    cliques = [list(cliq) for cliq in sorted(cliques, key=len, reverse=True)]
    # per clique: [(node, degree), ...] sorted by degree, largest first.
    # (The original used a Python-2-only tuple-unpacking lambda and lazy
    # map/filter objects that cannot be re-filtered each loop pass in Python 3.)
    clique_degree = [
        sorted(((node, nx.degree(graph, node)) for node in cliq),
               key=lambda nd: nd[1],
               reverse=True)
        for cliq in cliques
    ]

    degree_rank_within_clique = 0
    while len(seeds) < num_seeds:
        # drop cliques too small to supply a node at the current rank
        clique_degree = [cliq for cliq in clique_degree
                         if len(cliq) > degree_rank_within_clique]
        if not clique_degree:
            # every clique exhausted; stop rather than spin forever
            break
        seeds.extend(cliq[degree_rank_within_clique][0]
                     for cliq in clique_degree
                     if cliq[degree_rank_within_clique][0] not in seeds)
        # BUG FIX: the original never incremented the rank, so an unfinished
        # pass repeated indefinitely without ever adding new seeds.
        degree_rank_within_clique += 1

    return seeds[:num_seeds]
Code example #10
0
def generate_features(clusters):
    """
    Refine coarse clusters into final groups (Python 2 code: print statements, cmp=).

    Two-stage split of every input cluster:
      1. Build a graph with an edge for each record pair the positive classifier
         joins, and take its connected components.
      2. Re-split each component with the negative classifier, then break the
         resulting graph apart with clique removal.

    :param clusters: mapping of cluster key -> list of records; each record's
        first element acts as a node identifier (assumption from usage of
        ``c[0]`` — TODO confirm against the caller)
    :return: list of final clusters, each a list of node identifiers
    """
    # NOTE(review): `count` accumulates l*l per cluster but is never used or returned.
    count = 0
    print len(clusters)
    clusters = sorted(clusters.items(),cmp=len_cmp)
    ncl_real = []
    for key,val in clusters:
        l = len(val)
        #TODO: handle "" clusters and single element clusters, "vaziraniz"
        val = sorted(val,cmp=val_cmp)
        #uf.insert_objects([c[0] for c in val])
        print "cluster: "+key+" nelems:%d"%len(val)
        count += l*l
        # singleton cluster: emit as-is, no pairwise work needed
        if l==1:
            ncl_real.append([val[0][0]])
            continue
        # empty key: treat every record as its own cluster
        if key == '':
            ncl_real.extend([[c[0]] for c in val])
            continue

        graph = nx.Graph()

        # map node id -> full record so components can be rehydrated below
        node_map = {}
        for c in val:
            node_map[c[0]] = c

        graph.add_nodes_from([c[0] for c in val])
        # stage 1: connect every pair the positive classifier scores above zero
        for i in range(l):
            for j in range(i+1,l):
                scoresvec = gen_features(val[i],val[j],fgen_pos_array)
                res = pos_classifier.predict(scoresvec)
                if res[0]>0.0:
                    #print "joining %s,%s"%(val[i][1],val[j][1])
                    graph.add_edges_from([(val[i][0],val[j][0])],weight=res)
                    #graph.add_edges_from([(val[i],val[j])],weight=res)

        #cli = clique_removal(graph)
        cli = nx.connected_components(graph)
        #pdb.set_trace()
        #for c in cli[1]:
        #    h = []
        #    for m in c:
        #        h.append(m)
        #    ncl_real.append(h)

        new_real = []
        # stage 2: re-split each connected component with the negative classifier.
        # NOTE(review): `val` and `l` are deliberately rebound here, shadowing the
        # outer-loop values for the rest of this iteration.
        for cl in cli:
            val = [node_map[c] for c in cl]
            l = len(val)
            val = sorted(val,cmp=val_cmp)
            #print "cluster: "+key+" nelems:%d"%len(val)

            if l==1:
                new_real.append([val[0][0]])
                continue
            #if key == '':
            #    new_real.append([val[0][0]])
            #    continue

            graph = nx.Graph()

            graph.add_nodes_from([c[0] for c in val])
            for i in range(l):
                for j in range(i+1,l):
                    scoresvec = gen_features(val[i],val[j],fgen_neg_array)
                    res = neg_classifier.predict(scoresvec)
                    #print "looking at %s,%s %f"%(val[i][1],val[j][1],res)
                    #print scoresvec
                    if res > 0.0:
                        #print "making %s,%s"%(val[i][1],val[j][1])
                        graph.add_edges_from([(val[i][0],val[j][0])],weight=res)
                        #graph.add_edges_from([(val[i],val[j])],weight=res)

            #pdb.set_trace()
            # clique_removal returns (independent set, cliques); index [1] below
            # iterates the cliques, each becoming a final cluster
            cli = clique_removal(graph)
            for c in cli[1]:
                h = []
                for m in c:
                    h.append(m)
                new_real.append(h)
        ncl_real.extend(new_real)

    return ncl_real