コード例 #1
0
        # NOTE(review): fragment of a larger catalog-driven experiment loop; the
        # enclosing function (and names like icatalog, gcatalog, k, maxs, ofile,
        # header_row, number_of_trials) are defined outside this chunk.
        # Expand the requested catalog range into a list of input IDs and a
        # parallel list of per-input tuples -- tuple layout assumed to be
        # (graph edge data, ?, paths); TODO confirm against inputgen.
        IID_list, tuples_list = inputgen.inputgen_from_catalog_range(icatalog, gcatalog, **catalog_range)
        for j in range(len(IID_list)):
            
            # Here, read in G and P from the catalog.
            # Add weights, set up the RNG, and start the parallel
            # processes.  Here, we use the trial function for the case
            # where we already have the paths.
            
            IID = IID_list[j]

            # Graph from the tuple's first entry; paths from its third entry,
            # presumably with self-loop paths removed (per the helper's name).
            G = nx.Graph(tuples_list[j][0])
            P = ps.removeSelfLoops(tuples_list[j][2])

            # Optionally attach path-derived edge weights plus reciprocal
            # "tolerance" weights; node weights (edge means) are only added
            # when edge weights were.
            if edge_attr is not None:
                weighting.add_edge_weights_from_paths(G, P, edge_attr_name=edge_attr)
                weighting.add_reciprocal_weights(G, src_attr_name=edge_attr, dst_attr_name=statTol, components='edges', use_default=True, default=0)
                if node_attr is not None:
                    weighting.add_node_weights_from_edge_mean(G, node_attr_name=node_attr, edge_attr_name=edge_attr)
                    weighting.add_reciprocal_weights(G, src_attr_name=node_attr, dst_attr_name=statTol, components='nodes', use_default=True, default=0)

            # Fixed-seed RNG that hands out one independent seed per trial so
            # the whole batch is reproducible run-to-run.
            control_RNG = random.Random()
            control_RNG.seed(a=1)

            # Per-path probing probability chosen so that k paths are probed in
            # expectation, capped at 1.0 when k exceeds the path count.
            psize = num_probing_paths(P)
            p = min(1.0, float(k)/psize)

            # Partial per-trial output record; the trial function fills in the rest.
            output = {'IID': IID, 'p':p, 'k':k, 'maxS':maxs, 'edgeAttr':edge_attr_str, 'nodeAttr':node_attr_str, 'statImp':statImpStr, 'statTol':statTolStr}

            # Lazily generate one argument tuple per trial, each with a fresh
            # seed drawn from control_RNG and its own deep copy of output.
            # (sys.maxint bounds mark this as Python 2 code.)
            trial_arg_gen = ((ofile, header_row, i, control_RNG.randint(-sys.maxint - 1, sys.maxint), maxs, P, G, p, copy.deepcopy(output), statImp, statTol) for i in range(number_of_trials))
            
            
コード例 #2
0
def randbasic_trial_from_num_beacons(rb_args):
    """
    Run a single randbasic trial when given the graph and number of beacons
    
    :param rb_args: a tuple with the following (in this order):
    
    ofname: file name for output
    header_row: header for use in opening a DictWriter
    trial_num: index of this trial
    G: NetworkX graph object
    num_beacons: number of nodes from G to use as beacons
    trial_seed: main seed used for this trial
    maxs: maximum number of steps to simulate
    k: number of paths to probe in expectation
    output: dict with partial info about this trial; this is mutated here
    edge_attr: weight attribute for edges
    node_attr: weight attribute for nodes
    statImp: importance weight for computing result statistics
    statTol: tolerance weight for computing result statistics
    
    This chooses a set of beacons of specified size, chooses probing paths between the set of beacons, runs a trial, computes the statistics on the probing sequence, and writes the result to ofname (after taking a lock on it so that many copies of this can be run in parallel).
    """
    (ofname, header_row, trial_num, G, num_beacons, trial_seed, maxs, k, output, edge_attr, node_attr, statImp, statTol) = rb_args

    def _write_output_row():
        # Append the current output dict to the CSV file.  Below, olock is a
        # global lock shared by all processes running this in parallel; it is
        # acquired before writing to the output file and released after the
        # writing is done, so rows from different workers are not interleaved.
        with olock:
            with open(ofname, 'a') as csvfile:
                writer = csv.DictWriter(csvfile, header_row, extrasaction='ignore')
                writer.writerow(output)

    # Create the RNG for this trial and seed it with the seed from the
    # top-level RNG passed in the argument.  Even if we don't use all these
    # seeds, create them in the same order so that, e.g., sim_seed is always
    # the seventh output of our trial's RNG.  Save the relevant ones in
    # the output dict to be written to file.
    output['trialseed'] = trial_seed
    output['trialnum'] = trial_num
    trial_RNG = random.Random()
    trial_RNG.seed(a=trial_seed)
    g_seed = trial_RNG.randint(-sys.maxint - 1, sys.maxint)
    b_seed = trial_RNG.randint(-sys.maxint - 1, sys.maxint)
    p_seed = trial_RNG.randint(-sys.maxint - 1, sys.maxint)
    w_seed = trial_RNG.randint(-sys.maxint - 1, sys.maxint)
    t_seed = trial_RNG.randint(-sys.maxint - 1, sys.maxint)
    res_seed = trial_RNG.randint(-sys.maxint - 1, sys.maxint)
    sim_seed = trial_RNG.randint(-sys.maxint - 1, sys.maxint)
    # Seeds:
    # g: graph G (recorded only; G is passed in already constructed here)
    # b: beacons
    # p: paths P
    # w: weighting (reserved)
    # t: test set (reserved)
    # res: reserved for future use
    # sim: simulation

    output['gseed'] = g_seed
    output['bseed'] = b_seed
    output['pseed'] = p_seed
    output['wseed'] = 'na'
    output['tseed'] = 'na'
    output['simseed'] = sim_seed

    # Choose num_beacons beacons using a RNG seeded with b_seed.  Sorting the
    # nodes first makes the sample reproducible regardless of set ordering.
    beacon_RNG = random.Random()
    beacon_RNG.seed(a=b_seed)
    B = beacon_RNG.sample(sorted(G.nodes()), num_beacons)

    # Construct all paths in G with randomness p_seed
    all_paths = jrnx.jr_all_pairs_shortest_path(G, seed=p_seed)
    P = dict()

    # Now select only the paths that are between (distinct) beacons
    for s in B:
        for d in B:
            if d != s:
                P.setdefault(s, dict())[d] = all_paths[s][d]

    # Add weights to edges and nodes if desired; node weights are derived from
    # the edge weights, so they are only added when edge weights were.
    if edge_attr is not None:
        weighting.add_edge_weights_from_paths(G, P, edge_attr_name=edge_attr)
        weighting.add_reciprocal_weights(G, src_attr_name=edge_attr, dst_attr_name=statTol, components='edges', use_default=True, default=0)
        if node_attr is not None:
            weighting.add_node_weights_from_edge_mean(G, node_attr_name=node_attr, edge_attr_name=edge_attr)
            weighting.add_reciprocal_weights(G, src_attr_name=node_attr, dst_attr_name=statTol, components='nodes', use_default=True, default=0)

    # Per-path probing probability chosen so that k paths are probed in
    # expectation, capped at 1.0 when k exceeds the number of probing paths.
    psize = num_probing_paths(P)
    p = min(1.0, float(k)/psize)

    output['p'] = p

    # Run a trial, with sim_seed as the randomness, to produce the
    # probing sequence seq.  Deep copies keep the trial from mutating
    # our G and P.
    start_time = timer()
    try:
        seq = strategy.simulate(copy.deepcopy(G), copy.deepcopy(P), p, trialseed=sim_seed, maxsteps=maxs)
        output['simtime'] = timer() - start_time
    except Exception as e:
        output['simtime'] = timer() - start_time
        output['status'] = 'Exception raised during simulate: ' + repr(e)
        _write_output_row()
        return

    # Compute the statistics on seq
    try:
        results = stats.all_sequence_stats(seq, P, G, importance=statImp, tolerance=statTol)
    except Exception as e:
        output['status'] = 'Exception raised during stats computation ' + repr(e)
        _write_output_row()
        return

    # Update output with the statistics and write it to file
    output.update(results)
    _write_output_row()
    return
コード例 #3
0
def setcover_ba_trial(sc_args):
    """
    Run a single setcover trial for specified B--A graph parameters
    
    :param sc_args: a tuple with the following (in this order):
    
    fractional: whether to use fractional version of B--A algorithm
    ofname: file name for output
    header_row: header for use in opening a DictWriter
    trial_num: index of this trial
    b: number of beacons to use
    n: number of graph nodes
    bam: int number of edges for each new node in B--A model
    trial_seed: main seed used for this trial
    maxs: maximum number of steps to simulate
    alpha: setcover parameter alpha
    k: number of paths to probe in expectation
    output: dict with partial info about this trial; this is mutated here
    edge_attr: weight attribute for edges
    node_attr: weight attribute for nodes
    statImp: importance weight for computing result statistics
    statTol: tolerance weight for computing result statistics
    
    This generates a B--A preferential-attachment graph, chooses beacons, computes probing paths, runs a trial, computes the statistics on the probing sequence, and writes the result to ofname (after taking a lock on it so that many copies of this can be run in parallel).
    """
    (fractional, ofname, header_row, trial_num, b, n, bam, trial_seed, maxs, alpha, k, output, edge_attr, node_attr, statImp, statTol) = sc_args

    def _write_output_row():
        # Append the current output dict to the CSV file.  Below, olock is a
        # global lock shared by all processes running this in parallel; it is
        # acquired before writing to the output file and released after the
        # writing is done, so rows from different workers are not interleaved.
        with olock:
            with open(ofname, 'a') as csvfile:
                writer = csv.DictWriter(csvfile, header_row, extrasaction='ignore')
                writer.writerow(output)

    # Create the RNG for this trial and seed it with the seed from the
    # top-level RNG passed in the argument.  Even if we don't use all these
    # seeds, create them in the same order so that, e.g., sim_seed is always
    # the seventh output of our trial's RNG.  Save the relevant ones in
    # the output dict to be written to file.
    output['trialseed'] = trial_seed
    output['trialnum'] = trial_num
    output['fractional'] = fractional
    trial_RNG = random.Random()
    trial_RNG.seed(a=trial_seed)
    g_seed = trial_RNG.randint(-sys.maxint - 1, sys.maxint)
    b_seed = trial_RNG.randint(-sys.maxint - 1, sys.maxint)
    p_seed = trial_RNG.randint(-sys.maxint - 1, sys.maxint)
    w_seed = trial_RNG.randint(-sys.maxint - 1, sys.maxint)
    t_seed = trial_RNG.randint(-sys.maxint - 1, sys.maxint)
    res_seed = trial_RNG.randint(-sys.maxint - 1, sys.maxint)
    sim_seed = trial_RNG.randint(-sys.maxint - 1, sys.maxint)
    # Seeds:
    # g: graph G
    # b: beacons
    # p: paths P
    # w: weighting (reserved)
    # t: test set (reserved)
    # res: reserved for future use
    # sim: simulation

    output['gseed'] = g_seed
    output['bseed'] = b_seed
    output['pseed'] = p_seed
    output['wseed'] = 'na'
    output['tseed'] = 'na'
    output['simseed'] = sim_seed

    # Construct a B--A graph using g_seed as its randomness.  Use fractional
    # version as indicated by fractional parameter.  Make sure G is connected;
    # if it somehow is not, record the failure and bail out of this trial.
    if fractional:
        G = jrnx.jr_fractional_barabasi_albert_graph(n, bam, seed=g_seed)
    else:
        G = jrnx.jr_barabasi_albert_graph(n, bam, seed=g_seed)
    if not nx.is_connected(G):
        output['status'] = 'G is unexpectedly not connected!'
        _write_output_row()
        return

    # Choose b beacons using a RNG seeded with b_seed.  Sorting the nodes
    # first makes the sample reproducible regardless of set ordering.
    beacon_RNG = random.Random()
    beacon_RNG.seed(a=b_seed)
    beacons = beacon_RNG.sample(sorted(G.nodes()), b)

    # Construct all paths in G with randomness p_seed
    all_paths = jrnx.jr_all_pairs_shortest_path(G, seed=p_seed)
    P = dict()

    # Now select only the paths that are between (distinct) beacons
    for s in beacons:
        for d in beacons:
            if d != s:
                P.setdefault(s, dict())[d] = all_paths[s][d]

    # Add weights to edges and nodes if desired; node weights are derived from
    # the edge weights, so they are only added when edge weights were.
    if edge_attr is not None:
        weighting.add_edge_weights_from_paths(G, P, edge_attr_name=edge_attr)
        weighting.add_reciprocal_weights(G, src_attr_name=edge_attr, dst_attr_name=statTol, components='edges', use_default=True, default=0)
        if node_attr is not None:
            weighting.add_node_weights_from_edge_mean(G, node_attr_name=node_attr, edge_attr_name=edge_attr)
            weighting.add_reciprocal_weights(G, src_attr_name=node_attr, dst_attr_name=statTol, components='nodes', use_default=True, default=0)

    # Run a trial, with sim_seed as the randomness, to produce the
    # probing sequence seq.  Deep copies keep the trial from mutating
    # our G and P.
    start_time = timer()
    try:
        seq = strategy.simulate(copy.deepcopy(G), copy.deepcopy(P), alpha=alpha, trialseed=sim_seed, k=k, maxsteps=maxs)
        output['simtime'] = timer() - start_time
    except Exception as e:
        output['simtime'] = timer() - start_time
        output['status'] = 'Exception raised during simulate: ' + repr(e)
        _write_output_row()
        return

    # Compute the statistics on seq
    try:
        results = stats.all_sequence_stats(seq, P, G, importance=statImp, tolerance=statTol)
    except Exception as e:
        output['status'] = 'Exception raised during stats computation ' + repr(e)
        _write_output_row()
        return

    # Update output with the statistics and write it to file
    output.update(results)
    _write_output_row()
    return