Example #1
def eval_all(dags, filename):

    results = {}

    inner_eval = Evaluator(filename)

    # Results arrive in completion order, not submission order, so key each
    # one by the original index that the evaluator echoes back in e[1].
    for e in futures.map_as_completed(inner_eval, enumerate(dags)):
        results[str(e[1])] = e

    # Reassemble the evaluation values in the original order of `dags`.
    res = [results[str(d[0])][0] for d in enumerate(dags)]

    return res
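The pattern in Example #1 (carrying the original index through map_as_completed so out-of-order results can be reassembled) can be reduced to a self-contained sketch. The square worker below is a hypothetical stand-in for the Evaluator callable:

from scoop import futures


def square(item):
    # item is an (index, value) pair produced by enumerate()
    index, value = item
    return index, value * value


def eval_all_ordered(values):
    results = {}
    # map_as_completed yields results in completion order, so keep the
    # original index with each result and restore the order at the end.
    for index, result in futures.map_as_completed(square, enumerate(values)):
        results[index] = result
    return [results[i] for i in range(len(values))]


if __name__ == "__main__":
    print(eval_all_ordered([1, 2, 3, 4]))  # [1, 4, 9, 16]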
Example #2
def simulate(config):
    # `config` arrives as an open file handle; replace it with the parsed dict.
    config_data = json.load(config)
    config.close()
    config = config_data
    
    output_formatter = config["output_formatter"]
    subs = {a:b for a,b in config.get("range_substitutions", [["Z",{"min":0,"max":0.5,"step":1}]])}
    range_constraints = [eval("lambda {}:{}".format(",".join(subs.keys()),con)) for con in config["range_constraints"]]
    def switch_weighter(vector):
        return lambda x,i,j: x if vector[j] is None else vector[j].pop() if x else 0
    switch = config['switch']
    write_delay = config.get("write_delay",10)
    
    V = {}
    V['choice'] = matrix_map(config['choice'],lambda x,i,j:parse_expr(str(x)))
    V['i_ip'] = int(config['power_iterations']["super_iterations"])
    V['i_i'] = int(config['power_iterations']["iterations"])
    V['iterations'] = int(config['iterations'])
    if "starting_points" in config:
        origional_pops = [[a/sum(s) if sum(s)>0 else 1.0/len(s) for a in s] for s in config['starting_points']]
        V['origional_pops'] = [numpy.matrix([a]).transpose() for a in origional_pops]
    else:
        V['origional_pops'] = [numpy.matrix([[1.0/len(V['choice'])] for s in range(len(V['choice']))])]
    V['extrema'] = [numpy.matrix(matrix_map(switch,switch_weighter(e))) for e in generate_switch_extrema(switch)]
    V['pop_symbols'] = [sympy.symbols("s{}".format(i)) for i in range(len(V['choice']))]
    V['subs_symbols'] = {a: sympy.symbols(a) for a, b in subs.items()}
    V['sequence'] = config.get('sequence',"1.0/(x+2)")
    V['weight_function'] = config['weight_function']
    shared.setConst(V=V)
    
    output_json = []
    
    # Keep only the parameter combinations that satisfy every range constraint.
    big_list = [a for a in multi_iterate(subs) if False not in [con(**a) for con in range_constraints]]
    progress = tqdm(total=len(big_list))
    last_write_time = time()
    def output_file(output_json):
        out_file = open(output_formatter, "w")
        out_file.write(json.dumps({"output":output_json},sort_keys=True).replace('{"parameters":','\n{"parameters":'))
        out_file.flush()
        out_file.close()
    for result in futures.map_as_completed(process, big_list):
        output_json.append(result)
        progress.update()
        if time()-last_write_time > write_delay:
            output_file(output_json)
            last_write_time = time()
    output_file(output_json)
    progress.close()
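Example #2 publishes the V dictionary with scoop.shared.setConst before submitting work, so the process worker (defined elsewhere in that project) can read it on every host with shared.getConst. A minimal sketch of that pattern, using a hypothetical scale worker and constant name:

from scoop import futures, shared


def scale(x):
    # Workers retrieve the shared constant by name.
    return x * shared.getConst('factor')


if __name__ == "__main__":
    # Constants must be set before the futures that depend on them are submitted.
    shared.setConst(factor=10)
    for result in futures.map_as_completed(scale, range(5)):
        print(result)  # results arrive in completion order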
Example #3
def funcMapAsCompleted(n):
    result = list(futures.map_as_completed(func4, [i+1 for i in range(n)]))
    return sum(result)
Example #4
            execPred = arg
        elif opt in ("-i"):
            imgPath = arg
        elif opt in ("-g"):
            gtPath = arg
        elif opt in ("-u"):
            unPath = arg
        elif opt in ("-o"):
            outPath = arg
        elif opt in ("-w"):
            weightsFile = arg
        elif opt in ("-f"):
            featureWeightsFile = arg
        elif opt in ("-n"):
            numClusters = arg
        elif opt in ("-t"):
            trainingFile = arg

    with open(trainingFile) as f:
        lines = f.read().splitlines()

    random.shuffle(
        lines
    )  # If a host doesn't work properly it is unlikely that the same job is scheduled to it twice in a row
    #print("{}".format(lines))

    for errcode, filename, attempts in futures.map_as_completed(
            runWorker, lines):
        print("Finished {} with return code {} after {} attempts.".format(
            filename, errcode, attempts))
Example #5
from scoop import futures
import scoop
import random

def pi(_):
    # One Monte Carlo trial: 1 if a random point in the unit square falls
    # inside the quarter circle, 0 otherwise.
    if random.random()**2 + random.random()**2 <= 1:
        return 1
    return 0

if __name__ == '__main__':
    samples = 10000
    # Completion order does not matter here because the results are only summed.
    parallel = futures.map_as_completed(pi, range(samples))
    print("Estimated Pi value is:", 4*sum(parallel)/samples)

Example #6
#
#    SCOOP is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Lesser General Public License for more details.
#
#    You should have received a copy of the GNU Lesser General Public
#    License along with SCOOP. If not, see <http://www.gnu.org/licenses/>.
#
"""
Shows the usage of the map_as_completed() function
"""
from scoop import futures


def hello(input_):
    return input_


if __name__ == "__main__":
    print("Execution of map():")
    # Example of how to use a normal map function
    for out in futures.map(hello, range(10)):
        print("Hello from #{}!".format(out))

    print("Execution of map_as_completed():")
    # Example of map_as_completed usage. Note that the results won't necessarily
    # be ordered like those of the previous map() call.
    for out in futures.map_as_completed(hello, range(10)):
        print("Hello from #{}!".format(out))
Example #7
def infer_network(network_inference,
                  time_series,
                  parallel_target_analysis=False):
    # Define parameter options dictionaries
    network_inference_algorithms = pd.DataFrame()
    network_inference_algorithms['Description'] = pd.Series({
        'bMI_greedy':
        'Bivariate Mutual Information via greedy algorithm',
        'bTE_greedy':
        'Bivariate Transfer Entropy via greedy algorithm',
        'mMI_greedy':
        'Multivariate Mutual Information via greedy algorithm',
        'mTE_greedy':
        'Multivariate Transfer Entropy via greedy algorithm',
        'cross_corr':
        'Cross-correlation thresholding algorithm'
    })
    network_inference_algorithms['Required parameters'] = pd.Series({
        'bMI_greedy': [
            'min_lag_sources', 'max_lag_sources', 'tau_sources', 'tau_target',
            'cmi_estimator', 'z_standardise', 'permute_in_time',
            'n_perm_max_stat', 'n_perm_min_stat', 'n_perm_omnibus',
            'n_perm_max_seq', 'fdr_correction', 'p_value'
            # 'alpha_max_stats',
            # 'alpha_min_stats',
            # 'alpha_omnibus',
            # 'alpha_max_seq',
            # 'alpha_fdr'
        ],
        'bTE_greedy': [
            'min_lag_sources', 'max_lag_sources', 'tau_sources',
            'max_lag_target', 'tau_target', 'cmi_estimator', 'z_standardise',
            'permute_in_time', 'n_perm_max_stat', 'n_perm_min_stat',
            'n_perm_omnibus', 'n_perm_max_seq', 'fdr_correction', 'p_value'
            # 'alpha_max_stats',
            # 'alpha_min_stats',
            # 'alpha_omnibus',
            # 'alpha_max_seq',
            # 'alpha_fdr'
        ],
        'mMI_greedy': [
            'min_lag_sources', 'max_lag_sources', 'tau_sources', 'tau_target',
            'cmi_estimator', 'z_standardise', 'permute_in_time',
            'n_perm_max_stat', 'n_perm_min_stat', 'n_perm_omnibus',
            'n_perm_max_seq', 'fdr_correction', 'p_value'
            # 'alpha_max_stats',
            # 'alpha_min_stats',
            # 'alpha_omnibus',
            # 'alpha_max_seq',
            # 'alpha_fdr'
        ],
        'mTE_greedy': [
            'min_lag_sources', 'max_lag_sources', 'tau_sources',
            'max_lag_target', 'tau_target', 'cmi_estimator', 'z_standardise',
            'permute_in_time', 'n_perm_max_stat', 'n_perm_min_stat',
            'n_perm_omnibus', 'n_perm_max_seq', 'fdr_correction', 'p_value'
            # 'alpha_max_stats',
            # 'alpha_min_stats',
            # 'alpha_omnibus',
            # 'alpha_max_seq',
            # 'alpha_fdr'
        ],
        'cross_corr': ['min_lag_sources', 'max_lag_sources']
    })
    try:
        # Ensure that a network inference algorithm has been specified
        if 'algorithm' not in network_inference:
            raise ParameterMissing('algorithm')
        # Ensure that the provided algorithm is implemented
        if network_inference.algorithm not in network_inference_algorithms.index:
            raise ParameterValue(network_inference.algorithm)
        # Ensure that all the parameters required by the algorithm have been provided
        par_required = network_inference_algorithms['Required parameters'][
            network_inference.algorithm]
        for par in par_required:
            if par not in network_inference:
                raise ParameterMissing(par)

    except ParameterMissing as e:
        print(e.msg, e.par_names)
        raise
    except ParameterValue as e:
        print(e.msg, e.par_value)
        raise

    else:
        nodes_n = np.shape(time_series)[0]

        can_be_z_standardised = True
        if network_inference.z_standardise:
            # Check if data can be normalised per process (assuming the
            # first dimension represents processes, as in the rest of the code)
            can_be_z_standardised = np.all(np.std(time_series, axis=1) > 0)
            if not can_be_z_standardised:
                print('Time series can not be z-standardised')

        if len(time_series.shape) == 2:
            dim_order = 'ps'
        else:
            dim_order = 'psr'

        # initialise an empty data object
        dat = Data()

        # Load time series
        dat = Data(time_series,
                   dim_order=dim_order,
                   normalise=(network_inference.z_standardise
                              & can_be_z_standardised))

        algorithm = network_inference.algorithm
        if algorithm in [
                'bMI_greedy', 'mMI_greedy', 'bTE_greedy', 'mTE_greedy'
        ]:
            # Set analysis options
            if algorithm == 'bMI_greedy':
                network_analysis = BivariateMI()
            if algorithm == 'mMI_greedy':
                network_analysis = MultivariateMI()
            if algorithm == 'bTE_greedy':
                network_analysis = BivariateTE()
            if algorithm == 'mTE_greedy':
                network_analysis = MultivariateTE()

            settings = {
                'min_lag_sources': network_inference.min_lag_sources,
                'max_lag_sources': network_inference.max_lag_sources,
                'tau_sources': network_inference.tau_sources,
                'max_lag_target': network_inference.max_lag_target,
                'tau_target': network_inference.tau_target,
                'cmi_estimator': network_inference.cmi_estimator,
                'kraskov_k': network_inference.kraskov_k,
                'num_threads': network_inference.jidt_threads_n,
                'permute_in_time': network_inference.permute_in_time,
                'n_perm_max_stat': network_inference.n_perm_max_stat,
                'n_perm_min_stat': network_inference.n_perm_min_stat,
                'n_perm_omnibus': network_inference.n_perm_omnibus,
                'n_perm_max_seq': network_inference.n_perm_max_seq,
                'fdr_correction': network_inference.fdr_correction,
                'alpha_max_stat': network_inference.p_value,
                'alpha_min_stat': network_inference.p_value,
                'alpha_omnibus': network_inference.p_value,
                'alpha_max_seq': network_inference.p_value,
                'alpha_fdr': network_inference.p_value
            }

            # # Add optional settings
            # optional_settings_keys = {
            #     'config.debug',
            #     'config.max_mem_frac'
            # }

            # for key in optional_settings_keys:
            #     if traj.f_contains(key, shortcuts=True):
            #         key_last = key.rpartition('.')[-1]
            #         settings[key_last] = traj[key]
            #         print('Using optional setting \'{0}\'={1}'.format(
            #             key_last,
            #             traj[key])
            #         )

            if parallel_target_analysis:
                # Use SCOOP to create a generator of map results, each
                # corresponding to one map iteration
                res_iterator = futures.map_as_completed(
                    network_analysis.analyse_single_target,
                    itertools.repeat(settings, nodes_n),
                    itertools.repeat(dat, nodes_n), list(range(nodes_n)))
                # Run analysis
                res_list = list(res_iterator)
                if settings['fdr_correction']:
                    res = network_fdr({'alpha_fdr': settings['alpha_fdr']},
                                      *res_list)
                else:
                    res = res_list[0]
                    res.combine_results(*res_list[1:])
            else:
                # Run analysis
                res = network_analysis.analyse_network(settings=settings,
                                                       data=dat)
            return res

        else:
            raise ParameterValue(
                algorithm,
                msg='Network inference algorithm not yet implemented')
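Like the built-in map, futures.map_as_completed accepts multiple iterables and passes one element from each to the callable per call; Example #7 uses itertools.repeat to broadcast the same settings and dat objects to every target index. A stripped-down sketch of that calling convention, with a hypothetical analyse worker:

import itertools

from scoop import futures


def analyse(settings, data, target):
    # settings and data are identical on every call; only target varies.
    return target, settings['scale'] * data[target]


if __name__ == "__main__":
    data = [2, 4, 6, 8]
    settings = {'scale': 10}
    n = len(data)
    results = futures.map_as_completed(
        analyse,
        itertools.repeat(settings, n),
        itertools.repeat(data, n),
        range(n))
    # Completion order is not guaranteed, so sort by the returned target index.
    print(sorted(results))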
Example #8
    targets = preprocessing.LabelEncoder().fit_transform(targets)

    print(np.bincount(targets))
    param_grid = [
        {
            'C': [0.1, 0.5, 1.0, 2, 5, 10, 15],
            'gamma': [0.0, 0.0001, 0.001, 0.01, 0.1, 0.5],
            'tol': [0.0001, 0.001, 0.01]
        }
    ]

    p_grid = list(grid_search.ParameterGrid(param_grid))

    print("Starting...", len(p_grid))

    sys.stdout.flush()

    from pprint import pprint

    results = []
    # enumerate() pairs each parameter set with its index, which is forwarded to eval_model.
    for r in futures.map_as_completed(lambda x: eval_model(x[1], features, targets, x[0]), enumerate(p_grid)):
        pprint(r, width=120)
        sys.stdout.flush()
        results.append(r)

    sor_res = sorted(results, key=lambda res: res[0], reverse=True)

    print('Best Score:', sor_res[0][0])
    print('Best Estimator', sor_res[0][2])

Example #9
        if key + ".json" not in f:
            os.remove(join(dir, f))
    pass


if __name__ == "__main__":
    # dir = '../results/m_[30x3]/m75_[30x3]_10by10_tour4/'
    # mergefiles(dir, key)

    def generate_pathes(dir, key):
        # Recursively collect `dir` and every sub-directory beneath it.
        seq = (generate_pathes(dir + entry + "/", key)
               for entry in os.listdir(dir) if os.path.isdir(dir + entry))
        result = functools.reduce(lambda x, y: x + y, seq, [dir])
        return result

    # dir = "D:/wspace/heft/results/new_experiments_for_ECTA/sw2/additional_strongest/"
    # dir = "D:/wspace/heft/results/m250/"
    # dir = "D:/wspace/heft/results/[Montage_250]_[50]_[10by20]_[18_06_14_17_52_26]/"
    # dir = "D:/wspace/heft/results/[Montage_250]_[50]_[10by20]_[18_06_14_18_16_37]/"
    # dir = "D:/wspace/heft/results/[Montage_250]_[50]_[10by5]_[18_06_14_18_41_42]/"
    # dir = "D:/wspace/heft/results/m250_[120-180]/"
    # dir = "D:/wspace/heft/results/[Montage_250]_[50]_[10by20]_[18_06_14_19_09_24]/"
    # dir = "D:/wspace/heft/results/[Montage_250]_[50]_[10by5]_[19_06_14_10_43_15]/"
    dir = "D:/wspace/heft/results/[Montage_100]_[50]_[10by1]_[20_06_14_13_13_51]/"
    # dir = "D:/wspace/heft/results/"
    # dir = "D:/wspace/heft/results/m_[50x3]/tournament/"
    pathes = generate_pathes(dir, key)

    # Merge each results directory in parallel; list() drains the generator.
    fnc = partial(mergefiles, key=key)
    list(futures.map_as_completed(fnc, pathes))