Exemplo n.º 1
0
def setup_parameter_files(parameters):
    """Prepare all on-disk inputs for a parallel pcp run.

    Splits the full parallel input sequence into at most ``max_proc``
    chunks, writes the command file plus one parameter file per chunk,
    records the expected output filenames, and finally pickles the run's
    parameter dictionary next to them.

    Args:
        parameters: dict of run settings; must contain 'num_threads',
            'q1_list', 'q2_list', 'num_reservoir_samplings',
            'worker_file', 'command_prefix', and 'full_path'.
    """

    # pcp runs only N-1 worker processes in parallel, hence the -1.
    # Never spawn more processes than there are work items.
    total_jobs = (len(parameters['q1_list']) * len(parameters['q2_list']) *
                  parameters['num_reservoir_samplings'])
    max_proc = min(parameters['num_threads'] - 1, total_jobs)

    input_sequence = generate_parallel_input_sequence(
        parameters['q1_list'], parameters['q2_list'], parameters)
    input_chunks = utilities.split_list(input_sequence, max_proc)

    parameter_file_names = write_command_file(parameters['worker_file'],
                                              parameters['command_prefix'],
                                              len(input_chunks),
                                              parameters['full_path'])
    write_parameter_files(parameter_file_names, input_chunks)
    write_output_filenamelist(parameters['command_prefix'],
                              len(parameter_file_names),
                              parameters['full_path'])

    # Persist the run configuration alongside the generated files.
    utilities.save_object(
        {
            'parameters': parameters,
            'q1_list': parameters['q1_list'],
            'q2_list': parameters['q2_list']
        }, parameters['full_path'] + parameters['command_prefix'] +
        "_paramfile.pyobj")
Exemplo n.º 2
0
def write_output_filenamelist(command_prefix, num_chunks, full_path):
    """Save the ordered list of per-chunk output filenames.

    Builds the path of every expected worker output file
    (<full_path><command_prefix>_output<i>.pyobj for i in 0..num_chunks-1)
    and pickles that list to <full_path><command_prefix>_outputfilelist.pyobj
    so the consolidation step knows what to collect.

    Args:
        command_prefix: filename prefix shared by all files of this run.
        num_chunks: number of parallel chunks / output files.
        full_path: directory (with trailing separator) to write into.
    """

    # Fix: the original used the Python-2-only `xrange`; `range` behaves
    # identically here and matches the rest of the file (see the loops
    # that already use `range`).
    output_paths = [
        full_path + command_prefix + "_output" + str(i) + ".pyobj"
        for i in range(num_chunks)
    ]

    utilities.save_object(output_paths,
                          full_path + command_prefix + "_outputfilelist.pyobj")
def main(argv):
    """Worker entry point for one chunk of a parallel run.

    Args:
        argv: command-line arguments; argv[0] is the pickled experiment
            parameter file, argv[1] the chunk ID used to name the output.
    """

    parameter_file = str(argv[0])
    chunk_id = str(argv[1])

    experimental_parameters = utilities.load_object(parameter_file)
    results = worker(experimental_parameters)

    # Output path comes from the first parameter set in the chunk.
    output_path = (experimental_parameters[0]['full_path'] +
                   experimental_parameters[0]['command_prefix'] +
                   "_output" + chunk_id + ".pyobj")
    utilities.save_object(results, output_path)
Exemplo n.º 4
0
def spectral_analysis(network_parameters, savefile=True):
    """Collect adjacency spectra over mu values and reservoir samplings.

    For every mu in network_parameters['mus'], samples
    ``num_reservoir_samplings`` networks, computes the full adjacency
    spectrum of each, and saves all spectra to
    <command_prefix>_spectrum.pyobj.

    NOTE(review): ``savefile`` is accepted but never consulted; the
    spectrum file is always written — confirm intent with callers.

    Returns:
        Tuple of two np.ndarrays of shape (num_mus, num_trials): the
        largest and second-largest adjacency eigenvalue per trial.
    """

    largest_by_mu = []
    second_largest_by_mu = []
    spectrums = {'adj_eigvals': [], 'mus': network_parameters['mus']}

    for mu in network_parameters['mus']:

        # Full adjacency spectrum of every sampled reservoir at this mu.
        trial_spectra = []
        for _ in range(network_parameters['num_reservoir_samplings']):
            network = generate_network(
                N=network_parameters['N'],
                mu=mu,
                k=network_parameters['k'],
                maxk=network_parameters['maxk'],
                minc=network_parameters['minc'],
                maxc=network_parameters['maxc'],
                deg_exp=network_parameters['deg_exp'],
                temp_dir_ID=network_parameters['temp_dir_ID'],
                full_path=network_parameters['full_path'],
                weight_scale=network_parameters['reservoir_weight_scale'],
                lower_weight_bound=network_parameters['lower_reservoir_bound'],
                upper_weight_bound=network_parameters['upper_reservoir_bound'])
            trial_spectra.append(adj_spectrum(network))

        # The last two entries of each spectrum are taken as the top two
        # eigenvalues — assumes adj_spectrum returns eigenvalues in
        # ascending order; TODO confirm against adj_spectrum.
        largest_by_mu.append([eigvals[-1] for eigvals in trial_spectra])
        second_largest_by_mu.append([eigvals[-2] for eigvals in trial_spectra])
        spectrums['adj_eigvals'].append(trial_spectra)

    utilities.save_object(
        spectrums, network_parameters['command_prefix'] + "_spectrum.pyobj")

    return np.array(largest_by_mu), \
        np.array(second_largest_by_mu)
def main(argv):
    """Consolidate per-chunk worker outputs into one final results file.

    Args:
        argv: command-line arguments; argv[0] is the command prefix,
            argv[1] the full working directory containing the run's files.
    """

    if len(argv) == 0:
        # Bug fix: the original printed the usage text but then fell
        # through to argv[0] and raised IndexError; return instead.
        print("""
        Call as: bigred2_result_consolidation command_prefix path
        Where path is the full working directory where the files are locations
        """)
        return

    command_prefix = str(argv[0])
    full_path = str(argv[1])

    parameters = utilities.load_object(
        full_path + command_prefix + "_paramfile.pyobj")
    output_file_list = utilities.load_object(
        full_path + command_prefix + "_outputfilelist.pyobj")

    results = consolidate_data(parameters, output_file_list)
    utilities.save_object({'parameters': parameters, 'results': results},
                          full_path + command_prefix + "_final_results.pyobj")

    # Remove the now-consolidated per-chunk files.
    cleanup(command_prefix, full_path, output_file_list)
Exemplo n.º 6
0
def XmlParser(path, index_path):
    """Parse a wiki XML dump at `path` and build inverted-index files.

    Feeds the file through a SAX parser with WikiContentHandler, which
    (presumably) populates the module-level globals `doc_list` and
    `docid_title_map` as a side effect — confirm against the handler.
    If any documents were collected, one inverted-index file is written
    under `index_path` and its name recorded in the global `filenames`.
    Also saves the docid->title map to `index_path + "doc_title_map"`.

    Returns the (cumulative, module-global) list of index filenames.
    """
    # Publish the output directory for helpers that read the global.
    global index_folder_path
    index_folder_path = index_path
    parser = xml.sax.make_parser()
    # Disable namespace processing: element names arrive as plain strings.
    parser.setFeature(xml.sax.handler.feature_namespaces, 0)

    Handler = WikiContentHandler()
    parser.setContentHandler(Handler)

    # Parsing drives the handler, which fills doc_list / docid_title_map.
    parser.parse(path)
    global i_count
    #    return doc_list, docid_title_map
    if len(doc_list) >= 1:
        # i_count numbers the index files across repeated calls.
        i_count += 1
        #        path_to_save = "../index/i_index" + str(i_count) + ".txt"
        fname = create_inverted_index(doc_list, i_count, index_folder_path)
        filenames.append(fname)
#        write_index_to_file(path_to_save, i_index)

    save_object(docid_title_map, index_folder_path + "doc_title_map")
    return filenames
def write_parameter_files(parameter_file_names, parallel_input_chunks):
    """Pickle each input chunk to its corresponding parameter file.

    Args:
        parameter_file_names: destination paths, indexed by chunk number.
        parallel_input_chunks: list of input chunks, one per worker.
    """

    # Fix: replaced the Python-2-only `for i in xrange(len(...))` index
    # loop with `enumerate`; indexing into parameter_file_names is kept
    # so a too-short name list still fails loudly (IndexError).
    for i, chunk in enumerate(parallel_input_chunks):
        utilities.save_object(chunk, parameter_file_names[i])
        for k in xrange(num_trials)] \
        for i in xrange(num_mus)] \
        for j in xrange(num_ratios) ]
    listRatios_listMus_listMeanResults = [ [ np.mean([ model_results[(num_trials*num_ratios*i + num_trials*j + k)] \
        for k in xrange(num_trials)], axis=0) \
        for i in xrange(num_mus)] \
        for j in xrange(num_ratios) ]

    # Sort
    sorted_listRatios_listMus_listMeanResults = sorted([
        sorted(inner_list, key=lambda x: x[0])
        for inner_list in listRatios_listMus_listMeanResults
    ],
                                                       key=lambda x: x[0][1])
    utilities.save_object((sorted(list_mus), sorted(list_signal_ratio),
                           sorted_listRatios_listMus_listMeanResults,
                           listRatios_listMus_listTrials_listResults),
                          prefix + "_simresults_vs_mu_vs_signal.pyobj")

    # Plot results
    # list_mus, list_signal_ratio, sorted_listRatios_listMus_listMeanResults, \
    #     listRatios_listMus_listTrials_listResults = utilities.load_object("test0.4_simresults_vs_mu_vs_signal.pyobj")
    # activity_contour_plot(prefix, list_mus, list_signal_ratio, sorted_listRatios_listMus_listMeanResults)
    # activity_vs_mu_plot(prefix, 9, list_mus, list_signal_ratio, listRatios_listMus_listTrials_listResults)
    fixed_point_contour_plot(prefix, list_mus, list_signal_ratio,
                             sorted_listRatios_listMus_listMeanResults)
    # fixed_point_vs_mu_plot(prefix, 5, list_mus, list_signal_ratio, listRatios_listMus_listTrials_listResults)
    # halting_time_contour_plot(prefix, list_mus, list_signal_ratio, sorted_listRatios_listMus_listMeanResults)
    # time_to_activation_contour_plot(prefix, list_mus, list_signal_ratio, sorted_listRatios_listMus_listMeanResults)
    # halting_time_vs_mu_plot(prefix, 9, list_mus, list_signal_ratio, listRatios_listMus_listTrials_listResults)
    # time_to_activation_vs_mu_plot(prefix, 0, list_mus, list_signal_ratio, listRatios_listMus_listTrials_listResults)