Code example #1
def analyze_network_cc(neighbor_indices, cells_response_curve_parts,
                       kpss_and_adf_filter, max_tpd):
    """Run the per-part cross-correlation network analysis in parallel.

    Each element of ``cells_response_curve_parts`` is analyzed by a separate
    worker process; the per-part result frames are concatenated into one
    DataFrame.

    Parameters
    ----------
    neighbor_indices : dict
        Maps a node index to the list of its neighbor indices.
    cells_response_curve_parts : sequence
        One response-curve array per part (one worker task per part).
    kpss_and_adf_filter :
        Stationarity filter forwarded unchanged to the per-part analysis.
    max_tpd : int
        Maximum topological distance to analyze.

    Returns
    -------
    pandas.DataFrame
        Columns: source, destination, part, topological_distance,
        cross_correlation.
    """
    columns = ['source', 'destination', 'part', 'topological_distance',
               'cross_correlation']

    simple_network = build_network(neighbor_indices)
    tp_indices_by_td_all = get_all_indices_with_toplogical_distance_all(
        simple_network, max_tpd, neighbor_indices)
    number_of_parts = len(cells_response_curve_parts)

    pool = Pool()
    try:
        async_results = [
            pool.apply_async(analyze_network_cc_by_part_private,
                             (create_analyze_network_cc_by_td_context(
                                 tp_indices_by_td_all, neighbor_indices,
                                 cells_response_curve_parts[part],
                                 kpss_and_adf_filter, max_tpd, part), ))
            for part in range(number_of_parts)
        ]
        part_frames = [res.get() for res in async_results]
    finally:
        # close() alone never waits for workers; join() prevents leaking
        # processes, and the finally block covers the error path too.
        pool.close()
        pool.join()

    # DataFrame.append was removed in pandas 2.0 (and was O(n^2) when called
    # in a loop); concatenate all per-part frames in a single pass.  The empty
    # seed frame preserves the column set even when there are no parts.
    return pd.concat([pd.DataFrame(columns=columns)] + part_frames)
Code example #2
def analyze_network(neighbor_indices, cells_response_curve_parts,
                    kpss_and_adf_filter, max_tpd):
    """Run the per-part Granger-causality network analysis in parallel.

    Each element of ``cells_response_curve_parts`` is analyzed by a separate
    worker process; the per-part result frames are concatenated into one
    DataFrame, with progress reported via a notebook tqdm bar.

    Parameters
    ----------
    neighbor_indices : dict
        Maps a node index to the list of its neighbor indices.
    cells_response_curve_parts : sequence
        One response-curve array per part (one worker task per part).
    kpss_and_adf_filter :
        Stationarity filter forwarded unchanged to the per-part analysis.
    max_tpd : int
        Maximum topological distance to analyze.

    Returns
    -------
    pandas.DataFrame
        Columns: source, destination, part, topological_distance,
        optimal_lag, granger_causality_mag, granger_causality_pvalue.
    """
    # Imported locally so the module stays usable outside notebooks.
    from tqdm.notebook import tqdm

    columns = ['source', 'destination', 'part', 'topological_distance',
               'optimal_lag', 'granger_causality_mag',
               'granger_causality_pvalue']

    simple_network = build_network(neighbor_indices)
    tp_indices_by_td_all = get_all_indices_with_toplogical_distance_all(
        simple_network, max_tpd, neighbor_indices)
    number_of_parts = len(cells_response_curve_parts)

    pool = Pool()
    try:
        async_results = [
            pool.apply_async(analyze_network_by_part_private,
                             (create_analyze_network_by_td_context(
                                 tp_indices_by_td_all, neighbor_indices,
                                 cells_response_curve_parts[part],
                                 kpss_and_adf_filter, max_tpd, part), ))
            for part in range(number_of_parts)
        ]
        part_frames = [res.get()
                       for res in tqdm(async_results, total=number_of_parts)]
    finally:
        # close() alone never waits for workers; join() prevents leaking
        # processes, and the finally block covers the error path too.
        pool.close()
        pool.join()

    # DataFrame.append was removed in pandas 2.0 (and was O(n^2) when called
    # in a loop); concatenate all per-part frames in a single pass.  The empty
    # seed frame preserves the column set even when there are no parts.
    return pd.concat([pd.DataFrame(columns=columns)] + part_frames)
Code example #3
def analyze_network_by_td(neighbor_indices, cells_response_curve,
                          kpss_and_adf_filter, max_tpd, random_mode,
                          max_neighbors):
    '''
    Analyze the network by topological distance, one worker per distance.

    Parameters
    ----------
    neighbor_indices : dict
        The key is the node index and the value is the list of neighbor
        indices.
    cells_response_curve : array[#cells, #frames]
        The cell response of each cell.
    kpss_and_adf_filter :
        Stationarity filter forwarded unchanged to the per-distance
        analysis.
    max_tpd : int
        Max topological distance to estimate.
    random_mode : string
        'g' - global mode.
        'c' - cell mode.
    max_neighbors : int
        Max neighbors for each topological distance.

        In global mode 'g' max_neighbors is counted globally.
        In cell mode 'c' max_neighbors is counted per cell.

        If there are more than max_neighbors, then max_neighbors are chosen
        uniformly at random.

    Returns
    -------
    tp_indices_by_td : dict
        The key is the distance and the value is a dict whose key is the
        node index and whose value is the list of neighbor indices at that
        topological distance.
    tp_indices_random_by_td : dict
        Same as tp_indices_by_td but after the random choice.
    optimal_lag_by_td : dict
        The key is the distance and the value is the optimal lag for each
        pair.
    '''
    # One entry per topological distance 1..max_tpd.
    tp_indices_by_td = {}
    tp_indices_random_by_td = {}
    optimal_lag_by_td = {}

    simple_network = build_network(neighbor_indices)
    tp_indices_by_td_all = get_all_indices_with_toplogical_distance_all(
        simple_network, max_tpd, neighbor_indices)

    pool = Pool(processes=4)
    try:
        # One async task per topological distance; each worker performs the
        # neighbor selection (global/cell random choice) and the VAR optimal
        # lag estimation for that distance.
        results = [
            pool.apply_async(analyze_network_by_td_private,
                             (create_analyze_network_by_td_context(
                                 tp_indices_by_td_all, neighbor_indices,
                                 random_mode, max_neighbors,
                                 cells_response_curve, kpss_and_adf_filter,
                                 tp_distance), ))
            for tp_distance in range(1, max_tpd + 1)
        ]

        for td in range(1, max_tpd + 1):
            (tp_indices_by_td[td], tp_indices_random_by_td[td],
             optimal_lag_by_td[td]) = results[td - 1].get()
    finally:
        # close() alone never waits for workers; join() prevents leaking
        # processes, and the finally block covers the error path too.
        pool.close()
        pool.join()

    return tp_indices_by_td, tp_indices_random_by_td, optimal_lag_by_td
Code example #4
def calc_hub_indv_perm_scores(analyze_cell_stats_flat, neighbor_indices,
                              pipe_norm_df, kpss_and_adf_filter, centroids,
                              cells_response_curve_parts, get_role, base_path):
    """Compute the observed hub/individual count ratio and its permutation null.

    The observed ratio is derived from the 'Manual_0.5' state matrix in
    ``analyze_cell_stats_flat``.  The permutation scores are loaded from
    ``base_path + 'hub_indv_perm_scores_df'`` when cached; otherwise 100
    permutations are computed and the result is cached with ``save_object``.

    Parameters
    ----------
    analyze_cell_stats_flat : pandas.DataFrame
        Per-cell stats; ``unstack()['Manual_0.5']`` must yield the cell-state
        matrix (states 0 and 1 mark individual/hub cells).
    neighbor_indices : dict
        Maps a node index to the list of its neighbor indices.
    pipe_norm_df, kpss_and_adf_filter, centroids, cells_response_curve_parts,
    get_role :
        Forwarded unchanged to ``get_hub_ind_count_ratio_perm``.
    base_path : str
        Directory/prefix used for the cached permutation scores.

    Returns
    -------
    tuple
        (observed hub/individual count ratio, list of permutation scores).
    """
    # --- Observed statistic -------------------------------------------------
    # Select the columns of cells whose state is 0 or 1 (individual / hub).
    state_matrix = analyze_cell_stats_flat.unstack()['Manual_' +
                                                     str(0.5)].transpose()
    cells_state_matrix_loc_indexes = np.where(state_matrix == 0)[1]
    cells_state_matrix_loc_indexes = np.concatenate(
        [cells_state_matrix_loc_indexes,
         np.where(state_matrix == 1)[1]])
    cells_indices = state_matrix.iloc[:,
                                      cells_state_matrix_loc_indexes].columns
    cells_indices = cells_indices.astype(int)

    original_hub_ind_count_ratio = get_hub_ind_count_ratio(
        state_matrix, cells_indices)

    # --- Permutation null (cached) ------------------------------------------
    hub_indv_perm_scores = load_object(base_path + 'hub_indv_perm_scores_df')

    if hub_indv_perm_scores is None:
        from module.networkhelper import build_network
        from module.networkhelper import (
            get_all_indices_with_toplogical_distance_all,
            get_all_indices_with_toplogical_distance_specific)
        from tqdm.notebook import tqdm

        # Neighborhoods at topological distances 1 and 2, shared by every
        # permutation.
        simple_network = build_network(neighbor_indices)
        tp_indices_by_td_all = get_all_indices_with_toplogical_distance_all(
            simple_network, 2, neighbor_indices)

        tp_indices_1 = get_all_indices_with_toplogical_distance_specific(
            tp_indices_by_td_all, 1, neighbor_indices)
        tp_indices_2 = get_all_indices_with_toplogical_distance_specific(
            tp_indices_by_td_all, 2, neighbor_indices)

        # Permutations run sequentially: each call spawns its own worker
        # pool internally, so parallelizing this loop would nest pools.
        number_of_permutations = 100
        scores = []
        for _ in tqdm(range(number_of_permutations)):
            scores.append(
                get_hub_ind_count_ratio_perm(cells_indices, tp_indices_1,
                                             tp_indices_2, kpss_and_adf_filter,
                                             pipe_norm_df, centroids,
                                             cells_response_curve_parts,
                                             get_role)[0])

        hub_indv_perm_scores = scores

        save_object(hub_indv_perm_scores,
                    base_path + 'hub_indv_perm_scores_df')

    return original_hub_ind_count_ratio, hub_indv_perm_scores