Code Example #1
def mlv_plot_sim_results_heatmaps(dir, parameter1, parameter2, save=False):
    """
    Plot results from file consolidated results. If it doesn't exist,
    creates it here.
    """
    filename = dir + os.sep + 'consolidated_results.npz'

    if not os.path.exists(filename):
        mlv_consolidate_sim_results(dir, parameter1, parameter2)

    with np.load(filename) as f:
        param1_2D = f[parameter1]
        mean_pop2D = f['mean_pop']
        mean_rich2D = f['mean_rich']
        mean_time_present2D = f['mean_time_present']
        P02D = f['P0']
        nbr_local_max2D = f['nbr_local_max']
        H2D = f['entropy']
        GS2D = f['gs_idx']
        nbr_spec2D = f['nbr_species']
        param2_2D = f[parameter2]
        det_mean_present2D = f['det_mean_present']
        rich_dist2D = f['rich_dist']
        correlation2D = f['correlation']

    labelx = VAR_NAME_DICT[parameter1]
    labely = VAR_NAME_DICT[parameter2]

    ## Entropy 2D
    #heatmap(xrange, yrange, arr, xlabel, ylabel, title)
    heatmap(param1_2D, param2_2D, H2D.T, labelx, labely, 'entropy', save=save)

    ## Gini-Simpson
    heatmap(param1_2D,
            param2_2D,
            GS2D.T,
            labelx,
            labely,
            'Gini-Simpson index',
            save=save)

    ## Richness : S(1-P(0)) and mean simulated richness
    heatmap(param1_2D,
            param2_2D, (nbr_spec2D * (1.0 - P02D)),
            labelx,
            labely,
            r'$S(1-P(0))$',
            save=save)
    heatmap(param1_2D,
            param2_2D,
            mean_rich2D.T,
            labelx,
            labely,
            r'$\langle S \rangle$',
            save=save)

    #heatmap(param1_2D, param2_2D, np.divide(nbr_spec2D*(1.0-P02D),mean_rich2D).T
    #        , labelx, labely, r'$S(1-P(0))/\langle S \rangle$', save=save)

    ## mean_n
    heatmap(param1_2D,
            param2_2D,
            mean_pop2D.T,
            labelx,
            labely,
            r'$\langle n \rangle$',
            save=save)

    ## det_mean_n_present
    heatmap(param1_2D,
            param2_2D,
            det_mean_present2D.T,
            labelx,
            labely,
            r'Lotka-Volterra steady state with $S(1-P(0))$',
            save=save)

    # Correlation does not need to be transposed here
    heatmap(param2_2D,
            param1_2D,
            correlation2D,
            labelx,
            labely,
            r'$\rho_{Pears}(n_i,n_j)$',
            save=save)

    ## diversity distribution
    binom_approx = np.zeros(rich_dist2D.shape)
    JS_rich = np.zeros(P02D.shape)

    for i in np.arange(len(param1_2D)):
        for j in np.arange(len(param2_2D)):
            binom_approx[i,j,:] =\
                            theqs.Model_MultiLVim().binomial_diversity_dstbn(
                                    P02D[i,j] , nbr_spec2D[i,j])
            JS_rich[i, j] = theqs.Model_MultiLVim().JS_divergence(
                binom_approx[i, j, :], rich_dist2D[i, j, :])

    mean_rich_sim = np.tensordot(np.arange(0, 31),
                                 rich_dist2D,
                                 axes=([0], [2]))
    mean_rich_binom = np.tensordot(np.arange(0, 31),
                                   binom_approx,
                                   axes=([0], [2]))

    var_rich_sim = (
        np.tensordot(np.arange(0, 31)**2, rich_dist2D, axes=([0], [2])) -
        mean_rich_sim**2)
    var_rich_binom = np.tensordot(np.arange(0, 31)**2,
                                  binom_approx,
                                  axes=([0], [2])) - mean_rich_binom**2

    # None of these need to be transposed, unlike the heatmaps above.
    heatmap(param1_2D,
            param2_2D,
            mean_rich_sim,
            labelx,
            labely,
            r'mean richness (sim.)',
            save=save)  # mean diversity
    heatmap(param1_2D,
            param2_2D,
            mean_rich_binom,
            labelx,
            labely,
            r'mean richness (binom.)',
            save=save)  # mean diversity, binomial approx.
    heatmap(param1_2D,
            param2_2D, (mean_rich_sim / mean_rich_binom),
            labelx,
            labely,
            r'mean richness (sim./binom.)',
            save=save)
    # ratio of mean diversities (sim./binom.)

    heatmap(param1_2D,
            param2_2D,
            JS_rich,
            labelx,
            labely,
            r'Jensen-Shannon divergence (sim./binom.)',
            save=save)
    # Jensen-Shannon divergence

    heatmap(param1_2D,
            param2_2D,
            var_rich_sim,
            labelx,
            labely,
            r'variance richness (sim.)',
            save=save)  # variance
    heatmap(param1_2D,
            param2_2D,
            var_rich_binom,
            labelx,
            labely,
            r'variance richness (binom.)',
            save=save)  # variance

    heatmap(param1_2D,
            param2_2D, (var_rich_sim / var_rich_binom),
            labelx,
            labely,
            r'var richness (sim./binom.)',
            save=save)
    # variance/variance

    # TODO : plot the many individual distributions here

    return 0
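The tensordot calls in the richness section above compute the first and second moments of each richness distribution along its last axis, with a hard-coded support of 0..30. A minimal standalone sketch of that pattern, using a toy grid of random distributions rather than the simulation data:

import numpy as np

# Toy grid of richness distributions: a 2 x 3 parameter grid with support 0..30.
rng = np.random.default_rng(0)
rich_dist = rng.random((2, 3, 31))
rich_dist /= rich_dist.sum(axis=2, keepdims=True)   # normalize each distribution

support = np.arange(0, 31)
# Contract the support axis against the last axis of the distribution grid.
mean_rich = np.tensordot(support, rich_dist, axes=([0], [2]))                    # <S>, shape (2, 3)
var_rich = np.tensordot(support**2, rich_dist, axes=([0], [2])) - mean_rich**2   # Var(S)
print(mean_rich.shape, var_rich.shape)   # (2, 3) (2, 3)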
Code Example #2
def mlv_sim2theory_results_heatmaps(dir, parameter1, parameter2, save=False):
    """
    Plot results from file consolidated results. If it doesn't exist,
    creates it here.
    THIS IS AN AWEFUL FUNCTION THAT NEED METRICS AND CONSOLIDATED TO BE THE SAME
    LENGTH I HATE IT
    """
    theory_fname = theqs.THRY_FIG_DIR + os.sep + 'metric45.npz'
    simulation_fname = dir + os.sep + 'consolidated_results.npz'

    if not os.path.exists(simulation_fname):
        mlv_consolidate_sim_results(dir, parameter1, parameter2)

    if not os.path.exists(theory_fname):
        print('Warning : ' + theory_fname + " doesn't exist!")
        raise SystemExit

    with np.load(simulation_fname) as f:
        param1_2D = f[parameter1]
        mean_pop2D = f['mean_pop']
        mean_rich_sim = f['mean_rich']
        mean_time_present2D = f['mean_time_present']
        P02D = f['P0']
        nbr_local_max2D = f['nbr_local_max']
        H2D = f['entropy']
        GS2D = f['gs_idx']
        nbr_spec2D = f['nbr_species']
        param2_2D = f[parameter2]
        dist_sim = f['ss_dist']
        det_mean_present2D = f['det_mean_present']

    labelx = VAR_NAME_DICT[parameter1]
    labely = VAR_NAME_DICT[parameter2]

    with np.load(theory_fname) as f:
        dist_thry2 = f['approx_dist_nava']
        richness_thry = f['richness']
        det_mean = f['det_mean']
        dist_thry = f['approx_dist_sid']

    ## J-S divergence
    JS = np.zeros((len(param1_2D), len(param2_2D)))
    for i in range(np.shape(dist_sim)[0]):
        for j in range(np.shape(dist_sim)[1]):
            JS[i, j] = theqs.Model_MultiLVim().JS_divergence(
                dist_sim[i, j], dist_thry[i, j])

    heatmap(param1_2D,
            param2_2D,
            JS.T,
            labelx,
            labely,
            r'Jensen-Shannon Divergence',
            save=save)

    ## mean richness
    heatmap(param1_2D,
            param2_2D, (30 * richness_thry).T,
            labelx,
            labely,
            r'Method 2 richness',
            save=save)
    heatmap(param1_2D,
            param2_2D,
            np.divide(30 * richness_thry, mean_rich_sim),
            labelx,
            labely,
            r'Method 2 richness / richness simulation',
            save=save)

    ## mean deterministic vs mean simulation
    heatmap(param1_2D,
            param2_2D, (np.divide(det_mean, mean_pop2D)),
            labelx,
            labely,
            r'LV mean / $\langle n \rangle_{sim}$',
            save=save)

    ## mean deterministic with S(1-P(0)) vs mean simulation
    heatmap(param1_2D,
            param2_2D, (np.divide(det_mean_present2D, mean_pop2D)),
            labelx,
            labely,
            r'LV mean $S(1-P(0))$ / $\langle n \rangle_{sim}$',
            save=save)

    heatmap(param1_2D,
            param2_2D, (np.divide(det_mean_present2D, det_mean)),
            labelx,
            labely,
            r'LV mean $(S(1-P(0)))$ / LV mean $S$',
            save=save)

    ## Number of peaks
    peaks = np.zeros((len(param1_2D), len(param2_2D)))
    for i in range(np.shape(dist_sim)[0]):
        for j in range(np.shape(dist_sim)[1]):
            peakhigh = 0
            peak0 = 0
            if np.argmax(dist_sim[i, j, 1:]) > 1:
                peakhigh = 1
                peak0 = int(dist_sim[i, j, 0] > dist_sim[i, j, 1])

            peaks[i, j] = peak0 + peakhigh

    heatmap(param1_2D,
            param2_2D, peaks.T,
            labelx,
            labely,
            r'Local maxima',
            save=save)

    return 0
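The docstring's complaint above is that the theory metrics and the consolidated simulation distributions must share the same support length before JS_divergence can be taken element-wise. One way to relax that, shown as a sketch with a hypothetical helper (pad_to_common_length is not part of the module), is to zero-pad the shorter distribution:

import numpy as np

def pad_to_common_length(p, q):
    """Zero-pad the shorter of two 1D distributions so both share one support."""
    length = max(len(p), len(q))
    p_pad, q_pad = np.zeros(length), np.zeros(length)
    p_pad[:len(p)] = p
    q_pad[:len(q)] = q
    return p_pad, q_pad

p, q = pad_to_common_length(np.array([0.5, 0.3, 0.2]), np.array([0.6, 0.4]))
print(p, q)   # [0.5 0.3 0.2] [0.6 0.4 0. ]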
Code Example #3
def mlv_plot_single_sim_results(dir, sim_nbr=1):
    """
    Plot information collected in a single results_(sim_nbr).pickle

    Input :
        dir     : directory that we're plotting from
        sim_nbr : simulation number (subdir sim%i %(sim_nbr))

    Output :
        Plots of a single simulation
    """
    # TODO : replace with a dict
    param_dict, ss_dist_sim, richness_sim, time_present_sim, mean_pop_sim\
                  , mean_rich_sim, mean_time_present_sim, _, _, _, _, _ , _, _\
                  , conditional, _\
                  = mlv_extract_results_sim(dir, sim_nbr=sim_nbr)

    # theory equations
    theory_models = theqs.Model_MultiLVim(**param_dict)
    #conv_dist, _ = theory_models.abund_1spec_MSLV()
    mf_dist, mf_abund = theory_models.abund_sid()
    title = r'$\rho=$' + str(param_dict['comp_overlap']) + r', $\mu=$' \
            + str(param_dict['immi_rate']) + r', $S=$' + str(param_dict['nbr_species'])

    fig = plt.figure()
    plt.scatter(np.arange(len(richness_sim)), richness_sim, color='b')
    plt.ylabel(r"probability of richness")
    plt.xlabel(r'richness')
    plt.axvline(mean_rich_sim, color='k', linestyle='dashed', linewidth=1)
    plt.title(title)
    fname = 'richness' + 'sim' + str(sim_nbr)
    plt.savefig(dir + os.sep + fname + '.pdf')
    #plt.yscale('log')
    #plt.xscale('log')
    #plt.show()

    ## dstbn present
    if time_present_sim is not None and len(time_present_sim) > 0:
        nbins = 100
        logbins = np.logspace(np.log10(np.min(time_present_sim)),
                              np.log10(np.max(time_present_sim)), nbins)
        counts, bin_edges = np.histogram(time_present_sim,
                                         density=True,
                                         bins=nbins)  # alternatively, bins=logbins
        fig = plt.figure()
        axes = plt.gca()
        plt.scatter((bin_edges[1:] + bin_edges[:-1]) / 2, counts, color='g')
        plt.axvline(mean_time_present_sim,
                    color='k',
                    linestyle='dashed',
                    linewidth=1)  # mean
        plt.ylabel(r"probability of time present")
        plt.xlabel(r'time present between extinction')
        plt.yscale('log')
        axes.set_ylim([np.min(counts[counts != 0.0]), 2 * np.max(counts)])
        plt.title(title)
        fname = 'time_present' + 'sim' + str(sim_nbr)
        plt.savefig(dir + os.sep + fname + '.pdf')
        #plt.xscale('log')
        #plt.show()

    ## ss_dstbn (compare with deterministic mean)
    fig = plt.figure()
    axes = plt.gca()

    plt.scatter(np.arange(len(ss_dist_sim)), ss_dist_sim, label='simulation')
    #plt.plot(np.arange(len(conv_dist)),conv_dist,label='convolution approx.')
    plt.plot(np.arange(len(mf_dist)), mf_dist, label='mean field approx.')
    plt.ylabel(r"probability distribution function")
    plt.xlabel(r'n')
    plt.axvline(mean_pop_sim, color='r', linestyle='dashed',
                linewidth=1)  #mean
    plt.axvline(theory_models.deterministic_mean(),
                color='k',
                linestyle='dashdot',
                linewidth=1)  #mean
    setattr(theory_models, 'nbr_species', int(mean_rich_sim))
    plt.axvline(theory_models.deterministic_mean(),
                color='b',
                linestyle='-',
                linewidth=1)  #mean
    plt.yscale('log')
    axes.set_ylim(
        [np.min(ss_dist_sim[ss_dist_sim != 0.0]), 2 * np.max(ss_dist_sim)])
    title = r'$\rho=$' + str(param_dict['comp_overlap']) + r', $\mu=$' \
            + str(param_dict['immi_rate']) + r', $S=$' + str(param_dict['nbr_species'])
    plt.title(title)
    axes.set_xlim([0.0, np.max(np.nonzero(ss_dist_sim))])
    plt.legend(loc='best')
    fname = 'distribution' + 'sim' + str(sim_nbr)
    plt.savefig(dir + os.sep + fname + '.pdf')
    #plt.xscale('log')
    #plt.show()
    print('done distribution of single sim')

    fig = plt.figure()
    axes = plt.gca()
    my_cmap = copy.copy(mpl.cm.get_cmap('PuBu'))
    my_cmap.set_bad((0, 0, 0))
    print(np.sum(conditional, axis=0), np.sum(conditional, axis=1))
    plt.imshow(conditional[:2 * param_dict['carry_capacity'], :2 *
                           param_dict['carry_capacity']].T,
               norm=mpl.colors.LogNorm(),
               cmap=my_cmap,
               interpolation='nearest')
    plt.gca().invert_yaxis()
    plt.ylabel(r"i")
    plt.xlabel(r'j')
    title = r'$\rho=$' + str(param_dict['comp_overlap']) + r', $\mu=$' \
            + str(param_dict['immi_rate']) + r', $S=$' + str(param_dict['nbr_species'])
    plt.title(title)
    cbar = plt.colorbar()
    cbar.set_label(r'$P(i|j)$')
    fname = 'conditional' + 'sim' + str(sim_nbr)
    plt.savefig(dir + os.sep + fname + '.pdf')
    #plt.xscale('log')
    #plt.show()

    return 0
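The histogram of times present above constructs logbins but, as written, bins linearly (the bins=logbins argument is commented out). A standalone sketch of the log-binned variant on synthetic waiting times, in case that alternative is wanted:

import numpy as np

rng = np.random.default_rng(1)
times = rng.exponential(scale=10.0, size=1000)   # stand-in for time_present_sim
nbins = 100

# Log-spaced bin edges spanning the observed range, then a density histogram.
logbins = np.logspace(np.log10(times.min()), np.log10(times.max()), nbins)
counts, bin_edges = np.histogram(times, density=True, bins=logbins)
centres = (bin_edges[1:] + bin_edges[:-1]) / 2   # bin centres, e.g. for a scatter plot
print(centres.shape, counts.shape)   # (99,) (99,)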
Code Example #4
def mlv_plot_average_sim_results(dir, parameter='comp_overlap'):
    """
    Plot the average of a many results_(sim_nbr).pickle

    Input :
        dir     : directory that we're plotting from
        sim_nbr : simulation number (subdir sim%i %(sim_nbr))

    Output :
        Plots of a single simul
    """
    filename = dir + os.sep + 'consolidated_results.npz'

    if not os.path.exists(filename):
        mlv_consolidate_sim_results(dir, parameter)

    with np.load(filename) as f:
        dist_sim = f['ss_dist']
        cond_dist = f['conditional']

    param_dict, ss_dist, richness_dist, time_btwn_ext, mean_pop, mean_rich\
        , mean_time_present, P0, nbr_local_max, H, GS, nbr_species\
        , det_mean_present, correlation, conditional, _\
        = mlv_extract_results_sim(dir, 1)

    ss_dist_sim = np.mean(dist_sim, axis=0)
    mean_cond = np.mean(cond_dist, axis=0)

    fig = plt.figure()
    axes = plt.gca()
    r = np.random.randint(np.shape(dist_sim)[0], size=3)
    plt.plot(np.arange(len(ss_dist_sim)), ss_dist_sim, label='mean simulation')
    plt.scatter(np.arange(len(dist_sim[r[0]])),
                dist_sim[r[0]],
                label='simulation i')
    plt.scatter(np.arange(len(dist_sim[r[1]])),
                dist_sim[r[1]],
                label='simulation j')
    plt.scatter(np.arange(len(dist_sim[r[2]])),
                dist_sim[r[2]],
                label='simulation k')
    plt.ylabel(r"probability distribution function")
    plt.xlabel(r'n')
    plt.yscale('log')
    axes.set_ylim(
        [np.min(ss_dist_sim[ss_dist_sim != 0.0]), 2 * np.max(ss_dist_sim)])
    title = r'$\rho=$' + str(param_dict['comp_overlap']) + r', $\mu=$' \
            + str(param_dict['immi_rate']) + r', $S=$' + str(param_dict['nbr_species'])
    plt.title(title)
    axes.set_xlim([0.0, np.max(np.nonzero(ss_dist_sim))])
    plt.legend()
    fname = 'av_distribution'
    plt.savefig(dir + os.sep + fname + '.pdf')
    #plt.xscale('log')
    #plt.show()

    fig = plt.figure()
    axes = plt.gca()
    my_cmap = copy.copy(mpl.cm.get_cmap('PuBu'))
    my_cmap.set_bad((0, 0, 0))
    #print(np.sum( mean_cond[:200,:200].T + mean_cond[:200,:200] ,axis=1))
    axis_show = int(param_dict['carry_capacity'] * 1.5)
    plt.imshow(mean_cond[:axis_show, :axis_show].T,
               norm=mpl.colors.LogNorm(),
               cmap=my_cmap,
               interpolation='nearest')
    plt.gca().invert_yaxis()
    plt.ylabel(r"i")
    plt.xlabel(r'j')
    title = r'$\rho=$' + str(param_dict['comp_overlap']) + r', $\mu=$' \
            + str(param_dict['immi_rate']) + r', $S=$' + str(param_dict['nbr_species'])
    plt.title(title)
    cbar = plt.colorbar()
    cbar.set_label(r'$P(i|j)$')
    #plt.xscale('log')
    fname = 'conditional'
    plt.savefig(dir + os.sep + fname + '.pdf')

    param_dict['conditional'] = mean_cond
    model = theqs.Model_MultiLVim(**param_dict)
    probability = model.abund_jer(approx='simulation')

    fig = plt.figure()
    axes = plt.gca()
    r = np.random.randint(np.shape(dist_sim)[0], size=3)
    plt.plot(np.arange(len(ss_dist_sim)),
             ss_dist_sim,
             label='mean trajectories')
    plt.plot(np.arange(len(probability)),
             probability,
             label=r'from $P(n_i|n_j)$')
    plt.ylabel(r"probability distribution function")
    plt.xlabel(r'n')
    plt.yscale('log')
    axes.set_ylim(
        [np.min(ss_dist_sim[ss_dist_sim != 0.0]), 2 * np.max(ss_dist_sim)])
    title = r'$\rho=$' + str(param_dict['comp_overlap']) + r', $\mu=$' \
            + str(param_dict['immi_rate']) + r', $S=$' + str(param_dict['nbr_species'])
    plt.title(title)
    axes.set_xlim([0.0, np.max(np.nonzero(ss_dist_sim))])
    plt.legend()
    #plt.xscale('log')
    fname = 'check_steady_state'
    plt.savefig(dir + os.sep + fname + '.pdf')
    #plt.show()

    return 0
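In the averaged plot above, np.random.randint can pick the same simulation twice for the three overlay curves. A small sketch, on toy data, of drawing three distinct rows by sampling without replacement instead:

import numpy as np

rng = np.random.default_rng(2)
dist_sim = rng.random((10, 50))   # toy stand-in: 10 simulations, support length 50

# choice with replace=False guarantees three distinct simulations,
# whereas np.random.randint may draw the same index more than once.
r = rng.choice(dist_sim.shape[0], size=3, replace=False)
samples = dist_sim[r]
print(r, samples.shape)   # e.g. [8 0 4] (3, 50)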
Code Example #5
def mlv_extract_results_sim(dir, sim_nbr=1):
    """
    Analyze information collected in many results_(sim_nbr).pickle, output to
    other functions as arrays.

    Input :
        dir     : directory that we're extracting
        sim_nbr : simulation number (subdir sim%i %(sim_nbr))

    Output :
        param_dict          : dictionary of simulation parameters
        ss_dist             : steady-state abundance distribution P(n)
        richness_dist       : distribution of richness (number of species present)
        time_btwn_ext       : times present between extinctions
        mean_pop            : mean abundance
        mean_rich           : mean richness
        mean_time_present   : mean time present between extinctions
        P0                  : probability of zero abundance, P(0)
        nbr_local_max       : number of local maxima of ss_dist (capped at 2)
        H                   : Shannon entropy of ss_dist
        GS                  : Gini-Simpson index of ss_dist
        nbr_species         : total number of species S
        det_mean_present    : deterministic LV mean with S(1-P(0)) species
        correlation         : correlation between species abundances
        conditional         : conditional distribution P(n_i|n_j)
        av_J                : average J

    """
    # TODO QUICK FIX
    while not os.path.exists(dir + os.sep + 'sim' + str(sim_nbr) + os.sep +
                             'results_0.pickle'):
        sim_nbr += 1

    with open(
            dir + os.sep + 'sim' + str(sim_nbr) + os.sep + 'results_0.pickle',
            'rb') as handle:
        param_dict = pickle.load(handle)

    model = MultiLV(**param_dict)
    theory_model = theqs.Model_MultiLVim(**param_dict)

    # distribution
    if 'ss_distribution' in model.results:
        ss_dist     = model.results['ss_distribution'] \
                              / np.sum(model.results['ss_distribution'])
        mean_pop = np.dot(ss_dist, np.arange(len(ss_dist)))
        P0 = ss_dist[0]
        nbr_local_max = np.min([len(argrelextrema(ss_dist, np.greater)[0]), 2])
        H = -np.dot(ss_dist[ss_dist > 0.0], np.log(ss_dist[ss_dist > 0.0]))
        GS = 1.0 - np.dot(ss_dist, ss_dist)
        setattr(theory_model, 'nbr_species',
                int((1.0 - P0) * param_dict['nbr_species']))
        det_mean_present = theory_model.deterministic_mean()
    else:
        ss_dist, mean_pop, P0, nbr_local_max, H, GS = (None,) * 6
        det_mean_present = None

    # richness
    if 'richness' in model.results:
        richness_dist = model.results['richness']
        mean_rich = np.dot(model.results['richness'],
                           np.arange(len(model.results['richness'])))
    else:
        richness_dist, mean_rich = None, None

    # time
    if 'time_btwn_ext' in model.results:
        time_btwn_ext = model.results['time_btwn_ext']
        if time_btwn_ext != []:
            mean_time_present = np.mean(model.results['time_btwn_ext'])
        else:
            mean_time_present = np.nan
    else:
        time_btwn_ext, mean_time_present = None, None

    if 'corr_ni_nj' in model.results:
        correlation = model.results['corr_ni_nj']
    else:
        correlation = None

    if 'conditional' in model.results:
        conditional = model.results['conditional']
    else:
        conditional = None

    if 'av_J' in model.results:
        av_J = model.results['av_J']
    else:
        av_J = None

    # TODO : change to dictionary
    return param_dict, ss_dist, richness_dist, time_btwn_ext, mean_pop\
                     , mean_rich, mean_time_present, P0, nbr_local_max, H, GS\
                     , param_dict['nbr_species'], det_mean_present, correlation\
                     , conditional, av_J
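The TODO above asks for a dictionary return. A minimal sketch of a hypothetical wrapper (the name mlv_extract_results_sim_dict and the field labels are chosen here for illustration) that keys the current 16-value tuple by name, assuming mlv_extract_results_sim above is in scope:

# Field names in the same order as the tuple returned above.
RESULT_FIELDS = ('param_dict', 'ss_dist', 'richness_dist', 'time_btwn_ext',
                 'mean_pop', 'mean_rich', 'mean_time_present', 'P0',
                 'nbr_local_max', 'H', 'GS', 'nbr_species',
                 'det_mean_present', 'correlation', 'conditional', 'av_J')

def mlv_extract_results_sim_dict(dir, sim_nbr=1):
    """Hypothetical convenience wrapper: same values as above, keyed by name."""
    return dict(zip(RESULT_FIELDS, mlv_extract_results_sim(dir, sim_nbr=sim_nbr)))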
Code Example #6
def mlv_consolidate_sim_results(dir,
                                parameter1='immi_rate',
                                parameter2='comp_overlap'):
    """
    Analyze how the results from different simulations differ as parameter1
    and parameter2 vary.

    Input :
        dir       : directory that we're plotting from
        parameter1 : the parameter that changes between different simulations
                    (string)
        parameter2 : second parameter that changes between different simulations
                    (string)
    """
    filename = dir + os.sep + NPZ_SHORT_FILE

    with open(dir + os.sep + 'sim1' + os.sep + 'results_0.pickle',
              'rb') as handle:
        param_dict = pickle.load(handle)

    model = MultiLV(**param_dict)
    dict_array_flat = {}
    dict_array_2D = {}

    entries_to_remove = {'time_btwn_ext'}

    for k in entries_to_remove:
        (model.results).pop(k, None)

    list_results_keys = (model.results).keys()

    for key in list_results_keys:
        dict_array_flat[key] = []

    # Additional computations
    dict_array_flat['sim_dist'] = []
    dict_array_flat['rich_dist'] = []
    dict_array_flat['mf3_dist'] = []
    dict_array_flat['mf_dist'] = []
    dict_array_flat['mfdet_dist'] = []
    dict_array_flat['conv_dist'] = []
    dict_array_flat['av_ni_given_nj'] = []

    dict_array_flat['time_autocor_spec'] = []
    dict_array_flat['mean_time_autocor_abund'] = []
    dict_array_flat['std_time_autocor_abund'] = []
    dict_array_flat['dominance_turnover'] = []
    dict_array_flat['suppress_turnover'] = []
    dict_array_flat['dominance_return'] = []
    dict_array_flat['suppress_return'] = []

    # count number of subdirectories
    nbr_sims = len(next(os.walk(dir))[1])
    param1 = np.zeros(nbr_sims)
    param2 = np.zeros(nbr_sims)

    #nbr_sims=100
    # TODO change to dictionary
    for i in np.arange(nbr_sims):
        sim_nbr = i + 1
        with open(
                dir + os.sep + 'sim' + str(sim_nbr) + os.sep +
                'results_0.pickle', 'rb') as handle:
            param_dict = pickle.load(handle)
        with open(
                dir + os.sep + 'sim' + str(sim_nbr) + os.sep +
                'sim_param.pickle', 'rb') as handle:
            parameters = pickle.load(handle)
            #print(parameters['comp_overlap'],parameters['immi_rate'])

        model = MultiLV(**param_dict)
        theory_model = theqs.Model_MultiLVim(**param_dict)
        param1[i] = param_dict[parameter1]
        param2[i] = param_dict[parameter2]

        for key in list_results_keys:
            dict_array_flat[key].append(model.results[key])

        # distribution
        start = time.time()
        #print(model.results['ss_distribution'])
        ss_dist_sim     = model.results['ss_distribution'] \
                                / np.sum(model.results['ss_distribution'])
        #ss_dist_conv, _ = theory_model.abund_1spec_MSLV()
        ss_dist_mf, _ = theory_model.abund_sid()
        ss_dist_mf3, _ = theory_model.abund_sid_J()
        ss_dist_mfdet = theory_model.abund_jer()
        dict_array_flat['rich_dist'].append(model.results['richness'])
        dict_array_flat['sim_dist'].append(np.array(ss_dist_sim))
        #dict_array_flat['conv_dist'].append( np.array( ss_dist_conv ) )
        dict_array_flat['mf_dist'].append(np.array(ss_dist_mf))
        dict_array_flat['mf3_dist'].append(np.array(ss_dist_mf3))
        dict_array_flat['mfdet_dist'].append(np.array(ss_dist_mfdet))

        conditional = model.results['conditional']
        av_ni_given_nj = average_ni_given_nj(conditional)
        dict_array_flat['av_ni_given_nj'].append(av_ni_given_nj)

        S = model.nbr_species
        K = model.carry_capacity
        mu = model.immi_rate
        rho = model.comp_overlap
        rplus = model.birth_rate
        rminus = model.death_rate
        #print(ss_dist_sim)
        nbr_species = int(S * (1.0 - ss_dist_sim[0]))

        nbr = int(eq.deterministic_mean(nbr_species, mu, rho, rplus, rminus,
                                        K))
        dict_array_flat['dominance_turnover'].append(
            eq.mfpt_a2a(ss_dist_sim, nbr, mu, rplus, rminus, K, rho, S))
        dict_array_flat['suppress_turnover'].append(
            eq.mfpt_020(ss_dist_sim, mu))
        dict_array_flat['dominance_return'].append(
            eq.mfpt_a2b(ss_dist_sim, 0, nbr, mu, rplus))
        dict_array_flat['suppress_return'].append(
            eq.mfpt_b2a(ss_dist_sim, 0, nbr, mu, rplus))
        """ This previous code needs to be fixed.
        # TEMP FIX FOR SOME WRONG COEFFICIENT OF VARIATION
        correlations_fix(model, dir + os.sep + 'sim' + str(sim_nbr) + os.sep +
                  'results_0.pickle')

        # TEMP FIX FOR SOMETHING WRONG CONDITIONAL MLV71
        conditional = model.results['conditional']
        for j in np.arange(0,conditional.shape[0]):
            conditional[j,j] *= 2
            if np.sum(conditional[j][:]) != 0.0:
                conditional[j,:] /= np.sum(conditional[j,:])

        # TEMP AUTOCORRELATION TIME SAVING. This is an awful way of doing it. Fix it.

        n=2; fracTime=100

        autocor, _, specAutocor, _, newTimes =\
                 autocorrelation_spectrum(model.results['times'][n:],\
                model.results['trajectory'][n:])

        _, time_autocor_spec[sim_nbr-1] = exponential_fit_autocorrelation(specAutocor, newTimes, fracTime)
        mean_time_autocor_abund[sim_nbr-1], std_time_autocor_abund[sim_nbr-1] =\
                average_timescale_autocorrelation( autocor, newTimes, fracTime)
                """
        end = time.time()
        hours, rem = divmod(end - start, 3600)
        minutes, seconds = divmod(rem, 60)
        print(">>{}: Time elapsed: {:0>2}:{:0>2}:{:05.2f}".format(
            i, int(hours), int(minutes), seconds))

    # For heatmap stuff
    param1_2D = np.unique(param1)
    param2_2D = np.unique(param2)
    dim1 = len(param1_2D)
    dim2 = len(param2_2D)

    for keys in dict_array_flat.keys():
        if dict_array_flat[keys] == []:
            pass
        elif not hasattr(dict_array_flat[keys][0], '__len__'):
            dict_array_2D[keys] = np.zeros((dim1, dim2))
            print(keys, 'here')
        elif len(np.shape(dict_array_flat[keys][0])) < 2:
            # 2 dimensional properties (joint dist) ignored for now
            max_len = len(max(dict_array_flat[keys], key=len))
            dict_array_2D[keys] = np.zeros((dim1, dim2, max_len))
            print(keys, 'done')
        else:
            pass

    # put into a 2d array all the previous results
    for key in (dict_array_2D).keys():
        if not hasattr(dict_array_flat[key][0], '__len__'):
            for sim in np.arange(nbr_sims):
                i = np.where(param1_2D == param1[sim])[0][0]
                j = np.where(param2_2D == param2[sim])[0][0]
                dict_array_2D[key][i, j] = dict_array_flat[key][sim]
        else:
            for sim in np.arange(nbr_sims):
                i = np.where(param1_2D == param1[sim])[0][0]
                j = np.where(param2_2D == param2[sim])[0][0]
                dict_array_2D[key][i,j,:len(dict_array_flat[key][sim])] \
                                                = dict_array_flat[key][sim]

    dict_array_2D['carry_capacity'] = model.carry_capacity
    dict_array_2D['birth_rate'] = model.birth_rate
    dict_array_2D['death_rate'] = model.death_rate
    dict_array_2D['nbr_species'] = model.nbr_species
    dict_array_2D['immi_rate'] = model.immi_rate
    dict_array_2D['comp_overlap'] = model.comp_overlap
    dict_array_2D[parameter1] = param1_2D
    dict_array_2D[parameter2] = param2_2D
    # save results in a npz file
    np.savez(filename, **dict_array_2D)

    return filename, dict_array_2D
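The heart of the consolidation above is mapping a flat list of simulations onto a (parameter1, parameter2) grid with np.unique and np.where. A standalone sketch of that indexing pattern on toy values:

import numpy as np

param1 = np.array([0.1, 0.1, 0.5, 0.5])   # per-simulation values of parameter1
param2 = np.array([1.0, 2.0, 1.0, 2.0])   # per-simulation values of parameter2
values = np.array([10., 20., 30., 40.])   # one scalar result per simulation

param1_2D = np.unique(param1)             # sorted unique values define the grid axes
param2_2D = np.unique(param2)
grid = np.zeros((len(param1_2D), len(param2_2D)))
for sim in np.arange(len(values)):
    i = np.where(param1_2D == param1[sim])[0][0]
    j = np.where(param2_2D == param2[sim])[0][0]
    grid[i, j] = values[sim]
print(grid)   # [[10. 20.] [30. 40.]]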
Code Example #7
def mlv_consolidate_sim_results_old(dir,
                                    parameter1='immi_rate',
                                    parameter2='comp_overlap'):
    """
    Analyze how the results from different simulations differ as parameter1
    and parameter2 vary.

    Input :
        dir       : directory that we're plotting from
        parameter1 : the parameter that changes between different simulations
                    (string)
        parameter2 : second parameter that changes between different simulations
                    (string)
    """
    filename = dir + os.sep + NPZ_SHORT_FILE

    # count number of subdirectories
    nbr_sims = len(next(os.walk(dir))[1])

    # initialize the per-simulation arrays
    param1 = np.zeros(nbr_sims)
    param2 = np.zeros(nbr_sims)
    sim_dist_vary = []
    rich_dist_vary = []
    conv_dist_vary = []
    mf_dist_vary = []
    mf3_dist_vary = []
    av_ni_given_nj_vary = []
    coeff_ni_nj = np.zeros(nbr_sims)
    corr_ni_nj = np.zeros(nbr_sims)
    coeff_J_n = np.zeros(nbr_sims)
    corr_J_n = np.zeros(nbr_sims)
    coeff_Jminusn_n = np.zeros(nbr_sims)
    corr_Jminusn_n = np.zeros(nbr_sims)
    coeff_ni_nj_S = np.zeros(nbr_sims)
    corr_ni_nj_S = np.zeros(nbr_sims)
    coeff_J_n_S = np.zeros(nbr_sims)
    corr_J_n_S = np.zeros(nbr_sims)
    coeff_Jminusn_n_S = np.zeros(nbr_sims)
    corr_Jminusn_n_S = np.zeros(nbr_sims)
    # TEMP TIMES
    time_autocor_spec = np.zeros(nbr_sims)
    mean_time_autocor_abund = np.zeros(nbr_sims)
    std_time_autocor_abund = np.zeros(nbr_sims)
    dominance_turnover = np.zeros(nbr_sims)
    suppress_turnover = np.zeros(nbr_sims)
    dominance_return = np.zeros(nbr_sims)
    suppress_return = np.zeros(nbr_sims)

    # TODO change to dictionary
    for i in np.arange(nbr_sims):
        sim_nbr = i + 1
        if not os.path.exists(dir + os.sep + 'sim' + str(sim_nbr) + os.sep +
                              'results_0.pickle'):

            print("Missing simulation: " + str(sim_nbr))
            rich_dist_vary.append(np.array([0]))
            sim_dist_vary.append(np.array([0]))
            #conv_dist_vary.append( np.array( ss_dist_conv ) )
            mf_dist_vary.append(np.array([0]))
            mf3_dist_vary.append(np.array([0]))
            av_ni_given_nj_vary.append(np.array([0]))

        else:

            with open(
                    dir + os.sep + 'sim' + str(sim_nbr) + os.sep +
                    'results_0.pickle', 'rb') as handle:
                param_dict = pickle.load(handle)

            model = MultiLV(**param_dict)
            theory_model = theqs.Model_MultiLVim(**param_dict)

            # distribution
            start = time.time()
            ss_dist_sim     = model.results['ss_distribution'] \
                                    / np.sum(model.results['ss_distribution'])
            #ss_dist_conv, _ = theory_model.abund_1spec_MSLV()
            ss_dist_mf, _ = theory_model.abund_sid()
            ss_dist_mf3, _ = theory_model.abund_sid_J()
            richness_dist = model.results['richness']
            rich_dist_vary.append(np.array(richness_dist))
            sim_dist_vary.append(np.array(ss_dist_sim))
            #conv_dist_vary.append( np.array( ss_dist_conv ) )
            mf_dist_vary.append(np.array(ss_dist_mf))
            mf3_dist_vary.append(np.array(ss_dist_mf3))

            # TEMP FIX FOR SOME WRONG COEFFICIENT OF VARIATION
            #correlations_fix(model, dir + os.sep + 'sim' + str(sim_nbr) + os.sep +
            #           'results_0.pickle')

            # TEMP FIX FOR SOMETHING WRONG CONDITIONAL MLV71
            conditional = model.results['conditional']
            #for j in np.arange(0,conditional.shape[0]):
            #    conditional[j,j] *= 2
            #    if np.sum(conditional[j][:]) != 0.0:
            #        conditional[j,:] /= np.sum(conditional[j,:])

            av_ni_given_nj = average_ni_given_nj(conditional)
            av_ni_given_nj_vary.append(av_ni_given_nj)

            corr_ni_nj[sim_nbr - 1] = model.results['corr_ni_nj']
            coeff_ni_nj[sim_nbr - 1] = model.results['coeff_ni_nj']
            corr_J_n[sim_nbr - 1] = model.results['corr_J_n']
            coeff_J_n[sim_nbr - 1] = model.results['coeff_J_n']
            corr_Jminusn_n[sim_nbr - 1] = model.results['corr_Jminusn_n']
            coeff_Jminusn_n[sim_nbr - 1] = model.results['coeff_Jminusn_n']
            corr_ni_nj_S[sim_nbr - 1] = model.results['corr_ni_nj_S']
            coeff_ni_nj_S[sim_nbr - 1] = model.results['coeff_ni_nj_S']
            corr_J_n_S[sim_nbr - 1] = model.results['corr_J_n_S']
            coeff_J_n_S[sim_nbr - 1] = model.results['coeff_J_n_S']
            corr_Jminusn_n_S[sim_nbr - 1] = model.results['corr_Jminusn_n_S']
            coeff_Jminusn_n_S[sim_nbr - 1] = model.results['coeff_Jminusn_n_S']

            ############################################################################################################################
            # TEMP AUTOCORRELATION TIME SAVING. This is an awful way of doing it. Fix it.
            ############################################################################################################################
            n = 2
            fracTime = 100
            """
            autocor, _, specAutocor, _, newTimes =\
                     autocorrelation_spectrum(model.results['times'][n:],\
                    model.results['trajectory'][n:])

            _, time_autocor_spec[sim_nbr-1] = exponential_fit_autocorrelation(specAutocor, newTimes, fracTime)
            mean_time_autocor_abund[sim_nbr-1], std_time_autocor_abund[sim_nbr-1] =\
                    average_timescale_autocorrelation( autocor, newTimes, fracTime)
            """

            S = model.nbr_species
            K = model.carry_capacity
            mu = model.immi_rate
            rho = model.comp_overlap
            rplus = model.birth_rate
            rminus = model.death_rate
            nbr_species = int(S * (1.0 - ss_dist_sim[0]))
            nbr = int(
                eq.deterministic_mean(nbr_species, mu, rho, rplus, rminus, K))

            dominance_turnover[sim_nbr - 1] = eq.mfpt_a2a(
                ss_dist_sim, nbr, mu, rplus, rminus, K, rho, S)
            suppress_turnover[sim_nbr - 1] = eq.mfpt_020(ss_dist_sim, mu)
            dominance_return[sim_nbr - 1] = eq.mfpt_a2b(
                ss_dist_sim, 0, nbr, mu, rplus)
            suppress_return[sim_nbr - 1] = eq.mfpt_b2a(ss_dist_sim, 0, nbr, mu,
                                                       rplus)

            ############################################################################################################################
            # TEMP AUTOCORRELATION TIME SAVING. This is an awful way of doing it. Fix it.
            ############################################################################################################################

            end = time.time()
            hours, rem = divmod(end - start, 3600)
            minutes, seconds = divmod(rem, 60)
            print(">>{}: Time elapsed: {:0>2}:{:0>2}:{:05.2f}".format(
                i, int(hours), int(minutes), seconds))
            # Value of parameters
            param1[i] = param_dict[parameter1]
            param2[i] = param_dict[parameter2]

    # making all sims have same distribution length
    len_longest_sim = len(max(sim_dist_vary, key=len))
    length_longest_rich = len(max(rich_dist_vary, key=len))
    sim_dist = np.zeros((nbr_sims, len_longest_sim))
    conv_dist = np.zeros((nbr_sims, len_longest_sim))
    mf_dist = np.zeros((nbr_sims, len_longest_sim))
    mf3_dist = np.zeros((nbr_sims, len_longest_sim))
    rich_dist = np.zeros((nbr_sims, length_longest_rich))
    av_ni_given_nj = np.zeros((nbr_sims, len_longest_sim))

    for i in np.arange(nbr_sims):
        #conv_idx    = np.min( [ len(conv_dist_vary[i]), len_longest_sim ] )
        mf_idx = np.min([len(mf_dist_vary[i]), len_longest_sim])
        mf3_idx = np.min([len(mf3_dist_vary[i]), len_longest_sim])
        sim_dist[i, :len(sim_dist_vary[i])] = sim_dist_vary[i]
        #conv_dist[i,:conv_idx]                  = conv_dist_vary[i][conv_idx]
        mf_dist[i, :mf_idx] = mf_dist_vary[i][:mf_idx]
        mf3_dist[i, :mf3_idx] = mf3_dist_vary[i][:mf3_idx]
        rich_dist[i, :len(rich_dist_vary[i])] = rich_dist_vary[i]
        av_ni_given_nj[
            i, :len(av_ni_given_nj_vary[i])] = av_ni_given_nj_vary[i]

    # For heatmap stuff
    param1_2D = np.unique(param1)
    param2_2D = np.unique(param2)
    dim_1 = len(param1_2D)
    dim_2 = len(param2_2D)

    # initialize
    mf_dist2D = np.zeros((dim_1, dim_2, len_longest_sim))
    mf3_dist2D = np.zeros((dim_1, dim_2, len_longest_sim))
    conv_dist2D = np.zeros((dim_1, dim_2, len_longest_sim))
    sim_dist2D = np.zeros((dim_1, dim_2, len_longest_sim))
    av_ni_given_nj2D = np.zeros((dim_1, dim_2, len_longest_sim))
    rich_dist2D = np.zeros((dim_1, dim_2, length_longest_rich))
    corr_ni_nj2D = np.zeros((dim_1, dim_2))
    coeff_ni_nj2D = np.zeros((dim_1, dim_2))
    corr_J_n2D = np.zeros((dim_1, dim_2))
    coeff_J_n2D = np.zeros((dim_1, dim_2))
    corr_Jminusn_n2D = np.zeros((dim_1, dim_2))
    coeff_Jminusn_n2D = np.zeros((dim_1, dim_2))

    time_autocor_spec2D = np.zeros((dim_1, dim_2))
    mean_time_autocor_abund2D = np.zeros((dim_1, dim_2))
    std_time_autocor_abund2D = np.zeros((dim_1, dim_2))
    dominance_turnover2D = np.zeros((dim_1, dim_2))
    suppress_turnover2D = np.zeros((dim_1, dim_2))
    dominance_return2D = np.zeros((dim_1, dim_2))
    suppress_return2D = np.zeros((dim_1, dim_2))

    # put into a 2d array all the previous results
    for sim in np.arange(nbr_sims):
        i = np.where(param1_2D == param1[sim])[0][0]
        j = np.where(param2_2D == param2[sim])[0][0]
        sim_dist2D[i, j] = sim_dist[sim]
        mf_dist2D[i, j] = mf_dist[sim]
        mf3_dist2D[i, j] = mf3_dist[sim]
        av_ni_given_nj2D[i, j] = av_ni_given_nj[sim]
        #conv_dist2D[i,j]       = conv_dist[sim]
        rich_dist2D[i, j] = rich_dist[sim]
        corr_ni_nj2D[i, j] = corr_ni_nj[sim]
        coeff_ni_nj2D[i, j] = coeff_ni_nj[sim]
        corr_J_n2D[i, j] = corr_J_n[sim]
        coeff_J_n2D[i, j] = coeff_J_n[sim]
        corr_Jminusn_n2D[i, j] = corr_Jminusn_n[sim]
        coeff_Jminusn_n2D[i, j] = coeff_Jminusn_n[sim]

        #----------------------------------------------------------------------------------------------------------------------------
        # TEMP AUTOCORRELATION TIME SAVING. This is an awful way of doing it. Fix it.
        #----------------------------------------------------------------------------------------------------------------------------

        time_autocor_spec2D[i, j] = time_autocor_spec[sim]
        mean_time_autocor_abund2D[i, j] = mean_time_autocor_abund[sim]
        std_time_autocor_abund2D[i, j] = std_time_autocor_abund[sim]
        dominance_turnover2D[i, j] = dominance_turnover[sim]
        suppress_turnover2D[i, j] = suppress_turnover[sim]
        dominance_return2D[i, j] = dominance_return[sim]
        suppress_return2D[i, j] = suppress_return[sim]

        #----------------------------------------------------------------------------------------------------------------------------
        # TEMP AUTOCORRELATION TIME SAVING. This is an awful way of doing it. Fix it.
        #----------------------------------------------------------------------------------------------------------------------------

    # arrange into a dictionary to save
    dict_arrays = {
        'sim_dist': sim_dist2D,
        'mf_dist': mf_dist2D,
        'mf3_dist': mf3_dist2D,
        'conv_dist': conv_dist2D,
        'rich_dist': rich_dist2D,
        'rich_dist2D': rich_dist,
        'corr_ni_nj2D': corr_ni_nj2D,
        'coeff_ni_nj2D': coeff_ni_nj2D,
        'corr_J_n2D': corr_J_n2D,
        'coeff_J_n2D': coeff_J_n2D,
        'corr_Jminusn_n2D': corr_Jminusn_n2D,
        'coeff_Jminusn_n2D': coeff_Jminusn_n2D,
        'carry_capacity': model.carry_capacity,
        'birth_rate': model.birth_rate,
        'death_rate': model.death_rate,
        'nbr_species': model.nbr_species,
        'immi_rate': model.immi_rate,
        'comp_overlap': model.comp_overlap,
        'time_autocor_spec2D': time_autocor_spec2D,
        'mean_time_autocor_abund2D': mean_time_autocor_abund2D,
        'std_time_autocor_abund2D': std_time_autocor_abund2D,
        'dominance_turnover2D': dominance_turnover2D,
        'suppress_turnover2D': suppress_turnover2D,
        'dominance_return2D': dominance_return2D,
        'suppress_return2D': suppress_return2D,
        'av_ni_given_nj2D': av_ni_given_nj2D
    }
    dict_arrays[parameter1] = param1_2D
    dict_arrays[parameter2] = param2_2D
    # save results in a npz file
    np.savez(filename, **dict_arrays)

    return filename, dict_arrays
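The "making all sims have same distribution length" block above zero-pads variable-length distributions into a single rectangular array before any 2D indexing. A standalone sketch of that padding step on toy data:

import numpy as np

ragged = [np.array([0.5, 0.5]), np.array([0.2, 0.3, 0.5])]   # per-sim distributions
len_longest = len(max(ragged, key=len))
padded = np.zeros((len(ragged), len_longest))
for i, dist in enumerate(ragged):
    padded[i, :len(dist)] = dist                              # shorter rows end in zeros
print(padded)   # [[0.5 0.5 0. ] [0.2 0.3 0.5]]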