Example #1
def sample_param_grid_mpi():
    """Samples k1, kinv, k2 over a lattice and records obj. fn. evaluations. A starting point for future analysis"""
    # set up mpi
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    nprocs = comm.Get_size()

    # set up base model
    A0 = 1.0  # initial concentration of A
    k1_true = 0.1
    kinv_true = 1000.0
    k2_true = 1000.0
    eigval_plus_true = 0.5 * (-(kinv_true + k1_true + k2_true) + np.sqrt(
        np.power(k1_true + kinv_true + k2_true, 2) - 4 * k1_true * k2_true))
    dt = 0.5 / np.abs(eigval_plus_true)
    # start at dt, end at 5*dt
    ntimes = 5  # somewhat arbitrary
    times = np.linspace(dt, 5 * dt, ntimes)
    model = Rawlings_Model(times,
                           A0,
                           k1_true,
                           kinv_true,
                           k2_true,
                           using_sympy=False)

    # plot analytical results vs. qssa
    if rank == 0:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(times, model.gen_timecourse(k1_true, kinv_true, k2_true))
        ax.plot(
            times,
            A0 * (1 - np.exp(-k1_true * k2_true * times /
                             (kinv_true + k2_true))))
        plt.show()

    nks = 10000000
    # k1 \in [10^{-4}, 10^{-1}],    kinv, k2 \in [10^1, 10^4]
    ks = np.power(
        10,
        np.random.uniform(size=(nks, 3)) * np.array((3, 3, 3)) - np.array(
            (4, -1, -1)))
    nks_per_proc = nks // nprocs + 1  # add one so integer division never undercounts
    params_and_of_evals = np.empty((nks_per_proc, 4))
    count = 0
    for k in uf.parallelize_iterable(ks, rank, nprocs):
        params_and_of_evals[count] = model.lsq_of(k[0], k[1],
                                                  k[2]), k[0], k[1], k[2]
        count = count + 1

    all_params_and_of_evals = comm.gather(params_and_of_evals[:count], root=0)
    if rank == 0:
        all_params_and_of_evals = np.concatenate(all_params_and_of_evals)
        print(all_params_and_of_evals.shape)
        all_params_and_of_evals.dump('./data/params-ofevals.pkl')
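# Note: uf.parallelize_iterable is a project helper not shown in these examples. A minimal
# round-robin sketch consistent with how it is called above (iterable, rank, nprocs) might
# look like the following; this is a hypothetical illustration, not the actual implementation.
def parallelize_iterable(iterable, rank, nprocs):
    """Yield every nprocs-th item starting at index 'rank', so each MPI process
    works on a disjoint subset of the data."""
    for i, item in enumerate(iterable):
        if i % nprocs == rank:
            yield item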
Example #2
def mm_contour_grid_mpi():
    """Calculates three-dimensional contours in K/V/S_t space in parallel through mpi4py, distributing S_t values over different processes and saving the output in './data/of_evals.csv'"""

    # # attempt to turn off error messages
    # np.seterr(all='ignore')

    # init MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    nprocs = comm.Get_size()
    # nks = 1000 # number of K vals to sample
    # nvs = 1000 # number of S vals to sample
    # nsts = 6 # number of S_t vals to sample
    # npts_per_proc = nsts/nprocs # number of St values to distribute to each processor
    # Ks = 2*np.logspace(-1, 3, nks) # k vals
    # Vs = np.logspace(-1, 3, nvs) # v vals
    # Sts = 2*np.logspace(-0.5, 0.5, nsts) # st vals
    # npts_per_proc = 1
    # nepsilons = 400
    # nkappas = 400
    # epsilons = np.logspace(-6, 0, nepsilons)
    # kappas = np.logspace(-5, 4, nkappas)
    # nks = 500 # number of K vals to sample
    # nvs = 500 # number of S vals to sample
    # npts_per_proc = 1 #nsts/nprocs # number of St values to distribute to each processor
    # Ks = 2*np.logspace(-1, 3, nks) # k vals
    # Vs = np.logspace(-1, 3, nvs) # v vals

    # set up base system
    params = OrderedDict((('K',2.0), ('V',1.0), ('St',2.0), ('epsilon',1e-3), ('kappa',10.0))) # from Antonios' writeup
    true_params = np.array(list(params.values()))
    nparams = true_params.shape[0]
    transform_id = 't2'
    nstate_params = 100
    ncontinuation_params = 100
    nthird_params = 1*nprocs
    # state_params = {'id':'St', 'data':np.linspace(1.9, 2.1, nstate_params)}#2*np.logspace(-1, 3, nstate_params)}
    state_params = {'id':'V', 'data':np.logspace(-1, 3, ncontinuation_params)} #np.linspace(0.5, 3.5, nstate_params)}
    third_params = {'id':'K', 'data':np.logspace(-1, 3, ncontinuation_params)} #np.linspace(0.5, 4.5, ncontinuation_params)}
    continuation_params = {'id':'St', 'data':[2.0]}#np.linspace(2, 3, nthird_params)}
    # set init concentrations
    S0 = params['St']; C0 = 0.0; P0 = 0.0 # init concentrations
    Cs0 = np.array((S0, C0, P0))
    # set times at which to collect data
    tscale = (params['St'] + params['K'])/params['V'] # timescale of slow evolution
    npts = 20
    times = tscale*np.linspace(1,npts,npts)/5.0
    # use these params, concentrations and times to define the MM system
    contour = 0.01 # f_avg_error below tol will be saved
    MM_system = MMS.MM_Specialization(Cs0, times, true_params, transform_id, [state_params['id']], continuation_params['id'], contour)
    # true_traj_squared_norm = np.power(np.linalg.norm(MM_system.gen_profile(Cs0, times, true_params)[:,2]), 2) # for recording relative as error, scale of_eval by this norm
    #  loop over all parameter combinations
    st_slices = []
    # suppress output to stdout in the inner loop, as it's always (hopefully) about lsoda's performance
    # with stdout_redirected():
    for third_param in uf.parallelize_iterable(third_params['data'], rank, nprocs):
        MM_system.adjust_const_param(third_params['id'], third_param)
        count = 0 # counter of number of parameter combinations that pass tolerance
        kept_pts = np.empty((nstate_params*ncontinuation_params,4)) # storage for parameter combinations that pass obj. fn. tol.
        for state_param in state_params['data']:
            for continuation_param in continuation_params['data']:
                try:
                    # record relative error
                    f_eval = MM_system.f_avg_error(np.array((state_param,)), continuation_param)
                except CustomErrors.EvalError:
                    continue
                else:
                    if f_eval < 0:
                        kept_pts[count] = (state_param, continuation_param, third_param, f_eval)
                        count = count + 1

        kept_pts = kept_pts[:count]
        if count > 0:
            st_slices.append(kept_pts)

    if len(st_slices) > 0:
        st_slices = np.concatenate(st_slices)
        print(st_slices.shape)
    else:
        print('no slices found on this rank')
        st_slices = None
    # gather all the points to root and save
    # kept_pts = kept_pts[:count]
    all_pts = comm.gather(st_slices, root=0)
    if rank == 0:
        # drop ranks that found no points before concatenating
        all_pts = [pts for pts in all_pts if pts is not None]
        full_pts = np.concatenate(all_pts)
        header = ','.join([key + "=" + str(val) for key, val in params.items()]) + ',Tested=' + state_params['id'] + continuation_params['id'] + third_params['id']
        np.savetxt('./data/contours_' + state_params['id'] + '_' + continuation_params['id'] + '_' + third_params['id'] +  '.csv', full_pts, delimiter=',', header=header, comments='')
        plt.scatter(full_pts[:,0], full_pts[:,2])
        plt.show()
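# The CSV written above can be read back for later plotting. A small sketch, assuming the file
# was produced with the parameter ids used above (state='V', continuation='St', third='K'),
# so the columns are (V, St, K, f_eval); adjust the path to whatever was actually written.
import numpy as np
import matplotlib.pyplot as plt

pts = np.genfromtxt('./data/contours_V_St_K.csv', delimiter=',', skip_header=1)
plt.scatter(pts[:, 0], pts[:, 2], c=pts[:, 3], lw=0)
plt.xlabel('V')
plt.ylabel('K')
plt.show()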
Example #3
def dmaps_two_important_one_sloppy():
    """Generate parameter combinations in which there are two important (alpha, lambda) and one sloppy (epsilon) parameter(s) and use DMAPS with a kernel that accounts for both parameter-space distance and distances in model output, with the aim to uncover the alpha and lambda parameters"""
    if os.path.isfile('./data/a-lam-eps-of-params-new.pkl'):
        # already have data saved, load and trim data to approx 5000 pts for DMAPS
        params = np.load('./data/a-lam-eps-of-params-new.pkl')
        trajs = np.load('./data/a-lam-eps-trajs-new.pkl')
        tol = 2e-2
        trajs = trajs[params[:,3] < tol]
        params = params[params[:,3] < tol]
        params = params[:,:3]
        print('Have', params.shape[0], 'pts in dataset')
        data = list(zip(params, trajs))
        # epsilons = np.logspace(-3, 1, 5)
        # kernels = [DMAPS_Data_Kernel(epsilon) for epsilon in epsilons]
        # dmaps.kernel_plot(kernels, epsilons, data)
        epsilon = 1e-2 # from epsilon plot
        kernel = DMAPS_Data_Kernel(epsilon)
        k = 30
        eigvals, eigvects = dmaps.embed_data_customkernel(data, k, kernel, symmetric=True)
        eigvals.dump('./data/dmaps-data-kernel-eigvals.pkl')
        eigvects.dump('./data/dmaps-data-kernel-eigvects.pkl')
        for i in range(1,k):
            fig = plt.figure()
            ax = fig.add_subplot(111, projection='3d')
            ax.scatter(np.log10(params[:,0]), np.log10(params[:,1]), np.log10(params[:,2]), c=eigvects[:,i])
            plt.savefig('./figs/data-space-dmaps' + str(i) + '.png')
            # plt.show()
    else:
        # need to generate dataset

        # CREATE DATASET (no dataset exists):
        # init MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        nprocs = comm.Get_size()

        # set up base system
        # specify ode parameters
        (a_true, b_true, lam_true, eps_true) = (0.1, 1.0, 0.1, 0.001)
        params = np.array((a_true, b_true, lam_true, eps_true))
        # create system with given params
        z_system = ZM.Z_Model(params)

        # set up integration times
        t0 = 3*eps_true # 0
        tfinal = 1/lam_true
        dt = eps_true
        ntimes = 50
        times = np.linspace(t0, tfinal, ntimes)

        # get true trajectory based on true initial conditions
        x0_true = np.array((1, a_true))
        x_true_traj = z_system.get_trajectory(x0_true, times)

        # set up sampling grid and storage space for obj. fn. evals
        # lam_max = 1.2
        nsamples = 100
        lam_samples = np.linspace(0.9*lam_true, 1.1*lam_true, nsamples)
        a_samples = np.linspace(0.9*a_true, 1.1*a_true, nsamples)
        epsmin = 1e-6
        epsmax = 1e-1
        eps_samples = np.logspace(np.log10(epsmin), np.log10(epsmax), nsamples)
        # add noise to each individual parameter combination to create nice dataset
        params_noise = np.empty((nsamples*nsamples*nsamples, 4))
        params_noise[:,0] = 0.01*np.random.normal(loc=0, size=nsamples*nsamples*nsamples) # same noise scale for a and lam
        params_noise[:,1] = 0 # no noise in b
        params_noise[:,2] = 0.01*np.random.normal(loc=0, size=nsamples*nsamples*nsamples)
        params_noise[:,3] = 0.1*np.random.normal(loc=0, size=nsamples*nsamples*nsamples) # noise for eps must vary with scale
        # eps_samples = np.logspace(-6, np.log10(epsmax), nsamples)
        params = np.empty((nsamples*nsamples*nsamples, 4)) # space for obj. fn. evals
        trajs = np.empty((nsamples*nsamples*nsamples, ntimes, 2))

        count = 0
        for lam in uf.parallelize_iterable(lam_samples, rank, nprocs):
            for eps in eps_samples:
                for a in a_samples:
                    new_params = np.array((a, b_true, lam, eps)) + params_noise[count]*np.array((1,1,1,eps))
                    z_system.change_parameters(new_params)
                    try:
                        x_sample_traj = z_system.get_trajectory(x0_true, times)
                    except CustomErrors.IntegrationError:
                        continue
                    else:
                        params[count] = (new_params[0], new_params[2], new_params[3], get_of(x_sample_traj, x_true_traj)) # a, lam, eps, obj. fn. value
                        trajs[count] = x_sample_traj
                        count = count + 1

        params = params[:count]
        all_params = comm.gather(params, root=0)
        trajs = trajs[:count]
        all_trajs = comm.gather(trajs, root=0)

        if rank == 0:
            all_params = np.concatenate(all_params)
            all_params.dump('./data/a-lam-eps-of-params-new.pkl')
            all_trajs = np.concatenate(all_trajs)
            all_trajs.dump('./data/a-lam-eps-trajs-new.pkl')

            print('******************************\nData saved in ./data/a-lam-eps-...\n******************************')
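# dmaps.embed_data_customkernel belongs to the project's dmaps module and is not shown here.
# A self-contained numpy sketch of a diffusion-maps embedding with a custom kernel, roughly
# what such a routine is assumed to do (hypothetical, not the project's actual code):
import numpy as np

def embed_data_customkernel_sketch(data, k, kernel):
    """Return the leading k eigenvalues/eigenvectors of the diffusion operator
    built from kernel(data[i], data[j])."""
    n = len(data)
    W = np.array([[kernel(xi, xj) for xj in data] for xi in data])  # symmetric similarities
    D = W.sum(axis=1)
    # symmetric conjugate of the row-stochastic Markov matrix: S = D^{-1/2} W D^{-1/2}
    S = W / np.sqrt(np.outer(D, D))
    eigvals, eigvects = np.linalg.eigh(S)
    order = np.argsort(np.abs(eigvals))[::-1][:k]
    # map back to right eigenvectors of the Markov matrix D^{-1} W
    return eigvals[order], eigvects[:, order] / np.sqrt(D)[:, None]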
Example #4
def check_transformed_params_contours():
    """Attempt to generate a contour plot in c1/c2 space using the transformed model. **Does not work, dmaps_transformed_params successfully performs a similar function**"""
    # init MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    nprocs = comm.Get_size()

    # specify ode parameters
    (a_true, b_true, c1_true, c2_true) = (0.1, 0.1, 0.1, 0.001)
    params = np.array((a_true, b_true, c1_true, c2_true))
    # create system with given params
    lam_max = 1.2
    epsmax = 1e-1
    S = 5.0
    z_system = ZMT.Z_Model_Transformed(params, lam_max, epsmax, S)

    # set up integration times
    lam_true = S*lam_max*(c1_true*c1_true+c2_true*c2_true+np.arctan(c2_true/c1_true)/(2*np.pi*S))
    eps_true = epsmax*np.arctan(c2_true/c1_true)/(2*np.pi*S)
    print('lamtrue, epstrue:', lam_true, eps_true)
    t0 = 0
    tfinal = 1/lam_true
    dt = eps_true
    times = np.arange(t0, tfinal, dt)
    ntimes = times.shape[0]

    # get true trajectory based on true initial conditions
    x0_true = np.array((1, a_true))
    x_true_traj = z_system.get_trajectory(x0_true, times)

    # set up sampling grid and storage space for obj. fn. evals
    nsamples = 100
    # c1_samples = np.logspace(-3, 3, nsamples)
    # c2_samples = np.logspace(-3, 3, nsamples)
    c1_samples = np.linspace(-np.sqrt(1 + 1/S), np.sqrt(1 + 1/S), nsamples)
    c2_samples = np.linspace(-np.sqrt(1 + 1/S), np.sqrt(1 + 1/S), nsamples)
    data = np.empty((nsamples*nsamples, 3)) # space for obj. fn. evals

    count = 0
    for c1 in uf.parallelize_iterable(c1_samples, rank, nprocs):
        for c2 in c2_samples:
            z_system.change_transformed_parameters(np.array((a_true, b_true, c1, c2)))
            try:
                x_sample_traj = z_system.get_trajectory(x0_true, times)
            except CustomErrors.IntegrationError:
                continue
            else:
                data[count] = (c1, c2, get_of(x_sample_traj, x_true_traj))
                count = count + 1

    data = data[:count]
    all_data = comm.gather(data, root=0)

    if rank == 0:
        all_data = np.concatenate(all_data)
        print('of min,max:', np.min(all_data[:,2]), np.max(all_data[:,2]))

        # plot output
        fig = plt.figure()
        ax = fig.add_subplot(111)
        # ax.set_xscale('log')
        # ax.set_yscale('log')
        tol = 1e2
        all_data = all_data[all_data[:,2] < tol]
        ax.scatter(all_data[:,0], all_data[:,1], c=all_data[:,2], s=40, lw=0)
        # ax.set_ylabel(r'$\lambda$')
        # ax.set_xlabel(r'$\epsilon$')
        ax.set_xlabel('c1')
        ax.set_ylabel('c2')
        plt.show()
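# For reference, the (c1, c2) -> (lambda, epsilon) map assumed by ZMT.Z_Model_Transformed,
# copied from the expressions used for lam_true/eps_true above (a convenience helper for
# illustration, not part of the original module):
import numpy as np

def transformed_to_original(c1, c2, lam_max, epsmax, S):
    """Map the transformed parameters (c1, c2) back to (lambda, epsilon)."""
    lam = S*lam_max*(c1*c1 + c2*c2 + np.arctan(c2/c1)/(2*np.pi*S))
    eps = epsmax*np.arctan(c2/c1)/(2*np.pi*S)
    return lam, eps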
Example #5
def data_space_plot():
    """Creates three-dimensional dataspace plot, creating trajectories from different values of a and b (parameters could be changed), and then plots this three-dimensional figure and colors by both selected parameters. **Note: This function uses the quadratic functional form for the fast manifold, as opposed to the linear form used above**"""
    # init MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    nprocs = comm.Get_size()

    params = np.array((0.1, 0.5, 1, 0.001)) # (a, b, lambda, epsilon)
    (a_true, b_true, lam_true, eps_true) = (0.1, 0.5, 1, 1000)
    # create system with given params
    z_system = ZM.Z_Model(params)

    # set up integration times
    t0 = 1.5
    tfinal = 5
    ntimes = 3
    times = np.linspace(t0, tfinal, ntimes)

    x0_true = np.array((1, 0.1))
    if rank == 0:
        # plot the true trajectory based on the true initial conditions
        x_true_traj = z_system.get_trajectory_quadratic(x0_true, times)
        plt.plot(x_true_traj[:,0], x_true_traj[:,1], lw=5)
        # x_true_traj2 = z_system.get_trajectory_quadratic(x0_true, np.linspace(0,50,50))
        # plt.plot(np.linspace(0,50,50), x_true_traj2[:,1])
        plt.show()
    
    nsamples = 50
    a_samples = np.linspace(-.2, .2, nsamples)
    b_samples = np.linspace(-3, 3, nsamples)
    data = np.empty((nsamples*nsamples, 5)) # space for obj. fn. evals

    count = 0
    for a in uf.parallelize_iterable(a_samples, rank, nprocs):
        for b in b_samples:
            z_system.change_parameters(np.array((a, b, lam_true, eps_true)))
            try:
                x_sample_traj = z_system.get_trajectory_quadratic(x0_true, times)
            except CustomErrors.IntegrationError:
                continue
            else:
                data[count,:3] = x_sample_traj[:,1]
                data[count,3:] = (a, b)
                count = count + 1

    data = data[:count]
    all_data = comm.gather(data, root=0)

    if rank == 0:
        all_data = np.concatenate(all_data)

        # plot output
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        # ax.set_xscale('log')
        # ax.set_yscale('log')
        ax.scatter(np.log10(all_data[:,0]), np.log10(all_data[:,1]), np.log10(all_data[:,2]), c=all_data[:,3], lw=0)
        # ax.set_ylabel(r'$\lambda$')
        # ax.set_xlabel(r'$\epsilon$')
        ax.set_xlabel(r'$log(y_1)$')
        ax.set_ylabel(r'$log(y_2)$')
        ax.set_zlabel(r'$log(y_3)$')
        ax.set_title('colored by a')
        plt.show()

        # plot output
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        # ax.set_xscale('log')
        # ax.set_yscale('log')
        ax.scatter(np.log10(all_data[:,0]), np.log10(all_data[:,1]), np.log10(all_data[:,2]), c=all_data[:,4], lw=0)
        # ax.set_ylabel(r'$\lambda$')
        # ax.set_xlabel(r'$\epsilon$')
        ax.set_xlabel(r'$log(y_1)$')
        ax.set_ylabel(r'$log(y_2)$')
        ax.set_zlabel(r'$log(y_3)$')
        ax.set_title('colored by b')
        plt.show()
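# The gather-and-concatenate pattern used throughout these examples is standard mpi4py:
# each rank trims its local array to the rows it actually filled, comm.gather collects the
# arrays into a list on the root rank (None elsewhere), and the root concatenates them.
# A minimal standalone sketch:
from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
local_data = np.full((3, 2), rank, dtype=float)  # each rank's partial results
gathered = comm.gather(local_data, root=0)       # list of arrays on root, None on other ranks
if rank == 0:
    all_data = np.concatenate(gathered)
    print(all_data.shape)                        # (3 * nprocs, 2)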
Example #6
def check_params():
    """Plots grid of parameters colored by objective function value"""
    # init MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    nprocs = comm.Get_size()

    # specify ode parameters
    (a_true, b_true, lam_true, eps_true) = (1.0, 0.01, 1.0, 0.001) #(0.1, 0.01, 0.1, 0.1)
    params = np.array((a_true, b_true, lam_true, eps_true))
    # create system with given params
    z_system = ZM.Z_Model(params)

    # set up integration times
    t0 = 0
    tfinal = 1/lam_true
    dt = eps_true
    times = np.arange(t0, tfinal, dt)
    ntimes = times.shape[0]

    # get true trajectory based on true initial conditions
    x0_true = np.array((1, a_true))
    x_true_traj = z_system.get_trajectory_quadratic(x0_true, times)

    # set up sampling grid and storage space for obj. fn. evals
    nsamples = 100
    a_samples = np.logspace(-2, 1, nsamples)
    lam_samples = np.logspace(-2, 1, nsamples)
    # b_samples = np.logspace(-4, -1, nsamples)
    # a_samples = np.linspace(-.2, .2, nsamples)
    # b_samples = np.linspace(-3, 3, nsamples)
    data = np.empty((nsamples*nsamples, 3)) # space for obj. fn. evals

    count = 0
    for a in uf.parallelize_iterable(a_samples, rank, nprocs):
        for lam in lam_samples:
            z_system.change_parameters(np.array((a, b_true, lam, eps_true)))
            try:
                x_sample_traj = z_system.get_trajectory_quadratic(x0_true, times)
            except CustomErrors.IntegrationError:
                continue
            else:
                data[count] = (a, lam, get_of(x_sample_traj, x_true_traj))
                count = count + 1

    data = data[:count]
    all_data = comm.gather(data, root=0)

    if rank == 0:
        all_data = np.concatenate(all_data)
        all_data.dump('./data/a-lam-of.pkl')

        # plot output
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_xscale('log')
        ax.set_yscale('log')
        ax.scatter(all_data[:,0], all_data[:,1], c=np.log10(all_data[:,2]), s=40)
        # ax.set_ylabel(r'$\lambda$')
        # ax.set_xlabel(r'$\epsilon$')
        ax.set_xlabel('a')
        ax.set_ylabel(r'$\lambda$')
        plt.show()
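# get_of is defined elsewhere in the module. Judging from its use as
# get_of(x_sample_traj, x_true_traj), a plausible least-squares objective would be the
# following sketch (an assumption, not the original definition):
import numpy as np

def get_of(sample_traj, true_traj):
    """Sum of squared deviations between a sampled trajectory and the true trajectory."""
    return np.sum(np.power(sample_traj - true_traj, 2))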
Example #7
def dmaps_transformed_params():
    """Perform DMAP on nonlinear, swirled transformation of parameters lambda/epsilon (important/sloppy)"""
    
    if os.path.isfile('./data/a-lam-ofevals-2016.csv'): # os.path.isfile('./data/lam_eps_ofevals-2016.csv'):
        # PERFORM THE DMAP (already generated pts):

        print('Already have sloppy data, transforming and embedding')

        data = np.genfromtxt('./data/a-lam-ofevals-2016.csv', delimiter=',')

        # extract sloppy parameter combinations
        tol = 50 # 0.01
        data = data[data[:,-1] < tol]

        # transform into swirl in c1/c2
        S = 1.0 # for now we require S <= 1 to invert back to lambda/epsilon
        # questionable redefinition of max param values
        lam_max, epsmax = np.max(data[:,:2], axis=0)
        # c1 = lambda l, e: np.sqrt(e/epsmax + l/(S*lam_max))*np.cos(2*np.pi*S*e/epsmax)
        # c2 = lambda l, e: np.sqrt(e/epsmax + l/(S*lam_max))*np.sin(2*np.pi*S*e/epsmax)

        y1 = lambda l, e: l + np.power(np.log10(e)- np.average(np.log10(e)), 2)
        y2 = lambda l, e: np.log10(e) - np.average(np.log10(e))

        a = 1.3
        b = 0.3
        
        # do the actual transformation
        cs1 = np.array(henon(data[:,1], data[:,0], 1, a, b)).T
        cs2 = np.array(henon(data[:,1], data[:,0], 2, a, b)).T
        cs3 = np.array(henon(data[:,1], data[:,0], 3, a, b)).T
        cs4 = np.array(henon(data[:,1], data[:,0], 4, a, b)).T

        # look at dataset and subsequent transformations
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.scatter(data[:,1], data[:,0], c=data[:,2], s=3)
        ax.set_xlabel(r'$x_0 (= \lambda)$', fontsize=72)
        ax.set_ylabel(r'$y_0 (= a)$', fontsize=72)
        fig.subplots_adjust(bottom=0.15)


        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.scatter(cs1[:,0], cs1[:,1], c=data[:,2], s=3)
        ax.set_xlabel(r'$x_1$', fontsize=72)
        ax.set_ylabel(r'$y_1$', fontsize=72)
        fig.subplots_adjust(bottom=0.15)


        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.scatter(cs2[:,0], cs2[:,1], c=data[:,2], s=3)
        ax.set_xlabel(r'$x_2$', fontsize=72)
        ax.set_ylabel(r'$y_2$', fontsize=72)
        fig.subplots_adjust(bottom=0.15)


        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.scatter(cs3[:,0], cs3[:,1], c=data[:,2], s=3)
        ax.set_xlabel(r'$x_3$', fontsize=72)
        ax.set_ylabel(r'$y_3$', fontsize=72)
        fig.subplots_adjust(bottom=0.15)


        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.scatter(cs4[:,0], cs4[:,1], c=data[:,2], s=3)
        ax.set_xlabel(r'$x_4$', fontsize=72)
        ax.set_ylabel(r'$y_4$', fontsize=72)
        fig.subplots_adjust(bottom=0.15)


        plt.show()

        # neps = 8
        # eps = np.logspace(-3, 3, neps)
        # epsilon_plot(eps, cs)
        eps = 1e-1
        eigvals, eigvects = dmaps.embed_data(cs2, k=12, epsilon=eps)
        plot_dmaps.plot_xy(cs2[:,0], cs2[:,1], color=eigvects[:,1], scatter=True, xlabel=r'$y_1$', ylabel=r'$y_2$')
        # plot_dmaps.plot_embeddings(eigvects, eigvals, k=4)
    else:
        # CREATE DATASET (no dataset exists):
        # init MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        nprocs = comm.Get_size()

        # set up base system
        # specify ode parameters
        (a_true, b_true, lam_true, eps_true) = (1.0, 0.01, 1.0, 0.001) # (0.1, 0.01, 0.1, 0.001)
        params = np.array((a_true, b_true, lam_true, eps_true))
        # create system with given params
        z_system = ZM.Z_Model(params)

        # set up integration times
        t0 = 0
        tfinal = 1.0/lam_true
        dt = eps_true
        times = np.arange(t0, tfinal, dt)
        ntimes = times.shape[0]

        # get true trajectory based on true initial conditions
        x0_true = np.array((1, a_true))
        x_true_traj = z_system.get_trajectory_quadratic(x0_true, times)

        # # set up sampling grid and storage space for obj. fn. evals
        # lam_max = 1.2
        # epsmax = 1e-1
        # nsamples = 500
        # lam_samples = np.linspace(0.9*lam_true, 1.1*lam_true, nsamples)
        # eps_samples = np.linspace(0, epsmax, nsamples)
        # eps_samples = np.logspace(-6, np.log10(epsmax), nsamples)
        # data = np.empty((nsamples*nsamples, 3)) # space for obj. fn. evals
        nsamples = 40000
        data = np.empty((nsamples, 3))
        a_lam_samples = np.random.uniform(size=(nsamples, 2))*np.array((1.5,1.5)) + np.array((0.25, 0.25)) # a, lam \in (0.25, 1.75)

        count = 0
        for a, lam in uf.parallelize_iterable(a_lam_samples, rank, nprocs):
            z_system.change_parameters(np.array((a, b_true, lam, eps_true)))
            try:
                x_sample_traj = z_system.get_trajectory_quadratic(x0_true, times)
            except CustomErrors.IntegrationError:
                continue
            else:
                data[count] = (a, lam, get_of(x_sample_traj, x_true_traj))
                count = count + 1


        # count = 0
        #     for eps in eps_samples:
        #         z_system.change_parameters(np.array((a_true, b_true, lam, eps)))
        #         try:
        #             x_sample_traj = z_system.get_trajectory(x0_true, times)
        #         except CustomErrors.IntegrationError:
        #             continue
        #         else:
        #             data[count] = (lam, eps, get_of(x_sample_traj, x_true_traj))
        #             count = count + 1

        data = data[:count]
        all_data = comm.gather(data, root=0)

        if rank == 0:
            all_data = np.concatenate(all_data)
            np.savetxt('./data/a-lam-ofevals-2016.csv', all_data, delimiter=',')
            print '******************************\n \