Example #1
def initial_conditions(basic_prm, city_data, min_days, Julia, correction=1.0):
    """Fits the data and defines the initial conditions of the SEIR model.
    """
    population = city_data["estimated_population_2019"].iloc[0]
    confirmed = city_data["confirmed"]

    # Compute the new cases from the confirmed sum
    new_cases = confirmed.values[1:] - confirmed.values[:-1]

    # Use a 7-day mean to smooth the data (especially to deal with weekends)
    observed_I = np.convolve(new_cases, np.ones(7, dtype=int), 'valid') / 7.0

    # Now accumulate in the inf_window
    inf_window = int(round(basic_prm["tinf"]))
    observed_I = np.convolve(observed_I, np.ones(inf_window, dtype=int), 'valid')

    ndays = len(observed_I)
    if ndays >= min_days and sum(observed_I) > 0:
        observed_I /= population
        Julia.observed_I = correction*observed_I
        Julia.tinc = basic_prm["tinc"]
        Julia.tinf = basic_prm["tinf"]
        Julia.rep = basic_prm["rep"]
        Julia.eval('initialc = fit_initial(tinc, tinf, rep, observed_I)')
        S1 = Julia.initialc[0]
        E1 = Julia.initialc[1]
        I1 = Julia.initialc[2]
        R1 = Julia.initialc[3]
        return (S1, E1, I1, R1, ndays), observed_I, population
    else:
        raise ValueError("Not enough data for %s: only %d days available" %
            (city_data["city"].iloc[0], len(observed_I)))
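A minimal sketch of how this function might be driven (not part of the original example; it assumes robot_dance.jl defines fit_initial, as in Example #2, and uses illustrative parameters and synthetic case counts):

import numpy as np
import pandas as pd
from julia.api import Julia as JuliaApi
jl = JuliaApi(compiled_modules=False)
from julia import Main as Julia
Julia.eval('include("robot_dance.jl")')  # path and contents assumed, as in Example #2

basic_prm = {"tinc": 5.2, "tinf": 2.9, "rep": 2.5}  # illustrative values only
city_data = pd.DataFrame({
    "city": ["São Paulo"] * 60,
    "estimated_population_2019": [12_252_023] * 60,
    "confirmed": np.cumsum(np.random.poisson(50, size=60)),  # synthetic confirmed counts
})
(S1, E1, I1, R1, ndays), observed_I, population = initial_conditions(
    basic_prm, city_data, min_days=30, Julia=Julia)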
Example #2
def import_julia_and_robot_dance():
    # To use PyJulia
    print('Loading Julia library...')
    from julia.api import Julia
    jl = Julia(compiled_modules=False)
    from julia import Main as Julia
    print('Loading Julia library... Ok!')
    print('Loading Robot-dance Julia module...')
    Julia.eval('include("robot_dance.jl")')
    print('Loading Robot-dance Julia module... Ok!')
Example #3
def train_oct(X_train, y_train, X_test, y_test, output_path, seed=1):
    from julia.api import Julia
    jl = Julia(compiled_modules=False)
    from interpretableai import iai

    X_train = impute_missing(X_train)
    X_test = impute_missing(X_test)

    oct_grid = iai.GridSearch(
        iai.OptimalTreeClassifier(random_seed=seed, ),
        max_depth=range(1, 10),
        # minbucket=[5, 10, 15, 20, 25, 30, 35],
        criterion=['gini', 'entropy', 'misclassification'],
        ls_num_tree_restarts=200,
    )
    oct_grid.fit_cv(X_train, y_train, n_folds=5, validation_criterion='auc')
    best_learner = oct_grid.get_learner()
    best_learner.write_json('%s/learner.json' % output_path)
    best_learner.write_questionnaire('%s/app.html' % output_path)
    best_learner.write_html('%s/tree.html' % output_path)
    best_learner.write_png('%s/tree.png' % output_path)
    in_auc = oct_grid.score(X_train, y_train, criterion='auc')
    out_auc = oct_grid.score(X_test, y_test, criterion='auc')
    in_mis = oct_grid.score(X_train, y_train, criterion='misclassification')
    out_mis = oct_grid.score(X_test, y_test, criterion='misclassification')
    print('In Sample AUC', in_auc)
    print('Out of Sample AUC', out_auc)
    print('In Sample Misclassification', in_mis)
    print('Out of Sample Misclassification', out_mis)
    return best_learner, in_auc, out_auc, in_mis, out_mis
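A hypothetical invocation of train_oct (assumptions: an InterpretableAI license is configured, an impute_missing helper such as the one referenced above is importable, and the output directory exists):

import os
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split

X, y = load_breast_cancer(return_X_y=True, as_frame=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)

os.makedirs("oct_output", exist_ok=True)
learner, in_auc, out_auc, in_mis, out_mis = train_oct(
    X_train, y_train, X_test, y_test, output_path="oct_output", seed=1)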
Example #4
    def objective(**params):

        if name_algo != 'oct':
            model = algorithm()
            model.set_params(**params)
            score = np.mean(
                cross_val_score(model,
                                X,
                                y,
                                cv=cv,
                                n_jobs=-1,
                                scoring="roc_auc"))

        else:
            from julia.api import Julia
            jl = Julia(compiled_modules=False)
            from interpretableai import iai

            params["max_depth"] = int(params["max_depth"])
            grid = iai.GridSearch(iai.OptimalTreeClassifier(random_seed=1),
                                  **params)

            grid.fit_cv(X, y, n_folds=cv, validation_criterion='auc')
            score = float(grid.get_grid_results()[[
                'split' + str(i) + '_valid_score' for i in range(1, cv + 1)
            ]].T.mean())

        return -score
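The closure above returns the negative cross-validated AUC, so it is presumably handed to a minimizer. A sketch of a possible surrounding setup with scikit-optimize (the search space, dataset, and algorithm choice are assumptions; the OCT branch is omitted):

import numpy as np
from skopt import gp_minimize
from skopt.space import Integer
from skopt.utils import use_named_args
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

X, y = load_breast_cancer(return_X_y=True)
cv, algorithm = 5, RandomForestClassifier
space = [Integer(2, 20, name="max_depth"), Integer(10, 200, name="n_estimators")]

@use_named_args(space)
def objective(**params):
    # simplified version of the non-OCT branch above
    model = algorithm()
    model.set_params(**params)
    return -np.mean(cross_val_score(model, X, y, cv=cv, n_jobs=-1, scoring="roc_auc"))

result = gp_minimize(objective, space, n_calls=20, random_state=1)
print("best AUC:", -result.fun, "best parameters:", result.x)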
Example #5
def initialize(
        runtime = default_runtime,
        project = default_project,
        compiled_modules = default_compiled_modules,
        **kwargs,
    ):
    """
    Initialize julia runtime and import DECAES
    """
    if project is not None:
        os.environ["JULIA_PROJECT"] = project

    # Initialize the Julia runtime. Precompiled Julia modules are not supported when Python
    # is statically linked to libpython, e.g. on Debian-based Linux distributions such as
    # Ubuntu or on Python installed via Conda.
    #   See: https://pyjulia.readthedocs.io/en/stable/troubleshooting.html#your-python-interpreter-is-statically-linked-to-libpython
    global julia_runtime
    julia_runtime = Julia(
        runtime = runtime,
        compiled_modules = compiled_modules,
        **kwargs,
    )

    # Import DECAES
    global DECAES
    from julia import DECAES
Example #6
def scenario_julia_call(scenario_info, start_index, end_index):
    """
    Starts a Julia engine, runs the add_path file to load Julia code.
    Then, loads the data path and runs the scenario.

    :param dict scenario_info: scenario information.
    :param int start_index: start index.
    :param int end_index: end index.
    """

    from julia.api import Julia
    jl = Julia(compiled_modules=False)
    from julia import Main
    from julia import REISE

    interval = int(scenario_info['interval'].split('H', 1)[0])
    n_interval = int((end_index - start_index + 1) / interval)

    input_dir = os.path.join(const.EXECUTE_DIR,
                             'scenario_%s' % scenario_info['id'])
    output_dir = os.path.join(const.EXECUTE_DIR,
                              'scenario_%s/output/' % scenario_info['id'])

    REISE.run_scenario(interval=interval,
                       n_interval=n_interval,
                       start_index=start_index,
                       inputfolder=input_dir,
                       outputfolder=output_dir)
    Main.eval('exit()')
Example #7
def spectral(data, name, embedding_dim=128):
    try:
        result = load_embedding(name, 'spectral')
        return result
    except FileNotFoundError:
        print(
            f'cache/feature/spectral_{name}.pt not found! Regenerating it now')

    from julia.api import Julia
    jl = Julia(compiled_modules=False)
    from julia import Main
    Main.include(f'{CWD}/norm_spec.jl')
    print('Setting up spectral embedding')

    if data.setting == 'inductive':
        N = data.num_train_nodes
        edge_index = to_undirected(data.train_edge_index,
                                   num_nodes=data.num_train_nodes)
    else:
        N = data.num_nodes
        edge_index = to_undirected(data.edge_index, num_nodes=data.num_nodes)

    np_edge_index = np.array(edge_index.T)
    row, col = edge_index
    adj = SparseTensor(row=row, col=col, sparse_sizes=(N, N))
    adj = adj.to_scipy(layout='csr')

    result = torch.tensor(Main.main(adj, embedding_dim)).float()
    save_embedding(result, name, 'spectral')
    return result
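A toy invocation (assumptions: torch, torch_geometric, torch_sparse, the norm_spec.jl script, and the load_embedding/save_embedding helpers referenced above are all available):

import torch
from torch_geometric.data import Data

edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]], dtype=torch.long)
toy = Data(edge_index=edge_index, num_nodes=3)
toy.setting = 'transductive'  # anything other than 'inductive' uses the full graph
emb = spectral(toy, name='toy_graph', embedding_dim=16)
print(emb.shape)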
Example #8
def setup(compiled_modules=True):
    julia.install()
    jl = Julia(compiled_modules=compiled_modules)

    from julia import Pkg
    Pkg.add("[email protected]")  # Lock to specific version for stability
    Pkg.add("Distributions")  # Install Distributions after Bigsimr
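After setup() finishes, the freshly installed packages can be loaded from Python through PyJulia's import hook. A sketch (assuming the imports used inside setup() and a successful installation):

import julia
from julia.api import Julia  # names referenced inside setup()

setup(compiled_modules=False)
from julia import Bigsimr, Distributions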
Example #9
def prepare_optimization(large_cities,
                         population,
                         initial_values,
                         M,
                         out,
                         target,
                         window=14,
                         ndays=400,
                         min_level=1.0,
                         hammer_duration=14,
                         hammer_level=0.89,
                         force_dif=1):
    # Infected upper bound, it is larger in São Paulo.
    ncities = len(large_cities)

    if force_dif == 1:
        force_dif = np.ones((ncities, ndays))

    Julia.s1 = initial_values.loc[large_cities, "S0"].values
    Julia.e1 = initial_values.loc[large_cities, "E0"].values
    Julia.i1 = initial_values.loc[large_cities, "I0"].values
    Julia.r1 = initial_values.loc[large_cities, "R0"].values
    Julia.out = out.values
    Julia.M = M.values.copy()
    Julia.population = population.values.copy()
    Julia.ndays = ndays
    Julia.target = target
    Julia.hammer_duration = hammer_duration
    Julia.hammer_level = hammer_level
    Julia.min_level = min_level
    Julia.force_dif = force_dif
    if window == 1:
        Julia.eval("""
            prm = SEIR_Parameters(ndays, s1, e1, i1, r1, out, sparse(M), sparse(M'))
            m = control_multcities(prm, population, target, force_dif, hammer_duration, 
                                   hammer_level, min_level)
        """)
    else:
        Julia.window = window
        Julia.eval("""
            prm = SEIR_Parameters(ndays, s1, e1, i1, r1, out, sparse(M), sparse(M'))
            m = window_control_multcities(prm, population, target, window, force_dif, 
                                          hammer_duration, hammer_level, min_level);
        """)
Example #10
def save_result(cities_names, filename):
    """Save the result of a run for further processing.
    """
    Julia.eval(
        "s = value.(m[:s]); e = value.(m[:e]); i = value.(m[:i]); r = value.(m[:r])"
    )
    Julia.eval("rt = expand(value.(m[:rt]), prm)")
    df = []
    for i in range(len(cities_names)):
        c = cities_names[i]
        df.append([c, "s"] + list(Julia.s[i, :]))
        df.append([c, "e"] + list(Julia.e[i, :]))
        df.append([c, "i"] + list(Julia.i[i, :]))
        df.append([c, "r"] + list(Julia.r[i, :]))
        df.append([c, "rt"] + list(Julia.rt[i, :]))
    df = pd.DataFrame(df,
                      columns=["City", "Variable"] +
                      list(range(len(Julia.s[0, :]))))
    df.set_index(["City", "Variable"], inplace=True)
    return df.to_csv(filename)
Example #11
def prepare_optimization(basic_prm,
                         cities_data,
                         mob_matrix,
                         target,
                         force_dif=1):
    ncities, ndays = len(cities_data.index), int(basic_prm["ndays"])
    if force_dif == 1:
        force_dif = np.ones((ncities, ndays))

    Julia.tinc = basic_prm["tinc"]
    Julia.tinf = basic_prm["tinf"]
    Julia.rep = basic_prm["rep"]
    Julia.s1 = cities_data["S1"].values
    Julia.e1 = cities_data["E1"].values
    Julia.i1 = cities_data["I1"].values
    Julia.r1 = cities_data["R1"].values
    Julia.population = cities_data["population"].values
    Julia.out = mob_matrix["out"].values
    Julia.M = mob_matrix.values[:, :-1]
    Julia.ndays = ndays
    Julia.target = target.values
    Julia.hammer_duration = int(basic_prm["hammer_duration"])
    Julia.hammer_level = basic_prm["hammer_level"]
    Julia.min_level = basic_prm["min_level"]
    Julia.force_dif = force_dif
    if basic_prm["window"] == 1:
        Julia.eval("""
            prm = SEIR_Parameters(tinc, tinf, rep, ndays, s1, e1, i1, r1, 1, out, sparse(M), 
                                  sparse(M'))
            m = control_multcities(prm, population, target, force_dif, hammer_duration, 
                                   hammer_level, min_level)
        """)
    else:
        Julia.window = basic_prm["window"]
        Julia.eval("""
            prm = SEIR_Parameters(tinc, tinf, rep, ndays, s1, e1, i1, r1, window, out, 
                                  sparse(M), sparse(M'))
            m = window_control_multcities(prm, population, target, force_dif, 
                                          hammer_duration, hammer_level, min_level);
        """)
Example #12
def initialize_worker(params, proc_num):
	global my_number 
	my_number = proc_num
	#make new random seed, as seed+Process_number
	local_seed = int(params.seed)+my_number
	random.seed(local_seed)
	
	global jl
	from julia.api import Julia
	from julia import Base, Main
	from julia.Main import println, redirect_stdout
	
	#establish connection to julia
	print("Worker",proc_num,"connecting to Julia...\n")
	if params.sys_image is not None:
		jl = Julia(init_julia=False, sysimage=params.sys_image, julia=params.julia, compiled_modules=params.compiled_modules)
	else:
		jl = Julia(init_julia=False, julia=params.julia, compiled_modules=params.compiled_modules)
	
	if my_number == 0:
		print("Loading Circuitscape in Julia...\n")
	#jl.eval("using Pkg;")
	jl.eval("using Circuitscape; using Suppressor;")
	#Main.eval("stdout")
	
	load_data(params, my_number)
	
	return(local_seed)
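Once a worker is connected, it can run jobs through the loaded package. A sketch of what that might look like (assumptions: Circuitscape.jl's compute entry point and an INI file prepared beforehand, e.g. by load_data):

jl.eval('@suppress compute("worker_0_job.ini")')  # hypothetical INI path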
Example #13
    def run_julia(self):
        print("Loading Julia....")
        from julia.api import Julia
        jl = Julia(compiled_modules=False)
        from julia import Main as j

        I = np.genfromtxt(self.us_exchanges_filename, delimiter=" ")
        age_fracs_orig = np.genfromtxt(self.age_fracs_filename, delimiter=",")
        # drop the first column, which is not part of the age fractions
        age_fracs = np.delete(age_fracs_orig, 0, 1)

        employed = pd.read_csv(self.employment_filename,
                               dtype={
                                   "Sector": str,
                                   "Feb": np.int64
                               })
        print(
            f"Employed dataframe from {self.employment_filename}:\n {employed}"
        )

        # if np.isnan(I).any() or np.isnan(age_fracs).any() or np.isnan(employed).any():
        if np.isnan(I).any():
            raise ValueError(
                f"NAN found in US_EXCHANGES file {self.us_exchanges_filename}; skipping"
            )

        if np.isnan(age_fracs).any():
            raise ValueError(
                f"NAN found in age_fracs file {self.age_fracs_filename}; skipping"
            )

        j.eval("using DataFrames")
        julia_formatted_employed = j.DataFrame(employed.to_dict(orient='list'))

        if employed.isnull().values.any():
            raise ValueError(
                f"NAN found in employment data {self.employment_filename}; skipping"
            )

        print(f"Starting Julia run for SES {self.employment_filename}...")
        j.include("CASES.jl")
        returned_result = j.main(I, age_fracs, julia_formatted_employed,
                                 self.employment_percentage)
        print("Julia run complete.")

        self.cases_surfaces = returned_result[1]
        self.cases_complete = returned_result[0]

        self.write_binary_dump(self.binary_dump_filename_surfaces,
                               self.cases_surfaces)
        self.write_binary_dump(self.binary_dump_filename_complete,
                               self.cases_complete)
Example #14
def optimize_and_show_results(i_fig, rt_fig, data_file, large_cities):
    """Optimize and save figures and data for further processing.
    """

    Julia.eval("""
        optimize!(m)
        rt = value.(m[:rt]); i = value.(m[:i])
    """)

    for i in range(len(large_cities)):
        plt.plot(Julia.rt[i, :], label=large_cities[i], lw=5, alpha=0.5)
    plt.legend()
    plt.title("Target reproduction rate")
    plt.savefig(rt_fig)

    plt.clf()
    for i in range(len(large_cities)):
        plt.plot(Julia.i[i, :], label=large_cities[i])
    plt.legend()
    plt.title("Infection level")
    plt.savefig(i_fig)

    save_result(large_cities, data_file)
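For context, a sketch of how this helper chains with the other functions from the same project shown on this page (the input data frames are assumed to have been built elsewhere):

# large_cities, population, initial_values, M, out, target prepared beforehand (not shown)
prepare_optimization(large_cities, population, initial_values, M, out, target)
optimize_and_show_results("i_plot.png", "rt_plot.png", "result.csv", large_cities)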
Example #15
def initial_conditions(city,
                       covid_data,
                       covid_window,
                       min_days,
                       Julia,
                       correction=1.0):
    """Fits the data and defines the initial conditions of the SEIR model.
    """
    # Gets the city data
    city_data = covid_data[covid_data["city"] == city].copy()
    city_data.reset_index(inplace=True)
    city_data.sort_values(by=["date"], inplace=True)
    population = city_data["estimated_population_2019"].iloc[0]
    confirmed = city_data["confirmed"]

    # I am computing the new cases instead of using the new_confirmed column because
    # there is an error at least in the first element for São Paulo. It should be 1.
    new_cases = confirmed.values[1:] - confirmed.values[:-1]
    new_cases = np.append(confirmed[0], new_cases)
    city_data["new_cases"] = new_cases

    observed_I = city_data["new_cases"].rolling(covid_window).sum()
    observed_I[:covid_window] = confirmed[:covid_window]
    ndays = len(observed_I)
    if ndays >= min_days:
        observed_I /= population
        Julia.observed_I = correction * observed_I.values
        Julia.eval('initialc = fit_initial(observed_I)')
        S0 = Julia.initialc[0]
        E0 = Julia.initialc[1]
        I0 = Julia.initialc[2]
        R0 = Julia.initialc[3]
        return (S0, E0, I0, R0, ndays), observed_I
    else:
        raise ValueError("Not enough data for %s: only %d days available" %
                         (city, len(observed_I)))
Example #16
def spectral(data, post_fix):
    from julia.api import Julia
    jl = Julia(compiled_modules=False)
    from julia import Main
    Main.include("./norm_spec.jl")
    print('Setting up spectral embedding')
    data.edge_index = to_undirected(data.edge_index)
    np_edge_index = np.array(data.edge_index.T)

    N = data.num_nodes
    row, col = data.edge_index
    adj = SparseTensor(row=row, col=col, sparse_sizes=(N, N))
    adj = adj.to_scipy(layout='csr')
    result = torch.tensor(Main.main(adj, 128)).float()
    torch.save(result, f'embeddings/spectral{post_fix}.pt')

    return result
Example #17
def prepare_optimization(basic_prm,
                         cities_data,
                         mob_matrix,
                         target,
                         hammer_data,
                         force_dif=1,
                         verbosity=0):
    ncities, ndays = len(cities_data.index), int(basic_prm["ndays"])
    if force_dif == 1:
        force_dif = np.ones((ncities, ndays))

    Julia.tinc = basic_prm["tinc"]
    Julia.tinf = basic_prm["tinf"]
    Julia.rep = basic_prm["rep"]
    Julia.s1 = cities_data["S1"].values
    Julia.e1 = cities_data["E1"].values
    Julia.i1 = cities_data["I1"].values
    Julia.r1 = cities_data["R1"].values
    Julia.population = cities_data["population"].values
    Julia.out = mob_matrix["out"].values
    Julia.M = mob_matrix.values[:, :-1]
    Julia.ndays = ndays
    Julia.target = target.values
    Julia.min_level = basic_prm["min_level"]
    Julia.force_dif = force_dif
    Julia.hammer_duration = hammer_data["duration"].values
    Julia.hammer_level = hammer_data["level"].values
    Julia.verbosity = verbosity
    if basic_prm["window"] == 1:
        Julia.eval("""
            prm = SEIR_Parameters(tinc, tinf, rep, ndays, s1, e1, i1, r1, 1, out, sparse(M), 
                                  sparse(M'))
            m = control_multcities(prm, population, target, force_dif, hammer_duration, 
                                   hammer_level, min_level, verbosity)
        """)
    else:
        Julia.window = basic_prm["window"]
        Julia.eval("""
            prm = SEIR_Parameters(tinc, tinf, rep, ndays, s1, e1, i1, r1, window, out, 
                                  sparse(M), sparse(M'))
            m = window_control_multcities(prm, population, target, force_dif, 
                                          hammer_duration, hammer_level, min_level, verbosity);
        """)

    # Check if there is a ramp parameter (delta_rt_max)
    # If so, add ramp constraints to the model
    if 'delta_rt_max' in basic_prm:
        Julia.delta_rt_max = basic_prm["delta_rt_max"]
        Julia.verbosity = verbosity
        Julia.eval("""
            m = add_ramp(m, prm, hammer_duration, delta_rt_max, verbosity)
        """)
Example #18
def save_result(basic_prm, cities_data, target, filename):
    """Save the result of a run for further processing.
    """
    cities_names = cities_data.index
    n_cities = len(cities_names)
    Julia.eval(
        "s = value.(m[:s]); e = value.(m[:e]); i = value.(m[:i]); r = value.(m[:r])"
    )
    Julia.eval("rt = expand(value.(m[:rt]), prm)")
    n = len(Julia.s[0, :])
    Julia.eval("test = value.(m[:test])")
    df = []

    for i in range(n_cities):
        c = cities_names[i]
        df.append([c, "s"] + list(Julia.s[i, :]))
        df.append([c, "e"] + list(Julia.e[i, :]))
        df.append([c, "i"] + list(Julia.i[i, :]))
        df.append([c, "r"] + list(Julia.r[i, :]))
        df.append([c, "rt"] + list(Julia.rt[i, :]))
        df.append([c, "rel. test"] + list(Julia.test[i, :]))
        df.append([c, "test"] +
                  list(Julia.test[i, :] * cities_data.loc[c, "population"]))

        # Information on ICU
        icu_capacity = cities_data.loc[c, "population"] * cities_data.loc[
            c, "icu_capacity"]
        df.append([c, "icu_capacity"] + list(icu_capacity * np.ones(n)))
        icu_target = icu_capacity * target.loc[c, :]
        df.append([c, "target_icu"] + list(icu_target))
        rho_icu = SimpleTimeSeries(*cities_data.iloc[i, 7:-2])
        confidence = cities_data.loc[c, "confidence"]
        mean_icu, upper_icu = rho_icu.get_upper_bound(n, confidence)
        df.append([c, "mean_rho_icu"] + list(mean_icu))
        df.append([c, "upper_rho_icu"] + list(upper_icu))
        mean_icu = cities_data.loc[c, "time_icu"] / basic_prm[
            "tinf"] * mean_icu * cities_data.loc[c,
                                                 "population"] * Julia.i[i, :]
        df.append([c, "mean_used_icu"] + list(mean_icu))
        upper_icu = cities_data.loc[c, "time_icu"] / basic_prm[
            "tinf"] * upper_icu * cities_data.loc[c,
                                                  "population"] * Julia.i[i, :]
        df.append([c, "upper_used_icu"] + list(upper_icu))

    df = pd.DataFrame(df,
                      columns=["City", "Variable"] +
                      list(range(len(Julia.s[0, :]))))
    df.set_index(["City", "Variable"], inplace=True)
    df.to_csv(filename)
    return df
Example #19
def OptimalRoute(request):
    with connection.cursor() as cursor:
        cursor.execute(
            "SELECT id, location, amount, instance, ST_AsText(geolocation) FROM nodes_garbagenodes;"
        )
        row = cursor.fetchall()
    columns = ["id", "Location", "amount", "instance", "Geolocation"]
    df = pd.DataFrame(row, columns=columns)
    NonOptDict = df.to_dict('records')
    """
    Julia API backend for the genetic algorithm, which takes the
    dictionary to be optimised as input.
    """
    from julia.api import Julia
    jl = Julia(compiled_modules=False)
    from julia import Main
    Main.include('./nodes/Genetic.jl')
    res, des = Main.initialMatrix(NonOptDict)

    return Response(res)
Example #20
def NearestPlaces(request):
    with connection.cursor() as cursor:
        cursor.execute(
            "SELECT id, location, amount, instance, ST_AsText(geolocation) FROM nodes_garbagenodes;"
        )
        row = cursor.fetchall()
    columns = ["id", "Location", "amount", "instance", "Geolocation"]
    df = pd.DataFrame(row, columns=columns)
    NonOptDict = df.to_dict('records')
    """
    Julia API backend for the genetic algorithm, which takes the
    dictionary to be optimised as input.
    """
    from julia.api import Julia
    jl = Julia(compiled_modules=False)
    from julia import Main

    # data = request.data
    Main.include('./nodes/Genetic.jl')
    data = (57.48358652376267, -20.260487410613507)
    res = Main.binsearch(NonOptDict, data)
    return Response(res)
Example #21
    def launch_scenario(self,
                        execute_dir=None,
                        threads=None,
                        solver_kwargs=None):
        """Launches the scenario.

        :param None/str execute_dir: directory for execute data. None defaults to an
            execute folder that will be created in the input directory
        :param None/int threads: number of threads to use.
        :param None/dict solver_kwargs: keyword arguments to pass to solver (if any).
        :return: (*int*) runtime of scenario in seconds
        """
        self.execute_dir = execute_dir
        self.threads = threads
        self._print_settings()
        # Import these within function because there is a lengthy compilation step
        from julia.api import Julia

        Julia(compiled_modules=False)
        from julia import Gurobi  # noqa: F401
        from julia import REISE

        start = time()
        REISE.run_scenario_gurobi(
            interval=self.interval,
            n_interval=self.n_interval,
            start_index=self.start_index,
            inputfolder=self.input_dir,
            outputfolder=self.execute_dir,
            threads=self.threads,
        )
        end = time()

        runtime = round(end - start)
        hours, minutes, seconds = sec2hms(runtime)
        print(f"Run time: {hours}:{minutes:02d}:{seconds:02d}")

        return runtime
Example #22
import numpy, ctypes
import julia
from julia.api import Julia

jl = Julia()
from julia import Main

jl.eval('include("alter_array.jl")')

a = numpy.array([1, 2, 3, 4, 5], 'uint32')
print(a)

addr = a.ctypes.data
length = a.shape[0]

Main.fn(a)
print(a)

Main.fn(addr, length)
print(a)
Example #23
#
# Make sure a mesh object to be subdivided is selected before running this script
#

import bpy, time
import numpy

scene = bpy.context.scene

from julia.api import Julia

print('Initializing Julia (this might take a moment the first time)...')
# The compiled_modules option is to work around the fact that libpython
# is linked statically in Blender.
# https://pyjulia.readthedocs.io/en/latest/troubleshooting.html#your-python-interpreter-is-statically-linked-to-libpython
jl = Julia(compiled_modules=False)
print('Done!')

# Needs to come after the creation of the Julia instance above
from julia import Main

jl.eval('include("catmull-clark.jl")')

# Delete previous output mesh

if 'subdivided' in bpy.data.objects:
    bpy.ops.object.select_all(action='DESELECT')
    bpy.data.objects['subdivided'].select_set(True)
    bpy.ops.object.delete()

Example #24
"""
How long to wait for a transition in the L96 EBM?

In this notebook we run the stochastic L96 EBM for different noise strengths and see how long we must wait for a transition to occur.

We save the corresponding results in pickle dictionaries.
"""
from transition_time_test_utilities import *
import numpy as np
import sys
import time as tm
from pathlib import Path

print('Starting Julia Import')
# !! Julia import can take a few minutes
from julia.api import Julia
jl = Julia(compiled_modules=False)
from diffeqpy import de
import numba
print('Julia Imported')

####################################
### Experiment Set Up
####################################

# Noise Parameters
deltas = [1.0, 0.1]
epsilons = [8, 4, 2, 1, 0.5]
epsilon_delta_pairs = []
for d in deltas:
    epsilon_delta_pairs += [(e, d) for e in epsilons]
epsilon, delta = epsilon_delta_pairs[int(sys.argv[1]) - 1]
Example #25
# pip install julia si_prefix

import sys
import time
from functools import wraps

import numpy as np
from si_prefix import si_format

scale = 6
iterations = 1000

# Initializes PyJulia
print('Initializing Julia. Takes ~30 sec.')
from julia.api import Julia
jl = Julia(compiled_modules=False) # See https://pyjulia.readthedocs.io/en/latest/troubleshooting.html#your-python-interpreter-is-statically-linked-to-libpython

from julia import Main  # Loads main Julia interface
Main.eval(f'scale = {scale}')
Main.include('benchmark_tools.jl')  # Run file to add functions to namespace

def timethis(n):
    def decorate(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            t = np.zeros(n, dtype=np.float64)
            for i in range(n):
                start = time.time()
                func(*args, **kwargs)
                t[i] = time.time() - start
            return t
        return wrapper
    return decorate
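A hypothetical use of the decorator to time a routine loaded from benchmark_tools.jl (the Julia function name matmul is an assumption used only for illustration):

@timethis(iterations)
def bench_julia():
    Main.eval('matmul(scale)')  # `matmul` assumed to be defined in benchmark_tools.jl

t = bench_julia()
print(f'median time: {si_format(np.median(t))}s over {iterations} runs')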
Example #26
from skopt.utils import use_named_args
import json
import shutil
import sys

from multiprocessing import Pool

checkmethod = "DINCAE"
#checkmethod = "DIVAnd"

if len(sys.argv) > 1:
    checkmethod = sys.argv[1]

print("checkmethod ", checkmethod)
if checkmethod == "DIVAnd":
    from julia.api import Julia
    jl = Julia(compiled_modules=False)
    jl.eval('push!(LOAD_PATH,joinpath(ENV["HOME"],"projects/Julia/share"))')
    jl.eval('push!(LOAD_PATH,joinpath(ENV["HOME"],"src","CAE"))')
    from julia import dincae_insitu

epochs = 5000 * 2
epochs = 300
#epochs = 50
#epochs = 5
#epochs = 1

reconstruct_params = {
    #"epochs": 1,
    #"epochs": 1_000 * 5 * 2,
    "epochs": epochs,
    #"epochs": 5,
Example #27
try:
    from julia.api import Julia
    from julia import Main

    julia = Julia()
    Main.eval("using IterativeSolvers")
except Exception:
    print("A Julia installation is required")


def cg(A, b, **kwargs):
    '''
        This function is a wrapper for the Julia implementation of the Conjugate Gradients solver in the IterativeSolvers.jl package. \
        Conjugate Gradients solves :math:`Ax = b` approximately for :math:`x`, where :math:`A` is a symmetric, positive-definite linear operator and :math:`b` is the right-hand side vector. The method uses short recurrences and therefore has fixed memory and computational costs per iteration.

        **Arguments**

            * A: linear operator
            * b: right-hand side.

        **Keywords**

            * statevars::CGStateVariables: Has 3 arrays similar to x to hold intermediate results.
            * initially_zero::Bool: If true, assumes that iszero(x), so one matrix-vector product can be saved when computing the initial residual vector.
            * Pl = Identity(): left preconditioner of the method. Should be symmetric, positive-definite like A
            * tol::Real = sqrt(eps(real(eltype(b)))): tolerance for stopping condition :math:`|r_k| / |r_0| ≤ tol`
            * maxiter::Int = size(A,2): maximum number of iterations
            * verbose::Bool = false: print method information
            * log::Bool = false: keep track of the residual norm in each iteration.

        **Output**
Example #28
# workaround static linked python
from julia.api import Julia
__julia__ = Julia(compiled_modules=False)

import os
import sys
import subprocess

from .wrappers import apply

script_dir = os.path.dirname(os.path.realpath(__file__))


def install():
    """
    Install Julia packages required for yao-framework.
    """
    subprocess.check_call(['julia', os.path.join(script_dir, 'install.jl')])
Example #29

import os
import os.path as path
from optparse import OptionParser
import pandas as pd
import numpy as np
import pylab as plt
from pylab import rcParams
rcParams['figure.figsize'] = 14, 7

import prepare_data

# To use PyJulia
from julia.api import Julia
jl = Julia(compiled_modules=False)
from julia import Main as Julia
Julia.eval('ENV["OMP_NUM_THREADS"] = 8')
Julia.eval('include("robot_dance.jl")')


def get_options():
    '''Get options with file locations from command line.
    '''
    parser = OptionParser()
    parser.add_option(
        "--basic_parameters",
        dest="basic_prm",
        default=path.join("data", "basic_parameters.csv"),
        help="Basic parameters of the SEIR model [default: %default]")
    parser.add_option(
Example #30
#!/usr/bin/env python3

from julia.api import Julia

julia_file = 'calc_PR.jl'

jl = Julia(compiled_modules=False)

jl.eval('include("' + julia_file + '")')
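After the include, functions defined in calc_PR.jl are reachable through the Main namespace, as in the other examples on this page (the function name below is an assumption used only to illustrate the pattern):

from julia import Main
# result = Main.calc_PR(...)  # hypothetical function from calc_PR.jl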