def predict():
    json_ = request.json
    a = json.dumps(json_, sort_keys=True).encode("utf-8")
    seed = hashlib.md5(a).hexdigest()
    df = {}
    path = "{}/{}".format(base_folder, seed)
    exists = os.path.exists(path)
    # Count cached result files; os.path.isfile needs the full path, not the bare name.
    nb_files = len([name for name in os.listdir(path)
                    if os.path.isfile(os.path.join(path, name))]) if exists else 0
    if True or nb_files != 6:  # disable cache: always recompute
        measures_to_lift = [json_.get("measures")]
        measure_values = json_.get("values")
        dates = json_.get("dates")
        measure_dates = [pd.to_datetime(d) for d in dates]
        country_name = json_.get("country_name")
        country_df = merged[merged["CountryName"] == country_name]
        end_date = pd.to_datetime("2020-9-11")
        df = simulate(country_df, measures_to_lift, 0, end_date, None,
                      columns, yvar, mlp_clf, scaler,
                      measure_values=measure_values, base_folder=None,
                      seed=seed, lift_date_values=measure_dates)
        df = df.to_dict(orient='records')
        print("processed")
    return jsonify({'path': seed, 'df': df})
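# A minimal client sketch for exercising the handler above. The route path
# ("/predict"), host, and the example payload values are assumptions, not
# taken from this snippet; adjust them to the actual Flask app registration.
import requests

payload = {
    "measures": ["School closing"],   # hypothetical measure name
    "values": [0],
    "dates": ["2020-06-01"],
    "country_name": "Sweden",
}
resp = requests.post("http://localhost:5000/predict", json=payload)
print(resp.json()["path"])  # the md5 seed used as a cache key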
def run_simulation(countries, scenarios):
    end_date = pd.to_datetime("2020-9-11")
    results = pd.DataFrame()
    for country in countries:
        for scenario in scenarios:
            parameters = build_parameters(country, scenario)
            df = simulate(parameters['country_df'],
                          [parameters['measures_to_lift']],
                          0, end_date, None, columns, yvar, mlp_clf, scaler,
                          measure_values=parameters['measure_values'],
                          base_folder=None,
                          lift_date_values=parameters['measure_dates'])
            df['Scenario'] = scenario
            df['Country'] = country
            # DataFrame.append was removed in pandas 2.0; concat is the supported way.
            results = pd.concat([results, df], ignore_index=True)
    return results
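# build_parameters is not shown in this snippet; a plausible sketch of the
# dict shape the loop above consumes, assuming `merged` is in scope and a
# hypothetical `scenario_definitions` lookup maps scenario names to measure
# lists. All field values below are illustrative.
def build_parameters(country, scenario):
    country_df = merged[merged["CountryName"] == country]
    measures, values, dates = scenario_definitions[scenario]  # hypothetical lookup
    return {
        'country_df': country_df,
        'measures_to_lift': measures,
        'measure_values': values,
        'measure_dates': [pd.to_datetime(d) for d in dates],
    }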
def predict():
    client = storage.Client()
    bucket = client.bucket(GCS_BUCKET)
    json_ = request.json
    a = json.dumps(json_, sort_keys=True).encode("utf-8")
    seed = hashlib.md5(a).hexdigest()
    df = {}
    path = "{}/{}".format(base_folder, seed)
    blob = bucket.blob(path)
    # Blob.exists() takes no path argument; the blob already knows its name.
    exists = blob.exists()
    if True or not exists:  # disable cache: always recompute
        measures_to_lift = [json_.get("measures")]
        measure_values = json_.get("values")
        dates = json_.get("dates")
        measure_dates = [pd.to_datetime(d) for d in dates]
        country_name = json_.get("country_name")
        country_df = merged[merged["CountryName"] == country_name]
        end_date = pd.to_datetime("2020-9-11")
        df = simulate(country_df, measures_to_lift, 0, end_date, None,
                      columns, yvar, mlp_clf, scaler,
                      measure_values=measure_values, base_folder=None,
                      seed=seed, lift_date_values=measure_dates)
        df = df.to_dict(orient='records')
        # print("processed")
    return jsonify({'path': seed, 'df': df})
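# The handler above checks blob.exists() but never writes the blob, so the
# cache can never hit. A sketch of the missing write using the standard
# google-cloud-storage upload call; where it is invoked inside the handler
# is an assumption.
import json

def cache_result(bucket, path, records):
    blob = bucket.blob(path)
    blob.upload_from_string(json.dumps(records),
                            content_type="application/json")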
import numpy as np

from simulations import simulate

# The CSV is expected to hold two rows: actual movements and predictions.
actual, predictions = np.genfromtxt('data/test-output.csv', delimiter=',')
n_correct = np.sum(actual == predictions)
days = len(actual)
accuracy = n_correct / days

amount = 10000.0
threshold = 0.001
leverage = 4.0
result, benchmark = simulate(actual, predictions, amount=amount,
                             threshold=threshold, leverage=leverage)

print('accuracy: %s' % accuracy)
print('you start with %s SEK' % amount)
print('you end up with %s SEK %s days later' % (result, days))
print('you would have ended up with %s SEK if you just bought' % benchmark)
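# `simulate` is imported from a local module whose internals are not shown.
# A rough sketch of the trading rule its parameters suggest: act with
# `leverage` only on signals stronger than `threshold`, and compare against
# buy-and-hold. Treating actual/predictions as daily returns is purely an
# assumption; the real module may use direction labels instead.
import numpy as np

def simulate_sketch(actual, predictions, amount, threshold, leverage):
    capital = benchmark = amount
    for ret, pred in zip(actual, predictions):
        if abs(pred) > threshold:                        # only act on strong signals
            capital *= 1.0 + leverage * ret * np.sign(pred)
        benchmark *= 1.0 + ret                           # buy-and-hold baseline
    return capital, benchmark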
max_circle_flux = np.max(image_circle)
max_circle_flux_beam = max_circle_flux / (np.pi * beam[0] * beam[1] / pixsize**2)
print("Maximum flux at 2 BW from core : ", max_circle_flux, max_circle_flux_beam)
max_flux = np.max(image.image)
max_flux_beam = max_flux / (np.pi * beam[0] * beam[1] / pixsize**2)
print("Maximum flux : ", max_flux, max_flux_beam)
simulate(source, epoch, bands, n_sample=100, max_jet_flux=0.05,
         rotm_clim_sym=[-300, 300], path_to_script=path_to_script,
         mapsize_dict=mapsize_dict, mapsize_common=mapsize_common,
         base_dir=data_dir, rotm_value_0=0., rotm_grad_value=0.,
         n_rms=3., download_mojave=True, spix_clim_sym=[-1, 1])
break  # presumably exits an enclosing loop not shown in this excerpt

# ############################################################################
# # Test for ModelGenerator
# # Create jet model, ROTM & alpha images
# imsize = (512, 512)
# center = (256, 256)
# # from `y` band
# pixsize = 4.848136191959676e-10
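# The per-beam normalization above appears twice; a small helper makes it
# explicit. It mirrors the exact expression used above (an elliptical area
# pi * bmaj * bmin in pixels), with beam = (bmaj, bmin) in the same units as
# pixsize, an assumption carried over from the calls above.
import numpy as np

def flux_per_beam(flux, beam, pixsize):
    beam_area_pix = np.pi * beam[0] * beam[1] / pixsize**2  # beam area in pixels
    return flux / beam_area_pix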
# sources = ['1514-241', '1302-102', '0754+100', '0055+300', '0804+499',
#            '1749+701', '0454+844']
mapsize_dict = {'x': (512, 0.1), 'y': (512, 0.1), 'j': (512, 0.1),
                'u': (512, 0.1)}
mapsize_common = (512, 0.1)
source = '0454+844'
epoch = '2006_03_09'
max_jet_flux = 0.0015
epochs = get_epochs_for_source(source, use_db='multifreq')
simulate(source, epoch, ['x', 'y', 'j', 'u'],
         n_sample=3, max_jet_flux=max_jet_flux,
         rotm_clim_sym=[-300, 300], rotm_clim_model=[-300, 300],
         path_to_script=path_to_script, mapsize_dict=mapsize_dict,
         mapsize_common=mapsize_common, base_dir=base_dir,
         rotm_value_0=0., rotm_grad_value=0., n_rms=2.,
         download_mojave=False, spix_clim_sym=[-1.5, 1],
         spix_clim_model=[-1.5, 1], qu_fraction=0.3)
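# `epochs` is fetched above but only a single hard-coded epoch is simulated.
# A sketch of looping over every epoch instead; keyword arguments are
# abbreviated here, the full set matches the single-epoch call above.
for ep in epochs:
    simulate(source, ep, ['x', 'y', 'j', 'u'], n_sample=3,
             max_jet_flux=max_jet_flux, path_to_script=path_to_script,
             mapsize_dict=mapsize_dict, mapsize_common=mapsize_common,
             base_dir=base_dir, download_mojave=False)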
    'ETnoD': 0.0840269645109
}
param_dict = {
    'logETD': np.log(intsy_dict['ETD']),
    'logETnoD': np.log(intsy_dict['ETnoD']),
    'logPTR': np.log(intsy_dict['PTR']),
    'cleavage_probabilities': [
        1. / len(breakable_bonds) if i in breakable_bonds else 0.
        for i in range(len(P[0]) - 1)
    ]
}

print("Simulating the data...")
data = simulate(P, n=10000, multiple_ETD=multiple_ETD, **intsy_dict)
prediction = mean_values(P, **intsy_dict)
target = make_target_function(P, data, use_null_observations=False)
print("Actual goodness of fit:",
      target(cleavage_probabilities=param_dict['cleavage_probabilities'],
             **intsy_dict))
print('')

# # Parameters for bayesian optimization:
# fit_param_dict = {'init_points': 10,
#                   'n_iter': 20,
#                   'xi': 0.0,  # xi = 0.0 => exploitation, xi = 0.1 => exploration
#                   'acq': 'ei'}
# print("Optimizer parameters:")
# print(fit_param_dict)
# fit = fit_model_to_data(P,
#                         data,
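# The commented-out block above hints at Bayesian optimization of the model
# parameters. A sketch using the bayes_opt package with its pre-2.0 API
# (where acq and xi are arguments of maximize()); the objective wrapper and
# the pbounds ranges are assumptions, since fit_model_to_data is not shown.
from bayes_opt import BayesianOptimization

def objective(logETD, logETnoD, logPTR):
    # Hypothetical wrapper: fixed cleavage probabilities, free log-intensities.
    return target(cleavage_probabilities=param_dict['cleavage_probabilities'],
                  ETD=np.exp(logETD), ETnoD=np.exp(logETnoD),
                  PTR=np.exp(logPTR))

pbounds = {'logETD': (-8., 0.), 'logETnoD': (-8., 0.), 'logPTR': (-8., 0.)}
optimizer = BayesianOptimization(f=objective, pbounds=pbounds, random_state=1)
optimizer.maximize(init_points=10, n_iter=20, acq='ei', xi=0.0)
print(optimizer.max)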