def primary_analysis(args):
    ds, edge_aw = load_all_vars_datasets(args)

    sts = {}
    st_diffs = {}
    toa_net_flux = {}
    toa_gm = {}
    G = {}

    for scenario in SCENARIOS.keys():
        # Get the control, 2co2 and 1%-2co2 surface temperatures.
        sts[scenario] = ds[scenario].variables['surf_temp'][0, 0]

        # Calc Top Of Atm net fluxes (incoming solar - outgoing solar - outgoing longwave).
        toa_net_flux[scenario] = ds[scenario].variables['toa_swdown'][0, 0] -\
            ds[scenario].variables['toa_swup'][0, 0] -\
            ds[scenario].variables['olr'][0, 0]

        # Calc some global means using an area-weighted average.
        toa_gm[scenario] = weighted_average(toa_net_flux[scenario], edge_aw)

    # Work out some differences.
    st_diffs['2co2'] = sts['2co2'] - sts['ctrl']
    st_diffs['1pct'] = sts['1pct'] - sts['ctrl']

    sa_mask = create_sa_mask(ds)
    sa_st_diff = np.ma.array(st_diffs['1pct'], mask=sa_mask)
    masked_edge_aw = np.ma.array(edge_aw, mask=sa_mask)
    sa_tcr = weighted_average(sa_st_diff, masked_edge_aw)
    tcr = weighted_average(st_diffs['1pct'], edge_aw)

    G['2co2'] = toa_gm['2co2'] - toa_gm['ctrl']
    G['1pct'] = toa_gm['1pct'] - toa_gm['ctrl']

    # Work out alpha (climate feedback param) and climate sensitivity.
    alpha = (G['2co2'] - G['1pct']) / tcr
    clim_sens = G['2co2'] / alpha

    # Plot on nice overlays. Note this WILL NOT work on UCL computers.
    if args.plot_local or args.plot_global:
        from plotting import plot_all
        plot_all(ds['ctrl'], st_diffs['1pct'], toa_net_flux['2co2'], sa_mask, args)

    res = {'ds': ds,
           'edge_aw': edge_aw,
           'sts': sts,
           'toa_net_flux': toa_net_flux,
           'toa_gm': toa_gm,
           'st_diffs': st_diffs,
           'tcr': tcr,
           'sa_tcr': sa_tcr,
           'G': G,
           'alpha': alpha,
           'clim_sens': clim_sens}

    if args.output:
        print_res(res)

    return res
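# Hedged sketch, not from the source: weighted_average is called above but not
# defined in this excerpt. Given its call sites (a 2-D field plus area weights,
# both possibly masked), a minimal version might be:
import numpy as np

def weighted_average(field, weights):
    """Area-weighted mean of `field`; np.ma.average honours any input masks."""
    return np.ma.average(field, weights=weights)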
def solver(centroids_set, inputs, outputs):
    centroids = list(centroids_set)
    did_change = True
    centroid_to_inputs = {centroid: [] for centroid in centroids}

    while did_change:
        did_change = False

        # Assign each input to its nearest centroid.
        input_to_centroid = {}
        for row in inputs:
            input_to_centroid[row] = min(
                centroids,
                key=lambda centroid: euclidean_distance(row, centroid))

        # Recompute each centroid as the mean of its assigned inputs.
        centroid_to_inputs = {}
        new_centroids = []
        for centroid in centroids:
            x_sum = 0
            y_sum = 0
            count = 0
            centroid_to_inputs[centroid] = []
            for row, input_centroid in input_to_centroid.items():
                if input_centroid == centroid:
                    x_sum += row[0]
                    y_sum += row[1]
                    count += 1
                    centroid_to_inputs[centroid].append(row)
            if count > 0:  # a centroid with no assigned inputs is dropped here
                new_centroids.append((x_sum / count, y_sum / count))

        # Replace any dropped (empty) centroids with fresh random ones.
        invalid_count = len(centroids_set) - len(new_centroids)
        if invalid_count > 0:
            new_centroids += list(
                generate_centroids(get_domain(inputs), invalid_count))

        for new_centroid in new_centroids:
            did_change = did_change or new_centroid not in centroids

        if did_change:
            centroids = new_centroids
        else:
            display_results(centroid_to_inputs, inputs, outputs)

        points = plot_all(inputs, outputs, centroid_to_inputs, COLORS)
        if did_change:
            points.remove()

    return centroid_to_inputs
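# Hedged sketch, not from the source: euclidean_distance is used by the solver
# above; a minimal 2-D version consistent with its call sites:
import math

def euclidean_distance(p, q):
    """Straight-line distance between two 2-D points p and q."""
    return math.hypot(p[0] - q[0], p[1] - q[1])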
if live_plot:
    plt.ioff()

Xs = se.get_Xs()

xls = pandas.ExcelWriter('results/result.xlsx', engine='xlsxwriter')

model_names = [
    'Ng', 'Nx', 'Nfa', 'Ne', 'Nco', 'No', 'Nn', 'Na', 'Nb', 'Nz', 'Ny',
    'V', 'Vg', 'T', 'pH'
]
model_data = pandas.DataFrame(m.get_data(), index=ts, columns=model_names)
model_data.index.name = 'ts'
model_data.to_excel(xls, 'model')

se_names = [name + add for add in ['', '_cov'] for name in model_names[:-1]]
se_data = pandas.DataFrame(se.get_data(), index=ts, columns=se_names)
se_data.index.name = 'ts'
se_data.to_excel(xls, 'se')

su_names = ['Cg', 'Cfa', 'Ce']
su_data = pandas.DataFrame(su.get_data(), index=su.get_times(), columns=su_names)
su_data.index.name = 'ts'
su_data.to_excel(xls, 'su')

xls.save()

plotting.plot_all('results/result.xlsx')
plotname += "_{}".format(base) except NameError: plotname = "_{}".format(base) for directory, name in zip(args.directories, args.names): if base in directory: plotname += "_{}".format(name) if args.offsets != [-1, 100]: plotname += "_offsets" for offset in args.offsets: plotname += "_{}".format(offset) # # plot everybin in same plot if args.offsets == [-1, 100]: plotting.plot_all(data_dict, args.offsets, args.names, odir, plotname, args.outname) ################################ # one plot for each name plotting.plot_per_name(data_dict, args.offsets, args.names, odir, args.outname) plotting.plot_per_offbin(data_dict, args.offsets, args.names, odir, args.outname) #if (len(args.names) >= 2) & ("default" in args.names): ################################ # improvement over default # plot_improvement(data_dict, args.offsets, args.names, odir, plotname, args.outname) if (len(args.names) > 1): plotting.plot_improvement_methods(data_dict, args.offsets, args.names, odir, args.outname)
aug_thrust = dry_thrust * 1.4  # max number convergence exit: mdot_ab = 1.93; my F = 89.895 kN

# Loop for the afterburner (ab). This uses scipy.optimize.minimize() (python's
# analogue of MATLAB's fmincon) to look for the optimum mass flow that meets
# the augmented thrust requirement. abfun (and itbfun) take a mass flow and
# calculate the difference between the thermocycle with that much mass in the
# 2nd burner and the augmented thrust; this lets minimize() find the closest value.
abfun = lambda mdot_ab: numpy.abs(aug_thrust - turbojet.thermocycle(
    0, mdot_ab)[0][0])
info_ab = minimize(abfun, [1.0])
mdot_ab_opt = info_ab.x

# Loop for the inter-turbine burner (itb). This does the same as for ab, but
# now with the itb mass flow. A mass flow of zero for itb means the itb
# changes nothing, and likewise for ab.
itbfun = lambda mdot_itb: numpy.abs(aug_thrust - turbojet.thermocycle(
    mdot_itb, 0)[0][0])
info_itb = minimize(itbfun, [1.0])
mdot_itb_opt = info_itb.x

abans, abvals = turbojet.thermocycle(0, mdot_ab_opt)
itbans, itbvals = turbojet.thermocycle(mdot_itb_opt, 0)

# Assignment 3 T-s diagrams
# turbojet.T_sDiagram('dry', dryans[-2], dryans[-1])
# turbojet.T_sDiagram('ab', abans[-2], abans[-1])
# turbojet.T_sDiagram('itb', itbans[-2], itbans[-1])

slopes = plotting.plot_all(dryans[-2:] + ['dry'], abans[-2:] + ['ab'],
                           itbans[-2:] + ['itb'])
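# Hedged, self-contained illustration of the |target - f(x)| pattern used
# above: minimize() drives a made-up thrust model (`toy_thrust`, an assumption,
# not turbojet.thermocycle) toward a target thrust.
import numpy
from scipy.optimize import minimize

def toy_thrust(mdot):
    # Hypothetical monotone stand-in for the thermocycle's thrust output.
    return 60.0 + 15.0 * mdot

target = 89.895  # kN
fun = lambda mdot: numpy.abs(target - toy_thrust(mdot[0]))
info = minimize(fun, [1.0], method='Nelder-Mead')  # robust for non-smooth |.|
# info.x[0] should land near (89.895 - 60.0) / 15.0, i.e. about 1.99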
def predict_stability(args):
    logging.info("Starting pipeline")
    os.chdir(os.getcwd())
    logging.info(f'Current working directory: {os.getcwd()}')

    # Obtain, redirect and adapt user arguments
    chain_id = args.CHAIN
    ddgfile = check_path(args.DDG_FLAG_FILE)
    mode = args.MODE
    mutation_input = check_path(args.MUTATION_INPUT)
    outpath = check_path(args.OUTPUT_FILE)
    overwrite_path = args.OVERWRITE_PATH
    relaxfile = check_path(args.RELAX_FLAG_FILE)
    structure_list = check_path(args.STRUC_FILE)
    uniprot_accesion = check_path(args.UNIPROT_ID)
    run_struc = args.RUN_STRUC
    ligand = args.LIGAND
    mp_span = args.MP_SPAN_INPUT
    verbose = args.VERBOSE
    partition = args.SLURM_PARTITION

    if run_struc is None:
        run_struc = chain_id

    # System name
    name = os.path.splitext(os.path.basename(structure_list))[0]

    # Initiate folder structure
    folder = folder2(outpath, overwrite_path, is_mp=args.IS_MP)
    logger = make_log(folder, verbose)

    # Store input files
    input_dict = storeinputs.storeinputfuc(name, args, folder)

    if mode in ('proceed', 'relax', 'ddg_calculation'):
        mutation_input = "proceed"
        logger.info(f'No preparation, proceeding to execution')

    # Preprocessing
    if mode == 'create' or mode == 'fullrun':
        logger.info(f'Preparation started')

        # Get input files
        prep_struc = create_copy(
            input_dict['STRUC_FILE'], folder.prepare_input, name='input.pdb')

        # Defining structure parameters

        # Create structure instance
        logger.info(f'Creating structure instance')
        structure_instance = structure(
            chain_id, name, folder, prep_struc, run_struc, logger,
            uniprot_accesion=uniprot_accesion)
        run_name = 'input'

        # Adjust mp structure if MP_ALIGN_MODE is selected
        if args.IS_MP == True and args.MP_ALIGN_MODE != 'False':
            logger.info(f'Align the structure along the membrane using {args.MP_CALC_SPAN_MODE}')
            if args.MP_ALIGN_MODE == 'OPM':
                if args.MP_ALIGN_REF != '':
                    run_name = 'input_mp_aligned'
                    structure_instance.path = os.path.join(
                        folder.prepare_mp_superpose, f'{run_name}.pdb')
                    try:
                        mp_prepare.mp_superpose_opm(
                            args.MP_ALIGN_REF, prep_struc,
                            structure_instance.path,
                            target_chain=structure_instance.chain_id,
                            write_opm=True)
                    except:
                        mp_prepare.mp_TMalign_opm(
                            args.MP_ALIGN_REF, prep_struc,
                            structure_instance.path,
                            target_chain=structure_instance.chain_id,
                            write_opm=True)
                elif args.UNIPROT_ID != '':
                    logger.error('Uniprot-ID to ref pdb not implemented yet')
                    sys.exit()
                else:
                    logger.error(
                        'No reference or Uniprot-ID provided. '
                        'Automatic extraction via sequence not yet implemented.')
                    sys.exit()
            else:
                logger.error(
                    'Other modes (PDBTM, TMDET, MemProtMD) not yet implemented.')
                sys.exit()

        structure_dic = get_structure_parameters(
            folder.prepare_checking, prep_struc)

        # Cleaning pdb and making fasta based on pdb or uniprot-id if provided
        logger.info(f'Prepare the pdb and extract fasta file')
        structure_instance.path_to_cleaned_pdb, struc_dic_cleaned = structure_instance.clean_up_and_isolate()
        structure_instance.fasta_seq = pdb_to_fasta_seq(
            structure_instance.path_to_cleaned_pdb)
        if uniprot_accesion != "":
            structure_instance.uniprot_seq = read_fasta(uniprot_accesion)
            structure_instance.muscle_align_to_uniprot(structure_instance.uniprot_seq)
        else:
            structure_instance.muscle_align_to_uniprot(structure_instance.fasta_seq)

        # Get span file for mp from cleaned file if not provided
        if args.IS_MP == True:
            if input_dict['MP_SPAN_INPUT'] is None:
                logger.info(f'Calculate span file with option {args.MP_CALC_SPAN_MODE}')
                if args.MP_CALC_SPAN_MODE == 'DSSP':
                    structure_instance.span = mp_prepare.mp_span_from_pdb_dssp(
                        structure_instance.path_to_cleaned_pdb,
                        folder.prepare_mp_span, thickness=args.MP_THICKNESS,
                        SLURM=False)
                elif args.MP_CALC_SPAN_MODE == 'octopus':
                    structure_instance.span = mp_prepare.mp_span_from_pdb_octopus(
                        structure_instance.path_to_cleaned_pdb,
                        folder.prepare_mp_span, thickness=args.MP_THICKNESS,
                        SLURM=False)
                elif args.MP_CALC_SPAN_MODE == 'False':
                    logger.warn(
                        'No span file provided and no calculation method selected.')
                else:
                    logger.error(
                        'Other modes (struc, bcl, Boctopus) not yet implemented.')
                    sys.exit()
            elif input_dict['MP_SPAN_INPUT'] is not None:
                structure_instance.span = create_copy(
                    input_dict['MP_SPAN_INPUT'], folder.prepare_mp_span,
                    name='input.span')

        # Making mutfiles and checks
        print(f'Convert prism file if present: {input_dict["PRISM_INPUT"]}')
        if input_dict['PRISM_INPUT'] is None:
            new_mut_input = input_dict['MUTATION_INPUT']
            # mut_dic = get_mut_dict(input_dict['MUTATION_INPUT'])
        else:
            new_mut_input = os.path.join(folder.prepare_input, 'input_mutfile')
            mut_dic = prism_to_mut(input_dict['PRISM_INPUT'], new_mut_input)

        logger.info(f'Generate mutfiles.')
        print(input_dict['MUTATION_INPUT'])
        check2 = structure_instance.make_mutfiles(new_mut_input)
        check1 = compare_mutfile(structure_instance.fasta_seq,
                                 folder.prepare_mutfiles,
                                 folder.prepare_checking, new_mut_input)
        check3, errors = pdbxmut(folder.prepare_mutfiles, struc_dic_cleaned)
        check2 = False  # NOTE: the make_mutfiles result is overridden here

        if check1 == True or check2 == True or check3 == True:
            print("check1:", check1, "check2:", check2, "check3:", check3)
            logger.error(
                "ERROR: STOPPING SCRIPT DUE TO RESIDUE MISMATCH BETWEEN MUTFILE AND PDB SEQUENCE")
            sys.exit()

        # Create hard link to mutfile directory and to output structure
        prepare_output_struc = create_copy(
            structure_instance.path_to_cleaned_pdb, folder.prepare_output,
            name='output.pdb')
        if args.IS_MP == True:
            prepare_output_span_dir = create_copy(
                folder.prepare_mp_span, f'{folder.prepare_output}',
                name='spanfiles', directory=True)
        else:
            prepare_output_ddg_mutfile_dir = create_copy(
                folder.prepare_mutfiles, folder.prepare_output,
                name='mutfiles', directory=True)

        # Copy files for relax & run
        relax_input_struc = create_copy(
            prepare_output_struc, folder.relax_input, name='input.pdb')

        # Generate sbatch files
        logger.info(f'Generate sbatch files')
        if args.IS_MP == True:
            # Copy MP relax input files
            logger.info('Copy MP relax input files')
            relax_input_xml = create_copy(
                input_dict['RELAX_XML_INPUT'], folder.relax_input,
                name='relax.xml')
            relax_input_span_dir = create_copy(
                prepare_output_span_dir, folder.relax_input,
                name='spanfiles', directory=True)

            # Parse sbatch relax file
            logger.info('Create MP relax sbatch files.')
            path_to_relax_sbatch = mp_prepare.rosetta_relax_mp(
                folder, SLURM=True, num_struc=3, sys_name=name,
                partition=partition)

            # Parse sbatch relax parser
            path_to_parse_relax_results_sbatch = structure_instance.parse_relax_sbatch(
                folder, sys_name=f'{name}_relax', sc_name='relax_scores',
                partition=args.SLURM_PARTITION)

            # Parse sbatch ddg file
            ddg_input_ddgfile = create_copy(
                input_dict['DDG_FLAG_FILE'], folder.ddG_input,
                name='ddg_flagfile')
            ddg_input_span_dir = create_copy(
                prepare_output_span_dir, folder.ddG_input,
                name='spanfiles', directory=True)

            if args.MP_PH == -1:
                is_pH = 0
                pH_value = 7
            else:
                is_pH = 1
                pH_value = args.MP_PH
            path_to_ddg_calc_sbatch = mp_ddG.rosetta_ddg_mp_pyrosetta(
                folder, mut_dic, SLURM=True, sys_name=name,
                partition=args.SLURM_PARTITION,
                repack_radius=args.BENCH_MP_REPACK, lipids=args.MP_LIPIDS,
                temperature=args.MP_TEMPERATURE, repeats=args.BENCH_MP_REPEAT,
                is_pH=is_pH, pH_value=pH_value)

            # Parse sbatch ddg parser
            path_to_parse_ddg_sbatch = mp_ddG.write_parse_rosetta_ddg_mp_pyrosetta_sbatch(
                folder, uniprot=args.UNIPROT_ID, sys_name=name,
                output_name='ddG.out', partition=partition)
        else:
            # Parse sbatch relax file
            relax_input_relaxfile = create_copy(
                input_dict['RELAX_FLAG_FILE'], folder.relax_input,
                name='relax_flagfile')
            path_to_relax_sbatch = structure_instance.rosetta_sbatch_relax(
                folder, relaxfile=relax_input_relaxfile, sys_name=name,
                partition=partition)

            # Parse sbatch relax parser
            path_to_parse_relax_results_sbatch = structure_instance.parse_relax_sbatch(
                folder, partition=args.SLURM_PARTITION)

            # Parse sbatch ddg file
            ddg_input_ddgfile = create_copy(
                input_dict['DDG_FLAG_FILE'], folder.ddG_input,
                name='ddg_flagfile')
            ddg_input_mutfile_dir = create_copy(
                prepare_output_ddg_mutfile_dir, folder.ddG_input,
                name='mutfiles', directory=True)
            path_to_ddg_calc_sbatch = structure_instance.write_rosetta_cartesian_ddg_sbatch(
                folder, ddg_input_mutfile_dir, ddgfile=ddg_input_ddgfile,
                sys_name=name, partition=partition)

            # Parse sbatch ddg parser
            path_to_parse_ddg_sbatch = structure_instance.write_parse_cartesian_ddg_sbatch(
                folder, structure_instance.fasta_seq,
                structure_instance.chain_id, sys_name=name,
                partition=args.SLURM_PARTITION)

    # Execution
    # Single SLURM execution
    if mode == 'relax':
        parse_relax_process_id = run_modes.relaxation(folder)
        relax_output_strucfile = find_copy(
            folder.relax_run, '.pdb', folder.relax_output, 'output.pdb')
        # if SLURM == False:
        #     path_to_scorefile = os.path.join(structure_instance.path_to_run_folder + '/relax_scores.sc')
        #     relax_pdb_out = relax_parse_results.parse_relax_results(path_to_scorefile, path_to_run_folder)
        # else:
        #     path_to_parse_relax_results_sbatch = structure_instance.parse_relax_sbatch(os.path.join(structure_instance.path_to_run_folder + '/relax_scores.sc'), structure_instance.path_to_run_folder)
        #     relax_pdb_out = parse_relax_process_id = run_modes.relaxation(structure_instance.path_to_run_folder)
        # logger.info(f"Relaxed structure for ddG calculations: {relax_pdb_out}")

    if mode == 'ddg_calculation':
        run_modes.ddg_calculation(folder)
        # ddg_output_score = find_copy(
        #     folder.ddG_run, '.sc', folder.ddG_output, 'output.sc')

    if mode == 'analysis':
        calc_all(folder, sys_name=name)
        plot_all(folder, sys_name=name)

    # Full SLURM execution
    if mode == 'proceed' or mode == 'fullrun':
        # Start relax calculation
        parse_relax_process_id = run_modes.relaxation(folder)
        # relax_output_strucfile = find_copy(
        #     folder.relax_run, '.pdb', folder.relax_output, 'output.pdb')

        # Start ddG calculation
        # ddg_input_struc = create_copy(
        #     os.path.join(folder.relax_output, 'output.pdb'), folder.ddG_input,
        #     name='input.pdb')
        run_modes.ddg_calculation(folder, parse_relax_process_id)
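# Hedged sketch, an assumption: create_copy appears throughout the pipeline
# above but is not defined in this excerpt. A minimal version that places a
# copy of a file (or a whole directory when directory=True) into `dest_folder`
# under `name` could look like the below; the comment "Create hard link" in
# the pipeline suggests the real helper may use os.link instead of copying.
import os
import shutil

def create_copy(src, dest_folder, name=None, directory=False):
    """Copy `src` into `dest_folder` as `name` and return the new path."""
    dst = os.path.join(dest_folder, name if name else os.path.basename(src))
    if directory:
        shutil.copytree(src, dst, dirs_exist_ok=True)  # Python 3.8+
    else:
        shutil.copy2(src, dst)
    return dst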
from utilities import save_config
from plotting import plot_all
# from solar import SolarLocations

#%% Setup
# Load configuration settings
config = Settings()

# Process configuration settings
config = process_settings(config)

# Create directory for run results
config = setup_directories(config)

# Save copy of settings in output folder
save_config(config)

#%% Initialization
# Steady state solution
circular_orbit(config)

# Initialize GEKKO model
m = define_model(config)

#%% Solve
# Optimize
optimize_trajectory(m, config)

#%% Post Processing and Plotting
plot_all(config.results_folder, config)
def simulate(conn, output_dir, fname_peaks, fname_lfps_prefix, dt, n_runs,
             total_time, temperature, with_v1_l4, with_v1_l6, with_trn, input,
             con_input_lgn, n_e_lgn, n_i_lgn, n_e_l6, n_i_l6, n_e_l4, n_i_l4,
             n_trn, threshold, delay, delay_distbtn_e_l6_lgn, delay_e_l4_e_lgn,
             delay_e_lgn_i_l4, delay_e_lgn_e_l4, delay_e_lgn_e_l6,
             delay_e_lgn_trn, delay_e_l4_trn, delay_distbtn_e_l6_trn,
             delay_e_lgn_i_l6, lgn_params, l4_params, l6_params, trn_params,
             w_e_lgn_trn, w_trn_e_lgn, w_e_l6_trn, w_e_l4_e_l6, w_e_lgn_e_l4,
             w_e_l4_e_lgn, w_e_l6_e_lgn, w_e_lgn_e_l6, w_e_lgn_i_l6,
             w_e_lgn_i_l4, w_e_l4_trn, connect_e_lgn_e_l4, connect_e_lgn_i_l4,
             connect_e_l4_e_lgn, connect_e_lgn_i_l6, connect_e_lgn_e_l6,
             connect_e_l6_e_lgn, connect_e_l4_trn, connect_e_l6_trn,
             connect_e_lgn_trn, connect_trn_e_lgn, connect_e_l4_e_l6):
    start = np.empty(shape=0)
    bf_plot = np.empty(shape=0)
    af_plot = np.empty(shape=0)
    end = np.empty(shape=0)

    h.celsius = temperature
    print "* * * Simulating %d runs * * *" % n_runs
    h.tstop = total_time

    for n_sim in range(n_runs):
        print "#%d: Constructing circuits..." % (n_sim + 1)
        start = np.append(start, timer())

        # Creating LGN network
        i_lgn, i_lgn_rec = create_network(n_i_lgn)
        e_lgn, e_lgn_rec = create_network(n_e_lgn)

        # Create connections in LGN
        e_lgn_e_lgn_syn = e_net_connect(e_lgn, e_lgn, threshold, delay, lgn_params['w_e_lgn_e_lgn'], 1)
        i_lgn_i_lgn_syn = i_net_connect(i_lgn, i_lgn, threshold, delay, lgn_params['w_i_lgn_i_lgn'], 1)
        i_lgn_e_lgn_syn = i_net_connect(i_lgn, e_lgn, threshold, lgn_params['delay_i_e'], lgn_params['w_i_lgn_e_lgn'], 1)
        e_lgn_i_lgn_syn = e_net_connect(e_lgn, i_lgn, threshold, lgn_params['delay_e_i'], lgn_params['w_e_lgn_i_lgn'], 1)

        # Weight should be set to zero
        e_l4, e_l4_rec = create_network(n_e_l4)
        i_l4, i_l4_rec = create_network(n_i_l4)

        if with_v1_l4:
            # Create connections in V1 L4
            e_l4_e_l4_sin = e_net_connect(e_l4, e_l4, threshold, delay, l4_params['w_e_l4_e_l4'], l4_params['p_e_e'])
            i_l4_i_l4_sin = i_net_connect(i_l4, i_l4, threshold, delay, l4_params['w_i_l4_i_l4'], l4_params['p_i_i'])
            e_l4_i_l4_sin = e_net_connect(e_l4, i_l4, threshold, delay, l4_params['w_e_l4_i_l4'], l4_params['p_e_i'])
            i_l4_e_l4_sin = i_net_connect(i_l4, e_l4, threshold, delay, l4_params['w_i_l4_e_l4'], l4_params['p_i_e'])

        # Extrinsic connections
        # Population 1) 15 LGN E cells connect to 15 V1 L4 E cells
        # Population 2) 5 LGN E cells connect to 5 V1 L4 I cells
        #
        # Population 1 and population 2 are different
        #
        # Hirsch et al., 1998
        if connect_e_lgn_e_l4:
            # Connections from Glutamatergic neurons of network LGN to network V1 L4
            e_lgn_e_l4_syn = partial_e_net_connect(e_lgn, e_l4, 2./4, 1, 2./4, 1, threshold, delay_e_lgn_e_l4, w_e_lgn_e_l4)
            # e_lgn_e_l4_syn = topographically_e_connect(e_lgn, e_l4, 0, 1, threshold, delay_e_lgn_e_l4, w_e_lgn_e_l4)

        if connect_e_l4_e_lgn:
            # TODO: feedback connections are only of 3/4 of neurons?
            # Connections from Glutamatergic neurons of network 2 (V1) to network 1 (LGN)
            e_l4_e_lgn_syn = partial_e_net_connect(e_l4, e_lgn, 1./4, 1, 1./4, 1, threshold, delay_e_l4_e_lgn, w_e_l4_e_lgn)

        if connect_e_lgn_i_l4:
            # Connections from Glutamatergic neurons of network (LGN) to network V1 L4
            e_lgn_i_l4_syn = partial_e_net_connect(e_lgn, i_l4, 0, 2./4, 0, 2./4, threshold, delay_e_lgn_i_l4, w_e_lgn_i_l4)
            # e_lgn_i_l4_syn = topographically_e_connect(e_lgn, i_l4, 0, 1./4, threshold, delay_e_lgn_i_l4, w_e_lgn_i_l4)

        i_l6, i_l6_rec = create_network_L6(n_i_l6)
        e_l6, e_l6_rec = create_network_L6(n_e_l6)

        if with_v1_l6:
            # Create connections in V1 L6
            e_l6_e_l6_sin = e_net_connect(e_l6, e_l6, threshold, delay, l6_params['w_e_l6_e_l6'], l6_params['p_e_e'])
            i_l6_i_l6_sin = i_net_connect(i_l6, i_l6, threshold, delay, l6_params['w_i_l6_i_l6'], l6_params['p_i_i'])
            e_l6_i_l6_syn = e_net_connect(e_l6, i_l6, threshold, delay, l6_params['w_e_l6_i_l6'], l6_params['p_e_i'])
            i_l6_e_l6_syn = i_net_connect(i_l6, e_l6, threshold, delay, l6_params['w_i_l6_e_l6'], l6_params['p_i_e'])

        # Connections from V1 input (L4) layer to L6
        if connect_e_l4_e_l6:
            e_l4_e_l6_sin = e_net_connect(e_l4, e_l6, threshold, 1, w_e_l4_e_l6, 1)

        # ALL-to-ALL connections of feedback
        if connect_e_l6_e_lgn:
            e_l6_e_lgn_sin = e_ct_net_connect_delay_dist(e_l6, e_lgn, threshold, delay_distbtn_e_l6_lgn, w_e_l6_e_lgn)

        # TODO: Connectivity as Hirsch
        if connect_e_lgn_e_l6:
            e_lgn_e_l6_syn = e_net_connect(e_lgn, e_l6, threshold, delay_e_lgn_e_l6, w_e_lgn_e_l6, 1)

        # TODO: Connectivity as Hirsch
        if connect_e_lgn_i_l6:
            e_lgn_i_l6_syn = e_net_connect(e_lgn, i_l6, threshold, delay_e_lgn_i_l6, w_e_lgn_i_l6, 1)

        # Create trn neurons (inhibitory only)
        trn, trn_rec = create_network(n_trn)
        if with_trn:
            trn_trn_syn = i_net_connect(trn, trn, threshold, trn_params['delay_i_i'], trn_params['w_trn_trn'], trn_params['p_i_i'])

            # Connections from Glutamatergic neurons of network V1 L4 to trn
            if with_v1_l4 and connect_e_l4_trn:
                e_l4_trn_syn = e_net_connect(e_l4, trn, threshold, delay_e_l4_trn, w_e_l4_trn, 1)

            if with_v1_l6 and connect_e_l6_trn:
                e_l6_trn_syn = e_net_connect_delay_dist(e_l6, trn, threshold, delay_distbtn_e_l6_trn, w_e_l6_trn, 1)

            if connect_e_lgn_trn:
                # Connections from Glutamatergic neurons of LGN to TRN
                # ALL-to-ALL
                e_lgn_trn_syn = e_net_connect(e_lgn, trn, threshold, delay_e_lgn_trn, w_e_lgn_trn, 1)
                # # topographic
                # topographically_connect(e_lgn, trn, 0, 1, threshold, delay_e_lgn_trn, w_e_lgn_trn)

            if connect_trn_e_lgn:
                # ALL-to-ALL
                trn_e_lgn_sin = i_net_connect(trn, e_lgn, threshold, delay, w_trn_e_lgn, 1)

        # Generate inputs to LGN
        netStim = list()
        i_stims = list()
        e_stims = list()
        stim_rec = h.Vector()
        for stim_i in range(0, input['nstims']):
            netStim.append(h.NetStimPois(input['position']))
            netStim[stim_i].start = 0
            netStim[stim_i].mean = input['stimrate']  # 100 = 10 Hz, 10 = 100 Hz, 1 = 1000 Hz, 5 = 200 Hz, 6 = 150 Hz
            netStim[stim_i].number = 0
            if stim_i < n_i_lgn:
                i_stims.append(h.NetCon(netStim[stim_i], i_lgn[stim_i].synE,
                                        con_input_lgn['gaba_threshold'],
                                        con_input_lgn['gaba_delay'],
                                        con_input_lgn['gaba_weight']))
            if stim_i < n_e_lgn:
                e_stims.append(h.NetCon(netStim[stim_i], e_lgn[stim_i].synE,
                                        con_input_lgn['glut_threshold'],
                                        con_input_lgn['glut_delay'],
                                        con_input_lgn['glut_weight']))

        e_stims[0].record(stim_rec)  # measure poisson input #0 to LGN Excitatory Cell #0

        timeaxis = h.Vector()
        timeaxis.record(h._ref_t)

        print "#%d: Running simulation..." % (n_sim + 1)
        h.run()

        bf_plot = np.append(bf_plot, timer())
        mean_lgn, mean_trn, mean_v1_l4, mean_v1_l6 = plot_all(
            conn, output_dir, fname_peaks, n_sim, dt, timeaxis, stim_rec,
            with_v1_l4, with_v1_l6, with_trn, e_lgn_rec, i_lgn_rec, trn_rec,
            e_l4_rec, i_l4_rec, e_l6_rec, i_l6_rec, n_e_lgn, n_i_lgn, n_trn,
            n_e_l4, n_i_l4, n_e_l6, n_i_l6)
        af_plot = np.append(af_plot, timer())

        ofname = fname_lfps_prefix + str(n_sim) + ".txt"
        n = len(timeaxis)
        indx = np.arange(0, n, 40)  # store one in every 40 values (end at n: index n would overrun)
        lfp_lgn = np.array(mean_lgn)
        lfp_trn = np.array(mean_trn)
        lfp_l4 = np.array(mean_v1_l4)
        lfp_l6 = np.array(mean_v1_l6)
        time = np.array(timeaxis)
        np.savetxt(ofname, (lfp_lgn[indx], lfp_trn[indx], lfp_l4[indx],
                            lfp_l6[indx], time[indx]))

        end = np.append(end, timer())
        print "Progress: %d runs simulated %d runs missing" % (n_sim + 1, n_runs - n_sim - 1)

    print_time_stats(start, bf_plot, af_plot, end)
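# Hedged sketch, an assumption: e_net_connect is called above but defined
# elsewhere. Modeled on the explicit h.NetCon loops that appear in the related
# simulate() variant later in this collection, an all-to-all version with a
# connection probability p_connect might look like:
import numpy as np
from neuron import h

def e_net_connect(sources, targets, threshold, delay, weights, p_connect):
    """Connect every source soma to every target's excitatory synapse."""
    netcons = []
    for i, src in enumerate(sources):
        src.soma.push()  # make the source section the currently accessed one
        for j, tgt in enumerate(targets):
            if np.random.random() < p_connect:
                # call sites pass either a scalar weight or an (i, j) matrix
                w = weights[i, j] if np.ndim(weights) else weights
                netcons.append(h.NetCon(src.soma(0.5)._ref_v, tgt.synE,
                                        threshold, delay, w))
        h.pop_section()
    return netcons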
""" Run perfs to gather data and then plot them. """ from run import run_all from plotting import plot_all run_all() plot_all()
def simulate(n_runs, total_time, with_V1_L4, with_V1_L6, with_TRN, input,
             con_input_lgn, n_e_lgn, n_i_lgn, n_e_l6, n_i_l6, n_e_l4, n_I_L4,
             n_trn, delay_distbtn_E_L6_LGN, delay_E_L4_E_LGN, delay_E_LGN_I_L4,
             delay_E_LGN_E_L4, delay_E_LGN_E_L6, delay_E_LGN_TRN,
             delay_E_L4_TRN, delay_distbtn_E_L6_TRN, delay_E_LGN_I_LGN,
             delay_I_LGN_E_LGN, delay_E_LGN_I_L6, lgn_params, l4_params,
             l6_params, W_TRN_TRN, W_E_LGN_TRN, W_TRN_E_LGN, w_e_l6_trn,
             W_E_L4_E_L6, W_E_LGN_E_L4, W_E_L4_E_LGN, w_e_l6_e_lgn,
             W_E_LGN_E_L6, W_E_LGN_I_L6, W_E_LGN_I_L4, W_E_L4_TRN,
             connect_E_LGN_E_L4, connect_E_LGN_I_L4, connect_E_L4_E_LGN,
             connect_E_LGN_I_L6, connect_E_LGN_E_L6, connect_E_L6_E_LGN,
             connect_E_L4_TRN, connect_E_L6_TRN, connect_E_LGN_TRN,
             connect_TRN_E_LGN, connect_E_L4_E_L6):
    print "* * * Simulating %d runs * * *" % n_runs
    h.tstop = total_time

    for n_sim in range(n_runs):
        # Creating LGN network
        i_lgn, I_LGN_rec = createNetwork(n_i_lgn)
        e_lgn, E_LGN_rec = createNetwork(n_e_lgn)

        # Create connections in network 1 (LGN)
        e_lgn_e_lgn_syn = e_net_connect(e_lgn, e_lgn, 0, 1, lgn_params['w_e_lgn_e_lgn'])
        i_lgn_i_lgn_syn = i_net_connect(i_lgn, i_lgn, 0, 1, lgn_params['w_e_lgn_e_lgn'])
        i_lgn_e_lgn_syn = i_net_connect(i_lgn, e_lgn, 0, 1, lgn_params['w_e_lgn_e_lgn'])
        e_lgn_i_lgn_syn = e_net_connect(e_lgn, i_lgn, 0, 1, lgn_params['w_e_lgn_e_lgn'])

        # Weight should be set to zero
        e_l4, E_L4_rec = createNetwork(n_e_l4)
        i_l4, I_L4_rec = createNetwork(n_I_L4)

        if with_V1_L4:
            # Create connections in network 2 (V1 superficial)
            e_l4_e_l4_sin = e_net_connect(e_l4, e_l4, 0, 1, l4_params['w_e_l4_e_l4'])
            i_l4_i_l4_sin = i_net_connect(i_l4, i_l4, 0, 1, l4_params['w_i_l4_i_l4'])
            e_l4_i_l4_sin = e_net_connect(e_l4, i_l4, 0, 1, l4_params['w_e_l4_i_l4'])
            i_l4_e_l4_sin = i_net_connect(i_l4, e_l4, 0, 1, l4_params['w_i_l4_e_l4'])

        # Population 1) 15 LGN E cells connect to 15 V1 L4 E cells
        # Population 2) 5 LGN E cells connect to 5 V1 L4 I cells
        #
        # Population 1 and population 2 are different
        #
        # Hirsch et al., 1998

        # Extrinsic connections
        # ALL-to-ALL connectivity
        if connect_E_LGN_E_L4:
            # Connections from Glutamatergic neurons of network LGN to network V1 L4
            # e_lgn_e_l4_sin = e_net_connect(e_lgn, e_l4, 0, delay_E_LGN_E_L4, W_E_LGN_E_L4)
            sin_E_LGN_E_L4 = list()
            len_LGN = len(e_lgn)
            for neuron_i in range(len(e_lgn) * 1 / 4, len(e_lgn)):
                e_lgn[neuron_i].soma.push()
                for neuron_j in range(len(e_l4) * 1 / 4, len(e_l4)):
                    sin_E_LGN_E_L4.append(h.NetCon(e_lgn[neuron_i].soma(0.5)._ref_v, e_l4[neuron_j].synE,
                                                   0, delay_E_LGN_E_L4, W_E_LGN_E_L4[neuron_i, neuron_j]))
                h.pop_section()

        # Topographic connectivity
        # if connect_E_LGN_E_L4:
        #     # extrinsic connections
        #     # connections from Glutamatergic neurons of network 1 (LGN) to network 2 (V1)
        #     Glutnt1nt2_sin = list()
        #     for neuron_i in range(len(e_lgn)):
        #         e_lgn[neuron_i].soma.push()
        #         Glutnt1nt2_sin.append(h.NetCon(e_lgn[neuron_i].soma(0.5)._ref_v, e_l4[neuron_i].synE,
        #                                        0, delay_E_LGN_E_L4, W_E_LGN_E_L4[neuron_i, neuron_i]))
        #         h.pop_section()

        if connect_E_L4_E_LGN:
            # Connections from Glutamatergic neurons of network 2 (V1) to network 1 (LGN)
            Glutnt2nt1_sin = list()
            for neuron_i in range(len(e_l4) * 1 / 4, len(e_l4)):
                e_l4[neuron_i].soma.push()
                for neuron_j in range(len(e_lgn) * 1 / 4, len(e_lgn)):
                    Glutnt2nt1_sin.append(h.NetCon(e_l4[neuron_i].soma(0.5)._ref_v, e_lgn[neuron_j].synE_CT,
                                                   0, delay_E_L4_E_LGN, W_E_L4_E_LGN[neuron_i, neuron_j]))
                h.pop_section()

        # Population 1) 15 LGN E cells connect to 15 V1 L4 E cells
        # Population 2) 5 LGN E cells connect to 5 V1 L4 I cells
        #
        # Population 1 and population 2 are different
        #
        # Hirsch et al., 1998

        # ALL-to-ALL connectivity
        if connect_E_LGN_I_L4:
            # Connections from Glutamatergic neurons of network (LGN) to network V1 L4
            sin_E_LGN_I_L4 = list()
            len_LGN = len(e_lgn)
            for neuron_i in range(0, len_LGN * 1 / 4):
                e_lgn[neuron_i].soma.push()
                for neuron_j in range(0, len(i_l4) * 1 / 4):
                    sin_E_LGN_I_L4.append(h.NetCon(e_lgn[neuron_i].soma(0.5)._ref_v, i_l4[neuron_j].synE,
                                                   0, delay_E_LGN_I_L4, W_E_LGN_I_L4[neuron_i, neuron_j]))
                h.pop_section()

        # Topographic connectivity
        # if connect_E_LGN_I_L4:
        #     # connections from Glutamatergic neurons of network (LGN) to network V1 L4
        #     sin_E_LGN_I_L4 = list()
        #     len_LGN = len(e_lgn)
        #     for neuron_i in range(0, len_LGN * 1 / 4):
        #         e_lgn[neuron_i].soma.push()
        #         sin_E_LGN_I_L4.append(h.NetCon(e_lgn[neuron_i].soma(0.5)._ref_v, i_l4[neuron_i].synE,
        #                                        0, delay_E_LGN_I_L4, W_E_LGN_I_L4[neuron_i, neuron_i]))
        #         h.pop_section()

        i_l6, I_L6_rec = createNetworkL6(n_i_l6)
        e_l6, E_L6_rec = createNetworkL6(n_e_l6)

        if with_V1_L6:
            # Create connections in network 2 (V1 L6)
            e_l6_e_l6_sin = e_net_connect(e_l6, e_l6, 0, 1, l6_params['w_e_l6_e_l6'])
            i_l6_i_l6_sin = i_net_connect(i_l6, i_l6, 0, 1, l6_params['w_i_l6_i_l6'])
            i_l6_e_l6_syn = i_net_connect(i_l6, e_l6, 0, 1, l6_params['w_i_l6_e_l6'])
            e_l6_i_l6_syn = e_net_connect(e_l6, i_l6, 0, 1, l6_params['w_e_l6_i_l6'])

        # Connections from V1 input (L4) layer to L6
        if connect_E_L4_E_L6:
            e_l4_e_l6_sin = e_net_connect(e_l4, e_l6, 0, 1, W_E_L4_E_L6)

        # ALL-to-ALL connections of feedback
        if connect_E_L6_E_LGN:
            e_l6_e_lgn_sin = e_ct_net_connect_delay_dist(e_l6, e_lgn, 0, delay_distbtn_E_L6_LGN, w_e_l6_e_lgn)

        # if connect_E_L6_E_LGN:
        #     # connections from Glutamatergic neurons of network 2 (V1) to network 1 (LGN)
        #     k = 0
        #     GlutL6nt1_sin = list()
        #     for neuron_i in range(len(e_l6)):
        #         e_l6[neuron_i].soma.push()
        #         GlutL6nt1_sin.append(h.NetCon(e_l6[neuron_i].soma(0.5)._ref_v, e_lgn[neuron_i].synE_CT,
        #                                       0, delay_distbtn_E_L6_LGN[k], w_e_l6_e_lgn[neuron_i, neuron_i]))
        #         k += 1
        #         h.pop_section()

        # TODO: Connectivity as Hirsch
        if connect_E_LGN_E_L6:
            e_lgn_e_l6_syn = e_net_connect(e_lgn, e_l6, 0, delay_E_LGN_E_L6, W_E_LGN_E_L6)

        # TODO: Connectivity as Hirsch
        if connect_E_LGN_I_L6:
            e_lgn_i_l6_syn = e_net_connect(e_lgn, i_l6, 0, delay_E_LGN_I_L6, W_E_LGN_I_L6)

        # Create trn neurons (inhibitory only)
        trn, TRN_rec = createNetwork(n_trn)
        if with_TRN:
            trn_trn_syn = i_net_connect(trn, trn, 0, 1, W_TRN_TRN)

            # Connections from Glutamatergic neurons of network V1 L4 to trn
            if with_V1_L4 and connect_E_L4_TRN:
                e_l4_trn_syn = e_net_connect(e_l4, trn, 0, delay_E_L4_TRN, W_E_L4_TRN)

            if with_V1_L6 and connect_E_L6_TRN:
                e_l6_trn_syn = e_net_connect_delay_dist(e_l6, trn, 0, delay_distbtn_E_L6_TRN, w_e_l6_trn)

            # ALL-to-ALL
            if connect_E_LGN_TRN:
                # Connections from Glutamatergic neurons of network 1 (LGN) to trn
                e_lgn_trn_syn = e_net_connect(e_lgn, trn, 0, delay_E_LGN_TRN, W_E_LGN_TRN)

            # Topographic
            # if connect_E_LGN_TRN:
            #     # connections from Glutamatergic neurons of LGN to trn
            #     GlutGABAtneurons_sin1 = list()
            #     delayGlutGABAtneurons = 1
            #     for neuron_i in range(len(e_lgn)):
            #         e_lgn[neuron_i].soma.push()
            #         GlutGABAtneurons_sin1.append(h.NetCon(e_lgn[neuron_i].soma(0.5)._ref_v, trn[neuron_i].synE,
            #                                               0, delayGlutGABAtneurons, W_E_LGN_TRN[neuron_i, neuron_i]))
            #         h.pop_section()

            # ALL-to-ALL
            if connect_TRN_E_LGN:
                trn_e_lgn_sin = i_net_connect(trn, e_lgn, 0, 1, W_TRN_E_LGN)

        # Generate inputs to LGN
        netStim = list()
        i_stims = list()
        e_stims = list()
        stim_rec = h.Vector()
        for stim_i in range(0, input['nstims']):
            netStim.append(h.NetStimPois(input['input']))
            netStim[stim_i].start = 0
            netStim[stim_i].mean = input['stimrate']  # 100 = 10 Hz, 10 = 100 Hz, 1 = 1000 Hz, 5 = 200 Hz, 6 = 150 Hz
            netStim[stim_i].number = 0
            if stim_i < n_i_lgn:
                i_stims.append(h.NetCon(netStim[stim_i], i_lgn[stim_i].synE,
                                        con_input_lgn['gaba_threshold'],
                                        con_input_lgn['gaba_delay'],
                                        con_input_lgn['gaba_weight']))
            e_stims.append(h.NetCon(netStim[stim_i], e_lgn[stim_i].synE,
                                    con_input_lgn['glut_threshold'],
                                    con_input_lgn['glut_delay'],
                                    con_input_lgn['glut_weight']))

        e_stims[0].record(stim_rec)  # measure poisson input #0 to LGN Cell #0

        timeaxis = h.Vector()
        timeaxis.record(h._ref_t)

        h.run()

        meanLGN, meanTRN, meanV1input, meanV1output = plot_all(
            timeaxis, stim_rec, with_V1_L4, with_V1_L6, with_TRN, E_L4_rec,
            TRN_rec, E_L6_rec, E_LGN_rec, I_L6_rec, i_lgn, i_l4, I_LGN_rec,
            I_L4_rec, n_e_lgn, n_e_l6, n_e_l4, n_trn, n_i_l6)

        ofname = "../data_files/" + "sim" + str(n_sim + 0) + ".txt"
        n = len(timeaxis)
        indx = np.arange(1, n, 40)
        w = np.array(meanLGN)
        u = np.array(meanTRN)
        x = np.array(meanV1input)
        y = np.array(meanV1output)
        z = np.array(timeaxis)
        np.savetxt(ofname, (w[indx], u[indx], x[indx], y[indx], z[indx]))

        print "Progress: %d runs simulated %d runs missing" % (n_sim + 1, n_runs - n_sim - 1)
def run_optim(key: np.ndarray, lhs: np.ndarray, tmp: np.ndarray,
              xhats: np.ndarray, tmp_c: np.ndarray, xhats_c: np.ndarray,
              xstar: float, bound: Text, out_dir: Text, x: np.ndarray,
              y: np.ndarray) -> Tuple[int, float, float, int, float, float]:
    """Run optimization (either lower or upper) for a single xstar."""
    # Directory setup
    # ---------------------------------------------------------------------------
    out_dir = os.path.join(out_dir, f"{bound}-xstar_{xstar}")
    if FLAGS.store_data:
        logging.info(f"Current run output directory: {out_dir}...")
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

    # Init optim params
    # ---------------------------------------------------------------------------
    logging.info(f"Initialize parameters L, mu, log_sigma, lmbda, tau, slack...")
    key, subkey = random.split(key)
    params = init_params(subkey)
    for parname, param in zip(['L', 'mu', 'log_sigma'], params):
        logging.info(f"Parameter {parname}: {param.shape}")
        logging.info(f"  -> {parname}: {param}")

    tau = FLAGS.tau_init
    logging.info(f"Initial tau = {tau}")
    fin_tau = np.minimum(FLAGS.tau_factor**FLAGS.num_rounds * tau, FLAGS.tau_max)
    logging.info(f"Final tau = {fin_tau}")

    # Set constraint approach and slacks
    # ---------------------------------------------------------------------------
    slack = FLAGS.slack * np.ones(FLAGS.num_z * 2)
    lmbda = np.zeros(FLAGS.num_z * 2)
    logging.info(f"Lambdas: {lmbda.shape}")
    logging.info(f"Fractional tolerance (slack) for constraints = {FLAGS.slack}")
    logging.info(f"Set relative slack variables...")
    slack *= np.abs(lhs.ravel())
    logging.info(f"Set minimum slack to {FLAGS.slack_abs}...")
    slack = np.maximum(FLAGS.slack_abs, slack)
    logging.info(f"Slack {slack.shape}")
    logging.info(f"Actual slack min: {np.min(slack)}, max: {np.max(slack)}")

    # Setup optimizer
    # ---------------------------------------------------------------------------
    logging.info(f"Vanilla SGD with init_lr={FLAGS.lr}...")
    logging.info(f"Set learning rate schedule")
    step_size = optim.inverse_time_decay(FLAGS.lr, FLAGS.decay_steps,
                                         FLAGS.decay_rate, FLAGS.staircase)
    init_fun, update_fun, get_params = optim.sgd(step_size)
    logging.info(f"Init state for JAX optimizer (including L, mu, log_sigma)...")
    state = init_fun(params)

    # Setup result dict
    # ---------------------------------------------------------------------------
    logging.info(f"Initialize dictionary for results...")
    results = {
        "mu": [],
        "sigma": [],
        "cholesky_factor": [],
        "tau": [],
        "lambda": [],
        "objective": [],
        "constraint_term": [],
        "rhs": []
    }
    if FLAGS.plot_intermediate:
        results["grad_norms"] = []
        results["lagrangian"] = []

    logging.info(f"Evaluate at xstar={xstar}...")
    logging.info(f"Evaluate {bound} bound...")
    sign = 1 if bound == "lower" else -1

    # ===========================================================================
    # OPTIMIZATION LOOP
    # ===========================================================================

    # One-time logging before first step
    # ---------------------------------------------------------------------------
    key, subkey = random.split(key)
    obj, rhs, psisum, constr = objective_rhs_psisum_constr(
        subkey, get_params(state), lmbda, tau, lhs, slack, xstar, tmp_c, xhats_c)
    results["objective"].append(obj)
    results["constraint_term"].append(psisum)
    results["rhs"].append(rhs)
    logging.info(f"Objective: scalar")
    logging.info(f"RHS: {rhs.shape}")
    logging.info(f"Sum over Psis: scalar")
    logging.info(f"Constraint: {constr.shape}")

    tril_idx = np.tril_indices(FLAGS.dim_theta + 1)
    count = 0
    logging.info(f"Start optimization loop...")
    for _ in tqdm(range(FLAGS.num_rounds)):
        # Log current parameters
        # -------------------------------------------------------------------------
        results["lambda"].append(lmbda)
        results["tau"].append(tau)
        cur_L, cur_mu, cur_logsigma = get_params(state)
        cur_chol = make_cholesky_factor(cur_L)[tril_idx].ravel()[1:]
        results["mu"].append(cur_mu)
        results["sigma"].append(np.exp(cur_logsigma))
        results["cholesky_factor"].append(cur_chol)

        subkeys = random.split(key, num=FLAGS.opt_steps + 1)
        key = subkeys[0]

        # Inner optimization for subproblem
        # -------------------------------------------------------------------------
        for j in range(FLAGS.opt_steps):
            v, grads = lagrangian_value_and_grad(subkeys[j + 1],
                                                 get_params(state), lmbda, tau,
                                                 lhs, slack, xstar, tmp, xhats,
                                                 sign)
            state = update_fun(count, grads, state)
            count += 1
            if FLAGS.plot_intermediate:
                results["lagrangian"].append(v)
                results["grad_norms"].append(
                    [np.linalg.norm(grad) for grad in grads])

        # Post inner optimization logging
        # -------------------------------------------------------------------------
        key, subkey = random.split(key)
        obj, rhs, psisum, constr = objective_rhs_psisum_constr(
            subkey, get_params(state), lmbda, tau, lhs, slack, xstar, tmp_c,
            xhats_c)
        results["objective"].append(obj)
        results["constraint_term"].append(psisum)
        results["rhs"].append(rhs)

        # Update lambda, tau
        # -------------------------------------------------------------------------
        lmbda = update_lambda(constr, lmbda, tau)
        tau = np.minimum(tau * FLAGS.tau_factor, FLAGS.tau_max)

    # Convert and store results
    # ---------------------------------------------------------------------------
    logging.info(f"Finished optimization loop...")
    logging.info(f"Convert all results to numpy arrays...")
    results = {k: np.array(v) for k, v in results.items()}

    logging.info(f"Add final parameters and lhs to results...")
    L, mu, log_sigma = get_params(state)
    results["final_L"] = L
    results["final_mu"] = mu
    results["final_log_sigma"] = log_sigma
    results["lhs"] = lhs

    if FLAGS.store_data:
        result_path = os.path.join(out_dir, "results.npz")
        logging.info(f"Save result data to {result_path}...")
        onp.savez(result_path, **results)

    # Generate and store plots
    # ---------------------------------------------------------------------------
    if FLAGS.plot_intermediate:
        fig_dir = os.path.join(out_dir, "figures")
        logging.info(f"Generate and save all plots at {fig_dir}...")
        plotting.plot_all(results, x, y, response, fig_dir)

    # Compute last valid and last satisfied
    # ---------------------------------------------------------------------------
    maxabsdiff = np.array([np.max(np.abs(lhs - r)) for r in results["rhs"]])
    fin_i = np.sum(~np.isnan(results["objective"])) - 1
    logging.info(f"Final non-nan objective at {fin_i}.")
    fin_obj = results["objective"][fin_i]
    fin_maxabsdiff = maxabsdiff[fin_i]

    sat_i = [
        np.all((np.abs((lhs - r) / lhs) < FLAGS.slack) |
               (np.abs(lhs - r) < FLAGS.slack_abs))
        for r in results["rhs"]
    ]
    sat_i = np.where(sat_i)[0]
    if len(sat_i) > 0:
        sat_i = sat_i[-1]
        logging.info(f"Final satisfied constraint at {sat_i}.")
        sat_obj = results["objective"][sat_i]
        sat_maxabsdiff = maxabsdiff[sat_i]
    else:
        sat_i = -1
        logging.info(f"Constraints were never satisfied.")
        sat_obj, sat_maxabsdiff = np.nan, np.nan

    logging.info("Finished run.")
    return fin_i, fin_obj, fin_maxabsdiff, sat_i, sat_obj, sat_maxabsdiff
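# Hedged sketch, an assumption: update_lambda is called in the loop above but
# not defined in this excerpt. A standard augmented-Lagrangian multiplier step
# for inequality constraints, clipped at zero, would be:
import jax.numpy as np  # this file aliases jax.numpy as np (onp is plain numpy)

def update_lambda(constr, lmbda, tau):
    """lambda_{k+1} = max(0, lambda_k + tau * constraint_violation)."""
    return np.maximum(0.0, lmbda + tau * constr)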