def training(result_filename, training_data_filename, num_epochs, column):
    path = "./result/" + result_filename
    if not os.path.isdir(path):
        os.mkdir(path)
    newdirs = []
    for root, dirs, files in walk(training_data_filename):
        if files:
            for i in range(len(files)):
                newdirs.append(root + '/' + files[i])
                print(root + '/' + files[i])
    # Stack the data from every collected file into one training set.
    x_train, y_train = read_excel(newdirs[0])
    for i in range(1, len(newdirs)):
        a1, b1 = read_excel(newdirs[i])
        x_train = np.vstack((x_train, a1))
        y_train = np.hstack((y_train, b1))
        print(newdirs[i])
    # Hold out every fifth sample as the validation set.
    x_tem_train = []
    y_tem_train = []
    x_tem_validation = []
    y_tem_validation = []
    #x_newtest = np.array(x_test, dtype=float)
    #y_newtest = np.array(y_test, dtype=float)
    for i in range(len(x_train)):
        if i % 5 == 0:
            x_tem_validation.append(x_train[i])
            y_tem_validation.append(y_train[i])
        else:
            x_tem_train.append(x_train[i])
            y_tem_train.append(y_train[i])
    x_new_train = np.array(x_tem_train, dtype=float)
    y_new_train = np.array(y_tem_train, dtype=float)
    x_new_validation = np.array(x_tem_validation, dtype=float)
    y_new_validation = np.array(y_tem_validation, dtype=float)
    #LeNet_Regressor(train_inputs, train_labels, val_inputs, val_labels,
    #                filepath, num_epoch, SensorType):
    seqModel = LeNet_Regressor(x_new_train, y_new_train, x_new_validation,
                               y_new_validation, path + '/best_weight.hdf5',
                               num_epochs)
    hist_df = pd.DataFrame(seqModel.history)
    # or save to csv:
    hist_csv_file = path + '/history.csv'
    with open(hist_csv_file, mode='w') as f:
        hist_df.to_csv(f)
def json_to_csv():
    data = read_json(path, 'Modian_Summary.json')
    with open("test.csv", "w") as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['Team', 'Name', 'Total Amount', 'Headcount', 'Average'])
        htt, lyt = read_excel()
        for team in data:
            for name in data[team]:
                if name == '黄婷婷':
                    total_amount = round(data[team][name]['总计金额'] + htt, 2)
                elif name == '李艺彤':
                    total_amount = round(data[team][name]['总计金额'] + lyt, 2)
                else:
                    total_amount = data[team][name]['总计金额']  # total amount
                headcount = data[team][name]['集资人数']  # number of contributors
                average = data[team][name]['人均集资']  # average contribution per person
                line = [team, name, total_amount, headcount, average]
                writer.writerow(line)
    print('test.csv has been updated')
def main():
    excel_file = 'data_origin.xls'
    result_file = 'result.xlsx'
    data_frame = read_excel(excel_file=excel_file, sheet_name='Sheet1')
    companies = set_company(data_frame)
    write_excel(companies, file_name=result_file)
def load_model(file_path, wol_ratio=0.1, scale_obj_rxn_id='BIOMASS_SCALED',
               scale_obj_mtb_id='Bio_unscaled_w', verbose=False):
    """Loads the model from file_path, and then adjusts the wolbachia.

    The ratio of wolbachia / worm biomass in the objective function is set to
    wol_ratio, and all wolbachia reactions have their bounds scaled by the same
    ratio. If the model has been previously scaled, it is adjusted to the given
    ratio."""
    model = read_excel(file_path, verbose=verbose)
    set_model_wolbachia(model, wol_ratio, scale_obj_rxn_id, scale_obj_mtb_id)
    return model
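# Hedged usage sketch (not part of the original source): load a model at a 10%
# wolbachia ratio and report the flux through the objective. The file name
# 'model_bm_6.xlsx' is a placeholder borrowed from the run options elsewhere in
# this collection, and the legacy .optimize().f accessor simply matches the
# cobra API style used by the surrounding snippets.
example_model = load_model('model_bm_6.xlsx', wol_ratio=0.10, verbose=False)
print('%s objective: %.2f' % (example_model.id, example_model.optimize().f))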
def __init__(self):
    self.read_pdf = read_pdf()
    self.read_excel = read_excel()
    self.read_web = read_web()
    self.create_folder = create_folder
    self.choices = {
        "1": self.create_folder,
        "2": self.read_pdf.read_folder,
        "3": self.read_excel.read_folder,
        "4": self.read_web.read_folder,
        "9": self.display_help,
        "0": self.quit,
    }
def hello_world():
    form = QueForm()
    if request.method == 'GET':
        db = ExeMysql()
        mac = db.query()
        all_coa = []
        for ma in mac:
            all_coa.append(ma[0])
        return render_template('line-stack.html', form=form, mac=all_coa)
    else:
        f = request.files['file']
        exl_data = read_excel(f)
        db = ExeMysql()
        db.insert(exl_data)
        return render_template('line-stack.html', form=form)
if not i.endswith(".mat"):
    continue
m = cobra.io.load_matlab_model(os.path.join("mat", i))
m.id = i[:-4]
if m.id in open_boundaries:
    open_exchanges(m)
models.append(m)

# ### Some models are only available as Microsoft Excel files

# In[5]:

m = read_excel("xls/iJS747.xls", verbose=False, rxn_sheet_header=7)
models.append(m)

# In[6]:

m = read_excel("xls/iRM588.xls", verbose=False, rxn_sheet_header=5)
models.append(m)

# In[7]:

m = read_excel("xls/iSO783.xls", verbose=False, rxn_sheet_header=2)
models.append(m)
import pandas
from cobra import Model, Metabolite, Reaction
from read_excel import read_excel, write_excel

elegans_file = 'iCEL1273.xlsx'
model_in_file = 'model_b_mal.xlsx'
model_out_file = 'model_b_mal_2-wip.xlsx'
reversible_arrow = '<==>'
irreversible_arrow = ('-->', '<--')
l_bound, u_bound = -1000, 1000

old_m = read_excel(model_in_file)
pio = pandas.io.excel.ExcelFile(elegans_file)
rxn_frame = pandas.read_excel(elegans_file, 'Reactions')


def use_directionalities(old_m, disagreements):
    unresolved = []
    for r_id, cel_dir, m_dir, cel_products in disagreements:
        updated = False
        if cel_dir == 'reversible':
            rxn = old_m.reactions.get_by_id(r_id)
            rxn.bounds = (l_bound, u_bound)
            updated = True
        elif cel_dir == 'irreversible':
            if ',' not in r_id:  # combined reactions are handled manually
                cel_prods = set()
                for m in cel_products:
                    met = m.replace('M', 'C').replace('E', 'C')
            reaction.upper_bound = max(reaction.upper_bound, 10)
        else:
            reaction.lower_bound = min(reaction.lower_bound, -10)
    models.append(m)

# #### Some models are only available as Microsoft Excel files

# In[6]:

from read_excel import read_excel

# In[7]:

m = read_excel("xls/iJS747.xls", verbose=False, rxn_sheet_header=7)
m.change_objective("agg_GS13m_2")
models.append(m)

# In[8]:

m = read_excel("xls/iRM588.xls", verbose=False, rxn_sheet_header=5)
m.change_objective("agg_GS13m")
models.append(m)

# In[9]:

m = read_excel("xls/iSO783.xls", verbose=False, rxn_sheet_header=2)
m.change_objective("Biomass")
    'partial_wOv_manual_4.xlsx',
    'partial_wBm_manual_4.xlsx'
]
in_wol_common_file = 'partial_wol_common_4.xlsx'
wol_model_names = ['model_wOv_4', 'model_wBm_4']
out_nem_model_names = ['model_o_vol_4', 'model_b_mal_4']
nem_dont_delete = set(['R00104', 'R00161', 'R05332'])
nem_xfer_to_wol = set(['R01195', 'R05636'])

# # # Run-time options
save_wol_models = True
save_nem_models = True
# False to not; otherwise a number indicating how many iterations. Performs
# gap-filling using all nematode reactions not already added to the wol model.
perform_gap_filling = 5
# Check the effect of removing the wolbachia-only reactions.
test_removed_wol_rxns = False

# # # Run steps
in_model_files = [os.path.join(files_dir, m_file) for m_file in in_model_files]
in_models = [read_excel(m_file, verbose=False) for m_file in in_model_files]
in_wol_unique_files = [
    os.path.join(files_dir, m_file) for m_file in in_wol_unique_files
]
wol_unique_models = [
    read_excel(m_file, verbose=False) for m_file in in_wol_unique_files
]
wol_common_model = read_excel(os.path.join(files_dir, in_wol_common_file))
fvas = []
for m in in_models:
    optimize_minimal_flux(m)
    fvas.append(flux_variability_analysis(m))
wol_models, unused_models, unique_wol_ids = separate_wol_rxns(
    in_models, wol_unique_models, wol_common_model, wol_model_names,
    test_removed_wol_rxns)
        if rxn.id.startswith(rxn_pref_remove):
            rxns_to_remove.append(rxn.id)
    worm_m.remove_reactions(rxns_to_remove)
    worm_m.id = out_name
    write_excel(worm_m, out_file)
    print('%s saved to %s' % (worm_m, out_file))


# # # Inputs
files_dir = '/mnt/hgfs/win_projects/brugia_project'
worm_model_files = ['model_b_mal_4-sep.xlsx', 'model_o_vol_4-sep.xlsx']
wol_model_files = ['model_wBm_4-sep.xlsx', 'model_wOv_4-sep.xlsx']
out_model_names = ['model_b_mal_4.5', 'model_o_vol_4.5']

# # # Run-time options

# # # Run steps
worm_model_files = [
    os.path.join(files_dir, m_file) for m_file in worm_model_files
]
wol_model_files = [
    os.path.join(files_dir, m_file) for m_file in wol_model_files
]
worm_models = [
    read_excel(m_file, verbose=False) for m_file in worm_model_files
]
wol_models = [read_excel(m_file, verbose=False) for m_file in wol_model_files]

for worm_m, wol_m, out_name in zip(worm_models, wol_models, out_model_names):
    out_file = os.path.join(files_dir, out_name + '-wip.xlsx')
    merge_models(worm_m, wol_m, out_name, out_file)
    print('Originally %s yielded %.2f' % (test_m, orig_f))
    for rxn in good_m.reactions:
        r_id = rxn.id
        t_rxn = test_m.reactions.get_by_id(r_id)
        good_bounds, test_bounds = rxn.bounds, t_rxn.bounds
        if good_bounds != test_bounds:
            t_rxn.bounds = good_bounds
            new_f = test_m.optimize().f
            if new_f - orig_f > 1.0:
                print(r_id, new_f, good_bounds, test_bounds)
                print_biomass_impact(r_id, test_m, test_fva, good_f)
            t_rxn.bounds = test_bounds
    if test_biomass_impact:
        diffs = id_bottleneck_metabolites(test_m, orig_f, 'BIOMASS', threshold=1.0)
        # producing_r_id = mtb.id.upper()  # used to go a level deeper in biomass rxn.


files_dir = '/mnt/hgfs/win_projects/brugia_project'
good_model = 'model_b_mal_4.5-wip.xlsx'
test_model = 'model_b_mal_5_M30.xlsx'
test_biomass_impact = True

good_m = read_excel(os.path.join(files_dir, good_model))
test_m = read_excel(os.path.join(files_dir, test_model))
test_fva = cobra.flux_analysis.flux_variability_analysis(test_m)
compare_constraints(good_m, test_m, test_fva, test_biomass_impact)
from read_excel import read_excel
import random

dataset = input("file to be opened:")
sheet = input(f'sheet inside {dataset + ".xlsx"} to be opened:')
dirt, max_q = read_excel(dataset + ".xlsx", sheet)
q_num = input(f'There are {max_q} words, how many words you want to challenge? ')
score = 0
while True:
    ran_int = random.randint(0, int(q_num) - 1)
    print(ran_int)
    print(f'what is the hiragana of... {dirt[ran_int][0]}')
    answer = input('Answer: ')
    if answer == dirt[ran_int][1]:
        print("Correct!")
        score += 1
    else:
        print(f'Correct answer is {dirt[ran_int][1]}')

# tasks to do:
# 1. no repeated questions
# 2. program ends when it has gone through all the questions.
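# Hedged sketch (not part of the original script) addressing the two TODOs
# above: draw the requested number of questions without repeats by sampling the
# indices once, and end the quiz after the last one. Assumes dirt, max_q, q_num
# and score are defined as in the script above, and that max_q is an integer.
for ran_int in random.sample(range(max_q), int(q_num)):
    print(f'what is the hiragana of... {dirt[ran_int][0]}')
    answer = input('Answer: ')
    if answer == dirt[ran_int][1]:
        print("Correct!")
        score += 1
    else:
        print(f'Correct answer is {dirt[ran_int][1]}')
print(f'Final score: {score}/{q_num}')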
from matplotlib import pyplot as plt


def data_plot(data):
    x = [item[0].strftime('%m%d') for item in data]
    height = [item[1] for item in data]
    plt.figure(dpi=200)
    plt.bar(x, height, width=0.35)
    plt.title("Bar")
    plt.xlabel("month")
    plt.ylabel("month count")
    plt.tick_params(axis='x', labelsize=9, rotation=50)
    plt.show()


if __name__ == '__main__':
    import read_excel

    file = '/Users/admin/PycharmProjects/learn-python/python_coder_share/数据来源.xlsx'  # excel data source
    data = read_excel.read_excel(file, sheet='Sheet1')
    print(data)
    # data = [[datetime.datetime(2018, 12, 1, 0, 0), 227526, 16251], [datetime.datetime(2019, 1, 1, 0, 0), 263218, 22689],
    #         [datetime.datetime(2019, 2, 1, 0, 0), 189339, 9585], [datetime.datetime(2019, 3, 1, 0, 0), 116730, 9425],
    #         [datetime.datetime(2019, 4, 1, 0, 0), 383429, 21349], [datetime.datetime(2019, 5, 1, 0, 0), 247331, 12531],
    #         [datetime.datetime(2019, 6, 1, 0, 0), 181440, 14579], [datetime.datetime(2019, 7, 1, 0, 0), 236901, 16256],
    #         [datetime.datetime(2019, 8, 1, 0, 0), 200990, 15150]]
    # print(data)
    # x = [item[0].strftime('%m%d') for item in data]
    # y = [item[1] for item in data]
    # print(x, y)
    data_plot(data)
def run():
    # # # Parameters
    min_flux = -1000
    max_flux = 1000
    min_colour = '#ff0000'
    zero_colour = '#ffff00'
    max_colour = '#00ff00'
    colour_range = (min_colour, zero_colour, max_colour)

    # # # Run-time options
    wolbachia_percent = 10.0
    files_dir = '/mnt/hgfs/win_projects/brugia_project'
    #model_files = ['model_bm_5_L3.xlsx', 'model_bm_5_L3D6.xlsx', 'model_bm_5_L3D9.xlsx', 'model_bm_5_L4.xlsx']
    #model_files = ['model_bm_5_M30.xlsx', 'model_bm_5_F30.xlsx', 'model_bm_5_M42.xlsx', 'model_bm_5_F42.xlsx', 'model_bm_5_M120.xlsx', 'model_bm_5_F120.xlsx']
    #model_files = ['model_bm_5_L3.xlsx', 'model_bm_5_M30.xlsx', 'model_bm_5_F30.xlsx', 'model_bm_5_M42.xlsx', 'model_bm_5_F42.xlsx', 'model_bm_5_M120.xlsx', 'model_bm_5_F120.xlsx']
    #model_files = ['model_bm_6.xlsx', 'model_bm_6_lohg.xlsx', 'model_bm_6_holg.xlsx', 'model_bm_6_lolg.xlsx']
    #model_files = ['model_bm_6_L3.xlsx', 'model_bm_6_L3D6.xlsx', 'model_bm_6_L3D9.xlsx', 'model_bm_6_L4.xlsx']
    #model_files = ['model_bm_6_M30.xlsx', 'model_bm_6_F30.xlsx', 'model_bm_6_M42.xlsx', 'model_bm_6_F42.xlsx', 'model_bm_6_M120.xlsx', 'model_bm_6_F120.xlsx']
    model_files = ['model_bm_6.xlsx']
    mtb_cmp_str = '%s_wip.xlsx'
    verbose = True
    topology_analysis = False
    fba_analysis = False
    fva_analysis = True
    save_visualizations = False
    do_reaction_mtb_comparison = None  # 'C00080', or None
    test_nutrient_combos = False  # False is don't. 2 tests all combos of length 2, etc.
    show_wol_transports = False

    model_files = [os.path.join(files_dir, m_file) for m_file in model_files]
    models = [
        load_model(m_file, wol_ratio=wolbachia_percent / 100.0, verbose=verbose)
        for m_file in model_files
    ]
    set_nutrients = False
    if set_nutrients:
        #nutrients = [('CARBON_SOURCE', (0, 250)), ('DIFFUSION_2', (0, 580))]  # HOHG
        #nutrients = [('CARBON_SOURCE', (0, 45)), ('DIFFUSION_2', (0, 580))]  # HOLG
        #nutrients = [('CARBON_SOURCE', (0, 250)), ('DIFFUSION_2', (0, 90))]  # LOHG
        nutrients = [('CARBON_SOURCE', (0, 45)), ('DIFFUSION_2', (0, 90))]  # LOLG
        for model in models:
            for name, bounds in nutrients:
                model.reactions.get_by_id(name).bounds = bounds
    if do_reaction_mtb_comparison:
        new_model_files, new_models = [], []
        cel_m = read_excel(os.path.join(files_dir, 'iCEL1273.xlsx'), verbose=False)
        cel_m.reactions.BIO0103.objective_coefficient = 1.0
        for m_file, model in zip(model_files, models):
            mtb_outfile = mtb_cmp_str % m_file.rpartition('.')[0]
            m = model.copy()
            compare_reaction_mtbs(cel_m, m, do_reaction_mtb_comparison, mtb_outfile)
            new_model_files.append(mtb_outfile)
            new_models.append(read_excel(mtb_outfile))
        model_files += new_model_files
        models += new_models
    viz_strs = [
        os.path.split(m_file.rpartition('.')[0] + '_%s.txt')[1]
        for m_file in model_files
    ]
    for m in models:
        # Rather than m.optimize(); returns optimal FBA with minimum total flux through the network.
        cobra.flux_analysis.parsimonious.optimize_minimal_flux(m)
        #m.optimize()
    if topology_analysis:
        for m in models:
            basic_stats(m)
        # # compare_models(m1, m2)
    if verbose:
        for m in models:
            print('\n%s summary:' % m)
            m.summary()
    if fba_analysis:
        for m, viz_str in zip(models, viz_strs):
            analyze_shadows(m, 5)
            if save_visualizations:
                visualize_fba_reactions(m, (min_flux, max_flux), colour_range,
                                        to_file_str=viz_str)
        # # compare_objective_functions(m1.reactions.get_by_id('BIOMASS'), m2.reactions.get_by_id('BIOMASS'))
    fvas = []
    if fva_analysis:
        for m, viz_str in zip(models, viz_strs):
            m_fva = cobra.flux_analysis.flux_variability_analysis(m)
            fvas.append(m_fva)
            if save_visualizations:
                visualize_fva_reactions(m_fva, (min_flux, max_flux),
                                        colour_range, viz_str)
        active_rxn_mtb_analysis(models, fvas)
        pathway_analysis(models, fvas, pathways_for_analysis)
        if show_wol_transports:
            wol_transport_analysis(models, fvas)
    #assess_metabolites_impact(models[0], models[0].metabolites)
    if test_nutrient_combos:
        desc_str = '\nNutrient imports'
        print('%s\n%s' % (desc_str, '-' * len(desc_str.strip())))
        m, orig_fva = models[2], fvas[2]
        bounds_deltas = (0, 60)
        rxn_ids = ['NUTRIENTS_%i' % i for i in range(1, 21)]
        rxn_combs = list(itertools.combinations(rxn_ids, test_nutrient_combos))
        diffs = [
            test_changed_constraints(r_ids, m, orig_fva, bounds_deltas)
            for r_ids in rxn_combs
        ]
        diffs = [d for d in diffs if d]
        diffs.sort(key=lambda d: -d[0])
        for d in diffs:
            print(d)
    return models, fvas
            rxn.name = cel_rxn.name
            rxn.bounds = cel_rxn.bounds
            rxn.subsystem = cel_rxn.subsystem
            rxn.enzyme_commission = cel_rxn.enzyme_commission
            model.add_reaction(rxn)
            rxn.build_reaction_from_string(cel_rxn.reaction)
    return True


# # # Options
files_dir = '/mnt/hgfs/win_projects/brugia_project'
model_files = ['model_o_vol_3.xlsx', 'model_b_mal_3.xlsx']
out_files = ['model_o_vol_3.5-wip.xlsx', 'model_b_mal_3.5-wip.xlsx']
do_deletions = True

cel_m = read_excel(os.path.join(files_dir, 'iCEL1273.xlsx'), verbose=False)
#cel_m.reactions.BIO0101.objective_coefficient = 1.0  # # # TESTING ONLY
#cobra.flux_analysis.parsimonious.optimize_minimal_flux(cel_m)
models = [
    read_excel(os.path.join(files_dir, m_file), verbose=False)
    for m_file in model_files
]
for m, out_file in zip(models, out_files):
    if do_deletions:
        modify_model(m, cel_m, do_deletions)
        write_excel(m, os.path.join(files_dir, out_file))
    else:
        print('Obj before modifications: %.1f' % (m.optimize().f))
        rxns_to_delete = modify_model(m, cel_m, do_deletions)
        diffs = []
"""Run on the models_V3.5. Adds mtb compartments, ensures names are transferred, and renames the GXXXXX ids. """ import os from read_excel import read_excel, write_excel files_dir = '/mnt/hgfs/win_projects/brugia_project' in_model_names = ['model_o_vol_3.5', 'model_b_mal_3.5'] in_model_files = [os.path.join(files_dir, m_file+'.xlsx') for m_file in in_model_names] out_model_files = [os.path.join(files_dir, m_file+'-wip.xlsx') for m_file in in_model_names] in_models = [read_excel(m_file, verbose=False) for m_file in in_model_files] for m in in_models: for mtb in m.metabolites: if mtb.id == 'FA_storage_mix': continue elif mtb.id[0] == 'C': mtb.compartment = 'c' if mtb.id[1] == 'C': continue elif mtb.id[0] == 'M': mtb.compartment = 'm' if mtb.id[1] == 'C' or mtb.name: continue c_id = 'C' + mtb.id[1:] if c_id in m.metabolites and m.metabolites.get_by_id(c_id).name: mtb.name = m.metabolites.get_by_id(c_id).name elif mtb.id[0] == 'G': pass for m, out_file in zip(in_models, out_model_files): write_excel(m, out_file)
for i in sorted(os.listdir("mat")):
    if not i.endswith(".mat"):
        continue
    m = cobra.io.load_matlab_model(os.path.join("mat", i))
    m.id = i[:-4]
    if m.id in open_boundaries:
        open_exchanges(m)
    models.append(m)

# ### Some models are only available as Microsoft Excel files

# In[5]:

m = read_excel("xls/iJS747.xls", verbose=False, rxn_sheet_header=7)
models.append(m)

# In[6]:

m = read_excel("xls/iRM588.xls", verbose=False, rxn_sheet_header=5)
models.append(m)

# In[7]:

m = read_excel("xls/iSO783.xls", verbose=False, rxn_sheet_header=2)
models.append(m)

# In[8]:

m = read_excel("xls/iCR744.xls", rxn_sheet_header=4, verbose=False)
print("please input excel file and sheet name, for example:") print("python signal_fft.py \"imu_sample.xlsx\" \"static\" 5") sys.exit(1) print(sys.argv[0]) print(sys.argv[1]) print(sys.argv[2]) print(sys.argv[3]) print(type(sys.argv[0])) print(type(sys.argv[1])) print(type(sys.argv[2])) print(type(sys.argv[3])) #y=read_excel(r'imu_data.xlsx', '04', 0) y = read_excel(sys.argv[1], sys.argv[2], int(sys.argv[3])) n = len(y) # length of the signal k = np.arange(n) T = n / Fs t = np.arange(0, T, Ts) # time vector,这里Ts也是步长 frq = k / T # two sides frequency range frq1 = frq[range(int(n / 2))] # one side frequency range YY = np.fft.fft(y) # 未归一化 Y = np.fft.fft(y) / n # fft computing and normalization 归一化 Y1 = Y[range(int(n / 2))] fig, ax = plt.subplots(4, 1) ax[0].plot(t, y) ax[0].set_xlabel('Time') ax[0].set_ylabel('Amplitude')
import read_excel as re

file_name = sys.argv[1]  # full name of the excel file
JENKINS_QUDAOS = sys.argv[2]  # e.g. ["HW","WX","GDT",2,-1];["XM","WX","GDT",1,-1]
IS_DEBUG_MODE = sys.argv[3]  # true or false

if __name__ == "__main__":
    if not os.path.exists(constant.base_path):
        constant.base_path = os.path.abspath('')
    constant.set()
    # Start by cleaning up all previously generated projects under apps.
    appAbsPath = os.path.join(constant.get_base_path(), constant.apps_path)
    if not os.path.exists(appAbsPath):
        os.makedirs(appAbsPath)
    else:
        print('app path exists')
    excel_json = re.read_excel(constant.excel_file)
    re.temp_write_out_file(excel_json)
    copy_file.cpDirs(constant.old_path, constant.new_path)
    builder = am.AndroidProjectMaker.Builder().set_excel_json(excel_json).set_base_path(
        '%s/%s' % (constant.get_base_path(), constant.apps_path)
    ).set_qudaos(JENKINS_QUDAOS).build(IS_DEBUG_MODE)
def predict(result_filename, testing_data_filename, model_name, weight, column):
    path = "./result/" + result_filename
    newdirs = []
    for root, dirs, files in walk(testing_data_filename):
        if files:
            for i in range(len(files)):
                newdirs.append(root + '/' + files[i])
                print(root + '/' + files[i])
    model = load_model(path + '/' + weight)
    print(model.summary())
    for i in range(len(newdirs)):
        name_tmp = newdirs[i].split('/')
        print(name_tmp)
        name = name_tmp[2] + '_' + name_tmp[3] + '_' + name_tmp[4]
        x_newtest, y_newtest = read_excel(newdirs[i])
        train_inputs, d1 = convert_data(x_newtest, y_newtest, column)
        num_event = np.size(train_inputs, 0)
        num_order = np.size(train_inputs, 1)
        num_sensor = np.size(train_inputs, 2)
        train_inputs = np.reshape(train_inputs,
                                  [num_event, 1, num_order, num_sensor, 1])
        Y_pred = model.predict(train_inputs)
        MAE = mean_absolute_error(d1, Y_pred)
        MAE = '%.4f' % (MAE)
        print(MAE)
        # Plot predicted vs. true SOC over the time steps.
        output = []
        for m in range(len(Y_pred)):
            output.append(Y_pred[m][0])
            # d1[m] = d1[m]/100
        nlist = np.arange(0, len(Y_pred))
        fig, ax = plt.subplots()
        ax.plot(nlist, output, label='predict')
        ax.plot(nlist, d1, label='true')
        ax.set(xlabel='time step', ylabel='SOC(%)',
               title=name + "\nmean absolute error =" + str(MAE))
        ax.grid()
        ax.legend()
        fig.savefig(path + "/" + model_name + '_' + name + ".png")
        #plt.show()
        # Write the predicted and true values to a CSV file.
        temp = []
        temp.append(['predict', 'real'])
        for m in range(len(output)):
            temp.append([output[m], d1[m]])
        with open(path + "/" + model_name + '_' + name + ".csv", 'w',
                  newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerows(temp)
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    m = (cobra.io.read_legacy_sbml(filepath) if model_id in legacy_SBML
         else cobra.io.read_sbml_model(filepath))
m.id = m.description = model_id.replace(".", "_")
if m.id in open_boundaries:
    open_exchanges(m)
models.append(m)

# ### Some models are only available as Microsoft Excel files

# In[4]:

m = read_excel("xls/iJS747.xls", verbose=False, rxn_sheet_header=7)
models.append(m)

# In[5]:

m = read_excel("xls/iRM588.xls", verbose=False, rxn_sheet_header=5)
models.append(m)

# In[6]:

m = read_excel("xls/iSO783.xls", verbose=False, rxn_sheet_header=2)
models.append(m)