def schkolnik_betaPic(annotate=False):
    """Load the Shkolnik 2017 beta Pic lithium sample from its bibliography CSV.

    Rows already flagged in column 13 are skipped; rows that fail parsing,
    membership ('Y' in column 12), positivity, or the li_constants range
    check are flagged 'OOR' in the working copy of the table.

    Returns parallel lists:
        (B-V, log10(EW), upper-limit flags, short names,
         B, V, spectral type, reference, long name)
    B and V are not available in this source, so -999 sentinels are stored.
    """
    import li_constants as li_const
    table = np.genfromtxt("data/Shkolnik_2017_betaPic_bib.csv",
                          delimiter=',', dtype=str)
    bv_vals, log_ews, upper_lims, names = [], [], [], []
    B, V, SPT, REF, Name = [], [], [], [], []
    for idx, row in enumerate(table[1:]):
        if row[13] != '':
            continue  # row already annotated on a previous pass
        # Row is usable only if B-V and EW parse, membership is 'Y',
        # and EW is strictly positive (needed for the log below).
        usable = (utils.isFloat(row[1]) and utils.isFloat(row[5])
                  and row[12] == 'Y' and float(row[5]) > 0)
        if not usable:
            table[idx + 1, 13] = 'OOR'
            continue
        bv = float(row[1])
        log_ew = np.log10(float(row[5]))
        if not li_const.inRange(bv, log_ew):
            table[idx + 1, 13] = 'OOR'
            continue
        bv_vals.append(bv)
        log_ews.append(log_ew)
        upper_lims.append(row[4] == '<')  # '<' marks an EW upper limit
        names.append(row[0])
        B.append(-999)
        V.append(-999)
        SPT.append(row[2])
        REF.append(row[16])
        Name.append(row[15])
    if annotate:
        np.savetxt("data/Shkolnik_2017_betaPic_annotated.csv", table,
                   delimiter=',', fmt='%s')
    return bv_vals, log_ews, upper_lims, names, B, V, SPT, REF, Name
def extract_number(toks):
    """Split a leading number off a token list.

    Recognizes ints, floats, and fractions; an int immediately followed by
    a fraction (e.g. ['3', '1/2']) is consumed together as a mixed number.

    Args:
        toks: list of string tokens ('(' and ')' tokenised separately).

    Returns:
        (number_tokens, remaining_tokens); number_tokens is [] when the
        list does not start with a numeric token.
    """
    def _is_numeric(tok):
        # Any of the three numeric token kinds the tokenizer produces.
        return utils.isInt(tok) or utils.isFloat(tok) or utils.isFrac(tok)

    if not toks or not _is_numeric(toks[0]):
        return [], toks
    if len(toks) > 1:
        # Mixed number: whole part + fraction part are one value.
        if utils.isInt(toks[0]) and utils.isFrac(toks[1]):
            return toks[:2], toks[2:]
        return toks[:1], toks[1:]
    # Single remaining token, already known numeric from the first guard.
    # (The original re-checked it here with an unreachable else branch.)
    return toks, []
def result(self, myval):
    """Grade the answer string *myval* against self.correct_result.

    Accepts either a plain number or a fraction written as "a/b" (spaces
    around the slash are allowed, matching the advertised "3 / 4" example).
    Sets self.r (answer correct?) and self.invalid (answer parseable?) and
    returns a feedback string.
    """
    self.r = False
    if isFloat(myval):
        self.invalid = False
        if abs(float(myval) - self.correct_result) <= self.tolerance:
            self.r = True
            return 'Well Done!'
        return 'Not quite'
    if myval.find('/') == -1:
        comment = 'Format needs to be 2 numbers separated with the / e.g. 3 / 4'
    else:
        # Strip whitespace so "3 / 4" parses; the bare split used to leave
        # '3 ' / ' 4', which .isnumeric() rejects.
        parts = [p.strip() for p in myval.split('/')]
        if parts[0].isnumeric() and parts[1].isnumeric():
            if int(parts[1]) == 0:
                # Guard against ZeroDivisionError on inputs like "3/0".
                comment = 'Denominator cannot be zero'
            elif abs((int(parts[0]) / int(parts[1])) - self.correct_result) \
                    <= self.tolerance:
                comment = 'Well Done!'
                self.invalid = False
                self.r = True
            else:
                comment = 'Not quite'
                self.invalid = False
        else:
            comment = 'Both parts must be numbers'
    return comment
def tuchor(annotate=False):
    """Load the Tuc-Hor lithium sample (B-V, log10(EW), EW errors).

    Rows pre-flagged in column 14 are skipped; unparseable or out-of-bounds
    rows get flagged 'OOR' in the working table. With annotate=True the
    flagged table is written back out.

    Returns:
        (np.array of B-V, np.array of log10(EW), list of EW errors)
    """
    import li_constants as const
    tuchor_c = []     # B-V colors
    tuchor_l = []     # raw EWs (log10 applied at the end)
    tuchor_lerr = []  # EW errors; unused local tuchor_bverr removed
    t = ascii.read(join('data', 'tuchor_updated_err_bv_bib.csv'), delimiter=',')
    for i, line in enumerate(t[1:]):
        if line[14] != '':
            continue  # already annotated on a previous pass
        if not utils.isFloat(line[13]):
            t[i + 1, 14] = 'OOR'  # flag unparseable B-V
            continue
        bv = float(line[13])
        ew = float(line[3])
        if in_bounds(bv, ew, const):
            tuchor_c.append(bv)
            tuchor_l.append(ew)
            tuchor_lerr.append(float(line[4]))
        else:
            t[i + 1, 14] = 'OOR'
    if annotate:
        np.savetxt(join("data", "tuchor_updated_err_bv_annotated.csv"), t,
                   delimiter=',', fmt='%s')
    tuchor_c, tuchor_l = np.array(tuchor_c), np.log10(np.array(tuchor_l))
    return tuchor_c, tuchor_l, tuchor_lerr
def tuchor(annotate=False):
    """Load the Tuc-Hor lithium sample with bibliography columns.

    Same filtering as the shorter variant, but also collects B, V magnitudes,
    spectral type, reference, and star name for the accepted rows.

    Returns:
        (np.array of B-V, np.array of log10(EW), list of EW errors,
         B, V, SPT, REF, Name lists — parallel to the accepted rows)
    """
    import li_constants as const
    tuchor_c = []     # B-V colors
    tuchor_l = []     # raw EWs (log10 applied at the end)
    tuchor_lerr = []  # EW errors; unused local tuchor_bverr removed
    B, V, Name, SPT, REF = [], [], [], [], []
    t = ascii.read('data/tuchor_updated_err_bv_bib.csv', delimiter=',')
    for i, line in enumerate(t[1:]):
        if line[14] != '':
            continue  # already annotated on a previous pass
        if not utils.isFloat(line[13]):
            t[i + 1, 14] = 'OOR'  # flag unparseable B-V
            continue
        bv = float(line[13])
        ew = float(line[3])
        if in_bounds(bv, ew, const):
            tuchor_c.append(bv)
            tuchor_l.append(ew)
            tuchor_lerr.append(float(line[4]))
            B.append(line[10])
            V.append(line[11])
            SPT.append(line[12])
            REF.append(line[15])
            Name.append(line[0])
        else:
            t[i + 1, 14] = 'OOR'
    if annotate:
        np.savetxt("data/tuchor_updated_err_bv_annotated.csv", t,
                   delimiter=',', fmt='%s')
    tuchor_c, tuchor_l = np.array(tuchor_c), np.log10(np.array(tuchor_l))
    return tuchor_c, tuchor_l, tuchor_lerr, B, V, SPT, REF, Name
def average(values):
    """Mean of the float-parseable entries of *values*, returned as a string.

    Entries that don't parse as floats are ignored; returns '' when nothing
    parses. (Renamed the parameter from `list` and local from `sum`, which
    shadowed the builtins; callers use it positionally.)
    """
    total, count = 0.0, 0
    for x in values:
        if utils.isFloat(x):
            total += float(x)
            count += 1
    return str(total / count) if count > 0 else ''
def getDynData(filename):
    """
    Reads out delay time, Autocorrelation function and standard deviation
    returns: list of delay times tau, AKF, Measurement Time and Countrate
    """
    # State machine over the file: data rows appear AFTER their section
    # headers, so each flag is set only after the header line is processed.
    in_correlation = False
    in_count_rate = False
    delays, akf_vals, meas_times, count_rates = [], [], [], []
    with open(filename, "r") as infile:
        for raw in infile:
            if in_correlation:
                # Dynamic values follow the "Correlation" header.
                cols = raw.split()
                try:
                    if ut.isFloat(cols[0]) and ut.isFloat(cols[1]):
                        delays.append(float(cols[0]))
                        akf_vals.append(float(cols[1]))
                except IndexError:
                    pass  # short/blank line — skip
            if "Correlation" in raw:
                in_correlation = True
            if in_count_rate:
                cols = raw.split()
                try:
                    if ut.isFloat(cols[0]):
                        meas_times.append(float(cols[0]))
                except IndexError:
                    pass
                try:
                    if ut.isFloat(cols[1]):
                        count_rates.append(float(cols[1]))
                except IndexError:
                    pass
            if "Count Rate" in raw:
                # "Count Rate" ends the correlation section and starts this one.
                in_correlation = False
                in_count_rate = True
    return delays, akf_vals, meas_times, count_rates
def CleanUpFixString(fixvalue):
    """
    If string starts and ends with matching quote marks, remove these;
    also convert None, NULL (and UNKNOWN), and convert to int/double.
    """
    # This is ugly: we're stripping quote marks off the fixvalue, even when it's a string.
    # They're required in the test value, but not allowed in the fix value,
    # so we'll allow the user to enter them in both places, and remove them here.
    # The len >= 2 check fixes the edge case of a single quote character,
    # which previously matched itself (fixvalue[0] == fixvalue[-1]) and was
    # stripped down to an empty string.
    if (fixvalue and len(fixvalue) >= 2 and fixvalue[0] == fixvalue[-1]
            and (fixvalue[0] == "'" or fixvalue[0] == '"')):
        fixvalue = fixvalue[1:-1]
    #if fixvalue.upper() == "UNKNOWN":
    #    return -32767  # TODO: is this a good idea? other values to accept?
    if fixvalue.upper() == "NULL" or fixvalue.upper() == "NONE":
        return None  # make sure to return, the following lines will choke on a None
    if fixvalue.isdigit():
        return int(fixvalue)
    if utils.isFloat(fixvalue):  # this will also match on int, so check that first
        return float(fixvalue.replace(",", ".", 1))  # accept either , or . as decimal separator
    return fixvalue
def getConstVal(filename, Name):
    """
    Reads out constants from ALV autogenerated file.
    filename: Name of the file to read angle from.
    Name: The name of the constant the function should look for
    returns: constant value.
    """
    with open(filename, "r") as handle:
        for row in handle:
            if Name not in row:
                continue
            # First float-parseable token on the matching line is the value.
            for token in row.split():
                if ut.isFloat(token):
                    return float(token)
    print("Nothing found.")
    return
def CleanUpFixString(fixvalue):
    """
    If string starts and ends with matching quote marks, remove these;
    also convert None, NULL (and UNKNOWN), and convert to int/double.
    """
    # This is ugly: we're stripping quote marks off the fixvalue, even when it's a string.
    # They're required in the test value, but not allowed in the fix value,
    # so we'll allow the user to enter them in both places, and remove them here.
    # The len >= 2 check fixes the edge case of a single quote character,
    # which previously matched itself (fixvalue[0] == fixvalue[-1]) and was
    # stripped down to an empty string.
    if (fixvalue and len(fixvalue) >= 2 and fixvalue[0] == fixvalue[-1]
            and (fixvalue[0] == "'" or fixvalue[0] == '"')):
        fixvalue = fixvalue[1:-1]
    #if fixvalue.upper() == "UNKNOWN":
    #    return -32767  # TODO: is this a good idea? other values to accept?
    if fixvalue.upper() == "NULL" or fixvalue.upper() == "NONE":
        return None  # make sure to return, the following lines will choke on a None
    if fixvalue.isdigit():
        return int(fixvalue)
    if utils.isFloat(fixvalue):  # this will also match on int, so check that first
        return float(fixvalue.replace(",", ".", 1))  # accept either , or . as decimal separator
    return fixvalue
# --- command-line option defaults for the BAFFLES age-dating script ---
showPlots = False
fileName = 'baffles'
savePlots = False
upperLim = False
maxAge = const.GALAXY_AGE
# Flags the user may pass; extra_flags are tolerated aliases/typos.
valid_flags = [
    '-bmv', '-rhk', '-li', '-li_err', '-bmv_err', '-plot', '-savePlot', '-ul',
    '-maxAge', '-s', '-filename', '-help'
]
extra_flags = [
    '-Plot', '-plots', '-Plots', '-savePlots', '-saveplots', '-saveplot',
    '-UL', '-save'
]
# Reject unknown tokens. Because enumerate starts at argv[1], argv[i] is the
# argument PRECEDING ar — so any token directly after '-filename' (the
# user-chosen filename) is accepted unchecked, as are bare numbers.
for i, ar in enumerate(argv[1:]):
    if ar not in valid_flags and ar not in extra_flags \
            and not utils.isFloat(ar) and argv[i] != '-filename':
        print("Invalid flag '" + ar + "'. Did you mean one of these:")
        print(valid_flags)
        exit()
try:
    if ('-bmv' in argv):
        bv = float(argv[argv.index('-bmv') + 1])
    if ('-rhk' in argv):
        rhk = float(argv[argv.index('-rhk') + 1])
        import ca_constants as const  # calcium constants override the default
    # NOTE(review): bv/rhk are only bound above when their flags are present —
    # presumably they are initialized earlier in the file; confirm, otherwise
    # omitting '-bmv'/'-rhk' raises NameError here.
    if bv is not None and (
            not (const.BV_RANGE[0] <= bv <= const.BV_RANGE[1])):
        print("B-V out of range. Must be in range " + str(const.BV_RANGE))
        sys.exit()
    # (chunk truncated here in SOURCE — body of this check continues elsewhere)
    if (not (const.METAL_RANGE[0] <= rhk <= const.METAL_RANGE[1])):
def verifyCallVar(tree):
    """Semantic pass: validate variable/function usage and assignment types.

    Walks the parse `tree` (anytree-style nodes; grammar node names are in
    Portuguese: 'var' = variable, 'indice' = array index, 'atribuicao' =
    assignment, 'escreva'/'leia' = write/read, 'chamada_funcao' = function
    call, 'inteiro'/'flutuante' = integer/float) against the symbol table
    returned by walkTable(). Reports problems via showErrors() and
    terminates with exit(0) on the first hard error.
    """
    content = walkTable()   # symbol-table entries (list of dicts)
    varTable = []           # declared variable lexemes
    varTableTypes = []      # declared vars encoded as "<lexeme>_<type>"
    varTree = []            # every variable occurrence found in the tree
    params = []             # declared parameter lexemes
    attrVar = []            # simple assignments: "<target>_<value>_<var|func>"
    funcsTable = []         # declared functions encoded as "<lexeme>_<type>"
    exp = []                # arithmetic-assignment operands: "<target>_<operand>"
    temp = []               # vars appearing inside escreva/leia statements
    # ---- phase 1: collect usage facts from the parse tree ----
    for e in PreOrderIter(tree):
        if name(e) == 'var':
            varTree.append(name(e.children[0]))
            if 'escreva' in str(e.ancestors) or 'leia' in str(e.ancestors):
                temp.append(name(e.children[0]))
        if name(e) == 'indice':
            # Array index must be an integer literal; error 13 otherwise.
            # (Precedence note: `A and B or C` groups as `(A and B) or C`.)
            for i in PreOrderIter(e):
                if i.is_leaf and name(i.parent) != 'numero' or '.' in name(i):
                    showErrors(getLine(name(e.siblings[0])), 'err',
                               name(e.siblings[0]), 13)
                    exit(0)
        if name(e) == 'var' and name(e.parent) == 'atribuicao':
            if ('operador_soma' not in str(e.parent.descendants)
                    and 'operador_multiplicacao' not in str(e.parent.descendants)):
                # Simple assignment: record the value leaf and whether it
                # came from a function call.
                for i in PreOrderIter(e.siblings[0]):
                    if i.is_leaf and name(i.parent) != 'chamada_funcao':
                        attrVar.append(name(e.children[0])+'_'+name(i)+'_var')
                    elif i.is_leaf and name(i.parent) == 'chamada_funcao':
                        attrVar.append(name(e.children[0])+'_'+name(i)+'_func')
            else:
                # Arithmetic expression: record each leaf operand on both
                # sides of the operator, keyed by the assignment target.
                for i in PreOrderIter(e.siblings[0]):
                    if name(i).startswith('operador_'):
                        for j in PreOrderIter(i.siblings[0]):
                            if j.is_leaf:
                                exp.append(name(e.children[0])+'_'+name(j))
                        for k in PreOrderIter(i.siblings[1]):
                            if k.is_leaf:
                                exp.append(name(e.children[0])+'_'+name(k))
    # ---- phase 2: collect declarations from the symbol table ----
    for item in content:
        if 'info' in item:
            if 'lexema' in item['info']:
                varTable.append(item['info']['lexema'])
                varTableTypes.append(item['info']['lexema']+'_'+item['tipo'])
            else:
                # 'info' may hold a list of declarations sharing one type.
                for e in range(len(item['info'])):
                    if 'lexema' in item['info'][e]:
                        varTable.append(item['info'][e]['lexema'])
                        varTableTypes.append(
                            item['info'][e]['lexema']+'_'+item['tipo'])
        elif 'parametros' in item and len(item['parametros']) > 0:
            for e in range(len(item['parametros'])):
                params.append(item['parametros'][e]['lexema'])
        if 'categoria' in item and item['categoria'] == 'funcao':
            funcsTable.append(item['lexema']+'_'+item['tipo'])
    # ---- phase 3: type-check simple assignments (error 20) ----
    for e in varTableTypes:
        nameVt = e.split('_')[0]
        typeVt = e.split('_')[1]
        for i in attrVar:
            nameVar = i.split('_')[0]
            receptVar = i.split('_')[1]
            category = i.split('_')[2]
            if category == 'var' and nameVt == nameVar:
                for j in varTableTypes:
                    # Assigned variable's declared type must match the target's.
                    if j.split('_')[0] == receptVar and j.split('_')[1] != typeVt:
                        showErrors(getLine(nameVt), 'err', nameVt, 20)
                        exit(0)
                    elif receptVar.isdigit():
                        # Integer literal assigned: target must be 'inteiro'.
                        if typeVt != 'inteiro':
                            showErrors(getLine(nameVar), 'err', nameVar, 20)
                            exit(0)
                    elif isFloat(receptVar):
                        # Float literal assigned: target must be 'flutuante'.
                        if typeVt != 'flutuante':
                            showErrors(getLine(nameVar), 'err', nameVar, 20)
                            exit(0)
    # ---- phase 4: type-check assignments from function calls (error 20) ----
    for e in funcsTable:
        nameFunc = e.split('_')[0]
        typeFunc = e.split('_')[1]
        for i in attrVar:
            nameVar = i.split('_')[0]
            receptVar = i.split('_')[1]
            category = i.split('_')[2]
            for j in varTableTypes:
                if (category == 'func' and nameFunc == receptVar
                        and typeFunc != j.split('_')[1]
                        and nameVar == j.split('_')[0]):
                    showErrors(getLine(nameVar), 'err', nameVar, 20)
                    exit(0)
    # ---- phase 5: type-check arithmetic-expression operands (error 20) ----
    for e in varTableTypes:
        nameVt = e.split('_')[0]
        typeVt = e.split('_')[1]
        for j in exp:
            nameVar = j.split('_')[0]
            receptVar = j.split('_')[1]
            if nameVar == nameVt:
                for k in varTableTypes:
                    if k.split('_')[0] == receptVar and k.split('_')[1] != typeVt:
                        showErrors(getLine(nameVar), 'err', nameVar, 20)
                        exit(0)
                    elif receptVar.isdigit():
                        if typeVt != 'inteiro':
                            showErrors(getLine(nameVar), 'err', nameVar, 20)
                            exit(0)
                    elif isFloat(receptVar):
                        if typeVt != 'flutuante':
                            showErrors(getLine(nameVar), 'err', nameVar, 20)
                            exit(0)
    # ---- phase 6: usage warnings / undeclared-variable errors ----
    # Variables used exactly once and never assigned: warning 21.
    noRepeat = []
    for e in varTree:
        if varTree.count(e) == 1:
            noRepeat.append(e)
    for e in noRepeat:
        if e not in params and e in varTable:
            linha = getLine(e)
            showErrors(linha, 'warn', e, 21)
    # Used but never declared (and not a parameter): error 14.
    for i in range(0, len(varTree)):
        element = varTree[i]
        if element not in varTable and element not in params:
            linha = getLine(element)
            showErrors(linha, 'err', element, 14)
            exit(0)
    # Declared but never used (or only used in escreva/leia without
    # ever being assigned): warning 1.
    for i in range(0, len(varTable)):
        element = varTable[i]
        if element not in varTree or element in temp and element not in attrVar:
            linha = getLine(element)
            showErrors(linha, 'warn', element, 1)
def select_model(args):
    '''
    Builds a VAE/PixelVAE/PixelCNN model from the encoded model-name string.

    Args:
        model_name: pixelvae_with_1_kl_10_mmd
    Returns:
        A model selected from the parameters provided
    '''
    # Model name encodes the architecture:
    #   "pixelcnn_<layers>"                       -> plain PixelCNN
    #   "<normal|categ>_<vae|pixelvae>_<kl>_kl_<mmd>_mmd" -> (Pixel)VAE
    model_name = args.model
    splits = model_name.split("_")
    ''' Assertions for model name i.e a string '''
    # It is only pixelcnn
    if len(splits) == 2:
        assert (splits[0] == "pixelcnn"), "It has to be only pixelcnn_2/4/7"
        assert (isInt(splits[1])), "The number of layers has to be an int"
        only_pixelcnn = True
        use_pixelcnn = True
        args.num_pixelcnn_layers = int(splits[1])  # layer count comes from the name
        model_params = {
            'model_name': "PixelCNN",
            'is_decoder_out_normal': False,
            'only_pixelcnn': only_pixelcnn,
            'use_pixelcnn': use_pixelcnn,
            "coeff_kl": 0.,   # no latent space -> no KL / MMD terms
            "coeff_mmd": 0.
        }
    # It is either pixelvae or vae
    else:
        only_pixelcnn = False
        # normal_vae_0_kl_0_mmd
        assert (
            len(splits) == 6 and "vae" in splits[1]
        ), "model name should be of the format normal_pixelvae_1_kl_10_mmd"
        assert (splits[1] == "pixelvae"
                or splits[1] == "vae"), "model should be vae or pixelvae"
        assert (isFloat(splits[2])
                and isFloat(splits[4])), "coefficients should be numeric"
        use_pixelcnn = splits[1] == "pixelvae"
        is_normal = splits[0] == "normal"
        # If we are using normal distribution for P(x_hat/z) in decoder-output, then
        model_params = {
            'is_decoder_out_normal': is_normal,
            'only_pixelcnn': False,
            'use_pixelcnn': use_pixelcnn,
            "coeff_kl": float(splits[2]),
            "coeff_mmd": float(splits[4])
        }
        if use_pixelcnn:
            model_params['model_name'] = "PixelVAE"
            # If it is PixelVAE and it is not normal then out_channels should be > in_channels
            if not model_params['is_decoder_out_normal']:
                assert args.decoder_out_channels > args.input_channels, "decoder_out_channels should be > input_channels when categorical_pixelvae else simply use normal_pixelvae"
        else:
            model_params['model_name'] = "VAE"
    # assert (
    #     model_params['use_pixelcnn'] == (
    #         args.sigma_decoder == 0)), "sigma_decoder should be 0 when using vae and non-zero when using pixelvae/pixelcnn"
    assert not (
        model_params['is_decoder_out_normal'] and not use_pixelcnn ==
        (args.sigma_decoder == 0)
    ), "sigma_decoder should be 0 when using vae and non-zero when using pixelvae/pixelcnn"
    if model_params['use_pixelcnn']:
        assert (
            args.num_pixelcnn_layers >= 2
        ), "num of pixelcnn layers should be greater than 2 when using pixelvae/pixelcnn"
    if model_params['use_pixelcnn']:
        assert (model_params['use_pixelcnn']
                and (args.pixelcnn_activation == "ReLu"
                     or args.pixelcnn_activation == "ELU")), "Choose either Relu or ELU"
    # Copy the remaining architecture knobs straight from args.
    model_params['input_channels'] = args.input_channels
    model_params['input_image_size'] = args.input_image_size
    model_params['intermediate_channels'] = args.intermediate_channels
    model_params['z_dimension'] = args.z_dimension
    model_params['sigma_decoder'] = args.sigma_decoder
    model_params['require_rsample'] = args.require_rsample
    # NOTE(review): 'input_image_size' is assigned twice — harmless duplicate.
    model_params['input_image_size'] = args.input_image_size
    model_params['num_pixelcnn_layers'] = args.num_pixelcnn_layers
    model_params['pixelcnn_activation'] = args.pixelcnn_activation
    model_params['coeff_nll'] = args.nll
    # Could be PixelCNN or PixelVAE
    if use_pixelcnn:
        model_params['pixelcnn_out_channels'] = int(args.quantization)
        # If PixelVAE
        if not only_pixelcnn:
            if model_params['is_decoder_out_normal']:
                model_params['decoder_out_channels'] = args.input_channels
            else:
                model_params[
                    'decoder_out_channels'] = args.decoder_out_channels
        else:
            model_params['decoder_out_channels'] = 0
    # If VAE
    else:
        model_params['pixelcnn_out_channels'] = 0
        # Decoder output follows normal distribution then output channels will be same as input channels
        if model_params['is_decoder_out_normal']:
            model_params['decoder_out_channels'] = model_params[
                'input_channels']
        # Decoder output follows categoriacal distribution then output channels will be same as quantization
        else:
            model_params['decoder_out_channels'] = int(args.quantization)
    model = VAE(in_channels=model_params['input_channels'],
                intermediate_channels=model_params['intermediate_channels'],
                decoder_out_channels=model_params['decoder_out_channels'],
                pixelcnn_out_channels=model_params['pixelcnn_out_channels'],
                z_dimension=model_params['z_dimension'],
                pixelcnn=model_params['use_pixelcnn'],
                only_pixelcnn=model_params['only_pixelcnn'],
                pixelcnn_layers=model_params['num_pixelcnn_layers'],
                pixelcnn_activation=model_params['pixelcnn_activation'],
                nll=model_params['coeff_nll'],
                kl=model_params['coeff_kl'],
                mmd=model_params['coeff_mmd'],
                require_rsample=model_params['require_rsample'],
                sigma_decoder=model_params['sigma_decoder'],
                input_image_size=model_params['input_image_size'])
    print(model)
    return model, model_params
def make_table(MR=False):
    """Write the combined BAFFLES age table (LaTeX + machine-readable CSV).

    Loads the pre-merged Nielsen/Brandt/Boro Saikia/Guillot star table,
    computes calcium (R'HK) and lithium (Li EW) posterior ages with BAFFLES,
    and writes 'baffles_table2_latex.txt' and 'baffles_table2.csv'.
    NOTE(review): the MR parameter appears unused here (printStats is always
    called with explicit MR= values) — confirm before removing.
    """
    ca_const = utils.init_constants('calcium')
    li_const = utils.init_constants('lithium')
    empty = ''  # placeholder written for missing values
    # The triple-quoted block below is dead code kept for provenance: the
    # original pipeline that merged the four source catalogs into
    # 'merged_nielsen_brandt_saikia_guillot.npy'.
    """ table = [] #[Object,RA,Dec,Sp Type,B-V,R'HK,Li EW,Source]
    #first read in all the 4 tables and create a single big table, which then I sort and merge
    t = np.genfromtxt('data/nielsen_2010_table2.csv',delimiter=',',dtype=str,skip_header=1)
    for row in t:
        if not utils.isFloat(row[1]) or not (.45 <= float(row[1]) <= 1.9): continue
        arr = []
        arr.append(row[21].strip())
        ra,dec = ra_dec(row[22])
        arr.append(ra)
        arr.append(dec)
        arr.append(row[4].strip())
        arr.append(row[1])
        arr.append(row[13])
        arr.append(row[7])
        arr.append("1")
        if arr[0] == '' or not (utils.isFloat(arr[5]) or utils.isFloat(arr[6])): continue
        table.append(arr)
    bv_to_teff = my_fits.magic_table_convert('bv','teff')
    t = np.genfromtxt('data/brandt_2014_table.csv',delimiter=',',dtype=str,skip_header=2)
    for row in t:
        bv = None
        if utils.isFloat(row[2]) and utils.isFloat(row[3]):
            bv = float(row[2]) - float(row[3])
        if bv is None or not (.45 <= bv <= 1.9): continue
        arr = []
        arr.append(row[14].strip())
        ra,dec = ra_dec(row[15])
        arr.append(ra)
        arr.append(dec)
        arr.append(row[4].strip())
        arr.append("%f" % bv)
        arr.append(row[7])
        if row[9].find('A') != -1:
            nli = float(row[9].split()[-1])
            teff = bv_to_teff(bv)
            ew = 10** my_fits.teff_nli_to_li([teff],[nli])[0]
            arr.append("%d" % ew)
        elif utils.isFloat(row[9]):
            arr.append(row[9])
        else:
            arr.append(empty)
        arr.append("2")
        if arr[0] == '' or not (utils.isFloat(arr[5]) or utils.isFloat(arr[6])): continue
        table.append(arr)
    t = np.genfromtxt("data/nearbyStars_Boro_Saikia_2018.txt",delimiter='\t',dtype=str,skip_header=58)
    for row in t:
        if not utils.isFloat(row[5]) or not (.45 <= float(row[5]) <= 1.9): continue
        arr = []
        arr.append(row[16].strip())
        ra,dec = ra_dec(row[17])
        arr.append(ra)
        arr.append(dec)
        arr.append(row[18].strip())
        arr.append(row[5])
        arr.append(row[10])
        arr.append(empty)
        arr.append("3")
        if arr[0] == '' or not (utils.isFloat(arr[5]) or utils.isFloat(arr[6])): continue
        table.append(arr)
    t = np.genfromtxt("data/guillot_2009_li_survey.txt",delimiter='\t',dtype=str,skip_header=77)
    for row in t:
        if not utils.isFloat(row[7]) or not (.45 <= float(row[7]) <= 1.9): continue
        arr = []
        arr.append(row[22].strip())
        ra,dec = ra_dec(row[23])
        arr.append(ra)
        arr.append(dec)
        arr.append(row[24].strip())
        arr.append(row[7])
        arr.append(empty)
        arr.append(row[16])
        arr.append("4")
        if arr[0] == '' or not (utils.isFloat(arr[5]) or utils.isFloat(arr[6])): continue
        table.append(arr)
    table = np.array(table)
    name_sorted = table[table[:,0].argsort()]
    thinned = [] #averaging b-v,measurements, sources as 1,4
    for name in set(name_sorted[:,0]):
        subset = name_sorted[name_sorted[:,0]==name]
        if len(subset) == 1:
            thinned.append(subset[0])
        else:
            arr = copy.deepcopy(subset[0])
            arr[4] = average(subset[:,4])
            arr[5] = average(subset[:,5])
            arr[6] = average(subset[:,6])
            x = list(set(subset[:,7]))
            x.sort()
            arr[7] = ','.join(x)
            thinned.append(arr)
    thinned = np.array(thinned)
    final_table = thinned[thinned[:,1].argsort()]
    np.save("final_table",final_table)
    exit()
    """
    final_table = np.load("data/merged_nielsen_brandt_saikia_guillot.npy")
    delimiterMR = ','
    baf_li = baffles.age_estimator('lithium')
    baf_ca = baffles.age_estimator('calcium')
    #[Object,RA,Dec,Sp Type,B-V,R'HK,Li EW,Source]
    f = open("baffles_table2_latex.txt", 'w+')
    fMR = open("baffles_table2.csv", 'w+')
    # Posterior percentiles reported for each age estimate.
    cdf = ['2.5%', '16%', '50%', '84%', '97.5%']
    column_head = [
        'Name', 'RA', 'Dec', 'Sp. Type', 'B-V', "logR'HK", 'Li EW', 'Ref.'
    ]
    column_head += ["R'HK Age at CDF=" + x for x in cdf]
    column_head += ["Li EW Age at CDF=" + x for x in cdf]
    column_head += ["Final Age at CDF=" + x for x in cdf]
    units = [
        '', 'h m s', 'h m s', '', 'mags', " ", 'mA', '', '', '', '', '', '',
        '', '', '', '', '', '', '', '', '', ''
    ]
    fMR.write(delimiterMR.join(column_head))
    fMR.write('\n')
    fMR.write(delimiterMR.join(units))
    fMR.write('\n')
    for row in final_table:
        arr = []    # LaTeX-formatted cells
        arrMR = []  # machine-readable cells
        arr += [x.replace('V* ', '').replace('_', '-') for x in row[0:4]]
        arrMR += [x.replace('$', '').replace('V* ', '') for x in row[0:4]]
        bv = float(row[4])
        arr.append("%.2f" % bv)
        arrMR.append("%.3g" % bv)
        p_ca, p_li = None, None
        # Calcium posterior when R'HK is present and in range.
        if utils.isFloat(row[5]):
            rhk = float(row[5])
            arr.append('$%.2f$' % rhk)
            arrMR.append('%.3f' % rhk)
            if ca_const.inRange(bv, rhk):
                p_ca = baf_ca.get_posterior(bv, rhk, showPlot=False)
        else:
            arr.append(empty)
            arrMR.append(empty)
        ew = None
        if utils.isFloat(row[6]):
            ew = float(row[6])
            arr.append('%d' % ew)
            arrMR.append('%g' % ew)
        else:
            arr.append(empty)
            arrMR.append(empty)
        arr.append(row[7])
        arrMR.append(row[7].replace(',', ';'))  # refs: ',' would break the CSV
        # Lithium posterior when EW is present, positive, and in range.
        if bv is not None and ew is not None and ew > 0 and li_const.inRange(
                bv, np.log10(ew)):
            p_li = baf_li.get_posterior(bv, ew, showPlot=False)
        if p_ca is not None:
            arr += printStats(p_ca.stats)
            arrMR += printStats(p_ca.stats, MR=True)
        else:
            arr += [empty] * 5
            arrMR += [empty] * 5
        if p_li is not None:
            arr += printStats(p_li.stats)
            arrMR += printStats(p_li.stats, MR=True)
        else:
            arr += [empty] * 5
            arrMR += [empty] * 5
        if p_ca is None and p_li is None:
            continue  # no age estimate at all: omit the star entirely
        # Final age: product of the two posteriors when both exist,
        # otherwise whichever single posterior is available.
        if p_ca is not None and p_li is not None:
            prod = p_ca.array * p_li.array
            prob.normalize(ca_const.AGE, prod)
            stats = prob.stats(ca_const.AGE, prod)
            arr += printStats(stats)
            arrMR += printStats(stats, MR=True)
        elif p_ca is not None:
            arr += printStats(p_ca.stats)
            arrMR += printStats(p_ca.stats, MR=True)
        elif p_li is not None:
            arr += printStats(p_li.stats)
            arrMR += printStats(p_li.stats, MR=True)
        else:
            arr += [empty] * 5
            arrMR += [empty] * 5
        f.write(' & '.join(arr) + " \\\\")
        f.write('\n')
        fMR.write(delimiterMR.join(arrMR))
        fMR.write('\n')
    f.close()
    fMR.close()