from graphviz import Digraph
from os import remove


def createGraph(name, activation_values, weights, biases, error_metric):
    dot = Digraph(comment=name)
    nodes = []
    node_count = 0
    for layer in activation_values:
        nodes.append([])
        for value in layer:
            node_name = "N" + str(node_count)
            nodes[-1].append(node_name)
            dot.node(node_name, str(round(value, 5)))
            node_count += 1
    node_count = 1
    bias_nodes = []
    for bias in biases:
        node_name = "B" + str(node_count)
        bias_nodes.append(node_name)
        dot.node(node_name, str(round(bias, 5)), color="blue")
        node_count += 1
    # Unpack weights into a flat list, in the same order the edges are created below
    flat_weights = []
    for weight_layer in weights:
        for weight_set in weight_layer:
            for weight in weight_set:
                flat_weights.append(weight)
    edges = []
    bias_edges = []
    for node_layer_index in range(1, len(nodes)):
        for prev_node in nodes[node_layer_index - 1]:
            for node in nodes[node_layer_index]:
                edges.append([prev_node, node])
        for node in nodes[node_layer_index]:
            try:
                bias_edges.append([bias_nodes[node_layer_index - 1], node])
            except IndexError:
                print("Output layer has no bias")
    for node_pair_index in range(len(edges)):
        dot.edge(edges[node_pair_index][0], edges[node_pair_index][1],
                 label=str(round(flat_weights[node_pair_index], 5)))
    for node_pair_index in range(len(bias_edges)):
        dot.edge(bias_edges[node_pair_index][0], bias_edges[node_pair_index][1],
                 color="blue")
    if error_metric[1] is None:
        metric = "N/A"
    else:
        metric = str(round(error_metric[1], 5))
    dot.attr(label=r'\n\n' + error_metric[0] + ": " + metric)
    dot.render(name, view=True)
    remove(name)  # delete the intermediate DOT source file
def update_output(input1):
    columns = ['friend_follower_ratio', 'favourites_count', 'followers_count',
               'friends_count', 'listed_count', 'protected', 'statuses_count',
               'verified', 'tweets_per_day', 'favourites_per_day']
    df = pd.DataFrame(columns=columns)
    if input1 is None:
        return '', df.round(1).to_dict('records'), '', style_table_by_z_value(df, means, stds)
    bot, df, imurl, legit = predictUser(input1)
    if legit:
        sig_fig = 2
        # round() here is sigfig.round (the builtin round has no sigfigs keyword)
        df['friend_follower_ratio'] = round(float(df['friend_follower_ratio'][0]), sigfigs=sig_fig)
        df['tweets_per_day'] = round(float(df['tweets_per_day'][0]), sigfigs=sig_fig)
        df['favourites_per_day'] = round(float(df['favourites_per_day'][0]), sigfigs=sig_fig)
        # z-scores; zero out everything within +/- 3 standard deviations
        zs = (df - means) / stds
        zs = abs(zs.where((zs > 3) | (zs < -3), 0))
    try:
        url = imurl[0][:-11] + imurl[0][-4:]
    except (IndexError, TypeError):
        url = ''
    return bot, df.to_dict('records'), url, style_table_by_z_value(df, means, stds)
import numpy as np


def choose_downsamps(blocklen):
    """
    choose_downsamps(blocklen):
        Return a good list of possible downsample sizes given a
        block of data of length blocklen spectra.
    """
    # First cut: all divisors of blocklen up to 259.  Redundant ones
    # are removed below.
    x = np.asarray([n for n in np.arange(1, 260) if blocklen % n == 0])
    if len(x) == 1:
        return x
    # Now only choose those where the ratio between successive sizes
    # is between 1.5 and 2, if possible
    if (x[1:] / x[:-1]).min() < 1.5:
        newx = [1]
        if 2 in x:
            newx.append(2)
        if 3 in x:
            newx.append(3)
        maxnewx = newx[-1]
        while maxnewx < x[-1]:
            if round(1.5 * maxnewx + 1e-7) in x:
                newx.append(round(1.5 * maxnewx + 1e-7))
            elif 2 * maxnewx in x:
                newx.append(2 * maxnewx)
            else:
                if x[-1] > 1.5 * maxnewx:
                    newx.append(int(x[x > 1.5 * maxnewx].min()))
                else:
                    return newx
            maxnewx = newx[-1]
        return newx
    else:
        return x
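# Hedged usage sketch for choose_downsamps(); the block lengths and expected
# shapes below are illustrative only.  A power-of-two block keeps every small
# divisor (successive ratios are exactly 2), while a highly composite block is
# thinned so each downsample factor is roughly 1.5-2x the previous one.
if __name__ == '__main__':
    print(choose_downsamps(1024))  # all divisors of 1024 up to 259, kept as-is
    print(choose_downsamps(960))   # thinned list, e.g. starting 1, 2, 3, 5, 8, ...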
from sigfig import round  # the builtin round() has no sigfigs keyword


def round_ct(ct):
    if ct > 1_000_000_000:
        return "{}B+".format(round(ct / 1_000_000_000, sigfigs=2))
    if ct > 1_000_000:
        return "{}M+".format(round(ct / 1_000_000, sigfigs=2))
    if ct > 1_000:
        return "{}K+".format(round(ct / 1_000, sigfigs=2))
    return ct
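# Quick sanity check of round_ct()'s tiers, assuming the sigfig import above
# is in scope.  Note that sigfig.round returns floats for float input, so the
# output likely reads "12.0K+" rather than "12K+".
for n in (950, 12_345, 3_400_000, 7_890_000_000):
    print(n, '->', round_ct(n))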
def runTest(self):
    self.assertWarns(UserWarning, round, *self.args, **self.kwargs)
    filterwarnings("ignore")
    if isinstance(self.output, float):
        self.assertAlmostEqual(round(*self.args, **self.kwargs), self.output)
    else:
        self.assertEqual(round(*self.args, **self.kwargs), self.output)
    resetwarnings()
def visualizeTotalEnergy(self, path="noPath.png", hyperplane=None):
    # plots the total energy with time
    plt.close()
    fig = plt.figure()
    plt.plot(self.systemDataTimeSeries[0], self.systemDataTimeSeries[2], "+k")
    plt.title("\n".join(wrap(
        "Ising Model, Dimension = " + str(self.d) +
        ", N = " + str(self.n) +
        ", Tc = " + str(sigfig.round(float(self.tc), sigfigs=4)) +
        "K, T = " + str(sigfig.round(float(self.t), sigfigs=4)) +
        "K, Time = " + str(self.timeStep) + "au", 60)))
    plt.xlabel("Time steps / a.u.")
    plt.ylabel("Total energy / J")
    return fig
def redondea_valor(precio):
    # Prices >= 10 are rounded to 2 decimal places; smaller ones are rounded
    # to 4 significant figures via sigfig.round.
    precio_bruto = Decimal(precio)
    if precio_bruto >= 10:
        precio_redondeado = float('{0:.2f}'.format(precio_bruto))
    else:
        precio_redondeado = round(precio_bruto, sigfigs=4, type=Decimal)
    return precio_redondeado
def __repr__(self):
    return str({
        'name': self.name,
        'probability': self.probability and round(self.probability, 3)
    })
def get_year(year: int, year_system: str = CE) -> Tuple[int, int, int]:
    """
    Return a decompressed year value (year, second, microsecond) ready for storage.

    For years before the common era,
    - the year value is set to 1, and
    - second and microsecond values are set.

    The second and microsecond values are used to:
    - correctly sort datetimes in the db and
    - determine the actual year upon retrieval from the db.
    """
    year = int(year)
    if year > HistoricDateTime.bce_threshold and year_system != YBP:
        # Safe to assume this should be YBP
        year_system = YBP
    microsecond, second = 0, 0
    if year_system not in {CE, BCE, YBP}:
        raise ValueError
    elif year_system in {BCE, YBP}:
        year = year if year_system == BCE else year - BP_REFERENCE_YEAR
        year = round(year, sigfigs=HistoricDateTime.significant_figures)
        # Build a year stamp with max 6 digits
        scientific_notation: str = '{:.4e}'.format(year)  # '1.3800e+10' for the Big Bang
        decimal_num_str, exponent_str = scientific_notation.split('e+')
        exponent = int(exponent_str)  # 10 for the Big Bang
        decimal_num = int(decimal_num_str.replace(PERIOD, ''))  # 13800 for the Big Bang
        inv_exponent = EXPONENT_INVERSION_BASIS - exponent
        inv_decimal_num = DECIMAL_INVERSION_BASIS - int(decimal_num)
        second, microsecond = inv_exponent, inv_decimal_num
        year = 1  # TODO
    return year, second, microsecond
def __str__(self):
    # This is used when passing an instance of fitSolutionClass to print.
    precision = 3
    assert len(self.fitResultsDict) == len(self._unitsListForParamters)
    stringToPrint = '|----Spectral Fit Solution--------\n'
    stringToPrint += ('|Solution Parameters (rounded to ' + str(precision) +
                      ' sig figs): \n')
    i = 0
    for name, value in self.fitResultsDict.items():
        unit = self._unitsListForParamters[i]
        if isinstance(value, bool):
            stringValue = 'True' if value else 'False'
        elif isinstance(value, str):
            stringValue = value
        elif value is None:
            stringValue = 'None'
        else:
            stringValue = str(sigfig.round(value, precision))
        stringToPrint += '|' + name + ": " + stringValue + ' ' + unit + '\n'
        i += 1
    stringToPrint += '|---------------------------------'
    return stringToPrint
def __repr__(self):
    return str({
        'name': str(self.name),
        'probability': str(round(float(self.probability), 3)) if self.probability else 0
    })
def generateBVector5S(matrix, size):
    from sigfig import round
    from numpy import zeros
    b = zeros(size, float)
    for i in range(len(matrix)):
        b[i] = round(matrix[i][0], sigfigs=5)
    return b
def cluster_data(confirmed, clusters_config=None):
    if len(confirmed) == 0:
        return {"data": [], "clusters": []}
    if clusters_config is None:
        clusters_config = {"clusters": CLUSTERS, "labels": CLUSTERS_LABELS}
    df = pd.DataFrame(confirmed)
    df = df.dropna(how="any", axis=0, subset=["confirmed"])
    breaks = jenkspy.jenks_breaks(df["confirmed"], nb_class=clusters_config["clusters"])
    # sigfig.round: keep 3 significant figures on each class limit
    rounded_breaks = list(map(lambda limit: round(limit, sigfigs=3), breaks))
    df["group"] = pd.cut(
        df["confirmed"],
        bins=breaks,
        labels=clusters_config["labels"],
        include_lowest=True,
    )
    df = df.where(pd.notnull(df), None)  # convert NaN to None
    # Add 1 to all lower limits (except 0) so the displayed ranges don't overlap
    non_inclusive_lower_limits = list(
        map(lambda limit: 0 if limit == 0 else limit + 1, rounded_breaks))
    return {
        "data": df.to_dict("records"),
        "clusters": list(zip(non_inclusive_lower_limits, rounded_breaks[1:])),
    }
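# Minimal smoke test for cluster_data() under assumed inputs: records carry a
# "confirmed" count, and an explicit config stands in for the CLUSTERS /
# CLUSTERS_LABELS module constants used by default.
sample = [{"name": "A", "confirmed": 5}, {"name": "B", "confirmed": 50},
          {"name": "C", "confirmed": 900}, {"name": "D", "confirmed": 1200}]
out = cluster_data(sample, {"clusters": 2, "labels": ["low", "high"]})
print(out["clusters"])  # [(lower, upper), ...] pairs, one per Jenks class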
def step_impl(context):
    dpb1_predicted_pair = PredictedPair()
    rows = context.df.iloc[context.row_index:context.row_index + 1]
    params = dpb1_predicted_pair.generate_params_matching(rows)
    print(json.dumps(params))
    results = dpb1_predicted_pair.call_tce_pred_match(params, url="http://localhost:5010")
    predicted_freq = dpb1_predicted_pair.get_perm_freq(results['data'][0])
    context.predicted_freq = (str(round(predicted_freq, 3))
                              if isinstance(predicted_freq, float)
                              else predicted_freq)
from math import floor, log10
from sigfig import round  # sigfig treats a bare second argument as sigfigs


def num_to_sci(val, available_digits=4):
    if len(str(val).replace(".", "")) <= available_digits:
        return left_pad(str(val), available_digits)
    sf = round(val, available_digits - 1)
    index = 0 if val == 0 else floor(log10(sf) / 3)
    converted = sf / 10**(index * 3)
    if converted == int(converted):
        converted = int(converted)
    return left_pad(f'{converted}{SUFFIXES[index]}', available_digits)
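# Hypothetical supporting pieces that num_to_sci() relies on but that are not
# shown above: a SUFFIXES list indexed by thousands-exponent and a left_pad
# helper.  The definitions here are assumptions for illustration only.
SUFFIXES = ['', 'K', 'M', 'B', 'T']


def left_pad(s, width):
    return s.rjust(width)


print(num_to_sci(0))        # '   0' (short values are only padded)
print(num_to_sci(1234567))  # '1.23M' after rounding to 3 significant figures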
async def sigfigs(self, ctx, number: float, figures: int):
    """
    Round a number to a specified number of significant figures
    """
    result = sigfig.round(number, sigfigs=figures)
    e = Embed(title="Rounding Result")
    e.add_field(name="Original Number", value=number, inline=True)
    e.add_field(name="Figures", value=figures, inline=True)
    e.add_field(name="Result", value=result, inline=False)
    e.add_field(name="Result (Scientific Notation)", value="{:e}".format(result))
    e.color = Color.dark_blue()
    await ctx.send(embed=e)
def year_bp(self) -> int:
    """Return the year in YBP (years before present)."""
    current_year = datetime.now().year
    if self.year_bce:
        ybp = self.year_bce + APPROXIMATE_PRESENT_YEAR
    else:
        ybp = current_year - self.year
    ybp = int(sigfig.round(ybp, sigfigs=self.significant_figures))
    # Correct rounding error if needed
    if TEN_THOUSAND < ybp < ONE_MILLION:  # TODO: use BCE if smaller than this
        scale = 500
        ybp = round(ybp / scale) * scale  # snap to the nearest 500 years
    return int(ybp)
def generateMatrixWith5S(rowSize, colSize):
    from numpy import random, array
    from random import randint
    from sigfig import round
    myMatrix = []
    mat = random.rand(rowSize, colSize)
    for i in range(len(mat)):
        innerMatrix = []
        for j in mat[i]:
            num = round(randint(1, 4) + j, sigfigs=5)
            innerMatrix.append(num)
        myMatrix.append(innerMatrix)
    arr = array(myMatrix, dtype=float)
    return arr
def parse_gene_list(contents, network_type, example_data=False):
    """Parses the uploaded gene list; returns a Bootstrap table and a Pandas DataFrame."""
    if example_data is True:
        genes_df = pd.read_csv(os.path.join('data', 'example_diff_expr.csv'))
    else:
        content_type, content_string = contents.split(',')
        decoded = base64.b64decode(content_string)
        genes_df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
    # Select all columns except p-values
    cols = [col for col in genes_df.columns if col not in ['pvalue', 'padj']]
    # Round columns to significant values
    genes_df.loc[:, cols] = genes_df[cols].round(2)
    if network_type == 'DE' or network_type == 'combined':
        # Check that the RNASeq headers are there
        if not {'log2FoldChange', 'padj'}.issubset(genes_df.columns):
            return dbc.Alert(
                'Check your header names. They should include "log2FoldChange" and "padj"',
                color='danger',
                style={'display': 'inline-block'}), []
        # if genes_df['pvalue']:
        #     genes_df['pvalue'] = [sigfig.round(n, sigfigs=3) for n in genes_df['pvalue']]
        genes_df['padj'] = [sigfig.round(n, sigfigs=3) for n in genes_df['padj']]
    small_df = genes_df.head()  # smaller df to display on the app
    table = dash_table.DataTable(
        data=small_df.to_dict('records'),
        columns=[{"name": i, "id": i} for i in small_df.columns],
        style_table={'maxHeight': '20vh'},
        style_cell={'font-family': 'sans-serif', 'textAlign': 'left'})
    upload_contents = html.Div([
        dbc.Alert('Your list was uploaded successfully!',
                  color='primary',
                  dismissable=True,
                  style={'display': 'inline-block'}),
        table
    ])
    return upload_contents, genes_df
def setNumber(self):
    # The input will always be a string.
    # First, try to convert it directly to a float.
    try:
        # If we succeed, our number is set
        self.f_Value = float(self.s_Input)
    except ValueError:
        b_PrefixMatched = False
        for s_Prefix in self.d_MetricScale:
            if self.s_Input[-1] == s_Prefix:
                b_PrefixMatched = True
        if not b_PrefixMatched:
            raise ValueError(f"Prefix {self.s_Input[-1]} not a valid metric prefix")
        f_Value = float(self.s_Input[:-1])
        i_Exponent = int(self.d_MetricScale[self.s_Input[-1]])
        # NB: this counts every character of the mantissa (including any
        # decimal point) as a significant figure
        self.i_SigFigs = len(self.s_Input[:-1])
        # round() here is sigfig.round (the builtin round has no sigfigs keyword)
        self.f_Value = round(f_Value * (10 ** i_Exponent), sigfigs=self.i_SigFigs)
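# Standalone sketch of the same prefix-parsing logic for quick testing; the
# d_metric_scale table below is an assumption (the class's actual
# d_MetricScale may differ).  "4.7k" -> 4700.0, keeping len("4.7") = 3
# significant figures.
from sigfig import round as sig_round

d_metric_scale = {'k': 3, 'M': 6, 'G': 9, 'm': -3, 'u': -6}


def parse_metric(s_input):
    try:
        return float(s_input)  # plain number, no prefix
    except ValueError:
        prefix = s_input[-1]
        if prefix not in d_metric_scale:
            raise ValueError(f"Prefix {prefix} not a valid metric prefix")
        mantissa = s_input[:-1]
        # NB: len(mantissa) counts any decimal point, mirroring i_SigFigs above
        return sig_round(float(mantissa) * 10 ** d_metric_scale[prefix],
                         sigfigs=len(mantissa))


print(parse_metric("4.7k"))  # 4700.0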
def gera_num(N, rho):
    list_a = np.zeros(N)
    list_b = np.zeros(N)
    for i in range(N):
        # Two correlated Gaussian deviates with correlation coefficient rho
        r1 = np.random.normal(0, 1)
        r2 = np.random.normal(0, 1)
        a = a0 + sig_a * r1
        b = b0 + sig_b * (rho * r1 + r2 * np.sqrt(1 - rho**2))
        list_a[i] = a
        list_b[i] = b
    sa = np.std(list_a)
    sb = np.std(list_b)
    n = 0
    soma_vab = 0
    for i in range(N):
        soma_vab += (list_a[i] - a0) * (list_b[i] - b0)
        if (list_a[i] - a0 > 0 and list_b[i] - b0 > 0) or \
           (list_a[i] - a0 < 0 and list_b[i] - b0 < 0):
            n += 1
    f = n / N
    inc_n = sigfig.round(float(np.sqrt(N * f * (1 - f))), sigfigs=2)
    inc_f = sigfig.round(inc_n / N, sigfigs=2)
    v_ab = soma_vab / (N - 1)
    R = v_ab / (sa * sb)
    inc_v_ab = sigfig.round(float(sa * sb * np.sqrt((1 + R**2) / (N - 1))), sigfigs=2)
    inc_R = sigfig.round(float((1 - R**2) / np.sqrt(N - 1)), sigfigs=2)
    w = list_a + list_b
    std_w = np.std(w)
    inc_std_w = sigfig.round(float(std_w / np.sqrt(2 * (N - 1))), sigfigs=2)
    z = list_a - list_b
    std_z = np.std(z)
    inc_std_z = sigfig.round(float(std_z / np.sqrt(2 * (N - 1))), sigfigs=2)
    print(f'Case with rho={rho}')
    print(f'a1: n = {n} +- {inc_n}')
    print(f'a2: f = {f} +- {inc_f}')
    print(f'a3: V_ab = {v_ab} +- {inc_v_ab}, R = {R} +- {inc_R}')
    print(f'a4: std_w = {std_w} +- {inc_std_w}')
    print(f'a5: std_z = {std_z} +- {inc_std_z}\n')
    return [list_a, list_b]
def balance():
    try:
        euros_from = consulta('SELECT from_quantity FROM movements WHERE from_currency = "EUR"')
        euros_to = consulta('SELECT to_quantity FROM movements WHERE to_currency = "EUR"')
    except Exception:
        flash('Unable to access the database.')
        inversion = 0
        return render_template('balance.html', inversion=inversion)
    saldo_euros_invertidos = 0
    total_euros_invertidos = 0
    for euros in euros_from:
        total_euros_invertidos += euros['from_quantity']
        saldo_euros_invertidos -= euros['from_quantity']
    for euros in euros_to:
        saldo_euros_invertidos += euros['to_quantity']
    carteraactual = micarteracripto()
    if 'EUR' in carteraactual:
        carteraactual.pop('EUR')
    valoractual = 0
    for clave, valor in carteraactual.items():
        if valor == 0:
            precio = 0
        else:
            url = ('https://pro-api.coinmarketcap.com/v1/tools/price-conversion'
                   '?amount={}&symbol={}&convert={}&CMC_PRO_API_KEY={}').format(
                       valor, clave, 'EUR', API_KEY)
            respuesta = requests.get(url)
            if respuesta.status_code == 200:
                dict_precios = respuesta.json()
                precio = dict_precios['data']['quote']['EUR']['price']
            else:
                flash('API key error. We cannot compute your balance.')
                inversion = 0
                return render_template('balance.html', inversion=inversion)
        valoractual += precio
    misaldo = total_euros_invertidos + saldo_euros_invertidos + valoractual
    misaldo = redondea_valor(misaldo)
    # round() here is sigfig.round, using its decimals keyword
    balance = round(misaldo - total_euros_invertidos, decimals=3)
    return render_template('balance.html',
                           inversion=total_euros_invertidos,
                           valoractual=misaldo,
                           balance=balance)
def gera_num_1(N):
    lista_x = np.zeros(N)
    lista_y = np.zeros(N)
    for i in range(N):
        # The shared fluctuation ec introduces a covariance between x and y
        ec = sc * np.random.normal(0, 1)
        x = x0 + sl * np.random.normal(0, 1) + ec
        y = y0 + sl * np.random.normal(0, 1) + ec
        lista_x[i] = x
        lista_y[i] = y
    sx = np.std(lista_x)
    sy = np.std(lista_y)
    n = 0
    soma_vxy = 0
    for i in range(N):
        soma_vxy += (lista_x[i] - x0) * (lista_y[i] - y0)
        if (lista_x[i] - x0 > 0 and lista_y[i] - y0 > 0) or \
           (lista_x[i] - x0 < 0 and lista_y[i] - y0 < 0):
            n += 1
    f = n / N
    inc_n = sigfig.round(float(np.sqrt(N * f * (1 - f))), sigfigs=2)
    inc_f = sigfig.round(inc_n / N, sigfigs=2)
    v_xy = soma_vxy / (N - 1)
    R = v_xy / (sx * sy)
    inc_v_xy = sigfig.round(float(sx * sy * np.sqrt((1 + R**2) / (N - 1))), sigfigs=2)
    inc_R = sigfig.round(float((1 - R**2) / np.sqrt(N - 1)), sigfigs=2)
    w = lista_x + lista_y
    std_w = np.std(w)
    inc_std_w = sigfig.round(float(std_w / np.sqrt(2 * (N - 1))), sigfigs=2)
    z = lista_x - lista_y
    std_z = np.std(z)
    inc_std_z = sigfig.round(float(std_z / np.sqrt(2 * (N - 1))), sigfigs=2)
    std_w_e_z_sem_cov = np.sqrt(sx**2 + sy**2)  # propagated width ignoring the covariance
    print(f'a: f = {f} +- {inc_f}')
    print(f'b: V_xy = {v_xy} +- {inc_v_xy}, R = {R} +- {inc_R}')
    print(f'c: std_w = {std_w} +- {inc_std_w}')
    print(f'd: std_z = {std_z} +- {inc_std_z}')
    print(f'e: propagated unc = {std_w_e_z_sem_cov}')
    return [lista_x, lista_y]
def round_data(self):
    for item in list(self.tensions.keys()):
        try:
            self.tensions[item] = sigfig.round(self.tensions[item], self.sig_figs)
            self.stresses[item] = sigfig.round(self.stresses[item], self.sig_figs)
            self.strains[item] = sigfig.round(self.strains[item], self.sig_figs)
            self.buckling_ratios[item] = sigfig.round(self.buckling_ratios[item],
                                                      self.sig_figs)
        except KeyError:
            continue
    for item in list(self.reactions.keys()):
        try:
            self.reactions[item] = (
                sigfig.round(self.reactions[item][0], self.sig_figs),
                sigfig.round(self.reactions[item][1], self.sig_figs),
            )
        except KeyError:
            continue
def __repr__(self):
    # str() guards are needed because ','.join() rejects non-string values
    return ','.join([
        self.name or '',
        str(self.population) if self.population else '',
        str(round(self.frequency, 3)) if self.frequency else '0'
    ])
        break
    except ValueError:
        # Prevent a ValueError when converting the string input to an integer
        print("Enter an integer")

mean_list = []      # Collect all means here to perform a t-test
variance_list = []  # Collect all variances here to perform a t-test
number = None
for data_set in data_sets:  # Do statistical analysis for each data set
    # n for the statistical operations; subtract 1 because the last element is the name
    number = len(data_set) - 1
    mean_sum = 0
    for value in data_set[:-1]:  # Sum the values for the mean; skip the last element (the name)
        mean_sum += value
    # round() here is sigfig.round; calculate the mean with significant figures
    mean = round(mean_sum / number, sigfigs=sig_figs)
    mean_list.append(mean)
    minimum = data_set[0]
    for value in data_set[:-1]:  # Find the minimum for the range; skip the name
        if value < minimum:
            minimum = value
    maximum = data_set[0]
    for value in data_set[:-1]:  # Find the maximum for the range; skip the name
        if value > maximum:
            maximum = value
    stat_range = maximum - minimum  # Calculate the range
    squared_values = []
def step_impl(context, allele_freqs):
    assert_that(
        ','.join([
            str(round(float(dpb1.frequency), 3))
            for dpb1 in context.haplotype.dpb1_alleles
        ]),
        is_(allele_freqs))
def make_latex_table(releasename='February2021', savename='selfcal_summary'):
    if datetime.datetime.today() > datetime.datetime(year=2021, month=1, day=10):
        result = requests.get(
            f'https://data.rc.ufl.edu/secure/adamginsburg/ALMA-IMF/{releasename}Release/tables/metadata_sc.ecsv',
            auth=('almaimf', keyring.get_password('almaimf', 'almaimf')))
        with open(f'{releasename}_metadata_sc.ecsv', 'w') as fh:
            fh.write(result.text)
        result = requests.get(
            'https://data.rc.ufl.edu/secure/adamginsburg/ALMA-IMF/tables/bandpass_fraction.ecsv',
            auth=('almaimf', keyring.get_password('almaimf', 'almaimf')))
        with open('bandpass_fraction.ecsv', 'w') as fh:
            fh.write(result.text)

    bp_tbl = Table.read('bandpass_fraction.ecsv')
    bp_tbl['band'] = [f'B{b}' for b in bp_tbl['band']]
    bp_tbl.rename_column('field', 'region')
    bp_tbl = table.join(bp_tbl.group_by('config').groups[0],
                        bp_tbl.group_by('config').groups[1],
                        keys=('region', 'band'))
    bp_tbl.rename_column('bwfrac_1', '12Mlong_frac')
    bp_tbl.rename_column('bwfrac_2', '12Mshort_frac')
    bp_tbl.remove_column('config_1')
    bp_tbl.remove_column('config_2')

    tbl = table.join(Table.read(f'{releasename}_metadata_sc.ecsv'), bp_tbl,
                     keys=('region', 'band'))

    bad = np.array([('diff' in x) or ('noco' in x) for x in tbl['filename']])

    # downselect
    keep = ((tbl['suffix'] == 'finaliter') &
            (tbl['robust'] == 'r0.0') &
            (~tbl['pbcor']) &
            (~tbl['bsens']) &
            (~tbl['nobright']) &
            (~bad))

    wtbl = tbl[keep]

    assert not any(np.isnan(wtbl['dr_improvement']))

    print(len(wtbl))
    print(wtbl)

    # strip the preceding "sc" from the selfcal numbers
    wtbl['selfcaliter'] = Column(data=[
        row['selfcaliter'][2:] + ("a" if row['has_amp'] else "")
        for row in wtbl
    ])

    # SensVsReq can be populated with either pre- or post-; we want post
    wtbl['SensVsReqPost'] = wtbl['mad_sample_post'] / wtbl['Req_Sens']
    wtbl['SensVsReqPre'] = wtbl['mad_sample_pre'] / wtbl['Req_Sens']
    wtbl['mad_sample_post'].unit = u.mJy / u.beam
    wtbl['mad_sample_pre'].unit = u.mJy / u.beam

    # convert 'peak' to mJy ("imstats" uses Jy, "compare" uses mJy)
    wtbl['peak'] *= 1e3
    wtbl['peak'].unit = u.mJy / u.beam

    cols_to_keep = {
        'region': 'Region',
        'band': 'Band',
        'selfcaliter': '$n_{sc}$',
        'bmaj': r'$\theta_{maj}$',
        'bmin': r'$\theta_{min}$',
        'bpa': 'BPA',
        'Req_Res': r'$\theta_{req}$',
        'BeamVsReq': r'$\Omega_{syn}^{1/2}/\Omega_{req}^{1/2}$',
        # 'peak/mad': "DR",
        'peak': '$S_{peak}$',
        'mad_sample_post': r'$\sigma_{MAD}$',
        'Req_Sens': r'$\sigma_{req}$',
        'SensVsReqPost': r'$\sigma_{MAD}/\sigma_{req}$',
        'dr_pre': 'DR$_{pre}$',
        'dr_post': 'DR$_{post}$',
        'dr_improvement': 'DR$_{post}$/DR$_{pre}$'
    }

    units = {
        '$S_{peak}$': (u.mJy / u.beam).to_string(u.format.LatexInline),
        r'$\sigma_{MAD}$': (u.mJy / u.beam).to_string(u.format.LatexInline),
        r'$\sigma_{req}$': (u.mJy / u.beam).to_string(u.format.LatexInline),
        r'$\theta_{req}$': u.arcsec.to_string(u.format.LatexInline),
        r'$\theta_{maj}$': u.arcsec.to_string(u.format.LatexInline),
        r'$\theta_{min}$': u.arcsec.to_string(u.format.LatexInline),
        'PA': u.deg.to_string(u.format.LatexInline),
    }
    latexdict['units'] = units

    fwtbl = wtbl[list(cols_to_keep.keys())]

    for old, new in cols_to_keep.items():
        if old in wtbl.colnames:
            # wtbl[old].meta['description'] = description[old]
            fwtbl.rename_column(old, new)
            if new in units:
                fwtbl[new].unit = units[new]

    float_cols = [
        r'$\theta_{maj}$',
        r'$\theta_{min}$',
        'BPA',
        '$S_{peak}$',
        r'$\sigma_{MAD}$',
        r'$\theta_{req}$',
        r'$\sigma_{req}$',  # NB: the original entry was missing the leading '$'
        r'$\sigma_{MAD}/\sigma_{req}$',
        # r'$\theta_{req}/\theta_{maj}$',
        r'$\Omega_{syn}^{1/2}/\Omega_{req}^{1/2}$',
        'DR$_{pre}$',
        'DR$_{post}$',
        'DR$_{post}$/DR$_{pre}$'
    ]

    # ALREADY IN mJy, so no further conversion is needed:
    # fwtbl[r'$\sigma_{MAD}$'] *= 1000

    # (An earlier strip_trailing_zeros/round_to_n formatter was dead code,
    # superseded by this sigfig-based one.)
    formats = {
        key: lambda x: str(sigfig.round(str(x), sigfigs=2))
        for key in float_cols
    }

    fwtbl.write('selfcal_summary.ecsv', format='ascii.ecsv', overwrite=True)

    # caption needs to be *before* preamble.
    # latexdict['caption'] = 'Continuum Source IDs and photometry'
    latexdict['header_start'] = r'\label{tab:selfcal}'  # \n\footnotesize
    latexdict['preamble'] = '\\caption{Selfcal Summary}\n\\resizebox{\\textwidth}{!}{'
    latexdict['col_align'] = 'l' * len(fwtbl.columns)
    latexdict['tabletype'] = 'table*'
    latexdict['tablefoot'] = (
        "}\\par\n"
        "$n_{sc}$ is the number of self-calibration iterations adopted. "
        "Those with a final iteration of amplitude self-calibration are denoted with the `a' suffix. "
        "$\\theta_{maj}, \\theta_{min}$, and BPA give the major and minor full-width-half-maxima (FWHM) of the synthesized beams. "
        "$\\theta_{req}$ is the requested beam size, "
        "and $\\Omega_{syn}^{1/2}/\\Omega_{req}^{1/2}$ gives the ratio of the synthesized to the "
        "requested beam area; larger numbers imply poorer resolution. "
        "$\\sigma_{MAD}$ and $\\sigma_{req}$ are the measured and requested "
        "RMS sensitivity, respectively, and $\\sigma_{MAD}/\\sigma_{req}$ is the excess noise "
        "in the image over that requested. $\\sigma_{MAD}$ is measured on the \\texttt{cleanest} images. "
        "$DR_{pre}$ and $DR_{post}$ are the dynamic range, $S_{peak} / \\sigma_{MAD}$, for the "
        "pre- and post-self-calibration data; $DR_{post}/DR_{pre}$ gives the improvement "
        "factor.")

    fwtbl.sort(['Region', 'Band'])
    fwtbl.write(f"../datapaper/{savename}.tex",
                formats=formats,
                overwrite=True,
                latexdict=latexdict)

    return fwtbl
#!/usr/bin/env python3
import re
from statistics import stdev, mean
from sigfig import round
from sys import argv

f = open(argv[1], "r")
rpt = f.read().split("\n")  # TODO: split based on mission load
f.close()

# 15 is a magic number - where '\d\d:\d\d:\d\d "FPS: ' ends
fps = list(
    map(lambda s: float(s[15:-1]),
        filter(lambda s: bool(re.match(r'^\d\d:\d\d:\d\d "FPS: .*"', s)), rpt)))

print(str(round(mean(fps), sigfigs=3)) + " +/- " +
      str(round(stdev(fps), sigfigs=3)) + " FPS")
ax[plt_idx].set_title(dep_name, fontsize=10)
# ax[plt_idx].axvline(x=3*np.mean(all_deployment_dtemp_dtime[plt_idx]), color='g', linewidth=line_thick)
ax[plt_idx].axvline(x=np.mean(all_deployment_dtemp_dtime[plt_idx]) +
                    3 * np.std(all_deployment_dtemp_dtime[plt_idx]),
                    color='r', linewidth=line_thick)
ax[plt_idx].axvline(x=np.mean(all_deployment_dtemp_dtime[plt_idx]) -
                    3 * np.std(all_deployment_dtemp_dtime[plt_idx]),
                    color='r', linewidth=line_thick)
# round() here is sigfig.round (note the sigfigs keyword)
anno = 'mean = ' + str(
    round(float(np.mean(all_deployment_dtemp_dtime[plt_idx])), sigfigs=3))
anno += '\n3SD = ' + str(
    round(float(3 * np.std(all_deployment_dtemp_dtime[plt_idx])), sigfigs=3))
anno += '\nsamples = ' + str(len(all_deployment_dtemp_dtime[plt_idx]))
ax[plt_idx].annotate(anno, xy=label_coords, xycoords=label_method, fontsize=8)
# ax[plt_idx].set_ylim(bottom=0, top=np.max(hist_data[0]))
# ax[plt_idx].set_xlim(left=-450, right=450)
# Leftover bin-edge expression, previously a bare no-op statement:
# np.linspace(-450, 450, 901)