def handle_stats(stats_counter):
    # uniqid, start_time, update_stats_enabled, stats, and logger are module-level
    # names from the surrounding rpisurv code.
    stats_counter_thresh = 40  # Updating stats for rpisurv community every 40 loops
    if stats_counter % stats_counter_thresh == 0:
        stats.update_stats(uniqid, str(stats.get_runtime(start_time)), update_stats_enabled)
    else:
        logger.debug("stats_counter is " + str(stats_counter)
                     + ". Only sending every " + str(stats_counter_thresh))
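# Usage sketch (assumption, not part of the rpisurv source): handle_stats is
# presumably called once per pass of the main display loop, so stats are only
# pushed on every 40th iteration. The driver below is hypothetical.
def _demo_main_loop(iterations=120):
    for stats_counter in range(iterations):
        handle_stats(stats_counter)  # sends on iterations 0, 40, 80, ...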
def update():
    # Drain the log in batches: parse_sql returns the next batch of events plus
    # a flag saying whether more remain; each batch is folded into the stats
    # tables in its own session. db and logparser are project modules.
    more = True
    while more:
        stat_session = db.StatSession()
        log_session = db.LogSession()
        events, more = logparser.parse_sql(log_session)
        stats.update_stats(stat_session, events)
        stat_session.commit()
        log_session.commit()
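# Usage sketch (assumed scheduling; the interval is illustrative): running
# update() on a timer keeps the stats tables in step with the incoming logs,
# since each call loops until parse_sql reports nothing more to read.
def _demo_periodic_update(interval_seconds=60):
    import time
    while True:
        update()
        time.sleep(interval_seconds)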
def hidden_stats(ticker, adj, mar, n):
    ## Checking delist.csv (uses os, json, pandas as pd, dateparser,
    ## datetime.timedelta, the project modules stats and delist, and a
    ## module-level `today`)
    file = os.path.join(os.getcwd(), 'data', 'delist.csv')
    dlf = pd.read_csv(file)
    delist_date = dlf.columns[0]
    td = dateparser.parse(str(today)) - dateparser.parse(delist_date)
    # If delist.csv has not been updated since yesterday, then update.
    if td > timedelta(days=1):
        print('Retrieving a new list of delisted stocks...')
        delist.update_delist()

    # Adjusting values based on slider from the top dashboard
    st = stats.update_stats(ticker)
    if st is not None and st != 'NoneType':
        if adj != 0 or mar != 5:
            st['parsed']['Entry Point (Decimal)'] += adj / 100
            st['parsed']['Entry Point'] = '{}%'.format(
                st['parsed']['Entry Point (Decimal)'] * 100)
            st['notes']['Engage?'] = '{} ({})'.format(
                st['parsed']['Engage?'], st['parsed']['Entry Point'])
        st = json.dumps(st, indent=4)
    return st
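# Usage sketch (ticker and slider values are illustrative, not from the source):
# with adj=0 and mar=5 the stats pass through unadjusted; any other slider
# position shifts the entry point by adj/100 before the JSON string is built.
def _demo_dashboard_values():
    unadjusted = hidden_stats('AAPL', adj=0, mar=5, n=10)
    shifted = hidden_stats('AAPL', adj=2, mar=5, n=10)  # entry point +0.02
    return unadjusted, shifted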
def test_model(trained_model, test_data, language_stats, device, language_names_dict, int2lang):
    correct_per_example = 0
    total_predictions = 5000  # total_predictions, percent, and tenp are unused here
    incorrect_guesses_per_instance = 0
    percent = 0
    example = 0
    batch_nr = 1
    tenp = 500
    num_characters = []
    count = 0
    for x, y in tqdm(test_data):
        batch_nr += 1
        example += 1
        hidden_layer = trained_model.init_hidden(1).to(device)
        for examples in zip(x, y):
            # total_predictions += 1
            count += 1
            prediction = trained_model(examples[0].unsqueeze(0).to(device), hidden_layer)
            _, indices = torch.max(prediction[0].data, dim=1)
            characters = len(torch.nonzero(examples[0]))
            if indices[0].item() == examples[1].item():
                # Correct guess: record how many characters it took and stop
                # early for this example.
                num_characters.append(characters)
                correct_per_example += 1
                stats.update_stats(language_stats, indices[0].item(), examples[1].item(),
                                   int2lang, characters, language_names_dict)
                break
            else:
                # characters = 0
                stats.update_stats(language_stats, indices[0].item(), examples[1].item(),
                                   int2lang, characters, language_names_dict)
                incorrect_guesses_per_instance += 1
                continue
    print(example)
    print(correct_per_example)
    print(incorrect_guesses_per_instance)
    print(count)
    return language_stats
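# Usage sketch (assumed wiring; model, test_loader, and the empty-dict
# accumulator are hypothetical): language_stats must be whatever mutable
# structure stats.update_stats expects to fill in per language.
def _demo_evaluation(model, test_loader, device, language_names_dict, int2lang):
    language_stats = {}  # shape depends on stats.update_stats
    return test_model(model, test_loader, language_stats, device,
                      language_names_dict, int2lang)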
def compute_update_stats(writer, step, modules, prev_params):
    for name, module in modules.items():
        update_scales, param_scales, fractions = stats.update_stats(
            get_cpu_params(module), prev_params[name])
        writer.add_scalar(
            f'update/{name}_mean_update_scale', np.mean(update_scales), step)
        writer.add_scalar(
            f'update/{name}_mean_param_scale', np.mean(param_scales), step)
        writer.add_scalar(
            f'update/{name}_mean_update_to_param_scale', np.mean(fractions), step)
        writer.add_histogram(
            f'update/{name}_update_to_param_scale', fractions, step)
        writer.add_histogram(
            f'update/{name}_update_scale', update_scales, step)
        writer.add_histogram(
            f'update/{name}_param_scale', param_scales, step)
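# Usage sketch (assumed training-loop shape; the optimizer/loss wiring is
# hypothetical): snapshot the parameters before the optimizer step, then log
# how large the step was relative to the parameters themselves.
def _demo_training_step(writer, step, modules, optimizer, loss):
    prev_params = {name: get_cpu_params(m) for name, m in modules.items()}
    loss.backward()
    optimizer.step()
    compute_update_stats(writer, step, modules, prev_params)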
def create_new_descriptive_stats():
    data = convert_to_string(request.data)
    descriptive, correlations = stats.update_stats(data)
    cache.set("descriptive", descriptive)
    cache.set("correlations", correlations)
    return "Successfully updated statistics cache", 200
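# Usage sketch (assumed Flask wiring; the original presumably had an @app.route
# decorator that was lost, and the path below is illustrative):
def _demo_register(app):
    app.add_url_rule('/stats/descriptive',
                     view_func=create_new_descriptive_stats,
                     methods=['POST'])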