def main():
    operation = gM.menu()
    if operation == 1:
        f.get_data()
    elif operation == 2:
        f.show_std()
    else:
        f.analyze_records()
def _set_default_season(self):
    """Sets the season default to the year of the current season.

    Used when season is left blank in any function that requires it.
    """
    url = 'http://data.nba.net/10s/prod/v1/today.json'
    # fetch once and index into the payload (the original called get_data twice)
    season = get_data(url)['teamSitesOnly']['seasonYear']
    return str(season)
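# Most snippets in this collection lean on a get_data helper that is never
# shown. A minimal sketch, assuming it simply fetches a URL and decodes the
# JSON body (the timeout value and error handling are assumptions):
import requests

def get_data(url):
    response = requests.get(url, timeout=10)
    response.raise_for_status()  # surface HTTP errors instead of parsing garbage
    return response.json()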
def main():
    print("INFO: Initializing...")
    DB = load_file(DB_FILENAME)
    CHATS = load_file(CHATS_FILENAME)
    # Initiate main loop
    while True:
        print("INFO: Polling for chat additions or removals...")
        poll_new_chats(CHATS)
        print("INFO: Fetching fresh data...")
        items = get_data()
        print("INFO: Processing data...")
        for item in items:
            identifier = item["identifier"]
            message = item["message"]
            if identifier not in DB:
                print("INFO: Sending messages...")
                for chat in CHATS:
                    send_message(message, int(chat))
                    time.sleep(1)
                append_file(DB_FILENAME, identifier)
                DB.append(identifier)
        print("INFO: Sleeping for {} secs...".format(SLEEP))
        time.sleep(SLEEP)
def url_query(**kwargs):
    _unixTime = int(time.time())
    _url = config.queryUrl
    _headersUserAgent = config.headersUserAgent
    _m = kwargs.get('m') or 'QueryData'
    _rowCode = kwargs.get('rowCode') or 'zb'
    _colCode = kwargs.get('colCode') or 'sj'
    _dbCode = kwargs.get('dbCode') or 'hgnd'
    _wds = kwargs.get('wds') or '[]'
    _dfwds = kwargs.get('dfwds')
    _data = functions.get_data(url=_url, headersUserAgent=_headersUserAgent, m=_m,
                               rowCode=_rowCode, colCode=_colCode, dbCode=_dbCode,
                               wds=_wds, dfwds=_dfwds)
    logging.info('Fetched data {} successfully'.format(_dfwds))
    _list = []
    if int(_data.get('returncode')) < 400:
        for d in _data.get('returndata').get('datanodes'):
            _code = d.get('code')
            # separate name: reassigning _data here would shadow the response dict
            _value = float(d.get('data').get('data'))
            _wds = d.get('wds')[0].get('valuecode')
            _year = int(d.get('wds')[1].get('valuecode'))
            _list.append((_code, _value, _wds, _year, _unixTime))
    return _list
def main():
    root.info(f'Reading data and writing to database every {sleep_duration} seconds.')
    prom_temp = Gauge(
        'Temperature', 'Temperature measured by the DHT22 Sensor')
    prom_humid = Gauge(
        'Humidity', 'Relative Humidity measured by the DHT22 Sensor')
    server_started = False
    while True:
        t_start = time.time()
        try:
            date, humidity, temperature = get_data()
            if None in (humidity, temperature):
                raise customEx  # customEx can also be raised in get_data()!
        except customEx:
            time.sleep(int(sleep_duration / 10))
            continue
        # print(date, humidity, temperature)
        root.debug('climate inside: T[C] = {temp}, rH[%] = {hum}'.format(hum=humidity, temp=temperature))
        # logger_data.info(f'; {humidity}; {temperature}')
        prom_temp.set(temperature)
        prom_humid.set(humidity)
        t_end = time.time()
        t_delta = t_end - t_start
        # start the Prometheus endpoint once, after the first successful read
        # (replaces the original's try/except NameError sentinel trick)
        if not server_started:
            server_started = True
            start_http_server(8000)
        try:
            time.sleep(sleep_duration - t_delta)
        except ValueError:
            continue
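# A minimal sketch of the get_data helper the climate loops in this collection
# assume, using the Adafruit_DHT library. The DHT22 sensor type is named in the
# Gauge descriptions above; GPIO pin 4 is an assumption:
import datetime
import Adafruit_DHT

def get_data():
    # read_retry returns (None, None) when the sensor could not be read,
    # which matches the `None in (humidity, temperature)` check above
    humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, 4)
    return datetime.datetime.now(), humidity, temperature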
def get_boxscore_data(date=None, gameId=None, schedule_obj=None):
    """Gets raw json data for given game.

    Args:
        date: The date of the given game using YYYYMMDD format.
        gameId: The gameId for the given game. Acquired using a get_gameId function.
        schedule_obj: Optional schedule object used to look up the gameId.

    Returns:
        Dict of raw json data from the data.nba.net _boxscore.json endpoint
    """
    # TODO: clean up reused code below
    if date and not gameId:
        if schedule_obj:
            gameId = schedule_obj.get_gameId(date=date)
        else:
            gameId, date = S().get_gameId(date=date)
    if not date or not gameId:
        if schedule_obj:
            gameId, date = schedule_obj.last_game_id_date
        else:
            gameId, date = S().last_game_id_date
    url_start = 'http://data.nba.net/prod/v1/'
    url = str(url_start) + str(date) + '/' + str(gameId) + '_boxscore.json'
    data = get_data(url)
    return data
def add_mounted(self):
    nfs_share = capture_mounted_nfs()
    for share in nfs_share:
        data = get_data(share)
        self.add_line('umountline', data)
def start_app():
    global historicalData, sizeOfModelData, marketData, symbolData, periodData, model, n_per_in, n_per_out, n_features
    global modelWeights, percentTestData, epochs, batch_size, earlyStop, saveWeights, displayResults, df, close_scaler, testDf, numberToPredict
    global percentToPredict, predictions, shortDf, actual, normalizedDf, trainDf, normalizedTrainDf, startAmount, tradingFee, train_close_scaler, calcPredictionsNumberPredicts, binaryModel
    # functions.disable_console()
    df, testDf, trainDf, n_features = functions.get_data(
        periodData, marketData, symbolData, percentTestData=percentTestData,
        saveData=True, historical=historicalData, size=sizeOfModelData)
    model = functions.create_model(n_per_in, n_per_out, n_features)
    binaryModel = functions.create_model_binary(n_per_in, n_per_out, n_features)
    normalizedTrainDf, train_close_scaler = functions.normalize_df(trainDf)
    normalizedDf, close_scaler = functions.normalize_df(df)
    normalizedDf.to_csv("normalizedDf.csv")
def get_data(self):
    print('學分統計中...')  # "Tallying credits..."
    self.ui.label_29.setText('學分統計中...')
    self.ui.label_29.setAlignment(Qt.AlignLeft)
    # functions.get_data(driver, self.ui.lineEdit.text()[3:5])
    a = functions.get_data(driver, self.ui.lineEdit.text()[3:5])
    b = [str(value) for value in a]
    labels = [self.ui.label_21, self.ui.label_22, self.ui.label_23,
              self.ui.label_24, self.ui.label_25, self.ui.label_26,
              self.ui.label_27]  # label_28 was commented out in the original
    for label, text in zip(labels, b):
        label.setText(text)
        label.setAlignment(Qt.AlignCenter)
    print('學分統計完成!')  # "Credit tally complete!"
    self.ui.label_29.setText('學分統計完成!')
    self.ui.label_29.setAlignment(Qt.AlignLeft)
def main():
    ts = round(datetime.datetime.now().timestamp(), 0)
    print(" ")
    print("OFDC: Activated at " + str(ts))
    print(" ")
    # Update all tradeable symbols
    for i in range(31):
        sym = "?"
        try:
            sym = get_symbols(i)
            df1 = f.get_data(sym)
            f.update_db(df1, sym)
        except Exception:
            # sym defaults to "?" so this message cannot itself raise NameError
            print("ERR. Get and Update - " + str(sym))
            continue
    # Post DB
    f.post_db()
    ts2 = round(datetime.datetime.now().timestamp(), 0)
    tim = round(((ts2 - ts) / 60), 2)
    msg = f"OFDC: Done, Deactivated in {tim} Minutes."
    print(" ")
    print(msg)
    return None
def analysis():
    form = request.args
    data = fn.get_data(form['country'], form['state'], form['type'])
    total = fn.sum(data)  # renamed from `sum` to avoid shadowing the builtin
    trend, seasonal, resid = fn.decompose(data)
    acf = fn.get_acf(data)
    pacf = fn.get_pacf(data)
    # q_stat = fn.lb_test(acf)
    stats = fn.get_stats(data)
    decomp = {'trend': trend, 'seasonal': seasonal, 'resid': resid}
    data = fn.get_ma(data, 7)
    out = {
        'data': data,
        'decomp': decomp,
        'acf': acf,
        'pacf': pacf,
        'stats': stats,
        'sum': total,
    }
    return render_template('analysis.html', out=out, form=form)
def test_get_data_function(self):
    """Tests the get_data function to assure it returns a dictionary,
    with one of the keys being the _internal key."""
    url = "http://data.nba.net/10s/prod/v1/today.json"
    data = get_data(url)
    self.assertIsNotNone(data)
    self.assertIn("_internal", data.keys())
def chart():
    alea_data, nlea_data, alct_data, nlct_data, alwt_data, nlwt_data, older_data = \
        None, None, None, None, None, None, None
    bc = bar_colors
    boc = bar_outline_colors
    size = 0
    years = list(range(2019, 1870, -1))
    if request.args.get('years') is None:
        year = 2019
    else:
        year = int(request.args.get('years'))
    if year > 1968:
        alea_data = get_data(year, 0)
        if year > 1993:
            alct_data = get_data(year, 1)
            alwt_data = get_data(year, 2)
            nlea_data = get_data(year, 3)
            nlct_data = get_data(year, 4)
            nlwt_data = get_data(year, 5)
        else:
            alwt_data = get_data(year, 1)
            nlea_data = get_data(year, 2)
            nlwt_data = get_data(year, 3)
    else:
        older_data = get_data(year)
        size = int(get_size(year))
    return render_template('index.html', years=years, year=year,
                           alea_data=alea_data, nlea_data=nlea_data,
                           alwt_data=alwt_data, nlwt_data=nlwt_data,
                           alct_data=alct_data, nlct_data=nlct_data,
                           older_data=older_data, bc=bc, boc=boc, size=size)
def drift():
    form = request.args
    data = fn.get_data(form['country'], form['state'], form['type'])
    adwin = fn.adwin(data)
    data = fn.get_drift(data, adwin)
    out = {'data': data}
    return render_template('drift.html', out=out)
def main(): """ Main route """ hotspot_one = { "filename": "map.png", "title": "Temporal hotspot 1", "xticks": functions.get_ticks("weekdays"), "yticks": functions.get_ticks("hours"), "labels": { "xlabel": "Weekdays", "ylabel": "Hours" } } hotspot_two = { "filename": "map2.png", "title": "Temporal hotspot 2", "xticks": functions.get_ticks("weekdays"), "yticks": functions.get_ticks("hours"), "labels": { "xlabel": "Weekdays", "ylabel": "Hours" } } # Get a 2d list, dataframe hotspot_one["data"] = functions.get_data(hotspot_one) hotspot_two["data"] = functions.get_data(hotspot_two) # Creates the hotspot functions.create_hotspot(hotspot_one) functions.create_hotspot(hotspot_two) # The filenames for hotspot images filenames = [] filenames.append(hotspot_one["filename"]) filenames.append(hotspot_two["filename"]) return render_template("index.html", hotspots=filenames)
def forecast():
    form = request.args
    data = fn.get_data(form['country'], form['state'], form['type'])
    h, c, f, m = fn.forecast(data, form['time'])
    out = {
        'data': data,
        'forecast': h,
        'chart': c,
        'future': f
    }
    return render_template('forecast.html', out=out, form=form)
def main():
    logger.info('Displaying humidity.')
    while True:
        time, humidity, temperature = get_data()
        # display measurements; the cursor must be reset on every pass,
        # otherwise the display keeps the old output
        lcd.setCursor(0, 0)
        lcd.message('Temp={0:0.1f}*C\n'.format(temperature))
        lcd.message('Humidity={0:0.1f}%'.format(humidity))
        sleep(sleep_duration)
def update_futopt_tables(db_future_nrows, db_option_nrows):
    text_errors = []
    data = get_data()
    # clear table with stale data
    db.session.query(Future).delete()
    # write fresh data
    for _, row in data["futures"].drop_duplicates().iterrows():
        future = Future(secid=row.SECID,
                        shortname=row.SHORTNAME,
                        lasttradedate=row.LASTTRADEDATE,
                        assetcode=row.ASSETCODE,
                        prevopenposition=row.PREVOPENPOSITION,
                        prevsettleprice=row.PREVSETTLEPRICE,
                        oi_rub=row.OI_RUB,
                        oi_percentage=row.OI_PERCENTAGE,
                        lasttrademonth=row.LASTTRADEMONTH,
                        date_created=datetime.utcnow())
        db.session.add(future)
    try:
        editions = db.session.query(Edition).filter(Edition.table == "futures").first()
        editions.edition = data["future_edition"]
        editions.date_created = datetime.utcnow()
    except AttributeError:
        # first() returned None: no edition row yet, so create one
        editions = Edition(table="futures", edition=data["future_edition"],
                           date_created=datetime.utcnow())
        db.session.add(editions)
    # clear table with stale data
    db.session.query(Option).delete()
    # write fresh data
    for _, row in data["options"].drop_duplicates().iterrows():
        option = Option(secid=row.SECID,
                        shortname=row.SHORTNAME,
                        lasttradedate=row.LASTTRADEDATE,
                        assetcode=row.ASSETCODE,
                        prevopenposition=row.PREVOPENPOSITION,
                        prevsettleprice=row.PREVSETTLEPRICE,
                        oi_rub=row.OI_RUB,
                        oi_percentage=row.OI_PERCENTAGE,
                        lasttrademonth=row.LASTTRADEMONTH,
                        underlying_future=row.UNDERLYING,
                        date_created=datetime.utcnow())
        db.session.add(option)
    try:
        editions = db.session.query(Edition).filter(Edition.table == "options").first()
        editions.edition = data["option_edition"]
        editions.date_created = datetime.utcnow()
    except AttributeError:
        editions = Edition(table="options", edition=data["option_edition"],
                           date_created=datetime.utcnow())
        db.session.add(editions)
    db.session.commit()
    df_fut = pd.read_sql(db.session.query(Future).statement, db.session.bind)
    df_opt = pd.read_sql(db.session.query(Option).statement, db.session.bind)
    return [df_fut, df_opt, text_errors]
def build_dataframe():
    df = get_data()
    df = df.drop(
        ['scoring_drive', 'final_outcome', 'quarter', 'game_id', 'play_id'],
        axis=1)
    df['play_type'] = df['play_type'].astype('category')
    df['last_play_type'] = df['last_play_type'].astype('category')
    df['down'] = df['down'].astype('category')
    df['play_in_drive'] = df['play_in_drive'].astype('category')
    df['score_diff'] = df['current_pos_score'] - df['current_opp_score']
    df = df.drop(['current_pos_score', 'current_opp_score'], axis=1)
    return df
def get_team_data(season=None):
    """Gets raw json data for all teams.

    Args:
        season: Year of season start date. Ex: 2019 for the 2019/2020 season.

    Returns:
        Dict of raw json data from the data.nba.net .../teams.json endpoint
    """
    if not season:
        season = get_season_year()
    url = 'http://data.nba.net/prod/v2/' + str(season) + '/teams.json'
    data = get_data(url)
    return data
def main(**kwargs):
    unixTime = int(time.time())
    url = queryUrl
    hua = headersUserAgent
    m = 'QueryData'
    rowCode = 'zb'
    colCode = 'sj'
    dbCode = 'hgnd'
    wds = '[]'
    dwList = dfwdsList
    # aggregate list for all collected rows
    totalList = []
    # fetch url data, polling each entry of dfwds in turn
    for dfwds in dwList:
        try:
            data = get_data(url=url, headersUserAgent=hua, m=m, rowCode=rowCode,
                            colCode=colCode, dbCode=dbCode, wds=wds, dfwds=dfwds)
        except Exception:
            # the fetch failed; move on to the next entry
            logging.warning('Failed to fetch data {}'.format(dfwds))
            continue
        else:
            logging.info('Fetched data {} successfully'.format(dfwds))
        # pull the fields the database needs out of the GET response
        reValueList = [(x.get('code'),
                        float(x.get('data').get('data')),
                        x.get('wds')[0].get('valuecode'),
                        int(x.get('wds')[1].get('valuecode')),
                        unixTime)
                       for x in data.get('returndata').get('datanodes')
                       if int(data.get('returncode')) < 400]
        # aggregate the rows
        totalList += reValueList
    # walk the aggregate list
    for x in totalList:
        # x is a 5-element tuple: code, value, wds, year, unixTime
        code, value, wds, year, unixTime = x
        # write to the database
        mysql_update(code=code, value=value, wds=wds, year=year, unixTime=unixTime)
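# Hypothetical usage of the crawler above: dfwdsList holds JSON strings that
# select an indicator (zb) code for the stats.gov.cn query endpoint. The value
# code below is illustrative, not a verified indicator code:
dfwdsList = ['[{"wdcode":"zb","valuecode":"A0201"}]']
main()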
def prepare_clustering(input_file: str, similarity_function: str, all_solutions: bool = False):
    """
    :param input_file: path to the input file
    :param similarity_function: the similarity function to be applied; must be one of:
        'single-linkage', 'complete-linkage', 'average-linkage' or 'ward-linkage'
    :param all_solutions: True if the script is intended to return all possible solutions
    :return: void
    """
    cluster_list = get_data(input_file)
    distance_matrix = compute_distance_matrix(cluster_list)
    matrix_update_function = functions[similarity_function]
    bottom_up(cluster_list, distance_matrix, matrix_update_function, all_solutions)
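# A hypothetical call to the driver above. The file path is made up; the
# similarity-function names come from the docstring:
prepare_clustering("data/points.txt", "average-linkage", all_solutions=True)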
def get_standings_data(division=False):
    """Gets raw json data for the league's division/conference standings.

    Args:
        division: string, 'division' to return division standings,
            'conference' to return conference standings

    Returns:
        Dict of raw json data from the data.nba.net .../standings_XXX.json
        endpoint, where XXX is either division or conference.
    """
    if not division:
        division = 'conference'
    url = f"https://data.nba.net/prod/v1/current/standings_{division}.json"
    data = get_data(url)
    return data
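# Example calls for the helper above; both argument values come straight
# from its docstring:
conference_standings = get_standings_data()           # defaults to 'conference'
division_standings = get_standings_data('division')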
def __init__(self, meta_data, verbose=0):
    if os.path.exists(meta_data["base_path"]):
        print("--Warning! Experiment path already exists. By calling further methods, the current files will be overwritten.--")
    else:
        os.makedirs(meta_data["base_path"])
    self.verbose = verbose
    self.meta_data = meta_data
    self.optim = functions.get_optim(meta_data["optim"], meta_data["optim_config"])
    self.X_train, self.y_train, self.X_test, self.y_test = functions.get_data(
        meta_data["dataset"], meta_data["base_path"])
    if 'normalize' in meta_data.keys() and meta_data["normalize"] is not None:
        self.X_train_default = self.X_train
        self.X_test_default = self.X_test
        self.X_train = normalize(self.X_train, mean=meta_data["normalize"]["mean"],
                                 std=meta_data["normalize"]["std"])
        self.X_test = normalize(self.X_test, mean=meta_data["normalize"]["mean"],
                                std=meta_data["normalize"]["std"])
    self.model_name = meta_data["model_name"]
    self.base_path = meta_data["base_path"]
    self.epochs = meta_data["epochs"]
    self.post_epochs = meta_data["post_epochs"]
    self.batch_size = meta_data["batch_size"]
    self.layer_key = meta_data["layer_key"]
    self.embedding_approach = meta_data["embedding_approach"]
    self.embedding_subset = meta_data["embedding_subset"]
    self.embedding_weight = meta_data["embedding_weight"]
    self.embedding_config = meta_data["embedding_conf"]
    self.embedding_epochs = meta_data["embedding_epochs"]
    self.embedding_batch_size = meta_data["embedding_batch_size"]
    self.embedding_optim = functions.get_optim(meta_data["embedding_optim"],
                                               meta_data["embedding_optim_config"])
    self.contraction_factors = meta_data["contraction_factors"]
    self.shift_factors = meta_data["shift_factors"]
    tf.random.set_seed(meta_data["experiment_number"])
    self.classifier_model = None
    self.embedder_model = None
    self.sub_model = None
    self.logits_train = None
    self.logits_test = None
def get_schedule_data(season=None, team=None):
    """Gets raw json data of a given team's schedule for a given season.

    Args:
        season: year of season start date, YYYY format.
        team: teamUrl for given team -> team name, ex: raptors, sixers

    Returns:
        Dict of raw json data from the data.nba.net .../schedule.json endpoint
    """
    # the original guarded the season default with `if not team:`, which
    # left season as None whenever a team was passed; fixed here
    if not season:
        season = get_season_year()
    if not team:
        team = get_team()
    team = handle_team_url_name(team)
    url_start = 'http://data.nba.net/prod/v1/'
    url = url_start + str(season) + '/teams/' + str(team) + '/schedule.json'
    data = get_data(url)
    return data
def get_team_leaders_data(team=None, season=None):
    """Gets raw json data for stat leaders for a given team.

    Args:
        team: lowercase team name of team. ex: raptors, sixers, bulls
        season: Year of season start date. Ex: 2019 for the 2019/2020 season.

    Returns:
        Dict of raw json data from the data.nba.net /leaders.json endpoint
    """
    if not team:
        team = get_team()
    team = handle_team_url_name(team)
    if not season:
        season = get_season_year()
    url = ('http://data.nba.net/prod/v1/' + str(season) + '/teams/'
           + str(team.lower()) + '/leaders.json')
    data = get_data(url)
    return data
def lambda_handler(event, context):
    # extract the inputs
    try:
        token = event['token']
        proy = event['project']
        month = event['month']
        year = event['year']
    except KeyError:
        token = '0'
    # otherwise return 422 - UNPROCESSABLE ENTITY
    if token == '0':
        return {'statusCode': 422, 'body': json.dumps('Check parameters')}
    elif token == 'VAcoG60gSbifBrrnKL_hUw':
        # we have the inputs; pass them along
        data = get_data(proy, month, year)
        return {
            'mensajes': data[0],
            'tiempo_total': data[1],
            'llamadas': data[2]
        }
    else:
        return {'statusCode': 403, 'body': json.dumps('Invalid token')}
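# A hypothetical local invocation of the handler above. The token value is the
# one the handler checks for; the remaining fields are made-up examples:
event = {
    'token': 'VAcoG60gSbifBrrnKL_hUw',
    'project': 'demo-project',
    'month': 5,
    'year': 2021,
}
print(lambda_handler(event, context=None))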
def monitor_humidity(context: CallbackContext):
    job = context.job
    results = get_data()
    humidity = results[1]
    ## alternative for debugging
    # humidity = np.random.normal(60, 10, 1)
    humidity_pretty = "{0:0.1f}%".format(humidity)
    if humidity > 75:
        job.context['alerting'] = True
        msg_int = job.context['message_interval']
        if msg_int % 30 == 0 and msg_int < 1440:
            # send a message every 30 minutes and stop after 24h
            # (60 minutes * 24 hours = 1440 minutes)
            logger.info('Sending alert. Humidity at ' + humidity_pretty)
            context.bot.send_message(chat_id=c.channel('id'),
                                     text='Humidity at ' + humidity_pretty + '! Air the room!')
        else:
            logger.info('Message interval = ' + str(msg_int))
        job.context['message_interval'] += 1
    elif humidity <= 75 and job.context['alerting']:
        # this part only runs once after we have stopped alerting
        # reset context values
        job.context['alerting'] = False
        job.context['message_interval'] = 30
        logger.info('Humidity level restored.')
        context.bot.send_message(chat_id=c.channel('id'),
                                 text='Safe humidity level restored!')
    else:
        # this is the standard case, which usually runs every 60 seconds
        logger.info('Checking humidity. Humidity at ' + humidity_pretty)
#########################
#### Keyword Reports ####

# Set Local Parameters
dimension = 'phrase'
service_url = 'https://api.semrush.com'

## Table 1.1 - Keyword Overview ##
# Set Local Parameters
call_type = 'phrase_this'
features = 'Nq'

# Request Data
final_df_phrase_this = func.get_data(dimension=dimension, call_type=call_type,
                                     features=features, service_url=service_url,
                                     api_key=api_key, date_mid_month=date_mid_month,
                                     kw_list=kw_list, db_list=db_list,
                                     url_list=url_list, current_time=current_time,
                                     last_update_date=last_update_date_semrush_kw_traffic)

## Table 1.2 - Organic Results ##
# Set Local Parameters
call_type = 'phrase_organic'
features = 'Dn,Ur,Fp'
serp_features_map = {
    '0': 'instant_answer',
    '6': 'site_links',
    '11': 'featured_snippet',
    '22': 'faq',
}

# Request Data
final_df_phrase_organic = func.get_data(dimension=dimension, call_type=call_type,
                                        features=features, service_url=service_url,
                                        api_key=api_key, date_mid_month=date_mid_month,
                                        kw_list=kw_list, db_list=db_list,
from __future__ import print_function

import numpy as np
import cv2
import matplotlib.cm as cm
from matplotlib import pyplot as plt

import keras
from keras import activations
from keras.applications.vgg16 import VGG16, preprocess_input
from vis.visualization import visualize_cam, overlay
from vis.utils import utils

from functions import get_data, make_model

x_train, y_train, x_test, y_test = get_data()

model = VGG16(weights=None, input_shape=(256, 256, 1), classes=2)
model.compile(loss=keras.losses.binary_crossentropy, optimizer="sgd")

epochs = 6
try:
    model.load_weights("model.h5")
except (OSError, ValueError):
    # train from scratch when the weights file is missing or incompatible
    # (a missing file raises OSError; the original caught only ValueError)
    model.fit(x_train, y_train, epochs=epochs, verbose=1)
    model.save_weights("model.h5")

# score = model.evaluate(x_test, y_test, verbose=0)
# print('Test loss:', score)
# print('Test accuracy:', score)

class_idx = 0
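# The visualize_cam/overlay imports above point at a Grad-CAM step that is cut
# off here. A minimal sketch of the usual keras-vis recipe, assuming the final
# dense layer keeps its default VGG16 name 'predictions' and that x_test holds
# the input images:
layer_idx = utils.find_layer_idx(model, 'predictions')
# swap softmax for a linear activation so the class gradients are meaningful
model.layers[layer_idx].activation = activations.linear
model = utils.apply_modifications(model)

heatmap = visualize_cam(model, layer_idx, filter_indices=class_idx,
                        seed_input=x_test[0])
plt.imshow(heatmap, cmap='jet')
plt.show()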
import numpy as np
from sys import argv
from scipy import special
from scipy.integrate import quad
from scipy import optimize as sciopt
from scipy.interpolate import interp1d as interp
from functions import integral2, Jfactor, get_data
from multiprocessing import Pool

################################################################################################################
# dwarf surface brightness profile
def I(R, rh):
    return 4. / 3. * rh / (1 + (R / rh) ** 2) ** 2

###########################################################
dwarf = argv[1]
R, v, dv, D, rh, rt = get_data(dwarf)
u = v.mean()
theta = 0.5
r0_i, r0_f, Nr0 = 4, 4, 200
ra_i, ra_f, Nra = 4, 4, 200
case = 'OM_%s_%i%i%i%i_%i' % (dwarf, r0_i, r0_f, ra_i, ra_f, theta * 10)
r0_array = np.logspace(-r0_i, r0_f, Nr0)
ra_array = np.logspace(-ra_i, ra_f, Nra)
gamma_array = R / rh
alpha_array = rh / r0_array
delta_array = ra_array / rh
A_array = np.array([gamma_array[i] / I(Ri, rh) for i, Ri in enumerate(R)])
I_array = np.zeros(shape=(len(A_array), len(ra_array), len(r0_array)))
import numpy as np
import matplotlib.pyplot as plt
import functions
import Wavelets

# Retrieve Historical Data
ohlc = functions.get_data(ticker="AAPL", interval="1d",
                          start_date="2019-01-01", end_date="2021-01-01")
signal = ohlc['Close']

# Choosing Analysis Parameters
j_max = -4
nb_moments = 4
threshold = "SURE"

# Initializing Graph Params
fig, [[ax1, ax2], [ax3, ax4]] = plt.subplots(nrows=2, ncols=2)
fig.set_size_inches(16, 7.6)
fig.set_tight_layout(True)
if nb_moments == 1:
    wavelet_name = "Haar"
else:
    wavelet_name = "Daubechie_{}".format(nb_moments)
fig.suptitle("Signal Wavelet Analysis (" + wavelet_name + ")", fontsize=16)

# Wavelet Analysis
y = np.empty(len(signal))
y_denoised = np.empty(len(signal))
wavelet = Wavelets.Daubechie(signal=signal, nb_vanishing_moments=nb_moments)
for level in range(0, abs(j_max)):
from scipy.stats import norm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import numpy as np
import sys, json
import functions as fun

# get extent image of filesystem
path = sys.argv[1]
name = sys.argv[2]
data = fun.get_data(path)
extents = open('/root/tmp/fs-drift.data', 'w')
for i in range(0, len(data)):
    extents.write(str(fun.count_extents(data[i])) + '\n')
extents.close()

# get extents data from file
arch = "/root/tmp/fs-drift.data"
datos = []
for item in open(arch, 'r'):
    item = item.strip()
    if item != '':
        try:
            datos.append(int(item))
        except ValueError:
            pass
suma = sum(datos)
buckets = list(set(datos))
buckets.sort()
values = []