def command_line_arguments(year, month, day, plot_day, plot_month):
    if day != 'None':
        limit = monthrange(int(year), int(month))[1]
        if int(day) > limit:
            print("Invalid Date for given month and year. Exiting...\n")
            exit()
        val = get_day(year, month, day)
        if plot_day:
            try:
                plot_points = get_day(year, month, day, rtr=True)
                val = False
                pl = plotting.Plotting(year=year, month=month, day=day)
                pl.normal_plot(plot_points)
                return "plotted"
            except IndexError:
                pass
    elif day == 'None':
        if month and year:
            print("\n YEAR: {} | MONTH: {} | \n".format(year, month))
            get_data.main(year, month)
        if plot_month:
            try:
                monthly_average = get_month(year, month)
                plot = plotting.Plotting(year=year, month=month)
                plot.normal_plot(monthly_average, day=False)
            except IndexError:
                raise IndexError("Index Error. Can't plot monthly average values.")
def extract(self):
    get_data.main(self.images_root_dir, self.image_path_label_csv_path)
    if config_retrieval['model_kind'] == 'delf':
        self.delf.extract_feature_from_imagelabelcsv(
            self.image_path_label_csv_path, self.feature_npy_path)
    elif config_retrieval['model_kind'] == 'ArcFace':
        pass  # ArcFace feature extraction not implemented yet
def get_day(year, month, day, rtr=False):
    response = get_data.get_value_from_database(year, month, day)
    if response:
        if rtr:
            return json.loads(response[0][1])
        print("\n YEAR: {} | MONTH: {} | DAY: {}".format(year, month, day))
        print(response[0][1])
        return 1
    else:
        # Nothing cached yet: fetch the month into the database, then retry.
        get_data.main(year, month, display=False)
        return get_day(year, month, day, rtr=rtr)
def main(BUILD_STATE, IMAGES_PER_LABEL, TRAIN_MODEL):
    labels = [0, 1]
    get_data.main(BUILD_STATE, IMAGES_PER_LABEL, labels)
    datapath = 'data'
    dataset, plot_set = preprocess.compileData(data_path=datapath, make_new=BUILD_STATE)
    if BUILD_STATE:
        preprocess.example_plot(plot_set)
    convmodel = netmodel.buildModel(print_summary=False)
    if TRAIN_MODEL:
        netmodel.train(convmodel)
def data(run):
    """The basic interface depths are further processed to pull out
    maxima / minima using peakdetect. Parallax correction is applied
    here, as well as conversion of the data units from pixels into
    tank-relative numbers (units of fluid depth, lock length).

    At this point some 'sanity' images are written out with some of
    the measured data superimposed.

    The data extracted here are saved to another file.
    """
    get_data.main(run)
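# The docstring above names the peak-extraction step without showing it. A
# minimal sketch of that kind of step, using scipy.signal.find_peaks in place
# of peakdetect; the synthetic depths_px trace and the calibration constant
# are stand-ins, not values from this project.
import numpy as np
from scipy.signal import find_peaks

x = np.linspace(0, 4 * np.pi, 200)
depths_px = 120 + 15 * np.sin(x)          # hypothetical interface depths (pixels)

maxima, _ = find_peaks(depths_px)         # indices of local maxima
minima, _ = find_peaks(-depths_px)        # minima are maxima of the negated trace

PIXELS_PER_FLUID_DEPTH = 400.0            # assumed calibration constant
depths_nd = depths_px / PIXELS_PER_FLUID_DEPTH  # tank-relative depth units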
def cla():
    val = True
    try:
        year = sys.argv[1]
        month = sys.argv[2]
    except IndexError:
        display_error()
        exit()
    try:
        if int(year) > 2013 or int(year) < 1950:
            print("Year beyond the range. Exiting...\n")
            display_error()
            exit()
        if int(month) < 1 or int(month) > 12:
            print("Invalid Month. Exiting...\n")
            display_error()
            exit()
    except ValueError:
        print("Invalid Numerical Value")
        exit()
    try:
        day = sys.argv[3]
        if int(day) > 31 or int(day) < 1:
            print("Invalid Dates. Exiting...\n")
            display_error()
            exit()
        limit = monthrange(int(year), int(month))[1]
        if int(day) > limit:
            print("Invalid Date for given month and year. Exiting...\n")
            display_error()
            exit()
        try:
            # A fourth argument, if present, requests plotting instead.
            if not sys.argv[4]:
                val = get_day(year, month, day)
        except IndexError:
            pass
    except IndexError:
        pass
    try:
        if sys.argv[4]:
            plot_points = get_day(year, month, day, rtr=True)
            val = False
            pl = plotting.Plotting(year=year, month=month, day=day)
            pl.normal_plot(plot_points)
            return "plotted"
    except IndexError:
        pass
    if val:
        get_data.main(year, month)
def main():
    # get all possible flights and user params from get_data script
    params, df_outbound, df_inbound = get_data.main()
    # print()

    # get common destinations for outbound and inbound trips
    # print('OUTBOUND FLIGHTS-------')
    # print('Date: ', params['date_outbound'])
    # print('Getting common destinations and calculating prices...')
    df_common_dest_outbound = get_common_dest(df_outbound)
    # print()

    # print('Saving flights to sorted_common_dest.txt')
    save_df_to_json(df_common_dest_outbound, 'Data/sorted_common_dest.json')
    # print()
    # print_top_flights(params, df_common_dest_outbound)

    # if params['date_inbound'] != None:
    #     print()
    #     print('INBOUND FLIGHTS-------')
    #     print('Date: ', params['date_inbound'])
    #     print('Getting common destinations and calculating prices...')
    #     df_common_dest_inbound = get_common_dest(df_inbound)
    #     # have to figure out how to combine inbound and outbound
    # print()

    print('Done!')
async def main():
    while True:
        with open(get_data.OUT_FILE) as f:
            old = f.read()
        get_data.main()
        print(
            "Fetched for updates, time:",
            strftime("%d %b %Y %H:%M:%S +0000", gmtime()),
        )
        with open(get_data.OUT_FILE) as f:
            new = f.read()
        if new != old:
            print("Pushing new data")
            os.system("git pull")
            os.system(f"git add {get_data.OUT_FILE}")
            os.system('git commit -m "Update COVID-19 Data"')
            os.system("git push")
        await asyncio.sleep(30 * 60)  # 30 min
def get_all():
    substitutions = get_data.main()
    all_substitutions = []
    for teacher in substitutions.keys():
        teacher_subs = [
            {**create_sub(sub), "substitute_teacher": teacher}
            for sub in substitutions[teacher]
        ]
        all_substitutions += sorted(teacher_subs, key=lambda sub: sub["lesson_id"])
    return jsonify({"data": all_substitutions})
def stocks():
    stock_data = get_data.main()
    stock_data_df = pd.DataFrame(stock_data)
    stock_data_df.drop(index=0, inplace=True)
    stock_data_df['date'] = stock_data_df['date'].astype(str)
    stock_data_df = stock_data_df.rename(columns={'date': 'date_local'})
    stock_data_lod = stock_data_df.to_dict('records')
    # return render_template('index2.html', stock_data=stock_data)
    return jsonify(stock_data_lod)
def retail():
    stock_data, covid_data = get_data.main()
    topic = 'covid-19 retail industry'
    article_count = 5
    articles = get_data.get_articles(topic, article_count)
    return render_template('retail.html', covid_data=covid_data,
                           stock_data=stock_data, articles=articles)
def delivery():
    stock_data, covid_data = get_data.main()
    topic = 'covid-19 delivery industry'
    article_count = 5
    articles = get_data.get_articles(topic, article_count)
    return render_template('delivery.html', covid_data=covid_data,
                           stock_data=stock_data, articles=articles)
def index():
    stock_data, covid_data = get_data.main()
    topic = 'covid-19'
    article_count = 5
    articles = get_data.get_articles(topic, article_count)
    return render_template('index.html', covid_data=covid_data,
                           stock_data=stock_data, articles=articles)
def med_services():
    stock_data, covid_data = get_data.main()
    topic = 'covid-19 medical services'
    article_count = 5
    articles = get_data.get_articles(topic, article_count)
    return render_template('med_services.html', covid_data=covid_data,
                           stock_data=stock_data, articles=articles)
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

import get_data

data = get_data.main()
data = data[data['Country/Region'] == 'US']
data = data[data['Province/State'] == 'Minnesota']
data_to_plot = data.get(['Province/State', 'date', 'n_cases'])
data_to_plot.plot()
plt.savefig('us_cases_by_state.pdf')
#! /usr/bin/env python
"""
This script should reproduce our best submission on Kaggle, using an already
trained CNN architecture for Twitter sentiment analysis.

Our model is (at least) 1.6 GB, so we didn't attach it. Instead, you can
download it with the `get_data.py` script (which this script calls).
For training, see `train_CNN.py`; for evaluation, `eval_CNN.py`.

Our code is an adaptation of the open-source (Apache license) software at
https://github.com/dennybritz/cnn-text-classification-tf

Authors: András Ecker, Valentin Vasiliu, Ciprian I. Tomoiagă
"""

# Data preparation:
# ==================================================
import get_data
get_data.main()

# Model evaluation:
# ==================================================
import eval_CNN
def __iter__(self):
    pre_frame = -self.__frame_gap - 1
    if self.__length == 0:
        loop = lambda x: True
    else:
        loop = lambda x: x < self.__length
    self.__reader.set(cv2.CAP_PROP_POS_FRAMES, self.__now_frame)
    while loop(self.__now_frame):
        ret, im = self.__reader.read()
        if not ret:  # ran out of frames
            break
        if self.__now_frame - pre_frame > self.__frame_gap:
            pre_frame = self.__now_frame
            # flip the channel axis: OpenCV reads BGR, we yield RGB
            yield self.__now_frame, np.flip(im, 2)
            self.__frame_count -= 1
            if self.__frame_count == 0:
                break
        self.__now_frame += 1


if __name__ == "__main__":
    import get_data
    import sys

    pics = main(get_data.main(sys.argv[1], sys.argv[2]))
    for pic in pics:
        cv2.imshow('a', pic)
        k = cv2.waitKey(30) & 0xff
        if k == 29:  # stop on key code 29 (Ctrl-])
            break
    cv2.destroyAllWindows()
def index():
    stock_data = get_data.main()
    return render_template('index.html', stock_data=stock_data)
def main():
    args = parser.parse_args()
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    # select device
    torch.cuda.set_device(args.gpu_id)
    print('[Info] use gpu: {}'.format(torch.cuda.current_device()))

    # get parameters
    sys.path.insert(0, args.model_dir)
    from params import params
    assert len(params) > 1
    last_cfg = params[0][-1]
    last_cfg()
    get_data.main()
    dataset = VQADataset('test', params[0][1])
    itoa = dataset.codebook['itoa']

    vote_buff = [{} for i in range(len(dataset))]
    conf_buff = np.zeros((len(dataset), len(itoa)))
    sm_conf_buff = np.zeros((len(dataset), len(itoa)))
    l2_conf_buff = np.zeros((len(dataset), len(itoa)))
    que_ids = dataset.que_id

    for fpath, mgrp, mname, acc, cfg_func in params:
        # data
        if cfg_func != last_cfg:
            cfg_func()
            get_data.main()
            last_cfg = cfg_func
            dataset = VQADataset('test', mgrp)
            itoa = dataset.codebook['itoa']
        dataset.reload_obj(mgrp)
        dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=args.bs, shuffle=False,
            num_workers=2, pin_memory=True)

        # model
        model_group = import_module('models.' + mgrp)
        model = getattr(model_group, mname)(
            num_words=dataset.num_words,
            num_ans=dataset.num_ans,
            emb_size=get_emb_size())
        cp_file = os.path.join(args.model_dir, fpath)
        checkpoint = torch.load(cp_file, map_location=lambda s, l: s.cuda(0))
        model.load_state_dict(checkpoint['state_dict'])
        model.cuda()
        model.eval()

        # predict
        bar = progressbar.ProgressBar()
        start = 0
        # sample: (que_id, img, que, [obj])
        for sample in bar(dataloader):
            sample_var = [Variable(d).cuda() for d in list(sample)[1:]]
            score = model(*sample_var)
            sm_score = torch.nn.functional.softmax(score, dim=1)
            l2_score = torch.nn.functional.normalize(score)
            bs = score.size(0)
            conf_buff[start:start + bs] += score.data.cpu().numpy()
            sm_conf_buff[start:start + bs] += sm_score.data.cpu().numpy()
            l2_conf_buff[start:start + bs] += l2_score.data.cpu().numpy()
            _, ans_ids = torch.max(score.data, dim=1)
            for i, ans_id in enumerate(ans_ids):
                ans = itoa[ans_id]
                ans_score = acc + vote_buff[start + i].get(ans, 0)
                vote_buff[start + i][ans] = ans_score
            start += bs
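    # The excerpt stops before the vote/confidence buffers are reduced to
    # final answers. A hedged sketch of that reduction -- accuracy-weighted
    # vote winner with a softmax-confidence fallback; the project's actual
    # aggregation code is not shown, so this scheme is an assumption.
    results = []
    for i, que_id in enumerate(que_ids):
        if vote_buff[i]:
            ans = max(vote_buff[i], key=vote_buff[i].get)  # highest weighted vote
        else:
            ans = itoa[int(np.argmax(sm_conf_buff[i]))]    # fall back to confidence
        results.append({'question_id': int(que_id), 'answer': ans})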
# -*- coding: utf-8 -*-
import get_data

get_data.main(product="", cve_id="", file_name="opensource_product_list5.csv")
def f_data(run):
    """Fast version of data."""
    get_data.main(run, 22)
def get_by_teacher(name):
    teachers = get_data.main()
    # Default to an empty list so an unknown teacher name doesn't raise a
    # TypeError when iterated.
    subs = teachers.get(name, [])
    substitutions_of_teacher = [create_sub(sub) for sub in subs]
    return jsonify({"data": substitutions_of_teacher})
parser.add_argument('-w', '--nwalkers', default=8, type=int,
                    help='(int) Number of walkers in parameter space')
args = parser.parse_args()

N_CPU = args.ncpu
MCMC_STEPS = args.chainlength
N_WALKERS = args.nwalkers

#%%
"""
Get data from measurements.
Choose relevant transects. Slice by date.
"""
fn_weather_data = Path('data/weather_station_historic_data.xlsx')
dfs_by_transects = get_data.main(fn_weather_data)

# Choose transects
relevant_transects = ['P002', 'P012', 'P016', 'P018']
dfs_relevant_transects = {x: dfs_by_transects[x] for x in relevant_transects}
dfs_sliced_relevant_transects = {}

# Slice by julian day
jday_bounds = {'P002': [775, 817],  # 660: 22/10/2019; 830: 9/4/2020
               'P012': [740, 771],
               'P015': [707, 724],
               'P016': [707, 731],
               'P018': [815, 827]}
for key, df in dfs_relevant_transects.items():
    # Keep only rows inside the transect's julian-day window. The 'jday'
    # column name is an assumption; the original loop body is not shown.
    lo, hi = jday_bounds[key]
    dfs_sliced_relevant_transects[key] = df[(df['jday'] >= lo) & (df['jday'] <= hi)]
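# N_CPU, MCMC_STEPS and N_WALKERS are defined above but consumed after this
# excerpt ends. A minimal sketch of how such constants typically feed an
# ensemble sampler; emcee, ndim and the Gaussian log-probability are
# assumptions that do not appear in the excerpt.
import numpy as np
import emcee
from multiprocessing import Pool

def log_prob(theta):
    return -0.5 * np.sum(theta ** 2)  # hypothetical stand-in posterior

ndim = 3  # assumed number of sampled parameters
p0 = np.random.randn(N_WALKERS, ndim)  # one starting point per walker
with Pool(N_CPU) as pool:
    sampler = emcee.EnsembleSampler(N_WALKERS, ndim, log_prob, pool=pool)
    sampler.run_mcmc(p0, MCMC_STEPS)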
import time

import pandas as pd

import get_data
from oanda_api import buy_sell, accsum

# Open with a one-unit short so the loop below has a position to manage.
buy_sell.mkt_position('AUD_USD', '-1')
current_status = 0  # 1 = in long position, 0 = in short position

print('This is where the fun begins')

while True:
    instrument = "AUD_USD"
    df = get_data.main()
    account_value = accsum.account_value()
    # Size the position at 4% of account value, converted to units at the last close.
    units = round((0.04 * float(account_value)) / df['Close'].iloc[-1])
    # psarbull/psarbear hold the string "NaN" when that signal is absent.
    if df['psarbull'].iloc[-1] != "NaN":
        if current_status == 0:
            current_status = 1
            print('Buy Signal')
            buy_sell.close_all('short', 'AUD_USD')
            time.sleep(5)
            buy_sell.mkt_position('AUD_USD', str(units))
    elif df['psarbear'].iloc[-1] != "NaN":
        if current_status == 1:
            current_status = 0
            print('Sell Signal')
            buy_sell.close_all('long', 'AUD_USD')
            time.sleep(5)
            buy_sell.mkt_position('AUD_USD', str(-units))
    print(current_status)
import get_data

url = "https://www.betexplorer.com/handball/sweden/she-women/"
x, y = get_data.main(url)
print(x, '\n\n', y)
def stock_page():
    stock_data = get_data.main()
    # pass the fetched data through to the template
    return render_template('stock-page.html', stock_data=stock_data)
def get_teachers():
    teachers = list(get_data.main().keys())
    filtered_teachers = filter_dismissed_lessons(teachers)
    return jsonify({"data": filtered_teachers})