import sys

import xwiimote

import util


def main():
    # wait_for_balanceboard(), measurements(), and average_mesurements()
    # are defined earlier in this script.
    if len(sys.argv) == 2:
        device = sys.argv[1]
    else:
        device = wait_for_balanceboard()
    iface = xwiimote.iface(device)
    iface.open(xwiimote.IFACE_BALANCE_BOARD)
    try:
        # for m in measurements(iface):
        #     print_bboard_measurements(*m)
        for kg, err in average_mesurements(measurements(iface)):
            pkg = "qme.seri.wiiweight.weight"
            perr = "qme.seri.wiiweight.err"
            # readings are in units of 0.01 kg
            print("{:.2f} +/- {:.2f}".format(kg / 100.0, err / 100.0))
            kg, err = (int(round(x, 0)) for x in (kg, err))
            for d, p in zip((kg, err), (pkg, perr)):
                print(d, p)
                util.submit(p, d)
            sys.exit(0)
    except KeyboardInterrupt:
        print("Bye!")
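# `average_mesurements` (the original spelling) is defined elsewhere in this
# script. A minimal sketch of what the averaging generator might look like,
# assuming `measurements` yields per-sensor readings in units of 0.01 kg and
# that readings are averaged over a fixed-size window; the window size and the
# standard-error formula are assumptions, not the original implementation.
from statistics import mean, stdev


def average_mesurements(measurement_iter, window=200):
    """Collect `window` total-weight samples, then yield (mean, standard error)."""
    samples = []
    for m in measurement_iter:
        samples.append(sum(m))  # total weight = sum of the load sensors
        if len(samples) >= window:
            yield mean(samples), stdev(samples) / len(samples) ** 0.5
            samples = []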
def predict(*, pred_date: str, bj_windows: str = 'golden_8', ld_windows: str = 'golden_8',
            bj_method: str = 'median', ld_method: str = 'median', bj_lgbm: bool = True,
            ld_lgbm: bool = True, bj_fwbw: bool = True, ld_fwbw: bool = True,
            n_thread: int = 8, save: bool = True, dosubmit: bool = False,
            suffix: str = 'dummy', verbose: int = 2):
    vprint = get_verbose_print(verbose_level=verbose)
    pred_date = pd.to_datetime(pred_date)
    get_new_data = pred_date > pd.to_datetime('2018-03-28')
    sub = pd.read_csv("../input/sample_submission.csv")
    OUTDIR = '../submission/sub_{}-{}-{}'.format(pred_date.year, pred_date.month, pred_date.day)
    os.system('mkdir -p {}'.format(OUTDIR))
    # predictions start the day after pred_date
    predict_start_day = pred_date + pd.Timedelta(1, unit='D')
    predict_start = pd.to_datetime(get_date(predict_start_day))
    bj_data = get_city_data(city='bj', vprint=vprint, impute_with_lgbm=bj_lgbm,
                            get_new_data=get_new_data)
    ld_data = get_city_data(city='ld', vprint=vprint, impute_with_lgbm=ld_lgbm,
                            get_new_data=get_new_data)
    vprint(2, bj_data.head())
    vprint(2, bj_data.loc[bj_data['stationId'] != 'zhiwuyuan_aq'].tail())
    vprint(2, ld_data.head())
    vprint(2, ld_data.tail())
    bj_fwbw_impute_methods = ['day', 'mean'] if bj_fwbw else []
    ld_fwbw_impute_methods = ['day', 'mean'] if ld_fwbw else []
    bj_pred = rolling_summary(sub=sub, data=bj_data, predict_start=predict_start,
                              windows=MEDIAN_WINDOWS[bj_windows], n_thread=n_thread,
                              method=bj_method, impute_methods=bj_fwbw_impute_methods, vprint=vprint)
    ld_pred = rolling_summary(sub=sub, data=ld_data, predict_start=predict_start,
                              windows=MEDIAN_WINDOWS[ld_windows], n_thread=n_thread,
                              method=ld_method, impute_methods=ld_fwbw_impute_methods, vprint=vprint)
    # route each submission row to the prediction for its city
    submissions = sub.copy()
    bj_cond = submissions['test_id'].map(lambda x: x.split('#')[0] in BEIJING_STATIONS)
    ld_cond = submissions['test_id'].map(lambda x: x.split('#')[0] in LONDON_STATIONS)
    submissions.loc[bj_cond] = bj_pred.loc[bj_cond].values
    submissions.loc[ld_cond] = ld_pred.loc[ld_cond].values
    # concentrations cannot be negative
    submissions['PM2.5'] = submissions['PM2.5'].map(lambda x: max(0, x))
    submissions['PM10'] = submissions['PM10'].map(lambda x: max(0, x))
    submissions['O3'] = submissions['O3'].map(lambda x: max(0, x))
    if save:
        if not suffix:
            filepath = '{}/model_{}_sub.csv'.format(OUTDIR, 3)
        else:
            filepath = '{}/model_{}_sub_{}.csv'.format(OUTDIR, 3, suffix)
        submissions.to_csv(filepath, index=False)
        if dosubmit:
            submit(subfile=filepath,
                   description='model_{}_{}'.format(3, str(predict_start).split()[0]),
                   filename='model_{}_sub_{}.csv'.format(3, str(predict_start).split()[0]))
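# `get_verbose_print` is defined elsewhere in this project. A plausible
# minimal sketch (an assumption, not the original): a closure that prints a
# message only when its level is at or below the configured verbosity.
def get_verbose_print(verbose_level: int):
    def vprint(level, *args, **kwargs):
        if level <= verbose_level:
            print(*args, **kwargs)
    return vprint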
def predict(*, pred_date: str, bj_his_length: int, ld_his_length: int, bj_npoints: int,
            ld_npoints: int, bj_scale: float, ld_scale: float, n_thread: int = 8,
            save: bool = True, dosubmit: bool = False, suffix: str = 'dummy', verbose: int = 2):
    vprint = get_verbose_print(verbose_level=verbose)
    pred_date = pd.to_datetime(pred_date)
    get_new_data = pred_date > pd.to_datetime('2018-03-28')
    sub = pd.read_csv("../input/sample_submission.csv")
    OUTDIR = '../submission/sub_{}-{}-{}'.format(pred_date.year, pred_date.month, pred_date.day)
    os.system('mkdir -p {}'.format(OUTDIR))
    predict_start_day = pred_date + pd.Timedelta(1, unit='D')
    predict_start = pd.to_datetime(get_date(predict_start_day))
    bj_data = get_city_data(city='bj', vprint=vprint, impute_with_lgbm=True, partial_data=True,
                            get_new_data=get_new_data)
    ld_data = get_city_data(city='ld', vprint=vprint, impute_with_lgbm=True, partial_data=True,
                            get_new_data=get_new_data)
    vprint(2, bj_data.head())
    vprint(2, bj_data.loc[bj_data['stationId'] != 'zhiwuyuan_aq'].tail())
    vprint(2, ld_data.head())
    vprint(2, ld_data.tail())
    bj_pred = fbprophet(sub=sub, data=bj_data, current_date=predict_start,
                        history_length=bj_his_length, changepoint_scale=bj_scale,
                        num_changepoints=bj_npoints, n_thread=n_thread, vprint=vprint)
    ld_pred = fbprophet(sub=sub, data=ld_data, current_date=predict_start,
                        history_length=ld_his_length, changepoint_scale=ld_scale,
                        num_changepoints=ld_npoints, n_thread=n_thread, vprint=vprint)
    submissions = sub.copy()
    bj_cond = submissions['test_id'].map(lambda x: x.split('#')[0] in BEIJING_STATIONS)
    ld_cond = submissions['test_id'].map(lambda x: x.split('#')[0] in LONDON_STATIONS)
    submissions.loc[bj_cond, ['PM2.5', 'PM10', 'O3']] = bj_pred.loc[bj_cond, ['PM2.5', 'PM10', 'O3']].values
    submissions.loc[ld_cond, ['PM2.5', 'PM10']] = ld_pred.loc[ld_cond, ['PM2.5', 'PM10']].values
    submissions['PM2.5'] = submissions['PM2.5'].map(lambda x: max(0, x))
    submissions['PM10'] = submissions['PM10'].map(lambda x: max(0, x))
    submissions['O3'] = submissions['O3'].map(lambda x: max(0, x))
    submissions = submissions[['test_id', 'PM2.5', 'PM10', 'O3']]
    if save:
        if not suffix:
            filepath = '{}/model_{}_sub.csv'.format(OUTDIR, 4)
        else:
            filepath = '{}/model_{}_sub_{}.csv'.format(OUTDIR, 4, suffix)
        submissions.to_csv(filepath, index=False)
        if dosubmit:
            submit(subfile=filepath,
                   description='model_{}_{}'.format(4, str(predict_start).split()[0]),
                   filename='model_{}_sub_{}.csv'.format(4, str(predict_start).split()[0]))
def onsubmit(subj_id, instance):
    # `request`, `ordinal`, `form_id`, and `home` come from the enclosing
    # view's scope.
    if subj_id:
        # only superusers may provide arbitrary subject ids
        if not request.user.is_superuser:
            return HttpResponseForbidden('dev access required')
    else:
        subj_id = request.session.get('subject_oid')
        if not subj_id:
            assert False, 'subject OID not in session'
    odm = util.generate_submit_payload({
        'subject_id': subj_id,
        'event_ordinal': ordinal,
    }, instance)
    util.submit(odm)
    cl = CompletionLog()
    cl.crf_id = form_id
    cl.ordinal = ordinal or 0
    cl.subject_oid = subj_id
    cl.save()
    return redirect(home)
from util import prepare_data, submit
from models.lgb import get_lgb_predictions
from models.nn import get_nn_predictions

if __name__ == "__main__":
    train_gal, train_exgal, test_gal, test_exgal, gal_class_list, exgal_class_list, test_df = prepare_data()

    # First level: LightGBM out-of-fold and test predictions.
    lgb_oof_gal, lgb_oof_exgal, lgb_test_gal, lgb_test_exgal = get_lgb_predictions(
        train_gal, train_exgal, test_gal, test_exgal)

    # Feed the LightGBM predictions to the second-level model as extra features.
    lgb_gal_preds = []
    for i in range(lgb_oof_gal.shape[1]):
        lgb_gal_preds.append("lgb_pred" + str(i))
        train_gal["lgb_pred" + str(i)] = lgb_oof_gal[:, i]
        test_gal["lgb_pred" + str(i)] = lgb_test_gal[:, i]

    lgb_exgal_preds = []
    for i in range(lgb_oof_exgal.shape[1]):
        lgb_exgal_preds.append("lgb_pred" + str(i))
        train_exgal["lgb_pred" + str(i)] = lgb_oof_exgal[:, i]
        test_exgal["lgb_pred" + str(i)] = lgb_test_exgal[:, i]

    # Second level: neural network stacked on top of the LightGBM outputs.
    oof_preds_gal, oof_preds_exgal, test_preds_gal, test_preds_exgal = get_nn_predictions(
        train_gal, train_exgal, test_gal, test_exgal)

    submit(test_df, test_preds_gal, test_preds_exgal, gal_class_list, exgal_class_list,
           "submissions/stacking.csv")
from util import timer, submit, read_number_list

A_NUM = 507622668

if __name__ == "__main__":
    with timer():
        numbers = read_number_list("input")
        # Find a contiguous block of numbers that sums to A_NUM.
        for i, num in enumerate(numbers):
            # `num` accumulates the running sum of the block starting at i.
            for j, num2 in enumerate(numbers[i + 1:]):
                num += num2
                if num == A_NUM:
                    block = numbers[i:i + j + 2]
                    submit(min(block) + max(block))
                elif num > A_NUM:
                    break
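# The puzzle snippets here share a small `util` module that is not shown. A
# minimal sketch of the helpers they rely on, assuming `submit` prints the
# answer and exits and that the input file holds one integer per line; all of
# these bodies are assumptions, not the original module.
import sys
import time
from contextlib import contextmanager


@contextmanager
def timer():
    start = time.perf_counter()
    try:
        yield
    finally:
        print("elapsed: {:.3f}s".format(time.perf_counter() - start))


def read_number_list(path="input"):
    with open(path) as f:
        return [int(line) for line in f if line.strip()]


def read_number_set(path="input"):
    return set(read_number_list(path))


def submit(answer):
    print("answer:", answer)
    sys.exit(0)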
passwd = getpass.getpass()
pname = args.p.upper()
if args.c:
    contest = args.c.upper()
else:
    contest = ''
file = args.f
lang_code = LANG_CODES[args.l]
cookies = util.login(username, passwd)
if not cookies:
    exit("Could not log in")
print("Successfully logged in!")
print("Submitting your source code...")
r = util.submit(cookies, pname, file, lang_code, contest)
if not r:
    print("Could not submit the problem. Please check that the problem name and/or contest name are correct.")
    util.logout(cookies)
    exit()
s_id = util.get_submission_id(r.text)
if not s_id:
    print("There was a problem retrieving your submission id. Please try again.")
    util.logout(cookies)
    exit()
print("Your submission id is %s" % s_id)
status = util.get_submission_status(s_id)
if not status:
    print("Could not get submission status.")
import numpy as np  # the deprecated pd.np alias is replaced with a direct numpy import

# columns = feature_labels + target_labels + ['Predicted--{}'.format(outp) for outp in target_labels]
predicted_prob = np.clip(
    pd.DataFrame((np.array(trainer.module.activate(i)) for i in df_freq[feature_labels].values),
                 columns=class_labels),
    0, 1)
log_losses = [round(otto.log_loss(ds['target'], otto.normalize_dataframe(predicted_prob).values,
                                  method=m).sum(), 3)
              for m in 'ksfohe']
print('The log losses for the training set were {}'.format(log_losses))
# df = pd.DataFrame(table, columns=columns, index=df.index[max(delays):])

################################################################################
# ########## Predict labels for Validation/Test Set for Kaggle submission

# pd.DataFrame.from_csv was removed from pandas; read_csv with index_col=0 is
# the equivalent call.
df_test = pd.read_csv(DATA_PATH + "test.csv", index_col=0)
test_ids = df_test.index.values

# Transform counts into term frequency x inverse document frequency
# (normalized term frequency) features.
tfidf = TfidfTransformer()
print('Transforming the validation set features into a TFIDF frequency matrix...')
df_test = pd.DataFrame(tfidf.fit_transform(df_test[feature_labels].values).toarray(),
                       index=test_ids, columns=feature_labels)
print('Finished transforming the test data using the trained TFIDF.')

# columns = feature_labels + target_labels + ['Predicted--{}'.format(outp) for outp in target_labels]
df_test = pd.DataFrame((np.array(trainer.module.activate(i)) for i in df_test.values),
                       index=test_ids, columns=class_labels)
otto.submit(df_test)
from util import timer, submit, read_number_list

TAIL_LEN = 25

if __name__ == "__main__":
    with timer():
        numbers = read_number_list("input")
        # Find the first number that is not the sum of two distinct values
        # among the previous TAIL_LEN numbers.
        for i, num in enumerate(numbers[TAIL_LEN:], TAIL_LEN):
            tail = numbers[i - TAIL_LEN:i]
            for addend in tail:
                if num - addend in tail and num - addend != addend:
                    break
            else:
                submit((i, num))
"min_data_in_leaf": 200, "num_leaves": 5, "feature_fraction": 0.7 } print("GALACTIC MODEL") oof_preds_gal, test_preds_gal = train_and_predict(train_gal, test_gal, features_gal, params_gal) print("EXTRAGALACTIC MODEL") oof_preds_exgal, test_preds_exgal = train_and_predict( train_exgal, test_exgal, features_exgal, params_exgal) evaluate(train_gal, train_exgal, oof_preds_gal, oof_preds_exgal) return oof_preds_gal, oof_preds_exgal, test_preds_gal, test_preds_exgal if __name__ == "__main__": train_gal, train_exgal, test_gal, test_exgal, gal_class_list, exgal_class_list, test_df = prepare_data( ) oof_preds_gal, oof_preds_exgal, test_preds_gal, test_preds_exgal = get_lgb_predictions( train_gal, train_exgal, test_gal, test_exgal) test_preds_gal = get_meta_preds(train_gal, oof_preds_gal, test_preds_gal, 0.2) test_preds_exgal = get_meta_preds(train_exgal, oof_preds_exgal, test_preds_exgal, 0.2) submit(test_df, test_preds_gal, test_preds_exgal, gal_class_list, exgal_class_list, "submissions/submission_lgb.csv")
def handle_answer(question):
    util.submit(question.path, question.answer)
def predict(*, pred_date: str, bj_his_length=360, ld_his_length=420, bj_windows='golden_8',
            ld_windows='fib_8', bj_dropout=0.6, ld_dropout=0.2, bj_units=(48, 48, 48, 48),
            ld_units=(24, 24, 24, 24), bj_batchsize=84, ld_batchsize=22, verbose: int = 2,
            save=True, dosubmit=False, suffix='alt_lgb_split'):
    vprint = get_verbose_print(verbose_level=verbose)
    pred_date = pd.to_datetime(pred_date)
    get_new_data = pred_date > pd.to_datetime('2018-03-28')
    sub = pd.read_csv("../input/sample_submission.csv")
    OUTDIR = '../submission/sub_{}-{}-{}'.format(pred_date.year, pred_date.month, pred_date.day)
    os.system('mkdir -p {}'.format(OUTDIR))
    predict_start_day = pred_date + pd.Timedelta(1, unit='D')
    predict_start = pd.to_datetime(get_date(predict_start_day))
    bj_data = get_city_data(city='bj', vprint=vprint, impute_with_lgbm=False, partial_data=False,
                            get_new_data=get_new_data)
    ld_data = get_city_data(city='ld', vprint=vprint, impute_with_lgbm=False, partial_data=False,
                            get_new_data=get_new_data)
    vprint(2, bj_data.head())
    vprint(2, bj_data.loc[bj_data['stationId'] != 'zhiwuyuan_aq'].tail())
    vprint(2, ld_data.head())
    vprint(2, ld_data.tail())
    bj_data = impute(bj_data, lgbm=True, hour=True, mean=True)
    ld_data = impute(ld_data, lgbm=True, hour=True, mean=True)
    vprint(2, bj_data.head())
    vprint(2, bj_data.loc[bj_data['stationId'] != 'zhiwuyuan_aq'].tail())
    vprint(2, ld_data.head())
    vprint(2, ld_data.tail())
    bj_w_train_data = long_to_wide(bj_data)
    ld_w_train_data = long_to_wide(ld_data)
    train_split_date = pred_date - pd.Timedelta(3, unit='D')
    bj_pred = fit_predict(city='bj', sub=sub, w_train_data=bj_w_train_data, train_data=bj_data,
                          train_split_date=train_split_date, history_length=bj_his_length,
                          pred_date=pred_date, windows=MEDIAN_WINDOWS[bj_windows],
                          dropout_rate=bj_dropout, units=bj_units, batch_size=bj_batchsize,
                          l2_strength=0.0001, n_folds=5, vprint=vprint)
    ld_pred = fit_predict(city='ld', sub=sub, w_train_data=ld_w_train_data, train_data=ld_data,
                          train_split_date=train_split_date, history_length=ld_his_length,
                          pred_date=pred_date, windows=MEDIAN_WINDOWS[ld_windows],
                          dropout_rate=ld_dropout, units=ld_units, batch_size=ld_batchsize,
                          l2_strength=0.0001, n_folds=5, vprint=vprint)
    submissions = sub.copy()
    bj_cond = submissions['test_id'].map(lambda x: x.split('#')[0] in BEIJING_STATIONS)
    ld_cond = submissions['test_id'].map(lambda x: x.split('#')[0] in LONDON_STATIONS)
    submissions.loc[bj_cond, ['PM2.5', 'PM10', 'O3']] = bj_pred.loc[bj_cond, ['PM2.5', 'PM10', 'O3']].values
    submissions.loc[ld_cond, ['PM2.5', 'PM10']] = ld_pred.loc[ld_cond, ['PM2.5', 'PM10']].values
    submissions['PM2.5'] = submissions['PM2.5'].map(lambda x: max(0, x))
    submissions['PM10'] = submissions['PM10'].map(lambda x: max(0, x))
    submissions['O3'] = submissions['O3'].map(lambda x: max(0, x))
    submissions = submissions[['test_id', 'PM2.5', 'PM10', 'O3']]
    if save:
        if not suffix:
            filepath = '{}/model_{}_sub.csv'.format(OUTDIR, 6)
        else:
            filepath = '{}/model_{}_sub_{}.csv'.format(OUTDIR, 6, suffix)
        submissions.to_csv(filepath, index=False)
        if dosubmit:
            submit(subfile=filepath,
                   description='model_{}_{}'.format(6, str(predict_start).split()[0]),
                   filename='model_{}_sub_{}.csv'.format(6, str(predict_start).split()[0]))
from util import timer, submit

if __name__ == "__main__":
    with timer():
        with open("input") as file:
            arrival = int(file.readline().strip())
            ids = [
                int(id)
                for id in file.readline().strip().split(",")
                if id != "x"
            ]
        # Find the first minute at or after `arrival` when some bus departs;
        # the answer is that bus id times the wait.
        i = arrival
        while True:
            for id in ids:
                if i % id == 0:
                    submit(id * (i - arrival))
            i += 1
from eight.a import load_program, Program, Instruction, InfiniteLoopError
from util import timer, submit


def fix_program(program: Program) -> Program:
    # currently a no-op; the fix is applied in-place in the loop below
    return program


def flip_nop_jmp(instruction: Instruction) -> Instruction:
    if instruction.command == "nop":
        instruction.command = "jmp"
    elif instruction.command == "jmp":
        instruction.command = "nop"
    return instruction


if __name__ == "__main__":
    with timer():
        program = load_program("input")
        # Flip one nop/jmp at a time; the fixed program is the one that
        # terminates instead of looping forever.
        for i, instruction in enumerate(program.instructions):
            try:
                if instruction.command != "acc":
                    program.instructions[i] = flip_nop_jmp(instruction)
                    submit(program.run(allow_loops=False))
            except InfiniteLoopError:
                # Undo the flip and try the next instruction.
                program.instructions[i] = flip_nop_jmp(instruction)
                program.reset()
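# `eight.a` (the day-8 part-1 module) is not shown. A minimal sketch of the
# interface the snippet above relies on; the names come from the import, but
# the bodies are assumptions, not the original module: an Instruction with a
# mutable command, and a Program whose run() raises InfiniteLoopError when an
# instruction is about to execute a second time.
from dataclasses import dataclass
from typing import List


class InfiniteLoopError(Exception):
    pass


@dataclass
class Instruction:
    command: str
    argument: int


@dataclass
class Program:
    instructions: List[Instruction]
    accumulator: int = 0
    pointer: int = 0

    def reset(self):
        self.accumulator = 0
        self.pointer = 0

    def run(self, allow_loops=True):
        seen = set()
        while self.pointer < len(self.instructions):
            if self.pointer in seen:
                if allow_loops:
                    return self.accumulator
                raise InfiniteLoopError(self.accumulator)
            seen.add(self.pointer)
            inst = self.instructions[self.pointer]
            if inst.command == "acc":
                self.accumulator += inst.argument
                self.pointer += 1
            elif inst.command == "jmp":
                self.pointer += inst.argument
            else:  # "nop"
                self.pointer += 1
        return self.accumulator


def load_program(path: str) -> Program:
    with open(path) as f:
        lines = (line.split() for line in f if line.strip())
        return Program([Instruction(cmd, int(arg)) for cmd, arg in lines])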
from util import submit, read_number_set, timer

if __name__ == "__main__":
    with timer():
        # Read the input once instead of re-reading the file on every lookup.
        numbers = read_number_set()
        for number in numbers:
            complement = 2020 - number
            if complement in numbers:
                submit(number * complement)
from util import submit, read_number_set, timer

if __name__ == "__main__":
    with timer():
        # Read the input once instead of re-reading the file on every lookup.
        numbers = read_number_set()
        for i in numbers:
            for j in numbers:
                complement = 2020 - i - j
                if complement in numbers:
                    submit(i * j * complement)