def process():
    """Process the current session's uploaded video: run the object tracker,
    text detector and text recogniser, then render the per-session results
    page (also writing an HTML table and an Excel export).
    """
    import re

    session = getUserInSession()
    ext = request.form.get('ext')
    # SECURITY: session and ext are spliced into raw SQL and a shell command
    # below; restrict them to word characters so neither SQL nor shell
    # metacharacters can be injected.
    if not re.match(r'^\w+$', session or '') or not re.match(r'^\w+$', ext or ''):
        raise ValueError('invalid session id or file extension')

    # Drop a stale per-session table if it somehow survived a previous run.
    # (Identifiers cannot be bound as SQL parameters, hence the validation
    # above before concatenating.)
    engine = create_engine(db_url)
    sql = text('DROP TABLE IF EXISTS table_' + session + ';')
    engine.execute(sql)

    # Calling the tracker from the shell may be fragile; importing the
    # tracker code as a module would be better (inherited TODO).
    dir_path = os.path.dirname(os.path.realpath(__file__))
    os.chdir(dir_path)
    os.system(
        "conda activate project & python object_tracker.py --video static/"
        + session + "." + ext + " --output data/video/output.mp4")

    # Text detector.
    start.main(project_path + "static/" + session + "/",
               project_path + "static/" + session + "/")
    # Text recogniser; the 2nd parameter helps derive the db name etc.
    demo.main(project_path + "static/" + session + "/",
              len(project_path) + 7, db_url)

    # Read the results table and expose it as HTML plus an Excel download.
    df = read_table(session, engine)
    write_html('templates/' + session + '.html', df)
    df.to_excel('static/' + session + '/results.xlsx')
    return render_template(session + ".html")
def test_run_main(platform, m_stt_side_effect):
    """Run start.main() under full mocking and verify the per-platform
    ding-playback command plus the stt/query call pattern.

    `platform` and `m_stt_side_effect` are parametrized fixtures; the side
    effect is either a list of recognized texts or KeyboardInterrupt.
    """
    with mock.patch('melissa.profile_loader.load_profile'):
        with mock.patch('start.tts') as m_tts, \
                mock.patch('start.subprocess') as m_subprocess, \
                mock.patch('start.stt') as m_stt, \
                mock.patch('start.load_profile') as m_load_profile, \
                mock.patch('start.sys') as m_sys, \
                mock.patch('start.query') as m_query:
            # pre run: inject the fake speech-to-text results and platform
            m_stt.side_effect = m_stt_side_effect
            m_sys.platform = platform
            # run: main() loops until stt raises KeyboardInterrupt
            import start
            with pytest.raises(KeyboardInterrupt):
                start.main()
            # test
            m_load_profile.assert_called_once_with(True)
            # The "ding" ready-sound player differs per OS.
            if platform.startswith('linux') or platform == 'win32':
                m_subprocess.call.assert_called_with(
                    ['mpg123', 'data/snowboy_resources/ding.wav'])
            elif platform == 'darwin':
                m_subprocess.call.assert_called_with(
                    ['afplay', 'data/snowboy_resources/ding.wav'])
            else:
                m_subprocess.call.assert_not_called()
            if m_stt_side_effect != KeyboardInterrupt:
                if m_stt_side_effect[0] == M_TEXT:
                    # Recognized text is forwarded to the query handler and
                    # stt is re-entered for the next utterance.
                    m_query.assert_called_once_with(M_TEXT)
                    assert m_stt.call_count == 2
                else:
                    m_query.assert_not_called()
                    m_stt.assert_called_once_with()
            m_tts.assert_called()
def run(self, directory, *args, **kwargs):
    """Make ``directory`` importable, then launch its ``start`` module."""
    sys.path.append(os.path.abspath(directory))
    try:
        import start
    except ImportError:
        # No start module in the target directory: show usage and bail out.
        self.parser.print_usage()
        sys.exit()
    else:
        start.main()
def main():
    """Print every line of the local db file, then hand off to start.main().

    Uses a context manager so the file handle is released even if printing
    raises (the original leaked the handle on error).
    """
    with open('db_file.txt', 'r') as db:
        for line in db:
            print(line)
    start.main()
def step_impl(context):
    """Run main() while capturing the constructed PartnerService on context."""
    def capture(*args, **kwargs):
        # Build the real service but remember it so later steps can inspect it.
        service = PartnerService(*args, **kwargs)
        context.service = service
        return service

    with patch('partner.PartnerService', new=capture):
        from start import main
        main()
def showThought():
    """Print 10 random thoughts as "id<TAB>text" (newlines stripped),
    then return to the main menu."""
    start.clearSn()
    cursor = connectionMysql.getMysqlconnection()
    cursor.execute("SELECT * from thoughts order by rand() limit 10 ")
    for row in cursor.fetchall():
        cleaned = row['thought'].replace('\n', '')
        print(str(row['id']) + '\t' + cleaned)
    print("\n")
    start.main()
def showTask():
    """Print the 10 highest-priority tasks as "priority<TAB>text"
    (newlines stripped), then return to the main menu."""
    start.clearSn()
    cursor = connectionMysql.getMysqlconnection()
    cursor.execute("SELECT * from tasks order by priority desc limit 10 ")
    for row in cursor.fetchall():
        cleaned = row['task'].replace('\n', '')
        print(str(row['priority']) + '\t' + cleaned)
    print("\n")
    start.main()
def writeThought():
    """Prompt for a thought, store it with the current unix timestamp,
    then return to the main menu.
    """
    start.clearSn()
    newcursor = connectionMysql.getMysqlconnection()
    x = raw_input("Enter your thought \n")
    t = datetime.now()
    unix_secs = int(mktime(t.timetuple()))
    # Parameterized query: the original concatenated the free-text input
    # into the SQL string, which breaks on quotes and allows SQL injection.
    query = "INSERT INTO thoughts(date, thought) values(%s, %s)"
    newcursor.execute(query, (unix_secs, x))
    start.clearSn()
    start.main()
def test_main_return_none_relion(self):
    """main() should run to completion for a relion star file."""
    params = dict(
        file_name='data_test.star',
        tolerance=30,
        tolerance_filament=0.25,
        window_size=4,
        typ='relion',
    )
    start.main(**params)
    assert True
def test_main_return_none_sphire(self):
    """main() should run to completion for a sphire bdb stack."""
    params = dict(
        file_name='bdb:stack_small',
        tolerance=30,
        tolerance_filament=0.25,
        window_size=4,
        typ='sphire',
    )
    start.main(**params)
    assert True
def loader_2(b, s):
    """Dispatch to the start menu or one of the level-1 games based on (b, s);
    anything else is reported as a DB error."""
    selection = (b, s)
    if selection == (0, 0):
        start.main()
    elif selection == (1, 1):
        game.play1_1(0)
    elif selection == (1, 2):
        game.play1_2(0)
    elif selection == (1, 3):
        game.play1_3(0)
    elif selection == (1, 4):
        game.play1_4(0)
    else:
        print("DB error")
def main_offline(n = None, dataset = 'proton-beam', rand_shuffle = None, alpha = 1, num_it = 3):
    """Offline comparison of crowd models (tc / mv / vss) on `dataset`.

    Loads the data via start.main, shuffles items and labels with the same
    fixed "random" so they stay aligned, fits each model on the first n
    items (labels hidden), and prints each model's worker sen/spe error
    against the gold statistics. Returns (gold_dic, tc, mv, vs).
    Python 2 code (print statements).
    """
    start.main(dataset)
    lc = crowd_model.labels_collection(start.turk_data_id, start.rel)
    #lc.preprocess()
    # Simulated datasets ship their own gold worker stats.
    if dataset.startswith('sim'):
        gold_dic = start.dic_workers
    else:
        gold_dic = lc.get_true_ss()
    if rand_shuffle == None:
        rand_shuffle = random.random()
    # Constant-returning lambda => both shuffles apply the same permutation,
    # keeping turk_data_id and rel aligned.
    random.shuffle(start.turk_data_id, lambda : rand_shuffle)
    #random.shuffle(start.turk_data , lambda : rand_shuffle)
    random.shuffle(start.rel , lambda : rand_shuffle)
    if n == None:
        n = len(start.rel)
    #lc1 = crowd_model.labels_collection(start.turk_data_id[:n], np.hstack((start.rel[:ngold], (nitem -ngold)*[None])))
    # All true labels hidden (unsupervised fit).
    lc1 = crowd_model.labels_collection(start.turk_data_id[:n], n*[None])
    #print n, lc1.crowd_labels[0]
    for alpha in [1]:
        tc = crowd_model.tc_model(lc1)
        tc.em(num_it = num_it)
        tc_ss = eval_cm(tc.dic_worker_ss, gold_dic)
        #print "tc ", alpha, tc_ss
        print "tc ", tc_ss
        sys.stdout.flush()
    lc2 = crowd_model.labels_collection(start.turk_data_id[:n], n*[None])
    mv = crowd_model.mv_model(lc2)
    mv_ss = eval_cm(mv.dic_ss, gold_dic)
    print "mv", mv_ss
    sys.stdout.flush()
    lc3 = crowd_model.labels_collection(start.turk_data_id[:n], n*[None])
    # Variational model evaluated with and without a full covariance matrix.
    for full_cov in [False, True]:
        vs = crowd_model.vss_model(lc3, full_cov = full_cov)
        vs.em(num_it = num_it)
        vs.get_dic_ss();
        vs_ss = eval_cm(vs.dic_ss, gold_dic)
        print "vs Full_Cov = ", full_cov, vs_ss
        sys.stdout.flush()
    # NOTE(review): returns only the last vs (full_cov=True) from the loop.
    return (gold_dic, tc, mv, vs)
def writeTask():
    """Prompt for a task and its priority (blank priority means 0), store
    them with the current unix timestamp, then return to the main menu.
    """
    start.clearSn()
    newcursor = connectionMysql.getMysqlconnection()
    x = input("Enter your Task \n")
    tp = input("Enter Task Priority \n")
    # Blank priority defaults to 0; otherwise coerce to int (non-numeric
    # input raised a SQL error before, it raises ValueError now).
    tp = int(tp) if tp else 0
    t = datetime.now()
    unix_secs = int(mktime(t.timetuple()))
    # Parameterized query: the original concatenated user input into the
    # SQL string, which breaks on quotes and allows SQL injection.
    query = "INSERT INTO tasks(date, task, priority) values(%s, %s, %s)"
    newcursor.execute(query, (unix_secs, x, tp))
    start.clearSn()
    start.main()
def writeReminder():
    """Prompt for a reminder and its priority (blank priority means 0),
    store them with the current unix timestamp, then return to the menu.
    """
    start.clearSn()
    newcursor = connectionMysql.getMysqlconnection()
    x = raw_input("Enter your Reminder \n")
    tp = raw_input("Enter Reminder Priority \n")
    # Blank priority defaults to 0; otherwise coerce to int.
    tp = int(tp) if tp else 0
    t = datetime.now()
    unix_secs = int(mktime(t.timetuple()))
    # Parameterized query: the original concatenated user input into the
    # SQL string, which breaks on quotes and allows SQL injection.
    query = "INSERT INTO reminders (date, reminder, priority) values(%s, %s, %s)"
    newcursor.execute(query, (unix_secs, x, tp))
    start.clearSn()
    start.main()
def model(parameters: dict) -> dict:
    """Run a single simulation replica with random randomness.

    Args:
        parameters (dict): parameters for this run; they override the
            defaults from isleconfig.

    Returns:
        the simulation result as a dictionary
    """
    run_params = isleconfig.simulation_parameters.copy()
    run_params.update(parameters)

    sim_setup = setup_simulation.SetupSim()
    # One-replica ensemble: take the first schedule/damage/seed of each.
    schedules, damages, np_seeds, rng_seeds = sim_setup.obtain_ensemble(
        1, overwrite=True)

    return start.main(
        sim_params=run_params,
        rc_event_schedule=schedules[0],
        rc_event_damage=damages[0],
        np_seed=np_seeds[0],
        random_seed=rng_seeds[0],
        save_iteration=0,
        replic_id=0,
        requested_logs=None,
        resume=False,
        summary=calibration_statistic.calculate_single,
    )
def do_POST(self):
    """Handle a POST request: read the urlencoded body, run the checker
    (start.main) on the 'sentence' field, and write the result as JSON
    with permissive CORS headers.
    """
    self._set_headers()
    # Read exactly Content-Length bytes of the request body.
    self.data_string = self.rfile.read(int(self.headers['Content-Length']))
    self.data_string = self.data_string.decode("utf-8")
    # NOTE(review): _set_headers above may already start a response; sending
    # 200 again here looks like a duplicate status line — confirm.
    self.send_response(200)
    self.send_header("Access-Control-Allow-Origin", "*")
    self.send_header("Access-Control-Allow-Credentials", "true")
    self.send_header("Access-Control-Allow-Methods", "POST, GET, OPTIONS")
    self.send_header("Access-Control-Allow-Headers", "X-Requested-With, Content-Type, Origin, Authorization, Accept, Client-Security-Token, Accept-Encoding")
    self.send_header('Content-Type', 'application/json')
    self.end_headers()
    # Body is application/x-www-form-urlencoded; parse_qs maps each key to a
    # list of values, hence the [0] below.
    query_d = urllib.parse.parse_qs(self.data_string)
    sentence = query_d['sentence'][0]
    print (sentence)
    # Per the sample output previously inlined here, start.main returns a
    # JSON-serializable dict with keys like userInput/ngrams/tagged/forms.
    result1 = start.main(sentence)
    print (result1)
    self.wfile.write(json.dumps(result1).encode())
    return
def uploads():
    """Handle an image upload: save the file under its hash, record its
    tags, run the start.py upscaler on it, and persist the outcome status.
    """
    # Read the uploaded image.
    upload_file = request.files['file']
    if not upload_file:
        return render_template('upload.html', result="upload failed!!!")
    upload_name = upload_file.filename.split("/")[-1]
    file = File.file(upload_name)
    # Stored under its content hash to avoid name collisions.
    upload_path = "./tmp/" + file.file_hash
    upload_file.save(upload_path)
    file.updata_path(upload_path)
    # Read the space-separated tags from the form.
    upload_tag = request.form['tags']
    upload_tags = upload_tag.split(" ")
    file.updata_tag(upload_tags)
    # Call start.py to upscale the image; any failure marks the file as
    # "error" but the record is saved either way (best-effort by design).
    try:
        if start.main(file.file_hash):
            file.status = "success"
    except Exception as e:
        file.status = "error"
        print(e)
    DB.add(file)
    return redirect(url_for('show'))
def main(dp):
    """Register this module's handlers on the dispatcher.

    If you want to add a file, read here! First, put a function main(dp)
    at the end of your file that runs dp.add_handler() on the functions
    you want to add to the bot. Secondly, import the file and run main(dp).
    For a file named foobar.py that looks like:

        import foobar
        foobar.main(dp)
    """
    import start
    start.main(dp)
def test_main_kafka_dies(self, mock_kafka_start, mock_kazoo):
    """main() propagates the kafka process's exit code when it dies."""
    dead_process = MagicMock(spec=Popen)
    dead_process.returncode = -1
    mock_kafka_start.return_value = dead_process
    self.assertEqual(-1, main())
    # Zookeeper must never be contacted if kafka is already dead.
    mock_kazoo.assert_not_called()
def test_main_static(self, mock_conn_string, mock_kafka_start, mock_kazoo):
    """With a static (empty) connection string, one loop exits cleanly."""
    mock_conn_string.return_value = []
    live_process = MagicMock(spec=Popen)
    live_process.returncode = None
    mock_kafka_start.return_value = live_process
    self.assertEqual(0, main(loops=1, loop_interval=0.001))
    # No connection-string change means zookeeper is never contacted.
    mock_kazoo.assert_not_called()
def main(nitem, ngold, nsam, dataset = 'proton-beam'): start.main(dataset) sep_val = nitem; money = (10000000000, 1, 100) #adata = active.active_data(start.mat[:sep_val,:], start.turk_data[:sep_val], start.rel[:sep_val], start.turk_data_id[:sep_val], start.turk_data_uncer[:sep_val], money=money)) for i in range(nitem): adata.query_crowd_all(0) for i in range(ngold): adata.query_expert_fix(i) (res, res_mv, dic_ds) = util.aggregate(has_gold = ngold > 0) print sklearn.metrics.confusion_matrix(start.rel[ngold:nitem], np.asarray(res[ngold:]) > 0.5) print sklearn.metrics.roc_auc_score(start.rel[ngold:nitem], res[ngold:]) print sklearn.metrics.confusion_matrix(start.rel[ngold:nitem], np.asarray(res_mv[ngold:]) > 0.1) print sklearn.metrics.roc_auc_score(start.rel[ngold:nitem], res_mv[ngold:]) #return (res, res_mv) #ss = crowd_model.ss_model(adata.lc, 1, 2, 0.1, nsam) #ss.em() #ss.infer_true_l() #print ss.mu #print ss.C #print sklearn.metrics.confusion_matrix(start.rel[ngold:nitem], np.asarray(ss.prob[ngold:]) > 0.5) #print sklearn.metrics.roc_auc_score(start.rel[ngold:nitem], ss.prob[ngold:]) #ss.workers_ss() #compare_conf_mat(dic_ds, ss.dic_ss) return adata
def test_main_unknown_typ(self):
    """An unknown typ should make main() return the sentinel string."""
    result = start.main(
        file_name='data_test.star',
        tolerance=30,
        tolerance_filament=0.25,
        window_size=4,
        typ='ok',
    )
    assert result == 'Unreachable code!'
def setup(dataset = 'proton-beam', n = 1000, ngold = 0, rand_shuffle = None):
    """Load `dataset` and build gold worker statistics plus four crowd
    models (majority vote, two-coin, full- and diag-covariance variational),
    each fit later by the caller on the first n items with ngold gold labels.

    Returns (gold_dic, mv, tc, vs_full, vs_diag).
    """
    start.main(dataset)
    # Constant-returning lambda => both shuffles apply the same permutation,
    # keeping items and labels aligned.
    if rand_shuffle != None:
        random.shuffle(start.turk_data_id, lambda : rand_shuffle)
        random.shuffle(start.rel, lambda : rand_shuffle)
    lc_gold = crowd_model.labels_collection(start.turk_data_id, start.rel)
    gold_dic = lc_gold.get_true_ss()
    # Each model gets its own collection; first ngold items keep true labels.
    lc1 = crowd_model.labels_collection(start.turk_data_id[:n], start.rel[:ngold] + (n-ngold)*[None])
    tc = crowd_model.tc_model(lc1)
    lc2 = crowd_model.labels_collection(start.turk_data_id[:n], start.rel[:ngold] + (n-ngold)*[None])
    mv = crowd_model.mv_model(lc2)
    lc3 = crowd_model.labels_collection(start.turk_data_id[:n], start.rel[:ngold] + (n-ngold)*[None])
    vs_full = crowd_model.vss_model(lc3, full_cov = True)
    lc4 = crowd_model.labels_collection(start.turk_data_id[:n], start.rel[:ngold] + (n-ngold)*[None])
    # BUG FIX: vs_diag previously reused lc3 (already owned by vs_full),
    # leaving lc4 unused; give it its own collection as clearly intended.
    vs_diag = crowd_model.vss_model(lc4, full_cov = False)
    return (gold_dic, mv, tc, vs_full, vs_diag)
def reproduce(n = None, dataset = 'RCT', rand_shuffle = None, num_it = 3, split = None):
    """
    read save_ss files
    reproduce evaluation

    Loads the worker sen/spe dictionaries pickled by main_loss and prints
    each model's error against the gold statistics on the held-out tail
    [split:]. Python 2 code (print statements).
    """
    # Must match the naming scheme used when main_loss wrote the file.
    filename = 'save_ss_' + dataset + ' ' + str(n) + '_' + str(rand_shuffle)
    f = open(filename, 'r')
    (tc_dic, mv_dic, vs_diag_dic, vs_full_dic) = pickle.load(f)
    start.main(dataset)
    lc = crowd_model.labels_collection(start.turk_data_id, start.rel)
    gold_dic = lc.get_true_ss()
    # Same constant-lambda shuffle as the producing run, so the train/test
    # split lines up with the pickled results.
    random.shuffle(start.turk_data_id, lambda : rand_shuffle)
    random.shuffle(start.rel, lambda : rand_shuffle)
    test_data = (start.turk_data_id[split:], start.rel[split:])
    print n
    print "tc ", eval_cm(tc_dic, gold_dic, True, test_data)
    print "mv ", eval_cm(mv_dic, gold_dic, True, test_data)
    print "vs Full_Cov = False ", eval_cm(vs_diag_dic, gold_dic, True, test_data)
    print "vs Full_Cov = True " , eval_cm(vs_full_dic, gold_dic, True, test_data)
    f.close()
def test_main_restart(self, mock_conn_string, mock_kafka_start, mock_kazoo):
    """A changed connection string must trigger one kafka restart."""
    # Two successive, different host lists simulate a zookeeper change.
    mock_conn_string.side_effect = [
        ['1.1.1.1', '2.2.2.2'],
        ['2.2.2.2', '3.3.3.3']
    ]
    live_process = MagicMock(spec=Popen)
    live_process.returncode = None
    mock_kafka_start.return_value = live_process
    result = main(loops=1, loop_interval=0.001, restart_interval=0.001)
    self.assertEqual(0, result)
    mock_kazoo.assert_called_with(hosts=ANY)
    # Initial start + one restart.
    self.assertEqual(2, mock_kafka_start.call_count)
def main_online(n, dataset = 'RCT', rand_shuffle = None, bs = 500, tc_w = 0.1, vs_w = 0.1):
    """Online (mini-batch) comparison of the tc / vss / mv crowd models.

    Feeds the first n items to each model in batches of bs, evaluating
    worker sen/spe error against gold after every batch. tc_w / vs_w are
    the online learning weights. Returns (res, tc, mv, vs) where res is
    a list of [items_seen, tc_err, vs_err, mv_err] rows.
    Python 2 code (print statements).
    """
    start.main(dataset)
    lc = crowd_model.labels_collection(start.turk_data_id, start.rel)
    #lc.preprocess()
    gold_dic = lc.get_true_ss()
    if rand_shuffle == None:
        rand_shuffle = random.random()
    # Constant-returning lambda => identical permutation for both lists.
    random.shuffle(start.turk_data_id, lambda : rand_shuffle)
    #random.shuffle(start.turk_data , lambda : rand_shuffle)
    random.shuffle(start.rel , lambda : rand_shuffle)
    # Models start from empty collections and learn purely online.
    lc1 = crowd_model.labels_collection([], [])
    lc2 = crowd_model.labels_collection([], [])
    lc3 = crowd_model.labels_collection([], [])
    tc = crowd_model.tc_model(lc1)
    vs = crowd_model.vss_model(lc2)
    mv = crowd_model.mv_model(lc3)
    res = []
    for i in range(0, n, bs):
        tc.online_em(start.turk_data_id[i:i+bs], num_it = 5, w = tc_w)
        tc_ss = eval_cm(tc.dic_worker_ss, gold_dic)
        vs.online_em(start.turk_data_id[i:i+bs], no_train = True, w = vs_w)
        vs.get_dic_ss()
        vs_ss = eval_cm(vs.dic_ss, gold_dic)
        mv.online(start.turk_data_id[i:i+bs])
        mv_ss = eval_cm(mv.dic_ss, gold_dic)
        print i + bs, tc_ss, vs_ss, mv_ss
        res.append([i+bs, tc_ss, vs_ss, mv_ss])
    return (res, tc, mv, vs)
def test_main_restart(self, mock_conn_string, mock_kafka_start, mock_kazoo):
    """A changed connection string must trigger one kafka restart."""
    # Two successive, different host lists simulate a zookeeper change.
    mock_conn_string.side_effect = [['1.1.1.1', '2.2.2.2'],
                                    ['2.2.2.2', '3.3.3.3']]
    live_process = MagicMock(spec=Popen)
    live_process.returncode = None
    mock_kafka_start.return_value = live_process
    result = main(loops=1, loop_interval=0.001, restart_interval=0.001)
    self.assertEqual(0, result)
    mock_kazoo.assert_called_with(hosts=ANY)
    # Initial start + one restart.
    self.assertEqual(2, mock_kafka_start.call_count)
dest='nums_competing' ) #for single experiments just include one value, for combinations a list [1, 2] parser.add_argument( '--competing_ccalg', '-a', choices=['cubic', 'bbr', 'reno', 'none', 'vegas', 'yeah', 'westwood'], dest='competing_ccalgs', action='append') parser.add_argument('--duration', '-d', type=int, default=60) parser.add_argument('--chrome', '-s', action='store_true', help='Run website traffic with headless chrome') parser.add_argument('--repeat', '-r', type=int, default=1) args = parser.parse_args() return args if __name__ == '__main__': # configure logging log_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../logging_config.ini') fileConfig(log_file_path) logging.getLogger("paramiko").setLevel(logging.WARNING) args = parse_args() logging.info('Arguments: {}'.format(args)) start.main(args) # Tests that can be run as separate experiments at one go # iperf(ccalg), video(ccalg), web video(website)
def tradeStart(self):
    """Start a trade run for the stock id and date typed into the UI."""
    stock_id = str(self.lineEditInputStockIDTrend.text())
    trade_date = str(self.lineEditTradeDate.text())
    start.main(stock_id, trade_date)
def game(rang, combo, count_colors):
    """The main entry point for the application.

    Runs the Lines game loop on a rang x rang board until the board fills
    up or the window is closed; returns the final score.  The bottom strip
    hosts Reset / Scores / Readme buttons.
    """
    # Pixel size of one board cell.
    platform_rang = WIN_H / rang
    reset = Button((500, 810), (90, 30), "Reset")
    scores = Button((600, 810), (90, 30), "Scores")
    readme = Button((700, 810), (90, 30), "Readme")
    loop = True
    # click == True means a source cell is selected and we await the target.
    click = False
    screen = pg.display.set_mode((WIN_W, WIN_H+50))
    pg.display.set_caption("Lines")
    board = GameBoard(rang, combo, count_colors)
    # Highlight position of the selected cell (-1 = nothing selected).
    point_x = point_y = -1
    pg.init()
    while loop:
        screen.fill(COLORS[0])
        draw_field(screen, board, platform_rang)
        point_click(screen, point_x, point_y, platform_rang)
        draw_text(screen, "Score: " + str(board.get_score()), 20, 805, 40)
        reset.draw_button(screen)
        readme.draw_button(screen)
        scores.draw_button(screen)
        for event in pg.event.get():
            if event.type == pg.QUIT:
                loop = False
            if event.type == pg.MOUSEBUTTONDOWN:
                mouse = pg.mouse.get_pos()
                if reset.in_button(mouse):
                    # Reset restarts the whole app via the start menu.
                    pg.quit()
                    start.main()
                    sys.exit()
                if scores.in_button(mouse):
                    # Modal high-score screen with its own event loop.
                    back = Button((600, 810), (150, 30), "Back to game")
                    scoreloop = True
                    scr = sc.get_scores()
                    while scoreloop:
                        for event in pg.event.get():
                            if event.type == pg.QUIT:
                                pg.quit()
                                sys.exit()
                            if event.type == pg.MOUSEBUTTONDOWN:
                                if back.in_button(pg.mouse.get_pos()):
                                    scoreloop = False
                        y_pos = 0
                        screen.fill(COLORS[0])
                        draw_text(screen, "Scores:", 30, y_pos, 50)
                        y_pos += 50
                        for line in scr:
                            if y_pos > 800:
                                break
                            draw_text(screen, line, 40, y_pos, 25)
                            y_pos += 20
                        back.draw_button(screen)
                        pg.display.update()
                    break
                if readme.in_button(mouse):
                    # Modal readme screen with its own event loop.
                    back = Button((600, 810), (150, 30), "Back to game")
                    readmeloop = True
                    while readmeloop:
                        for event in pg.event.get():
                            if event.type == pg.QUIT:
                                pg.quit()
                                sys.exit()
                            if event.type == pg.MOUSEBUTTONDOWN:
                                if back.in_button(pg.mouse.get_pos()):
                                    readmeloop = False
                        screen.fill(COLORS[0])
                        draw_readme(screen)
                        back.draw_button(screen)
                        pg.display.update()
                    break
                if not click:
                    # First click: select a non-empty source cell.
                    (x_pos, y_pos) = pg.mouse.get_pos()
                    x_f = int(x_pos // platform_rang)
                    y_f = int(y_pos // platform_rang)
                    if board.get_value(x_f, y_f) != 0 and board.get_value(x_f, y_f):
                        point_x = x_f * platform_rang
                        point_y = y_f * platform_rang
                        click = True
                else:
                    # Second click: either re-select another piece, or try
                    # to move the selected piece to an empty target cell.
                    (x_pos, y_pos) = pg.mouse.get_pos()
                    x_s = int(x_pos // platform_rang)
                    y_s = int(y_pos // platform_rang)
                    if board.get_value(x_s, y_s) != 0 and board.get_value(x_s, y_s):
                        x_f = x_s
                        y_f = y_s
                        point_x = x_f * platform_rang
                        point_y = y_f * platform_rang
                    else:
                        way = board.make_move((x_f, y_f), (x_s, y_s))
                        if way:
                            draw_way(screen, way, platform_rang)
                        click = False
                        point_x = -1
                        point_y = -1
        if board.adds:
            draw_next(screen, board.adds, COLORS, platform_rang)
        # Board full => game over; board empty => respawn circles.
        if str(board).count('0') == 0:
            loop = False
        if str(board).count('0') == rang*rang:
            board.stand_circles()
            board.add_circles(rang//3)
        pg.display.update()
    pg.quit()
    return board.get_score()
import start
import pickle
import active
import os

# Experiment script: run two money-budgeted active-learning strategies
# ('je' and 'jc') on the chosen dataset and pickle their results.
dataset = 'proton-beam'
directory = 'exp_' + dataset + '_unf' + '/'
os.makedirs(directory)
start.main(dataset)


def save_disk(filename, obj):
    """Pickle obj to directory/filename."""
    # with-statement guarantees the handle is closed even if pickling
    # fails (the original left it open on error).
    with open(directory + filename, 'w') as f:
        pickle.dump(obj, f)


expert_cost = 100
rloss = 10
total_cost = 100000
runs = 1
(je, adata) = active.experi_money(start.mat, start.rel, start.turk_data, start.turk_data_uncer, runs, (total_cost, 1, expert_cost), stra = 'je', rloss = rloss)
save_disk('je.pkl', je)
(jc, adata) = active.experi_money(start.mat, start.rel, start.turk_data, start.turk_data_uncer, runs, (total_cost, 1, expert_cost), stra = 'jc', rloss = rloss)
save_disk('jc.pkl', jc)
# Entry point: delegate straight to the application's main().
from start import main

main()
def starter():
    """Reset both stage counters in the load table, then launch the app."""
    cur.execute('UPDATE load set BigStage = 0, SmallStage = 0')
    conn.commit()
    # NOTE(review): the module-level connection is closed here, so
    # start.main() must reopen it if it needs the DB — confirm.
    conn.close()
    start.main()
def opt(num, opt):
    """Curses menu loop: arrow keys (258/259 = down/up) move the highlight
    over OPTION[1..4]; Enter activates the highlighted entry; a terminal
    resize aborts to the error screen.
    """
    screen.border()
    escape = False
    fonts.tittle(screen, TEXT[2])
    up_down(num)
    # NOTE(review): the initial selection is str(opt), not OPTION[opt] — an
    # immediate Enter may match none of the branches below; confirm intended.
    option = str(opt)
    while escape == False:
        key = screen.getch(2, 1)
        if key == 258:
            num -= 1
            if num < 1:
                num = 1
            option = OPTION[num]
            screen.border()
            up_down(num)
            escape = False
        elif key == 259:
            num += 1
            if num > 4:
                num = 4
            option = OPTION[num]
            screen.border()
            up_down(num)
            escape = False
        elif key in [curses.KEY_ENTER, ord('\n'), 10]:
            escape = True
            curses.endwin()
            if option == ('exit'):
                main_exit(num)
            elif option == ('options'):
                escape = True
                screen.erase()
                screen.refresh()
                curses.endwin()
                options.main(6)
            elif option == ('about'):
                process(2)
            elif option == ('start'):
                escape = True
                screen.erase()
                screen.refresh()
                curses.endwin()
                start.main(4)
        elif key == curses.KEY_RESIZE:
            # Terminal was resized: tear down curses and show the error UI.
            screen.erase()
            screen.refresh()
            escape = True
            curses.endwin()
            fonts.error()
def delete_login_success(username, password):
    """Tear down all login windows and hand the credentials to the app."""
    login_success_screen.destroy()
    login_screen.destroy()
    main_screen.destroy()
    start.main(username, password)
    # NOTE(review): exit(1) conventionally signals failure — confirm a
    # non-zero exit code is intended after a successful login hand-off.
    exit(1)
def startandwait():
    """Run the main loop to completion, then show the stopped screen."""
    main()
    gameStopped()
def tradeStart(self):
    """Launch a trade run using the stock id and date from the line edits."""
    stock_id = str(self.lineEditInputStockIDTrend.text())
    trade_date = str(self.lineEditTradeDate.text())
    start.main(stock_id, trade_date)
def main_multitask_sr(rand_shuffle = None, num_prev = 3):
    """ Multitask Systematic review

    Fits vss models on up to num_prev previous review datasets
    (proton-beam, appendicitis, dst), then on omega3 compares a
    single-task model, an accumulate-all model, and a multitask model
    at increasing label budgets m. Python 2 code (print statements).
    """
    start.main('proton-beam', rand_shuffle = rand_shuffle)
    proton_n = len(start.turk_data_id)
    proton_turk_data = copy.deepcopy(start.turk_data_id)
    # NOTE(review): deep-copies turk_data_id, not start.rel — looks like a
    # copy-paste slip (same for appen_rel / dst_rel below); confirm.
    proton_rel = copy.deepcopy(start.turk_data_id)
    proton_lc = crowd_model.labels_collection(proton_turk_data, proton_n * [None])
    proton_vs = crowd_model.vss_model(proton_lc)
    start.main('appendicitis', rand_shuffle = rand_shuffle)
    appen_n = len(start.turk_data_id)
    appen_turk_data = copy.deepcopy(start.turk_data_id)
    appen_rel = copy.deepcopy(start.turk_data_id)
    appen_lc = crowd_model.labels_collection(appen_turk_data, appen_n * [None])
    appen_vs = crowd_model.vss_model(appen_lc)
    start.main('dst', rand_shuffle = rand_shuffle)
    dst_n = len(start.turk_data_id)
    dst_turk_data = copy.deepcopy(start.turk_data_id)
    dst_rel = copy.deepcopy(start.turk_data_id)
    # NOTE(review): dst_lc / dst_vs are built from the *appendicitis* data,
    # leaving dst_turk_data/dst_n unused — suspected copy-paste bug; confirm.
    dst_lc = crowd_model.labels_collection(appen_turk_data, appen_n * [None])
    dst_vs = crowd_model.vss_model(appen_lc)
    start.main('omega3', rand_shuffle = rand_shuffle)
    lc_gold = crowd_model.labels_collection(start.turk_data_id, start.rel)
    gold_dic = lc_gold.get_true_ss()
    # Choose how many previous reviews feed the accumulate/multitask models.
    if num_prev == 3:
        prev_data = proton_turk_data + appen_turk_data + dst_turk_data
        prev_vs = [proton_vs, appen_vs, dst_vs]
    elif num_prev == 2:
        prev_data = proton_turk_data + appen_turk_data
        prev_vs = [proton_vs, appen_vs]
    else:
        prev_data = proton_turk_data
        prev_vs = [proton_vs]
    n = len(start.turk_data_id)
    for m in [100, 200, 500, 1000]:
        new_lc = crowd_model.labels_collection(start.turk_data_id[:m], m*[None])
        single_task = crowd_model.vss_model(new_lc)
        # NOTE(review): the hidden-label count uses proton_n regardless of
        # num_prev / len(prev_data) — confirm.
        accum_lc = crowd_model.labels_collection(prev_data + \
          start.turk_data_id[:m], (proton_n + m)*[None])
        accum = crowd_model.vss_model(accum_lc)
        new_vs = crowd_model.vss_model(new_lc)
        multi = crowd_model.multitask(prev_vs + [new_vs], inter_cor = 0.1)
        single_task.em(4)
        accum.em(4)
        multi.em(3)
        print m
        #print "single", weighted_eval_cm(single_task.dic_ss, gold_dic)
        #print "accum ", weighted_eval_cm( accum.dic_ss, gold_dic)
        #print "multi ", weighted_eval_cm(multi.datasets[2].dic_ss, gold_dic)
        print "single", eval_cm(single_task.dic_ss, gold_dic)
        print "accum ", eval_cm( accum.dic_ss, gold_dic)
        print "multi ", eval_cm(multi.datasets[-1].dic_ss, gold_dic)
def main_loss(n = None, dataset = 'RCT', rand_shuffle = None, num_it = 3, split = None, prior = 1):
    """
    Save worker sen/spe
    Estimate loss (FP + FN)
    Error = Weighted by worker prevalance
    prior = prior for the crowd model

    Fits tc / mv / vss (diag and full covariance) crowd models on the first
    n items with labels hidden, prints each model's sen/spe error against
    gold, and pickles all four dictionaries for reproduce().
    Python 2 code (print statements).
    """
    start.main(dataset, True)
    #restore_start()
    lc = crowd_model.labels_collection(start.turk_data_id, start.rel)
    gold_dic = lc.get_true_ss()
    # NOTE(review): the `split` parameter is always overwritten here.
    if dataset == 'RCT':
        split = 151224 # take all the data
    else:
        split = len(start.rel) / 2
    # Constant-returning lambda => identical permutation for both lists.
    random.shuffle(start.turk_data_id, lambda : rand_shuffle)
    random.shuffle(start.rel, lambda : rand_shuffle)
    test_data = (start.turk_data_id[split:], start.rel[split:])
    # Install the sensitivity/specificity/false-positive-rate priors used
    # by the models below.
    crowd_model.global_psen = (prior,1); crowd_model.global_pspe = (prior,1); crowd_model.global_pfpr = (1,prior)
    lc1 = crowd_model.labels_collection(start.turk_data_id[:n], n*[None])
    tc = crowd_model.tc_model(lc1)
    tc.em(num_it)
    tc_ss = eval_cm(tc.dic_worker_ss, gold_dic)
    print "tc", tc_ss; sys.stdout.flush()
    #hc = crowd_model.hc_model(lc1)
    #hc.build_model_def()
    #hc.infer_dic_ss()
    #tc_ss = eval_cm(tc.dic_worker_ss, gold_dic, True, test_data)
    #hc_ss = eval_cm(hc.dic_ss, gold_dic)
    #print "hc ", hc_ss; sys.stdout.flush()
    lc2 = crowd_model.labels_collection(start.turk_data_id[:n], n*[None]); mv = crowd_model.mv_model(lc2)
    #mv_ss = eval_cm(mv.dic_ss, gold_dic, True, test_data)
    mv_ss = eval_cm(mv.dic_ss, gold_dic)
    print "mv", mv_ss; sys.stdout.flush()
    # Both variational variants share lc3 below.
    lc3 = crowd_model.labels_collection(start.turk_data_id[:n], n*[None]);
    #for full_cov in [False, True]:
    vs_diag = crowd_model.vss_model(lc3, full_cov = False)
    vs_diag.em(num_it = num_it)
    vs_diag.get_dic_ss();
    #vs_diag_ss = eval_cm(vs_diag.dic_ss, gold_dic, True, test_data)
    vs_diag_ss = eval_cm(vs_diag.dic_ss, gold_dic)
    print "vs Full_Cov = False", vs_diag_ss; sys.stdout.flush()
    vs_full = crowd_model.vss_model(lc3, full_cov = True)
    vs_full.em(num_it = num_it)
    vs_full.get_dic_ss();
    #vs_full_ss = eval_cm(vs_full.dic_ss, gold_dic, True, test_data)
    vs_full_ss = eval_cm(vs_full.dic_ss, gold_dic)
    print "vs Full_Cov = True", vs_full_ss; sys.stdout.flush()
    # save sen-spe: file name must stay in sync with reproduce().
    # NOTE(review): the file handle is never closed/flushed explicitly.
    filename = 'save_ss_' + dataset + ' ' + str(n) + '_' + str(rand_shuffle)
    f = open(filename, 'w')
    pickle.dump((tc.dic_worker_ss, mv.dic_ss, vs_diag.dic_ss, vs_full.dic_ss), f)
#!/usr/bin/env python import start import signal import sys import os def signal_handler(sig, frame): os.system('killall -9 roslaunch roscore python') sys.exit(0) signal.signal(signal.SIGINT, signal_handler) start.main()