def test():
  t = test_.Test()

  x = -1e10
  t.check(no.distant_past() < x)
  t.check(no.far_future() > x)
  x = 1e10
  t.check(no.distant_past() < x)
  t.check(no.far_future() > x)

  # dreams never end
  t.check(no.never() != no.never())
  t.check(not no.never() == x)
  t.check(no.never() != x)
  t.check(not x < no.never())
  t.check(not x >= no.never())
  # no nay never:
  t.check(not no.isnever(x))
  # no nay never no more:
  t.check(no.isnever(no.never()))

  #t.check(False)

  s = no.ustream(10000)
  t.check(isinstance(s, np.ndarray))
  t.check(len(s) == 10000)
  t.check(abs(np.mean(s) - 0.5) < 0.02)

  f = no.lazy_eval("2 + 2")
  t.check(f() == 4)

  # # TODO this overlaps/duplicates tests in op.py - reorganise
  # # test thinning algorithm for non-homogeneous Poisson process
  # h = np.array([0.014] * 10)
  # #l = no.stopping(h)
  # l = no.first_arrival(h, 1.0, 10000)
  # t.check(abs(np.mean(l) * 0.014 - 1.0) < 0.03)
  # # varying timestep should make no difference
  # l = no.first_arrival(h, 0.1, 10000)
  # t.check(abs(np.mean(l) * 0.014 - 1.0) < 0.03)
  # # test a certain(ish) hazard rate
  # h = np.array([0.99, 0.99, 0.01])
  # l = no.first_arrival(h, 1.0, 10000)
  # no.log("TODO NHPP appears broken: %f" % np.mean(l))
  # # test a zero(ish) hazard rate
  # h = np.array([1e-30, 1e-30, 1e-30, .9999])
  # l = no.first_arrival(h, 1.0, 10000)
  # no.log("TODO NHPP appears broken: %f" % np.mean(l))
  # # this also tests a zero hazard rate
  # h = np.array([i/3000 for i in range(100)])
  # #no.log(h)
  # le = no.first_arrival(h, 1.0, 10000)
  # no.log(sum(le)/len(le))
  # # y
  # h = np.array([0.999, 0.1])
  # le = no.first_arrival(h, 1.0, 1000)
  # no.log(sum(le)/len(le))

  sometime = no.isnever(np.full(10, 1.0))
  t.check(np.all(~sometime))
  never = no.isnever(np.full(10, no.never()))
  no.log(never)
  t.check(np.all(never))

  #
  # DataFrame ops
  # modify df passing column
  df = pd.read_csv("../../tests/df.csv")
  # modify df passing directly
  no.directmod(df, "DC2101EW_C_ETHPUK11")
  t.check(np.array_equal(df["DC2101EW_C_ETHPUK11"].values,
                         np.zeros(len(df)) + 3))

  df = pd.read_csv("../../tests/df.csv")
  cats = np.array(range(4))
  transitions = np.identity(len(cats)) * 0 + 0.25
  #no.log(transitions)
  no.transition(cats, transitions, df, "DC2101EW_C_ETHPUK11")
  # it's possible this could fail depending on random draw
  t.check(np.array_equal(np.sort(df["DC2101EW_C_ETHPUK11"].unique()),
                         np.array(range(4))))

  # df2 = df.copy()
  # df3 = no.append(df,df2)
  # t.check(len(df3) == len(df) + len(df2))

  return not t.any_failed

        dat.SetColour(color)
        if parent is None:
            parent = dabo.dAppRef.ActiveForm
        super(dColorDialog, self).__init__(parent, data=dat)
        self._selColor = None

    def show(self):
        self._selColor = None
        ret = kons.DLG_CANCEL
        res = self.ShowModal()
        if res == wx.ID_OK:
            ret = kons.DLG_OK
            col = self.GetColourData().GetColour()
            self._selColor = col.Red(), col.Green(), col.Blue()
        return ret

    def release(self):
        self.Destroy()

    def getColor(self):
        return self._selColor


if __name__ == "__main__":
    import test
    test.Test().runTest(dColorDialog)

        _getTickPosition, _setTickPosition, None,
        _("""Position of the tick marks; must be one of Top, Bottom (default),
        Left or Right. Not fully supported on all platforms.
        Must be set during object creation; has no effect once created. (str)"""))

    DynamicOrientation = makeDynamicProperty(Orientation)
    DynamicMax = makeDynamicProperty(Max)
    DynamicMin = makeDynamicProperty(Min)
    DynamicShowLabels = makeDynamicProperty(ShowLabels)


class _dSlider_test(dSlider):
    def initProperties(self):
        self.Size = (300, 300)
        self.Max = 95
        self.Min = 23
        self.Value = 75
        self.ShowLabels = True
        # Try changing these to see their effects
        # self.Reversed = True
        # self.TickPosition = "Left"

    def onHit(self, evt):
        print "Hit! Value =", self.Value


if __name__ == "__main__":
    import test
    test.Test().runTest(_dSlider_test)

def run_training():
  """Main method for training and testing."""
  logging.set_verbosity(logging.INFO)

  # Get configuration and read additional parameters from the json file.
  hparams = parameters()
  keys = hparams.values().keys()
  #keys.sort()
  tw_name = hparams.task_name
  for key in keys:
    value = hparams.values()[key]
    logging.info(' ' + key + ('\t' if len(key) > 14 else '\t\t') + str(value))
    if key.startswith('batch'):
      continue
    if isinstance(value, int) or isinstance(value, float):
      tw_name += '_'
      for s in key.split('_'):
        tw_name += s[0]
      tw_name += '%d' % value if isinstance(value, int) else '%.3f' % value
  tw_name = "prashant_tensor"
  logging.info('tensorboard name %s' % tw_name)

  task_dict = {'upos': 3, 'xtag': 4, 'feats': 5}
  hparams.tagging = task_dict[FLAGS.task.strip("'")]
  conll_columns = [1, hparams.tagging]

  # Uses min_occurrence, lowercase, batch_char_size, batch_word_size
  reader = rd.Reader(hparams)
  logging.info('reading embeddings %s' % FLAGS.embeddings)
  reader.load(FLAGS.embeddings)
  logging.info('reading training data %s' % FLAGS.train)
  data = reader.read_corpus(FLAGS.train, conll_columns)
  sentences_train = reader.sentences(data)

  ## Create vocab and batch training data. ##
  vocab = Vocab()
  vocab.word_id = reader.build_word_vocab(sentences_train, hparams.lowercase)
  char_train = reader.to_char_corpus(data, tag_position=1)
  vocab.char_id, vocab.tag_id = reader.build_char_vocab(
      char_train, add_special_tokens=True)
  vocab.init_id_tag()
  output_dir = os.path.expanduser(FLAGS.output_dir)
  vocab.pred_id = reader.embedding_dict
  vocab.write(FLAGS.output_dir)

  # list of sentences
  sentences_char_train = reader.char_sentences(char_train, vocab.char_id,
                                               vocab.tag_id)
  sentences_char_train_index = []
  for k, s in enumerate(sentences_char_train):
    sentences_char_train_index.append(SntId(s, k))
  sort_len = lambda snt_id: len(snt_id.snt)
  sentences_char_train_index.sort(key=sort_len)

  # Index from batch-index to index in training set.
  sentences_char_index_b2t = {}
  sentences_char_index_t2b = {}
  sentences_char_train = []
  for k, s in enumerate(sentences_char_train_index):
    sentences_char_index_b2t[k] = s.index
    sentences_char_index_t2b[s.index] = k
    sentences_char_train.append(s.snt)

  # Create the char batches.
  batches_char, batches_idx_end, batches_idx_start, target_batch = (
      reader.char_sentences_to_buckets_index_sc(hparams, sentences_char_train,
                                                vocab.char_id['\t']))

  # Collect the sentences and sort due to length.
  sentences_id = reader.sentences_ids(data, vocab.word_id, vocab.tag_id)

  # Evaluate number of unknown words.
  reader.check_for_unknown_pretrained_embeddings(sentences_id, vocab.word_id)

  sentences_id_index = [SntId(s, k) for k, s in enumerate(sentences_id)]
  sentences_id_index.sort(key=sort_len)

  dev = tester.Test(hparams, reader, FLAGS.dev, vocab.id_tag)

  # Maps sentence to the batches.
  sentence_index = {}
  sentences_id = []
  for k, si in enumerate(sentences_id_index):
    sentences_id.append(si.snt)
    sentence_index[k] = si.index
  sentences_id.sort(key=len)
  batches_word = reader.sentences_to_buckets(hparams, sentences_id)

  # Read development data.
  dev_data = reader.read_corpus(FLAGS.dev, conll_columns)
  dev_dataset = Dataset(dev_data, vocab, reader, vocab.char_id['\t'], hparams)

  # Builds model for training.
  global_step_tensor = tf.Variable(0, trainable=False, name='global_step')
  global_step_tensor_joint = tf.Variable(0, trainable=False,
                                         name='global_step_joint')
  global_step_tensor_ch = tf.Variable(0, trainable=False, name='global_step_ch')

  # Adam has its own adaptive learning rate; nevertheless use a bit of decay.
  steps_decay = math.ceil(float(hparams.batch_word_size) / 5000.0)
  logging.info('steps_decay %d ' % steps_decay)
  rate = tf.train.exponential_decay(hparams.learning_rate, global_step_tensor,
                                    steps_decay, hparams.learning_rate_decay)
  optimizer = tf.contrib.opt.LazyAdamOptimizer(rate)

  # Optimizer for char model
  steps_decay_ch = math.ceil(float(hparams.batch_char_size) / 5000.0)
  rate_ch = tf.train.exponential_decay(hparams.learning_rate,
                                       global_step_tensor_ch, steps_decay_ch,
                                       hparams.learning_rate_decay)
  optimizer_ch = tf.contrib.opt.LazyAdamOptimizer(rate_ch)

  # Optimizer for word model
  rate_joint = tf.train.exponential_decay(hparams.learning_rate,
                                          global_step_tensor_joint, steps_decay,
                                          hparams.learning_rate_decay)
  # Optimizer for meta model
  optimizer_joint = tf.contrib.opt.LazyAdamOptimizer(rate_joint)

  model = Model(hparams)
  logging.info('Create training model.')
  pretrained = tf.Variable(tf.constant(
      0.0, shape=[len(reader.embeddings), reader.embed_size]),
      trainable=False, name='Pre')
  ph = model
  with tf.variable_scope('x', reuse=None):
    is_training = True
    (loss_w, correct) = model.word_model(is_training, hparams,
                                         len(vocab.word_id), reader.embed_size,
                                         len(vocab.tag_id), pretrained,
                                         ph.inputs_words)
    (loss_ch, correct_ch) = model.char_model(is_training, hparams,
                                             len(vocab.char_id),
                                             hparams.embed_size,
                                             len(vocab.tag_id), ph.inputs_chars,
                                             ph.idx_start, ph.idx_end,
                                             ph.targets_ch)
    loss_m = model.join(is_training, hparams, ph.inputs_words, ph.lout_w,
                        ph.lout_c, len(vocab.tag_id))

  # Setup the optimizers.
  train = optimizer.minimize(loss_w, global_step=global_step_tensor)
  traim_m = optimizer_joint.minimize(loss_m,
                                     global_step=global_step_tensor_joint)
  train_ch = optimizer_ch.minimize(loss_ch, global_step=global_step_tensor_ch)

  # Prepare the embeddings.
  embedding_ph = tf.placeholder(tf.float32,
                                [len(reader.embeddings), reader.embed_size])
  embedding_init = pretrained.assign(embedding_ph)

  logging.info('Create testing model.')
  with tf.variable_scope('x', reuse=True):
    is_training = False
    (predictions_w, out_logits_w) = model.word_model(is_training, hparams,
                                                     len(vocab.word_id),
                                                     reader.embed_size,
                                                     len(vocab.tag_id),
                                                     pretrained,
                                                     ph.inputs_words)
    (predictions_c, out_logits_c) = model.char_model(
        is_training, hparams, len(vocab.char_id), hparams.embed_size,
        len(vocab.tag_id), ph.inputs_chars, ph.idx_start, ph.idx_end,
        ph.targets_ch)
    predictions_m = model.join(is_training, hparams, ph.inputs_words, ph.lout_w,
                               ph.lout_c, len(vocab.tag_id))

  class Testmodel(object):
    """Tensors for testmodel"""

    def __init__(self):
      self.predictions_w = predictions_w
      self.out_logits_w = out_logits_w
      self.predictions_c = predictions_c
      self.out_logits_c = out_logits_c
      self.predictions_m = predictions_m

  testmodel = Testmodel()

  model.summaries_acc(ph.acc_m, 'dev-' + FLAGS.task)
  model.summaries_acc(ph.acc_c, 'dev-ch-' + FLAGS.task)
  model.summaries_acc(ph.acc_w, 'dev-w-' + FLAGS.task)

  saver = tf.train.Saver()

  # Create session and initialize variables.
  config_proto = tf.ConfigProto()
  config_proto.gpu_options.allow_growth = True
  with tf.Session(config=config_proto) as sess:
    sess.run(tf.global_variables_initializer())

    # Load embeddings into TF.
    sess.run(embedding_init, feed_dict={embedding_ph: reader.embeddings})

    iteration = 0
    start = timeit.default_timer()

    # Merge all the summaries.
    merged = tf.summary.merge_all()
    logging.info('Writing summary to ' + os.path.join(output_dir, tw_name))
    train_writer = tf.summary.FileWriter(os.path.join(output_dir, tw_name),
                                         sess.graph)

    filename_model = os.path.join(output_dir, str(hparams.task_name))

    # Continue training if model exists.
    logging.info('check existence %s' % (filename_model + '.meta'))
    if tf.gfile.Exists(filename_model + '.meta'):
      logging.info('found existing model')
      saver.restore(sess, os.path.join(output_dir, str(hparams.task_name)))
      best_acc, _ = test(sess, dev, ph, dev_dataset, testmodel)
      logging.info('acc_dev %f' % best_acc.meta)
    else:
      best_acc = Accuracies()

    state = 'train_word_model'
    max_steps_to_try_improve = hparams.early_stopping_steps
    word_improve_steps = max_steps_to_try_improve + 500
    char_improve_steps = max_steps_to_try_improve + 500
    joint_improve_steps = max_steps_to_try_improve + 500

    start = timeit.default_timer()
    batches_results_c, batches_results_w = ([], [])
    total_time = timeit.default_timer()

    # Training loop.
    while True:
      iteration += 1
      logging.info('Iteration %d ' % iteration)
      start = timeit.default_timer()
      num_batches = 0
      logging.info('total time %s' % str(
          timedelta(seconds=(timeit.default_timer() - total_time))))

      # Train word model.
      if state == 'train_word_model':
        for fw in batches_word:
          _, loss_out, correct_out = sess.run([train, loss_w, correct],
                                              feed_dict={ph.inputs_words: fw})
          num_batches += 1
          steps = sess.run(global_step_tensor)
          word_improve_steps -= 1
          if steps % 5 == 0:
            logging.info('steps word: %d loss %f correct %f ' %
                         (steps, loss_out, correct_out))
        log_training_time(start, num_batches, hparams.batch_word_size)
        acc, _ = test(sess, dev, ph, dev_dataset, testmodel)
        if best_acc.word < acc.word:
          best_acc.word = acc.word
          if word_improve_steps < max_steps_to_try_improve:
            word_improve_steps = max_steps_to_try_improve
          saver.save(sess, filename_model)
          logging.info('model saved')
        logging.info('dev set: word model %f' % acc.word)
        summary = model.create_summary(merged, sess, ph, acc)
        train_writer.add_summary(summary, iteration)
        logging.info('best word accuracy %f and steps to go %d', best_acc.word,
                     word_improve_steps)
        if word_improve_steps <= 0:
          state = 'train_char_model'
          logging.info('load best model.')
          saver.restore(sess, filename_model)
        else:
          continue

      # train the char model
      if state == 'train_char_model':
        for fc, i_end, i_start, target in zip(batches_char, batches_idx_end,
                                              batches_idx_start, target_batch):
          feed = {
              ph.inputs_chars: fc,
              ph.idx_start: i_start,
              ph.idx_end: i_end,
              ph.targets_ch: target
          }
          sess_out = sess.run([train_ch, loss_ch, correct_ch], feed_dict=feed)
          num_batches += 1
          char_improve_steps -= 1
          steps_ch = sess.run(global_step_tensor_ch)
          if steps_ch % 10 == 0:
            logging.info('steps chars: %d loss %f correct %f ' %
                         (steps_ch, sess_out[1], sess_out[2]))
        acc, _ = test(sess, dev, ph, dev_dataset, testmodel)
        log_training_time(start, num_batches, hparams.batch_char_size)
        if best_acc.char < acc.char:
          best_acc.char = acc.char
          if char_improve_steps < max_steps_to_try_improve:
            char_improve_steps = max_steps_to_try_improve
          saver.save(sess, filename_model)
          logging.info('model saved')
        logging.info('dev accuracies: meta %f word %f char %f' %
                     (acc.meta, acc.word, acc.char))
        summary = model.create_summary(merged, sess, ph, acc)
        train_writer.add_summary(summary, iteration)
        logging.info('best char accuracy %f and steps to go %d', best_acc.char,
                     char_improve_steps)
        if char_improve_steps <= 0:
          state = 'train_joint_model'
          logging.info('load best char model and continue with meta model')
          start = timeit.default_timer()
          saver.restore(sess, filename_model)
        else:
          continue

      # Rerun the models with the test model for training the meta model.
      if not batches_results_c:
        results_w, results_ch = ([], [])
        logging.info('compute word model')
        for fw in batches_word:
          feed = {ph.inputs_words: fw}
          results_w.extend(sess.run(out_logits_w, feed_dict=feed))
        logging.info('compute char model')
        for fc, e_idx, s_idx in zip(batches_char, batches_idx_end,
                                    batches_idx_start):
          feed = {ph.inputs_chars: fc, ph.idx_start: s_idx, ph.idx_end: e_idx}
          results_ch.extend(sess.run(out_logits_c, feed_dict=feed))

        index_step = 0
        # Since sentences of word and char batches are sorted differently,
        # merge them in order to run the meta model.
        for batch_w in batches_word:
          cout, wout = ([], [])
          for _ in batch_w:
            wout.append(results_w[index_step])
            w_train_index = sentence_index[index_step]
            lsc = results_ch[sentences_char_index_t2b[w_train_index]]
            w_shape = results_w[index_step].shape
            pad_c = np.zeros(w_shape)
            if w_shape[0] <= lsc.shape[0]:
              pad_c[:w_shape[0], :w_shape[1]] = lsc[:w_shape[0], :w_shape[1]]
            else:
              pad_c[:lsc.shape[0], :lsc.shape[1]] = lsc
            cout.append(pad_c)
            index_step += 1
          batches_results_c.append(cout)
          batches_results_w.append(wout)

      # Train the meta model.
      for batch_w, cout, wout in zip(batches_word, batches_results_c,
                                     batches_results_w):
        feed = {ph.inputs_words: batch_w, ph.lout_w: wout, ph.lout_c: cout}
        _, loss_joint = sess.run([traim_m, loss_m], feed_dict=feed)
        num_batches += 1
        joint_improve_steps -= 1
        total_train_iters_joint = sess.run(global_step_tensor_joint)
        if total_train_iters_joint % 5 == 0:
          logging.info('steps meta: %d loss %f ', total_train_iters_joint,
                       loss_joint)
      log_training_time(start, num_batches, hparams.batch_word_size)
      acc, _ = test(sess, dev, ph, dev_dataset, testmodel)
      if best_acc.meta < acc.meta:
        if joint_improve_steps < max_steps_to_try_improve:
          joint_improve_steps = max_steps_to_try_improve
        best_acc = acc
        saver.save(sess, filename_model)
        logging.info('model saved')
      logging.info('dev accuracies: meta %f word %f char %f' %
                   (acc.meta, acc.word, acc.char))
      logging.info('best meta model accuracy %f and steps to go %d',
                   best_acc.meta, joint_improve_steps)
      summary = model.create_summary(merged, sess, ph, acc)
      train_writer.add_summary(summary, iteration)
      if joint_improve_steps <= 0:
        logging.info('total time %s' % str(
            timedelta(seconds=(timeit.default_timer() - total_time))))
        return

import test

test.print_test()
x = test.Test(4)
x.print()
print(test.a)

import test
test.a = 20
test = 4

from test import a
from test import print_test, Test
print_test()

from math import sin as cos
from math import cos as sin
from math import pi as p
print(sin(p))
print(cos(p/2))

def login():
    global one_time_login
    payload = {'form_id': 'new_login_form', 'op': 'Login'}
    decrypted = encryption.decrypt(key, "ivfile", "new.json")
    # print(type(decrypted))
    decrypted.decode('utf-8')
    # str=json.load(open('new.json','r'))
    str = json.loads(decrypted)
    # print(str)
    payload['name'] = str['username']
    payload['pass'] = str['password']
    lang = str["language"]
    with requests.Session() as s:
        while (1):
            print("----------------Type help to view commands----------------")
            # print(r.text)
            str = input()
            li = str.split(' ')
            if (len(li) == 2 and li[0] != "user" and li[0] != "getlist"):
                choice = li[0]
                contest_c = li[1]
                # print("Enter\np-Practice\nc-Contest\n")
                # choice=input()
                # if(choice=='c'):
                # print("Enter contest name")
                # else:
                # print("Enter problem name")
                # contest_c=input()
                # print(make_dir.parse())
                while True:
                    print("----------------Type help to view commands----------------")
                    print(colored("ENTER: ", attrs=['bold']))
                    inp = input()
                    if (inp == "submit" and choice == 'c'):
                        # Not working for contest
                        try:
                            if (one_time_login == 0):
                                while (1):
                                    print(colored("Logging in...", "green"), end="\r")
                                    ti = time.time()
                                    p = s.get("https://www.codechef.com/", headers=headers)
                                    soup = bs4.BeautifulSoup(p.text, 'html.parser')
                                    if (type(soup) != type(None)):
                                        break
                                a = soup.find('input', attrs={"name": 'form_build_id'})['value']
                                payload['form_build_id'] = a
                                s.post('https://www.codechef.com/', data=payload, headers=headers)
                                # print(time.time()-ti)
                                print(colored("Logged in ", "yellow"))
                                one_time_login = 1
                            submit(s, contest_c, lang, 'c')
                        except:
                            logout(s)
                    # Working for practice as well as contest but not parsing
                    # question and have to add multithreading
                    elif (inp == "parse" and choice == 'c'):
                        try:
                            if (one_time_login == 0):
                                while (1):
                                    print(colored("Logging in...", "green"), end="\r")
                                    ti = time.time()
                                    p = s.get("https://www.codechef.com/", headers=headers)
                                    soup = bs4.BeautifulSoup(p.text, 'html.parser')
                                    if (type(soup) != type(None)):
                                        break
                                a = soup.find('input', attrs={"name": 'form_build_id'})['value']
                                payload['form_build_id'] = a
                                s.post('https://www.codechef.com/', data=payload, headers=headers)
                                # print(time.time()-ti)
                                print(colored("Logged in ", "green"))
                                one_time_login = 1
                            make_dir.parse(s, contest_c, choice)
                        except:
                            logout(s)
                    elif (inp == "test" and choice == 'c'):
                        varr = input("Enter problem name: ")
                        test.Test(contest_c, lang, varr)
                    elif (inp == 'open' and choice == 'c'):
                        # complete
                        openf()
                    elif (inp == "upcontest"):
                        # complete
                        contest.get_comp(s, 0, 1)
                    elif (inp == "ctest" and choice == 'c'):
                        xc = input("Enter problem name: ")
                        # if entered wrong contest name in try
                        test.Test_ac(f'{contest_c}/{xc}', 'Test_files', xc, lang, s)
                    elif (inp == 'race'):
                        # complete
                        try:
                            if (one_time_login == 0):
                                while (1):
                                    print(colored("Logging in...", "green"), end="\r")
                                    ti = time.time()
                                    p = s.get("https://www.codechef.com/", headers=headers)
                                    soup = bs4.BeautifulSoup(p.text, 'html.parser')
                                    if (type(soup) != type(None)):
                                        break
                                a = soup.find('input', attrs={"name": 'form_build_id'})['value']
                                payload['form_build_id'] = a
                                s.post('https://www.codechef.com/', data=payload, headers=headers)
                                # print(time.time()-ti)
                                print(colored("Logged in ", "green"))
                                one_time_login = 1
                            contest.race(s, contest_c)
                        except:
                            pass
                    elif (inp == "parse" and choice == "p"):
                        try:
                            make_dir.parse(s, contest_c, choice)
                        except:
                            logout(s)
                    elif (inp == "open" and choice == 'p'):
                        webbrowser.open(f"https://www.codechef.com/problems/{contest_c}")
                    elif (inp == 'test' and choice == 'p'):
                        ti1 = time.time()
                        test.Test("Practice", lang, contest_c)
                        # print(time.time()-ti1)
                    elif (inp == "ctest" and choice == 'p'):
                        test.Test_ac(f'Practice/{contest_c}', 'Test_files', contest_c, lang, s)
                    elif (inp == "submit" and choice == "p"):
                        try:
                            if (one_time_login == 0):
                                while (1):
                                    print(colored("Logging in...", "green"), end="\r")
                                    p = s.get("https://www.codechef.com/", headers=headers)
                                    soup = bs4.BeautifulSoup(p.text, 'html.parser')
                                    if (type(soup) != type(None)):
                                        break
                                a = soup.find('input', attrs={"name": 'form_build_id'})['value']
                                payload['form_build_id'] = a
                                r = s.post('https://www.codechef.com/', data=payload, headers=headers)
                                print(colored("Logged in ", "green"))
                                one_time_login = 1
                            ti2 = time.time()
                            submit(s, contest_c, lang, 'p')
                            # print(time.time()-ti2)
                        except Exception as e:
                            print(e)
                            logout(s)
                    elif (inp == "help"):
                        print(colored(
                            "parse - Parsing Contest or Problem\nsubmit-Submitting Problem\nopen-Opening question\ntest-To test your cases against standard io\nctest-To test custom cases against custom io\nquit-To leave the current mode",
                            "cyan"))
                    elif (inp == "quit"):
                        break
            elif (li[0] == "upcontest"):
                with requests.Session() as s:
                    contest.get_comp(s, 0, 1)
                    print("Upcoming contests")
            elif (li[0] == "prcontest"):
                with requests.Session() as s:
                    contest.get_comp(s, 0, 0)
                    print("Present Contests")
            elif (li[0] == "pacontest"):
                with requests.Session() as s:
                    contest.get_comp(s, 0, 2)
                    print("Past Contests")
            # elif(li[0] == "race"):
            #     print("Entering Race")
            elif (li[0] == "user"):
                with requests.Session() as s:
                    user.userinfo(s, li[1], 0)
            elif (li[0] == "getlist"):
                with requests.session() as s:
                    req = s.get(f'https://www.codechef.com/problems/{li[1]}/')
                    soup = bs4.BeautifulSoup(req.text, 'html.parser')
                    soup = soup.find('tbody')
                    il = soup.find_all('a')
                    for i in range(1, 1 + int(li[2]) * 3, 3):
                        print(colored(il[i].text, "yellow"))
                    # print(soup)
            elif (li[0] == "getrank"):
                inp = input("Enter {c(country)/i(institution)} {CountryName/InstitutionName}")
                inp1 = input("Enter number of users")
                ranking.getranking(inp[0], inp[2:], int(inp1))
            elif (li[0] == "compare"):
                with requests.Session() as s:
                    x = user.userinfo(s, li[1], 1)
                    # print(x)
                    y = user.userinfo(s, li[2], 1)
                    z = [
                        "Correct Answer", "Partially Submitted", "Wrong Answers",
                        "TLE", "Compilation Error"
                    ]
                    print(colored('{:<35} {:^15} {:^15}'.format("Status", li[1], li[2]), "yellow"))
                    for i in range(len(x)):
                        print(colored('{:<35} {:^15} {:^15}'.format(z[i], x[i], y[i]), "yellow"))
                    # print(y)
            # elif(li[0]=="ccompare"):
            elif (li[0] == "help"):
                print(colored(
                    "c <Contest Name> - To enter codechef in contest mode\np <Question Name> - To enter codechef in practice mode\nuser <username> - To get user information\ngetlist <difficulty> <number> - Gets list of latest practice problems of selected difficulty\nupcontest - To view upcoming contests\nprcontest - To view present contests\npacontest - To view past contests\nrace - To parse contest as soon as contest start\nquit - to logout and quit the program",
                    "cyan"))
            elif (li[0] == "quit"):
                logout(s)
                break

        _("Color of visited links (str or tuple)"))

    VisitedUnderline = property(_getVisitedUnderline, _setVisitedUnderline, None,
        _("Is the link underlined in the visited state? (bool)"))

    ForeColor = LinkColor


class _dHyperLink_test(dHyperLink):
    def _onHit(self, evt):
        print "hit"

    def afterInit(self):
        self.Caption = "The Dabo Wiki"
        self.FontSize = 24
        self.URL = "http://dabodev.com/wiki/"
        self.LinkColor = "olive"
        self.VisitedColor = "maroon"
        self.HoverColor = "crimson"
        self.LinkUnderline = True
        self.HoverUnderline = False
        self.VisitedUnderline = True
        self.bindEvent(dabo.dEvents.Hit, self._onHit)
        #self.ShowInBrowser = False


if __name__ == "__main__":
    import test
    test.Test().runTest(_dHyperLink_test)

            message=message, defaultPath=defaultPath,
            style=wx.DD_NEW_DIR_BUTTON)


class dSaveDialog(dFileDialog):
    """Creates a save dialog, which asks the user to specify a file to save to."""
    def __init__(self, parent=None, message=_("Save to:"), defaultPath="",
                 defaultFile="", wildcard="*.*", style=wx.SAVE):
        self._baseClass = dSaveDialog
        if parent is None:
            parent = dabo.dAppRef.ActiveForm
        super(dSaveDialog, self).__init__(parent=parent, message=message,
                                          defaultPath=defaultPath,
                                          defaultFile=defaultFile,
                                          wildcard=wildcard, style=style)


if __name__ == "__main__":
    import test
    test.Test().runTest(dFileDialog)
    test.Test().runTest(dFolderDialog)
    test.Test().runTest(dSaveDialog)

                              weak_learners[args.weak_learner], X_cut, y_cut, M)
            train_accuracy, baseline_train_accuracy = model.test(
                weak_learners[args.weak_learner], X_cut, y_cut, X_test, y_test,
                M, trials=args.trials)
            test_accuracy, baseline_test_accuracy = model.final_test(
                X_test, y_test, M)
        else:
            model = test.Test(ensembler, weak_learners[args.weak_learner],
                              X_cut, y_cut, M)
            train_accuracy, baseline_train_accuracy = model.test(
                X_cut, y_cut, X_test, y_test, M, trials=args.trials)
            test_accuracy, baseline_test_accuracy = model.final_test(
                X_test, y_test, M)
        acc += test_accuracy
    results.append(acc / float(T))
    print "Result: ", results
ax.set_xticks(num_samples)
ax.plot(num_samples, results, label=name)
fig.savefig('Greedyboost_prev50_num_samples.png')
plt.legend(loc="lower right")
plt.xlabel('Number of Samples', fontsize=12)

import test

worker = test.Test('zhangsan', 10000)
print(type(worker))
print(worker.hello())
print('end')
print(test.Test.classSpec)
print(hasattr(worker, 'classSpec'))

import test

test = test.Test()
data = {
    "id": 460,
}
test.random_function(data)

    Picture = property(_getNormalPicture, _setNormalPicture, None,
        _("""Specifies the image normally displayed on the button. (str)"""))


class _dBorderlessButton_test(dBorderlessButton):
    def initProperties(self):
        self.Caption = "You better not push me"
        self.FontSize = 8
        self.Width = 223
        self.Picture = "themes/tango/32x32/apps/accessories-text-editor.png"

    def onContextMenu(self, evt):
        print "context menu"

    def onMouseRightClick(self, evt):
        print "right click"

    def onHit(self, evt):
        self.ForeColor = "purple"
        self.FontBold = True
        self.FontItalic = True
        self.Caption = "Ok, you cross this line, and you die."
        self.Width = 333
        self.Form.layout()


if __name__ == "__main__":
    import test
    test.Test().runTest(_dBorderlessButton_test)

"random_stump": random_stump.RandomStump, "decisiontree": decision_trees.DecisionTree, "perceptron": perceptron.Perceptron } X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=1) results = [] T = 1 print "Running: ", args.algorithm for t in range(T): model = test.Test(algorithms[args.algorithm], weak_learners[args.weak_learner], X_train, y_train, args.M) train_accuracy, baseline_train_accuracy = model.test( X_train, y_train, X_test, y_test, args.M, trials=args.trials) test_accuracy, baseline_test_accuracy = model.final_test( X_test, y_test, args.M) results.append(test_accuracy) print results f = open('results_datasets.txt', 'a') f.write("Dataset: " + str(args.dataset) + "\n") f.write("Algorithm: " + str(args.algorithm) + "\n") f.write("Weak learner: " + str(args.weak_learner) + "\n") f.write("baseline acc: " + str(baseline_test_accuracy) + "\n") f.write(str(results)) f.write("Final Accuracy: " + str(float(sum(results)) / T) + "\n\n\n") '''
    def test(self):
        self.frame_inferior.destroy()
        self.root_frame.main_container.destroy()
        test.Test(self.root, self.root_frame)

        self.append(("Attack of the Clones", "Episode 2", 2002))
        self.append(("Revenge of the Sith", "Episode 3", 2005))
        self.append(("A New Hope", "Episode 4", 1977))
        self.append(("The Empire Strikes Back", "Episode 5", 1980))
        self.append(("Return of the Jedi", "Episode 6", 1983))
        self.Keys = [0, 1, 2, 3, 4, 5]

    def initProperties(self):
        self.MultipleSelect = True
        self.HorizontalRules = True
        self.VerticalRules = True
        #self.HeaderVisible = False

    def onHit(self, evt):
        print "KeyValue: ", self.KeyValue
        print "PositionValue: ", self.PositionValue
        print "StringValue: ", self.StringValue
        print "Value: ", self.Value

    def onListSelection(self, evt):
        print "List Selection!", self.Value, self.LastSelectedIndex, self.SelectedIndices

    def onListDeselection(self, evt):
        print "Row deselected:", evt.EventData["index"]


if __name__ == "__main__":
    import test
    test.Test().runTest(_dListControl_test)

        return getattr(self, "_paneStyle", "Label")

    ExpanderDimensions = property(_getExpanderDimensions, _setExpanderDimensions,
        None, _("Dimensions of the visible expander control."))

    Panel = property(_getPanel, None, None,
        _("Return panel object reference."))

    PanelStyle = property(_getPanelStyle, None, None,
        _("Specifies pane style and can be 'Label' (default) or 'Button'."))


class _CollapsiblePanelTest(dCollapsiblePanel):
    def initProperties(self):
        self.Caption = "Collapsible Panel Test"

    def createItems(self):
        panel = self.Panel
        gs = dabo.ui.dGridSizer(MaxCols=2)
        gs.append(dabo.ui.dTextBox(panel), "expand")
        gs.append(dabo.ui.dButton(panel, Caption=u"Test"), "expand")
        gs.setColExpand(True, (0, 1))
        panel.Sizer = gs


if __name__ == "__main__":
    import test
    test.Test().runTest(_CollapsiblePanelTest)

            tip="Dabo! Dabo! Dabo!",
            help="Large icon resized to fit in the max dimensions")
        self.appendSeparator()
        self.appendButton("Exit", pic="%s/actions/system-log-out.png" % iconPath,
            toggle=True, OnHit=self.onExit, tip="Exit",
            help="Quit the application")

    def onCopy(self, evt):
        dabo.ui.info("Copy Clicked!")

    def onToggle(self, evt):
        item = evt.EventObject
        dabo.ui.info("CHECKED: %s, ID: %s" % (item.Value, item.GetId()))

    def onExit(self, evt):
        app = self.Application
        if app:
            app.onFileExit(None)
        else:
            dabo.ui.stop("Sorry, there isn't an app object - can't exit.")


if __name__ == "__main__":
    import test
    test.Test().runTest(_dToolBar_test)

import test
#import lib.rock
from lib.rock import Rock as ROCK

my_test = test.Test("fun stuff")
my_test.say_phrase()

#my_rock = lib.rock.Rock()
#my_rock = Rock()
my_rock = ROCK()
my_rock.say_phrase()

import sys

import data_structure as data
import train
import test

train_set = data.read_arff_file(sys.argv[1])
test_set = data.read_arff_file(sys.argv[2])

trn = train.Train(train_set)
trn.calc_all_cond_prob()
I = train.Information(trn)
#trn.nodes['class','metastases']=.57
#trn.nodes['class','malign_lymph ']=1-.57

tst = test.Test(test_set, trn)
tst.print_naive()

        else:
            forms2close.remove(frm)


class dFormMain(dFormMainBase, wx.Frame):
    def __init__(self, parent=None, properties=None, *args, **kwargs):
        self._baseClass = dFormMain

        if dabo.MDI:
            # Hack this into an MDI Parent:
            dFormMain.__bases__ = (dFormMainBase, wx.MDIParentFrame)
            preClass = wx.PreMDIParentFrame
            self._mdi = True
        else:
            # This is a normal SDI form:
            dFormMain.__bases__ = (dFormMainBase, wx.Frame)
            preClass = wx.PreFrame
            self._mdi = False
        ## (Note that it is necessary to run the above block each time, because
        ## we are modifying the dFormMain class definition globally.)

        dFormMainBase.__init__(self, preClass, parent, properties, *args, **kwargs)


if __name__ == "__main__":
    import test
    test.Test().runTest(dFormMain)

import test

test.test1()
test.test2()
test.test3()

a = test
a.test2()

b = test.Test("Debdeep")
print(b.name)

        if value == "v":
            self._addWindowStyleFlag(wx.LI_VERTICAL)
        elif value == "h":
            self._addWindowStyleFlag(wx.LI_HORIZONTAL)
        else:
            raise ValueError("The only possible values are "
                             "'Horizontal' and 'Vertical'.")

    # property definitions follow:
    Orientation = property(_getOrientation, _setOrientation, None,
        "Specifies the Orientation of the line. (str) \n"
        "   Horizontal (default) \n"
        "   Vertical"
        "This is determined by the Width and Height properties. "
        "If the Width is greater than the Height, it will be Horizontal. "
        "Otherwise, it will be Vertical.")

    DynamicOrientation = makeDynamicProperty(Orientation)


class _dLine_test(dLine):
    def initProperties(self):
        self.Orientation = "Horizontal"
        self.Width = 200
        self.Height = 10


if __name__ == "__main__":
    import test
    test.Test().runTest(_dLine_test)

    def __init__(self, apk, package):
        self.apk = apk
        self.cpu = cpu.CPU()
        self.syscall = syscall.SYSCALL(package)
        self.color = color.RGB(package)
        self.test = test.Test(apk)

""" values = [] fig = plt.figure() for i in range(len(self.no_review_events_history)): values.append(self.no_review_events_history[i][index]) # plt.axhline(y=self.problems_list[index].threshold) plt.plot(values) if save: fig.savefig("./graphs/no_of_review_event_{}.png".format(index)) if display: plt.show() if __name__ == "__main__": test_ = test.Test() test_.construct_test(folder="./problems") student = Learner(test_) # print("Problem") ########## Simulation test ################## test_.problems[0].display() student.curr_question_index = 0 student.answer(test_.problems[0], "D", 10) student.latest_question_index += 1 student.curr_question_index += 1 student.increment_time() student.display_state() test_.problems[1].display()
                              weak_learners[args.weak_learner], X_train,
                              y_train, 1)
            train_accuracy, baseline_train_accuracy = model.test(
                weak_learners[args.weak_learner], X_train, y_train, X_test,
                y_test, 1, trials=i)
            test_accuracy, baseline_test_accuracy = model.final_test(
                X_test, y_test, 1)
        else:
            model = test.Test(ensembler, weak_learners[args.weak_learner],
                              X_train, y_train, args.M)
            train_accuracy, baseline_train_accuracy = model.test(
                X_train, y_train, X_test, y_test, args.M, trials=i)
            test_accuracy, baseline_test_accuracy = model.final_test(
                X_test, y_test, args.M)
        acc += test_accuracy
    results.append(acc / float(T))
    print "Result: ", results
ax.set_xticks(tr)
ax.plot(tr, results, label=name)
fig.savefig('Greedyboost_trials.png')
plt.legend(loc="lower right")
plt.xlabel('Number of Samples', fontsize=12)

Love won't hurt anymore
It's an open smile on a friendly shore
Yes love...
It's love...

Love Boat soon will be making another run
The Love Boat promises something for everyone
Set a course for adventure
Your mind on a new romance

Love won't hurt anymore
It's an open smile on a friendly shore
It's love...
It's love...
It's love...
It's the Love Boat
It's the Love Boat
"""

    def afterInit(self):
        self.Form.Size = (444, 244)
        dabo.ui.callAfter(self.adjustFormCaption)

    def adjustFormCaption(self):
        newcap = "%s - WordWrap: %s" % (self.Form.Caption, self.WordWrap)
        self.Form.Caption = newcap


if __name__ == "__main__":
    import test
    test.Test().runTest(_dEditBox_test, WordWrap=True)
    test.Test().runTest(_dEditBox_test, WordWrap=False)

        keys = {}
        for actor in actors:
            choices.append("%s %s" % (actor['fname'], actor['lname']))
            keys[actor["iid"]] = len(choices) - 1

        # self.MultipleSelect = True
        self.Choices = choices
        self.Keys = keys
        self.ValueMode = 'Key'
        self.Value = 23

    def onHit(self, evt):
        print "HIT:"
        print "\tKeyValue: ", self.KeyValue
        print "\tPositionValue: ", self.PositionValue
        print "\tStringValue: ", self.StringValue
        print "\tValue: ", self.Value
        print "\tCount: ", self.Count

    def onMouseLeftDoubleClick(self, evt):
        print "double click at position %s" % self.PositionValue

    def onMouseLeftDown(self, evt):
        print "mousedown"


if __name__ == "__main__":
    import test
    test.Test().runTest(_dListBox_test)

class _dForm_test(dForm):
    def afterInit(self):
        self.Caption = _("Regular Form")

    def onActivate(self, evt):
        print _("Activate")

    def onDeactivate(self, evt):
        print _("Deactivate")


class _dBorderlessForm_test(dBorderlessForm):
    def afterInit(self):
        self.btn = dabo.ui.dButton(self, Caption=_("Close Borderless Form"))
        self.Sizer.append(self.btn, halign="Center", valign="middle")
        self.layout()
        self.btn.bindEvent(dEvents.Hit, self.close)
        dabo.ui.callAfter(self.setSize)

    def setSize(self):
        self.Width, self.Height = self.btn.Width + 60, self.btn.Height + 60
        self.layout()
        self.Centered = True


if __name__ == "__main__":
    import test
    test.Test().runTest(_dForm_test)
    test.Test().runTest(_dBorderlessForm_test)

def test():
  t = test_.Test()

  if neworder.size() == 1:
    neworder.log("Skipping MPI tests")
    return True

  t.check(send_recv(True))
  t.check(send_recv(10))
  t.check(send_recv(10.01))
  t.check(send_recv("abcdef"))
  t.check(send_recv([1, 2, 3]))
  t.check(send_recv({"a": "fghdfkgh"}))

  x = np.array([1, 4, 9, 16])
  if neworder.rank() == 0:
    neworder.send(x, 1)
  if neworder.rank() == 1:
    y = neworder.receive(0)
    neworder.log("MPI: 0 sent {}={} 1 recd {}={}".format(type(x), x, type(y), y))
    t.check(np.array_equal(x, y))

  df = pd.read_csv("../../tests/ssm_E09000001_MSOA11_ppp_2011.csv")
  if neworder.rank() == 0:
    neworder.log("sending (as csv) df len %d rows from 0" % len(df))
    neworder.send_csv(df, 1)
  if neworder.rank() == 1:
    dfrec = neworder.receive_csv(0)
    neworder.log("got (as csv) df len %d rows from 0" % len(dfrec))
    t.check(dfrec.equals(df))

  if neworder.rank() == 0:
    neworder.log("sending (pickle) df len %d rows from 0" % len(df))
    neworder.send(df, 1)
  if neworder.rank() == 1:
    dfrec = neworder.receive(0)
    neworder.log("got (pickle) df len %d rows from 0" % len(dfrec))
    t.check(dfrec.equals(df))

  # TODO how to test?
  neworder.log("process %d syncing..." % neworder.rank())
  neworder.sync()
  neworder.log("process %d synced" % neworder.rank())

  i = "rank " + str(neworder.rank())
  root = 0
  if root == neworder.rank():
    neworder.log("broadcasting '%s' from %d" % (i, root))
  i = neworder.broadcast(i, root)
  neworder.log("%d got broadcast: '%s' from %d" % (neworder.rank(), i, root))
  t.check(i == "rank 0")

  # a0 will be different for each proc
  a0 = np.random.rand(2, 2)
  if root == neworder.rank():
    neworder.log("broadcasting '%s' from %d" % (str(a0), root))
  a1 = neworder.broadcast(a0, root)
  # a1 will equal a0 on rank 0 only
  neworder.log("%d got broadcast: '%s' from %d" % (neworder.rank(), str(a1), root))
  if neworder.rank() == 0:
    t.check(np.array_equal(a0, a1))
  else:
    t.check(not np.array_equal(a0, a1))

  # test ustream/sequence
  t.check(neworder.indep())
  if root == neworder.rank():
    u0 = neworder.ustream(1000)
    u1 = np.zeros(1000)
  else:
    u0 = np.zeros(1000)
    u1 = neworder.ustream(1000)
  # broadcast u1 from 1
  neworder.broadcast(u1, 1)
  # proc 0 should have 2 different random arrays
  # proc 1 should have zeros and a random array
  t.check(not np.array_equal(u0, u1))

  # check independent streams
  u = neworder.ustream(1000)
  v = neworder.broadcast(u, root)
  # u == v on broadcasting process only
  t.check(np.array_equal(u, v) == (neworder.rank() == root))

  # test gather
  x = (neworder.rank() + 1)**2 / 8
  a = neworder.gather(x, 0)
  if neworder.rank() == 0:
    t.check(np.array_equal(a, [0.125, 0.5]))
  else:
    t.check(len(a) == 0)
  #neworder.log(a)

  # test scatter
  if neworder.rank() == 0:
    a = (np.array(range(neworder.size())) + 1)**2 / 8
  else:
    a = np.zeros(neworder.size())
  neworder.log(a)
  x = neworder.scatter(a, 0)
  t.check(x == (neworder.rank() + 1)**2 / 8)

  # test allgather
  a = np.zeros(neworder.size()) - 1
  a[neworder.rank()] = (neworder.rank() + 1)**2 / 8
  a = neworder.allgather(a)
  t.check(np.array_equal(a, np.array([0.125, 0.5])))

  # this should probably fail (gather not implemented for int)
  x = neworder.rank() + 100
  a = neworder.gather(x, 0)
  #neworder.log(type(x))
  #neworder.log(type(a))

  return not t.any_failed