def test_in_vitro_enz(self):
    """An in-vitro reconstruction experiment for an enzymatic reaction
    reports outcome 'true' when the catalysing protein is supplied."""
    compartment = Cytosol()
    substrate = Metabolite('met1')
    product = Metabolite('met2')
    substrate_present = PresentEntity(substrate, compartment)
    product_present = PresentEntity(product, compartment)
    catalyst_present = PresentCatalyst(compartment)
    reaction = Reaction('r1', [substrate_present, catalyst_present],
                        [product_present])
    enzyme = Protein('p1', properties=[Catalyses(reaction)])
    self.oracle = Oracle(None, [enzyme], [reaction], None, [], [], [])
    description = ExperimentDescription(
        ReconstructionEnzReaction('r1', 'p1'), [])
    result = self.oracle.execute_in_vitro_exp(description)
    self.assertEqual(result.outcome, 'true')
def test_in_vitro_transp(self):
    """An in-vitro reconstruction experiment for a transport reaction
    reports outcome 'true' when the transporter protein is supplied."""
    compartment = Cytosol()
    substrate = Metabolite('met1')
    product = Metabolite('met2')
    substrate_present = PresentEntity(substrate, compartment)
    product_present = PresentEntity(product, compartment)
    transporter_present = PresentTransporter(compartment)
    reaction = Reaction('r1', [substrate_present, transporter_present],
                        [product_present])
    transporter = Protein('p1', properties=[Transports(reaction)])
    self.oracle = Oracle(None, [transporter], [reaction], None, [], [], [])
    description = ExperimentDescription(
        ReconstructionTransporterRequired('r1', 'p1'), [])
    result = self.oracle.execute_in_vitro_exp(description)
    self.assertEqual(result.outcome, 'true')
def test_game_result_basic_1_to_1_robot_kangaroo(self):
    """A 1/1 RobotKangaroo draws against a 2/2 Minion (neither side can
    finish the other) but wins outright against a 1/1 Minion.

    Fix: removed a leftover debug ``print(result)`` that polluted the
    test runner's output; no assertion depended on it.
    """
    # Case 1: 1/1 kangaroo vs 2/2 minion -> guaranteed draw.
    minion1 = RobotKangaroo(1, 1)
    minion2 = Minion(2, 2)
    scenario = Scenario()
    scenario.set_board([minion1], [minion2])
    result = Oracle().calculate_game_result_density(scenario)
    self.assertEqual(result.player_one_win_percentage, 0)
    self.assertEqual(result.player_two_win_percentage, 0)
    self.assertEqual(result.lethal_one, 0)
    self.assertEqual(result.lethal_two, 0)
    self.assertEqual(result.draw, 100)
    # Case 2: 1/1 kangaroo vs 1/1 minion -> player one always wins.
    minion1 = RobotKangaroo(1, 1)
    minion2 = Minion(1, 1)
    scenario = Scenario()
    scenario.set_board([minion1], [minion2])
    result = Oracle().calculate_game_result_density(scenario)
    self.assertEqual(result.player_one_win_percentage, 100)
    self.assertEqual(result.player_two_win_percentage, 0)
    self.assertEqual(result.lethal_one, 0)
    self.assertEqual(result.lethal_two, 0)
    self.assertEqual(result.draw, 0)
def test_game_result_basic_1_to_1_sampling(self):
    """Mirror match (1/1 vs 1/1) via the sampling backend: always a draw."""
    scenario = Scenario()
    scenario.set_board([Minion(1, 1)], [Minion(1, 1)])
    result = Oracle().calculate_game_result_density(
        scenario, method='sampling', args={'sampling_amount': 200})
    self.assertEqual(result.player_one_win_percentage, 0)
    self.assertEqual(result.player_two_win_percentage, 0)
    self.assertEqual(result.lethal_one, 0)
    self.assertEqual(result.lethal_two, 0)
    self.assertEqual(result.draw, 100)
def test_game_result_2x1(self):
    """A 10/15 plus a 1/2 against a lone 5/5: player one wins every line."""
    big_attacker = Minion(10, 15)
    small_attacker = Minion(1, 2)
    lone_defender = Minion(5, 5)
    scenario = Scenario()
    scenario.set_board([big_attacker, small_attacker], [lone_defender])
    result = Oracle().calculate_game_result_density(scenario)
    self.assertEqual(result.player_one_win_percentage, 100)
    self.assertEqual(result.player_two_win_percentage, 0)
    self.assertEqual(result.lethal_one, 0)
    self.assertEqual(result.lethal_two, 0)
    self.assertEqual(result.draw, 0)
def factory(self, dbtype):
    """Return a database adapter built from ``self._params`` for the given
    engine name (case-insensitive), or ``None`` for unsupported engines."""
    engine = dbtype.lower()
    # Guard-clause dispatch: first match wins, fall through to None.
    if engine == 'oracle':
        return Oracle(self._params)
    if engine == 'mssql':
        return MsSql(self._params)
    if engine == 'mysql':
        return MySql(self._params)
    #if engine == 'db2':
    #    return DB2(self._params)
    #if engine == 'sybase':
    #    return SaiBase(self._params)
    if engine == 'postgresql':
        return Postgres(self._params)
    return None
def setUp(self):
    """Build one shared archive and the two overseers under test:
    one with a quality module, one without."""
    store = Archive()
    revision = RevCAddB(store)
    experiments = BasicExpModuleNoCosts(store, None)
    test_oracle = Oracle(store, [], [], None, [], [], [])
    quality = QualityModule(store)
    # max_time=4, suffix='test', stop_threshold=2 (inlined below).
    self.overseer_qual = OverseerWithModQuality(
        store, revision, experiments, test_oracle, 2, quality, 4, 'test', 2)
    self.overseer_no_qual = OverseerNoQuality(
        store, revision, experiments, test_oracle, 2, 4, 'test', 2)
def run(self):
    """Seven 10/10 minions must beat seven 1/1 minions in every ordering."""
    strong_board = [Minion(10, 10) for _ in range(7)]
    weak_board = [Minion(1, 1) for _ in range(7)]
    result = Oracle(strong_board, weak_board).calculate_game_result_density()
    self.assertEqual(result.player_one_win_percentage, 100)
    self.assertEqual(result.player_two_win_percentage, 0)
    self.assertEqual(result.lethal_one, 0)
    self.assertEqual(result.lethal_two, 0)
    self.assertEqual(result.draw, 0)
def test_game_result_7x7(self):
    """A full board of 10/10s against a full board of 1/1s: certain win."""
    strong_board = [Minion(10, 10) for _ in range(7)]
    weak_board = [Minion(1, 1) for _ in range(7)]
    scenario = Scenario()
    scenario.set_board(strong_board, weak_board)
    result = Oracle().calculate_game_result_density(scenario)
    self.assertEqual(result.player_one_win_percentage, 100)
    self.assertEqual(result.player_two_win_percentage, 0)
    self.assertEqual(result.lethal_one, 0)
    self.assertEqual(result.lethal_two, 0)
    self.assertEqual(result.draw, 0)
def __init__(self, config=None, sgdb=None, db_migrate=None, execution_log=None):
    """Wire up CLI, logging, the database gateway and the migration driver.

    :param config: mapping of settings; defaults to {} when None
    :param sgdb: pre-built database gateway; when None one is created
        from config["db_engine"] (unless "new_migration" is set)
    :param db_migrate: migration driver; defaults to SimpleDBMigrate(config)
    :param execution_log: optional callable/sink stored only when truthy

    Fix: engine selection compared strings with ``is`` (object identity).
    Identity of string literals is a CPython implementation detail, so the
    'mysql'/'oracle' branches could silently never match; use ``==``.
    """
    self.cli = CLI()
    self.config = config or {}
    self.log = LOG(self.config.get("log_dir", None))
    self.sgdb = sgdb
    if self.sgdb is None and not self.config.get("new_migration", None):
        engine = self.config.get("db_engine")
        if engine == 'mysql':
            from mysql import MySQL
            self.sgdb = MySQL(config)
        elif engine == 'oracle':
            from oracle import Oracle
            self.sgdb = Oracle(config)
    self.db_migrate = db_migrate or SimpleDBMigrate(config)
    if execution_log:
        # NOTE(review): the attribute is intentionally left undefined when
        # no log is given — callers may probe with hasattr(); confirm
        # before changing this to an unconditional assignment.
        self.execution_log = execution_log
def test_game_result_2x2_win_and_tie(self):
    """2/1 + 1/1 versus 2/2 + 1/1: half the orderings draw, half go to
    player two, and player one never wins."""
    board_one = [Minion(2, 1), Minion(1, 1)]
    board_two = [Minion(2, 2), Minion(1, 1)]
    scenario = Scenario()
    scenario.set_board(board_one, board_two)
    result = Oracle().calculate_game_result_density(scenario)
    self.assertEqual(result.player_one_win_percentage, 0)
    self.assertEqual(result.player_two_win_percentage, 50)
    self.assertEqual(result.lethal_one, 0)
    self.assertEqual(result.lethal_two, 0)
    self.assertEqual(result.draw, 50)
def main():
    """Render the prediction page; on POST, run the Oracle forecast for
    the submitted ticker symbol and show its accuracy/profit metrics."""
    result = {}
    if request.method == 'POST':
        symbol = request.form.get('symbol')
        stock = Oracle(symbol, requests.Session())
        prediction = stock.predict_future(15)
        (accuracy, increase_accuracy, decrease_accuracy,
         pred_profit, hold_profit) = stock.evaluate_prediction()
        # Collapse timestamps to the day of month for display.
        prediction["timestamp"] = prediction['timestamp'].dt.day
        result["ticker"] = symbol.upper()
        result["prediction"] = prediction
        result["increase_accuracy"] = increase_accuracy
        result["decrease_accuracy"] = decrease_accuracy
        result["in_range_accuracy"] = accuracy
        result["pred_profit"] = pred_profit
        result["hold_profit"] = hold_profit
    # GET requests render the page with an empty result dict.
    return render_template('main.html', future=result)
def test_do_check_ignoring_negative(self):
    """do_check() must append a CheckPointFail to an initially empty
    development history once both stagnation counters reach 5."""
    model = Model('m_0', [], [], [])
    store = Archive()
    store.working_models.update([model])
    revision = RevCIAddR(store)
    experiments = BasicExpModuleNoCosts(store, None)
    test_oracle = Oracle(store, [], [], None, [], [], [])
    quality = QualityModule(store)
    # max_time=4, suffix='test' (inlined into the constructor call).
    overseer = OverseerWithModQuality(
        store, revision, experiments, test_oracle, 2, quality, 10, 4,
        'test', 2)
    overseer.cycles_since_last_new_model = 5
    overseer.cycles_since_best_model_changed = 5
    overseer.current_best_models = {model}
    history_before = list(overseer.archive.development_history)
    overseer.do_check()
    newest_entry = overseer.archive.development_history[-1]
    self.assertEqual(history_before, [])
    self.assertIsInstance(newest_entry, CheckPointFail)
def __init__(self, T, K, C, sigma2_w, opt_iters, R0_init_scale):
    """Set up the evaluator, both memory writers, the initial
    distributional memory and the oracle for a K-by-C memory problem.

    NOTE(review): R0 is drawn from np.random without an explicit seed —
    confirm the caller seeds the global RNG when reproducibility matters.
    """
    # Raw hyper-parameters kept on the instance.
    self.T = T
    self.K = K
    self.C = C
    self.sigma2_w = sigma2_w
    self.opt_iters = opt_iters
    self.R0_init_scale = R0_init_scale
    # Shared evaluation and memory-writing components.
    self.evaluator = Evaluator(K=K, C=C)
    self.DKM = MemoryWriterDKM(K=K, C=C, sigma2_w=sigma2_w)
    self.VBM = MemoryWriterVBM(K=K, C=C)
    # Initial memory: Gaussian K-by-C readout matrix, identity covariance.
    self.R0 = np.random.normal(loc=0.0, scale=R0_init_scale, size=(K, C))
    self.U0 = np.eye(K)
    self.pM = DistributionalMemory(R=self.R0, U=self.U0)
    self.orcl = Oracle(K=K, C=C)
def test_game_result_2x2_win_and_tie_sampling(self):
    """Sampling estimate of the 2x2 scenario: roughly half player-two
    wins and half draws, each within a 5-point tolerance."""
    board_one = [Minion(2, 1), Minion(1, 1)]
    board_two = [Minion(2, 2), Minion(1, 1)]
    scenario = Scenario()
    scenario.set_board(board_one, board_two)
    result = Oracle().calculate_game_result_density(
        scenario, method='sampling', args={'sampling_amount': 20000})
    print('error: ', abs(50 - result.player_two_win_percentage))
    self.assertEqual(result.player_one_win_percentage, 0)
    self.assertGreater(5, abs(50 - result.player_two_win_percentage))
    self.assertEqual(result.lethal_one, 0)
    self.assertEqual(result.lethal_two, 0)
    self.assertGreater(5, abs(50 - result.draw))
def test_game_result_7x7_sampling(self):
    """Sampling estimate of the lopsided 7x7 board: near-certain player-one
    win (within a 5-point tolerance), and exact zeros elsewhere."""
    strong_board = [Minion(10, 10) for _ in range(7)]
    weak_board = [Minion(1, 1) for _ in range(7)]
    scenario = Scenario()
    scenario.set_board(strong_board, weak_board)
    result = Oracle().calculate_game_result_density(
        scenario, method='sampling', args={'sampling_amount': 2000})
    print('error: ', abs(100 - result.player_one_win_percentage))
    self.assertGreater(5, abs(100 - result.player_one_win_percentage))
    self.assertGreater(5, abs(0 - result.player_two_win_percentage))
    self.assertEqual(result.lethal_one, 0)
    self.assertEqual(result.lethal_two, 0)
    self.assertEqual(result.draw, 0)
def main():
    # Interactive demo (Python 2): encrypt user-supplied plaintext via the
    # oracle — presumably AES in CBC mode given the IV; confirm against the
    # Oracle class — then recover it with a padding-oracle attack.
    # KEY, BLOCK_SIZE and IV are assumed to be module-level constants.
    print "\n---------- Start of Padding Oracle Attack ----------\n"
    plaintext = raw_input("Enter your plaintext WITHOUT padding: ")
    oracle = Oracle(KEY, BLOCK_SIZE)
    # PKCS#7-pad before encrypting so the ciphertext is block-aligned.
    padded_plaintext = pkcs7_pad(plaintext, BLOCK_SIZE)
    ciphertext = oracle.aes_padding(padded_plaintext, IV)
    print "\nPlaintext Entered: {} Number of bytes: {}".format(
        plaintext, len(plaintext))
    print "Padded Plaintext: {} Number of bytes: {}".format(
        padded_plaintext, len(padded_plaintext))
    print "Ciphertext: {} Number of bytes: {}\n".format(
        repr(ciphertext), len(ciphertext))
    print "Executing attack..."
    # The attack uses only the ciphertext, the IV and the oracle itself.
    decoded_ciphertext = execute_padding_oracle_attack(oracle, ciphertext, IV)
    print "Attack has ended.\n"
    print "Decoded Ciphertext: {} Number of bytes: {}".format(
        decoded_ciphertext, len(decoded_ciphertext))
    print "\n---------- End of Padding Oracle Attack ----------\n"
def __init__(self, config, sgdb=None):
    """Validate the configuration, wire up CLI/logging, and build the
    database gateway for the configured engine plus the migration driver.

    Raises an Exception for an unrecognised "database_engine" value when
    a gateway has to be created here.
    """
    Main._check_configuration(config)
    self.cli = CLI()
    self.config = config
    self.log = LOG(self.config.get("log_dir", None))
    self.sgdb = sgdb
    # Only build a gateway when none was injected and this is not a
    # "new_migration" run.
    if self.sgdb is None and not self.config.get("new_migration", None):
        engine = self.config.get("database_engine")
        if engine == 'mysql':
            from mysql import MySQL
            self.sgdb = MySQL(config)
        elif engine == 'oracle':
            from oracle import Oracle
            self.sgdb = Oracle(config)
        elif engine == 'mssql':
            from mssql import MSSQL
            self.sgdb = MSSQL(config)
        else:
            raise Exception("engine not supported '%s'" % engine)
    self.db_migrate = SimpleDBMigrate(self.config)
def generate_all_potential_parses_for_sentence(self, tagged_sentence, predicted_tags, min_probability=0.1):
    """Enumerate every candidate parse of one sentence via recursive
    shift-reduce exploration, pruning branches below min_probability.

    Returns [] when no relation tags were predicted or fewer than two
    concept codes are present (nothing to relate).
    """
    pos_ptag_seq, _, tag2span, all_predicted_rtags, _ = self.get_tags_relations_for(
        tagged_sentence, predicted_tags, self.cr_tags)
    if len(all_predicted_rtags) == 0:
        return []
    # tags without positional info; concept codes start with a digit
    rtag_seq = [t for t, i in pos_ptag_seq if t[0].isdigit()]
    # if not at least 2 concept codes, then can't parse
    if len(rtag_seq) < 2:
        return []
    words = [wd for wd, tags in tagged_sentence]
    # Initialize stack, basic parser and oracle; the oracle starts with an
    # empty gold-relation list since we are generating, not training.
    parser = ShiftReduceParser(Stack(verbose=False))
    parser.stack.push((ROOT, 0))  # needs to be a tuple
    oracle = Oracle([], parser)
    # Map each positional tag to the n-grams of the words it spans
    # (tag2span end index is inclusive, hence bstop + 1).
    tag2words = defaultdict(list)
    for ix, tag_pair in enumerate(pos_ptag_seq):
        bstart, bstop = tag2span[tag_pair]
        tag2words[tag_pair] = self.ngram_extractor.extract(
            words[bstart:bstop + 1])  # type: List[str]
    all_parses = self.recursively_parse(
        defaultdict(set), defaultdict(set), oracle, pos_ptag_seq, tag2span,
        tag2words, 0, words, defaultdict(list), min_probability)
    return all_parses
def setUp(self):
    """Build a small metabolic-network fixture: two irreversible reactions,
    two competing models sharing the same setup conditions, an archive
    holding both, and an Oracle whose ground truth is mod1."""
    # Basic entities and the single compartment they live in.
    self.g1 = Gene('g1')
    self.p1 = Protein('p1')
    self.met1 = Metabolite('met1')
    self.met2 = Metabolite('met2')
    self.cplx1 = Complex('cplx1')
    self.cytosol = Cytosol()
    # Presence conditions pairing each entity with the cytosol.
    self.cond1 = PresentEntity(self.met1, self.cytosol)
    self.cond2 = PresentEntity(self.met2, self.cytosol)
    self.cond3 = PresentEntity(self.p1, self.cytosol)
    self.cond4 = PresentEntity(self.cplx1, self.cytosol)
    # Growth requires met2; both reactions are one-way.
    self.growth = Growth('growth', [self.cond2])
    self.growth.reversibility = False
    self.r1 = Reaction('r1', [self.cond1], [self.cond2])
    self.r2 = Reaction('r2', [self.cond3], [self.cond4])
    self.r1.reversibility = False
    self.r2.reversibility = False
    self.entities = [self.g1, self.p1, self.met1, self.met2, self.cplx1]
    self.compartments = [self.cytosol]
    self.activities = [self.growth, self.r1, self.r2]
    self.setup_conds = [self.cond1, self.cond3]
    # Two rival models: same setup, different second reaction.
    self.mod1 = Model('m0', self.setup_conds, [self.growth, self.r1], [])
    self.mod2 = Model('m1', self.setup_conds, [self.growth, self.r2], [])
    self.archive = Archive()
    self.archive.working_models.update([self.mod1, self.mod2])
    # Copies so archive mutations don't alias the fixture lists.
    self.archive.mnm_compartments = list(self.compartments)
    self.archive.mnm_entities = list(self.entities)
    self.archive.mnm_activities = list(self.activities)
    # Oracle treats mod1 as the true model.
    self.oracle = Oracle(self.archive, [], [], self.mod1, self.entities,
                         self.compartments, self.activities)
row_count = oracle.execute(sql) updateUpdateCount(oracle, id, row_count) oracle.commit() def mainNeedUpdate(oracle, id, table_name, partition): need_update = getNeedUpdate(oracle, table_name, partition) print 'need update %s'%need_update try: updateNeedUpdate(oracle, id, need_update) except Exception: print "update id = %s table_name = %s partition = %s error!!!!"%(id, table_name, partition) print e if __name__ == "__main__": from oracle import Oracle oracle = Oracle() #检查需要update 多少 ''' for i in getUpdateInfo(oracle): oracle = Oracle() id = i['id'] table_name = i['table_name'] partition = i['partition'] print 'check count', id,table_name,partition th = threading.Thread(group = None, target = mainNeedUpdate, args = (oracle, id, table_name, partition)) th.start() ''' #开始update for i in getNeedUpdateInfo(oracle): oracle = Oracle() id = i['id']
from __method__ import Method from oracle import Oracle import pandas as pd import numpy as np import time folder = 'CSVModels/' filename = 'FFM-1000-200-0.50-SAT-1.csv' a, p, c, s, d, u, scores, t = [], [], [], [], [], [], [], [] for i in range(20): start_time = time.time() m = Method(folder + filename) o = Oracle(len(m.rank)) asked = 0 first_qidx = set() while True: path, node = m.find_node() # print("Node id =", node.id) q_idx = m.pick_questions(node) #m.ask_questions(q_idx, node) for q in q_idx: first_qidx.add(q) asked += 1 picked = o.pick(q_idx, node) m.adjust_weights(node, picked, q_idx) m.re_rank() solutions = m.check_solution() if solutions is not None: print("Found solution in", asked, "questions")
def test_reporter(self):
    """Smoke test: an Oracle for 'msft' can report and close cleanly."""
    ticker = Oracle("msft", requests.session())
    ticker.report()
    ticker.close()
    # Reaching this line without an exception is the real assertion.
    self.assertEqual(0, 0)
sql = "delete from %s " % table_name where = ' where 1 = 1 ' wheres = ["and %s = :%s" % (k, k) for k in where_dic.keys()] where += ' '.join(wheres) where += ' ' + and_the sql += where row_count = oracle.execute(sql, where_dic) if (row_count != count): raise Exception, '应delete \ %s表%s下,实际只delete了%s,所以报错了' % (table_name, count, row_count) return row_count def testDelete(oracle): where_dic = { 'pool_id': '1303324', } table_name = 'pool' print delete(oracle=oracle, table_name=table_name, and_the=' and \ pool_id = 1303324') if __name__ == "__main__": test_oracle = Oracle() testDelete(test_oracle) #oracle.rollback()
def updateModelState(self, model_state, model):
    """
    Update the stored model-state tensor and cache the datasets needed to
    compute sample distances later.

    :param model_state: dict of metrics from the proxy-model run (keys such
        as "test loss", "best cluster energies", "iter", "budget", ...)
    :param model: trained proxy model, stored as-is on self.proxyModel
    :return: (previous_model_state, new model_state tensor)
    """
    model_state_dict = model_state
    previous_model_state = self.model_state
    # things to put into the model state
    # test loss and standard deviation between models
    self.model_state = torch.stack(
        (
            torch.tensor(model_state_dict["test loss"]),
            torch.tensor(model_state_dict["test std"]),
        )
    )
    # sample energies
    self.model_state = torch.cat(
        (self.model_state, torch.tensor(model_state_dict["best cluster energies"]))
    )
    # sample uncertainties
    self.model_state = torch.cat(
        (self.model_state, torch.Tensor(model_state_dict["best cluster deviations"]))
    )
    # internal dist, dataset dist, random set dist
    self.model_state = torch.cat(
        (self.model_state, torch.tensor(model_state_dict["best clusters internal diff"]))
    )
    self.model_state = torch.cat(
        (self.model_state, torch.tensor(model_state_dict["best clusters dataset diff"]))
    )
    self.model_state = torch.cat(
        (self.model_state, torch.tensor(model_state_dict["best clusters random set diff"]))
    )
    # scalar features: n proxy models, clustering cutoff, progress fraction
    singletons = torch.stack(
        (
            torch.tensor(model_state_dict["n proxy models"]),
            torch.tensor(model_state_dict["clustering cutoff"]),
            torch.tensor(model_state_dict["iter"] / model_state_dict["budget"]),
        )
    )
    self.model_state = torch.cat((self.model_state, singletons))
    self.model_state = self.model_state.to(self.device)
    self.proxyModel = model  # this should already be on correct device - passed directly from the main program
    # get data to compute distances
    # model state samples
    self.modelStateSamples = model_state_dict["best cluster samples"]
    # training dataset — assumes the .npy holds a pickled dict with a
    # 'samples' key; TODO confirm against the dataset writer
    self.trainingSamples = np.load('datasets/' + self.config.dataset.oracle + '.npy', allow_pickle=True).item()
    self.trainingSamples = self.trainingSamples['samples']
    # large random sample: either 1e4, or 1% of the sample space,
    # whichever is smaller
    numSamples = min(int(1e4), self.config.dataset.dict_size ** self.config.dataset.max_length // 100)
    dataoracle = Oracle(self.config)
    # get large random dataset
    self.randomSamples = dataoracle.initializeDataset(save=False, returnData=True, customSize=numSamples)
    self.randomSamples = self.randomSamples['samples']
    return previous_model_state, self.model_state
def create_oracle(self):
    """Return a fresh Oracle wrapping a shift-reduce parser whose stack
    is pre-seeded with the ROOT marker."""
    stack = Stack(verbose=False)
    stack.push((ROOT, 0))  # ROOT entry must be a (tag, index) tuple
    return Oracle([], ShiftReduceParser(stack))
def generate_training_data(self, tagged_sentence, predicted_tags, out_parse_examples, out_crel_examples, predict_only=False):
    """Run one sentence through the shift-reduce parser, collecting
    training examples (DAgger-style: mix oracle and model decisions via
    self.beta) or, with predict_only=True, just predicting relations.

    :param tagged_sentence: sequence of (word, tags) pairs
    :param predicted_tags: tag predictions for the sentence
    :param out_parse_examples: accumulator; .add(feats, action, costs)
    :param out_crel_examples: accumulator; .add(feats, lr_action)
    :param predict_only: when True, skip all gold labels and example output
    :return: set of predicted relation strings ([] when unparseable)
    """
    pos_ptag_seq, pos_ground_truth_crels, tag2span, all_predicted_rtags, all_actual_crels = self.get_tags_relations_for(
        tagged_sentence, predicted_tags, self.cr_tags)
    if predict_only:
        # clear labels
        pos_ground_truth_crels = []
        all_actual_crels = set()
    if len(all_predicted_rtags) == 0:
        return []
    words = [wd for wd, tags in tagged_sentence]
    # Initialize stack, basic parser and oracle
    stack = Stack(verbose=False)
    # needs to be a tuple
    stack.push((ROOT, 0))
    parser = ShiftReduceParser(stack)
    oracle = Oracle(pos_ground_truth_crels, parser)
    predicted_relations = set()  # type: Set[str]
    # instead of head and modifiers, we will map causers to effects, and vice versa
    effect2causers = defaultdict(set)
    # heads can have multiple modifiers
    cause2effects = defaultdict(set)
    # tags without positional info
    rtag_seq = [t for t, i in pos_ptag_seq if t[0].isdigit()]
    # if not at least 2 concept codes, then can't parse
    if len(rtag_seq) < 2:
        return []
    tag2words = defaultdict(list)
    for ix, tag_pair in enumerate(pos_ptag_seq):
        bstart, bstop = tag2span[tag_pair]
        word_seq = words[bstart:bstop + 1]
        tag2words[tag_pair] = self.ngram_extractor.extract(word_seq)  # type: List[str]
    # Oracle parsing logic
    # consume the buffer
    for tag_ix, buffer_tag_pair in enumerate(pos_ptag_seq):
        buffer_tag = buffer_tag_pair[0]
        bstart, bstop = tag2span[buffer_tag_pair]
        remaining_buffer_tags = pos_ptag_seq[tag_ix:]
        # Consume the stack
        while True:
            tos_tag_pair = oracle.tos()
            tos_tag = tos_tag_pair[0]
            # Returns -1,-1 if TOS is ROOT
            if tos_tag == ROOT:
                tstart, tstop = -1, -1
            else:
                tstart, tstop = tag2span[tos_tag_pair]
            # Note that the end ix in tag2span is always the last index, not the last + 1
            btwn_start, btwn_stop = min(tstop + 1, len(words)), max(0, bstart)
            btwn_word_seq = words[btwn_start:btwn_stop]
            distance = len(btwn_word_seq)
            btwn_word_ngrams = self.ngram_extractor.extract(btwn_word_seq)  # type: List[str]
            feats = self.feat_extractor.extract(
                stack_tags=stack.contents(), buffer_tags=remaining_buffer_tags,
                tag2word_seq=tag2words, between_word_seq=btwn_word_ngrams,
                distance=distance, cause2effects=cause2effects,
                effect2causers=effect2causers, positive_val=self.positive_val)
            # Consult Oracle or Model based on coin toss
            if predict_only:
                action = self.predict_parse_action(
                    feats=feats, tos=tos_tag,
                    models=self.parser_models[-1],
                    vectorizer=self.parser_feature_vectorizers[-1])
            else:  # if training
                gold_action = oracle.consult(tos_tag_pair, buffer_tag_pair)
                rand_float = np.random.random_sample()  # between [0,1) (half-open interval, includes 0 but not 1)
                # If no trained models, always use Oracle
                if len(self.parser_models) == 0:
                    action = gold_action
                elif rand_float <= self.beta:
                    action = self.predict_parse_action(
                        feats=feats, tos=tos_tag,
                        models=self.parser_models[-1],
                        vectorizer=self.parser_feature_vectorizers[-1])
                else:
                    if len(self.parser_models) < 2:
                        action = gold_action
                    # use previous model if available
                    else:
                        action = self.predict_parse_action(
                            feats=feats, tos=tos_tag,
                            models=self.parser_models[-2],
                            vectorizer=self.parser_feature_vectorizers[-2])
                # Given the remaining tags, what is the cost of this decision
                # in terms of the optimal decision(s) that can be made?
                cost_per_action = self.cost_function(pos_ground_truth_crels, remaining_buffer_tags, oracle)
                # make a copy as changing later
                out_parse_examples.add(dict(feats), gold_action, cost_per_action)
            # Decide the direction of the causal relation
            if action in [LARC, RARC]:
                c_e_pair = (tos_tag, buffer_tag)
                # Convert to a string Causer:{l}->Result:{r}
                cause_effect = denormalize_cr(c_e_pair)
                e_c_pair = (buffer_tag, tos_tag)
                # Convert to a string Causer:{l}->Result:{r}
                effect_cause = denormalize_cr(e_c_pair)
                if predict_only:
                    gold_lr_action = None
                else:
                    # Gold direction from which orientations exist in the labels.
                    if cause_effect in all_actual_crels and effect_cause in all_actual_crels:
                        gold_lr_action = CAUSE_AND_EFFECT
                    elif cause_effect in all_actual_crels:
                        gold_lr_action = CAUSE_EFFECT
                    elif effect_cause in all_actual_crels:
                        gold_lr_action = EFFECT_CAUSE
                    else:
                        gold_lr_action = REJECT
                # Add additional features
                # needs to be before predict below
                crel_feats = self.crel_features(action, tos_tag, buffer_tag)
                feats.update(crel_feats)
                rand_float = np.random.random_sample()
                if predict_only:
                    lr_action = self.predict_crel_action(
                        feats=feats,
                        model=self.crel_models[-1],
                        vectorizer=self.crel_feat_vectorizers[-1])
                else:
                    # Same beta-mixing scheme as the parse action above.
                    if len(self.crel_models) == 0:
                        lr_action = gold_lr_action
                    elif rand_float <= self.beta:
                        lr_action = self.predict_crel_action(
                            feats=feats,
                            model=self.crel_models[-1],
                            vectorizer=self.crel_feat_vectorizers[-1])
                    else:
                        if len(self.crel_models) < 2:
                            lr_action = gold_lr_action
                        else:
                            lr_action = self.predict_crel_action(
                                feats=feats,
                                model=self.crel_models[-2],
                                vectorizer=self.crel_feat_vectorizers[-2])
                if lr_action == CAUSE_AND_EFFECT:
                    predicted_relations.add(cause_effect)
                    predicted_relations.add(effect_cause)
                    cause2effects[tos_tag_pair].add(buffer_tag_pair)
                    effect2causers[buffer_tag_pair].add(tos_tag_pair)
                    cause2effects[buffer_tag_pair].add(tos_tag_pair)
                    effect2causers[tos_tag_pair].add(buffer_tag_pair)
                elif lr_action == CAUSE_EFFECT:
                    predicted_relations.add(cause_effect)
                    cause2effects[tos_tag_pair].add(buffer_tag_pair)
                    effect2causers[buffer_tag_pair].add(tos_tag_pair)
                elif lr_action == EFFECT_CAUSE:
                    predicted_relations.add(effect_cause)
                    cause2effects[buffer_tag_pair].add(tos_tag_pair)
                    effect2causers[tos_tag_pair].add(buffer_tag_pair)
                elif lr_action == REJECT:
                    pass
                else:
                    raise Exception("Invalid CREL type")
                # cost is always 1 for this action (cost of 1 for getting it wrong)
                # because getting the wrong direction won't screw up the parse as it doesn't modify the stack
                if not predict_only:
                    out_crel_examples.add(dict(feats), gold_lr_action)
                # Not sure we want to condition on the actions of this crel model
                # action_history.append(lr_action)
                # action_tag_pair_history.append((lr_action, tos, buffer))
            # end if action in [LARC,RARC]
            if not oracle.execute(action, tos_tag_pair, buffer_tag_pair):
                break
            if oracle.is_stack_empty():
                break
    # Validation logic. Break on pass as relations that should be parsed
    # for pcr in all_actual_crels:
    #     l,r = normalize_cr(pcr)
    #     if l in rtag_seq and r in rtag_seq and pcr not in predicted_relations:
    #         pass
    return predicted_relations
def generate_story_qs_at_end(self, world, tasks_per_story, tasks, questions, num_agents=6, num_locations=3, statement_noise=0):
    """
    Allows user to specify chapter and question for each task in story.

    :param world: provides get_actors/get_locations/get_objects/get_containers
    :param tasks_per_story: number of task chapters per story
    :param tasks: list with length of tasks per story. Each entry is a
        string in the set {'tb','fb','sofb'}
    :param questions: list with length of tasks per story. Each entry is a
        string in the set {'memory', 'reality', 'belief', 'search'};
        only attached to the final chapter
    :param statement_noise: probability of encountering a noise sentence
        like 'The dog ran through the kitchen.'
    """
    # Fetch agents and objects and select a random subset
    idx_support_dummy = [0]
    actors = world.get_actors()
    locations = world.get_locations()
    objects = world.get_objects()
    containers = world.get_containers()
    random_actors = np.random.choice(actors, size=num_agents, replace=False)
    random_locations = np.random.choice(locations, size=num_locations, replace=False)
    # Two objects and two containers per location.
    random_objects = np.random.choice(objects, size=num_locations * 2, replace=False)
    random_containers = np.random.choice(containers, size=num_locations * 2, replace=False)
    # Create the oracle
    oracle = Oracle(random_actors, random_locations, random_objects, random_containers)
    # Populate locations in the oracle with containers
    for i in range(len(random_locations)):
        location = random_locations[i]
        containers = random_containers[2 * i:2 * i + 2]
        oracle.set_containers(location, list(containers))
    # Populate containers with objects
    for i in range(len(random_objects)):
        oracle.set_object_container(random_objects[i], random_containers[i])
    # Need start state for memory question
    start_state = oracle.locations.obj_containers.copy()
    # Create story by task
    chapters = {
        'tb': write_true_belief_chapter,
        'fb': write_false_belief_chapter,
        'sofb': write_second_order_false_belief_chapter
    }
    story = []
    # All chapters but the last are written without questions.
    for i in range(tasks_per_story - 1):
        chapter = chapters[tasks[i]]
        location = np.random.choice(random_locations)
        agent_ids = np.random.choice(range(len(random_actors)), size=2, replace=False)
        story.extend(
            chapter(start_state, oracle, location, agent_ids, random_actors, []))
    # Final chapter carries all the questions.
    chapter = chapters[tasks[-1]]
    location = np.random.choice(random_locations)
    agent_ids = np.random.choice(range(len(random_actors)), size=2, replace=False)
    story.extend(
        chapter(start_state, oracle, location, agent_ids, random_actors, questions))
    # At the end, add noise sentences randomly
    if statement_noise:
        noisy_story = []
        prev_i = 0
        noise = [
            i for i in range(len(story)) if np.random.rand() < statement_noise
        ]
        for i in noise:
            noisy_story.extend(story[prev_i:i] + [Clause([], NoiseAction())])
            prev_i = i
        noisy_story.extend(story[prev_i:])
        return noisy_story
    return story
'hold') def collectTax(oracle, **args): '''发票收回 oper_staff_id distri_id tax_begin_nbr tax_end_nbr ''' collect_tax = CollectTax(oracle, **args) collect_tax.collectTax() def allCollect(oracle, distri_id, oper_staff_id): '''全部回收''' update_dic = {'state':'out','state_date':'/sysdate'} where_dic = {'distri_id':distri_id} update(oracle, 'distri', update_dic, where_dic) insertDistriLog(oracle = oracle, distri_id = distri_id, oper_staff_id = oper_staff_id, oper = 'be collect',state = 'out') new_distri_id = getNextVal(oracle) insertDistriAsSplit(oracle, new_distri_id = new_distri_id, distri_id = distri_id, staff_id = oper_staff_id) insertDistriLog(oracle = oracle, distri_id = new_distri_id, oper_staff_id = oper_staff_id, oper = 'collect',state = 'hold') def test_collectTax(oracle): collectTax(oracle, distri_id = 999, oper_staff_id = 22, tax_begin_nbr = '001', tax_end_nbr = '100') if __name__ == "__main__": from oracle import Oracle oracle_test = Oracle() test_collectTax(oracle_test) oracle_test.commit()
def test_me():
    """Smoke test: a Dictionary(1, 5) attack runs against an Oracle
    seeded with 'thisisa' without raising."""
    attacker = Dictionary(1, 5)
    target = Oracle("thisisa")
    attacker.attack(target)