def makeStateAndErrorState(self, name, folder, order):
    """Build a matching pair of State objects: one normal, one error-flagged.

    The error twin shares ``folder`` and ``order`` with the normal state and
    takes the same name with an ``_error`` suffix.

    Returns:
        tuple: (normal_state, error_state)
    """
    pair = []
    for suffix, flag in (("", False), ("_error", True)):
        state = models.State()
        state.name = name + suffix
        state.folder = folder
        state.order = order
        state.is_error = flag
        pair.append(state)
    return pair[0], pair[1]
def load_test_data(choice="file", overwrite=True):
    ''' This will load test data. '''
    if overwrite:
        # Iterate over a copy: deleting from the live mapping while
        # iterating it would be unsafe.
        for obj in models.storage.all().copy().values():
            models.storage.delete(obj)
    state_names = ['Alabama', 'Arizona', 'Washington', 'Texas', 'California']
    city_names = ['One', 'Two', 'Three']
    if choice == "file":
        store = models.storage
        # One State per name, persisted as we go.
        for state_name in state_names:
            state = models.State()
            state.name = state_name
            store.new(state)
            store.save()
        # Every state gets the same three cities.
        for state in store.all("State").values():
            for city_name in city_names:
                city = models.City()
                city.state_id = state.id
                city.name = city_name
                store.new(city)
                store.save()
    else:
        pass
def reload(self):
    '''This is the 'reload' instance.
    Deserializes the JSON file to __objects.
    If JSON file does not exist, do nothing.
    Return: __object or nothing
    '''
    # No file yet -> nothing to restore.
    if os.path.isfile(FileStorage.__file_path) is not True:
        return
    with open(FileStorage.__file_path, 'r+', encoding='utf-8') as fn:
        data = json.load(fn)
    # Substring dispatch table, checked in this fixed order (matches the
    # original chain of independent `if` tests).
    class_table = (
        ('BaseModel', models.BaseModel),
        ('Amenity', models.Amenity),
        ('City', models.City),
        ('Place', models.Place),
        ('Review', models.Review),
        ('State', models.State),
        ('User', models.User),
    )
    for key, payload in data.items():
        class_name = payload['__class__']
        for marker, cls in class_table:
            if marker in class_name:
                FileStorage.__objects[key] = cls(payload)
def test_DBStorage_new_method(self):
    ''' Test new method '''
    # A freshly new()'d object must be registered with the DB session.
    pending_state = models.State()
    self.storage.new(pending_state)
    contained = pending_state in self.storage._DBStorage__session
    self.assertTrue(contained)
def trans():
    """Toggle the singleton 'instance' State's running flag.

    Creates the entity on first use, flips ``running``, persists it, and —
    when the flag lands on True — enqueues a deferred task from inside the
    same transaction.
    """
    instance = models.State.get_by_key_name("instance")
    if not instance:
        instance = models.State(key_name="instance")
    instance.running = not instance.running
    instance.put()
    if instance.running:
        from google.appengine.ext import deferred
        # Transactional enqueued deferred call!
        deferred.defer(tasks.update_state, instance)
def populate_data():
    """Fetch per-country COVID stats and persist one State row per country.

    Pulls locations ranked by deaths from the covid client, inserts one
    record per country, and commits once at the end.

    Fix: the session was never closed, leaking a DB connection on every
    call (and on every exception). The session is now always closed via
    try/finally.
    """
    db = SessionLocal()
    try:
        countries = covid.getLocations(rank_by='deaths')
        for country in countries:
            latest = country['latest']
            db_record = models.State(
                id=country['id'],
                country=country['country'],
                country_code=country['country_code'],
                population=country['country_population'],
                last_updated=country['last_updated'],
                confirmed=latest['confirmed'],
                deaths=latest['deaths'],
                recovered=latest['recovered'],
            )
            db.add(db_record)
        # Single commit for the whole batch.
        db.commit()
    finally:
        db.close()  # prevent leaked sessions/connections
def get_state(state_url):
    """Return the State for *state_url*, fetching and caching it on a miss.

    First tries the local DB; on any lookup failure, fetches the JSON from
    the API, parses it into a new State, saves and returns it.

    Fix: both bare ``except:`` clauses are narrowed to ``except Exception``
    so SystemExit / KeyboardInterrupt are no longer swallowed.

    Returns:
        The State instance, or None when both lookup and fetch fail.
    """
    try:
        return DB_MANAGER.open().query(
            mo.State).filter(mo.State.api_url == state_url).one()
    except Exception:
        try:
            state_obj = get_request(state_url)
            state = mo.State()
            state.parse_json(state_obj)
            return DB_MANAGER.save(state)
        except Exception:
            # Best-effort lookup: callers handle the None sentinel.
            return None
def test_FileStorage_all_class_specific(self):
    ''' Test all method with a class specified '''
    # all(City) must include the City and exclude the State.
    city_obj = models.City()
    state_obj = models.State()
    state_key = "{}.{}".format(state_obj.__class__.__name__, state_obj.id)
    city_key = "{}.{}".format(city_obj.__class__.__name__, city_obj.id)
    self.storage.new(city_obj)
    self.storage.new(state_obj)
    filtered = self.storage.all(models.City)
    found_state = filtered.get(state_key, None)
    found_city = filtered.get(city_key, None)
    self.assertTrue(found_city is not None,
                    msg="\n{}\n{}".format(filtered, found_city))
    self.assertTrue(found_state is None)
def tasks(request):
    '''Top-level view. This provides a list of the known 'states' and a
    count of shards found within each.
    '''
    counts = count_by_group(get_counts_by_graph(), split_by_status)
    items = []
    # State().get_states yields the display labels; counts are keyed by
    # the lowercased label.
    for label in models.State().get_states:
        key = label.lower()
        items.append({
            'url': reverse('list', kwargs={'status': key}),
            'label': label,
            'count': counts.get(key, 0),
        })
    return render_to_response('tasks.html',
        RequestContext(request, {
            'title': 'Tasks',
            'itemlist': items,
        })
    )
def setState(data):
    """Persist a new Indian state (with an empty city list) from JSON.

    Args:
        data: JSON string containing at least a 'state' key. Invalid JSON
            raises from json.loads before the try block, as before.

    Returns:
        str: a JSON string reporting success or an error message.

    Fix: removed the dead ``state = Model.State`` assignment (bound the
    class object, then was immediately shadowed inside the try block).
    """
    jObject = json.loads(data)
    try:
        state = Model.State()
        state.state = jObject['state']
        state.cities = []
        print("[INFO] Connecting DB")
        dbConnection = connect('indiaData')
        print("[INFO] Connection stablished")
        # Reject when the state already exists or the lookup errored.
        # NOTE(review): `resp != None` already implies the two extra
        # comparisons unless INVALID_REQUEST/EMPTY_REQUEST can be None —
        # confirm before simplifying.
        resp = getStateByName(jObject['state'])
        if (resp != None or resp == INVALID_REQUEST or resp == EMPTY_REQUEST):
            print(resp)
            return "{\"errorMessage\":\"State Invalid or State Exist\"}"
        else:
            state.save()
            return "{\"message\":\"Success\"}"
    except Exception as e:
        error_message = "Error In Saving Indian State " + data
        print(error_message, "Error Message", e)
        return error_message
def do_create(self, arg):
    '''Creates a new instance of BaseModel, save to JSON file.'''
    tokens = arg.split()
    if len(tokens) < 1:
        print(self.errors['noclass'])
        return
    if tokens[0] not in self.new_class:
        print(self.errors['badclass'])
        return
    # Compare against each supported class name in turn (exact match),
    # instantiating the one that matches.
    for cls_name, cls in (('BaseModel', models.BaseModel),
                          ('Amenity', models.Amenity),
                          ('City', models.City),
                          ('Place', models.Place),
                          ('Review', models.Review),
                          ('State', models.State),
                          ('User', models.User)):
        if tokens[0] == cls_name:
            new = cls()
    new.save()
    print('{}'.format(new.id))
def main(program_path: str, patterns_path: str):
    """Run the analysis: load program and patterns, execute, report vulns.

    Returns a list of plain dicts (tuples converted to lists) describing
    the vulnerabilities collected in the execution state.
    """
    with open(program_path) as src:
        # 'async' is a Python keyword, so rename it before model building.
        program_json = json.loads(src.read().replace('async', 'is_async'))
    program: Program = to_model(program_json)
    with open(patterns_path) as pf:
        patterns = json.load(pf, object_hook=lambda d: Pattern(**d))
    state = models.State(patterns)
    program.execute(state)
    results = []
    for vuln in state.vulnerabilities:
        fields = vuln.__dict__
        results.append({
            key: list(val) if isinstance(val, tuple) else val
            for key, val in fields.items()
        })
    return results
def test_match(self):
    """A three-condition production should fire exactly once against this
    working memory: only /dev/sda satisfies mounted + in_raid +
    has_partitions."""
    rete = models.Rete().save()
    # Production: a drive mountpoint that is in a RAID and has partitions.
    p1 = models.Production.get('p1', [
        {
            '#sys/domains/raid/get_all_drive_mountpoints:id=?id1':{
                'out':{
                    'type':'list',
                    'value':{
                        '?index':'?mount_point',
                    },
                }
            }
        },
        {
            '#sys/domains/raid/in_raid':{
                'in':{
                    'type':'str',
                    'value':'?mountpoint',
                    'src':'?id1'
                },
                'out':{
                    'type':'bool',
                    'value':'True',
                }
            }
        },
        {
            '#sys/domains/raid/has_partitions':{
                'in':{
                    'type':'str',
                    'value':'?mountpoint',
                    'src':'?id1'
                },
                'out':{
                    'type':'bool',
                    'value':'True',
                }
            }
        }
    ])
    # Working memory: two mounted drives, but only /dev/sda has the
    # in_raid/has_partitions facts below.
    initial_state = models.State({
        '#sys/domains/raid/get_all_drive_mountpoints:id=?id1':{
            'out':{
                'type':'list',
                'value':{
                    '0':'/dev/sda',
                    '1':'/dev/sdb',
                }
            }
        }
    },
    {
        '#sys/domains/raid/in_raid':{
            'in':{
                'type':'str',
                'value':'/dev/sda',
                'src':'?id1'#DONT_CARE
            },
            'out':{
                'type':'bool',
                'value':'True'
            }
        }
    },
    {
        '#sys/domains/raid/has_partitions':{
            'in':{
                'type':'str',
                'value':'/dev/sda',
                'src':'?id1'
            },
            'out':{
                'type':'bool',
                'value':'True',
            }
        }
    }
    )
    rete.add_production(p1)
    for wme in initial_state.iter_triples():
        rete.add_wme(wme)
    # One complete match -> one triggered production node.
    self.assertEqual(len(list(rete.triggered_pnodes.all())), 1)
def test_id_match(self):
    """A variable bound inside a list key ('0:id=?id1') must join with the
    'src' slot of a later condition, producing one triggered pnode."""
    from triple.utils import nested_to_triples
    rete = models.Rete().save()
    # Define production.
    # [u'?id1', u'?b795a407-2512-4d3c-a7bf-c46c06e1772a', u'0', u'/dev/sda'],
    # ['?', u'?19b99244-8469-4305-a186-f0a89fc27f7d', u'src', u'?id1'],
    p1 = models.Production.get('p1', [
        {
            '#sys/sensors/raid/get_all_drive_mountpoints':{
                'out':{
                    'type':'list',
                    'value':{
                        '0:id=?id1':'/dev/sda',
                        '1':'/dev/sdb',
                    }
                }
            }
        },
        {
            '#sys/sensors/raid/in_raid':{
                'in':{
                    'type':'str',
                    'value':'/dev/sda',
                    'src':'?id1'#DONT_CARE
                },
                'out':{
                    'type':'bool',
                    'value':'False'
                }
            }
        }
    ])
    # No alpha nodes may exist before the production is compiled in.
    self.assertEqual(models.AlphaNode.objects.all().count(), 0)
    rete.add_production(p1)
    #return
    # Define WMEs — same nested shape as the production, with the id
    # variable appearing in the list key.
    initial_state = models.State(
        {
            '#sys/sensors/raid/get_all_drive_mountpoints':{
                'out':{
                    'type':'list',
                    'value':{
                        '0:id=?id1':'/dev/sda',
                        '1':'/dev/sdb',
                    }
                }
            }
        },{
            '#sys/sensors/raid/in_raid':{
                'in':{
                    'type':'str',
                    'value':'/dev/sda',
                    'src':'?id1'#DONT_CARE
                },
                'out':{
                    'type':'bool',
                    'value':'False'
                }
            }
        }
    )
    for t in initial_state.iter_triples():
        rete.add_wme(t)
    # Exploratory assertions retained for future debugging:
    # triples = []
    # triples.append(T(*[u'someid1', u'0', u'/dev/sda']))
    # triples.append(T(*[u'someid2', u'src', triples[-1].id]))
    # t1,t2 = triples
    # for t in triples:
    #     rete.add_wme(t)
    # Confirm alphanode memory.
    # anodes = list(models.AlphaNode.objects.all().order_by('id'))
    # self.assertEqual(len(anodes), 4)
    # self.assertEqual(list(anodes[0].items.all()), [t1,t2])
    # self.assertEqual(set(anodes[0].children.all()), set([anodes[1],anodes[3]]))
    # self.assertEqual(list(anodes[1].items.all()), [t1])
    # self.assertEqual(set(anodes[1].children.all()), set([anodes[2]]))
    # self.assertEqual(list(anodes[2].items.all()), [t1])
    # self.assertEqual(list(anodes[3].items.all()), [t2])
    #
    # # Confirm betajoinnode memory.
    # bjoinnodes = list(models.BetaJoinNode.objects.all().order_by('id'))
    # self.assertEqual(len(bjoinnodes), 2)
    # bmem1_tokens = list(bjoinnodes[0].child.tokens.all())
    # self.assertEqual(len(bmem1_tokens), 1)
    # #print bmem1_tokens
    # self.assertEqual([t.wme for t in bmem1_tokens], [t1])
    # self.assertEqual(len(list(bjoinnodes[1].pnodes.all())), 1)
    #self.assertEqual(len(list(bjoinnodes[1].pnodes.all()[0].tokens.all())), 1)
    # print '-'*80
    # nodes = models.AlphaNode.objects.filter(parent=None)
    # for n in nodes:
    #     walk_nodes(n)
    # The id-variable join yields exactly one triggered production node.
    self.assertEqual(len(list(rete.triggered_pnodes)), 1)
def main():
    """Build and solve a two-agent test-scheduling MDP from CSV fixtures.

    Pipeline: load component fault probabilities, test->component mappings
    and test outcomes; build Component/Test/State models; enumerate
    even-sized test combinations as MDP states; derive two-test actions,
    transitions and expected rewards; run value iteration; then greedily
    extract a test-execution policy from S0.
    """
    component_probabilities_df = pd.read_csv('data/ComponentProbabilities.csv')
    #test_components_df = pd.read_csv('data/TestComponents.csv')
    test_components_df = pd.read_csv('data/TestComponents_small.csv')
    #test_outcomes_df = pd.read_csv('data/TestOutcomes.csv')
    test_outcomes_df = pd.read_csv('data/TestOutcomes_small.csv')
    agent_count = 2
    comp_dict = {}
    comp_run_dict = {}
    test_comp_dict = {}
    #test_comp = []
    test_dict = {}
    test_outcomes_dict = {}
    #run_tests = {}
    #test_run_dict ={}
    action_dict = {}
    # One Component model per distinct component name.
    for index, row in component_probabilities_df.iterrows():
        # print(row['ComponentName'], row['FaultProbability'])
        if row['ComponentName'] in comp_dict.keys():
            pass
        else:
            comp_dict[row['ComponentName']] = models.Component(
                row['ComponentName'], row['FaultProbability'])
            comp_run_dict[row['ComponentName']] = 0
    # Group components under each test.
    for index, row in test_components_df.iterrows():
        # print(row['TestName'], row['ComponentName'])
        if row['TestName'] in test_comp_dict.keys():
            test_comp_dict[row['TestName']].append(
                comp_dict[row['ComponentName']])
        else:
            test_comp_dict[row['TestName']] = []
            test_comp_dict[row['TestName']].append(
                comp_dict[row['ComponentName']])
    for test in test_comp_dict:
        test_dict[test] = models.Test(test, test_comp_dict[test])
        #print(test,test_dict[test].get_failure_probability())
    # First outcome seen per test wins.
    for index, row in test_outcomes_df.iterrows():
        #print(row['TestName'], row['TestOutcome'])
        if row['TestName'] in test_outcomes_dict.keys():
            pass
        else:
            test_outcomes_dict[row['TestName']] = row['TestOutcome']
    # create 2 agents tuple of actions (test1,test2) and probability for
    # failure when performing both actions in specific state
    # store it in action_dict which contains the actual set of actions
    for test1 in test_dict:
        for test2 in test_dict:
            if test1 == test2:
                pass
            else:
                action_key = (test1, test2)
                action_dict[action_key] = 1 - (
                    float(test_dict[test1].get_success_probability()) *
                    float(test_dict[test2].get_success_probability()))
    #print(action_dict)
    # Create all possible states and reward for the test run till current
    # state: one State per even-sized combination of tests already run.
    states = []
    state_index = [0 for s in test_dict.keys()]
    for i, t in enumerate(test_dict.keys()):
        state_index[i] = t
    states_comb = operations.list_of_combs(state_index)
    #print(states_comb)
    state_counter = 0
    for s in states_comb:
        if len(s) % 2 == 0:
            state_outcomes = []
            state_name = 'S' + str(state_counter)
            state_idx = state_counter
            tests_run = s
            test_left = list(set(state_index).difference(set(s)))
            for st in tests_run:
                state_outcomes.append(test_outcomes_dict[st])
            state = models.State(state_name, state_idx, tests_run,
                                 test_left, state_outcomes)
            states.append(state)
            state_counter += 1
            #print(state.get_state_info(),state.get_state_reward())
    # Derive actions (unordered test pairs) and transitions between states
    # that differ by exactly two tests.
    transitions = []
    transitions_act = {}
    actions = {}
    action_Pfailure = {}
    expected_reward = {}
    action_counter = 0
    for s in states:
        source_state_run = s.get_tests_run()
        for t in states:
            target_state_run = t.get_tests_run()
            if s.get_state_name() != t.get_state_name():
                action = list(
                    set(target_state_run).difference(set(source_state_run)))
                if len(action) == 2:
                    action_tup = (action[0], action[1])
                    action_tup_reversed = (action[1], action[0])
                    # Reuse the action name regardless of pair order.
                    if action_tup in actions:
                        action_name = actions[action_tup]
                    elif action_tup_reversed in actions:
                        action_name = actions[action_tup_reversed]
                    else:
                        action_name = 'A' + str(action_counter)
                        actions[action_tup] = action_name
                        action_Pfailure[action_name] = 1 - (float(test_dict[
                            action_tup[0]].get_success_probability()) * float(
                                test_dict[
                                    action_tup[1]].get_success_probability()))
                        action_counter += 1
                    tr = (s.get_state_name(), action_name, t.get_state_name())
                    tr_act = (s.get_state_name(), action_name)
                    # Expected reward mixes failure/success rewards by the
                    # action's failure probability.
                    expected_reward[tr] = action_Pfailure[
                        action_name] * operations.calculate_reward(0) + (
                            1 - action_Pfailure[action_name]
                        ) * operations.calculate_reward(1)
                    transitions.append(tr)
                    transitions_act[tr_act] = t.get_state_name()
    print(actions)
    print(action_Pfailure)
    print(transitions_act)
    print(expected_reward)
    print(states)
    # calculate value iteration
    max_exp_utility = 0
    exp_utility = 0
    max_iter_num = 100
    U1 = dict([(s.get_state_name(), 0) for s in states])
    counter = 0
    while counter < max_iter_num:
        U = U1.copy()
        for s in states:
            for a in actions.values():
                if (s.get_state_name(), a) in transitions_act:
                    s1 = transitions_act[(s.get_state_name(), a)]
                    exp_utility = action_Pfailure[a] * U[s1]
                    if max_exp_utility < exp_utility:
                        max_exp_utility = exp_utility
            U1[s.get_state_name()] = s.get_state_reward() + max_exp_utility
            #print(U)
            # Reset the running max for the next state.
            max_exp_utility = 0
            exp_utility = 0
        counter += 1
        #print(counter)
    print(U)
    # calculate optimal policy: greedy walk from S0, never repeating an
    # action, one step per pair of agents.
    current_state = 'S0'
    max_V = 0.0
    tran_V = 0.0
    best_act = ''
    number_of_tests = 10
    policy_actions = []
    for i in range(0, int(number_of_tests / agent_count)):
        for a in actions.values():
            if (current_state, a) in transitions_act:
                s1 = transitions_act[(current_state, a)]
                tran_V = U[s1]
                if tran_V > max_V and a not in policy_actions:
                    max_V = tran_V
                    best_act = a
        if best_act != '':
            policy_actions.append(best_act)
            current_state = transitions_act[(current_state, best_act)]
        max_V = 0.0
        tran_V = 0.0
        best_act = ''
    print(policy_actions)
def edit(request, status, datatype):
    """Edit view for a data shard (Python 2 / Django).

    GET: looks up the shard (named by the url-quoted 'ref' query param) and
    renders a provenance formset pre-filled from the shard's metadata.
    POST: validates the formset, applies the edits, and redirects back to
    this view with the same shard ref.
    """
    shard = request.GET.get('ref', '')
    shard = urllib.unquote(shard).decode('utf8')
    # NOTE(review): `state` is never used below — looks like dead code or a
    # constructor relied on for side effects; confirm before removing.
    state = models.State(state=status)
    # Shard path layout: .../<prefix>/<localname>/<leaf>
    paths = shard.split('/')
    prefix = '/'.join(paths[:-2]) + '/'
    localname = paths[-2]
    pre = prefixes.Prefixes()
    md_element = pre.value2key(prefix)
    # will we need to call custom code here for different types?
    ShardFormSet = formset_factory(forms.ProvenanceForm, extra=0)
    warning_msg = ''
    if request.method == 'POST':
        formset = ShardFormSet(request.POST)
        if formset.is_valid():
            process_formset(formset, shard, status, datatype)
            # Redirect back to this edit view, preserving the shard ref.
            return HttpResponseRedirect(
                url_with_querystring(
                    reverse('edit', kwargs={'status' : status,
                                            'datatype' : datatype}),
                    ref=shard))
        else:
            print formset.errors
    else:
        ushardm = get_shard(shard, status, datatype)
        # Duplicate shard names at the same status are reported, not fatal.
        if len(ushardm) > 1:
            warning_msg = (
                'Warning: '
                '%s Data Shards with the same name at status "%s" found.' % (
                    len(ushardm), status.upper()))
        initial_data_set = []
        for item in ushardm:
            data_set = {}
            previousurl = item.get('previous')
            previouslabel = previousurl.split('/')[-1]
            data_set = dict(
                provenanceMD5 = item.get('prov').split('/')[-1],
                baseshardMD5 = item.get('link').split('/')[-1],
                metadata_element = md_element,
                local_name = localname,
                current_status = item.get('status'),
                standard_name = item.get('cfname'),
                unit = item.get('unit'),
                long_name = item.get('long_name'),
                comment = item.get('comment'),
                reason = item.get('reason'),
                last_edit = item.get('last_edit'),
                previous = mark_safe("%s" % previouslabel)
            )
            initial_data_set.append(data_set)
        formset = ShardFormSet(initial=initial_data_set)
    return render_to_response('main.html',
        RequestContext(request, {
            'viewname' : 'Edit Shard',
            'status' : 'Status: %s, datatype: %s' % (status.upper(), datatype),
            'title' : 'Edit Shard: %s' % shard,
            'detail' : 'Shard: %s' % shard,
            'formset' : formset,
            'read_only' : READ_ONLY,
            'error' : warning_msg,
        })
    )
def test_add_production_post_wme(self):
    """Productions added AFTER the WMEs must still match existing memory:
    p1/p2/p3 should trigger, p4 (nonsense pattern) should not; removing a
    production must drop it from the triggered set."""
    rete = models.Rete().save()
    # p1: mounted + in_raid + has_partitions (three conditions).
    p1 = models.Production.get('p1', [
        {
            '#sys/domains/raid/get_all_drive_mountpoints:id=?id1':{
                'out':{
                    'type':'list',
                    'value':{
                        '?index':'?mount_point',
                    },
                }
            }
        },
        {
            '#sys/domains/raid/in_raid':{
                'in':{
                    'type':'str',
                    'value':'?mountpoint',
                    'src':'?id1'
                },
                'out':{
                    'type':'bool',
                    'value':'True',
                }
            }
        },
        {
            '#sys/domains/raid/has_partitions':{
                'in':{
                    'type':'str',
                    'value':'?mountpoint',
                    'src':'?id1'
                },
                'out':{
                    'type':'bool',
                    'value':'True',
                }
            }
        }
    ])
    # p2: first two conditions of p1.
    p2 = models.Production.get('p2', [
        {
            '#sys/domains/raid/get_all_drive_mountpoints:id=?id1':{
                'out':{
                    'type':'list',
                    'value':{
                        '?index':'?mount_point',
                    },
                }
            }
        },
        {
            '#sys/domains/raid/in_raid':{
                'in':{
                    'type':'str',
                    'value':'?mountpoint',
                    'src':'?id1'
                },
                'out':{
                    'type':'bool',
                    'value':'True',
                }
            }
        },
    ])
    # p3: only the mountpoint condition.
    p3 = models.Production.get('p3', [
        {
            '#sys/domains/raid/get_all_drive_mountpoints:id=?id1':{
                'out':{
                    'type':'list',
                    'value':{
                        '?index':'?mount_point',
                    },
                }
            }
        },
    ])
    # p4: deliberately matches nothing in working memory.
    p4 = models.Production.get('p4', [
        {
            'laksjdflsjflkdjfls':{
                'out':{
                    'type':'lskdjflksf',
                    'value':{
                        '?index':'?mount_point',
                    },
                }
            }
        },
    ])
    initial_state = models.State({
        '#sys/domains/raid/get_all_drive_mountpoints:id=?id1':{
            'out':{
                'type':'list',
                'value':{
                    '0':'/dev/sda',
                    '1':'/dev/sdb',
                }
            }
        }
    },
    {
        '#sys/domains/raid/in_raid':{
            'in':{
                'type':'str',
                'value':'/dev/sda',
                'src':'?id1'#DONT_CARE
            },
            'out':{
                'type':'bool',
                'value':'True'
            }
        }
    },
    {
        '#sys/domains/raid/has_partitions':{
            'in':{
                'type':'str',
                'value':'/dev/sda',
                'src':'?id1'
            },
            'out':{
                'type':'bool',
                'value':'True',
            }
        }
    }
    )
    # WMEs go in first, productions afterwards.
    for wme in initial_state.iter_triples():
        rete.add_wme(wme)
    rete.add_production(p1)
    rete.add_production(p2)
    rete.add_production(p3)
    rete.add_production(p4)
    self.assertEqual(len(list(rete.triggered_pnodes.all())), 3)
    # Confirm production removal updates triggered pnode set.
    # p4 never triggered, so removing it leaves the count unchanged.
    rete.remove_production(p4)
    self.assertEqual(len(list(rete.triggered_pnodes.all())), 3)
    rete.remove_production(p3)
    self.assertEqual(len(list(rete.triggered_pnodes.all())), 2)
    rete.remove_production(p2)
    self.assertEqual(len(list(rete.triggered_pnodes.all())), 1)
    rete.remove_production(p1)
    self.assertEqual(len(list(rete.triggered_pnodes.all())), 0)
def test_variable_extraction(self):
    """Matching a single list-pattern condition should bind ?index and
    ?mount_point once per list entry (two matches for sda/sdb), with the
    variable bindings exposed via pnode.match_variables. (Python 2:
    uses iteritems.)"""
    rete = models.Rete().save()
    # Single condition with two variables in the list pattern.
    p1 = models.Production.get('p1', [
        {
            '#sys/domains/raid/get_all_drive_mountpoints:id=?id1':{
                'out':{
                    'type':'list',
                    'value':{
                        '?index':'?mount_point',
                    },
                }
            }
        },
    ])
    initial_state = models.State({
        '#sys/domains/raid/get_all_drive_mountpoints:id=?id1':{
            'out':{
                'type':'list',
                'value':{
                    '0':'/dev/sda',
                    '1':'/dev/sdb',
                }
            }
        }
    },
    {
        '#sys/domains/raid/in_raid':{
            'in':{
                'type':'str',
                'value':'/dev/sda',
                'src':'?id1'#DONT_CARE
            },
            'out':{
                'type':'bool',
                'value':'True'
            }
        }
    },
    {
        '#sys/domains/raid/has_partitions':{
            'in':{
                'type':'str',
                'value':'/dev/sda',
                'src':'?id1'
            },
            'out':{
                'type':'bool',
                'value':'True',
            }
        }
    }
    )
    for wme in initial_state.iter_triples():
        rete.add_wme(wme)
    rete.add_production(p1)
    triggered_pnodes = list(rete.triggered_pnodes.all())
    self.assertEqual(len(triggered_pnodes), 1)
    pnode = triggered_pnodes[0]
    # One match per list entry; each match carries 4 elements.
    matches = list(pnode.matches)
    self.assertEqual(len(matches), 2)
    self.assertEqual(len(matches[0]), 4)
    self.assertEqual(len(matches[1]), 4)
    # Compare bindings order-insensitively via frozensets, restricted to
    # the variables of interest.
    match_vars = frozenset([frozenset([(k,v) for k,v in mv.iteritems()
        if k in ['mount_point','index','id1']])
        for mv in pnode.match_variables])
    self.assertEqual(match_vars,
        frozenset([
            frozenset([(u'index', u'1'), (u'mount_point', u'/dev/sdb'),
                       (u'id1', 1)]),
            frozenset([(u'mount_point', u'/dev/sda'), (u'index', u'0'),
                       (u'id1', 1)])]))
def add_valid_state(state):
    """Stage a State record, dated today, on the current DB session.

    Note: the record is only added to the session; committing is left to
    the caller.
    """
    record = models.State(state, datetime.date.today())
    models.db.session.add(record)
def setUp(self):
    """Setup for each testcase"""
    # A fresh State instance per test keeps the cases independent.
    self.testcase = models.State()
def create_state(code='NY'):
    """Create and persist a State with the given code.

    Args:
        code: state code to store (defaults to 'NY').

    Returns:
        Whatever State.save() returns for the new record.
    """
    state = models.State(code=code)
    return state.save()