def test_policy_parser(self):
    model = policies_model()
    # result = model.generate_samples({'prenew': 3, 'pattempts': 3, 'pdict': 0, 'psets': 2, 'phist': 4})
    # result = model.generate_samples({'plen': 0})
    # result = model.generate_samples({})
    # model.generate_training_set()
    # policy = model.latest_policy(3)
    policyUpdate = [
        {
            'employee': ['executives', 'road'],
            'location': ['office', 'home'],
            'device': ['phone', 'desktop'],
            'policyDelta': {
                'pwpolicy': {'plen': 12, 'pdict': 1},
                'passfaces': {},
                'biometric': {}
            }
        },
        {
            'employee': ['desk', 'road'],
            'location': ['office', 'public'],
            'device': ['desktop', 'laptop'],
            'policyDelta': {
                'pwpolicy': {'plen': 8, 'psets': 3},
                'passfaces': {},
                'biometric': {'bdata': 2}
            }
        },
        {
            'employee': ['desk'],
            'location': ['office'],
            'device': ['desktop'],
            'policyDelta': {
                'pwpolicy': {'plen': 0},
                'passfaces': {'pdata': 1},
                'biometric': {}
            }
        }
    ]
    """
    updated_policy = model.parse_policy(policyUpdate)
    latest_policy_before = model.iter_to_nested_obj(model.get_policy_history(4, True))
    latest_policy_after = model.merge_policies(updated_policy, latest_policy_before)
    print "updated_policy", updated_policy
    print "latest_policy before", model.nested_obj_to_list_of_dict(latest_policy_before)
    #print model.get_latest_policy(4)[1]
    print "latest_policy after", model.nested_obj_to_list_of_dict(latest_policy_after)
    """
    # model.commit_policy_update(policyUpdate, '2014-02-15')
    # print model.nested_obj_to_list_of_dict(model.iter_to_nested_obj(model.get_policy_history(2)))
    # print model.get_policy_history(4)[0]
    # print model.get_policies_list(1)
    model.merge_policies(model.parse_policy(policyUpdate),
                         model.iter_to_nested_obj(model.get_policy_history(2)))
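# Shape note for the fixture above (inferred from the field names, so treat
# it as an assumption rather than documented behavior): each policyUpdate
# entry scopes a set of mechanism deltas to an employee/location/device
# combination, and an empty mechanism dict appears to mean "no change".
example_entry = {
    'employee': ['desk'],           # employee groups the delta applies to
    'location': ['office'],         # locations it applies to
    'device': ['desktop'],          # devices it applies to
    'policyDelta': {
        'pwpolicy': {'plen': 0},    # password policy parameters to change
        'passfaces': {'pdata': 1},  # passfaces parameters to change
        'biometric': {}             # left untouched
    }
}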
def POST(self):
    """ Listens to POST requests for main Chronos API """
    payload = json.loads(web.data())
    web.header('Content-Type', 'application/json')
    event_accept = False
    policy_accept = False
    client_date = date_utils.iso8601_to_date(payload.get('date', '2014-01-06'))
    if context.user_id() == 0:
        return json.dumps({'success': False, 'messages': ['Unauthorized']})
    # corrected_sync_date backtracks if the client submits an invalid date.
    corrected_sync_date = records.sync_history(context.user_id(), client_date)
    policy_update = payload.get('policyUpdate')
    if corrected_sync_date.day == 1:
        if policy_update is None:
            # Expecting a policy update, but none was sent: back up one day.
            corrected_sync_date -= timedelta(days=1)
        else:
            policies_model().commit_policy_update(policy_update, corrected_sync_date)
            policy_accept = True
    if corrected_sync_date == records.next_due_event_date(context.user_id()):
        event_accept = True
    if event_accept or policy_accept:
        records.clear_prophecy(context.user_id(), corrected_sync_date)
        # TODO get prophecy for multiple risks
        #records.record_prophecy(context.user_id(), simulation().calc_risk_prob())
    response = {
        'date': corrected_sync_date.isoformat(),
        'policyAccept': policy_accept,
        'eventAccept': event_accept,
        'calendar': [records.get_calendar(context.user_id(), corrected_sync_date)]
    }
    if payload.get('initPolicy', False):
        response['policy'] = policies_model().get_policies_list(context.user_id())
    return json.dumps(response)
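# Illustrative request body for the handler above. The field values are made
# up; 'date', 'policyUpdate', and 'initPolicy' are the only payload fields
# the handler reads, and the policyUpdate entry shape follows the fixture in
# test_policy_parser.
example_payload = {
    'date': '2014-02-01',   # client sync date, ISO 8601
    'initPolicy': True,     # request the full policies list in the response
    'policyUpdate': [{      # optional; expected when the synced day is the 1st
        'employee': ['desk'],
        'location': ['office'],
        'device': ['desktop'],
        'policyDelta': {
            'pwpolicy': {'plen': 8},
            'passfaces': {},
            'biometric': {}
        }
    }]
}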
def prophesize(cls, user_id, base_date):
    """ Given user_id, returns prophecy, a list of dictionaries of events.
    Events start from the specified base_date, offset from 0 to 30 days.

    :param user_id:
    :param base_date:

    [
        {
            'date': 'YYYY-MM-DD',
            'incident_id': 1,
            'cost': 5000000
        },
        ...
    ]
    """
    random.seed()
    # policies = db.query('SELECT * FROM policies WHERE user_id=$user_id ORDER BY date DESC limit 1', vars=locals())
    # TODO lasagna code - this should be fixed when multiple policies are used.
    history = policies_model().get_policy_history(user_id, True)
    response = policies_model().nested_obj_to_list_of_dict(
        policies_model().iter_to_nested_obj(history))
    # Take the policy context into account when predicting events.
    prophecy = []
    max_risk = 0
    max_cost = 0
    for policy in response:
        p = policy['data']
        p_context = {
            'employees': [p['employee']],
            'locations': [p['location']],
            'devices': [p['device']]
        }
        p_incidents = sim_model().request(p, p_context)
        for current_incident in p_incidents:
            if current_incident['risk'] > max_risk:
                max_risk = current_incident['risk']
                max_cost = current_incident['cost']
            daily_prob = cls.daily_prob(current_incident['risk'])
            incident_cost = current_incident['cost'] * company.max_incident_cost
            for i in range(0, 31):
                rand = random.random()
                if rand < daily_prob:
                    if len(prophecy) > 0:
                        for event in prophecy:
                            # Generate at most one incident per day: skip if the date is taken.
                            if (base_date + timedelta(days=i)).isoformat() == event['date']:
                                break
                            # Skip duplicates: same incident in the same context is already prophesied.
                            elif current_incident['id'] == event['incident_id'] \
                                    and current_incident['employee'] == event['employee'] \
                                    and current_incident['location'] == event['location'] \
                                    and current_incident['device'] == event['device']:
                                break
                            # Reached the last existing event without a clash: append.
                            elif (prophecy.index(event) + 1) == len(prophecy):
                                prophecy.append({
                                    'date': (base_date + timedelta(days=i)).isoformat(),
                                    'incident_id': current_incident['id'],
                                    'cost': cls.randomize_cost(incident_cost),
                                    'employee': current_incident['employee'],
                                    'location': current_incident['location'],
                                    'device': current_incident['device']
                                })
                    else:
                        prophecy.append({
                            'date': (base_date + timedelta(days=i)).isoformat(),
                            'incident_id': current_incident['id'],
                            'cost': cls.randomize_cost(incident_cost),
                            'employee': current_incident['employee'],
                            'location': current_incident['location'],
                            'device': current_incident['device']
                        })
    # TODO currently productivity costs are being used as risk impact.
    score_model.insert_score(user_id, 1, (max_risk * 4 + max_cost) / 5.0, base_date)
    score_model.insert_score(user_id, 2, (max_cost * 4 + max_risk) / 5.0, base_date)
    return prophecy
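# A minimal call sketch. The enclosing class is not shown in this excerpt, so
# the 'records' receiver below is an assumption; prophesize itself only needs
# a user id and a datetime.date to offset from.
from datetime import date

prophecy = records.prophesize(2, date(2014, 2, 1))
for event in prophecy:
    # Each event carries the docstring fields plus the employee/location/
    # device context attached inside the generation loop.
    print event['date'], event['incident_id'], event['cost']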