def consumption_thread_function(verification_obj):
    # the web service has to be considered as running forever, so the monitoring loop for now should also run forever
    # this needs to be changed for a clean exit
    INACTIVE_MONITORING = False
    continue_monitoring = True
    while continue_monitoring:

        # take the top element from the queue
        try:
            top_pair = verification_obj.consumption_queue.get(timeout=1)
        except:
            continue

        if top_pair[0] == "end-monitoring":
            # return from the monitoring function to end the monitoring thread
            vypr_output("Returning from monitoring thread.")
            continue_monitoring = False
            continue

        # if monitoring is inactive, we do nothing with what we consume unless it's a resume message
        if INACTIVE_MONITORING:
            if top_pair[0] == "inactive-monitoring-stop":
                # the resume message reactivates monitoring without ending the monitoring thread
                vypr_output("Restarting monitoring. Monitoring thread will still be alive.")
                INACTIVE_MONITORING = False
            # skip to the next iteration of the consumption loop
            continue
        else:
            if top_pair[0] == "inactive-monitoring-start":
                # the pause message deactivates monitoring without ending the monitoring thread
                vypr_output("Pausing monitoring. Monitoring thread will still be alive.")
                # turn on inactive monitoring
                INACTIVE_MONITORING = True
                # skip to the next iteration of the consumption loop
                continue

        # if inactive monitoring is off (so monitoring is running), process what we consumed

        vypr_output("Consuming: %s" % str(top_pair))

        property_hash = top_pair[0]

        # remove the property hash and just deal with the rest of the data
        top_pair = top_pair[1:]

        instrument_type = top_pair[0]
        function_name = top_pair[1]

        # get the maps we need for this function
        maps = verification_obj.function_to_maps[function_name][property_hash]
        static_qd_to_monitors = maps.static_qd_to_monitors
        formula_structure = maps.formula_structure
        bindings = maps.binding_space
        program_path = maps.program_path

        verdict_report = maps.verdict_report

        atoms = formula_structure._formula_atoms

        if instrument_type == "function":
            # we've received a point telling us that a function has started or ended
            # for now, we can just process "end" - we reset the contents of the maps
            # that are updated at runtime
            scope_event = top_pair[2]

            if scope_event == "end":

                vypr_output("*" * 50)
                vypr_output("Function '%s' started at time %s has ended at %s."
                            % (function_name, str(maps.latest_time_of_call), str(top_pair[-1])))

                # before resetting the qd -> monitor map, go through it to find monitors
                # that reached a verdict, and register those in the verdict report
                for static_qd_index in static_qd_to_monitors:
                    for monitor in static_qd_to_monitors[static_qd_index]:
                        # check if the monitor has a collapsing atom - only then do we register a verdict
                        if monitor.collapsing_atom:
                            verdict_report.add_verdict(
                                static_qd_index,
                                monitor._formula.verdict,
                                monitor.atom_to_observation,
                                monitor.atom_to_program_path,
                                atoms.index(monitor.collapsing_atom),
                                monitor.collapsing_atom_sub_index,
                                monitor.atom_to_state_dict
                            )

                # everything not here is static data that we need, and should be left
                maps.static_qd_to_monitors = {}

                # generate the verdict report
                report_map = verdict_report.get_final_verdict_report()

                binding_to_line_numbers = {}

                for bind_space_index in report_map.keys():

                    binding = bindings[bind_space_index]

                    binding_to_line_numbers[bind_space_index] = []

                    # for each element of the binding, record the appropriate line number
                    for bind_var in binding:

                        if type(bind_var) is CFGVertex:
                            if bind_var._name_changed == ["loop"]:
                                binding_to_line_numbers[bind_space_index].append(
                                    bind_var._structure_obj.lineno)
                            else:
                                binding_to_line_numbers[bind_space_index].append(
                                    bind_var._previous_edge._instruction.lineno)
                        elif type(bind_var) is CFGEdge:
                            binding_to_line_numbers[bind_space_index].append(
                                bind_var._instruction.lineno)

                # send the verdict
                # we send the function name, the time of the function call, the verdict report object,
                # the map of bindings to their line numbers and the date/time of the request to identify it
                # (single threaded...)
                send_verdict_report(
                    function_name,
                    maps.latest_time_of_call,
                    top_pair[-1],
                    maps.program_path,
                    verdict_report,
                    binding_to_line_numbers,
                    verification_obj.transaction_start_time,
                    top_pair[4]
                )

                # reset the verdict report
                maps.verdict_report.reset()

                # reset the function start time for the next time
                maps.latest_time_of_call = None

                # reset the program path
                maps.program_path = []

            elif scope_event == "start":
                vypr_output("Function '%s' has started." % function_name)

                # remember when the function call started
                maps.latest_time_of_call = top_pair[3]
                vypr_output("Set start time to %s" % maps.latest_time_of_call)

                vypr_output("*" * 50)

        if instrument_type == "trigger":
            # we've received a trigger instrument

            vypr_output("Processing trigger - dealing with monitor instantiation")

            static_qd_index = top_pair[2]
            bind_variable_index = top_pair[3]

            vypr_output("Trigger is for bind variable %i" % bind_variable_index)

            if bind_variable_index == 0:
                vypr_output("Instantiating new, clean monitor")
                # we've encountered a trigger for the first bind variable, so we simply instantiate a new monitor
                new_monitor = formula_tree.new_monitor(formula_structure.get_formula_instance())
                try:
                    static_qd_to_monitors[static_qd_index].append(new_monitor)
                except:
                    static_qd_to_monitors[static_qd_index] = [new_monitor]
            else:
                vypr_output("Processing existing monitors")
                # we look at the monitors' timestamps, and decide whether to generate a new monitor
                # and copy over existing information, or update timestamps of existing monitors
                new_monitors = []
                subsequences_processed = []
                for monitor in static_qd_to_monitors[static_qd_index]:
                    # check if the monitor's timestamp sequence includes a timestamp for this bind variable
                    vypr_output("  Processing monitor with timestamp sequence %s"
                                % str(monitor._monitor_instantiation_time))
                    if len(monitor._monitor_instantiation_time) == bind_variable_index + 1:
                        if monitor._monitor_instantiation_time[:bind_variable_index] in subsequences_processed:
                            # the same subsequence might have been copied and extended multiple times
                            # we only care about one
                            continue
                        else:
                            subsequences_processed.append(
                                monitor._monitor_instantiation_time[:bind_variable_index])
                            # construct a new monitor
                            vypr_output("    Instantiating new monitor with modified timestamp sequence")
                            # instantiate a new monitor using the timestamp subsequence excluding the current bind
                            # variable and copy over all observation, path and state information
                            old_instantiation_time = list(monitor._monitor_instantiation_time)
                            updated_instantiation_time = tuple(
                                old_instantiation_time[:bind_variable_index] + [datetime.datetime.utcnow()])
                            new_monitor = formula_tree.new_monitor(formula_structure.get_formula_instance())
                            new_monitors.append(new_monitor)

                            # copy timestamp sequence, observation, path and state information
                            new_monitor._monitor_instantiation_time = updated_instantiation_time

                            # iterate through the observations stored by the previous monitor
                            # for bind variables before the current one and use them to update the new monitor
                            for atom in monitor._state._state:
                                if not (type(atom) is formula_tree.LogicalNot):
                                    if (formula_structure._bind_variables.index(get_base_variable(atom))
                                            < bind_variable_index
                                            and not (monitor._state._state[atom] is None)):

                                        if monitor._state._state[atom] == True:
                                            new_monitor.check_optimised(atom)
                                        elif monitor._state._state[atom] == False:
                                            new_monitor.check_optimised(formula_tree.lnot(atom))

                                        atom_index = atoms.index(atom)

                                        for sub_index in monitor.atom_to_observation[atom_index].keys():
                                            new_monitor.atom_to_observation[atom_index][sub_index] = \
                                                monitor.atom_to_observation[atom_index][sub_index]

                                        for sub_index in monitor.atom_to_program_path[atom_index].keys():
                                            new_monitor.atom_to_program_path[atom_index][sub_index] = \
                                                monitor.atom_to_program_path[atom_index][sub_index]

                                        for sub_index in monitor.atom_to_state_dict[atom_index].keys():
                                            new_monitor.atom_to_state_dict[atom_index][sub_index] = \
                                                monitor.atom_to_state_dict[atom_index][sub_index]

                    elif len(monitor._monitor_instantiation_time) == bind_variable_index:
                        vypr_output("  Updating existing monitor timestamp sequence")
                        # extend the monitor's timestamp sequence
                        tmp_sequence = list(monitor._monitor_instantiation_time)
                        tmp_sequence.append(datetime.datetime.utcnow())
                        monitor._monitor_instantiation_time = tuple(tmp_sequence)

                # add the new monitors
                static_qd_to_monitors[static_qd_index] += new_monitors

        if instrument_type == "path":
            # we've received a path recording instrument
            # append the branching condition to the program path encountered so far for this function
            program_path.append(top_pair[2])

        if instrument_type == "instrument":

            static_qd_indices = top_pair[2]
            atom_index = top_pair[3]
            atom_sub_index = top_pair[4]
            instrumentation_point_db_ids = top_pair[5]
            observation_time = top_pair[6]
            observation_end_time = top_pair[7]
            observed_value = top_pair[8]
            thread_id = top_pair[9]
            try:
                state_dict = top_pair[10]
            except:
                # the instrument isn't from a transition measurement
                state_dict = None

            vypr_output("Consuming data from an instrument in thread %i" % thread_id)

            lists = zip(static_qd_indices, instrumentation_point_db_ids)

            for values in lists:

                static_qd_index = values[0]
                instrumentation_point_db_id = values[1]

                vypr_output("Binding space index : %i" % static_qd_index)
                vypr_output("Atom index : %i" % atom_index)
                vypr_output("Atom sub index : %i" % atom_sub_index)
                vypr_output("Instrumentation point db id : %i" % instrumentation_point_db_id)
                vypr_output("Observation time : %s" % str(observation_time))
                vypr_output("Observation end time : %s" % str(observation_end_time))
                vypr_output("Observed value : %s" % observed_value)
                vypr_output("State dictionary : %s" % str(state_dict))

                instrumentation_atom = atoms[atom_index]

                # update all monitors associated with static_qd_index
                if static_qd_to_monitors.get(static_qd_index):
                    for (n, monitor) in enumerate(static_qd_to_monitors[static_qd_index]):
                        # checking for a previous observation of the atom is done by the monitor's internal logic
                        monitor.process_atom_and_value(instrumentation_atom, observation_time, observation_end_time,
                                                       observed_value, atom_index, atom_sub_index,
                                                       inst_point_id=instrumentation_point_db_id,
                                                       program_path=len(program_path), state_dict=state_dict)

        # set the task as done
        verification_obj.consumption_queue.task_done()

        vypr_output("Consumption finished.")

        vypr_output("=" * 100)

    # if we reach this point, the monitoring thread is ending
    vypr_logger.end_logging()
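# ----------------------------------------------------------------------------------------------
# Illustrative sketch only - not part of the original module.  It shows one plausible way the
# consumption thread above is driven: a daemon thread runs consumption_thread_function over the
# verification object's consumption_queue, and control messages are plain tuples whose first
# element names the message type.  The names _example_start_consumption_thread and
# SketchVerification are hypothetical and exist only for this sketch; the real verification
# object is constructed elsewhere in VyPR.
def _example_start_consumption_thread():
    import threading
    try:
        import queue  # Python 3
    except ImportError:
        import Queue as queue  # Python 2

    class SketchVerification(object):
        # minimal stand-in exposing only the attributes the control messages need
        def __init__(self):
            self.consumption_queue = queue.Queue()
            self.function_to_maps = {}
            self.transaction_start_time = None

    verification = SketchVerification()
    thread = threading.Thread(target=consumption_thread_function, args=(verification,))
    thread.daemon = True
    thread.start()

    # pause monitoring, resume it, then ask the consumption loop to exit
    verification.consumption_queue.put(("inactive-monitoring-start",))
    verification.consumption_queue.put(("inactive-monitoring-stop",))
    verification.consumption_queue.put(("end-monitoring",))
    thread.join()
# ----------------------------------------------------------------------------------------------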
def consumption_thread_function(verification_obj):
    # the web service has to be considered as running forever, so the monitoring loop for now should also run forever
    # this needs to be changed for a clean exit
    INACTIVE_MONITORING = False
    transaction = -1
    continue_monitoring = True
    while continue_monitoring:

        # take the top element from the queue
        try:
            top_pair = verification_obj.consumption_queue.get(timeout=1)
        except:
            # nothing to consume within the timeout (this also covers the flask testing case,
            # where end-monitoring may never arrive), so try again
            continue

        if top_pair[0] == "end-monitoring":
            # return from the monitoring function to end the monitoring thread
            vypr_output("Returning from monitoring thread.")
            continue_monitoring = False
            continue

        # if monitoring is inactive, we do nothing with what we consume unless it's a resume message
        if INACTIVE_MONITORING:
            if top_pair[0] == "inactive-monitoring-stop":
                # the resume message reactivates monitoring without ending the monitoring thread
                vypr_output("Restarting monitoring. Monitoring thread will still be alive.")
                INACTIVE_MONITORING = False
            # skip to the next iteration of the consumption loop
            continue
        else:
            if top_pair[0] == "inactive-monitoring-start":
                # the pause message deactivates monitoring without ending the monitoring thread
                vypr_output("Pausing monitoring. Monitoring thread will still be alive.")
                # turn on inactive monitoring
                INACTIVE_MONITORING = True
                # skip to the next iteration of the consumption loop
                continue

        # if inactive monitoring is off (so monitoring is running), process what we consumed

        if top_pair[0] == "test_transaction":
            transaction = top_pair[1]
            vypr_output("Test Transaction >> %s" % str(top_pair))
            continue

        vypr_output("Consuming: %s" % str(top_pair))

        first_element = top_pair[0]

        if first_element == "function":

            # we have a function start/end instrument

            property_hash_list = top_pair[1]
            function_name = top_pair[2]
            scope_event = top_pair[3]

            if scope_event == "end":

                # first, send the function call data independently of any property -
                # the latest time of call and the program path are the same everywhere
                latest_time_of_call = verification_obj.function_to_maps[function_name][
                    verification_obj.function_to_maps[function_name].keys()[0]].latest_time_of_call
                program_path = verification_obj.function_to_maps[function_name][
                    verification_obj.function_to_maps[function_name].keys()[0]].program_path

                if 'yes' in TEST_FRAMEWORK:
                    transaction_time = transaction
                else:
                    transaction_time = top_pair[3]

                insertion_data = send_function_call_data(
                    function_name,
                    latest_time_of_call,
                    top_pair[-1],
                    program_path,
                    transaction_time
                )

                # now handle the verdict data we have for each property
                for property_hash in property_hash_list:

                    maps = verification_obj.function_to_maps[function_name][property_hash]
                    static_qd_to_monitors = maps.static_qd_to_monitors
                    verdict_report = maps.verdict_report

                    vypr_output("*" * 50)

                    # before resetting the qd -> monitor map, go through it to find monitors
                    # that reached a verdict, and register those in the verdict report
                    for static_qd_index in static_qd_to_monitors:
                        for monitor in static_qd_to_monitors[static_qd_index]:
                            # check if the monitor has a collapsing atom - only then do we register a verdict
                            if monitor.collapsing_atom_index is not None:
                                verdict_report.add_verdict(
                                    static_qd_index,
                                    monitor._formula.verdict,
                                    monitor.atom_to_observation,
                                    monitor.atom_to_program_path,
                                    monitor.collapsing_atom_index,
                                    monitor.collapsing_atom_sub_index,
                                    monitor.atom_to_state_dict
                                )

                    # reset the monitors
                    maps.static_qd_to_monitors = {}

                    # send the verdict data
                    send_verdict_report(
                        verdict_report,
                        property_hash,
                        insertion_data["function_id"],
                        insertion_data["function_call_id"]
                    )

                    # reset the verdict report
                    maps.verdict_report.reset()

                    # reset the function start time for the next time
                    maps.latest_time_of_call = None

            elif scope_event == "start":
                vypr_output("Function '%s' has started." % function_name)

                for property_hash in property_hash_list:
                    # reset anything that might have been left over from the previous call,
                    # especially if an unhandled exception caused the function to end without
                    # vypr instruments sending an end signal
                    maps = verification_obj.function_to_maps[function_name][property_hash]
                    maps.static_qd_to_monitors = {}
                    maps.verdict_report.reset()

                    # remember when the function call started
                    maps.latest_time_of_call = top_pair[4]
                    vypr_output("Set start time to %s" % maps.latest_time_of_call)

                    # reset the program path
                    maps.program_path = []

                vypr_output("*" * 50)

        else:

            # we have another kind of instrument that is specific to a property

            property_hash = top_pair[0]

            # remove the property hash and just deal with the rest of the data
            top_pair = top_pair[1:]

            instrument_type = top_pair[0]
            function_name = top_pair[1]

            # get the maps we need for this function
            maps = verification_obj.function_to_maps[function_name][property_hash]
            static_qd_to_monitors = maps.static_qd_to_monitors
            formula_structure = maps.formula_structure
            program_path = maps.program_path

            verdict_report = maps.verdict_report

            atoms = formula_structure._formula_atoms

            if instrument_type == "trigger":
                # we've received a trigger instrument

                vypr_output("Processing trigger - dealing with monitor instantiation")

                static_qd_index = top_pair[2]
                bind_variable_index = top_pair[3]

                vypr_output("Trigger is for bind variable %i" % bind_variable_index)

                if bind_variable_index == 0:
                    vypr_output("Instantiating new, clean monitor")
                    # we've encountered a trigger for the first bind variable, so we simply instantiate a new monitor
                    new_monitor = formula_tree.new_monitor(formula_structure.get_formula_instance())
                    try:
                        static_qd_to_monitors[static_qd_index].append(new_monitor)
                    except:
                        static_qd_to_monitors[static_qd_index] = [new_monitor]
                else:
                    vypr_output("Processing existing monitors")
                    # we look at the monitors' timestamps, and decide whether to generate a new monitor
                    # and copy over existing information, or update timestamps of existing monitors
                    new_monitors = []
                    subsequences_processed = []
                    for monitor in static_qd_to_monitors[static_qd_index]:
                        # check if the monitor's timestamp sequence includes a timestamp for this bind variable
                        vypr_output("  Processing monitor with timestamp sequence %s"
                                    % str(monitor._monitor_instantiation_time))
                        if len(monitor._monitor_instantiation_time) == bind_variable_index + 1:
                            if monitor._monitor_instantiation_time[:bind_variable_index] in subsequences_processed:
                                # the same subsequence might have been copied and extended multiple times
                                # we only care about one
                                continue
                            else:
                                subsequences_processed.append(
                                    monitor._monitor_instantiation_time[:bind_variable_index])
                                # construct a new monitor
                                vypr_output("    Instantiating new monitor with modified timestamp sequence")
                                # instantiate a new monitor using the timestamp subsequence excluding the current bind
                                # variable and copy over all observation, path and state information
                                old_instantiation_time = list(monitor._monitor_instantiation_time)
                                updated_instantiation_time = tuple(
                                    old_instantiation_time[:bind_variable_index] + [datetime.datetime.now()])
                                new_monitor = formula_tree.new_monitor(formula_structure.get_formula_instance())
                                new_monitors.append(new_monitor)

                                # copy timestamp sequence, observation, path and state information
                                new_monitor._monitor_instantiation_time = updated_instantiation_time

                                # iterate through the observations stored by the previous monitor
                                # for bind variables before the current one and use them to update the new monitor
                                for atom_index in monitor._state._state:
                                    atom = atoms[atom_index]
                                    if not (type(atom) is formula_tree.LogicalNot):

                                        # the copy we do for the information related to the atom
                                        # depends on whether the atom is mixed or not
                                        if formula_tree.is_mixed_atom(atom):

                                            # for mixed atoms, the return value here is a list
                                            base_variables = get_base_variable(atom)
                                            # determine the base variables which are before the current bind variable
                                            relevant_base_variables = filter(
                                                lambda var: (formula_structure._bind_variables.index(var)
                                                             < bind_variable_index),
                                                base_variables
                                            )
                                            # determine the base variables' sub-indices in the current atom
                                            relevant_sub_indices = map(
                                                lambda var: base_variables.index(var),
                                                relevant_base_variables
                                            )

                                            # copy over relevant information for the sub-indices
                                            # whose base variables had positions less than the current variable index
                                            # relevant_sub_indices can contain at most 0 and 1.
                                            for sub_index in relevant_sub_indices:
                                                # set up keys in the new monitor state if they aren't already there
                                                if not (new_monitor.atom_to_observation.get(atom_index)):
                                                    new_monitor.atom_to_observation[atom_index] = {}
                                                    new_monitor.atom_to_program_path[atom_index] = {}
                                                    new_monitor.atom_to_state_dict[atom_index] = {}

                                                # copy over observation, program path and state information
                                                new_monitor.atom_to_observation[atom_index][sub_index] = \
                                                    monitor.atom_to_observation[atom_index][sub_index]
                                                new_monitor.atom_to_program_path[atom_index][sub_index] = \
                                                    monitor.atom_to_program_path[atom_index][sub_index]
                                                new_monitor.atom_to_state_dict[atom_index][sub_index] = \
                                                    monitor.atom_to_state_dict[atom_index][sub_index]

                                                # update the state of the monitor
                                                new_monitor.check_atom_truth_value(atom, atom_index, atom_sub_index)

                                        else:

                                            # the atom is not mixed, so copying over information is simpler
                                            if (formula_structure._bind_variables.index(get_base_variable(atom))
                                                    < bind_variable_index
                                                    and not (monitor._state._state[atom_index] is None)):

                                                # decide how to update the new monitor based on the existing
                                                # monitor's truth value for it
                                                if monitor._state._state[atom_index] == True:
                                                    new_monitor.check_optimised(atom)
                                                elif monitor._state._state[atom_index] == False:
                                                    new_monitor.check_optimised(formula_tree.lnot(atom))

                                                # copy over observation, program path and state information
                                                new_monitor.atom_to_observation[atom_index][0] = \
                                                    monitor.atom_to_observation[atom_index][0]
                                                new_monitor.atom_to_program_path[atom_index][0] = \
                                                    monitor.atom_to_program_path[atom_index][0]
                                                new_monitor.atom_to_state_dict[atom_index][0] = \
                                                    monitor.atom_to_state_dict[atom_index][0]

                                vypr_output("    New monitor construction finished.")

                        elif len(monitor._monitor_instantiation_time) == bind_variable_index:
                            vypr_output("  Updating existing monitor timestamp sequence")
                            # extend the monitor's timestamp sequence
                            tmp_sequence = list(monitor._monitor_instantiation_time)
                            tmp_sequence.append(datetime.datetime.now())
                            monitor._monitor_instantiation_time = tuple(tmp_sequence)

                    # add the new monitors
                    static_qd_to_monitors[static_qd_index] += new_monitors

            if instrument_type == "path":
                # we've received a path recording instrument
                # append the branching condition to the program path encountered so far for this function
                program_path.append(top_pair[2])

            if instrument_type == "instrument":

                static_qd_indices = top_pair[2]
                atom_index = top_pair[3]
                atom_sub_index = top_pair[4]
                instrumentation_point_db_ids = top_pair[5]
                observation_time = top_pair[6]
                observation_end_time = top_pair[7]
                observed_value = top_pair[8]
                thread_id = top_pair[9]
                try:
                    state_dict = top_pair[10]
                except:
                    # the instrument isn't from a transition measurement
                    state_dict = None

                vypr_output("Consuming data from an instrument in thread %i" % thread_id)

                lists = zip(static_qd_indices, instrumentation_point_db_ids)

                for values in lists:

                    static_qd_index = values[0]
                    instrumentation_point_db_id = values[1]

                    vypr_output("Binding space index : %i" % static_qd_index)
                    vypr_output("Atom index : %i" % atom_index)
                    vypr_output("Atom sub index : %i" % atom_sub_index)
                    vypr_output("Instrumentation point db id : %i" % instrumentation_point_db_id)
                    vypr_output("Observation time : %s" % str(observation_time))
                    vypr_output("Observation end time : %s" % str(observation_end_time))
                    vypr_output("Observed value : %s" % observed_value)
                    vypr_output("State dictionary : %s" % str(state_dict))

                    instrumentation_atom = atoms[atom_index]

                    # update all monitors associated with static_qd_index
                    if static_qd_to_monitors.get(static_qd_index):
                        for (n, monitor) in enumerate(static_qd_to_monitors[static_qd_index]):
                            # checking for a previous observation of the atom is done by the monitor's internal logic
                            monitor.process_atom_and_value(instrumentation_atom, observation_time,
                                                           observation_end_time, observed_value, atom_index,
                                                           atom_sub_index,
                                                           inst_point_id=instrumentation_point_db_id,
                                                           program_path=len(program_path), state_dict=state_dict)

            if instrument_type == "test_status":

                # verified_function = top_pair[1]
                status = top_pair[2]
                start_test_time = top_pair[3]
                end_test_time = top_pair[4]
                test_name = top_pair[6]

                # # We are trying to empty all the test cases in order to terminate the monitoring
                # if test_name in list_test_cases:
                #     list_test_cases.remove(test_name)
                #
                # if len(list_test_cases) == 0:
                #     continue_monitoring = False

                if status.failures:
                    test_result = "Fail"
                elif status.errors:
                    test_result = "Error"
                else:
                    test_result = "Success"

                # build the test data and send it to the verdict server
                test_data = {
                    "test_name": test_name,
                    "test_result": test_result,
                    "start_time": start_test_time.isoformat(),
                    "end_time": end_test_time.isoformat()
                }

                json.loads(
                    requests.post(
                        os.path.join(VERDICT_SERVER_URL, "insert_test_data/"),
                        data=json.dumps(test_data)
                    ).text
                )

        # set the task as done
        verification_obj.consumption_queue.task_done()

        vypr_output("Consumption finished.")

        vypr_output("=" * 100)
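# ----------------------------------------------------------------------------------------------
# Illustrative sketch only - not part of the original module.  It reconstructs, from the index
# unpacking in the consumption loop above, the shapes of the property-specific messages that
# instruments put onto the consumption queue.  _example_property_messages is a hypothetical
# helper and all concrete values (the hash, the function name, the ids, the observed value) are
# placeholders, not values produced by real VyPR instrumentation.
def _example_property_messages():
    import datetime

    property_hash = "abc123"            # placeholder property hash
    function_name = "app.routes.check"  # placeholder fully-qualified function name

    # trigger instrument:
    # (property hash, "trigger", function name, binding space index, bind variable index)
    trigger_message = (property_hash, "trigger", function_name, 0, 0)

    # path instrument:
    # (property hash, "path", function name, branching condition)
    path_message = (property_hash, "path", function_name, "conditional-12")

    # measurement instrument, matching top_pair[2] .. top_pair[10] as read above
    # (the property hash is stripped off before those indices are taken)
    instrument_message = (
        property_hash, "instrument", function_name,
        [0],                       # static_qd_indices
        0,                         # atom_index
        0,                         # atom_sub_index
        [5],                       # instrumentation_point_db_ids
        datetime.datetime.now(),   # observation_time
        datetime.datetime.now(),   # observation_end_time
        1.5,                       # observed_value
        0,                         # thread_id
        None,                      # state_dict (present only for transition measurements)
    )
    return [trigger_message, path_message, instrument_message]
# ----------------------------------------------------------------------------------------------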