def setUpClass(cls):
    """Run the causal grammar over hard-coded parses for clip 'light_9_screen_57_9404'.

    Stores the raw-detection XML on cls.orig_xml and the causal-grammar
    output on cls.causal_xml for the tests to compare.
    """
    causal_forest_orig = causal_grammar_summerdata.causal_forest
    ignoreoverlaps = True  # these really need less-confusing names =/
    withoutoverlaps = not ignoreoverlaps
    suppress_output = not kDebug
    simplify = False  # TODO: I should have a unit test that just makes sure simplifying doesn't break anything
    example = 'light_9_screen_57_9404'
    if simplify:
        causal_grammar_summerdata.causal_forest = causal_grammar.get_simplified_forest_for_example(causal_forest_orig, example)
    print(kActionDetections)
    # fluent_parses, action_parses = causal_grammar.import_summerdata(example,kActionDetections)
    # Frozen copy of the detections for this clip; keys are frame numbers,
    # values map fluent/action names to detection energies.
    fluent_parses = {
        2336: {'door': 0.06674041018366039},
        2350: {'door': 4.511042523263396},
        2333: {'light': 1.2432023175179374},
        2322: {'screen': 4.511042523263396},
        2425: {'light': 4.111633152359644},
        2364: {'screen': 0.015513717831387925},
        2429: {'light': 4.111633152359644},
    }
    action_parses = {
        2361: {'standing_START': {'energy': 1.6e-05, 'agent': 'uuid1'}},
        2442: {'standing_END': {'energy': 1.6e-05, 'agent': 'uuid1'}},
        2341: {'usecomputer_END': {'energy': 0.300451, 'agent': 'uuid1'}},
        2342: {'drink_START': {'energy': 1.787062, 'agent': 'uuid1'},
               'drink_END': {'energy': 1.787062, 'agent': 'uuid1'}},
        2311: {'usecomputer_START': {'energy': 0.300451, 'agent': 'uuid1'}},
    }
    orig_xml = xml_stuff.munge_parses_to_xml(fluent_parses, action_parses)
    sorted_keys = sorted(fluent_parses.keys())  # NOTE(review): unused local, kept for parity with original
    causal_xml = causal_grammar.process_events_and_fluents(
        causal_grammar_summerdata.causal_forest,
        fluent_parses,
        action_parses,
        causal_grammar.kFluentThresholdOnEnergy,
        causal_grammar.kFluentThresholdOffEnergy,
        causal_grammar.kReportingThresholdEnergy,
        suppress_output=suppress_output,
        handle_overlapping_events=withoutoverlaps)
    # uploadComputerResponseToDB(example, fluent_and_action_xml, 'causalgrammar', connType, conn)
    # uploadComputerResponseToDB(example, orig_xml, 'origdata', connType, conn)
    # cls.root = ET.fromstring(xml_string)
    cls.orig_xml = ET.fromstring(orig_xml)
    cls.causal_xml = ET.fromstring(causal_xml)
    if kDebug:
        print(orig_xml)
        print(causal_xml)
def setUpClass(cls):
    """Exercise a hand-built light_on/light_off forest where the light can be
    turned on by FLIPSWITCH or A2, and off by A3 or A4; parses the resulting
    XML into cls.root.
    """
    causal_forest_modified = [
        {"node_type": "root", "symbol_type": "fluent", "symbol": "light_on", "children": [
            # on inertially -- higher chance of occurrence?
            {"node_type": "and", "probability": .25, "children": [
                {"node_type": "leaf", "symbol": "light_on", "symbol_type": "prev_fluent"},
                {"node_type": "leaf", "symbol": "A3", "symbol_type": "nonevent", "timeout": 10},
                {"node_type": "leaf", "symbol": "A4", "symbol_type": "nonevent", "timeout": 10},
            ]},
            # on by causing action
            {"node_type": "and", "probability": .375, "children": [
                {"node_type": "leaf", "symbol_type": "prev_fluent", "symbol": "light_off"},
                {"node_type": "leaf", "symbol_type": "event", "symbol": "FLIPSWITCH", "timeout": 10},
            ]},
            # on by causing action
            {"node_type": "and", "probability": .375, "children": [
                {"node_type": "leaf", "symbol_type": "prev_fluent", "symbol": "light_off"},
                {"node_type": "leaf", "symbol_type": "event", "symbol": "A2", "timeout": 10},
            ]},
        ]},
        {"node_type": "root", "symbol_type": "fluent", "symbol": "light_off", "children": [
            # off inertially
            {"node_type": "and", "probability": .25, "children": [
                {"node_type": "leaf", "symbol": "light_off", "symbol_type": "prev_fluent"},
                {"node_type": "leaf", "symbol": "FLIPSWITCH", "symbol_type": "nonevent", "timeout": 10},
                {"node_type": "leaf", "symbol": "A2", "symbol_type": "nonevent", "timeout": 10},
            ]},
            # off by causing action
            {"node_type": "and", "probability": .375, "children": [
                {"node_type": "leaf", "symbol_type": "prev_fluent", "symbol": "light_on"},
                {"node_type": "leaf", "symbol_type": "event", "symbol": "A3", "timeout": 10},
            ]},
            # off by causing action
            {"node_type": "and", "probability": .375, "children": [
                {"node_type": "leaf", "symbol_type": "prev_fluent", "symbol": "light_on"},
                {"node_type": "leaf", "symbol_type": "event", "symbol": "A4", "timeout": 10},
            ]},
        ]},
    ]
    fluents_simple_light = {
        8: {"light": causal_grammar.probability_to_energy(.9)},  # light turns on at 8
    }
    actions_simple_light = {
        5: {"FLIPSWITCH": {"energy": causal_grammar.probability_to_energy(.9), "agent": ("uuid4")}},
        6: {"A2": {"energy": causal_grammar.probability_to_energy(.6), "agent": ("uuid4")}},
    }
    xml_string = causal_grammar.process_events_and_fluents(
        causal_forest_modified,
        fluents_simple_light,
        actions_simple_light,
        causal_grammar.kFluentThresholdOnEnergy,
        causal_grammar.kFluentThresholdOffEnergy,
        causal_grammar.kReportingThresholdEnergy,
        not kDebug,  # !kDebug: suppress output
        handle_overlapping_events=True)
    cls.root = ET.fromstring(xml_string)
    if kDebug:
        print(xml_string)
def setUpClass(cls):
    """Two consecutive 'light on' reports (frames 6 and 8) against the shared
    causal_forest_light, with a FLIPSWITCH action at frame 5; result parsed
    into cls.root.
    """
    fluents_simple_light = {
        6: {"light": causal_grammar.probability_to_energy(.9)},  # light turns on at 6
        8: {"light": causal_grammar.probability_to_energy(.6)},  # light turns on at 8
    }
    actions_simple_light = {
        5: {"FLIPSWITCH": {"energy": causal_grammar.probability_to_energy(.9), "agent": ("uuid4")}},
    }
    xml_string = causal_grammar.process_events_and_fluents(
        causal_forest_light,
        fluents_simple_light,
        actions_simple_light,
        causal_grammar.kFluentThresholdOnEnergy,
        causal_grammar.kFluentThresholdOffEnergy,
        causal_grammar.kReportingThresholdEnergy,
        not kDebug,  # !kDebug: suppress output
        handle_overlapping_events=True)
    cls.root = ET.fromstring(xml_string)
    if kDebug:
        print(xml_string)
def setUpClass(cls):
    """Two 'thirst off' reports each preceded by a DRINK action, run with
    handle_overlapping_events disabled; result parsed into cls.root.
    """
    fluents_simple_thirst = {
        5: {"thirst": causal_grammar.probability_to_energy(.1)},   # thirst turns off at 5
        20: {"thirst": causal_grammar.probability_to_energy(.1)},  # thirst turns off at 20
    }
    actions_simple_thirst = {
        4: {"DRINK": {"energy": causal_grammar.probability_to_energy(.9), "agent": ("uuid4")}},   # energy = .11
        19: {"DRINK": {"energy": causal_grammar.probability_to_energy(.9), "agent": ("uuid4")}},  # energy = .11
    }
    xml_string = causal_grammar.process_events_and_fluents(
        causal_forest_thirst,
        fluents_simple_thirst,
        actions_simple_thirst,
        causal_grammar.kFluentThresholdOnEnergy,
        causal_grammar.kFluentThresholdOffEnergy,
        causal_grammar.kReportingThresholdEnergy,
        suppress_output=not kDebug,
        handle_overlapping_events=False)
    cls.root = ET.fromstring(xml_string)
    if kDebug:
        print(xml_string)
def setUpClass(cls):
    """Build a screen_on/screen_off forest from the abbreviated tuple syntax
    and run it over a single screen detection at frame 550; stores both the
    parsed tree (cls.root) and the raw string (cls.xml_string).
    """
    # NOTE: this was yanked from causal_grammar_summerdata.py on 2016-04-24
    # Tuple layout follows generate_causal_forest_from_abbreviated_forest's
    # expectations; False marks an unused slot.
    causal_forest_abbreviated = [
        ("root", "fluent", "screen_on", .5, False, [  # SCREEN ON
            ("and", False, False, .35, False, [  # ON INERTIALLY
                ("leaf", "prev_fluent", "screen_on", False, False, False),
                ("leaf", "nonevent", "usecomputer_END", False, 1, False),
            ]),
            ("and", False, False, .3, False, [  # ON CAUSALLY
                ("leaf", "prev_fluent", "screen_off", False, False, False),
                ("leaf", "event", "usecomputer_START", False, 100, False),
            ]),
            ("and", False, False, .35, False, [  # ON CONTINUOUSLY
                ("leaf", "prev_fluent", "screen_on", False, False, False),
                ("leaf", "event", "usecomputer_START", False, 100, False),
            ]),
        ]),
        ("root", "fluent", "screen_off", .5, False, [  # SCREEN OFF
            ("and", False, False, .3, False, [  # OFF CAUSALLY
                ("leaf", "prev_fluent", "screen_on", False, False, False),
                ("leaf", "event", "usecomputer_END", False, 30, False),
            ]),
            # OFF INERTIALLY - due to the screensaver kicking on (so causally changed because of non-action)
            ("and", False, False, .4, False, [
                ("leaf", "prev_fluent", "screen_on", False, False, False),
                ("leaf", "nonevent", "usecomputer_START", False, 100, False),
                ("leaf", "nonevent", "usecomputer_END", False, 1000, False),
            ]),
            # OFF INERTIALLY - no change because didn't start using the computer
            ("and", False, False, .3, False, [
                ("leaf", "prev_fluent", "screen_off", False, False, False),
                ("leaf", "nonevent", "usecomputer_START", False, 200, False),
            ]),
        ]),
    ]
    causal_forest_modified = causal_grammar.generate_causal_forest_from_abbreviated_forest(causal_forest_abbreviated)
    fluents = {
        550: {"screen": 0.969968},
    }
    actions = {
        528: {"usecomputer_END": {"energy": 0, "agent": ("uuid4")}},
        529: {"usecomputer_START": {"energy": 0, "agent": ("uuid4")}},
        558: {"usecomputer_END": {"energy": 0, "agent": ("uuid4")}},
        # 559: {"usecomputer_START": {"energy": 0.183661, "agent": ("uuid4")}},
        # 567: {"usecomputer_END": {"energy": 0.183661, "agent": ("uuid4")}},
    }
    xml_string = causal_grammar.process_events_and_fluents(
        causal_forest_modified,
        fluents,
        actions,
        causal_grammar.kFluentThresholdOnEnergy,
        causal_grammar.kFluentThresholdOffEnergy,
        causal_grammar.kReportingThresholdEnergy,
        not kDebug,  # !kDebug: suppress output
        handle_overlapping_events=True)
    cls.root = ET.fromstring(xml_string)
    cls.xml_string = xml_string
    if kDebug:
        print(xml_string)
def setUpClass(cls):
    """Run the (simplified) summerdata forest over a single door detection for
    clip 'door_1_8145'; stores raw XML on cls.orig_xml and the causal-grammar
    result on cls.causal_xml.
    """
    import causal_grammar_summerdata
    causal_forest_orig = causal_grammar_summerdata.causal_forest
    ignoreoverlaps = False  # these really need less-confusing names =/  NOTE(review): unused here
    suppress_output = not kDebug
    # suppress_output = True
    simplify = True  # TODO: I should have a unit test that just makes sure simplifying doesn't break anything
    example = 'door_1_8145'
    """ Clip:
    annotation: 261 - 314: opens, standing; 314 - 349: closes, standing
    detection: 261 - 314: opens 297, shuts 301; 314-349: closes 322, closes 338, standing
    origdb: 261 - 314: opens, shuts; 314-349: shuts x 2, stand_start, stand_end
    causal: 261 - 314: stays open; 314-349: shuts, stand_start
    """
    if simplify:
        causal_grammar_summerdata.causal_forest = causal_grammar.get_simplified_forest_for_example(causal_forest_orig, example)
    # fluent_parses, action_parses = causal_grammar.import_summerdata(example,kActionDetections)
    # fluent_parses = {297: {'door': 0.13272600275231877}, 338: {'door': 1.4261894413473993}, 322: {'door': 1.0789008419993453}, 301: {'door': 1.1634100434038068}, 313: {'screen': 3.203371638332235}}
    withoutoverlaps = False
    fluent_parses = {297: {'door': 0.13272600275231877}}
    action_parses = {
        296: {"standing_START": {"energy": 0.279449, "agent": "uuid1"}},
        # 297: {"standing_START": {"energy": 0.240229, "agent": "uuid1"}},
        # 326: {"standing_END": {"energy": 0.257439, "agent": "uuid1"}},
        ## 328: {"standing_END": {"energy": 0.338959, "agent": "uuid1"}},
        ## 320: {"standing_START": {"energy": 0.240229, "agent": "uuid1"}},
        ## 346: {"standing_END": {"energy": 0.240229, "agent": "uuid1"}},
        ## 347: {"benddown_START": {"energy": 1.236692, "agent": "uuid1"}},
        ## 349: {"benddown_END": {"energy": 1.236692, "agent": "uuid1"}},
    }
    orig_xml = xml_stuff.munge_parses_to_xml(fluent_parses, action_parses)
    sorted_keys = sorted(fluent_parses.keys())  # NOTE(review): unused local, kept for parity with original
    causal_xml = causal_grammar.process_events_and_fluents(
        causal_grammar_summerdata.causal_forest,
        fluent_parses,
        action_parses,
        causal_grammar.kFluentThresholdOnEnergy,
        causal_grammar.kFluentThresholdOffEnergy,
        causal_grammar.kReportingThresholdEnergy,
        suppress_output=suppress_output,
        handle_overlapping_events=withoutoverlaps)
    # uploadComputerResponseToDB(example, fluent_and_action_xml, 'causalgrammar', connType, conn)
    # uploadComputerResponseToDB(example, orig_xml, 'origdata', connType, conn)
    # cls.root = ET.fromstring(xml_string)
    cls.orig_xml = ET.fromstring(orig_xml)
    cls.causal_xml = ET.fromstring(causal_xml)
    if kDebug:
        print("ORIG XML")
        xml_stuff.printXMLActionsAndFluents(cls.orig_xml)
        print("CAUSAL XML")
        xml_stuff.printXMLActionsAndFluents(cls.causal_xml)
def setUpClass(cls):
    """Minimal fluent_on/fluent_off forest: OFFACTION at frame 1, a 'fluent
    off' report at frame 5; result parsed into cls.root.
    """
    causal_forest = [
        {"node_type": "root", "symbol_type": "fluent", "symbol": "fluent_on", "children": [
            # on inertially - not turned off
            {"node_type": "and", "probability": .6, "children": [
                {"node_type": "leaf", "symbol": "fluent_on", "symbol_type": "prev_fluent"},
                {"node_type": "leaf", "symbol": "OFFACTION", "symbol_type": "nonevent", "timeout": 3},
            ]},
            # on by causing action
            {"node_type": "and", "probability": .4, "children": [
                {"node_type": "leaf", "symbol_type": "prev_fluent", "symbol": "fluent_off"},
                {"node_type": "leaf", "symbol_type": "event", "symbol": "ONACTION", "timeout": 1},
            ]},
        ]},
        {"node_type": "root", "symbol_type": "fluent", "symbol": "fluent_off", "children": [
            # off inertially  #energy = .51
            {"node_type": "and", "probability": .6, "children": [
                {"node_type": "leaf", "symbol": "fluent_off", "symbol_type": "prev_fluent"},
                {"node_type": "leaf", "symbol": "OFFACTION", "symbol_type": "nonevent", "timeout": 3},
            ]},
            # off by causing action A1
            {"node_type": "and", "probability": .4, "children": [
                {"node_type": "leaf", "symbol_type": "prev_fluent", "symbol": "fluent_on"},
                {"node_type": "leaf", "symbol_type": "event", "symbol": "OFFACTION", "timeout": 1},
            ]},
        ]},
    ]
    fluents_simple = {
        5: {"fluent": causal_grammar.probability_to_energy(.9)},  # light turns off at 5 -- energy = .51
    }
    actions_simple = {
        1: {"OFFACTION": {"energy": causal_grammar.probability_to_energy(.9), "agent": ("uuid4")}},  # energy = .11
    }
    xml_string = causal_grammar.process_events_and_fluents(
        causal_forest,
        fluents_simple,
        actions_simple,
        causal_grammar.kFluentThresholdOnEnergy,
        causal_grammar.kFluentThresholdOffEnergy,
        causal_grammar.kReportingThresholdEnergy,
        not kDebug,  # !kDebug: suppress output
        handle_overlapping_events=True)
    cls.root = ET.fromstring(xml_string)
    if kDebug:
        print(xml_string)
# Batch script: run the causal grammar over each example and upload both the
# causal-grammar result and the raw-detection result to the DB.
# NOTE(review): `examples`, `completed`, `also_completed`, `oject_failed`,
# `also_oject_failed`, `import_failed`, `conn`, `munge_parses_to_xml` and
# `uploadComputerResponseToDB` are presumably defined earlier in this file —
# confirm before running this section in isolation.
causal_grammar.kFluentThresholdOffEnergy = 0.6972
#examples = [ "door_1_8145", ]
#examples = [ "door_2_8145", "door_5_8145", "door_6_8145", "door_7_8145", "door_9_8145", ]
#raise("MAYBE DELETE 'computer' FROM RESULTS BEFORE RERUNNING")
for example in examples:
    try:
        fluent_parses, action_parses = causal_grammar.import_summerdata(example, 'CVPR2012_reverse_slidingwindow_action_detection_logspace')
        import pprint
        pp = pprint.PrettyPrinter(indent=1)
        pp.pprint(fluent_parses)
        pp.pprint(action_parses)
    except ImportError:
        import_failed.append(example)
        continue
    orig_xml = munge_parses_to_xml(fluent_parses, action_parses)
    fluent_and_action_xml = causal_grammar.process_events_and_fluents(
        causal_grammar_summerdata.causal_forest,
        fluent_parses,
        action_parses,
        causal_grammar.kFluentThresholdOnEnergy,
        causal_grammar.kFluentThresholdOffEnergy,
        causal_grammar.kReportingThresholdEnergy,
        True)  # last true: suppress the xml output
    print(orig_xml.toprettyxml(indent="\t"))
    print(fluent_and_action_xml.toprettyxml(indent="\t"))
    # BUG FIX: the failure lists were swapped relative to the success lists —
    # a failed 'causalgrammar' upload was appended to also_oject_failed and a
    # failed 'origdata' upload to oject_failed, so the summary printed below
    # misattributed which upload had failed. Now 'causalgrammar' pairs with
    # completed/oject_failed and 'origdata' with also_completed/also_oject_failed.
    if uploadComputerResponseToDB(example, fluent_and_action_xml, 'causalgrammar', conn):
        completed.append(example)
    else:
        oject_failed.append(example)
    if uploadComputerResponseToDB(example, orig_xml, 'origdata', conn):
        also_completed.append(example)
    else:
        also_oject_failed.append(example)
print("COMPLETED: {}".format(completed))
print("ALSO COMPLETED: {}".format(also_completed))
print("SKIPPED DUE TO OJECT: {}".format(oject_failed))
print("ALSO SKIPPED DUE TO OJECT: {}".format(also_oject_failed))
{ "node_type": "and", "probability": .2, "children": [ { "node_type": "leaf", "symbol_type": "prev_fluent", "symbol": "dooropen_on" }, { "node_type": "leaf", "symbol_type": "event", "symbol": "door_close_inside", "timeout": 20 }, ] }, ], }, ] # TRUE NEXT LINE TO LOAD ORIGINAL MATLAB CSV FILE FOR OFFICE SCENE, JUST PULLING LIGHT SWITCH AND DOOR INFO, OVERRIDING TRIVIAL fluent_parses AND temporal_parses ABOVE if False: fluent_maps = {"Light_Status": "light", "Door_Status": "dooropen"} event_maps = {"Touch_Switch":"E1", "Close_Door_Inside": "door_close_inside", "Close_Door_Outside": "door_close_outside", "Open_Door_Inside":"door_open_inside", "Open_Door_Outside":"door_open_outside"} fluent_parses, temporal_parses = causal_grammar.import_csv("results/Exp2_output_data.txt",fluent_maps,event_maps) print "--PREPPING DEMO--" import pprint pp = pprint.PrettyPrinter(indent=1) print("causal_forest") pp.pprint(causal_forest) causal_grammar.hr() print("fluent_parses") pp.pprint(fluent_parses) causal_grammar.hr() print("temporal_parses") pp.pprint(temporal_parses) causal_grammar.hr() print "--RUNNING DEMO--" causal_grammar.process_events_and_fluents(causal_forest, fluent_parses, temporal_parses, causal_grammar.kFluentThresholdOnEnergy, causal_grammar.kFluentThresholdOffEnergy, causal_grammar.kReportingThresholdEnergy) print "--DEMO COMPLETE--"
# dict keys are frame numbers
# frames are only reported when a fluent changes, and only for the fluent(s) that changed; fluents are considered to be on or off ("light" is treated as "light_on", and then "light_off" is calculated from that internally, for instance)
import causal_grammar

### TODO: deal with trash_6_phone_11_screen_22 because it has timer/jump
#fluent_parses, temporal_parses = causal_grammar.import_summerdata('door_13_light_3_9406','CVPR2012_reverse_slidingwindow_action_detection')
fluent_parses, temporal_parses = causal_grammar.import_summerdata('door_1_8145', 'CVPR2012_reverse_slidingwindow_action_detection')

#import causal_grammar_summerdata_justdoor as causal_grammar_summerdata # sets up causal_forest
import causal_grammar_summerdata as causal_grammar_summerdata  # sets up causal_forest

"""
for tree in causal_grammar_hallway.causal_forest:
    for foo in causal_grammar.generate_parses(tree):
        print causal_grammar.make_tree_like_lisp(foo)
import pprint
pp = pprint.PrettyPrinter(indent=1)
pp.pprint(temporal_parses)
causal_grammar.hr()
pp.pprint(causal_grammar_hallway.causal_forest)
causal_grammar.hr()
"""

# These thresholds tuned for this fluent data because it's not "flipping between on and off", it's
# flipping "did transition closed to on" and "didn't transition closed to on"
causal_grammar.kFluentThresholdOnEnergy = 0.6892
causal_grammar.kFluentThresholdOffEnergy = 0.6972
causal_grammar.process_events_and_fluents(
    causal_grammar_summerdata.causal_forest,
    fluent_parses,
    temporal_parses,
    causal_grammar.kFluentThresholdOnEnergy,
    causal_grammar.kFluentThresholdOffEnergy,
    causal_grammar.kReportingThresholdEnergy)
def processAndUploadExamples(directory, examples, conn, simplify=False, require_consistency=True):
    """Run the causal grammar over each example in `examples` (detections read
    from `directory`) and upload five response variants per example to the DB.

    Params:
        directory: detection-results directory passed to import_summerdata
        examples: iterable of example/clip names
        conn: open DB connection
        simplify: if True, prune the summerdata forest per example first
        require_consistency: forwarded to process_events_and_fluents
    """
    print("===========")
    print("UPLOADING")
    print("===========")
    completed = []
    also_completed = []        # NOTE(review): never appended to in this function
    oject_failed = []
    also_oject_failed = []     # NOTE(review): never appended to in this function
    import_failed = []
    import causal_grammar
    import causal_grammar_summerdata  # sets up causal_forest
    causal_forest_orig = causal_grammar_summerdata.causal_forest
    #raise("MAYBE DELETE 'computer' FROM RESULTS BEFORE RE-RUNNING")
    for example in examples:
        print("---------\nEXAMPLE: {}\n-------".format(example))
        """ -s (simplify) is broken at the moment, on the below example, so ... this can help """
        #if example == "doorlock_2_8145":
        #    suppress_output = False
        if simplify:
            causal_grammar_summerdata.causal_forest = causal_grammar.get_simplified_forest_for_example(causal_forest_orig, example)
            print("... simplified to {}".format(", ".join(x['symbol'] for x in causal_grammar_summerdata.causal_forest)))
        try:
            fluent_parses, temporal_parses = causal_grammar.import_summerdata(example, directory)
            import pprint
            pp = pprint.PrettyPrinter(indent=1)
            print(" fluent parses ")
            pp.pprint(fluent_parses)
            print("")
            print(" action parses ")
            pp.pprint(temporal_parses)
            print("")
        except ImportError as ie:
            print("IMPORT FAILED: {}".format(ie))
            import_failed.append(example)
            continue
        orig_xml = xml_stuff.munge_parses_to_xml(fluent_parses, temporal_parses)
        fluent_and_action_xml = causal_grammar.process_events_and_fluents(
            causal_grammar_summerdata.causal_forest,
            fluent_parses,
            temporal_parses,
            causal_grammar.kFluentThresholdOnEnergy,
            causal_grammar.kFluentThresholdOffEnergy,
            causal_grammar.kReportingThresholdEnergy,
            suppress_output=suppress_output,            # NOTE(review): not defined in this function; presumably a module global — confirm
            handle_overlapping_events=withoutoverlaps,  # NOTE(review): likewise undefined locally — confirm
            require_consistency=require_consistency)
        if debugQuery:  # NOTE(review): presumably a module-level flag
            print("_____ ORIG FLUENT AND ACTION PARSES _____")
            #print minidom.parseString(orig_xml).toprettyxml(indent="\t")
            xml_stuff.printXMLActionsAndFluents(ET.fromstring(orig_xml))
            print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
            print("_____ AFTER CAUSAL GRAMMAR _____")
            #print minidom.parseString(fluent_and_action_xml).toprettyxml(indent="\t")
            xml_stuff.printXMLActionsAndFluents(ET.fromstring(fluent_and_action_xml))
            print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
        # Upload the causal-grammar result under two tags and the raw
        # detections under three; same order and output as the original
        # copy-pasted stanzas.
        uploads = (
            ('causalgrammar', fluent_and_action_xml),
            ('causalsmrt', fluent_and_action_xml),
            ('origdata', orig_xml),
            ('origsmrt', orig_xml),
            ('random', orig_xml),
        )
        for tag, payload in uploads:
            print("------> {} <------".format(tag))
            label = "{}-{}".format(example, tag)
            if uploadComputerResponseToDB(example, payload, tag, connType, conn):
                completed.append(label)
            else:
                oject_failed.append(label)
        gc.collect()
    print("COMPLETED: {}".format(completed))
    if oject_failed:
        print("SKIPPED DUE TO OBJECT: {}".format(oject_failed))
    if import_failed:
        print("SKIPPED DUE TO IMPORT: {}".format(import_failed))
    print("....................")
    print("....................")
def buildHeatmapForExample(exampleName, prefix, conn=False): debugOn = False parsedExampleName = exampleName.split('_') # -------------------- STAGE 1: READ FROM DB -------------------------- # for db lookup, remove "room" at end, and munge _'s away exampleNameForDB = "".join(parsedExampleName[:-1]) # for cutpoints, just remove "room" at end exampleNameForCutpoints = "_".join(parsedExampleName[:-1]) m = hashlib.md5(exampleNameForDB) tableName = TBLPFX + m.hexdigest() leaveconn = True human_dups = dict() if not conn: leaveconn = False conn = MySQLdb.connect(host=DBHOST, user=DBUSER, passwd=DBPASS, db=DBNAME) cursor = conn.cursor() try: cursor.execute("SHOW COLUMNS FROM {}".format(tableName)) except (MySQLdb.ProgrammingError): print "TABLE {} not found for example {}".format( tableName, exampleNameForDB) return allColumns = cursor.fetchall() sqlStatement = "SELECT name, " for singleColumn in allColumns: columnName = singleColumn[0] if "act_made_call" not in columnName and "act_unlock" not in columnName and columnName.startswith( prefix): sqlStatement += columnName + ", " else: pass #print singleColumn notNullColumn = allColumns[len(allColumns) - 3] # the last data column (hopefully) sqlStatement = sqlStatement[:-2] sqlStatement += " FROM " + tableName + " WHERE " + notNullColumn[ 0] + " IS NOT NULL" cursor.execute(sqlStatement) headers = [i[0] for i in cursor.description] # SELECT name, screen_192_off_on, screen_192_on_off, screen_192_on, screen_192_off, screen_348_off_on, screen_348_on_off, screen_348_on, screen_348_off, screen_action_192_act_mousekeyboard, screen_action_192_act_no_mousekeyboard, screen_action_348_act_mousekeyboard, screen_action_348_act_no_mousekeyboard FROM cvpr2012_7f05529dec6a03d3a459fc2ee1969f7f WHERE screen_action_348_act_no_mousekeyboard IS NOT NULL csv_rows = [] first = True frames_sorted = sorted( int(x) for x in videoCutpoints.cutpoints[exampleNameForCutpoints].keys()) start_of_frames = frames_sorted[0] end_of_frames = 
int(videoCutpoints.cutpoints[exampleNameForCutpoints][str( frames_sorted[-1])]) impulse_width_2 = (end_of_frames - start_of_frames) / (kImpulseDivision / 2 ) # width / 2 action_width = impulse_width_2 * 2 for row in cursor: fluent_matrix = [] action_matrix = [] fluents = False actions = False name = row[0] if name in human_dups: human_dups[name] += 1 name = "{}_{}".format(name, human_dups[name]) else: human_dups[name] = 1 xindex = 1 while xindex < len(row): (root, actiontest, rest) = headers[xindex].split("_", 2) if actiontest == TYPE_ACTION: actions = True # a yes action at this point means we "trigger" it for 1 frame in the middle of our framecount, give or take (start_frame, _, action) = rest.split( "_" ) # assumes "act" before "act_no"; assumes only "act" and "act_no"; TODO: what else exists in the database??? start_frame = int(start_frame) end_frame = int( videoCutpoints.cutpoints[exampleNameForCutpoints][str( start_frame)]) frame_diff = end_frame - start_frame zeros = [ '0', ] * (frame_diff) choices = row[xindex:xindex + 2] sum_choices = float(sum(choices)) choices = [a / sum_choices for a in choices] triggered = choices[0] >= choices[1] if triggered: trigger_start = int(frame_diff / 2) - int(action_width / 2) zeros[trigger_start:trigger_start + action_width] = [ choices[0], ] * action_width action_matrix.extend(zeros) xindex += 2 else: fluents = True start_frame = int(actiontest) end_frame = int( videoCutpoints.cutpoints[exampleNameForCutpoints][str( start_frame)]) frame_diff = end_frame - start_frame # note this throws preference to on, then off, then onoff, then offon # just because we need some simple tie-breaking way # TODO: -1 class to represent unknown/unsure? choices = row[xindex:xindex + 4] sum_choices = float(sum(choices)) if sum_choices == 0: choices = [ .25, .25, .25, .25 ] # TODO: is this a valid something? or a sign of a bug? 
else: choices = [a / sum_choices for a in choices] offon = choices[0] onoff = choices[1] on = choices[2] off = choices[3] if on == max(offon, onoff, on, off): result = [ on, ] * (frame_diff) elif off == max(offon, onoff, on, off): result = [ 1 - off, ] * (frame_diff) elif onoff == max(offon, onoff, on, off): diff_2 = frame_diff / 2 result = [ onoff, ] * (diff_2) + [ 1 - onoff, ] * (frame_diff - diff_2) else: # it's offon, then diff_2 = frame_diff / 2 result = [ 1 - offon, ] * (diff_2) + [ offon, ] * (frame_diff - diff_2) fluent_matrix.extend(result) xindex += 4 if first: row = [ "NAME", ] row.extend(xrange(start_of_frames, end_of_frames)) csv_rows.append(",".join([str(x) for x in row])) first = False if fluents: row = [ name + " " + prefix + " on", ] row.extend([str(x) for x in fluent_matrix]) csv_rows.append(",".join(row)) if actions: row = [ name + " " + action, ] row.extend([str(x) for x in action_matrix]) csv_rows.append(",".join(row)) cursor.close() if not leaveconn: conn.close() # -------------------- STAGE 2: READ FROM PARSE -------------------------- # for screen_1_lounge, that's CVPR2012_fluent_result/screen_on_off_fluent_results.txt, CVPR2012_fluent_result/screen_off_on_fluent_results.txt, and results/CVPR2012_reverse_slidingwindow_action_detection_logspace/screen_1_lounge.py # thankfully, import_summerdata uses parsingSummerActionAndFluentOutput's readFluentResults and readActionResults fluent_parses, action_parses = causal_grammar.import_summerdata( exampleName, kActionDetections) # displaying fluent changes as _impulses_ around their detection, # and 50% everywhere else fluent_matrix = [ 0.5, ] * (end_of_frames - start_of_frames) fluent_matrix_len = len(fluent_matrix) for frame in sorted(x for x in fluent_parses.keys() if x < end_of_frames): # prefix is, for example, 'screen', the root of the tree we are looking at if prefix in fluent_parses[frame]: energy = fluent_parses[frame][prefix] probability = causal_grammar.energy_to_probability(energy) 
offset_left = 0 if int(frame) - start_of_frames < impulse_width_2: offset_left = start_of_frames - int(frame) offset_right = 0 if int(frame) + impulse_width_2 > end_of_frames: offset_right = end_of_frames - int(frame) if debugOn and (offset_left != 0 or offset_right != 0): print("OFFSETS: {} and {} at frame {}".format( offset_left, offset_right, frame)) result = [ 1 - probability, ] * (impulse_width_2 - offset_left) + [ probability, ] * (impulse_width_2 - offset_right) start_replacement = int( frame) - start_of_frames - impulse_width_2 - offset_left if debugOn: print("START FRAME: {}; CURRENT FRAME: {}".format( start_of_frames, int(frame))) print("REPLACING {} to {} with {}".format( start_replacement, start_replacement + len(result), result)) fluent_matrix[start_replacement:start_replacement + len(result)] = result row = [ 'ORIG' + " " + prefix + " on", ] row.extend([str(x) for x in fluent_matrix]) csv_rows.append(",".join(row)) if len(fluent_matrix) != fluent_matrix_len: print("ERROR: fluent_matrix grew in replacement") raise SystemExit(0) #{1016: {'usecomputer_END': {'energy': 0.0, 'agent': 'uuid1'}}, 733: {'usecomputer_END': {'energy': 0.0, 'agent': 'uuid1'}}, 388: {'usecomputer_END': {'energy': 0.0, 'agent': 'uuid1'}}, 389: {'usecomputer_START': {'energy': 0.0, 'agent': 'uuid1'}}, 582: {'usecomputer_START': {'energy': 0.001096, 'agent': 'uuid1'}}, 650: {'usecomputer_END': {'energy': 0.0, 'agent': 'uuid1'}}, 651: {'usecomputer_START': {'energy': 6e-06, 'agent': 'uuid1'}}, 525: {'usecomputer_END': {'energy': 3e-06, 'agent': 'uuid1'}}, 526: {'usecomputer_START': {'energy': -0.0, 'agent': 'uuid1'}}, 889: {'usecomputer_START': {'energy': 0.0, 'agent': 'uuid1'}}, 791: {'usecomputer_END': {'energy': -0.0, 'agent': 'uuid1'}}, 593: {'usecomputer_END': {'energy': 0.001096, 'agent': 'uuid1'}}, 594: {'usecomputer_START': {'en actionPairings = kActionPairings[prefix] for actionPairing in actionPairings: last_frame = start_of_frames action_matrix = [] actions = False 
# ---- tail of STAGE 2: finish the ORIG action matrix for this actionPairing ----
# Builds a per-frame probability row from the raw action detections:
# actionPairing[0] is the START action, actionPairing[1] the END action.
# `prefix` is, for example, 'screen', the root of the tree we are looking at.
last_probability = 0
for frame in sorted(x for x in action_parses.keys() if x < end_of_frames):
    if actionPairing[0] in action_parses[frame]:
        # START detection: fill 0s up to here, remember its probability
        energy = action_parses[frame][actionPairing[0]]['energy']
        last_probability = causal_grammar.energy_to_probability(energy)
        frame_diff = frame - last_frame
        result = [0., ] * frame_diff
        action_matrix.extend(result)
        actions = True
        last_frame = frame
    elif actionPairing[1] in action_parses[frame]:
        # END detection: fill the START probability up to here, then reset
        frame_diff = frame - last_frame
        result = [last_probability, ] * frame_diff  # we know start and stop are symmetric
        action_matrix.extend(result)
        last_probability = 0
        actions = True
        last_frame = frame
if not actions:
    # we've never seen anything! 0% all the way!
    result = [0., ] * (end_of_frames - start_of_frames)
    action_matrix.extend(result)
elif frame < end_of_frames:
    # NOTE(review): `frame` here is the last loop value; safe because
    # `actions` True implies the loop ran at least once.
    if debugOn:
        print("LAST FRAME BEFORE END OF FRAMES: filling {} from {} -> {}".format(last_probability, last_frame, end_of_frames))
    result = [last_probability, ] * (end_of_frames - last_frame)
    action_matrix.extend(result)
row = ['ORIG' + " " + actionPairing[0].split("_")[0], ]
row.extend([str(x) for x in action_matrix])
csv_rows.append(",".join(row))
# -------------------- STAGE 3: READ CAUSALGRAMMAR RESULTS --------------------------
fluent_and_action_xml_string = causal_grammar.process_events_and_fluents(
    causal_grammar_summerdata.causal_forest,
    fluent_parses,
    action_parses,
    causal_grammar.kFluentThresholdOnEnergy,
    causal_grammar.kFluentThresholdOffEnergy,
    causal_grammar.kReportingThresholdEnergy,
    True)  # last True: suppress the xml output
# Example result:
# <temporal><fluent_changes><fluent_change energy="17.6095287144" fluent="screen" frame="0" new_value="off" /><fluent_change energy="16.5108710247" fluent="screen" frame="328" new_value="on" old_value="off" /></fluent_changes></temporal>
fluent_and_action_xml = ET.fromstring(fluent_and_action_xml_string)
fluent_changes = sorted(fluent_and_action_xml.findall('.//fluent_change'), key=lambda elem: int(elem.attrib['frame']))
events = sorted(fluent_and_action_xml.findall('.//event'), key=lambda elem: int(elem.attrib['frame']))
# Build the CAUSAL fluent row: 1.0 while the fluent is in its "on" value,
# 0.0 while "off", filling frame ranges between reported changes.
last_frame = start_of_frames
fluent_matrix = []
fluents = False
last_probability = 0
thison = kOnsOffs[prefix][0]
thisoff = kOnsOffs[prefix][1]
for fluent in fluent_changes:
    # ignoring energies at the moment because they are the sum of all the
    # energies for this 'chain' as compared to other chains, and not for the
    # individual nodes (oops TODO?)
    if prefix == fluent.attrib['fluent']:
        # xml_stuff.printXMLFluent(fluent)
        frame = int(fluent.attrib['frame'])
        new_value = fluent.attrib['new_value']
        if 'old_value' in fluent.attrib:
            fluents = True  # this counts as having an answer if we needed one
            old_value = fluent.attrib['old_value']
            if old_value == thison:
                last_probability = 1.
            else:
                last_probability = 0.
        if new_value == thison:
            probability = 1.
        else:
            probability = 0.
        if frame <= start_of_frames:
            # we're not there yet :)
            last_probability = probability
            fluents = True  # and this cements our answer
            continue
        if int(frame) > end_of_frames:
            # just wrap up our last value
            frame = end_of_frames
            frame_diff = end_of_frames - last_frame
            result = [last_probability, ] * frame_diff
            fluent_matrix.extend(result)
            last_frame = end_of_frames
            break
        frame_diff = frame - last_frame
        last_frame = frame
        if not fluents:
            # let's fill in our previous state with the opposite to this one...
            # because we take in our fluents as "changes" to the fluent!
            # alternately, it might be fair to say 50/50 on this one...
            last_probability = probability
            result = [1 - last_probability, ] * frame_diff
        else:
            # we've seen something before, that's what we fill up to this frame
            result = [last_probability, ] * frame_diff
            last_probability = probability
        fluent_matrix.extend(result)
        fluents = True
if not fluents:
    # we've never seen anything! 50% all the way!
    result = [0.5, ] * (end_of_frames - start_of_frames)
    fluent_matrix.extend(result)
elif last_frame < end_of_frames:
    result = [last_probability, ] * (end_of_frames - last_frame)
    fluent_matrix.extend(result)
row = ['CAUSAL ' + prefix + " on", ]
row.extend([str(x) for x in fluent_matrix])
csv_rows.append(",".join(row))
# Build the CAUSAL action rows. Example events XML:
# <temporal><actions><event action="usecomputer_START" energy="33.4413967566" frame="658" /><event action="usecomputer_START" energy="33.4271617566" frame="1000" /><event action="usecomputer_START" energy="52.6460829824" frame="1025" /><event action="usecomputer_END" energy="52.2956748688" frame="1232" /></actions></temporal>
actionPairings = kActionPairings[prefix]
for actionPairing in actionPairings:
    last_frame = start_of_frames
    action_matrix = []
    actions = False
    last_probability = 0
    for event in events:
        result = []
        # ignoring energies at the moment because they are the sum of all the
        # energies for this 'chain' as compared to other chains, and not for
        # the individual nodes (oops TODO?)
        frame = int(event.attrib['frame'])
        frame_diff = frame - last_frame
        if actionPairing[0] == event.attrib[TYPE_ACTION]:
            # START event: everything up to here was "not happening" (0s)
            if debugOn:
                xml_stuff.printXMLAction(event)
            last_probability = 1.
            if frame <= start_of_frames:
                if debugOn:
                    print("PRE-START, SETTING PREV_PROB: 1")
                continue
            if frame > end_of_frames:
                frame = end_of_frames
                frame_diff = frame - last_frame
            result = [0., ] * frame_diff
        elif actionPairing[1] == event.attrib[TYPE_ACTION]:
            # END event: everything up to here was "happening" (1s)
            if debugOn:
                xml_stuff.printXMLAction(event)
            last_probability = 0.
            if frame <= start_of_frames:
                if debugOn:
                    print("PRE-START, SETTING PREV_PROB: 0")
                continue
            if frame > end_of_frames:
                frame = end_of_frames
                frame_diff = frame - last_frame
            result = [1., ] * frame_diff  # we know start and stop are symmetric
        if len(result) > 0:
            actions = True
            action_matrix.extend(result)
            last_frame = frame
        if frame >= end_of_frames:
            break
    if not actions:
        # we've never seen anything! 0% all the way!
        result = [last_probability, ] * (end_of_frames - start_of_frames)
        action_matrix.extend(result)
    elif last_frame < end_of_frames:
        if debugOn:
            print("LAST FRAME BEFORE END OF FRAMES: filling {} from {} -> {}".format(last_probability, last_frame, end_of_frames))
        result = [last_probability, ] * (end_of_frames - last_frame)
        action_matrix.extend(result)
    row = ['CAUSAL' + " " + actionPairing[0].split("_")[0], ]
    row.extend([str(x) for x in action_matrix])
    csv_rows.append(",".join(row))
# -------------------- STAGE 4: READ GROUND TRUTH --------------------------
# Ground-truth CSV format, e.g. screen_1_lounge.csv:
# 192, 338, screen, OFF
# 339, 985, screen, ON
# 986, 1298, screen, OFF
# 284, 986, screen_act, usecomputer
truthcsv_name = os.path.join(kTruthDir, ".".join((exampleName, "csv")))
if os.path.isdir(kTruthDir) and os.path.exists(truthcsv_name):
    # first, fluents
    result = [0., ] * (end_of_frames - start_of_frames)
    with open(truthcsv_name, 'rb') as truthcsv_file:
        truthcsv = csv.reader(truthcsv_file, skipinitialspace=True)
        for line in truthcsv:
            (start, end, key, value) = line
            if key == prefix:
                if value == "OFF":
                    continue
                # clamp the truth interval to [start_of_frames, end_of_frames)
                start = max(0, int(start) - start_of_frames)
                end = min(int(end) - start_of_frames, end_of_frames - start_of_frames)
                if end < 0:
                    continue
                result[start:end] = [1., ] * (end - start)
    row = ['TRUTH ' + prefix + ' on', ]
    row.extend([str(x) for x in result])
    csv_rows.append(",".join(row))
    # and then, actions!
    actionPairings = kActionPairings[prefix]
    prefix_act = "_".join((prefix, "act"))
    for actionPairing in actionPairings:
        result = [0., ] * (end_of_frames - start_of_frames)
        with open(truthcsv_name, 'rb') as truthcsv_file:
            truthcsv = csv.reader(truthcsv_file, skipinitialspace=True)
            for line in truthcsv:
                (start, end, key, value) = line
                if key == prefix_act and actionPairing[0].startswith(value):
                    start = max(0, int(start) - start_of_frames)
                    end = min(int(end) - start_of_frames, end_of_frames - start_of_frames)
                    if end < 0:
                        continue
                    result[start:end] = [1., ] * (end - start)
        row = ['TRUTH ' + actionPairing[0].split("_")[0], ]
        row.extend([str(x) for x in result])
        csv_rows.append(",".join(row))
if not debugOn:
    print("\n".join(csv_rows))
# ---- driver: run the causal grammar over each requested example ----
# Examples whose detection data cannot be imported are collected in
# `import_failed` and reported at the end instead of aborting the run.
import_failed = list()
causal_forest_orig = causal_grammar_summerdata.causal_forest
withoutoverlaps = not args.ignoreoverlaps
for example in args.example:
    try:
        if args.simplify:
            # swap in a forest reduced to just this example's fluents
            causal_grammar_summerdata.causal_forest = causal_grammar.get_simplified_forest_for_example(
                causal_forest_orig, example)
        fluent_parses, temporal_parses = causal_grammar.import_summerdata(
            example, kActionDetections)
        import pprint
        pp = pprint.PrettyPrinter(indent=1)
        pp.pprint(fluent_parses)
        pp.pprint(temporal_parses)
    except ImportError as ie:
        print("IMPORT FAILED: {}".format(ie))
        import_failed.append(example)
        continue
    fluent_and_action_xml = causal_grammar.process_events_and_fluents(
        causal_grammar_summerdata.causal_forest,
        fluent_parses,
        temporal_parses,
        causal_grammar.kFluentThresholdOnEnergy,
        causal_grammar.kFluentThresholdOffEnergy,
        causal_grammar.kReportingThresholdEnergy,
        suppress_output=False,
        handle_overlapping_events=withoutoverlaps)
if len(import_failed):
    print("FAILED IMPORTING: {}".format(", ".join(import_failed)))
# normalized from bare `print groupings` for consistency with the other
# print(...) calls; NOTE(review): `groupings` is not defined in this chunk —
# presumably assigned earlier in the file, verify.
print(groupings)