def set_model(sem_input_file, sem_name, sem_input_macro=True, semantics_name='TCG_semantics_main', grammar_name='TCG_grammar_VB_main', model_params={}):
    """
    Sets up a TCG production model.

    Args:
        - sem_input_file (STR): Semantic input file name.
        - sem_name (STR): Semantic input name.
        - sem_input_macro (BOOL): True if the input is an ISRF macro.
        - semantics_name (STR): Name of the semantic file containing the perceptual, world, and conceptualization knowledge.
        - grammar_name (STR): Name of the grammar file to use.
        - model_params (dict): Dictionary defining the model parameters (if different from the defaults).

    Returns:
        (model, semantic input generator)
    """
    SEM_INPUT_PATH = './data/sem_inputs/'

    model = TCG_production_system(grammar_name=grammar_name, semantics_name=semantics_name)
    if model_params:
        model.update_params(model_params)

    # Set up semantic input generator
    conceptLTM = model.schemas['Concept_LTM']
    if not(sem_input_macro):
        sem_inputs = TCG_LOADER.load_sem_input(sem_input_file, SEM_INPUT_PATH)
        sem_input = {sem_name: sem_inputs[sem_name]}
        sem_gen = ls.SEM_GENERATOR(sem_input, conceptLTM, speed_param=1)
    if sem_input_macro:
        sem_inputs = TCG_LOADER.load_sem_macro(sem_name, sem_input_file, SEM_INPUT_PATH)
        sem_gen = ls.SEM_GENERATOR(sem_inputs, conceptLTM, speed_param=1)

    return (model, sem_gen)
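# Usage sketch (a minimal example; 'sem_inputs.json' and 'blue_woman_kick_man' are
# the non-macro input file/name used in test_run() -- substitute your own input):
#
#   (model, sem_gen) = set_model('sem_inputs.json', 'blue_woman_kick_man', sem_input_macro=False)
#   generator = sem_gen.sem_generator('blue_woman_kick_man')
#   (sem_insts, next_time, prop) = generator.next()  # initial semantic instances
#
# The model is then driven with model.set_input(sem_insts) / model.update() at each
# time step, following the loop pattern in test_run().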
def set_inputs(model, input_name, sem_input_file='diagnostic.json', sem_input_macro=False, speed_param=10):
    """
    Sets up a TCG ISRF input generator for a TCG production model.

    Args:
        - model (): Model to which the inputs will be sent.
        - input_name (STR): Name of the input to be used. For non-macro inputs, set to 'ALL' to load all the inputs from the file.
        - sem_input_file (STR): Semantic input file name.
        - sem_input_macro (BOOL): True if the input is an ISRF macro.
        - speed_param (INT): Multiplier of the rate defined in the ISRF input (by default the ISRF rate is 1).

    Returns:
        - input SEM_GENERATOR object.
    """
    SEM_INPUT_PATH = './data/sem_inputs/'
    conceptLTM = model.schemas['Concept_LTM']

    if not(sem_input_macro):
        sem_inputs = TCG_LOADER.load_sem_input(sem_input_file, SEM_INPUT_PATH)
        if input_name == 'ALL':
            sem_gen = ls.SEM_GENERATOR(sem_inputs, conceptLTM, speed_param=speed_param, is_macro=sem_input_macro)
            sem_gen.ground_truths = TCG_LOADER.load_ground_truths(sem_input_file, SEM_INPUT_PATH)
        else:
            sem_input = {input_name: sem_inputs[input_name]}
            sem_gen = ls.SEM_GENERATOR(sem_input, conceptLTM, speed_param=speed_param, is_macro=sem_input_macro)
            ground_truths = TCG_LOADER.load_ground_truths(sem_input_file, SEM_INPUT_PATH)
            sem_gen.ground_truths = ground_truths.get(input_name, None)

    if sem_input_macro:
        sem_inputs = TCG_LOADER.load_sem_macro(input_name, sem_input_file, SEM_INPUT_PATH)
        sem_gen = ls.SEM_GENERATOR(sem_inputs, conceptLTM, speed_param=speed_param, is_macro=sem_input_macro)
        ground_truths = TCG_LOADER.load_ground_truths(sem_input_file, SEM_INPUT_PATH)
        sem_gen.ground_truths = ground_truths.get(input_name, None)

    return sem_gen
def set_inputs(model, input_name, input_file='TCG_scene.json', show_scene=True):
    """
    Sets up a SCENE input for the SALVIA_P model.

    Args:
        - model (): Model to which the input will be sent.
        - input_name (STR): Name of the scene to use (subfolder of ./data/scenes/).
        - input_file (STR): Scene input file name.
        - show_scene (BOOL): Whether the scene image (IMG_FILE) should be displayed by the caller.

    Returns:
        (input_name, IMG_FILE)
    """
    # Defining scene input
    SCENE_INPUT_PATH = "./data/scenes/"
    SCENE_FOLDER = "%s%s/" % (SCENE_INPUT_PATH, input_name)
    IMG_FILE = SCENE_FOLDER + 'scene.png'

    perceptLTM = model.schemas['Percept_LTM']
    my_scene = TCG_LOADER.load_scene(input_file, SCENE_FOLDER, perceptLTM)
    model.set_input(my_scene)

    return (input_name, IMG_FILE)
def test_run(seed=None):
    """
    Test run function for the production model.
    """
    if not(seed):  # Quick trick so that I can have access to the seed used to run the simulation.
        random.seed(seed)
        seed = random.randint(0, 10**9)
        print "seed = %i" % seed
    random.seed(seed)

    SEM_INPUT = 'sem_inputs.json'  # Semantic input file (no macros)
    INPUT_NAME = 'blue_woman_kick_man'  # Name of the input to use.
    FOLDER = './tmp/TEST_%s_%s/' % (INPUT_NAME, str(seed))  # Folder where the simulation results will be saved.

    language_system_P = TCG_production_system(grammar_name='TCG_grammar_VB_main', semantics_name='TCG_semantics_main')  # Create model

    # Set up semantic input generator
    conceptLTM = language_system_P.schemas['Concept_LTM']
    sem_inputs = TCG_LOADER.load_sem_input(SEM_INPUT, "./data/sem_inputs/")
    speed_param = 1
    sem_gen = ls.SEM_GENERATOR(sem_inputs, conceptLTM, speed_param)

    generator = sem_gen.sem_generator(INPUT_NAME)
    (sem_insts, next_time, prop) = generator.next()  # Getting the initial input.

    # Test parameters
    language_system_P.params['Control']['task']['start_produce'] = 3100
    language_system_P.params['Control']['task']['time_pressure'] = 200
    language_system_P.params['Grammatical_WM_P']['C2']['confidence_threshold'] = 0.3

    set_up_time = -10  # Starts negative to let the system settle before it receives its first input. Also, easier to handle input arriving at t=0.
    max_time = 3000
    save_states = [30, 700, 2000]

    flag = False
    for t in range(set_up_time, max_time):
        if next_time != None and t > next_time:
            (sem_insts, next_time, prop) = generator.next()
            print "t:%i, sem: %s (prop: %s)" % (t, ', '.join([inst.name for inst in sem_insts]), prop)
            language_system_P.set_input(sem_insts)
        language_system_P.update()
        output = language_system_P.get_output()
        if not(language_system_P.schemas['Grammatical_WM_P'].comp_links) and t > 10 and not(flag):
            print "t:%i, Competition done" % t
            flag = True
            TCG_VIEWER.display_lingWM_state(language_system_P.schemas['Semantic_WM'], language_system_P.schemas['Grammatical_WM_P'], concise=True, folder=FOLDER)
            language_system_P.params['Control']['task']['start_produce'] = t + 10
        if output['Utter']:
            print "t:%i, '%s'" % (t, output['Utter'])
        if t - set_up_time in save_states:
            TCG_VIEWER.display_lingWM_state(language_system_P.schemas['Semantic_WM'], language_system_P.schemas['Grammatical_WM_P'], concise=True, folder=FOLDER)

    language_system_P.schemas['Semantic_WM'].show_SemRep()
    language_system_P.schemas['Grammatical_WM_P'].show_dynamics(inst_act=True, WM_act=False, c2_levels=True, c2_network=False)
    language_system_P.save_sim(FOLDER, 'test_language_output.json')

    return language_system_P
def set_inputs(model, input_name, input_file='kuchinsky.json'):
    """
    Sets up a SCENE_LIGHT input for the SALVIA_P_light model.

    Args:
        - model (): Model to which the input will be sent.
        - input_name (STR): Scene input name.
        - input_file (STR): Scene input file name.

    Returns:
        input_name
    """
    # Defining scene input
    SCENE_INPUT_PATH = "./data/scene_inputs/"

    conceptLTM = model.schemas['Concept_LTM']
    my_scene = TCG_LOADER.load_scene_light(input_file, SCENE_INPUT_PATH, input_name, conceptLTM)
    model.set_input(my_scene)

    return input_name
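# Usage sketch (a minimal example; 'test_scene' is a hypothetical input name and must
# match an entry in the scene-input file, e.g. the default 'kuchinsky.json'):
#
#   model = SALVIA_P_light()   # assumes the SALVIA_P_light builder is importable here
#   set_inputs(model, 'test_scene')
#   model.update()             # then step the model in a time loop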
def run_model(seed=None):
    """
    Runs the production model on a semantic input and returns the production analysis (prod_analyses).
    """
    SEM_INPUT = 'sem_inputs.json'
    INPUT_NAME = 'kick_static_focus_agent'

    language_system_P = TCG_production_system(grammar_name='TCG_grammar_VB_main', semantics_name='TCG_semantics_main')

    # Set up semantic input generator
    conceptLTM = language_system_P.schemas['Concept_LTM']
    sem_inputs = TCG_LOADER.load_sem_input(SEM_INPUT, "./data/sem_inputs/")
    speed_param = 1
    sem_gen = ls.SEM_GENERATOR(sem_inputs, conceptLTM, speed_param)

    generator = sem_gen.sem_generator(INPUT_NAME)
    (sem_insts, next_time, prop) = generator.next()

    # Test parameters
    language_system_P.params['Control']['task']['start_produce'] = 400
    language_system_P.params['Control']['task']['time_pressure'] = 200
    language_system_P.params['Grammatical_WM_P']['C2']['confidence_threshold'] = 0.3

    set_up_time = -10  # Starts negative to let the system settle before it receives its first input. Also, easier to handle input arriving at t=0.
    max_time = 900

    out_data = []
    for t in range(set_up_time, max_time):
        if next_time != None and t > next_time:
            (sem_insts, next_time, prop) = generator.next()
            language_system_P.set_input(sem_insts)
        language_system_P.update()
        # Store output
        output = language_system_P.get_output()
        if output['Grammatical_WM_P']:
            out_data.append(output['Grammatical_WM_P'])
        if output['Utter']:
            print "t:%i, '%s'" % (t, output['Utter'])

    # Output analysis
    res = prod_analyses(out_data)

    return res
def test2(seed=None):
    """Uses the UTTER_GEN class for inputs."""
    random.seed(seed)

    language_system_C = TCG_comprehension_system()

    # Display schema system
    language_system_C.system2dot(image_type='png', disp=True)

    ling_inputs = TCG_LOADER.load_ling_input("ling_inputs.json", "./data/ling_inputs/")
    utter_gen = ls.UTTER_GENERATOR(ling_inputs)

    input_name = 'test_naming'
    generator = utter_gen.utter_generator(input_name)
    (word_form, next_time) = generator.next()

    set_up_time = -10  # (Threshold = 28??) Starts negative to let the system settle before it receives its first input. Also, easier to handle input arriving at t=0. The set-up time really matters! Need to analyze more clearly why and how much time is needed.
    max_time = 300
    save_states = []

    for t in range(set_up_time, max_time):
        if next_time != None and t > next_time:
            (word_form, next_time) = generator.next()
            print "t:%i, receive: %s" % (t, word_form)
            language_system_C.set_input(word_form)
        language_system_C.update()
        if t - set_up_time in save_states:
            TCG_VIEWER.display_gramWM_state(language_system_C.schemas['Grammatical_WM_C'], concise=True)

    language_system_C.schemas['Phonological_WM_C'].show_dynamics(inst_act=True, WM_act=False, c2_levels=False, c2_network=False)
    language_system_C.schemas['Grammatical_WM_C'].show_dynamics(inst_act=True, WM_act=True, c2_levels=True, c2_network=True)
    language_system_C.schemas['Grammatical_WM_C'].show_state()
    language_system_C.schemas['Semantic_WM'].show_dynamics()
    language_system_C.schemas['Semantic_WM'].show_SemRep()
#        Print the cxn with name cxn_name (STR) if it is found in the grammar.
#        """
#        cxn = self.find_construction(cxn_name)
#        if not(cxn):
#            print "%s not found..." % cxn_name
#        else:
#            print cxn

###############################################################################
if __name__ == '__main__':
    from loader import TCG_LOADER

    # Loading data
    grammar_name = 'TCG_grammar_VB_main'
    my_conceptual_knowledge = TCG_LOADER.load_conceptual_knowledge("TCG_semantics_main.json", "./data/semantics/")
    grammar_file = "%s.json" % grammar_name
    my_grammar = TCG_LOADER.load_grammar(grammar_file, "./data/grammars/", my_conceptual_knowledge)

    cxn = my_grammar.constructions[0]
    cxn2 = my_grammar.constructions[3]
    (cxn3, c, u_map) = CXN.unify(cxn, cxn.SynForm.form[0], cxn2)

#    cxn3.SemFrame.draw()
#    print [f.name for f in cxn3.SynForm.form]
#    print cxn3.SymLinks.SL

    cxn3.show()
""" # First check if one of the concept is the neutral element. if self.neutral and ((cpt1 == self.neutral) or (cpt2 == self.neutral)): return True return super(CONCEPTUAL_KNOWLEDGE, self).match(cpt1, cpt2, match_type=match_type) ############################################################################### if __name__ == '__main__': import viewer # I have a bug in the module loading (circularity). This is a cheap hack to make it work for now. from loader import TCG_LOADER my_conceptual_knowledge = TCG_LOADER.load_conceptual_knowledge( "TCG_semantics.json", "./data/semantics/") clothing = my_conceptual_knowledge.find_meaning('CLOTHING') dress = my_conceptual_knowledge.find_meaning('DRESS') print dress.match(clothing) color = my_conceptual_knowledge.find_meaning('COLOR') blue = my_conceptual_knowledge.find_meaning('BLUE') print blue.match(color) human = my_conceptual_knowledge.find_meaning('HUMAN') woman = my_conceptual_knowledge.find_meaning('WOMAN') obj = my_conceptual_knowledge.find_meaning('OBJECT') print woman.match(human) clothing = my_conceptual_knowledge.find_meaning('CLOTHING') neutral = my_conceptual_knowledge.find_meaning('?')
def SALVIA_P_light(name='SALVIA_P_verbal_guidance', grammar_name='TCG_grammar_VB_main', semantics_name='TCG_semantics_main', grammar_path='./data/grammars/', semantics_path='./data/semantics/'):
    """
    Creates and returns a light version of the SALVIA production model.
    It bypasses the VisualWM and the Conceptualizer.
    """
    # Instantiating all the necessary system schemas
    scene_perception = ps.SCENE_PERCEPTION()
    conceptLTM = ls.CONCEPT_LTM()
    semanticWM = ls.SEMANTIC_WM()
    grammaticalWM_P = ls.GRAMMATICAL_WM_P()
    grammaticalLTM = ls.GRAMMATICAL_LTM()
    cxn_retrieval_P = ls.CXN_RETRIEVAL_P()
    phonWM_P = ls.PHON_WM_P()
    control = ls.CONTROL()
    utter = ls.UTTER()

    # Defining schema to brain mappings.
    brain_mappings = {
        'Scene_perception': ['Ventral stream'],
        'Concept_LTM': [''],
        'Semantic_WM': ['left_SFG', 'LIP', 'Hippocampus'],
        'Grammatical_WM_P': ['left_BA45', 'leftBA44'],
        'Grammatical_LTM': ['left_STG', 'left_MTG'],
        'Cxn_retrieval_P': [],
        'Phonological_WM_P': ['left_BA6'],
        'Utter': [''],
        'Control': ['DLPFC']
    }

    schemas = [scene_perception, conceptLTM, grammaticalLTM, cxn_retrieval_P, semanticWM, grammaticalWM_P, phonWM_P, utter, control]

    # Creating model and adding system schemas
    model = st.MODEL(name)
    model.add_schemas(schemas)

    # Defining connections
    model.add_connection(scene_perception, 'to_semantic_WM', semanticWM, 'from_conceptualizer')
    model.add_connection(semanticWM, 'to_visual_WM', scene_perception, 'from_semantic_WM')
    model.add_connection(semanticWM, 'to_cxn_retrieval_P', cxn_retrieval_P, 'from_semantic_WM')
    model.add_connection(grammaticalLTM, 'to_cxn_retrieval_P', cxn_retrieval_P, 'from_grammatical_LTM')
    model.add_connection(cxn_retrieval_P, 'to_grammatical_WM_P', grammaticalWM_P, 'from_cxn_retrieval_P')
    model.add_connection(semanticWM, 'to_grammatical_WM_P', grammaticalWM_P, 'from_semantic_WM')
    model.add_connection(grammaticalWM_P, 'to_semantic_WM', semanticWM, 'from_grammatical_WM_P')
    model.add_connection(grammaticalWM_P, 'to_phonological_WM_P', phonWM_P, 'from_grammatical_WM_P')
    model.add_connection(phonWM_P, 'to_grammatical_WM_P', grammaticalWM_P, 'from_phonological_WM_P')
    model.add_connection(semanticWM, 'to_control', control, 'from_semantic_WM')
    model.add_connection(phonWM_P, 'to_utter', utter, 'from_phonological_WM_P')
    model.add_connection(phonWM_P, 'to_control', control, 'from_phonological_WM_P')
    model.add_connection(control, 'to_grammatical_WM_P', grammaticalWM_P, 'from_control')
    model.add_connection(control, 'to_semantic_WM', semanticWM, 'from_control')

    model.set_input_ports([scene_perception.find_port('from_input')])
    model.set_output_ports([utter.find_port('to_output'), scene_perception.find_port('to_output')])

    # Setting up schema to brain mappings
    description_brain_mapping = st.BRAIN_MAPPING()
    description_brain_mapping.schema_mapping = brain_mappings
    model.brain_mapping = description_brain_mapping

    # Parameters
    system_names = model.schemas.keys()
    model_params = parameters(system_names)
    model.update_params(model_params)

    grammaticalLTM.init_act = grammaticalWM_P.params['C2']['confidence_threshold']

    # Loading data
    semantics_file = "%s.json" % semantics_name
    my_conceptual_knowledge = TCG_LOADER.load_conceptual_knowledge(semantics_file, semantics_path)
    grammar_file = "%s.json" % grammar_name
    my_grammar = TCG_LOADER.load_grammar(grammar_file, grammar_path, my_conceptual_knowledge)

    # Initialize concept LTM content
    conceptLTM.initialize(my_conceptual_knowledge)

    # Initialize grammatical LTM content
    grammaticalLTM.initialize(my_grammar)

    return model
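# Usage sketch (a minimal example; 'my_scene' is a hypothetical scene name that must
# exist in the scene-input file, and the time bounds are illustrative):
#
#   model = SALVIA_P_light()
#   conceptLTM = model.schemas['Concept_LTM']
#   my_scene = TCG_LOADER.load_scene_light('kuchinsky.json', './data/scene_inputs/', 'my_scene', conceptLTM)
#   model.set_input(my_scene)
#   for t in range(-10, 900):
#       model.update()
#       output = model.get_output()
#       if output['Utter']:
#           print "t:%i, '%s'" % (t, output['Utter'])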
def TCG_language_system(name='language_system', grammar_name='TCG_grammar_VB_main', semantics_name='TCG_semantics_main', grammar_path='./data/grammars/', semantics_path='./data/semantics/'):
    """
    Creates and returns the TCG language model, including both production and comprehension.
    """
    # Instantiating all the necessary system schemas
    semanticWM = ls.SEMANTIC_WM()
    conceptLTM = ls.CONCEPT_LTM()
    grammaticalLTM = ls.GRAMMATICAL_LTM()
    grammaticalWM_P = ls.GRAMMATICAL_WM_P()
    cxn_retrieval_P = ls.CXN_RETRIEVAL_P()
    grammaticalWM_C = ls.GRAMMATICAL_WM_C()
    cxn_retrieval_C = ls.CXN_RETRIEVAL_C()
    phonWM_P = ls.PHON_WM_P()
    utter = ls.UTTER()
    phonWM_C = ls.PHON_WM_C()
    control = ls.CONTROL()

    # Defining schema to brain mappings.
    language_mapping = {
        'Semantic_WM': ['left_SFG', 'LIP', 'Hippocampus'],
        'Grammatical_WM_P': ['left_BA45', 'leftBA44'],
        'Grammatical_LTM': ['left_STG', 'left_MTG'],
        'Cxn_retrieval_P': [],
        'Phonological_WM_P': ['left_BA6'],
        'Utter': [''],
        'Cxn_retrieval_C': [],
        'Phonological_WM_C': ['Wernicke'],
        'Grammatical_WM_C': ['lBA44, lBA45'],
        'Control': ['DLPFC'],
        'Concept_LTM': ['']
    }

    # Initializing model
    model = st.MODEL(name)

    # Setting up schema to brain mappings
    language_brain_mapping = st.BRAIN_MAPPING()
    language_brain_mapping.schema_mapping = language_mapping
    model.brain_mapping = language_brain_mapping

    # Setting up language model.
    language_schemas = [semanticWM, conceptLTM, grammaticalLTM, cxn_retrieval_P, grammaticalWM_P, phonWM_P, utter, phonWM_C, grammaticalWM_C, cxn_retrieval_C, control]
    model.add_schemas(language_schemas)

    model.add_connection(semanticWM, 'to_cxn_retrieval_P', cxn_retrieval_P, 'from_semantic_WM')
    model.add_connection(grammaticalLTM, 'to_cxn_retrieval_P', cxn_retrieval_P, 'from_grammatical_LTM')
    model.add_connection(cxn_retrieval_P, 'to_grammatical_WM_P', grammaticalWM_P, 'from_cxn_retrieval_P')
    model.add_connection(semanticWM, 'to_grammatical_WM_P', grammaticalWM_P, 'from_semantic_WM')
    model.add_connection(grammaticalWM_P, 'to_semantic_WM', semanticWM, 'from_grammatical_WM_P')
    model.add_connection(grammaticalWM_P, 'to_phonological_WM_P', phonWM_P, 'from_grammatical_WM_P')
    model.add_connection(phonWM_P, 'to_grammatical_WM_P', grammaticalWM_P, 'from_phonological_WM_P')
    model.add_connection(semanticWM, 'to_control', control, 'from_semantic_WM')
    model.add_connection(phonWM_P, 'to_control', control, 'from_phonological_WM_P')
    model.add_connection(phonWM_P, 'to_utter', utter, 'from_phonological_WM_P')
    model.add_connection(control, 'to_grammatical_WM_P', grammaticalWM_P, 'from_control')

    model.add_connection(grammaticalLTM, 'to_cxn_retrieval_C', cxn_retrieval_C, 'from_grammatical_LTM')
    model.add_connection(phonWM_C, 'to_grammatical_WM_C', grammaticalWM_C, 'from_phonological_WM_C')
    model.add_connection(grammaticalWM_C, 'to_cxn_retrieval_C', cxn_retrieval_C, 'from_grammatical_WM_C')
    model.add_connection(cxn_retrieval_C, 'to_grammatical_WM_C', grammaticalWM_C, 'from_cxn_retrieval_C')
    model.add_connection(control, 'to_semantic_WM', semanticWM, 'from_control')
    model.add_connection(control, 'to_grammatical_WM_C', grammaticalWM_C, 'from_control')
    model.add_connection(grammaticalWM_C, 'to_semantic_WM', semanticWM, 'from_grammatical_WM_C')
    model.add_connection(conceptLTM, 'to_semantic_WM', semanticWM, 'from_concept_LTM')

    model.set_input_ports([phonWM_C.find_port('from_input')])
    model.set_output_ports([utter.find_port('to_output')])

    # Parameters
    system_names = model.schemas.keys()
    model_params = parameters(system_names)
    model.update_params(model_params)

    # grammaticalLTM.init_act = grammaticalWM_P.params['C2']['confidence_threshold']*0.5

    # Loading data
    semantics_file = "%s.json" % semantics_name
    my_conceptual_knowledge = TCG_LOADER.load_conceptual_knowledge(semantics_file, semantics_path)
    grammar_file = "%s.json" % grammar_name
    my_grammar = TCG_LOADER.load_grammar(grammar_file, grammar_path, my_conceptual_knowledge)

    # Initialize conceptual LTM content
    conceptLTM.initialize(my_conceptual_knowledge)

    # Initialize grammatical LTM content
    grammaticalLTM.initialize(my_grammar)

    return model