def get_initial_variables():
	"""Build the initial variable pool: every known property plus a freshly
	initialised 8x8 environment and the state of one of its non-empty cells.

	Returns:
		list: PROPERTIES entries, then the selected cell state, then the env.
	"""
	# Plain list() copy instead of the identity comprehension.
	variables = list(PROPERTIES)
	env = GridEnvironment(8, 8)
	env.init_environment(RandomInitializer())
	# Only the cell state is used here; the cell index is discarded.
	_, state = env.select_non_empty_cell()
	variables.append(state)
	variables.append(env)
	return variables
def check_knowledge(graph):
	"""Offer every known property to every node in *graph* and run any
	operation that becomes applicable.

	Args:
		graph: igraph Graph whose vertices carry a 'node' attribute.
	"""
	# 'prop' rather than 'property' to avoid shadowing the builtin.
	for prop in PROPERTIES:
		for vertex in graph.vs:
			node = vertex['node']
			if node.take_args(prop) and node.can_apply_operation():
				if node.do_operation():
					# BUG FIX: the original print call was missing its
					# closing parenthesis (a syntax error).
					print("node {0} used property {1}".format(node, prop))


def search_for_operations_from_returned(root_node, graph):
	"""Propagate values returned by *root_node* to the other known nodes.

	For each value produced by root_node, offer it to every other node in the
	module-level ``nodes`` list; when a node accepts it and its operation
	succeeds, record the dependency as a directed edge root_node -> node and
	bind the returned value to that node.

	Args:
		root_node: node whose ``returned`` values are being propagated.
		graph: igraph Graph with a per-vertex 'node' attribute.
	"""
	for returned in root_node.returned:
		for n in nodes:
			if n == root_node:
				continue
			if n.take_args(returned.value) and n.can_apply_operation():
				if n.do_operation():
					graph.add_vertex(n)
					# BUG FIX: graph.vs['node' == root_node] evaluates the
					# comparison to False (i.e. index 0) and always selects
					# the first vertex.  Look vertices up by their 'node'
					# attribute instead (igraph VertexSeq.find).
					source = graph.vs.find(node=root_node)
					target = graph.vs.find(node=n)
					graph.add_edge(source, target)
					returned.bind(n)




if __name__ == "__main__":
	global nodes, properties
	vars = []
	env = GridEnvironment(8, 8)
	env.init_environment(RandomInitializer())
	index, state = env.select_non_empty_cell()
	vars.append(env)
	vars.append(state)
	## initialize cognitive operations
	nodes = [Node(CognitiveOperation(typ.name, typ, *val)) for typ, val in COGNITIVE_TYPE_DICT.iteritems()]
	graphs = []
	for var in vars:
		for node in nodes:
			if node.take_args(var) and node.can_apply_operation():
				if node.do_operation():
					g = Graph(directed=True)
					g.add_vertex(node=node, name=node.name)
					check_knowledge(g)
					graphs.append(g)
					search_for_operations_from_returned(node, g)


	for g in graphs:
		for v in g.vs:
			print(v['name']

	for node in nodes:
		print(node.name, ":==== ", node.get_unhandled()
def create_game(neg_beta):
	"""Assemble a holistic teacher/learner cooperation game for *neg_beta*.

	Builds a randomly initialised environment, a pool of learner agents with
	the given negative-beta architecture parameter, and a single teacher whose
	language matrix is a non-zero permutation matrix.
	"""
	environment = GridEnvironment(env_size, env_size, goals=GOALS, symbols=AgentKnowledge.holistic_symbols)
	RandomInitializer(num_objects=num_objects, reduced=True).init_environments(environment)
	n_symbols = len(AgentKnowledge.holistic_symbols)
	n_goals = len(environment.goals)
	learner_agents = []
	for _ in range(num_agents):
		learner_agents.append(
			AgentWithStrategy(StrategyTypes.MIXED,
			                  architecture=HolisticCognitiveArchitecture(n_symbols, n_goals, neg_beta=neg_beta, random=True),
			                  role=Role.LISTENER,
			                  max_tries=num_tries,
			                  failure_alignment=True))
	teacher_agent = AgentWithStrategy(StrategyTypes.MIXED,
	                                  architecture=HolisticCognitiveArchitecture(n_symbols, n_goals),
	                                  role=Role.LISTENER)
	teacher_agent.architecture.language_matrix = get_non_zero_permutation_matrix(n_symbols, n_goals)
	game_name = "teachers_learners_game_holistic_" + str(num_tries)
	return CooperationHolisticGameWithTeachersAndLearners(
		environment, [teacher_agent], learner_agents, game_name,
		umpire=HolisticGameUmpire(environment, n_goals))
	window.setGeometry(300, 300, 1000, 600)
	gamesContainer, gridWidget_1, gridWidget_2 = make_two_game_widget(gridEnv1, gridEnv2)
	buttonsContainer = make_buttons_container(make_buttons())
	mainWindowLayout = QtGui.QHBoxLayout()
	mainWindowLayout.addWidget(buttonsContainer)
	mainWidget = QtGui.QWidget()
	mainWindowLayout.addWidget(gamesContainer)
	mainWidget.setLayout(mainWindowLayout)
	window.setCentralWidget(mainWidget)
	return window, gridWidget_1, gridWidget_2


if __name__ == "__main__":
	# Two side-by-side 8x8 grid environments, filled by one shared initialiser.
	grid_env_1 = GridEnvironment(8, 8)
	grid_env_2 = GridEnvironment(8, 8)
	initialiser = RandomInitializer(max_objects=30)
	initialiser.init_environments(grid_env_1, grid_env_2)
	# NOTE(review): 'global' at module level is a no-op; these names are
	# module-level assignments either way.
	global timer, interval, start_new_interaction
	timer = QtCore.QTimer()
	interval = 2.5
	start_new_interaction = True
	# Qt application plus a window holding one widget per environment.
	app = QtGui.QApplication([])
	window, grid_widget_1, grid_widget_2 = setup_window(grid_env_1, grid_env_2)
	window.show()
	# COOPERATIVE GAME
	cooperative_agent1 = AgentWithCognitiveNetwork(role=Role.SPEAKER)
	cooperative_agent2 = AgentWithCognitiveNetwork(role=Role.LISTENER)
	cooperative_game = GraphicalGame(grid_widget_1, CooperateStrategy.callback, cooperative_agent1, cooperative_agent2)
	# NON-COOPERATIVE GAME
	non_cooperative_agent1 = AgentWithCognitiveNetwork(role=Role.SPEAKER)
	non_cooperative_agent2 = AgentWithCognitiveNetwork(role=Role.LISTENER)
	# NOTE(review): the script appears truncated here — the non-cooperative
	# game construction and the Qt event loop are missing (scrape artifact).
		file_name = "holistic_teachers_{0}_sweeping_neg_beta.pkl".format(aligned)
		store_files(umpires, learners, winners, file_name, directory)
		sys.exit(0)

	for beta in arange(max_neg_beta, min_neg_beta, beta_step):
		print(beta)
		game = create_game(beta)
		language_matrix = game.teachers[0].architecture.language_matrix
		game.umpire.compute_distance_learners(game.learners, language_matrix)
		for i in range(num_games):
			is_game_finished = False
			while not is_game_finished:
				is_game_finished = game.consume_time_step()
				if is_game_finished:
					print("GAME FINISHED ",i)
					winner_tries = game.learners[0].strategy.max_tries
					winners.append(winner_tries)
					reward_agents(game.learners, reward)
					break
			game.umpire.compute_distance_learners(game.learners, language_matrix)
			game.umpire.compute_mean_fitness(game.learners)
			env = GridEnvironment(env_size, env_size, goals=GOALS, symbols=AgentKnowledge.holistic_symbols)
			RandomInitializer(num_objects=num_objects, reduced=True).init_environments(env)
			game.reset_game(env)
		umpires = game.umpire
		learners = game.learners
		aligned = "false_aligned"
		file_name = "holistic_teachers_{0}_sweeping_neg_beta_{1}.pkl".format(aligned, beta)
		store_files(umpires, learners, winners, file_name, directory)

# Beispiel #6
# 0
def get_new_reduced_environment(size, num_objects):
	"""Create a size x size grid environment populated with *num_objects*
	randomly placed objects from the reduced object set."""
	environment = GridEnvironment(size, size)
	initializer = RandomInitializer(num_objects=num_objects, reduced=True)
	initializer.init_environments(environment)
	return environment
# Beispiel #7
# 0
def stop_game():
    """Stop the module-level interaction timer (wired to the 'stop game' button)."""
    timer.stop()

def make_buttons():
    """Create the start/stop push buttons and wire their clicked() signals.

    Returns:
        tuple: (start_button, stop_button)
    """
    start_button = QtGui.QPushButton('start game')
    stop_button = QtGui.QPushButton('stop game')
    # Old-style PyQt signal/slot connections, matching the rest of this file.
    QtCore.QObject.connect(start_button, QtCore.SIGNAL("clicked()"), start_game)
    QtCore.QObject.connect(stop_button, QtCore.SIGNAL("clicked()"), stop_game)
    return start_button, stop_button



if __name__ == "__main__":
    # Single 8x8 grid with default random initialisation.
    grid_env = GridEnvironment(8,8)
    initaliser = RandomInitializer()
    initaliser.init_environments(grid_env)
    # grid_env.init_environment(RandomInitializer())
    # NOTE(review): 'global' at module level is a no-op; 'timer' is a
    # module-level name either way (read by start_game/stop_game).
    global timer
    timer = QtCore.QTimer()
    # Qt setup: application, main window, two game grids plus the button column.
    app = QtGui.QApplication([])
    window = QtGui.QMainWindow()
    window.setGeometry(300, 300, 1000, 600)
    gamesContainer, gridWidget1, gridWidget2 = make_two_game_widget(grid_env)
    buttonsContainer = make_buttons_container(make_buttons())
    mainWindowLayout = QtGui.QHBoxLayout()
    mainWindowLayout.addWidget(buttonsContainer)
    mainWidget = QtGui.QWidget()
    mainWindowLayout.addWidget(gamesContainer)
    mainWidget.setLayout(mainWindowLayout)
    window.setCentralWidget(mainWidget)
    # NOTE(review): fragment ends without window.show()/app.exec_() —
    # likely truncated (scrape artifact).
# Beispiel #8
# 0
def get_new_environments(n_environments, size, num_objects, goals=GOALS, symbols=AgentKnowledge.symbols):
	"""Create *n_environments* grid environments of the given size and populate
	all of them with *num_objects* randomly placed objects (reduced set)."""
	environments = [GridEnvironment(size, size, goals=goals, symbols=symbols)
	                for _ in range(n_environments)]
	RandomInitializer(num_objects=num_objects, reduced=True).init_environments(environments)
	return environments
# Beispiel #9
# 0
def setup_environments(size, num_objects):
	"""Build two size x size grid environments filled by one shared
	random initialiser placing *num_objects* objects."""
	first = GridEnvironment(size, size)
	second = GridEnvironment(size, size)
	initializer = RandomInitializer(num_objects=num_objects)
	initializer.init_environments(first, second)
	return first, second
# Beispiel #10
# 0
                status.number_of_attempts)
            return False


def coop_interaction_cb(game):
    """Timer callback for the cooperative game: run one interaction step."""
    return perform_interaction(game)


def non_coop_interaction_cb(game):
    """Timer callback for the non-cooperative game: run one interaction step."""
    return perform_interaction(game)


if __name__ == "__main__":
    # Two 8x8 grids filled with 100 random objects each, shown side by side.
    grid_env_1 = GridEnvironment(8, 8)
    grid_env_2 = GridEnvironment(8, 8)
    initializer = RandomInitializer(num_objects=100)
    initializer.init_environments(grid_env_1, grid_env_2)

    app = QtGui.QApplication([])
    window = DoubleWindow(grid_env_1, grid_env_2, interval=.1)

    # Ten exhaustive-strategy agents for the cooperative game.
    number_agents = 10
    cooperative_agents = [
        AgentWithStrategy(StrategyTypes.EXHAUSTIVE,
                          architecture='multiple',
                          creator='uniform_random')
        for i in range(number_agents)
    ]
    cooperative_game = GraphicalGame(window.gridWidget_1, coop_interaction_cb,
                                     cooperative_agents,
                                     "cooperative_10_agents_1")
    # NOTE(review): the remainder of this suite looks like a second, fused
    # script fragment (scrape artifact): it re-imports modules mid-suite and
    # constructs a second QApplication, which Qt does not allow in one process.
    from GridEnvironment import GridEnvironment
    from environment import RandomInitializer
    ## Always start by initializing Qt (only once per application)
    app = QtGui.QApplication([])

    ## Define a top-level widget to hold everything
    w = QtGui.QWidget()

    # w = GridWithConsole(width=500, height=500)
    ## Create some widgets to be placed inside
    btn = QtGui.QPushButton('press me')
    # text = QtGui.QLineEdit('enter text')
    listw = QtGui.QListWidget()
    text = QDbgConsole()
    grid_env = GridEnvironment(6, 6)
    grid_env.init_environment(RandomInitializer())
    plot = GridWidget(width=500, height=400)
    plot.set_grid(grid_env)
    # w.grid.set_grid(grid_env)
    ## Create a grid layout to manage the widgets size and position
    layout = QtGui.QGridLayout()
    w.setLayout(layout)

    # Add widgets to the layout in their proper positions
    layout.addWidget(btn, 0, 0)  # button goes in upper-left
    layout.addWidget(text, 1, 0, 1, 1)  # text edit goes in middle-left
    # layout.addWidget(listw, 2, 2, 1)  # list widget goes in bottom-left
    layout.addWidget(plot, 0, 1, 1,
                     1)  # plot goes on right side, spanning 3 rows

    ## Display the widget as a new window
# Beispiel #12
# 0
def create_environment(n_rows=8, n_columns=8):
    """(Re)create the module-level grid environment with random contents.

    Side effect: rebinds the global ``grid_env``; returns nothing.
    """
    global grid_env
    grid_env = GridEnvironment(n_rows, n_columns)
    grid_env.init_environment(RandomInitializer())