import time


def validation_acc(model, dev_iters, epoch, epochs, node_dict, edge_dict,
                   max_nodes, cuda):
    """Evaluate the model on the dev set."""
    model.eval()
    eval_st = time.time()
    graphs, graph_corrects = 0, 0
    for i, dev_it in enumerate(dev_iters):
        if cuda:
            samples = move_to_cuda(dev_it)
        else:
            samples = dev_it
        _, _, _, batch_graph_correct = greedy_search(
            model, samples["src_graph"], samples["src_text"],
            samples["tgt_graph"], node_dict, edge_dict, max_nodes, cuda)
        graph_corrects += batch_graph_correct
        graphs += 1
    acc = graph_corrects / graphs
    eval_time = (time.time() - eval_st) / 60
    eval_info = "[ Eval {:02}/{:02}]: accuracy={:.4f} elapsed={:.4f} mins"
    print(eval_info.format(epoch + 1, epochs, acc, eval_time))
    model.train()
    return acc
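# For context, a minimal sketch of what a greedy decoding routine like the greedy_search
# used above typically does: take the argmax token at every step until EOS. The
# model.encode / model.decode_step calls are hypothetical placeholders, not the API that
# validation_acc's helper actually uses (that helper also reports graph-level correctness).
import torch


def greedy_decode(model, src, sos_id, eos_id, max_len):
    """Greedy (argmax) decoding loop over a hypothetical encoder/decoder interface."""
    memory = model.encode(src)                    # hypothetical: encode the source once
    ys = [sos_id]
    for _ in range(max_len - 1):
        logits = model.decode_step(memory, torch.tensor([ys]))  # hypothetical step call
        next_token = int(logits[0, -1].argmax())  # greedy choice: highest-scoring token
        ys.append(next_token)
        if next_token == eos_id:
            break
    return ys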
def BuscaGulosa(self):
    """Run greedy search on the 8-puzzle and animate the resulting solution path."""
    table = self.get_table()
    print(table)
    initial_state = StateNode(Game8(table), None, None, 0, 0)

    start = int(round(time.time() * 1000))
    path = greedy_search(initial_state)
    end = int(round(time.time() * 1000))

    for state in path:
        # 'Profundidade' = depth of the current state in the search tree
        self.depth_label['text'] = 'Profundidade: ' + str(state.depth)
        b = state.game.get_b_position()
        state.game.table[b[0]][b[1]] = ''
        self.set_table(state.game.table)
        state.game.show_table()
        time.sleep(1)

    # 'Nós gerados' = generated nodes; assumes the counter is tracked on the final state
    self.generated_nodes_label['text'] = 'Nós gerados: ' + str(state.generated_nodes)
    # 'Tempo de execução (ms)' = execution time in milliseconds
    self.execution_time['text'] = 'Tempo de execução (ms): ' + str(end - start)
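# A minimal sketch of the greedy (best-first) search BuscaGulosa relies on: always expand
# the frontier node with the smallest heuristic value, ignoring path cost. The goal_test,
# successors, and heuristic callables are generic stand-ins, not the project's
# StateNode/Game8 interface; states must be hashable for the visited set.
import heapq
import itertools


def greedy_best_first(start, goal_test, successors, heuristic):
    counter = itertools.count()   # tie-breaker so heapq never has to compare states
    frontier = [(heuristic(start), next(counter), start, [start])]
    visited = set()
    while frontier:
        _, _, state, path = heapq.heappop(frontier)
        if goal_test(state):
            return path           # states from start to goal, analogous to 'path' above
        if state in visited:
            continue
        visited.add(state)
        for nxt in successors(state):
            if nxt not in visited:
                heapq.heappush(frontier,
                               (heuristic(nxt), next(counter), nxt, path + [nxt]))
    return None                   # no goal state reachable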
import tensorflow as tf


def infer_search(src_tokenizer, dst_tokenizer, transformer, config,
                 method='beam_search'):
    """Decode FLAGS.infer_file with beam search or greedy search and write the output."""
    if method == 'beam_search':
        _, y_outputs, _, x_placeholder = beam_search(
            batch_size=1,
            beam_width=FLAGS.beam_width,
            vocab_size=config.vocab_size,
            max_len=FLAGS.max_len,
            hidden_size=config.hidden_size,
            sos_id=dst_tokenizer.bos_id(),
            eos_id=dst_tokenizer.eos_id(),
            inst=transformer)
    elif method == 'greedy_search':
        _, y_outputs, x_placeholder = greedy_search(
            batch_size=1,
            max_len=FLAGS.max_len,
            sos_id=dst_tokenizer.bos_id(),
            eos_id=dst_tokenizer.eos_id(),
            inst=transformer)
    else:
        raise NotImplementedError('Not supported yet')

    sess = tf.Session()
    saver = tf.train.Saver()
    model_file = tf.train.latest_checkpoint(FLAGS.model_dir)
    saver.restore(sess=sess, save_path=model_file)

    fpw = open(file=FLAGS.infer_file + '.dst', mode='w', encoding='utf-8')
    with open(file=FLAGS.infer_file, mode='r', encoding='utf-8') as fp:
        for line in fp:
            line = line.strip()
            idxs = src_tokenizer.encode_as_ids(input=line)
            idxs = idxs[:FLAGS.max_len - 1]
            idxs.append(src_tokenizer.eos_id())
            # Right-pad with 0 up to max_len.
            for i in range(len(idxs), FLAGS.max_len):
                idxs.append(0)
            y_idxs, = sess.run(
                fetches=[y_outputs],
                feed_dict={x_placeholder: [idxs]})
            y_idxs_val = dst_tokenizer.decode_ids(ids=y_idxs[0].tolist())
            fpw.write(y_idxs_val + '\n')
    fpw.close()
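# The per-line preprocessing above (truncate to max_len - 1, append EOS, right-pad with 0)
# can be read as one small helper; this is only an illustration of the same logic, not a
# function from the original module, and pad_id=0 is assumed to match the training setup.
def encode_and_pad(tokenizer, line, max_len, pad_id=0):
    ids = tokenizer.encode_as_ids(input=line.strip())[:max_len - 1]
    ids.append(tokenizer.eos_id())            # always terminate with the EOS id
    ids += [pad_id] * (max_len - len(ids))    # right-pad to the fixed input length
    return ids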
        elif grid[i][j] == u'diamond_block':
            grid[i][j] = 'd'
        elif grid[i][j] == u'emerald_block':
            grid[i][j] = 'E'
        elif grid[i][j] == u'redstone_block':
            grid[i][j] = 'R'
        else:
            grid[i][j] = '?'

pretty_print_grid(grid)
problem = MazeProblem(grid)

if search_alg == 'bfs':
    plan = breadth_first_search(problem)
elif search_alg == 'gs':
    plan = greedy_search(problem)

if plan:
    for action in plan:
        print('action: {0}'.format(action))
        command = commands[action]
        agent_host.sendCommand(command)
        time.sleep(0.5)
        world_state = agent_host.getWorldState()
    # After executing the full plan, check the final world state for the goal reward.
    if world_state.is_mission_running or len(world_state.rewards) == 0 \
            or world_state.rewards[-1].getValue() < 100.0:
        print('Mission failed: did not reach goal state.')
        break  # exits the enclosing mission loop, which is not part of this excerpt
    else:
        print('Mission accomplished: goal state reached.')
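# A sketch of the kind of heuristic greedy_search could apply to MazeProblem: Manhattan
# distance from the agent's cell to the goal cell. The 'E' goal marker and the (row, col)
# state representation are illustrative assumptions, not MazeProblem's actual interface.
def manhattan_to_goal(state, grid, goal_char='E'):
    row, col = state   # assumed agent position as (row, col)
    goal = next((r, c) for r, cells in enumerate(grid)
                for c, cell in enumerate(cells) if cell == goal_char)
    return abs(row - goal[0]) + abs(col - goal[1])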