import argparse
import os
import random
import sys

import numpy as np
import tensorflow as tf

from parserr import Parser
from datamanager import DataManager
from actor import ActorNetwork
from LSTM_critic import LSTM_CriticNetwork

tf.logging.set_verbosity(tf.logging.ERROR)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# parse command-line arguments
argv = sys.argv[1:]
parser = Parser().getParser()
args, _ = parser.parse_known_args(argv)
random.seed(args.seed)

# locate the pretrained word and emoji embedding files
ME_DIR = os.path.dirname(os.path.realpath(__file__))
work_dir = ME_DIR
embedding_file = work_dir + '/embedding/glove.twitter.27B.200d.txt'
emoji_embedding_file = work_dir + '/embedding/emoji2vec.txt'
embedding_file_ = work_dir + '/embedding/dict_file.csv'
embedding_dim = 200

# load the dataset splits and the word vectors
datamanager = DataManager('a')
train_data, test_data, dev_data = datamanager.getdata(2, args.maxlenth)
word_vector = datamanager.get_wordvector(embedding_file, emoji_embedding_file)

# optionally shrink the splits for a quick sanity run
if args.fasttest == 1:
    train_data = train_data[:100]
    dev_data = dev_data[:20]
    test_data = test_data[:20]
print("train_data ", len(train_data))
print("dev_data", len(dev_data))
print("test_data", len(test_data))


def sampling_RL(sess, actor, inputs, lenth, Random=True):
    # `state_size` and `critic` are module-level globals defined elsewhere in this script
    current_lower_state = np.zeros((1, state_size), dtype=np.float32)
    current_upper_state = np.zeros((1, state_size), dtype=np.float32)
    actions = []
    states = []
    # sample an action at every position of the input sequence
    for pos in range(lenth):
        out_d, current_lower_state = critic.lower_LSTM_target(current_lower_state, [[inputs[pos]]])
        predicted = actor.predict_target(current_upper_state, current_lower_state)
        states.append([current_upper_state, current_lower_state])
        if Random:
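
# --- Illustrative usage sketch (not part of the original script) ---
# The remainder of sampling_RL, the construction of `actor`/`critic`, and the
# real training loop fall outside this excerpt, so the call below is only an
# assumption of how the function would typically be driven per sentence;
# `sess`, `actor`, and the per-sample fields `words`/`length` are hypothetical
# placeholders, kept commented out:
#
#   with tf.Session() as sess:
#       for sample in train_data:
#           actions, states = sampling_RL(sess, actor, sample.words, sample.length, Random=True)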