def __init__(self, is_Setting_Embedder=True):
    """Build the POS-tagger graph pieces: bidirectional decoder/output LSTM
    cells, projection weights, and the input placeholder.

    is_Setting_Embedder: when True (default), also construct and wire a
        POS_Embedder plus its Data_holder; pass False when the caller
        supplies the embedder itself -- presumably via setEmbedder, confirm.
    """
    self.vocab_size = 85  # number of POS tag classes (output one-hot width)
    # Bidirectional decoder cells, 128 hidden units each.
    self.cell_Dec_fw = tf.nn.rnn_cell.BasicLSTMCell(128)
    self.cell_Dec_bw = tf.nn.rnn_cell.BasicLSTMCell(128)
    # Output cells sized to the tag vocabulary so the RNN emits per-tag scores.
    self.cell_Output_fw = tf.nn.rnn_cell.BasicLSTMCell(self.vocab_size)
    self.cell_Output_bw = tf.nn.rnn_cell.BasicLSTMCell(self.vocab_size)
    # Projection: 50-dim word embedding -> 128-dim hidden.
    self.Weight_Hidden = self.weight_variable(shape=[50, 128],
                                              name='Weight_Hidden_Tagger')
    # NOTE(review): 'Bias_Hiddne_Tagger' looks like a typo for
    # 'Bias_Hidden_Tagger'; left unchanged because renaming a TF variable
    # would break loading of existing checkpoints.
    self.Bias_Hidden = self.bias_variable(shape=[128],
                                          name='Bias_Hiddne_Tagger')
    # Projection: 128-dim hidden -> 256-dim embedding space.
    self.Weight_Embedding = self.weight_variable(shape=[128, 256],
                                                 name='Weight_Embedding_Tagger')
    self.Bias_Embedding = self.bias_variable(shape=[256],
                                             name='Bias_Embedding_Tagger')
    self.Batch = 500           # mini-batch size
    self.P_Length = 100        # paragraph length in tokens
    self.C_Length = 20         # presumably max word length in characters -- confirm
    self.Char_Onehot = 128     # one-hot width for characters -- presumably ASCII, confirm
    self.Embedding_Size = 50   # word-embedding dimensionality
    # Input: [batch, paragraph_len, embedding] word vectors.
    self.X_Input = tf.placeholder(shape=[None, self.P_Length, self.Embedding_Size],
                                  dtype=tf.float32, name='Tagger_Input')
    if is_Setting_Embedder:
        # Default wiring: build the embedder and its (embedding-only) data
        # pipeline, then attach the pipeline to the embedder.
        self.pos_Embed = POS_Embed.POS_Embedder()
        self.DataHolder = data_processor.Data_holder(is_Just_Embedding=True)
        self.DataHolder.set_batch()
        self.pos_Embed.set_Data_Hlder(self.DataHolder)
def __init__(self):
    """Set up the data pipeline, the two mirrored LSTM cell banks and the
    question/paragraph input placeholders."""
    # Data pipeline: batched QA pairs plus sentence-level batches.
    self.dataset = dp.Data_holder()
    self.dataset.set_batch()
    self.dataset.set_sentence_batch()

    # Current-batch slots; overwritten by the fetch below.
    self.question = 0
    self.paragraph = 0
    self.start_index = 0
    self.stop_index = 0

    # Hyper-parameters.
    self.batch = 500
    self.p_length = 100
    self.q_length = 30
    self.embedding_size = 50

    (self.paragraph, self.question,
     self.start_index, self.stop_index) = self.dataset.get_next_batch()

    # Two identical banks of LSTM cells; the second bank's attribute names
    # carry a trailing underscore. Output cells have a single unit (a scalar
    # score per position); all others match the embedding size.
    bank_spec = (
        ('cell_Qr_f', self.embedding_size),
        ('cell_Pr_f', self.embedding_size),
        ('cell_stack_f', self.embedding_size),
        ('cell_stack_b', self.embedding_size),
        ('cell_stack_f2', self.embedding_size),
        ('cell_stack_b2', self.embedding_size),
        ('cell_stack_f3', self.embedding_size),
        ('cell_stack_b3', self.embedding_size),
        ('cell_output_f', 1),
        ('cell_output_b', 1),
    )
    for suffix in ('', '_'):
        for attr, units in bank_spec:
            setattr(self, attr + suffix, tf.nn.rnn_cell.BasicLSTMCell(units))

    self.output_Start = None
    self.output_Stop = None

    # Inputs: [batch, seq_len, embedding] float word vectors.
    self.x_q_holer = tf.placeholder(
        dtype=tf.float32,
        shape=[self.batch, self.q_length, self.embedding_size])
    self.x_p_holer = tf.placeholder(
        dtype=tf.float32,
        shape=[self.batch, self.p_length, self.embedding_size])
def __init__(self):
    """Build a CNN+LSTM QA model: data pipeline, encoder cells, n-gram
    convolution filters and input placeholders."""
    self.dataset = dp.Data_holder()
    self.dataset.set_batch()
    # Current-batch slots; overwritten by the fetch below.
    self.question = 0
    self.paragraph = 0
    self.start_index = 0
    self.stop_index = 0
    self.batch = 1000    # mini-batch size
    self.p_length = 125  # paragraph length in tokens
    self.q_length = 40   # question length in tokens
    # Per-example sequence lengths (fixed, one entry per batch row) --
    # presumably fed to dynamic_rnn; confirm against the build method.
    self.seq_length = [40] * 1000
    self.seq_length_ = [125] * 1000
    self.paragraph, self.question, self.start_index, self.stop_index = \
        self.dataset.get_next_batch()
    # Question / paragraph encoder cells (100 units each).
    self.cell_Qr = tf.nn.rnn_cell.BasicLSTMCell(100)
    self.cell_Pr = tf.nn.rnn_cell.BasicLSTMCell(100)
    # Single-unit cells producing per-position scalar scores for the
    # answer start / stop indices.
    self.cell_output_START = tf.nn.rnn_cell.BasicLSTMCell(1)
    self.cell_output_STOP = tf.nn.rnn_cell.BasicLSTMCell(1)
    # Convolution filters [height, width, in_ch, out_ch]: heights 2..6 act
    # as n-gram detectors over the 100-dim embedding axis, 4 channels each.
    # NOTE(review): these variables are created without explicit names, so
    # TF presumably auto-names them by creation order -- do not reorder
    # (checkpoint compatibility).
    self.W_conv1 = self.weight_variable([2, 100, 1, 4])
    self.b_conv1 = self.bias_variable([4])
    self.W_conv2 = self.weight_variable([3, 100, 1, 4])
    self.b_conv2 = self.bias_variable([4])
    self.W_conv3 = self.weight_variable([4, 100, 1, 4])
    self.b_conv3 = self.bias_variable([4])
    self.W_conv4 = self.weight_variable([5, 100, 1, 4])
    self.b_conv4 = self.bias_variable([4])
    self.W_conv5 = self.weight_variable([6, 100, 1, 4])
    self.b_conv5 = self.bias_variable([4])
    # Fully-connected projection for the question branch.
    self.W_fc_Q = self.weight_variable([20, 90])
    self.b_fc = self.bias_variable([90])
    self.output_Start = None
    self.output_Stop = None
    # Inputs: [batch, seq_len, 100] question / paragraph embeddings.
    self.x_q_holer = tf.placeholder(dtype=tf.float32,
                                    shape=[self.batch, 40, 100])
    self.x_p_holer = tf.placeholder(dtype=tf.float32,
                                    shape=[self.batch, 125, 100])
def __init__(self, is_Para=True):
    """Build the POS-augmented QA model: data pipeline, bidirectional
    encoders for the question/paragraph/POS streams, n-gram convolution
    filters, projection weights and input placeholders.

    Args:
        is_Para: kept for interface compatibility with the sibling model;
            not referenced in this initializer.
    """
    self.dataset = dp.Data_holder()
    self.dataset.set_batch()

    # Current-batch slots; overwritten by the fetch below.
    self.question = 0
    self.paragraph = 0
    self.POS_Embeddings = 0
    self.POS_Q_Embeddings = 0
    self.start_index = 0
    self.stop_index = 0
    self.attention_Label = 0

    # Hyper-parameters.
    self.embedding_size = 50       # word-embedding dimensionality
    self.POS_Embedding_Size = 128  # POS-embedding dimensionality
    self.batch = 500
    self.p_length = 125
    self.q_length = 30

    (self.paragraph, self.question, self.start_index,
     self.stop_index, self.POS_Embeddings) = self.dataset.get_next_batch()

    # Bidirectional encoders for the question, paragraph and POS streams.
    self.cell_Q_Enc_fw = tf.nn.rnn_cell.BasicLSTMCell(32)
    self.cell_Q_Enc_bw = tf.nn.rnn_cell.BasicLSTMCell(32)
    self.cell_P_Enc_fw = tf.nn.rnn_cell.BasicLSTMCell(32)
    self.cell_P_Enc_bw = tf.nn.rnn_cell.BasicLSTMCell(32)
    self.cell_POS_Enc_fw = tf.nn.rnn_cell.BasicLSTMCell(32)
    self.cell_POS_Enc_bw = tf.nn.rnn_cell.BasicLSTMCell(32)
    # Single-unit cells producing per-position scalar index scores.
    self.cell_output_Index_fw = tf.nn.rnn_cell.BasicLSTMCell(1)
    self.cell_output_Index_bw = tf.nn.rnn_cell.BasicLSTMCell(1)
    # Modeling layers.
    self.cell_modeling_fw = tf.nn.rnn_cell.BasicLSTMCell(64)
    self.cell_modeling_bw = tf.nn.rnn_cell.BasicLSTMCell(64)
    self.cell_modeling_fw_ = tf.nn.rnn_cell.BasicLSTMCell(32)
    self.cell_modeling_bw_ = tf.nn.rnn_cell.BasicLSTMCell(32)

    # Convolution filters [height, width, in_ch, out_ch]: n-gram heights
    # 2..5 with 4 output channels each.
    self.W_conv_2 = self.weight_variable([2, 1, 1, 4])
    self.W_conv_3 = self.weight_variable([3, 1, 1, 4])
    self.W_conv_4 = self.weight_variable([4, 1, 1, 4])
    self.W_conv_5 = self.weight_variable([5, 1, 1, 4])

    self.Weight_Paragraph = self.weight_variable(shape=[1, 64, 64])
    self.Weight_POS = self.weight_variable(shape=[1, 64, 64])
    self.Bias_Paragraph = self.bias_variable(shape=[64])
    self.Bias_POS = self.bias_variable(shape=[64])
    self.Weight_Dec_Input = self.weight_variable(shape=[1, 64 * 5, 128])
    self.Bias_Dec_Input = self.bias_variable(shape=[128])
    self.Weight_D_Out = self.weight_variable(shape=[1, 128, 1])
    # FIX: Bias_D_Out / Bias_D_Out_Stop were created with weight_variable();
    # every other bias in this model uses bias_variable(), so use it here too.
    self.Bias_D_Out = self.bias_variable(shape=[1])
    self.Weight_D_Out_Stop = self.weight_variable(shape=[1, 128, 1])
    self.Bias_D_Out_Stop = self.bias_variable(shape=[1])
    self.W_fc = self.weight_variable([1, 16, 1])
    self.b_fc = self.bias_variable([1])

    self.output_Start = None
    self.output_Stop = None

    # Input placeholders: [batch, seq_len, feature] float tensors.
    self.x_q_holer = tf.placeholder(
        dtype=tf.float32,
        shape=[self.batch, self.q_length, self.embedding_size],
        name='x_q_holer')
    self.x_p_holer = tf.placeholder(
        dtype=tf.float32,
        shape=[self.batch, self.p_length, self.embedding_size],
        name='x_p_holer')
    # NOTE(review): both POS placeholders share the name 'x_POS_holer'; TF
    # auto-uniquifies the second to 'x_POS_holer_1'. Left unchanged in case
    # tensors are fetched elsewhere by those generated names.
    self.x_pos_q_holder = tf.placeholder(
        dtype=tf.float32,
        shape=[self.batch, self.q_length, self.POS_Embedding_Size],
        name='x_POS_holer')
    # NOTE(review): paragraph-POS placeholder width is 85 (tag vocab size),
    # not POS_Embedding_Size -- presumably a one-hot input; confirm.
    self.x_pos_holder = tf.placeholder(
        dtype=tf.float32,
        shape=[self.batch, self.p_length, 85],
        name='x_POS_holer')
# Dump the learned POS embeddings to a plain-text file: for each tag, the 128
# embedding components separated by spaces, the tag block terminated by '#'.
fileName = 'C:\\Users\\Administrator\\Desktop\\qadataset\\pos_embed'
# NOTE(review): only 84 rows are written although vocab_size is 85 elsewhere
# in this file -- confirm whether the last tag is intentionally skipped.
# FIX: use a context manager so the file handle is closed even on error, and
# build each row with join instead of quadratic string concatenation. Output
# bytes are identical to the original loop.
with open(fileName, 'w') as f:
    for a in range(84):
        # assumes embeddings is indexable as [tag, 0, component] -- it is
        # defined outside this chunk; TODO confirm its shape.
        row = " ".join(str(embeddings[a, 0, i]) for i in range(128))
        f.write(row + " #")

# Wire up the embedder with a fresh data pipeline.
pos_Embed = POS_Embed.POS_Embedder()
dataholder = data_processor.Data_holder()
dataholder.set_batch()
pos_Embed.set_Data_Hlder(dataholder)
# Disabled pipeline kept for reference: builds the combined POS processor and
# regenerates final POS tags from the dumped embeddings.
"""
pos_Modeler = Combined_POS_Processor.POS_Processor(is_Setting_Embedder=False)
pos_Modeler.setEmbedder(pos_Embed)
pos_Embed.set_POS_Tagger(pos_Modeler)
a, b = pos_Embed.read_POS_From_Embed()
pos_Embed.get_Fianl_POS(dataholder.paragraph_arr, dataholder.numberOf_available_question, a)
"""
print('Complete')
def __init__(self, is_Para=True):
    """Build the POS-augmented QA model (attention-label variant): data
    pipeline, bidirectional encoders for the question/paragraph/POS streams,
    n-gram convolution filters and input placeholders.

    is_Para: True selects sentence batches, False paragraph-level sentence
        batches (set_sentence_batch_para).
    """
    self.dataset = dp.Data_holder()
    self.dataset.set_batch()
    if is_Para:
        self.dataset.set_sentence_batch()
    else:
        self.dataset.set_sentence_batch_para()
    # Current-batch slots; overwritten by the fetch below.
    self.question = 0
    self.paragraph = 0
    self.POS_Embeddings = 0
    self.POS_Q_Embeddings = 0
    self.start_index = 0
    self.stop_index = 0
    self.attention_Label = 0
    # Hyper-parameters.
    self.embedding_size = 50       # word-embedding dimensionality
    self.POS_Embedding_Size = 128  # POS-embedding dimensionality
    self.batch = 500
    self.p_length = 100
    self.q_length = 30
    self.paragraph, self.question, self.start_index, self.stop_index, self.attention_Label, \
        self.POS_Embeddings, self.POS_Q_Embeddings = self.dataset.get_next_batch()
    # Bidirectional encoders for question / paragraph (100 units) and the
    # two POS streams (50 units).
    self.cell_Q_Enc_fw = tf.nn.rnn_cell.BasicLSTMCell(100)
    self.cell_Q_Enc_bw = tf.nn.rnn_cell.BasicLSTMCell(100)
    self.cell_P_Enc_fw = tf.nn.rnn_cell.BasicLSTMCell(100)
    self.cell_P_Enc_bw = tf.nn.rnn_cell.BasicLSTMCell(100)
    self.cell_POS_Enc_fw = tf.nn.rnn_cell.BasicLSTMCell(50)
    self.cell_POS_Enc_bw = tf.nn.rnn_cell.BasicLSTMCell(50)
    self.cell_POS_Enc_fw_Q = tf.nn.rnn_cell.BasicLSTMCell(50)
    self.cell_POS_Enc_bw_Q = tf.nn.rnn_cell.BasicLSTMCell(50)
    # Single-unit cells producing per-position scalar scores.
    self.cell_output_Index_fw = tf.nn.rnn_cell.BasicLSTMCell(1)
    self.cell_output_Index_bw = tf.nn.rnn_cell.BasicLSTMCell(1)
    self.cell_modelling_fw = tf.nn.rnn_cell.BasicLSTMCell(1)
    self.cell_modelling_bw = tf.nn.rnn_cell.BasicLSTMCell(1)
    # Convolution filters [height, width, in_ch, out_ch]: n-gram heights
    # 2..5 with 4 output channels each.
    self.W_conv_2 = self.weight_variable([2, 1, 1, 4])
    self.W_conv_3 = self.weight_variable([3, 1, 1, 4])
    self.W_conv_4 = self.weight_variable([4, 1, 1, 4])
    self.W_conv_5 = self.weight_variable([5, 1, 1, 4])
    self.W_fc = self.weight_variable([16, 1])
    self.b_fc = self.bias_variable([1])
    self.output_Start = None
    self.output_Stop = None
    # Input placeholders: [batch, seq_len, feature] float tensors.
    self.x_q_holer = tf.placeholder(
        dtype=tf.float32,
        shape=[self.batch, self.q_length, self.embedding_size],
        name='x_q_holer')
    self.x_p_holer = tf.placeholder(
        dtype=tf.float32,
        shape=[self.batch, self.p_length, self.embedding_size],
        name='x_p_holer')
    # NOTE(review): both POS placeholders share the name 'x_POS_holer'; TF
    # auto-uniquifies the second to 'x_POS_holer_1'. Likely a copy-paste
    # slip -- confirm before renaming (by-name tensor fetches would break).
    self.x_pos_q_holder = tf.placeholder(
        dtype=tf.float32,
        shape=[self.batch, self.q_length, self.POS_Embedding_Size],
        name='x_POS_holer')
    self.x_pos_holder = tf.placeholder(
        dtype=tf.float32,
        shape=[self.batch, self.p_length, self.POS_Embedding_Size],
        name='x_POS_holer')
    # NOTE(review): duplicates self.p_length under different casing; likely
    # leftover -- confirm which attribute downstream code actually reads.
    self.P_Length = 100