Example #1
    def build_model(self):
        input = tf.placeholder(tf.float32, [None, self.num_steps], name='inputs')  # input
        noise = tf.placeholder(tf.float32, [None, self.num_steps], name='noise')

        real_fake_label = tf.placeholder(tf.float32, [None, 2], name='real_fake_label')

        F_new_value = tf.placeholder(tf.float32, [None, self.K], name='F_new_value')
        # F = tf.Variable(tf.eye(self.batch_size,num_columns = self.K), trainable = False)
        F = tf.get_variable('F', shape=[self.batch_size, self.K],
                            initializer=tf.orthogonal_initializer(gain=1.0, seed=None, dtype=tf.float32),
                            trainable=False)

        # inputs has shape (batch_size, n_steps, embedding_size)
        inputs = tf.reshape(input, [-1, self.num_steps, self.embedding_size])
        noises = tf.reshape(noise, [-1, self.num_steps, self.embedding_size])

        # a list of 'n_steps' tensors, each of shape (batch_size, embedding_size)
        # encoder_inputs = utils._rnn_reformat(x = inputs, input_dims = self.embedding_size, n_steps = self.num_steps)

        # noise_input has shape (batch_size, n_steps, embedding_size)
        if self.denosing:
            print('Noise')
            noise_input = inputs + noises
        else:
            print('Non_noise')
            noise_input = inputs

        reverse_noise_input = tf.reverse(noise_input, axis=[1])
        decoder_inputs = utils._rnn_reformat(x=noise_input, input_dims=self.embedding_size, n_steps=self.num_steps)
        targets = utils._rnn_reformat(x=inputs, input_dims=self.embedding_size, n_steps=self.num_steps)

        if self.cell_type == 'LSTM':
            raise ValueError('LSTM cells are not supported yet!')

        elif self.cell_type == 'GRU':
            cell = tf.contrib.rnn.GRUCell(np.sum(self.hidden_size) * 2)

        cell = rnn_cell_extensions.LinearSpaceDecoderWrapper(cell, self.embedding_size)

        lf = None
        if self.sample_loss:
            print('Sample Loss')

            # loop_function for the decoder: feed the previous output back in
            # as the next input instead of the ground-truth target
            def lf(prev, i):
                return prev

        # encoder_output_fw/bw: one tensor per layer, each of shape [batch_size, n_steps, hidden_size]
        with tf.variable_scope('fw'):
            _, encoder_output_fw = drnn.drnn_layer_final(noise_input, self.hidden_size, self.dilations, self.num_steps,
                                                         self.embedding_size, self.cell_type)

        with tf.variable_scope('bw'):
            _, encoder_output_bw = drnn.drnn_layer_final(reverse_noise_input, self.hidden_size, self.dilations,
                                                         self.num_steps, self.embedding_size, self.cell_type)

        if self.cell_type == 'LSTM':
            raise ValueError('LSTM cells are not supported yet!')
        elif self.cell_type == 'GRU':
            fw = []
            bw = []
            for i in range(len(self.hidden_size)):
                fw.append(encoder_output_fw[i][:, -1, :])
                bw.append(encoder_output_bw[i][:, -1, :])
            encoder_state_fw = tf.concat(fw, axis=1)
            encoder_state_bw = tf.concat(bw, axis=1)

            # encoder_state has shape [batch_size, sum(hidden_size)*2]
            encoder_state = tf.concat([encoder_state_fw, encoder_state_bw], axis=1)

        decoder_outputs, _ = tf.contrib.legacy_seq2seq.rnn_decoder(decoder_inputs=decoder_inputs,
                                                                   initial_state=encoder_state, cell=cell,
                                                                   loop_function=lf)

        if self.cell_type == 'LSTM':
            hidden_abstract = encoder_state.h
        elif self.cell_type == 'GRU':
            hidden_abstract = encoder_state

        # F_update
        F_update = tf.assign(F, F_new_value)

        real_hidden_abstract = tf.split(hidden_abstract, 2)[0]

        # W has shape [sum(hidden_size)*2, batch_size]
        W = tf.transpose(real_hidden_abstract)
        WTW = tf.matmul(real_hidden_abstract, W)
        FTWTWF = tf.matmul(tf.matmul(tf.transpose(F), WTW), F)
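        # WTW and FTWTWF feed the spectral relaxation of k-means used below:
        # for an orthonormal cluster-indicator surrogate F (F^T F = I),
        #     Tr(W^T W) - Tr(F^T W^T W F)
        # is minimized over F by the top-K eigenvectors of W^T W, which is
        # presumably what F_new_value supplies through the F_update op.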

        with tf.name_scope("loss_reconstruct"):
            loss_reconstruct = tf.losses.mean_squared_error(labels=tf.split(targets, 2, axis=1)[0],
                                                            predictions=tf.split(decoder_outputs, 2, axis=1)[0])

        with tf.name_scope("k-means_loss"):
            loss_k_means = tf.trace(WTW) - tf.trace(FTWTWF)

        with tf.name_scope("discriminative_loss"):
            weight1 = weight_variable(shape=[hidden_abstract.get_shape().as_list()[1], 128])
            bias1 = bias_variable(shape=[128])

            weight2 = weight_variable(shape=[128, 2])
            bias2 = bias_variable(shape=[2])

            hidden = tf.nn.relu(tf.matmul(hidden_abstract, weight1) + bias1)
            output = tf.matmul(hidden, weight2) + bias2
            predict = tf.reshape(output, shape=[-1, 2])
            discriminative_loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(logits=predict, labels=real_fake_label))

        with tf.name_scope("loss_total"):
            loss = loss_reconstruct + self.lamda / 2 * loss_k_means + discriminative_loss

        regularization_loss = 0.0
        for i in range(len(tf.trainable_variables())):
            regularization_loss += tf.nn.l2_loss(tf.trainable_variables()[i])
        loss = loss + 1e-4 * regularization_loss
        input_tensors = {
            'inputs': input,
            'noise': noise,
            'F_new_value': F_new_value,
            'real_fake_label': real_fake_label
        }
        loss_tensors = {
            'loss_reconstruct': loss_reconstruct,
            'loss_k_means': loss_k_means,
            'regularization_loss': regularization_loss,
            'discriminative_loss': discriminative_loss,
            'loss': loss
        }
        output_tensor = {'prediction': predict}
        return input_tensors, loss_tensors, real_hidden_abstract, F_update, output_tensor
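build_model only assembles the TF1 graph; one plausible way to drive it is sketched below, assuming a hypothetical model instance, a next_batch data helper, and a solve_F routine that recomputes the orthonormal F matrix outside the graph.

import tensorflow as tf

input_tensors, loss_tensors, hidden, F_update, output_tensor = model.build_model()
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss_tensors['loss'])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(10000):
        # next_batch (hypothetical) returns real and fake series already
        # concatenated along the batch axis, plus matching noise and labels
        batch, noise, labels = next_batch()
        feed = {input_tensors['inputs']: batch,
                input_tensors['noise']: noise,
                input_tensors['real_fake_label']: labels}
        _, losses, codes = sess.run([train_op, loss_tensors, hidden],
                                    feed_dict=feed)
        if step % 10 == 0:
            # solve_F (hypothetical) refreshes the orthonormal F, e.g. from
            # the top-K left singular vectors of the current codes
            sess.run(F_update, {input_tensors['F_new_value']: solve_F(codes)})
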
Example #2
    def build_model(self):
        inputs = tf.placeholder(tf.float32,
                                [None, self.num_steps, self.embedding_size],
                                name='inputs')  # input
        targets = tf.placeholder(tf.float32, [None, self.class_num],
                                 name='targets')
        vaild_length = tf.placeholder(tf.int32, [None], name='vaild_len')
        keep_prob = tf.placeholder(tf.float32)

        assert len(self.hidden_size) == len(self.dilations)
        x = tf.contrib.layers.dropout(inputs, keep_prob=keep_prob)
        # outputs: a list of 'layer' entries, each a list of 'n_step' tensors of shape [batch, hidden];
        # output: a list of 'layer' tensors of shape [batch, n_step, hidden]
        outputs, output = drnn.drnn_layer_final(x,
                                                self.hidden_size,
                                                self.dilations,
                                                self.num_steps,
                                                input_dims=self.embedding_size)

        assert len(self.dilations) == len(outputs)

        assert len(self.dilations) == len(output)

        # after stacking and reshaping, output has shape [batch, n_step, hidden_size[0] * len(self.dilations), 1]
        output = tf.stack(output, axis=2)
        output = tf.reshape(
            output,
            [-1, self.num_steps,
             len(self.dilations) * self.hidden_size[0], 1])
        #output_drop = tf.contrib.layers.dropout( output, keep_prob = keep_prob )

        # conv_output has shape [batch, n_step - 2, 1, self.output_channel] (VALID padding, kernel height 3)
        conv_output = tf.contrib.layers.conv2d(
            inputs=output,
            num_outputs=self.output_channel,
            kernel_size=[3, output.shape[2]],
            padding='VALID')

        # max_pool has [batch, 1, 1, self.output_channel]
        max_pool = tf.contrib.layers.max_pool2d(
            inputs=conv_output,
            kernel_size=[conv_output.shape[1], conv_output.shape[2]],
            stride=1,
            padding='VALID')

        # max_pool has [batch, self.output_channel]
        max_pool = tf.reshape(max_pool, [-1, self.output_channel])
        max_pool = tf.contrib.layers.dropout(max_pool, keep_prob=keep_prob)

        # vaild_hidden: one tensor per layer of shape [batch, hidden], each row taken at the time step given by vaild_length
        vaild_hidden = []
        for i in range(len(outputs)):
            L = []
            for j in range(self.batch_size):
                L.append(
                    tf.gather(outputs[i], tf.gather(vaild_length, j))[j, :])
            vaild_hidden.append(
                tf.reshape(L, [self.batch_size, self.hidden_size[0]]))

        #vaild_hidden = []
        #for i in range(len(outputs)):
        #	vaild_hidden.append ( outputs[i][-1] )

        # stacking the per-layer states gives vaild_concat_hidden of shape [batch, hidden * layer]
        vaild_concat_hidden = tf.stack(vaild_hidden, axis=1)
        vaild_concat_hidden = tf.reshape(
            vaild_concat_hidden,
            [-1, self.hidden_size[0] * len(self.dilations)])

        # lstm_aulu_task
        #lstm_fully_connected = tf.contrib.layers.legacy_fully_connected(x = vaild_concat_hidden ,num_output_units = self.fully_hidden)
        #lstm_fully_connected = tf.contrib.layers.dropout( lstm_fully_connected, keep_prob = keep_prob )
        #lstm_logits = tf.contrib.layers.legacy_fully_connected(x = lstm_fully_connected ,num_output_units = self.class_num)
        #aulu_ce = tf.nn.softmax_cross_entropy_with_logits(labels = targets, logits = lstm_logits, name = 'aulu_ce')

        # final_represent has shape [batch, hidden * layer + self.output_channel]
        final_represent = tf.concat([vaild_concat_hidden, max_pool], axis=1)

        #final_represent_drop = tf.contrib.layers.dropout( final_represent, keep_prob = keep_prob )

        #fully_connected has shape of [batch,num_output_units]
        #fully_connected = tf.contrib.layers.legacy_fully_connected(x = final_represent_drop ,num_output_units = self.fully_hidden)
        fully_connected = tf.contrib.layers.legacy_fully_connected(
            x=final_represent, num_output_units=self.fully_hidden)

        #logits has shape of [batch,self.class_num]
        fully_connected = tf.contrib.layers.dropout(fully_connected,
                                                    keep_prob=keep_prob)
        logits = tf.contrib.layers.legacy_fully_connected(
            x=fully_connected, num_output_units=self.class_num)
        ce = tf.nn.softmax_cross_entropy_with_logits(labels=targets,
                                                     logits=logits,
                                                     name='ce')

        regularization_loss = 0.0
        for i in range(len(tf.trainable_variables())):
            regularization_loss += tf.nn.l2_loss(tf.trainable_variables()[i])

        loss = tf.reduce_sum(
            ce, name='loss'
        ) + 1e-3 * regularization_loss  #+ tf.reduce_sum(aulu_ce)

        answer_probab = tf.nn.softmax(logits, name='answer_probab')
        predictions = tf.argmax(answer_probab, 1)
        correct_predictions = tf.equal(tf.argmax(answer_probab, 1),
                                       tf.argmax(targets, 1))
        accuracy = tf.cast(correct_predictions, tf.float32)

        input_tensors = {
            'inputs': inputs,
            'targets': targets,
            'vaild_length': vaild_length,
            'keep_prob': keep_prob
        }

        return input_tensors, loss, predictions, accuracy
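
As with the first example, the classifier graph would be driven by an outer session loop; a minimal sketch, again assuming a hypothetical model instance and next_batch helper (keep_prob would be fed as 1.0 at evaluation time):

import tensorflow as tf

input_tensors, loss, predictions, accuracy = model.build_model()
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(10000):
        x, y, lengths = next_batch()  # hypothetical data helper
        feed = {input_tensors['inputs']: x,
                input_tensors['targets']: y,
                input_tensors['vaild_length']: lengths,
                input_tensors['keep_prob']: 0.5}
        _, batch_loss, acc = sess.run([train_op, loss, accuracy],
                                      feed_dict=feed)
        if step % 100 == 0:
            # accuracy is a per-example 0/1 vector, so average it here
            print(step, batch_loss, acc.mean())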