def __init__(self):
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.y = placeholder(tf.float32, [None, n_y])
            self.z = placeholder(tf.float32, [None, n_z])
            self.keep_prob = placeholder(tf.float32, [])
            self.Wy1 = tf.get_variable('Wy1', [n_z, 512], tf.float32, glorot_normal_initializer())
            self.by1 = tf.get_variable('by1', [512], tf.float32, zeros_initializer())
            self.Wy2 = tf.get_variable('Wy2', [512, 512], tf.float32, glorot_normal_initializer())
            self.by2 = tf.get_variable('by2', [512], tf.float32, zeros_initializer())
            self.Wy3 = tf.get_variable('Wy3', [512, n_y], tf.float32, glorot_normal_initializer())
            self.by3 = tf.get_variable('by3', [n_y], tf.float32, zeros_initializer())

            # two-layer MLP with dropout on the input and on each hidden activation
            z = dropout(self.z, self.keep_prob)
            h = tf.nn.relu(tf.matmul(z, self.Wy1) + self.by1)
            h = dropout(h, self.keep_prob)
            h = tf.nn.relu(tf.matmul(h, self.Wy2) + self.by2)
            h = dropout(h, self.keep_prob)

            # independent sigmoid per output unit (multi-label prediction)
            self.pred = tf.sigmoid(tf.matmul(h, self.Wy3) + self.by3)

            # element-wise binary cross-entropy, summed over labels, averaged over the batch
            cost = -self.y * tf.log(self.pred + 1e-6) - (1. - self.y) * tf.log(1. - self.pred + 1e-6)
            self.cost = tf.reduce_mean(tf.reduce_sum(cost, 1))

            # per-label accuracy at a 0.5 threshold
            self.pred_mask = tf.cast(self.pred >= 0.5, tf.int32)
            self.tmp = tf.cast(self.y, tf.int32)
            self.acc_mask = tf.cast(tf.equal(self.tmp, self.pred_mask), tf.float32)
            self.acc = tf.reduce_mean(self.acc_mask)
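
A minimal usage sketch for the graph above (assumptions: the bare `placeholder`/`dropout`/initializer names are imported from `tensorflow` at module level, `n_y` and `n_z` are module-level sizes, and `Model` is a stand-in name for the enclosing class; a training op would be added to the graph the same way):

import numpy as np
import tensorflow as tf

model = Model()  # hypothetical name for the class that owns this __init__
with model.graph.as_default():
    init_op = tf.global_variables_initializer()
with tf.Session(graph=model.graph) as sess:
    sess.run(init_op)
    z_batch = np.random.rand(8, n_z).astype(np.float32)             # stand-in inputs
    y_batch = np.random.randint(0, 2, (8, n_y)).astype(np.float32)  # stand-in labels
    cost, acc = sess.run([model.cost, model.acc],
                         feed_dict={model.z: z_batch, model.y: y_batch,
                                    model.keep_prob: 1.0})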
Example #2
def cnn_graph(x, keep_prob, size, captcha_list=CAPTCHA_LIST, captcha_len=CAPTCHA_LEN):
    # reshape the flat input into a single-channel image batch
    x_image = reshape(x, shape=[-1, size[0], size[1], 1])
    # conv block 1: 3x3 conv -> ReLU -> 2x2 max pool -> dropout
    w_conv1 = weight_variable([3, 3, 1, 32])
    b_conv1 = bias_variable([32])
    h_conv1 = relu(conv2d(x_image, w_conv1) + b_conv1)
    h_pool1 = max_pool2d(h_conv1)
    h_drop1 = dropout(h_pool1, rate=1 - keep_prob)
    # conv block 2
    w_conv2 = weight_variable([3, 3, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = relu(conv2d(h_drop1, w_conv2) + b_conv2)
    h_pool2 = max_pool2d(h_conv2)
    h_drop2 = dropout(h_pool2, rate=1 - keep_prob)
    # conv block 3
    w_conv3 = weight_variable([3, 3, 64, 64])
    b_conv3 = bias_variable([64])
    h_conv3 = relu(conv2d(h_drop2, w_conv3) + b_conv3)
    h_pool3 = max_pool2d(h_conv3)
    h_drop3 = dropout(h_pool3, rate=1 - keep_prob)
    # fully connected layer over the flattened feature maps
    image_height = int(h_drop3.shape[1])
    image_width = int(h_drop3.shape[2])
    w_fc = weight_variable([image_height * image_width * 64, 1024])
    b_fc = bias_variable([1024])
    h_drop3_re = reshape(h_drop3, [-1, image_height * image_width * 64])
    h_fc = relu(matmul(h_drop3_re, w_fc) + b_fc)
    h_drop_fc = dropout(h_fc, rate=1 - keep_prob)
    # output logits: one score per (character position, candidate character) pair
    w_out = weight_variable([1024, len(captcha_list) * captcha_len])
    b_out = bias_variable([len(captcha_list) * captcha_len])
    y_conv = matmul(h_drop_fc, w_out) + b_out
    return y_conv
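
The snippet above relies on helpers that are not shown (`weight_variable`, `bias_variable`, `conv2d`, `max_pool2d`, plus `reshape`/`relu`/`matmul`/`dropout` imported directly from `tensorflow`). A minimal sketch of what such helpers typically look like in this TF1-style code, with the exact stddev, bias value, and padding choices being assumptions:

import tensorflow as tf

def weight_variable(shape, stddev=0.01):
    # small random weights
    return tf.Variable(tf.truncated_normal(shape, stddev=stddev))

def bias_variable(shape, value=0.1):
    # small positive biases
    return tf.Variable(tf.constant(value, shape=shape))

def conv2d(x, w):
    # stride-1 convolution; SAME padding keeps the spatial size unchanged
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')

def max_pool2d(x):
    # 2x2 max pooling halves height and width
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')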
Example #3
    def build(self,
              lstm_unit=256,
              hidden_unit=16,
              output_unit=1,
              learning_rate=0.001,
              encoder='lstm'):
        word_embA = self.embedding_layer(
            self.input_sentA)  # (batch_size, num_step, emb_dim)
        word_embB = self.embedding_layer(
            self.input_sentB)  # (batch_size, num_step, emb_dim)

        if encoder == 'lstm':
            repA = self.lstm(word_embA, self.input_seq_lenA, lstm_unit,
                             None)  # (batch_size, lstm_unit)
            repB = self.lstm(word_embB, self.input_seq_lenB, lstm_unit,
                             True)  # (batch_size, lstm_unit)
            input_dim = lstm_unit * 2
        elif encoder == 'bilstm':
            repA = self.bilstm(word_embA, self.input_seq_lenA, lstm_unit,
                               None)  # (batch_size, num_step, lstm_unit * 2)
            repB = self.bilstm(word_embB, self.input_seq_lenB, lstm_unit,
                               True)  # (batch_size, num_step, lstm_unit * 2)
            repA = tf.reduce_sum(repA, axis=1)  # (batch_size, lstm_unit * 2)
            repB = tf.reduce_sum(repB, axis=1)  # (batch_size, lstm_unit * 2)
            input_dim = lstm_unit * 4

        rep = tf.concat(
            [repA, repB], axis=1
        )  # lstm: (batch_size, lstm_unit * 2), bilstm: (batch_size, lstm_unit * 4)
        rep = dropout(rep, keep_prob=0.5)

        hidden = self.dense(rep, hidden_unit,
                            'hidden')  # (batch_size, hidden_unit)
        hidden = dropout(hidden, keep_prob=0.5)

        self.output = self.dense(hidden, output_unit,
                                 'output')  # (batch_size, output_unit)
        self.output = tf.reshape(self.output, (-1, ))

        self.loss = self.loss_function(self.output)  # ()
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=learning_rate).minimize(self.loss)
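
The `build` method above also assumes a `self.dense(inputs, units, name)` helper. A possible sketch (hypothetical; the real class may define it differently) is a single fully connected layer whose variables live under the given scope name:

    def dense(self, inputs, units, name):
        # one fully connected layer; variables are scoped by `name` so the layer can be reused
        with tf.variable_scope(name):
            w = tf.get_variable('w', [int(inputs.shape[-1]), units],
                                initializer=tf.glorot_normal_initializer())
            b = tf.get_variable('b', [units], initializer=tf.zeros_initializer())
            return tf.matmul(inputs, w) + b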
def add_layer_dropput(input, in_size, out_size, keep_prob=None, activation_function=None):
    # note: weights are (out_size, in_size), so `input` is expected in column-major
    # layout, i.e. shaped (in_size, batch)
    Weights = Variable(random_normal([out_size, in_size]))
    Biases = Variable(zeros([out_size, 1]) + 0.1)
    Wx_plus_b = matmul(Weights, input) + Biases
    Wx_plus_b = nn.dropout(Wx_plus_b, keep_prob=keep_prob)
    if activation_function is None:
        output = Wx_plus_b
    else:
        output = activation_function(transpose(Wx_plus_b))
        output = transpose(output)
    return output
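
A usage sketch for the layer above (names and sizes are illustrative only; note the column-major layout, with features along axis 0 and the batch along axis 1):

xs = tf.placeholder(tf.float32, [784, None])   # (in_size, batch)
keep_prob = tf.placeholder(tf.float32)
hidden = add_layer_dropput(xs, 784, 256, keep_prob=keep_prob,
                           activation_function=tf.nn.relu)
prediction = add_layer_dropput(hidden, 256, 10, keep_prob=keep_prob,
                               activation_function=tf.nn.softmax)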
Example #5
    def forward_pass(self, input_data):
        input_data = input_data / 255

        pre_inception = InceptionNet._pre_inception_layer(
            input_data, self.pre_inception_filters)

        inception_1 = InceptionNet._inception_layer(pre_inception,
                                                    self.inception_1_filters)
        inception_2 = InceptionNet._inception_layer(inception_1,
                                                    self.inception_2_filters)
        # print(inception_2)

        post_inception_pool = InceptionNet._max_pool(inception_2, (3, 3),
                                                     (2, 2))

        inception_3 = InceptionNet._inception_layer(post_inception_pool,
                                                    self.inception_3_filters)
        inception_4 = InceptionNet._inception_layer(inception_3,
                                                    self.inception_4_filters)
        inception_5 = InceptionNet._inception_layer(inception_4,
                                                    self.inception_5_filters)
        inception_6 = InceptionNet._inception_layer(inception_5,
                                                    self.inception_6_filters)
        inception_7 = InceptionNet._inception_layer(inception_6,
                                                    self.inception_7_filters)

        post_inception_pool_2 = InceptionNet._max_pool(inception_7, (3, 3),
                                                       (2, 2))

        inception_8 = InceptionNet._inception_layer(post_inception_pool_2,
                                                    self.inception_8_filters)
        inception_9 = InceptionNet._inception_layer(inception_8,
                                                    self.inception_9_filters)

        post_inception_pool_3 = nn.avg_pool(inception_9, (7, 7),
                                            strides=4,
                                            padding="SAME")

        flatten_layer = tf.reshape(
            tf.keras.backend.flatten(post_inception_pool_3),
            [1024, input_data.shape[0]])

        relu_layer = nn.relu(
            tf.matmul(tf.transpose(self.relu_weights), flatten_layer) +
            self.relu_bias)

        dropout_layer = nn.dropout(relu_layer, 0.4)  # second positional arg is keep_prob in TF1, rate in TF2

        linear_layer = tf.matmul(tf.transpose(self.linear_weights),
                                 dropout_layer) + self.linear_bias
        return nn.softmax(tf.transpose(nn.tanh(linear_layer)))
    def _construct_model(self):
        n_steps = tf.shape(self.words)[0]
        n_x, n_h, n_v = self.options['n_x'], self.options['n_h'], self.options[
            'n_v']
        batch_size = tf.shape(self.y)[0]
        words_embed = embedding_lookup(self.embeddings, self.words)
        # n_steps, batch_size, embed_dim
        words_embed = dropout(words_embed, self.keep_prob,
                              (1, batch_size, n_x))
        # convert video feature from None * n_z to None * n_x
        # vid_feat_proj shape: (1, batch_size, embed_dim)
        vid_feat_proj = expand_dims(matmul(self.z, self.c0), 0)
        # use video feature as the input for the first step
        # state_below shape: (n_steps, batch_size, embed_dim)
        state_below = concat([vid_feat_proj, words_embed[:-1]], 0)

        # h_list shape: (n_steps, batch_size, h_dims)
        self.h_list = self._decoder_layer(state_below)
        self.h_list_reshape = reshape(self.h_list, [-1, n_h])
        # logits shape: (n_steps*batch_size, n_vocabulary)
        self.logits = matmul(self.h_list_reshape, self.output_w) + self.bhid
        logits_reshaped = reshape(self.logits, [-1, batch_size, n_v])
        self.sents = tf.argmax(logits_reshaped, -1)
        # w_reshape shape: (n_steps, batch_size)
        weighted_mask = self.mask / (tf.reduce_sum(self.mask, 0, keepdims=True)
                                     **0.7)
        self.loss = sparse_softmax_cross_entropy(
            logits=logits_reshaped + 1e-8,
            labels=self.words,
            weights=weighted_mask,
            reduction=tf.losses.Reduction.SUM)
        self.train_loss = self.loss / tf.cast(batch_size, tf.float32)

        test_h_list = self._test_layer(matmul(self.z, self.c0))
        test_h_list = reshape(test_h_list, (-1, n_h))
        test_logits = matmul(test_h_list, self.output_w) + self.bhid
        test_logits = reshape(test_logits, (-1, batch_size, n_v))
        self.test_sents = tf.argmax(test_logits, -1)
Example #7
def build_preprocess(input, embedding, training):
    # look up word embeddings and apply dropout only at training time
    cell_input = nn.embedding_lookup(embedding, input)
    if training:
        cell_input = nn.dropout(cell_input, DROPOUT)  # DROPOUT: module-level constant (keep probability under the TF1 dropout API)
    return cell_input
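
A short usage sketch (the names and sizes below are illustrative; `DROPOUT` is assumed to be defined at module level as a keep probability):

DROPOUT = 0.5
embedding = tf.get_variable('embedding', [10000, 300], tf.float32)
token_ids = tf.placeholder(tf.int32, [None, None])   # (batch, time)
cell_input = build_preprocess(token_ids, embedding, training=True)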
Example #8
    def train(self, input, h, ffd_drop, hid_units, adj, number):
        embed_list = []
        """g"""
        input = tf.expand_dims(input, 0)
        if ffd_drop != 0.0:
            # feature dropout followed by a 1x1 convolution projecting to hid_units channels
            seq = tf.nn.dropout(input, 1.0 - ffd_drop)
            input = tf.layers.conv1d(seq, hid_units, 1, use_bias=False)

        # scale the shared features by each node's adjacency vector for this batch slice
        for i in range(self.batch_size):
            one_adj = adj[0][i + number * self.batch_size].reshape(adj[0].shape[0], 1)
            input1 = one_adj * input
            input1 = tf.reshape(input1, [1, -1, 8])
            embed_list.append(input1)
        input = tf.concat(embed_list, axis=0)
        input1 = tf.expand_dims(input, 0)

        # stack of strided convolutions to shrink the feature dimension
        input1 = tf.nn.conv2d(input1, self.w, strides=[1, 1, 3, 1], padding='SAME')
        input1 = tf.nn.conv2d(input1, self.w1, strides=[1, 1, 3, 1], padding='SAME')
        input1 = tf.nn.conv2d(input1, self.w2, strides=[1, 1, 3, 1], padding='SAME')
        input1 = tf.nn.conv2d(input1, self.w3, strides=[1, 1, 3, 1], padding='SAME')
        input = tf.squeeze(input1)
        h = h[number * self.batch_size : self.batch_size + number * self.batch_size]

        mb = input.shape[0]
        n_channels = input.shape[2]
        dd = input.shape[1]

        # append coordinate channels, then broadcast the conditioning vector h across objects
        x_flat = tf.concat([input, self.coord_tensor], 2)
        qst = tf.expand_dims(h, 1)
        qst = tf.tile(input=qst, multiples=[1, 38, 1])
        qst = tf.expand_dims(qst, 2)

        x_i = tf.expand_dims(x_flat, 1)  # (64x1x25x26+5)
        x_i = tf.tile(input=x_i, multiples=[1, 38, 1, 1])  # (64x25x25x26+5)
        x_j = tf.expand_dims(x_flat, 2)  # (64x25x1x26+5)
        x_j = tf.concat([x_j, qst], 3)
        x_j = tf.tile(input=x_j, multiples=[1, 1, 38, 1])  # (64x25x25x26+5)

        # concatenate all pairs together
        x_full = tf.concat([x_i, x_j], 3)  # (64x25x25x2*26+5)
        # reshape for passing through the network
        x_ = tf.reshape(x_full, [mb * dd * dd, 84])

        x_ = tf.layers.dense(x_, 128)
        x_ = nn.relu(x_)
        x_ = tf.layers.dense(x_, 128)
        x_ = nn.relu(x_)
        x_ = tf.layers.dense(x_, 128)
        x_ = nn.relu(x_)
        x_ = tf.layers.dense(x_, 128)
        x_ = nn.relu(x_)

        # reshape again and sum over all object pairs
        x_g = tf.reshape(x_, [mb, dd * dd, 128])
        x_g = tf.reduce_sum(x_g, 1)
        x_g = tf.squeeze(x_g)
        """f"""
        x_f = tf.layers.dense(x_g, 128)
        x_f = nn.relu(x_f)
        x = tf.layers.dense(x_f, 128)
        x = nn.relu(x)
        x = nn.dropout(x, keep_prob=0.5)
        x = tf.layers.dense(x, 64)
        return x
Example #9
    def __init__(self, **kwargs):
        '''The following arguments are accepted:

        Parameters
        ----------
        vocab_size  :   int
                        Size of the vocabulary for creating embeddings
        embedding_size  :   int
                            Dimensionality of the embedding space
        memory_size :   int
                        LSTM memory size
        keep_prob   :   float
                        Keep probability (1 - dropout rate) for the embedding and LSTM layers
        subsequence_length  :   int
                                Length of the subsequences (all embeddings are padded to this
                                length)
        optimizer   :   OptimizerSpec
        '''
        ############################################################################################
        #                                 Get all hyperparameters                                  #
        ############################################################################################
        vocab_size = kwargs['vocab_size']
        embedding_size = kwargs['embedding_size']
        memory_size = kwargs['memory_size']
        keep_prob = kwargs['keep_prob']
        subsequence_length = kwargs['subsequence_length']
        optimizer_spec = kwargs['optimizer']
        optimizer = optimizer_spec.create()
        self.learning_rate = optimizer_spec.learning_rate
        self.step_counter = optimizer_spec.step_counter

        ############################################################################################
        #                                        Net inputs                                        #
        ############################################################################################
        self.batch_size = placeholder(tf.int32, shape=[], name='batch_size')
        self.is_training = placeholder(tf.bool, shape=[], name='is_training')
        self.word_ids = placeholder(tf.int32,
                                    shape=(None, subsequence_length),
                                    name='word_ids')
        self.labels = placeholder(tf.int32, shape=(None, ), name='labels')
        self.hidden_state = placeholder(tf.float32,
                                        shape=(None, memory_size),
                                        name='hidden_state')
        self.cell_state = placeholder(tf.float32,
                                      shape=(None, memory_size),
                                      name='cell_state')

        lengths = sequence_lengths(self.word_ids)

        ############################################################################################
        #                                        Embedding                                         #
        ############################################################################################
        self.embedding_matrix, _bias = get_weights_and_bias(
            (vocab_size, embedding_size))
        embeddings = cond(
            self.is_training, lambda: nn.dropout(nn.embedding_lookup(
                self.embedding_matrix, self.word_ids),
                                                 keep_prob=keep_prob),
            lambda: nn.embedding_lookup(self.embedding_matrix, self.word_ids))

        ############################################################################################
        #                                        LSTM layer                                        #
        ############################################################################################
        cell = BasicLSTMCell(memory_size, activation=tf.nn.tanh)

        # during inference, use entire ensemble
        keep_prob = cond(self.is_training, lambda: constant(keep_prob),
                         lambda: constant(1.0))
        cell = DropoutWrapper(cell, output_keep_prob=keep_prob)

        # what's the difference to just creating a zero-filled tensor tuple?
        self.zero_state = cell.zero_state(self.batch_size, tf.float32)
        state = LSTMStateTuple(h=self.cell_state, c=self.hidden_state)

        # A dynamic rnn creates the graph on the fly, so it can deal with embeddings of different
        # lengths. We do not need to unstack the embedding tensor to get rows, instead we compute
        # the actual sequence lengths and pass that
        # We are not sure how any of this works. Do we need to mask the cost function so the cell
        # outputs for _NOT_A_WORD_ inputs are ignored? Is the final cell state really relevant if it
        # was last updated with _NOT_A_WORD_ input? Does static_rnn absolve us of any of those
        # issues?
        outputs, self.state = nn.dynamic_rnn(cell,
                                             embeddings,
                                             sequence_length=lengths,
                                             initial_state=state)
        # Recreate tensor from list
        outputs = reshape(concat(outputs, 1),
                          [-1, subsequence_length * memory_size])
        self.outputs = reduce_mean(outputs)

        ############################################################################################
        #                        Fully connected layer, loss, and training                         #
        ############################################################################################
        ff1 = fully_connected(outputs, 2, with_activation=False, use_bias=True)
        loss = reduce_mean(
            nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels,
                                                        logits=ff1))
        self.train_step = optimizer.minimize(loss,
                                             global_step=self.step_counter)
        self.predictions = nn.softmax(ff1)
        correct_prediction = equal(cast(argmax(self.predictions, 1), tf.int32),
                                   self.labels)
        self.accuracy = reduce_mean(cast(correct_prediction, tf.float32))

        ############################################################################################
        #                                     Create summaries                                     #
        ############################################################################################
        with tf.variable_scope('summary'):
            self.summary_loss = tf.summary.scalar('loss', loss)
            self.summary_accuracy = tf.summary.scalar('accuracy',
                                                      self.accuracy)
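
Two helpers used above (`get_weights_and_bias` and `sequence_lengths`) are not shown; the sketches below are hypothetical reconstructions based only on their call sites, assuming id 0 marks padded (`_NOT_A_WORD_`) positions:

def get_weights_and_bias(shape):
    # trainable matrix of the given shape plus a bias sized to its last dimension
    weights = tf.Variable(tf.random_uniform(shape, -0.1, 0.1), name='weights')
    bias = tf.Variable(tf.zeros(shape[-1:]), name='bias')
    return weights, bias

def sequence_lengths(word_ids, padding_id=0):
    # count non-padding ids per row to get the true length of each subsequence
    mask = tf.cast(tf.not_equal(word_ids, padding_id), tf.int32)
    return tf.reduce_sum(mask, axis=1)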
    def _decoder_layer(self, state_below):
        """state below: size of (n_steps, batch_size, n_x)
        """
        n_x = self.options['n_x']
        n_h = self.options['n_h']
        n_f = self.options['n_f']
        # batch_size * n_f
        y = dropout(self.y, self.keep_prob)
        tmp2_i = matmul(y, self.Wb_i)
        tmp2_f = matmul(y, self.Wb_f)
        tmp2_o = matmul(y, self.Wb_o)
        tmp2_c = matmul(y, self.Wb_c)
        # batch_size * n_f
        z = dropout(self.z, self.keep_prob)
        tmp3_i = matmul(z, self.Ca_i)
        tmp3_f = matmul(z, self.Ca_f)
        tmp3_o = matmul(z, self.Ca_o)
        tmp3_c = matmul(z, self.Ca_c)
        # batch_size * n_f
        tmp4_i = matmul(y, self.Cb_i)
        tmp4_f = matmul(y, self.Cb_f)
        tmp4_o = matmul(y, self.Cb_o)
        tmp4_c = matmul(y, self.Cb_c)

        def _state_below(tmp1, tmp2, tmp3, tmp4, Wc, Cc, b):
            # print('tmp1:', tmp1, 'tmp2:', tmp2, 'tmp3:', tmp3, 'tmp4:', tmp4)
            state_b = matmul(tmp1 * tmp2, Wc) + matmul(tmp3 * tmp4, Cc) + b
            return state_b

        def _step(a, b):
            print('in decoder layer', a, b)
            word_embed1, step = b[0], b[1]

            def _preactivate(a, y, w1, w2, w3, x):
                p = matmul(matmul(a, w1) * matmul(y, w2), w3) + x
                return p

            def _get_word_embed(h):
                word_logit = matmul(h, self.output_w) + self.bhid
                word_chosen1 = tf.argmax(word_logit, 1)
                word_chosen2 = tf.multinomial(word_logit, 1)
                word_chosen2 = tf.squeeze(word_chosen2)
                word_chosen = tf.cond(self.if_argmax, lambda: word_chosen1,
                                      lambda: word_chosen2)
                return embedding_lookup(self.embeddings, word_chosen)

            word_embed = tf.cond((tf.random_uniform([]) >= self.sample_prob)
                                 | tf.equal(step, 0), lambda: word_embed1,
                                 lambda: _get_word_embed(a[0]))
            # batch_size = tf.shape(word_embed)[0]
            word_embed = tf.reshape(word_embed, (-1, n_x))

            # batch_size * n_f
            tmp1_i = matmul(word_embed, self.Wa_i)
            tmp1_f = matmul(word_embed, self.Wa_f)
            tmp1_o = matmul(word_embed, self.Wa_o)
            tmp1_c = matmul(word_embed, self.Wa_c)
            # batch_size * n_h
            input_i = _state_below(tmp1_i, tmp2_i, tmp3_i, tmp4_i, self.Wc_i,
                                   self.Cc_i, self.b_i)
            input_f = _state_below(tmp1_f, tmp2_f, tmp3_f, tmp4_f, self.Wc_f,
                                   self.Cc_f, self.b_f)
            input_o = _state_below(tmp1_o, tmp2_o, tmp3_o, tmp4_o, self.Wc_o,
                                   self.Cc_o, self.b_o)
            input_c = _state_below(tmp1_c, tmp2_c, tmp3_c, tmp4_c, self.Wc_c,
                                   self.Cc_c, self.b_c)
            # batch_size * n_h
            preact_i = _preactivate(a[0], y, self.Ua_i, self.Ub_i, self.Uc_i,
                                    input_i)
            preact_f = _preactivate(a[0], y, self.Ua_f, self.Ub_f, self.Uc_f,
                                    input_f)
            preact_o = _preactivate(a[0], y, self.Ua_o, self.Ub_o, self.Uc_o,
                                    input_o)
            preact_c = _preactivate(a[0], y, self.Ua_c, self.Ub_c, self.Uc_c,
                                    input_c)

            i = tf.sigmoid(preact_i)
            f = tf.sigmoid(preact_f)
            o = tf.sigmoid(preact_o)
            c = tf.tanh(preact_c)
            c = f * a[1] + i * c
            h = o * tf.tanh(c)
            return h, c

        # self.mask: shape n_steps * batch_size
        # state_below: shape n_steps * batch_size * n_x
        n_steps = tf.shape(state_below)[0]
        steps = tf.range(n_steps, dtype=tf.int32)
        elems = [state_below, steps]
        batch_size = tf.shape(state_below)[1]
        init_t = (tf.zeros([batch_size, n_h]), tf.zeros([batch_size, n_h]))
        h_list, c_list = tf.scan(_step, elems, init_t)
        h_list = dropout(h_list, self.keep_prob, [1, batch_size, n_h])
        return h_list
Example #11
    def train_layers(self, train_x, train_y, test_x, test_y):
        params = {}

        X = tf.placeholder(tf.float32, [None, self.opt['n_dim']])
        Y = tf.placeholder(tf.float32, [None, self.opt['n_classes']])
        keep_prob = tf.placeholder(tf.float32)  #for dropout

        params['W1'] = tf.Variable(
            tf.random_normal([self.opt['n_dim'], self.opt['num_hidden1']],
                             mean=0,
                             stddev=self.opt['std']))
        params['b1'] = tf.Variable(
            tf.random_normal([self.opt['num_hidden1']],
                             mean=0,
                             stddev=self.opt['std']))
        params['a1'] = nn.sigmoid(tf.matmul(X, params['W1']) + params['b1'])
        params['dropout1'] = nn.dropout(params['a1'], keep_prob)

        params['W2'] = tf.Variable(
            tf.random_normal(
                [self.opt['num_hidden1'], self.opt['num_hidden2']],
                mean=0,
                stddev=self.opt['std']))
        params['b2'] = tf.Variable(
            tf.random_normal([self.opt['num_hidden2']],
                             mean=0,
                             stddev=self.opt['std']))
        params['a2'] = nn.relu(
            tf.matmul(params['dropout1'], params['W2']) + params['b2'])
        params['dropout2'] = nn.dropout(params['a2'], keep_prob)

        params['W3'] = tf.Variable(
            tf.random_normal(
                [self.opt['num_hidden2'], self.opt['num_hidden3']],
                mean=0,
                stddev=self.opt['std']))
        params['b3'] = tf.Variable(
            tf.random_normal([self.opt['num_hidden3']],
                             mean=0,
                             stddev=self.opt['std']))
        params['a3'] = nn.tanh(
            tf.matmul(params['dropout2'], params['W3']) + params['b3'])
        params['dropout3'] = nn.dropout(params['a3'], keep_prob)

        params['outW'] = tf.Variable(
            tf.random_normal([self.opt['num_hidden3'], self.opt['n_classes']],
                             mean=0,
                             stddev=self.opt['std']))
        params['outb'] = tf.Variable(
            tf.random_normal([self.opt['n_classes']],
                             mean=0,
                             stddev=self.opt['std']))

        out = nn.softmax(
            tf.matmul(params['dropout3'], params['outW']) + params['outb'])

        # cross-entropy on the softmax output; the small epsilon guards against log(0)
        cost = tf.reduce_mean(
            -tf.reduce_sum(Y * tf.log(out + 1e-8), reduction_indices=[1]))
        optimizer = tf.train.AdamOptimizer(
            self.opt['learning_rate']).minimize(cost)

        correct_pred = tf.equal(tf.argmax(out, 1), tf.argmax(Y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

        cost_history = np.empty(shape=[1], dtype=float)
        y, y_pred = None, None

        #reshape labels into a one hot vector
        f = FeatureParser()
        train_y = f.one_hot_encode(train_y)
        test_y = f.one_hot_encode(test_y)

        print('TRAIN_ONE_HOT_LABEL{}'.format(train_y))

        print('Training...')
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for epoch in range(training_epochs):
                _, loss, acc = sess.run([optimizer, cost, accuracy],
                                        feed_dict={
                                            X: train_x,
                                            Y: train_y,
                                            keep_prob: 0.5
                                        })
                cost_history = np.append(cost_history, loss)
                if epoch % 50 == 0:
                    print('Epoch#', epoch, 'Cost:', loss, 'Train acc.:', acc)

            y_pred = sess.run(tf.argmax(out, 1),
                              feed_dict={
                                  X: test_x,
                                  keep_prob: 1.0
                              })
            y = sess.run(tf.argmax(test_y, 1))

            print(
                "Test accuracy: ",
                round(
                    sess.run(accuracy,
                             feed_dict={
                                 X: test_x,
                                 Y: test_y,
                                 keep_prob: 1.0
                             }), 3))

        fig = plt.figure(figsize=(10, 8))
        plt.plot(cost_history)
        plt.xlabel('Iterations')
        plt.ylabel('Cost')
        plt.axis([0, training_epochs, 0, np.max(cost_history)])
        plt.show()

        precision, recall, f_score, s = precision_recall_fscore_support(
            y, y_pred, average='micro')
        print('F score:', round(f_score, 3))