Example #1
File: uba.py  Project: zss8848/3book
def do_birnn_wordbag(trainX, testX, trainY, testY):
    y_test = testY
    #trainX = pad_sequences(trainX, maxlen=100, value=0.)
    #testX = pad_sequences(testX, maxlen=100, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Network building
    net = input_data(shape=[None, 100])
    net = tflearn.embedding(net, input_dim=10000, output_dim=128)
    net = tflearn.bidirectional_rnn(net, BasicLSTMCell(128), BasicLSTMCell(128))
    net = dropout(net, 0.5)
    net = fully_connected(net, 2, activation='softmax')
    net = regression(net, optimizer='adam', loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=1, run_id="uba", n_epoch=10)

    y_predict_list = model.predict(testX)
    #print y_predict_list

    y_predict = []
    for i in y_predict_list:
        #print  i[0]
        if i[0] >= 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)

    print(classification_report(y_test, y_predict))
    print(metrics.confusion_matrix(y_test, y_predict))
Example #2
def recurrent_net(net, rec_type, rec_size, return_sequence):
    """
    A quick if else block to build a recurrent layer, based on the type specified
    by the user.
    """
    if rec_type == 'lstm':
        net = tflearn.layers.recurrent.lstm(net,
                                            rec_size,
                                            return_seq=return_sequence)
    elif rec_type == 'gru':
        net = tflearn.layers.recurrent.gru(net,
                                           rec_size,
                                           return_seq=return_sequence)
    elif rec_type == 'bi_lstm':
        net = bidirectional_rnn(net,
                                BasicLSTMCell(rec_size),
                                BasicLSTMCell(rec_size),
                                return_seq=return_sequence)
    elif rec_type == 'bi_gru':
        net = bidirectional_rnn(net,
                                GRUCell(rec_size),
                                GRUCell(rec_size),
                                return_seq=return_sequence)
    else:
        raise ValueError(
            'Incorrect rnn type passed. Try lstm, gru, bi_lstm or bi_gru.')
    return net
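
A minimal usage sketch for recurrent_net (the input pipeline, vocabulary size and unit count below are illustrative assumptions, not part of the original example):

import tflearn

net = tflearn.input_data(shape=[None, 100])
net = tflearn.embedding(net, input_dim=10000, output_dim=128)
# 'bi_gru' selects the bidirectional GRU branch; 128 units per direction
net = recurrent_net(net, 'bi_gru', 128, return_sequence=False)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')
model = tflearn.DNN(net)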
Example #3
def bi_lstm(trainX, trainY, testX, testY):
    trainX = pad_sequences(trainX, maxlen=200, value=0.)
    testX = pad_sequences(testX, maxlen=200, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Network building
    net = tflearn.input_data(shape=[None, 200])
    net = tflearn.embedding(net, input_dim=20000, output_dim=128)
    net = tflearn.bidirectional_rnn(net, BasicLSTMCell(128),
                                    BasicLSTMCell(128))
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net,
                             optimizer='adam',
                             loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
    model.fit(trainX,
              trainY,
              validation_set=0.1,
              show_metric=True,
              batch_size=64,
              run_id="rnn-bilstm")
Example #4
    def LoadModel(self):
        self.window_height = 16
        self.window_width = 4
        self.threshold = 0.03
        self.ground_height = 2
        symbol_count = 13

        network = input_data(
            shape=[None, self.window_height * self.window_width, symbol_count])
        network = bidirectional_rnn(network, BasicLSTMCell(2),
                                    BasicLSTMCell(2))
        network = dropout(network, 0.8)
        network = fully_connected(network,
                                  self.window_height * symbol_count,
                                  activation='prelu')
        network = tf.reshape(network, [-1, self.window_height, symbol_count])
        network = regression(network,
                             optimizer='adagrad',
                             learning_rate=0.005,
                             loss='mean_square',
                             name='target',
                             batch_size=64)

        self.model = tflearn.DNN(network)
        self.model.load('./LSTMmodel/model.tfl')
Example #5
    def learning(self):

        # Network building
        net = input_data(shape=[None, MAXIMUM_LENGTH_DATA, 1])
        index = tf.placeholder(shape=[None], dtype=tf.int32)

        # net = bidirectional_rnn(net, BasicLSTMCell(200), BasicLSTMCell(200),return_seq=True)
        # net = dropout(net, 0.5)
        # net = tflearn.time_distributed(net, tflearn.fully_connected, [1,'softmax'])
        # net = tflearn.regression(net, optimizer='adam', learning_rate=0.001, loss='binary_crossentropy')
        # model = tflearn.DNN(net, tensorboard_verbose=3)
        # model.fit(self.trainingData.data, self.trainingData.label, validation_set=0.1, show_metric=True, batch_size=1000,n_epoch=100)

        net = bidirectional_rnn(net,
                                BasicLSTMCell(200),
                                BasicLSTMCell(200),
                                return_seq=True,
                                dynamic=True)
        net = fully_connected(net[index], 2, activation='softmax')
        net = regression(net,
                         optimizer='adam',
                         loss='categorical_crossentropy')

        # Training
        model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
        model.fit(self.trainingData.data,
                  self.trainingData.label,
                  validation_set=0.1,
                  show_metric=True,
                  batch_size=64)
    def build_network(self, ilearning_rate=0.001):
        '''
        Build the network.
        :param ilearning_rate: learning rate passed to the Adam optimizer
        :return: the constructed tflearn network
        '''

        #tf.reset_default_graph()
        net = input_data(shape=[None, 200])
        net = embedding(net,
                        input_dim=51887,
                        output_dim=200,
                        trainable=False,
                        name="EmbeddingLayer")

        #net = embedding(net, input_dim=20000, output_dim=128, trainable=False, weights_init=W,
        #                        name="EmbeddingLayer")
        #net = tflearn.embedding(net, input_dim=20000, output_dim=128, trainable=False, weights_init = W, name="EmbeddingLayer")
        net = bidirectional_rnn(net, BasicLSTMCell(128), BasicLSTMCell(128))
        net = dropout(net, 0.5)
        net = fully_connected(net, 2, activation='softmax')
        net = regression(net,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=ilearning_rate)

        return net
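
A hedged usage sketch for build_network (the instance name clf and the training arrays are hypothetical placeholders, not from the original project):

net = clf.build_network(ilearning_rate=0.001)   # clf: instance of the enclosing class (hypothetical)
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=64)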
def lstm_block(input,
               hidden_units=128,
               dropout=0.5,
               reuse=False,
               layers=1,
               dynamic=True,
               return_seq=False,
               bidirectional=False,
               return_state=False):
    output = None
    prev_output = input
    for n_layer in range(layers):
        if not bidirectional:
            if n_layer < layers - 1:
                output = tflearn.lstm(prev_output,
                                      hidden_units,
                                      dropout=dropout,
                                      dynamic=dynamic,
                                      reuse=reuse,
                                      scope='lstm_{}'.format(n_layer),
                                      return_seq=True)
                output = tf.stack(output, axis=0)
                output = tf.transpose(output, perm=[1, 0, 2])
                prev_output = output
                continue
            output = tflearn.lstm(prev_output,
                                  hidden_units,
                                  dropout=dropout,
                                  dynamic=dynamic,
                                  reuse=reuse,
                                  scope='lstm_{}'.format(n_layer),
                                  return_seq=return_seq,
                                  return_state=return_state)
        else:
            if n_layer < layers - 1:
                output = bidirectional_rnn(prev_output,
                                           BasicLSTMCell(hidden_units,
                                                         reuse=reuse),
                                           BasicLSTMCell(hidden_units,
                                                         reuse=reuse),
                                           dynamic=dynamic,
                                           scope='blstm_{}'.format(n_layer),
                                           return_seq=True)
                output = tf.stack(output, axis=0)
                output = tf.transpose(output, perm=[1, 0, 2])
                prev_output = output
                continue
            output = bidirectional_rnn(prev_output,
                                       BasicLSTMCell(hidden_units,
                                                     reuse=reuse),
                                       BasicLSTMCell(hidden_units,
                                                     reuse=reuse),
                                       dynamic=dynamic,
                                       scope='blstm_{}'.format(n_layer),
                                       return_seq=return_seq,
                                       return_states=return_state)
    return output
Example #8
def bi_LSTM():
    # Network building
    net = input_data(shape=[None, charvec_len])
    net = embedding(net, input_dim=in_dim, output_dim=nn_dim)
    net = dropout(net, drop1)

    net = bidirectional_rnn(net, BasicLSTMCell(nn_dim, forget_bias=1.),
                            BasicLSTMCell(nn_dim, forget_bias=1.))
    net = dropout(net, drop2)
    net = fully_connected(net, 2, activation='softmax')

    net = regression(net,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=lrate)

    return net
Example #9
def bi_LSTM():
    # Network building
    net = input_data(shape=[None, 440])
    net = embedding(net, input_dim=20000, output_dim=128)
    net = dropout(net, 0.9)

    net = bidirectional_rnn(net, BasicLSTMCell(128, forget_bias=1.),
                            BasicLSTMCell(128, forget_bias=1.))
    net = dropout(net, 0.7)
    net = fully_connected(net, 2, activation='softmax')

    net = regression(net,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)

    return net
Example #10
def encoder(x: tf.Tensor, noise, initial_state: LSTMStateTuple, seq_len,
            n_joints, motion_selection):
    x = tf.expand_dims(x, axis=1)
    x = tf.tile(x, [1, seq_len, 1])
    x = tf.concat([x, tf.expand_dims(noise, axis=2)], axis=2)

    with tf.variable_scope('lstm1'):
        lstm = BasicLSTMCell(2**n_joints + motion_selection)
        x, final_state = tf.nn.dynamic_rnn(lstm,
                                           x,
                                           dtype=tf.float32,
                                           initial_state=initial_state)
    x = tf.layers.dense(x, 2**n_joints + motion_selection)

    with tf.variable_scope('lstm2'):
        lstm = BasicLSTMCell(2**n_joints + motion_selection)
        x, final_state = tf.nn.dynamic_rnn(lstm,
                                           x,
                                           dtype=tf.float32,
                                           initial_state=initial_state)

    x = tf.layers.dense(x, n_joints)
    x = tf.clip_by_value(x, -pi, pi)
    return x, final_state
Example #11
File: resNet_4.py  Project: rslu2000/ENCASE
                                  downsample=True)
print("resn16", net.get_shape())
'''net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
print("resn18", net.get_shape())
net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
print("resn20", net.get_shape())'''

net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
#net = tflearn.global_avg_pool(net)
# LSTM
print("before LSTM, before reshape", net.get_shape())
############ reshape for sub_seq
net = tf.reshape(net, [-1, n_dim // n_split, 512])
print("before LSTM", net.get_shape())
net = bidirectional_rnn(net, BasicLSTMCell(256), BasicLSTMCell(256))
print("after LSTM", net.get_shape())
#net = tflearn.layers.recurrent.lstm(net, n_units=512)
#print("after LSTM", net.get_shape())
net = dropout(net, 0.5)

# Regression
net = tflearn.fully_connected(net, 32, activation='sigmoid')
net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net, 4, activation='softmax')
print("dense", net.get_shape())
net = tflearn.regression(
    net,
    optimizer='adam',  #momentum',
    loss='categorical_crossentropy')
#,learning_rate=0.05)
Example #12
x_frames = np.array(x_frames)
y_frames = np.array(y_frames)

x_frames, y_frames = unison_shuffled_copies(x_frames, y_frames)

split = int(0.05 * x_frames.shape[0])
valid_x = x_frames[0:split]
valid_y = y_frames[0:split]
train_x = x_frames[split:]
train_y = y_frames[split:]

print(x_frames.shape)
print(x_frames.shape[1], x_frames.shape[2])
net = tflearn.input_data([None, x_frames.shape[1], x_frames.shape[2]])
print(net.get_shape().as_list())
net = bidirectional_rnn(net, BasicLSTMCell(number_hidden),
                        BasicLSTMCell(number_hidden))
net = dropout(net, 0.8)
fc = tflearn.fully_connected(net, highway_size, activation='elu',
                             regularizer='L2', weight_decay=0.001)
net = fc

for i in range(highway_layer_amount):
    net = tflearn.highway(net, highway_size, activation='elu',
                          regularizer='L2', weight_decay=0.001,
                          transform_dropout=0.8)

net = tflearn.fully_connected(net, y_frames.shape[1], activation='elu')
net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                         loss='mean_square')
Example #13
testX, testY = test

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=input_length_each_seq, value=0.)
testX = pad_sequences(testX, maxlen=input_length_each_seq, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY, n_class)
testY = to_categorical(testY, n_class)

# Network building
net = input_data(shape=[None, input_length_each_seq])  # [None, 200]
net = embedding(net, input_dim=n_datas, output_dim=hiddle_layes)  # [None, 200, 128]
net = tf.unstack(net, input_length_each_seq, 1)
print(net); exit(-1)
net = bidirectional_rnn(net, BasicLSTMCell(hiddle_layes), BasicLSTMCell(hiddle_layes))

net = dropout(net, 0.5)
net = fully_connected(net, n_class, activation='softmax')
net = regression(net, optimizer='adam', loss='categorical_crossentropy')

# Training
model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=64)
2. TensorFlow

Reference: TensorFlow-Examples

""" Bi-directional Recurrent Neural Network.
A Bi-directional Recurrent Neural Network (LSTM) implementation example using
the TensorFlow library. This example uses the MNIST database of handwritten
digits.
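
The referenced TensorFlow-Examples script builds the bidirectional LSTM with the low-level TF 1.x RNN API instead of tflearn; a condensed sketch under that assumption (the layer sizes are the usual MNIST settings, not taken from this page):

import tensorflow as tf
from tensorflow.contrib import rnn

n_steps, n_input, n_hidden, n_classes = 28, 28, 128, 10

x = tf.placeholder(tf.float32, [None, n_steps, n_input])
# static_bidirectional_rnn expects a length-n_steps list of [batch, n_input] tensors
inputs = tf.unstack(x, n_steps, axis=1)

fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
outputs, _, _ = rnn.static_bidirectional_rnn(fw_cell, bw_cell, inputs, dtype=tf.float32)

# classify from the last time step's concatenated forward/backward outputs
logits = tf.layers.dense(outputs[-1], n_classes)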
Example #14
def SlstmLayer(incoming,
               seq_length,
               input_dim,
               output_dim,
               policy,
               dropout_keepprob=0.5,
               pooling=False,
               update="straight",
               scope=None,
               name="SlstmLayer"):
    '''
    incoming:
        [batch_size, sen_length, choose_length, input_dim[0]]
        [batch_size, sen_length, choose_length, input_dim[1]]
    output:
        [batch_size, output_dim[0]]
        [batch_size, sen_length]
    '''

    with tf.variable_scope(scope, default_name=name,
                           values=[incoming]) as scope:
        name = scope.name

        batch_size = tf.shape(incoming[0])[0]
        sen_length = incoming[0].get_shape()[1].value
        choose_length = incoming[0].get_shape()[2].value

        cell = BasicLSTMCell(output_dim[0])

        def call_cell(inputs, status):
            with tf.variable_scope("cell") as scope:
                ans = cell(inputs, status, scope=scope)[1]
                cell.reuse = True
                return ans

        cell_p = BasicLSTMCell(output_dim[1])

        def call_cell_p(inputs, status):
            with tf.variable_scope("cell_p") as scope:
                ans = cell_p(inputs, status, scope=scope)[1]
                cell_p.reuse = True
                return ans

        x_seq = tf.unstack(incoming[0], axis=1)
        h_seq = [tf.zeros([batch_size, output_dim[0]])] * choose_length
        c_seq = [tf.zeros([batch_size, output_dim[0]])] * choose_length

        if incoming[1]:
            x_p_seq = tf.unstack(incoming[1], axis=1)
            h_p_seq = [tf.zeros([batch_size, output_dim[1]])] * choose_length
            c_p_seq = [tf.zeros([batch_size, output_dim[1]])] * choose_length
        else:
            x_p_seq = x_seq
            h_p_seq = h_seq
            c_p_seq = c_seq

        action_continous_seq = []
        action_seq = []

        for time, (x, x_p) in enumerate(zip(x_seq, x_p_seq)):

            h_pre = tf.stack(h_seq[:-choose_length - 1:-1], axis=1)
            h_pre_s = tf.stop_gradient(h_pre)
            x_s = tf.stop_gradient(x)

            if incoming[1]:
                h_p_pre = tf.stack(h_p_seq[:-choose_length - 1:-1], axis=1)
                action = policy(tf.concat([h_pre_s, h_p_pre], axis=2),
                                tf.concat([x_s, x_p], axis=2),
                                scope=scope)
            else:
                action = policy(h_pre_s, x_s, scope=scope)

            action_continous_seq.append(action)

            g = tf.get_default_graph()
            with ops.name_scope("MultinomialSample") as name:
                with g.gradient_override_map({
                        "OneHot": "ST_OneHot",
                        "Multinomial": "ST_Multinomial"
                }):
                    action = tf.one_hot(tf.multinomial(action, 1),
                                        choose_length,
                                        on_value=1.0,
                                        off_value=0.0,
                                        dtype=tf.float32,
                                        axis=-1)

            action_seq.append(action)

            h_pre = h_seq[:-choose_length - 1:-1]
            c_pre = c_seq[:-choose_length - 1:-1]
            x_seq = tf.unstack(x, axis=1)

            h_p_pre = h_p_seq[:-choose_length - 1:-1]
            c_p_pre = c_p_seq[:-choose_length - 1:-1]
            x_p_seq = tf.unstack(x_p, axis=1)

            all_h = []
            all_c = []
            all_h_p = []
            all_c_p = []
            for h, c, h_p, c_p, r, r_p in zip(h_pre, c_pre, h_p_pre, c_p_pre,
                                              x_seq, x_p_seq):
                state = call_cell(r, (c, h))
                all_c.append(state[0])
                all_h.append(state[1])
                if input_dim[1] == 0:
                    all_c_p.append(c_p_seq[-1])
                    all_h_p.append(h_p_seq[-1])
                else:
                    state_p = call_cell_p(
                        tf.concat([tf.stop_gradient(r), r_p], axis=1),
                        (c_p, h_p))
                    all_c_p.append(state_p[0])
                    all_h_p.append(state_p[1])

            action = tf.reshape(action, [batch_size, choose_length, 1])
            now_h = all_h[0]
            now_c = all_c[0]
            #now_h = tf.reduce_sum(tf.stack(all_h, axis = 1) * action, axis = 1)
            # now_h = tf.where(tf.less(time, seq_length), now_h, h_seq[-1])
            #now_c = tf.reduce_sum(tf.stack(all_c, axis = 1) * action, axis = 1)
            # now_c = tf.where(tf.less(time, seq_length), now_c, c_seq[-1])
            h_seq.append(now_h)
            c_seq.append(now_c)

            if incoming[1]:
                now_h_p = tf.reduce_sum(tf.stack(all_h_p, axis=1) * action,
                                        axis=1)
                now_h_p = tf.where(tf.less(time, seq_length), now_h_p,
                                   h_p_seq[-1])
                now_c_p = tf.reduce_sum(tf.stack(all_c_p, axis=1) * action,
                                        axis=1)
                now_c_p = tf.where(tf.less(time, seq_length), now_c_p,
                                   c_p_seq[-1])
                h_p_seq.append(now_h_p)
                c_p_seq.append(now_c_p)

        if pooling:
            output_h = tf.reduce_max(tf.stack(h_seq, axis=2), axis=2)
        else:
            output_h = h_seq[-1]

        output_h = tflearn.dropout(output_h, dropout_keepprob, name="dropOut")
        output_action = tf.stack(action_seq, axis=1)
        action_continous = tf.stack(action_continous_seq, axis=1)

    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, output_h)
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, output_action)
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name,
                         action_continous)

    return output_h, output_action
Example #15
testX, testY = test

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=200, value=0.)
testX = pad_sequences(testX, maxlen=200, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY, nb_classes=2)
testY = to_categorical(testY, nb_classes=2)

# Network building
net = input_data(shape=[None, 200])  #same shape as the max length
net = embedding(
    net, input_dim=20000, output_dim=128
)  #creates embedding matrix. Not sure if I need this for project 4
net = bidirectional_rnn(net, BasicLSTMCell(128), BasicLSTMCell(
    128))  #has two LSTMs with 128 units go in forward and backward directions.
net = dropout(net,
              0.5)  #dropout with keep probability of 0.5. All are finalized
net = fully_connected(
    net, 2, activation='softmax'
)  #makes softmax probabilities over 2 categories, true and false
net = regression(
    net, optimizer='adam', loss='categorical_crossentropy'
)  #runs adam optimizer on net minimizing categorical cross entropy loss

# Training
model = tflearn.DNN(
    net, clip_gradients=5, tensorboard_verbose=1
)  # clips gradients at 5. Prints out loss, accuracy and gradients. All are finalized
model.fit(
Example #16
def get_resnet_feature(test_data, test_label, test_pid, pid_map):
    n_dim = 6000
    n_split = 300

    tf.reset_default_graph()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

    ### split
    #X = X.reshape([-1, n_split, 1])
    #testX = testX.reshape([-1, n_split, 1])

    # Building Residual Network
    net = tflearn.input_data(shape=[None, n_dim, 1])
    ############ reshape for sub_seq
    net = tf.reshape(net, [-1, n_split, 1])
    net = tflearn.conv_1d(net, 64, 16, 2)
    #net = tflearn.conv_1d(net, 64, 16, 2, regularizer='L2', weight_decay=0.0001)
    net = tflearn.batch_normalization(net)

    # Residual blocks
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      64,
                                      downsample_strides=2,
                                      downsample=True,
                                      is_first_block=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      64,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      128,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      128,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      256,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      256,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      512,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      512,
                                      downsample_strides=2,
                                      downsample=True)

    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    #net = tflearn.global_avg_pool(net)
    # LSTM
    ############ reshape for sub_seq
    net = tf.reshape(net, [-1, n_dim // n_split, 512])
    net = bidirectional_rnn(net, BasicLSTMCell(256), BasicLSTMCell(256))
    #net = tflearn.layers.recurrent.lstm(net, n_units=512)
    #print("after LSTM", net.get_shape())
    net = dropout(net, 0.5)

    # Regression
    feature_layer = tflearn.fully_connected(net, 32, activation='sigmoid')
    net = tflearn.dropout(feature_layer, 0.5)
    net = tflearn.fully_connected(net, 4, activation='softmax')
    net = tflearn.regression(
        net,
        optimizer='adam',  #momentum',
        loss='categorical_crossentropy')
    #,learning_rate=0.1)
    ## save model
    ### load
    model = tflearn.DNN(net)
    run_id = 'resnet_6000_500_10_5_v1'
    model.load('../model/resNet/' + run_id)

    ### create new model, and get features
    m2 = tflearn.DNN(feature_layer, session=model.session)
    tmp_feature = []
    num_of_test = len(test_data)
    cur_data = []
    pre = []
    for i in range(num_of_test):
        cur_data.append(test_data[i])
        if (i % 2000 == 0 or i == (num_of_test - 1)) and i != 0:
            #tmp_test_data = test_data[i].reshape([-1, n_dim, 1])
            tmp_testX = np.array(cur_data, dtype=np.float32)
            tmp_feature.extend(m2.predict(tmp_testX.reshape([-1, n_dim, 1])))
            cur_data = []
            pre.extend(model.predict(tmp_testX))

    tmp_feature = np.array(tmp_feature)

    test_pid = np.array(test_pid, dtype=np.string_)

    y_num = len(pid_map)
    features = [[0. for j in range(32)] for i in range(y_num)]
    re_labels = [[0. for j in range(4)] for i in range(y_num)]
    y_pre = [[0. for j in range(4)] for i in range(y_num)]
    y_sec_pre = [[0. for j in range(4)] for i in range(y_num)]
    y_third_pre = [[0. for j in range(4)] for i in range(y_num)]
    y_fourth_pre = [[0. for j in range(4)] for i in range(y_num)]
    y_fifth_pre = [[0. for j in range(4)] for i in range(y_num)]
    y_sixth_pre = [[0. for j in range(4)] for i in range(y_num)]
    y_seventh_pre = [[0. for j in range(4)] for i in range(y_num)]
    y_groundtruth = [[0. for j in range(4)] for i in range(y_num)]
    #print(y_num)

    for j in range(len(tmp_feature)):
        feature_pred = np.array(tmp_feature[j], dtype=np.float32)
        #print(len(feature_pred))
        i_pred = np.array(pre[j], dtype=np.float32)
        cur_pid = str(test_pid[j], 'utf-8')

        list_id = pid_map[cur_pid]
        #print (list_id)
        temp_feature = np.array(features[list_id], dtype=np.float32)
        temp_pre = np.array(y_pre[list_id], dtype=np.float32)
        temp_sec_pre = np.array(y_sec_pre[list_id], dtype=np.float32)
        temp_third_pre = np.array(y_third_pre[list_id], dtype=np.float32)
        #print(temp_pre)

        max_p = temp_pre[np.argmax(temp_pre)]
        max_sec_p = temp_sec_pre[np.argmax(temp_sec_pre)]
        max_third_p = temp_third_pre[np.argmax(temp_third_pre)]
        sec_p = 0
        sec_sec_p = 0
        sec_third_p = 0
        for k in range(len(temp_pre)):
            if temp_pre[k] == max_p:
                continue
            if temp_pre[k] > sec_p:
                sec_p = temp_pre[k]

            if temp_sec_pre[k] == max_sec_p:
                continue
            if temp_sec_pre[k] > sec_sec_p:
                sec_sec_p = temp_sec_pre[k]

            if temp_third_pre[k] == max_third_p:
                continue
            if temp_third_pre[k] > sec_third_p:
                sec_third_p = temp_third_pre[k]

        cur_max_p = i_pred[np.argmax(i_pred)]
        cur_sec_p = 0
        for k in range(len(i_pred)):
            if i_pred[k] == cur_max_p:
                continue
            if i_pred[k] > cur_sec_p:
                cur_sec_p = i_pred[k]

        # top-minus-runner-up margins of the fourth..seventh stored predictions
        lower_margins = []
        for tier in (y_fourth_pre, y_fifth_pre, y_sixth_pre, y_seventh_pre):
            tier_arr = np.array(tier[list_id], dtype=np.float32)
            tier_max = tier_arr[np.argmax(tier_arr)]
            tier_sec = 0
            for v in tier_arr:
                if v != tier_max and v > tier_sec:
                    tier_sec = v
            lower_margins.append(tier_max - tier_sec)
        fourth_margin, fifth_margin, sixth_margin, seventh_margin = lower_margins

        if (cur_max_p - cur_sec_p) > (max_p - sec_p):
            y_seventh_pre[list_id] = y_sixth_pre[list_id]
            y_sixth_pre[list_id] = y_fifth_pre[list_id]
            y_fifth_pre[list_id] = y_fourth_pre[list_id]
            y_fourth_pre[list_id] = y_third_pre[list_id]
            y_third_pre[list_id] = y_sec_pre[list_id]
            y_sec_pre[list_id] = y_pre[list_id]
            y_pre[list_id] = i_pred
        elif (cur_max_p - cur_sec_p) > (max_sec_p - sec_sec_p):
            y_seventh_pre[list_id] = y_sixth_pre[list_id]
            y_sixth_pre[list_id] = y_fifth_pre[list_id]
            y_fifth_pre[list_id] = y_fourth_pre[list_id]
            y_fourth_pre[list_id] = y_third_pre[list_id]
            y_third_pre[list_id] = y_sec_pre[list_id]
            y_sec_pre[list_id] = i_pred
        elif (cur_max_p - cur_sec_p) > (max_third_p - sec_third_p):
            y_seventh_pre[list_id] = y_sixth_pre[list_id]
            y_sixth_pre[list_id] = y_fifth_pre[list_id]
            y_fifth_pre[list_id] = y_fourth_pre[list_id]
            y_fourth_pre[list_id] = y_third_pre[list_id]
            y_third_pre[list_id] = i_pred
        elif (cur_max_p - cur_sec_p) > fourth_margin:
            y_seventh_pre[list_id] = y_sixth_pre[list_id]
            y_sixth_pre[list_id] = y_fifth_pre[list_id]
            y_fifth_pre[list_id] = y_fourth_pre[list_id]
            y_fourth_pre[list_id] = i_pred
        elif (cur_max_p - cur_sec_p) > fifth_margin:
            y_seventh_pre[list_id] = y_sixth_pre[list_id]
            y_sixth_pre[list_id] = y_fifth_pre[list_id]
            y_fifth_pre[list_id] = i_pred
        elif (cur_max_p - cur_sec_p) > sixth_margin:
            y_seventh_pre[list_id] = y_sixth_pre[list_id]
            y_sixth_pre[list_id] = i_pred
        elif (cur_max_p - cur_sec_p) > seventh_margin:
            y_seventh_pre[list_id] = i_pred

        max_f = 0
        for k in range(len(temp_feature)):
            if temp_feature[k] > max_f:
                max_f = temp_feature[k]
        if max_f > 0:
            feature_pred = (feature_pred + temp_feature) / 2
            #for k in range(len(temp_feature)):
            #    feature_pred[k] = (feature_pred[k]+temp_feature[k])/2

        features[list_id] = feature_pred

        y_groundtruth[list_id] = test_label[j]

        gt_list = ["N", "A", "O", "~"]
        pred_1 = gt_list[np.argmax(i_pred)]

        if pred_1 == 'N':
            re_labels[list_id][0] += 1
        elif pred_1 == 'A':
            re_labels[list_id][1] += 1
        elif pred_1 == 'O':
            re_labels[list_id][2] += 1
        elif pred_1 == '~':
            re_labels[list_id][3] += 1
        else:
            print('wrong label')

    out_feature = []
    for i in range(len(features)):
        out_feature.append(features[i])

    out_feature = np.array(out_feature)

    for k in range(len(y_pre)):
        labels = [0. for j in range(4)]
        pred_1 = np.argmax(y_pre[k])
        labels[pred_1] += 1
        pred_2 = np.argmax(y_sec_pre[k])
        labels[pred_2] += 1
        pred_3 = np.argmax(y_third_pre[k])
        labels[pred_3] += 1

        if pred_1 == 2:
            print("O was selected!")
            continue
        elif pred_2 == 2:
            y_pre[k] = y_sec_pre[k]
            print("O was selected!")
        elif pred_3 == 2:
            y_pre[k] = y_third_pre[k]
            print("O was selected!")
        if pred_1 != np.argmax(labels):
            if pred_2 == np.argmax(labels):
                y_pre[k] = y_sec_pre[k]
                print("Second was selected!")
    MyEval.F1Score3_num(pre, test_label[:len(pre)])

    MyEval.F1Score3_num(y_pre, y_groundtruth)
    MyEval.F1Score3_num(re_labels, y_groundtruth)

    return out_feature
Example #17
def get_deep_mimic_feats(test_data):
    n_dim = 6000
    n_split = 300

    tf.reset_default_graph()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

    # Building Residual Network
    net = tflearn.input_data(shape=[None, n_dim, 1])
    print("input", net.get_shape())
    ############ reshape for sub_seq 
    net = tf.reshape(net, [-1, n_split, 1])
    print("reshaped input", net.get_shape())
    net = tflearn.conv_1d(net, 64, 16, 2)
    #net = tflearn.conv_1d(net, 64, 16, 2, regularizer='L2', weight_decay=0.0001)
    print("cov1", net.get_shape())
    net = tflearn.batch_normalization(net)
    print("bn1", net.get_shape())
    net = tflearn.activation(net, 'relu')
    print("relu1", net.get_shape())

    # Residual blocks
    net = tflearn.residual_bottleneck(net, 2, 16, 64, downsample_strides = 2, downsample=True, is_first_block = True)
    print("resn2", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides = 4, downsample=True)
    print("resn4", net.get_shape())
    # net = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides = 4, downsample=True)
    # print("resn6", net.get_shape())
    # net = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides = 4, downsample=True)
    # print("resn8", net.get_shape())


    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    print("before reshape", net.get_shape())

    # net = tf.reshape(net, [-1, n_dim//n_split*net.get_shape()[-2], net.get_shape()[-1]])
    # LSTM
    ############ reshape for sub_seq 
    before_reshaped_shape = net.get_shape().as_list()
    net = tf.reshape(net, [-1, n_dim//n_split, before_reshaped_shape[1]*before_reshaped_shape[2]])
    print("before LSTM", net.get_shape())
    net = bidirectional_rnn(net, BasicLSTMCell(64), BasicLSTMCell(64))
    print("after LSTM", net.get_shape())
    net = dropout(net, 0.5)

    # Regression
    net = tflearn.fully_connected(net, 32, activation='sigmoid')
    net = tflearn.fully_connected(net, 4, activation='softmax')
    print("dense", net.get_shape())
    net = tflearn.regression(net, optimizer='adam', loss='mean_square')


    # Training
    model = tflearn.DNN(net)
    model.load('../model/mimic/mimic_model_offline_v4.1')

    pred = []
    num_of_test = len(test_data)
    cur_data = []
    for i in range(num_of_test):
        cur_data.append(test_data[i])
        if (i % 2000 == 0 or i == (num_of_test - 1)) and i !=0:
            tmp_testX = np.array(cur_data, dtype=np.float32)
            pred.extend(model.predict(tmp_testX.reshape([-1, n_dim, 1])))
            cur_data = []
    
    return pred
Example #18
# Sequence padding
trainX = pad_sequences(trainX, maxlen=maxLen, value=0.)
#Converting labels to binary vectors
trainY = pad_sequences(trainY, maxlen=2, value=0.)
embeddings = concat_2Dvectors(embeddings, Flatten_3Dto2D(POS_vectors))

# Network building
print("Beginning neural network")
net = input_data(shape=[None, maxLen])
net = embedding(net,
                input_dim=len(embeddings),
                output_dim=len(embeddings[0]),
                trainable=False,
                name="EmbeddingLayer")
print(net.get_shape().as_list())
net = bidirectional_rnn(net, BasicLSTMCell(1024), BasicLSTMCell(1024))
net = dropout(net, 0.5)
net = fully_connected(net, 2, activation='softmax')
net = regression(net,
                 optimizer='adam',
                 loss='categorical_crossentropy',
                 learning_rate=0.005)

testX = trainX[int(0.3 * len(trainY)):]
testY = trainY[int(0.3 * len(trainY)):]

# Training
model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
embeddingWeights = tflearn.get_layer_variables_by_name('EmbeddingLayer')[0]
# Assign your own weights (for example, a numpy array [input_dim, output_dim])
model.set_weights(embeddingWeights, embeddings)
Example #19
def get_model():
    n_dim = 6000
    n_split = 300

    tf.reset_default_graph()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

    ### split
    #X = X.reshape([-1, n_split, 1])
    #testX = testX.reshape([-1, n_split, 1])

    # Building Residual Network
    net = tflearn.input_data(shape=[None, n_dim, 1])
    print("input", net.get_shape())
    ############ reshape for sub_seq
    net = tf.reshape(net, [-1, n_split, 1])
    print("reshaped input", net.get_shape())
    net = tflearn.conv_1d(net, 64, 16, 2)
    #net = tflearn.conv_1d(net, 64, 16, 2, regularizer='L2', weight_decay=0.0001)
    print("cov1", net.get_shape())
    net = tflearn.batch_normalization(net)
    print("bn1", net.get_shape())
    net = tflearn.activation(net, 'relu')
    print("relu1", net.get_shape())

    # Residual blocks
    '''net = tflearn.residual_bottleneck(net, 2, 16, 64, downsample_strides = 2, downsample=True, is_first_block = True)
    print("resn2", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides = 2, downsample=True)
    print("resn4", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides = 2, downsample=True)
    print("resn6", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides = 2, downsample=True)
    print("resn8", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
    print("resn10", net.get_shape())'''

    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      64,
                                      downsample_strides=2,
                                      downsample=True,
                                      is_first_block=True)
    print("resn2", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      64,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn4", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      128,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn6", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      128,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn8", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      256,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn10", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      256,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn12", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      512,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn14", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      512,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn16", net.get_shape())
    '''net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
    print("resn18", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
    print("resn20", net.get_shape())'''

    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    #net = tflearn.global_avg_pool(net)
    # LSTM
    print("before LSTM, before reshape", net.get_shape())
    ############ reshape for sub_seq
    net = tf.reshape(net, [-1, n_dim // n_split, 512])
    print("before LSTM", net.get_shape())
    net = bidirectional_rnn(net, BasicLSTMCell(256), BasicLSTMCell(256))
    print("after LSTM", net.get_shape())
    #net = tflearn.layers.recurrent.lstm(net, n_units=512)
    #print("after LSTM", net.get_shape())
    net = dropout(net, 0.5)

    # Regression
    feature_layer = tflearn.fully_connected(net, 32, activation='sigmoid')
    net = tflearn.dropout(feature_layer, 0.5)
    net = tflearn.fully_connected(net, 4, activation='softmax')
    print("dense", net.get_shape())
    net = tflearn.regression(
        net,
        optimizer='adam',  #momentum',
        loss='categorical_crossentropy')
    #,learning_rate=0.1)
    ## save model
    ### load
    model = tflearn.DNN(net)
    run_id = 'resnet_6000_500_10_5_v1'
    model.load('../model/resNet/' + run_id)

    all_names = tflearn.variables.get_all_variables()
    print(all_names[0])
    ttt = model.get_weights(all_names[0])
    print(type(ttt))
    print(ttt)

    # tflearn.variables.get_value(all_names[0], xxx)

    return all_names
Example #20

# IMDB Dataset loading
train, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000,
                                valid_portion=0.1)
trainX, trainY = train
testX, testY = test


# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=200, value=0.)
testX = pad_sequences(testX, maxlen=200, value=0.)


# Converting labels to binary vectors
trainY = to_categorical(trainY, 2)
testY = to_categorical(testY, 2)

# Network building
net = input_data(shape=[None, 200])
net = embedding(net, input_dim=20000, output_dim=128)
net = bidirectional_rnn(net, BasicLSTMCell(128), BasicLSTMCell(128))
net = dropout(net, 0.5)
net = fully_connected(net, 2, activation='softmax')
net = regression(net, optimizer='adam', loss='categorical_crossentropy')

# Training
model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
model.fit(trainX, trainY, n_epoch=100, validation_set=0.1, show_metric=True, batch_size=64)
model.save('sentiment_model.tfl')
Example #21
def get_resnet_feature(test_data):
    n_dim = 6000
    n_split = 300

    tf.reset_default_graph()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

    ### split
    #X = X.reshape([-1, n_split, 1])
    #testX = testX.reshape([-1, n_split, 1])

    # Building Residual Network
    net = tflearn.input_data(shape=[None, n_dim, 1])
    print("input", net.get_shape())
    ############ reshape for sub_seq 
    net = tf.reshape(net, [-1, n_split, 1])
    print("reshaped input", net.get_shape())
    net = tflearn.conv_1d(net, 64, 16, 2)
    #net = tflearn.conv_1d(net, 64, 16, 2, regularizer='L2', weight_decay=0.0001)
    print("cov1", net.get_shape())
    net = tflearn.batch_normalization(net)
    print("bn1", net.get_shape())
    net = tflearn.activation(net, 'relu')
    print("relu1", net.get_shape())

    # Residual blocks
    '''net = tflearn.residual_bottleneck(net, 2, 16, 64, downsample_strides = 2, downsample=True, is_first_block = True)
    print("resn2", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides = 2, downsample=True)
    print("resn4", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides = 2, downsample=True)
    print("resn6", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides = 2, downsample=True)
    print("resn8", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
    print("resn10", net.get_shape())'''

    net = tflearn.residual_bottleneck(net, 2, 16, 64, downsample_strides = 2, downsample=True, is_first_block = True)
    print("resn2", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 64, downsample_strides = 2, downsample=True)
    print("resn4", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides = 2, downsample=True)
    print("resn6", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides = 2, downsample=True)
    print("resn8", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides = 2, downsample=True)
    print("resn10", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides = 2, downsample=True)
    print("resn12", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides = 2, downsample=True)
    print("resn14", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides = 2, downsample=True)
    print("resn16", net.get_shape())
    '''net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
    print("resn18", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
    print("resn20", net.get_shape())'''

    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    #net = tflearn.global_avg_pool(net)
    # LSTM
    print("before LSTM, before reshape", net.get_shape())
    ############ reshape for sub_seq 
    net = tf.reshape(net, [-1, n_dim//n_split, 512])
    print("before LSTM", net.get_shape())
    net = bidirectional_rnn(net, BasicLSTMCell(256), BasicLSTMCell(256))
    print("after LSTM", net.get_shape())
    #net = tflearn.layers.recurrent.lstm(net, n_units=512)
    #print("after LSTM", net.get_shape())
    net = dropout(net, 0.5)

    # Regression
    net = tflearn.fully_connected(net, 32, activation='sigmoid')
    net = tflearn.dropout(net, 0.5)
    # net, feature_layer = tflearn.fully_connected(net, 4, activation='softmax', return_logit = True)
    feature_layer = tflearn.fully_connected(net, 4, activation='softmax')
    print('feature_layer: ', feature_layer.get_shape())
    print("dense", net.get_shape())
    net = tflearn.regression(net, optimizer='adam',#momentum',
                             loss='categorical_crossentropy')
                             #,learning_rate=0.1)
    print('final output: ', net.get_shape())
    ## save model
    ### load
    model = tflearn.DNN(net)
    run_id = 'resnet_6000_500_10_5_v1'
    model.load('../model/resNet/'+run_id)
    
    # print(tflearn.variables.get_all_variables())

    ### create new model, and get features
    m2 = tflearn.DNN(feature_layer, session=model.session)
    tmp_feature = []
    num_of_test = len(test_data)
    cur_data = []
    pre = []
    for i in range(num_of_test):
        cur_data.append(test_data[i])
        if (i % 2000 == 0 or i == (num_of_test - 1)) and i !=0:
            #tmp_test_data = test_data[i].reshape([-1, n_dim, 1])
            tmp_testX = np.array(cur_data, dtype=np.float32)
            tmp_feature.extend(m2.predict(tmp_testX.reshape([-1, n_dim, 1])))
            cur_data = []
            pre.extend(model.predict(tmp_testX))
            print(i, len(tmp_feature), len(tmp_feature[0]))

    tmp_feature = np.array(tmp_feature)

    return tmp_feature
Example #22
def lstm(incoming, n_units, activation='tanh', inner_activation='sigmoid',
         dropout=None, bias=True, weights_init=None, forget_bias=1.0,
         return_seq=False, return_state=False, initial_state=None,
         dynamic=False, trainable=True, restore=True, reuse=False,
         scope=None, name="LSTM"):
    """ LSTM.

    Long Short Term Memory Recurrent Layer.

    Input:
        3-D Tensor [samples, timesteps, input dim].

    Output:
        if `return_seq`: 3-D Tensor [samples, timesteps, output dim].
        else: 2-D Tensor [samples, output dim].

    Arguments:
        incoming: `Tensor`. Incoming 3-D Tensor.
        n_units: `int`, number of units for this layer.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'tanh'.
        inner_activation: `str` (name) or `function` (returning a `Tensor`).
            LSTM inner activation. Default: 'sigmoid'.
        dropout: `tuple` of `float`: (input_keep_prob, output_keep_prob). The
            input and output keep probability.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (See tflearn.initializations).
        forget_bias: `float`. Bias of the forget gate. Default: 1.0.
        return_seq: `bool`. If True, returns the full sequence instead of
            last sequence output only.
        return_state: `bool`. If True, returns a tuple with output and
            states: (output, states).
        initial_state: `Tensor`. An initial state for the RNN.  This must be
            a tensor of appropriate type and shape [batch_size x cell.state_size].
        dynamic: `bool`. If True, dynamic computation is performed. It will not
            compute RNN steps above the sequence length. Note that because TF
            requires to feed sequences of same length, 0 is used as a mask.
            So a sequence padded with 0 at the end must be provided. When
            computation is performed, it will stop when it meets a step with
            a value of 0.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        reuse: `bool`. If True and 'scope' is provided, this layer variables
            will be reused (shared).
        scope: `str`. Define this layer scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: `str`. A name for this layer (optional).

    References:
        Long Short Term Memory, Sepp Hochreiter & Jurgen Schmidhuber,
        Neural Computation 9(8): 1735-1780, 1997.

    Links:
        [http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf]
        (http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf)

    """
    cell = BasicLSTMCell(n_units, activation=activation,
                         inner_activation=inner_activation,
                         forget_bias=forget_bias, bias=bias,
                         weights_init=weights_init, trainable=trainable,
                         restore=restore, reuse=reuse)
    x = _rnn_template(incoming, cell=cell, dropout=dropout,
                      return_seq=return_seq, return_state=return_state,
                      initial_state=initial_state, dynamic=dynamic,
                      scope=scope, name=name)
  
    return x
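
A brief usage sketch for this lstm wrapper (the embedding pipeline and sizes are illustrative assumptions; the wrapper itself must live in a module where _rnn_template is importable):

import tflearn

net = tflearn.input_data(shape=[None, 100])
net = tflearn.embedding(net, input_dim=10000, output_dim=128)   # 3-D [samples, timesteps, dim]
net = lstm(net, 128, dropout=(0.8, 0.8), dynamic=True)          # 2-D [samples, 128]
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')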
Example #23
# Converting labels to binary vectors
trainY = to_categorical(trainY, nb_classes=2)
testY = to_categorical(testY, nb_classes=2)

# Building network
network = input_data(shape=[None, maxlen], name='input')

network = embedding(network,
                    input_dim=vocab_size,
                    output_dim=embedding_dim,
                    trainable=True)

network = bidirectional_rnn(network,
                            BasicLSTMCell(rnn_hidden_size,
                                          activation='tanh',
                                          inner_activation='sigmoid'),
                            BasicLSTMCell(rnn_hidden_size,
                                          activation='tanh',
                                          inner_activation='sigmoid'),
                            return_seq=True,
                            dynamic=True)
network = tf.pack(network, axis=1)

fw_outputs, bw_outputs = tf.split(split_dim=2, num_split=2, value=network)
network = tf.add(fw_outputs, bw_outputs)

branch1 = conv_1d(network,
                  num_filters,
                  3,
                  padding='valid',
Example #24
def get_resNet_proba(long_data, long_pid, model_path):
    all_pid = np.array(long_pid)
    all_feature = np.array(long_data)
    all_label = np.array([])
    test_data, test_label, test_pid = slide_and_cut(all_feature, all_label,
                                                    all_pid)

    pid_map = {}
    for i in range(len(all_pid)):
        pid_map[all_pid[i]] = i

    n_dim = 6000
    n_split = 300

    tf.reset_default_graph()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

    # Building Residual Network
    net = tflearn.input_data(shape=[None, n_dim, 1])
    ############ reshape for sub_seq
    net = tf.reshape(net, [-1, n_split, 1])
    net = tflearn.conv_1d(net, 64, 16, 2)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')

    # Residual blocks
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      64,
                                      downsample_strides=2,
                                      downsample=True,
                                      is_first_block=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      64,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      128,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      128,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      256,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      256,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      512,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      512,
                                      downsample_strides=2,
                                      downsample=True)

    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    #net = tflearn.global_avg_pool(net)
    # LSTM
    ############ reshape for sub_seq
    net = tf.reshape(net, [-1, n_dim // n_split, 512])
    net = bidirectional_rnn(net, BasicLSTMCell(256), BasicLSTMCell(256))
    net = dropout(net, 0.5)

    # Regression
    feature_layer = tflearn.fully_connected(net, 32, activation='sigmoid')
    net = tflearn.dropout(feature_layer, 0.5)
    net = tflearn.fully_connected(net, 4, activation='softmax')
    net = tflearn.regression(net,
                             optimizer='adam',  # 'momentum' was a commented-out alternative
                             loss='categorical_crossentropy')
    # (learning_rate=0.1 was another commented-out option)

    ### load the saved model
    model = tflearn.DNN(net)
    model.load(model_path)

    ### create new model, and get features
    num_of_test = len(test_data)
    cur_data = []
    pre = []
    for i in range(num_of_test):
        cur_data.append(test_data[i])
        if (num_of_test > 1 and (i % 2000 == 0 or i == (num_of_test - 1))
                and i != 0) or (num_of_test == 1):
            tmp_testX = np.array(cur_data, dtype=np.float32)
            cur_data = []
            pre.extend(model.predict(tmp_testX))

    test_pid = np.array(test_pid, dtype=np.string_)

    y_num = len(pid_map)
    y_pre = [[0. for j in range(4)] for i in range(y_num)]
    y_sec_pre = [[0. for j in range(4)] for i in range(y_num)]
    y_third_pre = [[0. for j in range(4)] for i in range(y_num)]
    #print(y_num)

    for j in range(len(pre)):
        i_pred = np.array(pre[j], dtype=np.float32)
        cur_pid = str(test_pid[j], 'utf-8')

        list_id = pid_map[cur_pid]
        temp_pre = np.array(y_pre[list_id], dtype=np.float32)
        temp_sec_pre = np.array(y_sec_pre[list_id], dtype=np.float32)
        temp_third_pre = np.array(y_third_pre[list_id], dtype=np.float32)

        max_p = temp_pre[np.argmax(temp_pre)]
        max_sec_p = temp_sec_pre[np.argmax(temp_sec_pre)]
        max_third_p = temp_third_pre[np.argmax(temp_third_pre)]
        sec_p = 0
        sec_sec_p = 0
        sec_third_p = 0
        for k in range(len(temp_pre)):
            if temp_pre[k] == max_p:
                continue
            if temp_pre[k] > sec_p:
                sec_p = temp_pre[k]

            if temp_sec_pre[k] == max_sec_p:
                continue
            if temp_sec_pre[k] > sec_sec_p:
                sec_sec_p = temp_sec_pre[k]

            if temp_third_pre[k] == max_third_p:
                continue
            if temp_third_pre[k] > sec_third_p:
                sec_third_p = temp_third_pre[k]

        cur_max_p = i_pred[np.argmax(i_pred)]
        cur_sec_p = 0
        for k in range(len(i_pred)):
            if i_pred[k] == cur_max_p:
                continue
            if i_pred[k] > cur_sec_p:
                cur_sec_p = i_pred[k]

        if (cur_max_p - cur_sec_p) > (max_p - sec_p):
            y_third_pre[list_id] = y_sec_pre[list_id]
            y_sec_pre[list_id] = y_pre[list_id]
            y_pre[list_id] = i_pred
        elif (cur_max_p - cur_sec_p) > (max_sec_p - sec_sec_p):
            y_third_pre[list_id] = y_sec_pre[list_id]
            y_sec_pre[list_id] = i_pred
        elif (cur_max_p - cur_sec_p) > (max_third_p - sec_third_p):
            y_third_pre[list_id] = i_pred

    for k in range(len(y_pre)):
        labels = [0. for j in range(4)]
        pred_1 = np.argmax(y_pre[k])
        labels[pred_1] += 1
        pred_2 = np.argmax(y_sec_pre[k])
        labels[pred_2] += 1
        pred_3 = np.argmax(y_third_pre[k])
        labels[pred_3] += 1

        if pred_1 == 2:  # and (abs(y_pre[k][np.argmax(labels)] - y_pre[k][2])/y_pre[k][np.argmax(labels)] <= 0.2):
            continue
        elif pred_2 == 2:  # and (abs(y_pre[k][np.argmax(labels)] - y_sec_pre[k][2])/y_pre[k][np.argmax(labels)] <= 0.2):
            y_pre[k] = y_sec_pre[k]
        elif pred_3 == 2:  # and (abs(y_pre[k][np.argmax(labels)] - y_third_pre[k][2])/y_pre[k][np.argmax(labels)] <= 0.2):
            y_pre[k] = y_third_pre[k]
        elif pred_1 != np.argmax(labels):
            if pred_2 == np.argmax(labels):
                y_pre[k] = y_sec_pre[k]

    return y_pre
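A minimal usage sketch for the function above, assuming a 6000-sample record length to match the n_dim value inside get_resNet_proba; the record ids and checkpoint path are hypothetical, and slide_and_cut must be available in the same scope:

import numpy as np

# Hypothetical example: score two records with a saved checkpoint.
long_data = [np.random.randn(6000), np.random.randn(6000)]   # one 1-D signal per record
long_pid = ['A00001', 'A00002']                               # one id per record
probas = get_resNet_proba(long_data, long_pid, model_path='model/resnet.ckpt')
for pid, p in zip(long_pid, probas):
    print(pid, p)   # p is a 4-way probability vector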
Example #25
# pickle.dump(X_test, open("xtest.p", "wb"))

# X_train = pickle.load(open("xtrain.p", "rb"))
# X_test = pickle.load(open("xtest.p", "rb"))

### Models

print('Build model')

net = input_data([None, model_size])
net = embedding(net, input_dim=n_words, output_dim=embedding_size)
if cell_type == "lstm":
    for i in range(len(cell_size)):
        if i < len(cell_size) - 1:
            net = bidirectional_rnn(net,
                                    BasicLSTMCell(cell_size[i]),
                                    BasicLSTMCell(cell_size[i]),
                                    return_seq=True)
            net = dropout(net, dropout_ratio)
        else:
            net = bidirectional_rnn(net, BasicLSTMCell(cell_size[i]),
                                    BasicLSTMCell(cell_size[i]))
            net = dropout(net, dropout_ratio)
elif cell_type == "gru":
    for i in range(len(cell_size)):
        if i < len(cell_size) - 1:
            net = bidirectional_rnn(net,
                                    GRUCell(cell_size[i]),
                                    GRUCell(cell_size[i]),
                                    return_seq=True)
            net = dropout(net, dropout_ratio)
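The listing is cut off here. A typical TFLearn classification head for a stacked bidirectional RNN like this would look as follows; this is a hedged sketch, not the original continuation, and n_classes plus the training hyper-parameters are assumptions:

# Hedged sketch only -- not taken from the original file.
net = fully_connected(net, n_classes, activation='softmax')
net = regression(net, optimizer='adam', loss='categorical_crossentropy')

model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=32)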
Example #26
#Get data
trainX, trainY, maxLen, vocabSize, _ = data.get_Data_Vectors()

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=maxLen, value=0.)
# Pad label sequences to the same length (not a binary conversion)
trainY = pad_sequences(trainY, maxlen=maxLen, value=0.)

# Network building
print("Beginning neural network")
net = input_data(shape=[None, maxLen])
net = embedding(net, input_dim=vocabSize, output_dim=128)
#print(net.get_shape().as_list())
net = bidirectional_rnn(net, BasicLSTMCell(512), BasicLSTMCell(512))
net = dropout(net, 0.5)
net = fully_connected(net, maxLen, activation='softmax')
net = regression(net, optimizer='adam',
                 loss='categorical_crossentropy')  #, learning_rate=0.001)

#"""
# Training
model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
model.fit(trainX,
          trainY,
          validation_set=0.2,
          show_metric=True,
          batch_size=64,
          shuffle=True)
#"""
Example #27
trainX = pad_sequences(trainX, maxlen=maxLen, value=0)
# Pad label sequences to the same length (not a binary conversion)
trainY = pad_sequences(trainY, maxlen=maxLen, value=0)
embeddings = concat_2Dvectors(embeddings, Flatten_3Dto2D(POS_vectors))

# Network building
print("Beginning neural network")
net = input_data(shape=[None, maxLen])
net = embedding(net,
                input_dim=len(embeddings),
                output_dim=len(embeddings[0]),
                trainable=False,
                name="EmbeddingLayer")
print("After embeddings : ", net.get_shape().as_list())
net = bidirectional_rnn(net,
                        BasicLSTMCell(1024),
                        BasicLSTMCell(1024),
                        return_seq=True)
#net = [dropout(net[i], 0.5) for i in range(len(net))]
net = [
    fully_connected(net[i], 1, activation='sigmoid') for i in range(len(net))
]
net = merge(net, mode='concat')
print("After RNN : ", net.get_shape().as_list())
print("After Dropout : ", net.get_shape().as_list())
net = regression(net,
                 optimizer='adam',
                 loss='binary_crossentropy',
                 learning_rate=0.005)
print("After regression : ", net.get_shape().as_list())
Example #28
File: resNet_5.py  Project: rslu2000/ENCASE
    def make_core_network(net, regularizer='L2'):
        """Build the shared ResNet + bidirectional-LSTM core.

        Returns the 4-way softmax output together with the 32-unit feature
        layer. Note that the `regularizer` argument is currently unused.
        """
        ############ reshape for sub_seq
        net = tf.reshape(net, [-1, n_split, 1])
        print("reshaped input", net.get_shape())
        net = tflearn.conv_1d(net, 64, 16, 2)
        #net = tflearn.conv_1d(net, 64, 16, 2, regularizer='L2', weight_decay=0.0001)
        print("cov1", net.get_shape())
        net = tflearn.batch_normalization(net)
        print("bn1", net.get_shape())
        net = tflearn.activation(net, 'relu')
        print("relu1", net.get_shape())

        # Residual blocks
        '''net = tflearn.residual_bottleneck(net, 2, 16, 64, downsample_strides = 2, downsample=True, is_first_block = True)
        print("resn2", net.get_shape())
        net = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides = 2, downsample=True)
        print("resn4", net.get_shape())
        net = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides = 2, downsample=True)
        print("resn6", net.get_shape())
        net = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides = 2, downsample=True)
        print("resn8", net.get_shape())
        net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
        print("resn10", net.get_shape())'''

        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          64,
                                          downsample_strides=2,
                                          downsample=True,
                                          is_first_block=True)
        print("resn2", net.get_shape())
        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          64,
                                          downsample_strides=2,
                                          downsample=True)
        print("resn4", net.get_shape())
        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          128,
                                          downsample_strides=2,
                                          downsample=True)
        print("resn6", net.get_shape())
        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          128,
                                          downsample_strides=2,
                                          downsample=True)
        print("resn8", net.get_shape())
        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          256,
                                          downsample_strides=2,
                                          downsample=True)
        print("resn10", net.get_shape())
        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          256,
                                          downsample_strides=2,
                                          downsample=True)
        print("resn12", net.get_shape())
        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          512,
                                          downsample_strides=2,
                                          downsample=True)
        print("resn14", net.get_shape())
        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          512,
                                          downsample_strides=2,
                                          downsample=True)
        print("resn16", net.get_shape())
        '''net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
        print("resn18", net.get_shape())
        net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
        print("resn20", net.get_shape())'''

        net = tflearn.batch_normalization(net)
        net = tflearn.activation(net, 'relu')
        #net = tflearn.global_avg_pool(net)
        # LSTM
        print("before LSTM, before reshape", net.get_shape())
        ############ reshape for sub_seq
        net = tf.reshape(net, [-1, n_dim // n_split, 512])
        print("before LSTM", net.get_shape())
        net = bidirectional_rnn(net, BasicLSTMCell(256), BasicLSTMCell(256))
        print("after LSTM", net.get_shape())
        #net = tflearn.layers.recurrent.lstm(net, n_units=512)
        #print("after LSTM", net.get_shape())
        net = dropout(net, 0.5)

        # Regression
        feature_layer = tflearn.fully_connected(net, 32, activation='sigmoid')
        net = tflearn.dropout(feature_layer, 0.5)
        net = tflearn.fully_connected(net, 4, activation='softmax')
        print("dense", net.get_shape())
        return net, feature_layer
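A hedged sketch of how a core-network builder like this is typically wired into a trainable TFLearn graph; the optimizer and loss shown here are assumptions, not taken from resNet_5.py:

# Hedged sketch: wrap make_core_network in a full graph and train it.
net = tflearn.input_data(shape=[None, n_dim, 1])
net, feature_layer = make_core_network(net)
net = tflearn.regression(net, optimizer='adam',
                         loss='categorical_crossentropy')

model = tflearn.DNN(net, tensorboard_verbose=0)
# model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=32)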