Example #1
def recurrent_net(net, rec_type, rec_size, return_sequence):
    """
    Build a recurrent layer of the type specified by the user:
    one of 'lstm', 'gru', 'bi_lstm' or 'bi_gru'.
    """
    if rec_type == 'lstm':
        net = tflearn.layers.recurrent.lstm(net,
                                            rec_size,
                                            return_seq=return_sequence)
    elif rec_type == 'gru':
        net = tflearn.layers.recurrent.gru(net,
                                           rec_size,
                                           return_seq=return_sequence)
    elif rec_type == 'bi_lstm':
        net = bidirectional_rnn(net,
                                BasicLSTMCell(rec_size),
                                BasicLSTMCell(rec_size),
                                return_seq=return_sequence)
    elif rec_type == 'bi_gru':
        net = bidirectional_rnn(net,
                                GRUCell(rec_size),
                                GRUCell(rec_size),
                                return_seq=return_sequence)
    else:
        raise ValueError(
            'Incorrect rnn type passed. Try lstm, gru, bi_lstm or bi_gru.')
    return net
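A minimal usage sketch for recurrent_net (the input shape and sizes below are illustrative assumptions, not taken from the original):

import tflearn
from tflearn.layers.recurrent import bidirectional_rnn, BasicLSTMCell, GRUCell

net = tflearn.input_data(shape=[None, 100, 64])  # [batch, timesteps, features]
# build a 128-unit bidirectional LSTM that returns only the last output
net = recurrent_net(net, 'bi_lstm', 128, return_sequence=False)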
def lstm_block(input,
               hidden_units=128,
               dropout=0.5,
               reuse=False,
               layers=1,
               dynamic=True,
               return_seq=False,
               bidirectional=False,
               return_state=False):
    output = None
    prev_output = input
    for n_layer in range(layers):
        if not bidirectional:
            if n_layer < layers - 1:
                output = tflearn.lstm(prev_output,
                                      hidden_units,
                                      dropout=dropout,
                                      dynamic=dynamic,
                                      reuse=reuse,
                                      scope='lstm_{}'.format(n_layer),
                                      return_seq=True)
                output = tf.stack(output, axis=0)
                output = tf.transpose(output, perm=[1, 0, 2])
                prev_output = output
                continue
            output = tflearn.lstm(prev_output,
                                  hidden_units,
                                  dropout=dropout,
                                  dynamic=dynamic,
                                  reuse=reuse,
                                  scope='lstm_{}'.format(n_layer),
                                  return_seq=return_seq,
                                  return_state=return_state)
        else:
            if n_layer < layers - 1:
                output = bidirectional_rnn(prev_output,
                                           BasicLSTMCell(hidden_units,
                                                         reuse=reuse),
                                           BasicLSTMCell(hidden_units,
                                                         reuse=reuse),
                                           dynamic=dynamic,
                                           scope='blstm_{}'.format(n_layer),
                                           return_seq=True)
                output = tf.stack(output, axis=0)
                output = tf.transpose(output, perm=[1, 0, 2])
                prev_output = output
                continue
            output = bidirectional_rnn(prev_output,
                                       BasicLSTMCell(hidden_units,
                                                     reuse=reuse),
                                       BasicLSTMCell(hidden_units,
                                                     reuse=reuse),
                                       dynamic=dynamic,
                                       scope='blstm_{}'.format(n_layer),
                                       return_seq=return_seq,
                                       return_states=return_state)
    return output
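A corresponding sketch for lstm_block (shapes and sizes assumed for illustration):

import tensorflow as tf
import tflearn
from tflearn.layers.recurrent import bidirectional_rnn, BasicLSTMCell

x = tflearn.input_data(shape=[None, 50, 32])
# two stacked bidirectional LSTM layers; the final layer returns the last output
out = lstm_block(x, hidden_units=64, layers=2, bidirectional=True)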
    def build_network(self, ilearning_rate=0.001):
        '''
        Build the network.

        :param ilearning_rate: learning rate passed to the regression layer
        :return: the assembled tflearn network
        '''

        #tf.reset_default_graph()
        net = input_data(shape=[None, 200])
        net = embedding(net,
                        input_dim=51887,
                        output_dim=200,
                        trainable=False,
                        name="EmbeddingLayer")

        #net = embedding(net, input_dim=20000, output_dim=128, trainable=False, weights_init=W,
        #                        name="EmbeddingLayer")
        #net = tflearn.embedding(net, input_dim=20000, output_dim=128, trainable=False, weights_init = W, name="EmbeddingLayer")
        net = bidirectional_rnn(net, BasicLSTMCell(128), BasicLSTMCell(128))
        net = dropout(net, 0.5)
        net = fully_connected(net, 2, activation='softmax')
        net = regression(net,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=ilearning_rate)

        return net
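Because the embedding layer is built with trainable=False, its weights are presumably assigned after model construction, following the set_weights pattern from Example #25 (W here is an assumed numpy array of shape [51887, 200]):

model = tflearn.DNN(net)
emb_vars = tflearn.get_layer_variables_by_name('EmbeddingLayer')[0]
model.set_weights(emb_vars, W)  # W: hypothetical pretrained embedding matrix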
Example #4
    def learning(self):

        # Network building
        net = input_data(shape=[None, MAXIMUM_LENGTH_DATA, 1])
        # index = tf.placeholder(shape=[None], dtype=tf.int32)  # unused: a Python list of outputs cannot be indexed by a placeholder

        # net = bidirectional_rnn(net, BasicLSTMCell(200), BasicLSTMCell(200),return_seq=True)
        # net = dropout(net, 0.5)
        # net = tflearn.time_distributed(net, tflearn.fully_connected, [1,'softmax'])
        # net = tflearn.regression(net, optimizer='adam', learning_rate=0.001, loss='binary_crossentropy')
        # model = tflearn.DNN(net, tensorboard_verbose=3)
        # model.fit(self.trainingData.data, self.trainingData.label, validation_set=0.1, show_metric=True, batch_size=1000,n_epoch=100)

        net = bidirectional_rnn(net,
                                BasicLSTMCell(200),
                                BasicLSTMCell(200),
                                return_seq=True,
                                dynamic=True)
        net = fully_connected(net[-1], 2, activation='softmax')  # use the last timestep's output
        net = regression(net,
                         optimizer='adam',
                         loss='categorical_crossentropy')

        # Training
        model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
        model.fit(self.trainingData.data,
                  self.trainingData.label,
                  validation_set=0.1,
                  show_metric=True,
                  batch_size=64)
Example #5
    def LoadModel(self):
        self.window_height = 16
        self.window_width = 4
        self.threshold = 0.03
        self.ground_height = 2
        symbol_count = 13

        network = input_data(
            shape=[None, self.window_height * self.window_width, symbol_count])
        network = bidirectional_rnn(network, BasicLSTMCell(2),
                                    BasicLSTMCell(2))
        network = dropout(network, 0.8)
        network = fully_connected(network,
                                  self.window_height * symbol_count,
                                  activation='prelu')
        network = tf.reshape(network, [-1, self.window_height, symbol_count])
        network = regression(network,
                             optimizer='adagrad',
                             learning_rate=0.005,
                             loss='mean_square',
                             name='target',
                             batch_size=64)

        self.model = tflearn.DNN(network)
        self.model.load('./LSTMmodel/model.tfl')
Example #6
def bi_LSTM():
    # Network building
    net = input_data(shape=[None, charvec_len])
    net = embedding(net, input_dim=in_dim, output_dim=nn_dim)
    net = dropout(net, drop1)

    net = bidirectional_rnn(net, BasicLSTMCell(nn_dim, forget_bias=1.),
                            BasicLSTMCell(nn_dim, forget_bias=1.))
    net = dropout(net, drop2)
    net = fully_connected(net, 2, activation='softmax')

    net = regression(net,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=lrate)

    return net
Example #7
def bi_LSTM():
    # Network building
    net = input_data(shape=[None, 440])
    net = embedding(net, input_dim=20000, output_dim=128)
    net = dropout(net, 0.9)

    net = bidirectional_rnn(net, BasicLSTMCell(128, forget_bias=1.),
                            BasicLSTMCell(128, forget_bias=1.))
    net = dropout(net, 0.7)
    net = fully_connected(net, 2, activation='softmax')

    net = regression(net,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)

    return net
# IMDB Dataset loading
train, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000,
                                valid_portion=0.1)
trainX, trainY = train
testX, testY = test


# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=200, value=0.)
testX = pad_sequences(testX, maxlen=200, value=0.)


# Converting labels to binary vectors
trainY = to_categorical(trainY, 2)
testY = to_categorical(testY, 2)

# Network building
net = input_data(shape=[None, 200])
net = embedding(net, input_dim=20000, output_dim=128)
net = bidirectional_rnn(net, BasicLSTMCell(128), BasicLSTMCell(128))
net = dropout(net, 0.5)
net = fully_connected(net, 2, activation='softmax')
net = regression(net, optimizer='adam', loss='categorical_crossentropy')

# Training
model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
model.fit(trainX, trainY, n_epoch=100, validation_set=0.1, show_metric=True, batch_size=64)
model.save('sentiment_model.tfl')
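The saved model can later be restored for inference; a sketch (the same network must be rebuilt before loading):

model = tflearn.DNN(net)
model.load('sentiment_model.tfl')
pred = model.predict(testX)  # softmax probabilities over the 2 classes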
trainY = to_categorical(trainY, nb_classes=2)
testY = to_categorical(testY, nb_classes=2)

# Building network
network = input_data(shape=[None, maxlen], name='input')

network = embedding(network,
                    input_dim=vocab_size,
                    output_dim=embedding_dim,
                    trainable=True)

network = bidirectional_rnn(network,
                            BasicLSTMCell(rnn_hidden_size,
                                          activation='tanh',
                                          inner_activation='sigmoid'),
                            BasicLSTMCell(rnn_hidden_size,
                                          activation='tanh',
                                          inner_activation='sigmoid'),
                            return_seq=True,
                            dynamic=True)
network = tf.stack(network, axis=1)  # tf.pack was renamed to tf.stack in TF 1.0

fw_outputs, bw_outputs = tf.split(network, num_or_size_splits=2, axis=2)  # updated TF 1.0 split signature
network = tf.add(fw_outputs, bw_outputs)

branch1 = conv_1d(network,
                  num_filters,
                  3,
                  padding='valid',
                  activation='relu',
                  regularizer="L2")
# pickle.dump(X_test, open("xtest.p", "wb"))

# X_train = pickle.load(open("xtrain.p", "rb"))
# X_test = pickle.load(open("xtest.p", "rb"))

### Models

print('Build model')

net = input_data([None, model_size])
net = embedding(net, input_dim=n_words, output_dim=embedding_size)
if cell_type == "lstm":
    for i in range(len(cell_size)):
        if i < len(cell_size) - 1:
            net = bidirectional_rnn(net,
                                    BasicLSTMCell(cell_size[i]),
                                    BasicLSTMCell(cell_size[i]),
                                    return_seq=True)
            net = dropout(net, dropout_ratio)
        else:
            net = bidirectional_rnn(net, BasicLSTMCell(cell_size[i]),
                                    BasicLSTMCell(cell_size[i]))
            net = dropout(net, dropout_ratio)
elif cell_type == "gru":
    for i in range(len(cell_size)):
        if i < len(cell_size) - 1:
            net = bidirectional_rnn(net,
                                    GRUCell(cell_size[i]),
                                    GRUCell(cell_size[i]),
                                    return_seq=True)
            net = dropout(net, dropout_ratio)
        else:
            net = bidirectional_rnn(net, GRUCell(cell_size[i]),
                                    GRUCell(cell_size[i]))
            net = dropout(net, dropout_ratio)
            # (completed by analogy with the LSTM branch above; the
            #  original snippet was truncated here)
Example #11
x_frames = np.array(x_frames)
y_frames = np.array(y_frames)

x_frames, y_frames = unison_shuffled_copies(x_frames, y_frames)

split = int(0.05 * x_frames.shape[0])
valid_x = x_frames[0:split]
valid_y = y_frames[0:split]
train_x = x_frames[split:]
train_y = y_frames[split:]

print(x_frames.shape)
print(x_frames.shape[1], x_frames.shape[2])
net = tflearn.input_data([None, x_frames.shape[1], x_frames.shape[2]])
print net.get_shape().as_list()
net = bidirectional_rnn(net, BasicLSTMCell(number_hidden),
                        BasicLSTMCell(number_hidden))
net = dropout(net, 0.8)
fc = tflearn.fully_connected(net, highway_size, activation='elu',
                             regularizer='L2', weight_decay=0.001)
net = fc

for i in range(highway_layer_amount):
    net = tflearn.highway(net, highway_size, activation='elu',
                          regularizer='L2', weight_decay=0.001,
                          transform_dropout=0.8)

net = tflearn.fully_connected(net, y_frames.shape[1], activation='elu')
net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                         loss='mean_square')
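Training would follow the same pattern as the other examples (a sketch using the splits defined above; batch size assumed):

model = tflearn.DNN(net, tensorboard_verbose=2)
model.fit(train_x, train_y, validation_set=(valid_x, valid_y),
          show_metric=True, batch_size=64)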

Example #12
net = tflearn.residual_bottleneck(net,
                                  2,
                                  16,
                                  512,
                                  downsample_strides=2,
                                  downsample=True)  # call reconstructed from the parallel snippet in Example #17
print("resn16", net.get_shape())
'''net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
print("resn18", net.get_shape())
net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
print("resn20", net.get_shape())'''

net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
#net = tflearn.global_avg_pool(net)
# LSTM
print("before LSTM, before reshape", net.get_shape())
############ reshape for sub_seq
net = tf.reshape(net, [-1, n_dim // n_split, 512])
print("before LSTM", net.get_shape())
net = bidirectional_rnn(net, BasicLSTMCell(256), BasicLSTMCell(256))
print("after LSTM", net.get_shape())
#net = tflearn.layers.recurrent.lstm(net, n_units=512)
#print("after LSTM", net.get_shape())
net = dropout(net, 0.5)

# Regression
net = tflearn.fully_connected(net, 32, activation='sigmoid')
net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net, 4, activation='softmax')
print("dense", net.get_shape())
net = tflearn.regression(
    net,
    optimizer='adam',  # alternative: 'momentum'
    loss='categorical_crossentropy')
# optionally: learning_rate=0.05
Example #13
testX, testY = test

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=200, value=0.)
testX = pad_sequences(testX, maxlen=200, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY, nb_classes=2)
testY = to_categorical(testY, nb_classes=2)

# Network building
net = input_data(shape=[None, 200])  #same shape as the max length
net = embedding(
    net, input_dim=20000, output_dim=128
)  #creates embedding matrix. Not sure if I need this for project 4
net = bidirectional_rnn(net, BasicLSTMCell(128), BasicLSTMCell(
    128))  #has two LSTMs with 128 units go in forward and backward directions.
net = dropout(net,
              0.5)  #dropout with keep probability of 0.5. All are finalized
net = fully_connected(
    net, 2, activation='softmax'
)  #makes softmax probabilities over 2 categories, true and false
net = regression(
    net, optimizer='adam', loss='categorical_crossentropy'
)  #runs adam optimizer on net minimizing categorical cross entropy loss

# Training
model = tflearn.DNN(
    net, clip_gradients=5, tensorboard_verbose=1
)  # clips gradients at 5. Prints out loss, accuracy and gradients. All are finalized
model.fit(
    trainX,
    trainY,
    validation_set=0.1,
    show_metric=True,
    batch_size=64)  # call completed by analogy with the other IMDB examples
Example #14
testX, testY = test

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=input_length_each_seq, value=0.)
testX = pad_sequences(testX, maxlen=input_length_each_seq, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY, n_class)
testY = to_categorical(testY, n_class)

# Network building
net = input_data(shape=[None, input_length_each_seq]) # [none,200]
net = embedding(net, input_dim=n_datas, output_dim=hiddle_layes) # [none,200,128]
# net = tf.unstack(net, input_length_each_seq, 1)  # debugging leftover; bidirectional_rnn takes the 3-D embedding output directly
net = bidirectional_rnn(net, BasicLSTMCell(hiddle_layes), BasicLSTMCell(hiddle_layes))

net = dropout(net, 0.5)
net = fully_connected(net, n_class, activation='softmax')
net = regression(net, optimizer='adam', loss='categorical_crossentropy')

# Training
model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=64)
2. TensorFlow

Reference: TensorFlow-Examples

""" Bi-directional Recurrent Neural Network.
A Bi-directional Recurrent Neural Network (LSTM) implementation example using
TensorFlow library. This example is using the MNIST database of handwritten
digits. """
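The referenced TensorFlow-Examples model can be sketched as follows (TF 1.x contrib API; the hyperparameters here are illustrative assumptions):

import tensorflow as tf
from tensorflow.contrib import rnn

n_input, n_steps, n_hidden, n_classes = 28, 28, 128, 10

x = tf.placeholder(tf.float32, [None, n_steps, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])

def BiRNN(x):
    # static_bidirectional_rnn expects a length-n_steps list of [batch, n_input]
    inputs = tf.unstack(x, n_steps, 1)
    lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell,
                                                 inputs, dtype=tf.float32)
    # the last output concatenates forward and backward states: [batch, 2*n_hidden]
    return tf.layers.dense(outputs[-1], n_classes)

logits = BiRNN(x)
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)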
Example #15
def get_resnet_feature(test_data, test_label, test_pid, pid_map):
    n_dim = 6000
    n_split = 300

    tf.reset_default_graph()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

    ### split
    #X = X.reshape([-1, n_split, 1])
    #testX = testX.reshape([-1, n_split, 1])

    # Building Residual Network
    net = tflearn.input_data(shape=[None, n_dim, 1])
    ############ reshape for sub_seq
    net = tf.reshape(net, [-1, n_split, 1])
    net = tflearn.conv_1d(net, 64, 16, 2)
    #net = tflearn.conv_1d(net, 64, 16, 2, regularizer='L2', weight_decay=0.0001)
    net = tflearn.batch_normalization(net)

    # Residual blocks
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      64,
                                      downsample_strides=2,
                                      downsample=True,
                                      is_first_block=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      64,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      128,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      128,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      256,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      256,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      512,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      512,
                                      downsample_strides=2,
                                      downsample=True)

    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    #net = tflearn.global_avg_pool(net)
    # LSTM
    ############ reshape for sub_seq
    net = tf.reshape(net, [-1, n_dim // n_split, 512])
    net = bidirectional_rnn(net, BasicLSTMCell(256), BasicLSTMCell(256))
    #net = tflearn.layers.recurrent.lstm(net, n_units=512)
    #print("after LSTM", net.get_shape())
    net = dropout(net, 0.5)

    # Regression
    feature_layer = tflearn.fully_connected(net, 32, activation='sigmoid')
    net = tflearn.dropout(feature_layer, 0.5)
    net = tflearn.fully_connected(net, 4, activation='softmax')
    net = tflearn.regression(
        net,
        optimizer='adam',  # alternative: 'momentum'
        loss='categorical_crossentropy')
    # optionally: learning_rate=0.1
    ## save model
    ### load
    model = tflearn.DNN(net)
    run_id = 'resnet_6000_500_10_5_v1'
    model.load('../model/resNet/' + run_id)

    ### create new model, and get features
    m2 = tflearn.DNN(feature_layer, session=model.session)
    tmp_feature = []
    num_of_test = len(test_data)
    cur_data = []
    pre = []
    for i in range(num_of_test):
        cur_data.append(test_data[i])
        if (i % 2000 == 0 or i == (num_of_test - 1)) and i != 0:
            #tmp_test_data = test_data[i].reshape([-1, n_dim, 1])
            tmp_testX = np.array(cur_data, dtype=np.float32)
            tmp_feature.extend(m2.predict(tmp_testX.reshape([-1, n_dim, 1])))
            cur_data = []
            pre.extend(model.predict(tmp_testX))

    tmp_feature = np.array(tmp_feature)

    test_pid = np.array(test_pid, dtype=np.string_)

    y_num = len(pid_map)
    features = [[0. for j in range(32)] for i in range(y_num)]
    re_labels = [[0. for j in range(4)] for i in range(y_num)]
    y_pre = [[0. for j in range(4)] for i in range(y_num)]
    y_sec_pre = [[0. for j in range(4)] for i in range(y_num)]
    y_third_pre = [[0. for j in range(4)] for i in range(y_num)]
    y_fourth_pre = [[0. for j in range(4)] for i in range(y_num)]
    y_fifth_pre = [[0. for j in range(4)] for i in range(y_num)]
    y_sixth_pre = [[0. for j in range(4)] for i in range(y_num)]
    y_seventh_pre = [[0. for j in range(4)] for i in range(y_num)]
    y_groundtruth = [[0. for j in range(4)] for i in range(y_num)]
    #print(y_num)

    def top2_margin(p):
        # Confidence margin: top-1 probability minus the runner-up.
        p = np.sort(np.array(p, dtype=np.float32))
        return p[-1] - p[-2]

    # Candidate predictions per pid, ranked most-confident first.
    ranked = [y_pre, y_sec_pre, y_third_pre, y_fourth_pre,
              y_fifth_pre, y_sixth_pre, y_seventh_pre]

    for j in range(len(tmp_feature)):
        feature_pred = np.array(tmp_feature[j], dtype=np.float32)
        #print(len(feature_pred))
        i_pred = np.array(pre[j], dtype=np.float32)
        cur_pid = str(test_pid[j], 'utf-8')

        list_id = pid_map[cur_pid]
        #print (list_id)
        temp_feature = np.array(features[list_id], dtype=np.float32)

        # Insert the new window prediction at the first rank whose margin it
        # beats, shifting lower-ranked entries down. (The original chain of
        # elif branches referenced max_fourth_p .. sec_seventh_p, which were
        # never computed; this loop implements the evident intent.)
        cur_margin = top2_margin(i_pred)
        for rank in range(len(ranked)):
            if cur_margin > top2_margin(ranked[rank][list_id]):
                for r in range(len(ranked) - 1, rank, -1):
                    ranked[r][list_id] = ranked[r - 1][list_id]
                ranked[rank][list_id] = i_pred
                break

        max_f = 0
        for k in range(len(temp_feature)):
            if temp_feature[k] > max_f:
                max_f = temp_feature[k]
        if max_f > 0:
            feature_pred = (feature_pred + temp_feature) / 2
            #for k in range(len(temp_feature)):
            #    feature_pred[k] = (feature_pred[k]+temp_feature[k])/2

        features[list_id] = feature_pred

        y_groundtruth[list_id] = test_label[j]

        gt_list = ["N", "A", "O", "~"]
        pred_1 = gt_list[np.argmax(i_pred)]

        if pred_1 == 'N':
            re_labels[list_id][0] += 1
        elif pred_1 == 'A':
            re_labels[list_id][1] += 1
        elif pred_1 == 'O':
            re_labels[list_id][2] += 1
        elif pred_1 == '~':
            re_labels[list_id][3] += 1
        else:
            print('wrong label')

    out_feature = []
    for i in range(len(features)):
        out_feature.append(features[i])

    out_feature = np.array(out_feature)

    for k in range(len(y_pre)):
        labels = [0. for j in range(4)]
        pred_1 = np.argmax(y_pre[k])
        labels[pred_1] += 1
        pred_2 = np.argmax(y_sec_pre[k])
        labels[pred_2] += 1
        pred_3 = np.argmax(y_third_pre[k])
        labels[pred_3] += 1

        if pred_1 == 2:
            print("O was selected!")
            continue
        elif pred_2 == 2:
            y_pre[k] = y_sec_pre[k]
            print("O was selected!")
        elif pred_3 == 2:
            y_pre[k] = y_third_pre[k]
            print("O was selected!")
        if pred_1 != np.argmax(labels):
            if pred_2 == np.argmax(labels):
                y_pre[k] = y_sec_pre[k]
                print("Second was selected!")
    MyEval.F1Score3_num(pre, test_label[:len(pre)])

    MyEval.F1Score3_num(y_pre, y_groundtruth)
    MyEval.F1Score3_num(re_labels, y_groundtruth)

    return out_feature
Example #16
def train_model(train,
                test,
                vocab_size,
                max_seq_size,
                chunks=1024,
                num_epochs=10,
                learning_rate=0.001,
                n_units=256,
                dropout=0.5):

    trainX = train['sub_seqs']
    trainY = train['sub_label']
    testX = test['sub_seqs']
    testY = test['sub_label']

    # Sequence padding
    trainX = pad_sequences(trainX,
                           maxlen=max_seq_size,
                           value=0.,
                           padding='post')
    testX = pad_sequences(testX, maxlen=max_seq_size, value=0., padding='post')

    trainX = da.from_array(np.asarray(trainX), chunks=chunks)
    trainY = da.from_array(np.asarray(trainY), chunks=chunks)
    testX = da.from_array(np.asarray(testX), chunks=chunks)
    testY = da.from_array(np.asarray(testY), chunks=chunks)

    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=vocab_size)
    testY = to_categorical(testY, nb_classes=vocab_size)

    # Network building
    net = tflearn.input_data([None, max_seq_size])
    net = tflearn.embedding(net,
                            input_dim=vocab_size,
                            output_dim=128,
                            trainable=True)
    net = bidirectional_rnn(net, GRUCell(128), GRUCell(128), return_seq=True)
    net = tflearn.dropout(net, dropout)
    net = bidirectional_rnn(net, GRUCell(128), GRUCell(128), return_seq=False)
    net = tflearn.dropout(net, dropout)
    net = tflearn.fully_connected(
        net,
        vocab_size,
        activation='softmax',
        weights_init=tflearn.initializations.xavier())
    net = tflearn.regression(net,
                             optimizer='adam',
                             learning_rate=learning_rate,
                             loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net,
                        tensorboard_dir='/tmp/tflearn_logs/deep_gru/',
                        tensorboard_verbose=2)
    #checkpoint_path='/tmp/tflearn_logs/shallow_lstm/',
    #best_checkpoint_path="C:/Users/macle/Desktop/UPC Masters/Semester 2/CI/SubRecommender/models/")

    model.fit(trainX,
              trainY,
              validation_set=(testX, testY),
              show_metric=False,
              snapshot_epoch=True,
              batch_size=256,
              n_epoch=num_epochs,
              run_id=str(learning_rate) + "-" + str(n_units) + "-" +
              str(dropout))

    return model
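A hypothetical call, assuming train and test are dicts holding 'sub_seqs' and 'sub_label' keys (the sizes are illustrative):

model = train_model(train, test, vocab_size=5000, max_seq_size=50)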
Example #17
def get_model():
    n_dim = 6000
    n_split = 300

    tf.reset_default_graph()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

    ### split
    #X = X.reshape([-1, n_split, 1])
    #testX = testX.reshape([-1, n_split, 1])

    # Building Residual Network
    net = tflearn.input_data(shape=[None, n_dim, 1])
    print("input", net.get_shape())
    ############ reshape for sub_seq
    net = tf.reshape(net, [-1, n_split, 1])
    print("reshaped input", net.get_shape())
    net = tflearn.conv_1d(net, 64, 16, 2)
    #net = tflearn.conv_1d(net, 64, 16, 2, regularizer='L2', weight_decay=0.0001)
    print("cov1", net.get_shape())
    net = tflearn.batch_normalization(net)
    print("bn1", net.get_shape())
    net = tflearn.activation(net, 'relu')
    print("relu1", net.get_shape())

    # Residual blocks
    '''net = tflearn.residual_bottleneck(net, 2, 16, 64, downsample_strides = 2, downsample=True, is_first_block = True)
    print("resn2", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides = 2, downsample=True)
    print("resn4", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides = 2, downsample=True)
    print("resn6", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides = 2, downsample=True)
    print("resn8", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
    print("resn10", net.get_shape())'''

    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      64,
                                      downsample_strides=2,
                                      downsample=True,
                                      is_first_block=True)
    print("resn2", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      64,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn4", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      128,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn6", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      128,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn8", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      256,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn10", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      256,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn12", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      512,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn14", net.get_shape())
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      512,
                                      downsample_strides=2,
                                      downsample=True)
    print("resn16", net.get_shape())
    '''net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
    print("resn18", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
    print("resn20", net.get_shape())'''

    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    #net = tflearn.global_avg_pool(net)
    # LSTM
    print("before LSTM, before reshape", net.get_shape())
    ############ reshape for sub_seq
    net = tf.reshape(net, [-1, n_dim // n_split, 512])
    print("before LSTM", net.get_shape())
    net = bidirectional_rnn(net, BasicLSTMCell(256), BasicLSTMCell(256))
    print("after LSTM", net.get_shape())
    #net = tflearn.layers.recurrent.lstm(net, n_units=512)
    #print("after LSTM", net.get_shape())
    net = dropout(net, 0.5)

    # Regression
    feature_layer = tflearn.fully_connected(net, 32, activation='sigmoid')
    net = tflearn.dropout(feature_layer, 0.5)
    net = tflearn.fully_connected(net, 4, activation='softmax')
    print("dense", net.get_shape())
    net = tflearn.regression(
        net,
        optimizer='adam',  # alternative: 'momentum'
        loss='categorical_crossentropy')
    # optionally: learning_rate=0.1
    ## save model
    ### load
    model = tflearn.DNN(net)
    run_id = 'resnet_6000_500_10_5_v1'
    model.load('../model/resNet/' + run_id)

    all_names = tflearn.variables.get_all_variables()
    print(all_names[0])
    ttt = model.get_weights(all_names[0])
    print(type(ttt))
    print(ttt)

    # tflearn.variables.get_value(all_names[0], xxx)

    return all_names
Example #18
def get_resNet_proba(long_data, long_pid, model_path):
    all_pid = np.array(long_pid)
    all_feature = np.array(long_data)
    all_label = np.array([])
    test_data, test_label, test_pid = slide_and_cut(all_feature, all_label,
                                                    all_pid)

    pid_map = {}
    for i in range(len(all_pid)):
        pid_map[all_pid[i]] = i

    n_dim = 6000
    n_split = 300

    tf.reset_default_graph()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

    # Building Residual Network
    net = tflearn.input_data(shape=[None, n_dim, 1])
    ############ reshape for sub_seq
    net = tf.reshape(net, [-1, n_split, 1])
    net = tflearn.conv_1d(net, 64, 16, 2)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')

    # Residual blocks
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      64,
                                      downsample_strides=2,
                                      downsample=True,
                                      is_first_block=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      64,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      128,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      128,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      256,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      256,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      512,
                                      downsample_strides=2,
                                      downsample=True)
    net = tflearn.residual_bottleneck(net,
                                      2,
                                      16,
                                      512,
                                      downsample_strides=2,
                                      downsample=True)

    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    #net = tflearn.global_avg_pool(net)
    # LSTM
    ############ reshape for sub_seq
    net = tf.reshape(net, [-1, n_dim // n_split, 512])
    net = bidirectional_rnn(net, BasicLSTMCell(256), BasicLSTMCell(256))
    net = dropout(net, 0.5)

    # Regression
    feature_layer = tflearn.fully_connected(net, 32, activation='sigmoid')
    net = tflearn.dropout(feature_layer, 0.5)
    net = tflearn.fully_connected(net, 4, activation='softmax')
    net = tflearn.regression(
        net,
        optimizer='adam',  # alternative: 'momentum'
        loss='categorical_crossentropy')
    # optionally: learning_rate=0.1
    ## save model
    ### load
    model = tflearn.DNN(net)
    model.load(model_path)

    ### create new model, and get features
    num_of_test = len(test_data)
    cur_data = []
    pre = []
    for i in range(num_of_test):
        cur_data.append(test_data[i])
        if (num_of_test > 1 and (i % 2000 == 0 or i == (num_of_test - 1))
                and i != 0) or (num_of_test == 1):
            tmp_testX = np.array(cur_data, dtype=np.float32)
            cur_data = []
            pre.extend(model.predict(tmp_testX))

    test_pid = np.array(test_pid, dtype=np.string_)

    y_num = len(pid_map)
    y_pre = [[0. for j in range(4)] for i in range(y_num)]
    y_sec_pre = [[0. for j in range(4)] for i in range(y_num)]
    y_third_pre = [[0. for j in range(4)] for i in range(y_num)]
    #print(y_num)

    for j in range(len(pre)):
        i_pred = np.array(pre[j], dtype=np.float32)
        cur_pid = str(test_pid[j], 'utf-8')

        list_id = pid_map[cur_pid]
        temp_pre = np.array(y_pre[list_id], dtype=np.float32)
        temp_sec_pre = np.array(y_sec_pre[list_id], dtype=np.float32)
        temp_third_pre = np.array(y_third_pre[list_id], dtype=np.float32)

        max_p = temp_pre[np.argmax(temp_pre)]
        max_sec_p = temp_sec_pre[np.argmax(temp_sec_pre)]
        max_third_p = temp_third_pre[np.argmax(temp_third_pre)]
        sec_p = 0
        sec_sec_p = 0
        sec_third_p = 0
        for k in range(len(temp_pre)):
            if temp_pre[k] == max_p:
                continue
            if temp_pre[k] > sec_p:
                sec_p = temp_pre[k]

            if temp_sec_pre[k] == max_sec_p:
                continue
            if temp_sec_pre[k] > sec_sec_p:
                sec_sec_p = temp_sec_pre[k]

            if temp_third_pre[k] == max_third_p:
                continue
            if temp_third_pre[k] > sec_third_p:
                sec_third_p = temp_third_pre[k]

        cur_max_p = i_pred[np.argmax(i_pred)]
        cur_sec_p = 0
        for k in range(len(i_pred)):
            if i_pred[k] == cur_max_p:
                continue
            if i_pred[k] > cur_sec_p:
                cur_sec_p = i_pred[k]

        if (cur_max_p - cur_sec_p) > (max_p - sec_p):
            y_third_pre[list_id] = y_sec_pre[list_id]
            y_sec_pre[list_id] = y_pre[list_id]
            y_pre[list_id] = i_pred
        elif (cur_max_p - cur_sec_p) > (max_sec_p - sec_sec_p):
            y_third_pre[list_id] = y_sec_pre[list_id]
            y_sec_pre[list_id] = i_pred
        elif (cur_max_p - cur_sec_p) > (max_third_p - sec_third_p):
            y_third_pre[list_id] = i_pred

    for k in range(len(y_pre)):
        labels = [0. for j in range(4)]
        pred_1 = np.argmax(y_pre[k])
        labels[pred_1] += 1
        pred_2 = np.argmax(y_sec_pre[k])
        labels[pred_2] += 1
        pred_3 = np.argmax(y_third_pre[k])
        labels[pred_3] += 1

        if pred_1 == 2:  # and (abs(y_pre[k][np.argmax(labels)] - y_pre[k][2])/y_pre[k][np.argmax(labels)] <= 0.2):
            continue
        elif pred_2 == 2:  # and (abs(y_pre[k][np.argmax(labels)] - y_sec_pre[k][2])/y_pre[k][np.argmax(labels)] <= 0.2):
            y_pre[k] = y_sec_pre[k]
        elif pred_3 == 2:  # and (abs(y_pre[k][np.argmax(labels)] - y_third_pre[k][2])/y_pre[k][np.argmax(labels)] <= 0.2):
            y_pre[k] = y_third_pre[k]
        elif pred_1 != np.argmax(labels):
            if pred_2 == np.argmax(labels):
                y_pre[k] = y_sec_pre[k]

    return y_pre
Example #19
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.embedding_ops import embedding
from tflearn.layers.recurrent import bidirectional_rnn, BasicLSTMCell
from tflearn.layers.estimator import regression

# IMDB Dataset loading
train, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000,
                                valid_portion=0.1)
trainX, trainY = train
testX, testY = test

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=200, value=0.)
testX = pad_sequences(testX, maxlen=200, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY, nb_classes=2)
testY = to_categorical(testY, nb_classes=2)

# Network building
net = input_data(shape=[None, 200])
net = embedding(net, input_dim=20000, output_dim=128)
net = bidirectional_rnn(net, BasicLSTMCell(128), BasicLSTMCell(128))
net = dropout(net, 0.5)
net = fully_connected(net, 2, activation='softmax')
net = regression(net, optimizer='adam', loss='categorical_crossentropy')

# Training
model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=64)
Example #20
    def make_core_network(net, regularizer='L2'):
        ############ reshape for sub_seq
        net = tf.reshape(net, [-1, n_split, 1])
        print("reshaped input", net.get_shape())
        net = tflearn.conv_1d(net, 64, 16, 2)
        #net = tflearn.conv_1d(net, 64, 16, 2, regularizer='L2', weight_decay=0.0001)
        print("cov1", net.get_shape())
        net = tflearn.batch_normalization(net)
        print("bn1", net.get_shape())
        net = tflearn.activation(net, 'relu')
        print("relu1", net.get_shape())

        # Residual blocks
        '''net = tflearn.residual_bottleneck(net, 2, 16, 64, downsample_strides = 2, downsample=True, is_first_block = True)
        print("resn2", net.get_shape())
        net = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides = 2, downsample=True)
        print("resn4", net.get_shape())
        net = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides = 2, downsample=True)
        print("resn6", net.get_shape())
        net = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides = 2, downsample=True)
        print("resn8", net.get_shape())
        net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
        print("resn10", net.get_shape())'''

        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          64,
                                          downsample_strides=2,
                                          downsample=True,
                                          is_first_block=True)
        print("resn2", net.get_shape())
        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          64,
                                          downsample_strides=2,
                                          downsample=True)
        print("resn4", net.get_shape())
        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          128,
                                          downsample_strides=2,
                                          downsample=True)
        print("resn6", net.get_shape())
        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          128,
                                          downsample_strides=2,
                                          downsample=True)
        print("resn8", net.get_shape())
        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          256,
                                          downsample_strides=2,
                                          downsample=True)
        print("resn10", net.get_shape())
        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          256,
                                          downsample_strides=2,
                                          downsample=True)
        print("resn12", net.get_shape())
        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          512,
                                          downsample_strides=2,
                                          downsample=True)
        print("resn14", net.get_shape())
        net = tflearn.residual_bottleneck(net,
                                          2,
                                          16,
                                          512,
                                          downsample_strides=2,
                                          downsample=True)
        print("resn16", net.get_shape())
        '''net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
        print("resn18", net.get_shape())
        net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
        print("resn20", net.get_shape())'''

        net = tflearn.batch_normalization(net)
        net = tflearn.activation(net, 'relu')
        #net = tflearn.global_avg_pool(net)
        # LSTM
        print("before LSTM, before reshape", net.get_shape())
        ############ reshape for sub_seq
        net = tf.reshape(net, [-1, n_dim // n_split, 512])
        print("before LSTM", net.get_shape())
        net = bidirectional_rnn(net, BasicLSTMCell(256), BasicLSTMCell(256))
        print("after LSTM", net.get_shape())
        #net = tflearn.layers.recurrent.lstm(net, n_units=512)
        #print("after LSTM", net.get_shape())
        net = dropout(net, 0.5)

        # Regression
        feature_layer = tflearn.fully_connected(net, 32, activation='sigmoid')
        net = tflearn.dropout(feature_layer, 0.5)
        net = tflearn.fully_connected(net, 4, activation='softmax')
        print("dense", net.get_shape())
        return net, feature_layer
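The two returned tensors are typically consumed as in the sibling examples: net feeds the training head, while feature_layer backs a second DNN that shares the trained session. A sketch, assuming n_dim is defined in the enclosing scope:

net, feature_layer = make_core_network(tflearn.input_data(shape=[None, n_dim, 1]))
net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')
model = tflearn.DNN(net)
feat_model = tflearn.DNN(feature_layer, session=model.session)  # feature extractor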
Example #21
def get_resnet_feature(test_data):
    n_dim = 6000
    n_split = 300

    tf.reset_default_graph()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

    ### split
    #X = X.reshape([-1, n_split, 1])
    #testX = testX.reshape([-1, n_split, 1])

    # Building Residual Network
    net = tflearn.input_data(shape=[None, n_dim, 1])
    print("input", net.get_shape())
    ############ reshape for sub_seq 
    net = tf.reshape(net, [-1, n_split, 1])
    print("reshaped input", net.get_shape())
    net = tflearn.conv_1d(net, 64, 16, 2)
    #net = tflearn.conv_1d(net, 64, 16, 2, regularizer='L2', weight_decay=0.0001)
    print("cov1", net.get_shape())
    net = tflearn.batch_normalization(net)
    print("bn1", net.get_shape())
    net = tflearn.activation(net, 'relu')
    print("relu1", net.get_shape())

    # Residual blocks
    '''net = tflearn.residual_bottleneck(net, 2, 16, 64, downsample_strides = 2, downsample=True, is_first_block = True)
    print("resn2", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides = 2, downsample=True)
    print("resn4", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides = 2, downsample=True)
    print("resn6", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides = 2, downsample=True)
    print("resn8", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
    print("resn10", net.get_shape())'''

    net = tflearn.residual_bottleneck(net, 2, 16, 64, downsample_strides=2, downsample=True, is_first_block=True)
    print("resn2", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 64, downsample_strides=2, downsample=True)
    print("resn4", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides=2, downsample=True)
    print("resn6", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides=2, downsample=True)
    print("resn8", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides=2, downsample=True)
    print("resn10", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides=2, downsample=True)
    print("resn12", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides=2, downsample=True)
    print("resn14", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides=2, downsample=True)
    print("resn16", net.get_shape())
    '''net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
    print("resn18", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 1024, downsample_strides = 2, downsample=True)
    print("resn20", net.get_shape())'''

    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    #net = tflearn.global_avg_pool(net)
    # LSTM
    print("before LSTM, before reshape", net.get_shape())
    ############ reshape for sub_seq 
    net = tf.reshape(net, [-1, n_dim//n_split, 512])
    print("before LSTM", net.get_shape())
    net = bidirectional_rnn(net, BasicLSTMCell(256), BasicLSTMCell(256))
    print("after LSTM", net.get_shape())
    #net = tflearn.layers.recurrent.lstm(net, n_units=512)
    #print("after LSTM", net.get_shape())
    net = dropout(net, 0.5)

    # Regression
    net = tflearn.fully_connected(net, 32, activation='sigmoid')
    net = tflearn.dropout(net, 0.5)
    # net, feature_layer = tflearn.fully_connected(net, 4, activation='softmax', return_logit = True)
    feature_layer = tflearn.fully_connected(net, 4, activation='softmax')
    print('feature_layer: ', feature_layer.get_shape())
    print("dense", net.get_shape())
    net = tflearn.regression(net,
                             optimizer='adam',  # alternative: 'momentum'
                             loss='categorical_crossentropy')
    # optionally: learning_rate=0.1
    print('final output: ', net.get_shape())
    ## save model
    ### load
    model = tflearn.DNN(net)
    run_id = 'resnet_6000_500_10_5_v1'
    model.load('../model/resNet/'+run_id)
    
    # print(tflearn.variables.get_all_variables())

    ### create new model, and get features
    m2 = tflearn.DNN(feature_layer, session=model.session)
    tmp_feature = []
    num_of_test = len(test_data)
    cur_data = []
    pre = []
    for i in range(num_of_test):
        cur_data.append(test_data[i])
        if (i % 2000 == 0 or i == (num_of_test - 1)) and i != 0:
            #tmp_test_data = test_data[i].reshape([-1, n_dim, 1])
            tmp_testX = np.array(cur_data, dtype=np.float32)
            tmp_feature.extend(m2.predict(tmp_testX.reshape([-1, n_dim, 1])))
            cur_data = []
            pre.extend(model.predict(tmp_testX))
            print(i, len(tmp_feature), len(tmp_feature[0]))

    tmp_feature = np.array(tmp_feature)

    return tmp_feature
Example #22
#Get data
trainX, trainY, maxLen, vocabSize, _ = data.get_Data_Vectors()

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=maxLen, value=0.)
#Converting labels to binary vectors
trainY = pad_sequences(trainY, maxlen=maxLen, value=0.)

# Network building
print("Beginning neural network")
net = input_data(shape=[None, maxLen])
net = embedding(net, input_dim=vocabSize, output_dim=128)
#print(net.get_shape().as_list())
net = bidirectional_rnn(net, BasicLSTMCell(512), BasicLSTMCell(512))
net = dropout(net, 0.5)
net = fully_connected(net, maxLen, activation='softmax')
net = regression(net, optimizer='adam',
                 loss='categorical_crossentropy')  #, learning_rate=0.001)

#"""
# Training
model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
model.fit(trainX,
          trainY,
          validation_set=0.2,
          show_metric=True,
          batch_size=64,
          shuffle=True)
#"""
Example #23
def get_deep_mimic_feats(test_data):
    n_dim = 6000
    n_split = 300

    tf.reset_default_graph()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

    # Building Residual Network
    net = tflearn.input_data(shape=[None, n_dim, 1])
    print("input", net.get_shape())
    ############ reshape for sub_seq 
    net = tf.reshape(net, [-1, n_split, 1])
    print("reshaped input", net.get_shape())
    net = tflearn.conv_1d(net, 64, 16, 2)
    #net = tflearn.conv_1d(net, 64, 16, 2, regularizer='L2', weight_decay=0.0001)
    print("cov1", net.get_shape())
    net = tflearn.batch_normalization(net)
    print("bn1", net.get_shape())
    net = tflearn.activation(net, 'relu')
    print("relu1", net.get_shape())

    # Residual blocks
    net = tflearn.residual_bottleneck(net, 2, 16, 64, downsample_strides=2, downsample=True, is_first_block=True)
    print("resn2", net.get_shape())
    net = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides=4, downsample=True)
    print("resn4", net.get_shape())
    # net = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides = 4, downsample=True)
    # print("resn6", net.get_shape())
    # net = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides = 4, downsample=True)
    # print("resn8", net.get_shape())


    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    print("before reshape", net.get_shape())

    # net = tf.reshape(net, [-1, n_dim//n_split*net.get_shape()[-2], net.get_shape()[-1]])
    # LSTM
    ############ reshape for sub_seq 
    before_reshaped_shape = net.get_shape().as_list()
    net = tf.reshape(net, [-1, n_dim//n_split, before_reshaped_shape[1]*before_reshaped_shape[2]])
    print("before LSTM", net.get_shape())
    net = bidirectional_rnn(net, BasicLSTMCell(64), BasicLSTMCell(64))
    print("after LSTM", net.get_shape())
    net = dropout(net, 0.5)

    # Regression
    net = tflearn.fully_connected(net, 32, activation='sigmoid')
    net = tflearn.fully_connected(net, 4, activation='softmax')
    print("dense", net.get_shape())
    net = tflearn.regression(net, optimizer='adam', loss='mean_square')


    # Training
    model = tflearn.DNN(net)
    model.load('../model/mimic/mimic_model_offline_v4.1')

    pred = []
    num_of_test = len(test_data)
    cur_data = []
    for i in range(num_of_test):
        cur_data.append(test_data[i])
        if (i % 2000 == 0 or i == (num_of_test - 1)) and i != 0:
            tmp_testX = np.array(cur_data, dtype=np.float32)
            pred.extend(model.predict(tmp_testX.reshape([-1, n_dim, 1])))
            cur_data = []
    
    return pred
Example #24
trainX = pad_sequences(trainX, maxlen=maxLen, value=0)
#Converting labels to binary vectors
trainY = pad_sequences(trainY, maxlen=maxLen, value=0)
embeddings = concat_2Dvectors(embeddings, Flatten_3Dto2D(POS_vectors))

# Network building
print("Beginning neural network")
net = input_data(shape=[None, maxLen])
net = embedding(net,
                input_dim=len(embeddings),
                output_dim=len(embeddings[0]),
                trainable=False,
                name="EmbeddingLayer")
print("After embeddings : ", net.get_shape().as_list())
net = bidirectional_rnn(net,
                        BasicLSTMCell(1024),
                        BasicLSTMCell(1024),
                        return_seq=True)
#net = [dropout(net[i], 0.5) for i in range(len(net))]
net = [
    fully_connected(net[i], 1, activation='sigmoid') for i in range(len(net))
]
net = merge(net, mode='concat')
print("After RNN : ", net.get_shape().as_list())
print("After Dropout : ", net.get_shape().as_list())
net = regression(net,
                 optimizer='adam',
                 loss='binary_crossentropy',
                 learning_rate=0.005)
print("After regression : ", net.get_shape().as_list())

testX = trainX[int(0.3 * len(trainY)):]
Example #25
# Sequence padding
trainX = pad_sequences(trainX, maxlen=maxLen, value=0.)
#Converting labels to binary vectors
trainY = pad_sequences(trainY, maxlen=2, value=0.)
embeddings = concat_2Dvectors(embeddings, Flatten_3Dto2D(POS_vectors))

# Network building
print("Beginning neural network")
net = input_data(shape=[None, maxLen])
net = embedding(net,
                input_dim=len(embeddings),
                output_dim=len(embeddings[0]),
                trainable=False,
                name="EmbeddingLayer")
print(net.get_shape().as_list())
net = bidirectional_rnn(net, BasicLSTMCell(1024), BasicLSTMCell(1024))
net = dropout(net, 0.5)
net = fully_connected(net, 2, activation='softmax')
net = regression(net,
                 optimizer='adam',
                 loss='categorical_crossentropy',
                 learning_rate=0.005)

testX = trainX[int(0.3 * len(trainY)):]
testY = trainY[int(0.3 * len(trainY)):]

# Training
model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
embeddingWeights = tflearn.get_layer_variables_by_name('EmbeddingLayer')[0]
# Assign your own weights (for example, a numpy array [input_dim, output_dim])
model.set_weights(embeddingWeights, embeddings)