Example #1
    def create_critic_network(self, state_dim, action_dim):
        inputs = input_data(shape=state_dim)
        action = input_data(shape=[None, action_dim])

        net = conv_1d(inputs, 128, 2, 2)
        net = max_pool_1d(net, 2, 2, 'same')
        net = batch_normalization(net)

        net = conv_1d(net, 256, 2, 2)
        net = max_pool_1d(net, 2, 2, 'same')
        net = batch_normalization(net)

        print(net.get_shape().as_list())

        net = fully_connected(net, 1024, activation='relu')
        net = dropout(net, 0.8)
        net = fully_connected(net, 1024, activation='relu')

        # Add the action tensor in the 2nd hidden layer
        # Use two temp layers to get the corresponding weights and biases
        t1 = fully_connected(net, 2048)
        t2 = fully_connected(action, 2048)

        net = activation(tf.matmul(net, t1.W) + tf.matmul(action, t2.W) + t2.b,
                         activation='relu')

        # linear layer connected to 1 output representing Q(s,a)
        # Weights are init to Uniform[-3e-3, 3e-3]
        w_init = initializations.uniform(minval=-0.003, maxval=0.003)
        out = fully_connected(net, 1, weights_init=w_init)
        return inputs, action, out
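
The two temporary fully_connected layers above exist only to expose weight matrices (t1.W, t2.W) so state and action features can be merged in a single affine layer, the usual DDPG critic construction. A hedged sketch of how the returned tensors might feed a TD-style update; the names agent and target_q, the dimensions, and the learning rate are illustrative assumptions, not part of the example:

    # Hypothetical wiring of the critic into a TD update (TF1 style).
    state_ph, action_ph, q_out = agent.create_critic_network(
        state_dim=[None, 8, 1], action_dim=3)

    # Bellman target r + gamma * Q'(s', a'), computed elsewhere.
    target_q = tf.placeholder(tf.float32, [None, 1])

    loss = tflearn.mean_square(target_q, q_out)
    train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)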
Example #2
    def CreateNetwork(self, inputs):
        with tf.variable_scope('actor'):
            split_0 = tflearn.fully_connected(
                inputs[:, 0:1, -1], FEATURE_NUM, activation='relu')
            split_1 = tflearn.fully_connected(
                inputs[:, 1:2, -1], FEATURE_NUM, activation='relu')
            split_2 = tflearn.conv_1d(
                inputs[:, 2:3, :], FEATURE_NUM, 4, activation='relu')
            split_3 = tflearn.conv_1d(
                inputs[:, 3:4, :], FEATURE_NUM, 4, activation='relu')
            split_4 = tflearn.conv_1d(
                inputs[:, 4:5, :self.a_dim], FEATURE_NUM, 4, activation='relu')
            split_5 = tflearn.fully_connected(
                inputs[:, 5:6, -1], FEATURE_NUM, activation='relu')

            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)

            merge_net = tflearn.merge(
                [split_0, split_1, split_2_flat, split_3_flat, split_4_flat, split_5], 'concat')

            pi_net = tflearn.fully_connected(
                merge_net, FEATURE_NUM, activation='relu')
            pi = tflearn.fully_connected(pi_net, self.a_dim, activation='softmax') 
            val_net = tflearn.fully_connected(
                merge_net, FEATURE_NUM, activation='relu')
            val = tflearn.fully_connected(val_net, 1, activation='tanh') 
            return pi, val
Example #3
    def input_transform_net(point_cloud, K):
        num_point = point_cloud.get_shape()[1].value

        net = tflearn.conv_1d(point_cloud,
                              nb_filter=64,
                              filter_size=1,
                              padding="valid",
                              strides=1,
                              activation="relu")
        net = tflearn.conv_1d(net,
                              nb_filter=128,
                              filter_size=1,
                              padding="valid",
                              strides=1,
                              activation="relu")
        net = tflearn.conv_1d(net,
                              nb_filter=256,
                              filter_size=1,
                              padding="valid",
                              strides=1,
                              activation="relu")
        net = tflearn.max_pool_1d(net, kernel_size=num_point, padding="valid")
        net = tflearn.fully_connected(net, 256, activation="relu")
        net = tflearn.fully_connected(net, 64, activation="relu")

        weights = tf.Variable(tf.zeros(shape=[64, K * K], dtype=tf.float32))
        biases = tf.Variable(
            tf.reshape(tf.eye(K, dtype=tf.float32), shape=[-1]))
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)
        transform = tf.reshape(transform, [-1, K, K])

        return transform
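
Because the weights start at zero and the bias encodes a flattened K x K identity matrix, the predicted transform is exactly the identity at initialization and the network only learns a correction, as in PointNet's T-Net. A short sketch of applying the returned transform, assuming point_cloud is a [batch, num_point, K] tensor:

    # Align the input points with the learned transform.
    transform = input_transform_net(point_cloud, K=3)  # [batch, K, K]
    aligned = tf.matmul(point_cloud, transform)        # [batch, num_point, K]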
Example #4
def build():
    network = input_data([None, Meta.max_string_len])
    network = embedding(network, input_dim=Meta.max_one_hot, output_dim=128)
    branch1 = conv_1d(network,
                      128,
                      3,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    branch2 = conv_1d(network,
                      128,
                      4,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    branch3 = conv_1d(network,
                      128,
                      5,
                      padding='valid',
                      activation='relu',
                      regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = dropout(network, 0.5)
    network = lstm(network, 128)
    # network = fully_connected(network, 20)
    network = fully_connected(network, 2, activation='softmax')
    network = tflearn.regression(network,
                                 optimizer='adam',
                                 learning_rate=0.001,
                                 loss='categorical_crossentropy')
    model = tflearn.DNN(network, tensorboard_verbose=0)
    return model
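
A hedged usage sketch for the model above; the arrays X and Y are hypothetical and assumed to be integer-encoded strings padded to Meta.max_string_len with two-class one-hot labels:

    model = build()
    # X: [n_samples, Meta.max_string_len] integer sequences
    # Y: [n_samples, 2] one-hot labels
    model.fit(X, Y, n_epoch=10, validation_set=0.1,
              show_metric=True, batch_size=64)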
Example #5
File: a3c.py Project: xgw/proj
    def create_actor_network(self):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])

            split_0 = tflearn.fully_connected(inputs[:, 0:1, -1],
                                              128,
                                              activation='relu')
            split_1 = tflearn.conv_1d(inputs[:, 1:2, :],
                                      128,
                                      4,
                                      activation='relu')
            split_2 = tflearn.conv_1d(inputs[:, 2:3, :NUM_OF_TRACKS],
                                      128,
                                      4,
                                      activation='relu')
            split_3 = tflearn.fully_connected(inputs[:, 3:4, -1],
                                              128,
                                              activation='relu')

            split_1_flat = tflearn.flatten(split_1)
            split_2_flat = tflearn.flatten(split_2)

            merge_net = tflearn.merge(
                [split_0, split_1_flat, split_2_flat, split_3], 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net,
                                                  128,
                                                  activation='relu')
            out = tflearn.fully_connected(dense_net_0,
                                          self.a_dim,
                                          activation='softmax')

            return inputs, out
Example #6
    def create_network(self):
        # TODO try convolutional RNN and LSTM shapes
        net = tflearn.input_data(shape=[None, len(self.string_to_number)])

        # self.net = tflearn.fully_connected(self.net, 32)
        # self.net = tflearn.fully_connected(self.net, 64)

        # conv_1d expects a 3-D tensor: [batch, steps, channels]
        net = tflearn.reshape(net, (-1, len(self.string_to_number), 1))

        # get the "good" parts
        net = tflearn.conv_1d(net, 64, 3, activation='relu', regularizer="L2")
        net = tflearn.max_pool_1d(net, 2)
        net = tflearn.batch_normalization(net)
        net = tflearn.conv_1d(net, 128, 3, activation='relu', regularizer="L2")
        net = tflearn.max_pool_1d(net, 2)
        net = tflearn.batch_normalization(net)

        # remember the meanings
        # net = tflearn.lstm(net, 64)
        # net = tflearn.dropout(net, 0.5)

        # net = tflearn.simple_rnn(net, len(self.string_to_number) * 2)

        # map to next value
        net = tflearn.fully_connected(net, 256, activation='tanh')
        net = tflearn.dropout(net, 0.5)
        net = tflearn.fully_connected(net,
                                      len(self.string_to_number),
                                      activation='softmax')
        self.net = tflearn.regression(net, optimizer='adam')
        self.model = tflearn.DNN(self.net)
Example #7
    def create_dual_network(self, inputs, s_dim):
        with tf.variable_scope(self.scope + '-dual', reuse=self.reuse):
            split_array = []
            for i in range(s_dim[0]):
                tmp = tf.reshape(inputs[:, i:i + 1, :], (-1, s_dim[1], 1))
                branch1 = tflearn.conv_1d(tmp,
                                          FEATURE_NUM,
                                          3,
                                          activation='relu')
                branch2 = tflearn.conv_1d(tmp,
                                          FEATURE_NUM,
                                          4,
                                          activation='relu')
                branch3 = tflearn.conv_1d(tmp,
                                          FEATURE_NUM,
                                          5,
                                          activation='relu')
                network = tflearn.merge([branch1, branch2, branch3],
                                        mode='concat',
                                        axis=1)
                network = tf.expand_dims(network, 2)
                network = tflearn.global_avg_pool(network)
                split_array.append(network)
            # out, _ = self.attention(split_array, FEATURE_NUM)
            out = tflearn.merge(split_array, 'concat')
            self.reuse = True
            return out
Example #8
    def create_actor_network(self):
        with tf.variable_scope(self.scope + '-actor'):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])
            split_array = []
            for i in range(self.s_dim[0]):
                tmp = tf.reshape(inputs[:, i:i + 1, :], (-1, self.s_dim[1], 1))
                # three stacked conv/pool stages per input row
                split = tflearn.conv_1d(tmp,
                                        FEATURE_NUM // 4,
                                        KERNEL,
                                        activation='relu')
                split = tflearn.avg_pool_1d(split, 2)
                split = tflearn.conv_1d(split,
                                        FEATURE_NUM // 2,
                                        KERNEL,
                                        activation='relu')
                split = tflearn.avg_pool_1d(split, 2)
                split = tflearn.conv_1d(split,
                                        FEATURE_NUM,
                                        KERNEL,
                                        activation='relu')
                #split = tflearn.avg_pool_1d(split, 2)
                flat = tflearn.flatten(split)
                split_array.append(flat)
            dense_net_0 = tflearn.merge(split_array, 'concat')
            dense_net_0 = tflearn.fully_connected(dense_net_0,
                                                  FEATURE_NUM,
                                                  activation='relu')
            out = tflearn.fully_connected(dense_net_0,
                                          self.a_dim,
                                          activation='softmax')

            return inputs, out
Example #9
def create_network(s_lengths):
    '''
    Create the neural network.

    :s_lengths: list with the length of each input metric vector
    '''
    inputs = tflearn.input_data(shape=[None, len(s_lengths), max(s_lengths)])
    splits = list()

    # Add a convolution layer for each input vector
    for i, s_len in enumerate(s_lengths):
        splits.append(
            tflearn.conv_1d(inputs[:, i:i + 1, :s_len],
                            128,
                            4,
                            activation='relu',
                            name='Input%s' % i))

    # Merge all initial convolution layers
    dense_net = tflearn.merge(splits, 'concat', name='MergeNet')

    # Hidden layers
    for i in range(NETWORK_DEPTH):
        dense_net = tflearn.conv_1d(dense_net,
                                    128,
                                    4,
                                    activation='relu',
                                    name='Dense%s' % i)

    return inputs, dense_net
Example #10
def create_convolutional(max_sequence_length, dict_size):
    net = tflearn.input_data([None, max_sequence_length])
    net = tflearn.embedding(net, input_dim=dict_size + 1, output_dim=128)
    branch1 = tflearn.conv_1d(net,
                              128,
                              3,
                              padding='valid',
                              activation='relu',
                              regularizer='L2')
    branch2 = tflearn.conv_1d(net,
                              128,
                              4,
                              padding='valid',
                              activation='relu',
                              regularizer='L2')
    branch3 = tflearn.conv_1d(net,
                              128,
                              5,
                              padding='valid',
                              activation='relu',
                              regularizer='L2')
    net = tflearn.merge([branch1, branch2, branch3], mode='concat', axis=1)
    net = tf.expand_dims(net, 2)
    net = tflearn.layers.conv.global_max_pool(net)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net,
                             optimizer='sgd',
                             learning_rate=0.1,
                             loss='categorical_crossentropy')

    return tflearn.DNN(net,
                       tensorboard_verbose=0,
                       tensorboard_dir='../tensorboard/tensorboard_conv')
Example #11
    def create_actor_network(self):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1]])

            split_0 = tflearn.fully_connected(inputs[:, 0:1, -1], 64, activation='relu')
            split_1 = tflearn.fully_connected(inputs[:, 1:2, -1], 64, activation='relu')
            split_2 = tflearn.fully_connected(inputs[:, 4:5, -1], 64, activation='relu')

            reshape_0 = tflearn.reshape(inputs[:, 2:4, :], [-1, 2, self.s_dim[1], 1])
            split_3 = tflearn.conv_2d(reshape_0, 128, 3, activation='relu')

            split_4 = tflearn.conv_1d(inputs[:, 5:6, :], 128, 4, activation='relu')
            split_5 = tflearn.conv_1d(inputs[:, 6:7, :], 128, 4, activation='relu')

            flatten_0 = tflearn.flatten(split_3)
            flatten_1 = tflearn.flatten(split_4)
            flatten_2 = tflearn.flatten(split_5)

            merge_net = tflearn.merge([split_0, split_1, split_2, flatten_0, flatten_1, flatten_2], 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net, 128, activation='relu')

            # for multiple video, mask out the invalid actions
            linear_out = tflearn.fully_connected(dense_net_0, self.a_dim, activation='linear')
            linear_out = tf.transpose(linear_out)  # [None, a_dim] -> [a_dim, None]
            mask_out = tf.boolean_mask(linear_out, self.mask)  # [a_dim, None] -> [masked, None]
            mask_out = tf.transpose(mask_out)  # [masked, None] -> [None, masked]
            softmax_out = tf.nn.softmax(mask_out)

            return inputs, softmax_out
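
tf.boolean_mask with a 1-D mask filters along axis 0, which is why the logits are transposed before masking and transposed back afterwards. A self-contained sketch of the same trick with illustrative values:

    logits = tf.constant([[1.0, 2.0, 3.0, 4.0]])   # [batch, a_dim]
    mask = tf.constant([True, False, True, True])  # valid actions only

    masked = tf.transpose(tf.boolean_mask(tf.transpose(logits), mask))
    probs = tf.nn.softmax(masked)                  # [batch, 3], rows sum to 1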
Example #12
    def create_actor_network(self):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])

            split_0 = tflearn.fully_connected(inputs[:, 0:1, -1],
                                              128,
                                              activation='relu')
            split_1 = tflearn.fully_connected(inputs[:, 1:2, -1],
                                              128,
                                              activation='relu')
            split_2 = tflearn.conv_1d(inputs[:, 2:3, -self.s_len:],
                                      128,
                                      4,
                                      activation='relu')
            split_3 = tflearn.conv_1d(inputs[:, 3:4, -self.s_len:],
                                      128,
                                      4,
                                      activation='relu')
            split_4 = tflearn.conv_1d(inputs[:, 4:5, :A_DIM],
                                      128,
                                      4,
                                      activation='relu')
            split_5 = tflearn.fully_connected(inputs[:, 4:5, -1],
                                              128,
                                              activation='relu')

            # extra per-agent input (actor network only):
            # test_6 = tflearn.fully_connected(inputs[:, 5:6, -1], 128, activation='relu')

            # layout: [buffer1, bitrate1, buffer2, bitrate2, ...]

            otherAgentData = tflearn.conv_1d(inputs[:, 5:6, :],
                                             128,
                                             2,
                                             strides=2,
                                             activation='relu')

            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)
            otherAgentData_flat = tflearn.flatten(otherAgentData)

            merge_net = tflearn.merge([
                split_0, split_1, split_2_flat, split_3_flat, split_4_flat,
                split_5, otherAgentData_flat
            ], 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net,
                                                  128,
                                                  activation='relu')
            out = tflearn.fully_connected(dense_net_0,
                                          self.a_dim,
                                          activation='softmax')

            return inputs, out
Example #13
def neural_network_model(input_size):
    network = tflearn.input_data(shape=[None, input_size, 1], name='input')
    # Convolution
    branch1 = tflearn.conv_1d(network,
                              128,
                              2,
                              padding='valid',
                              activation='relu',
                              regularizer="L2")
    branch2 = tflearn.conv_1d(network,
                              128,
                              2,
                              padding='valid',
                              activation='relu',
                              regularizer="L2")
    branch3 = tflearn.conv_1d(network,
                              128,
                              2,
                              padding='valid',
                              activation='relu',
                              regularizer="L2")
    branch4 = tflearn.conv_1d(network,
                              128,
                              2,
                              padding='valid',
                              activation='relu',
                              regularizer="L2")
    branch5 = tflearn.conv_1d(network,
                              128,
                              2,
                              padding='valid',
                              activation='relu',
                              regularizer="L2")
    network = tflearn.merge([branch1, branch2, branch3, branch4, branch5],
                            mode='concat',
                            axis=1)
    # LSTM
    network = tflearn.lstm(network, 128, activation='relu', dropout=0.8)
    # Fully connected
    network = tflearn.fully_connected(network, 128, activation='relu')
    network = tflearn.dropout(network, 0.8)
    network = tflearn.fully_connected(network, 256, activation='relu')
    network = tflearn.dropout(network, 0.8)
    network = tflearn.fully_connected(network, 512, activation='relu')
    network = tflearn.dropout(network, 0.8)
    network = tflearn.fully_connected(network, 256, activation='relu')
    network = tflearn.dropout(network, 0.8)
    network = tflearn.fully_connected(network, 128, activation='relu')
    network = tflearn.dropout(network, 0.8)
    network = tflearn.fully_connected(network, 2, activation='softmax')
    network = tflearn.regression(network,
                                 optimizer='adam',
                                 learning_rate=0.001,
                                 loss='categorical_crossentropy')
    model = tflearn.DNN(network, tensorboard_verbose=0)
    return model
Example #14
def tflearn_imdb():
    """
    文本情感分析
    :return:
    """
    (X_train, Y_train), (X_test, Y_test) = imdb.load_data()

    X_train, Y_train = pad_sequences(X_train,
                                     maxlen=100), to_categorical(Y_train,
                                                                 nb_classes=2)
    X_test, Y_test = pad_sequences(X_test,
                                   maxlen=100), to_categorical(Y_test,
                                                               nb_classes=2)

    network = input_data([None, 100], name="input")
    network = tflearn.embedding(network, input_dim=10000, output_dim=128)

    branch1 = tflearn.conv_1d(network,
                              128,
                              3,
                              padding="valid",
                              activation="relu",
                              regularizer="L2")
    branch2 = tflearn.conv_1d(network,
                              128,
                              4,
                              padding="valid",
                              activation="relu",
                              regularizer="L2")
    branch3 = tflearn.conv_1d(network,
                              128,
                              5,
                              padding="valid",
                              activation="relu",
                              regularizer="L2")

    network = tflearn.merge([branch1, branch2, branch3], mode="concat", axis=1)
    network = tf.expand_dims(network, 2)
    network = tflearn.global_avg_pool(network)
    network = tflearn.dropout(network, 0.5)
    network = tflearn.fully_connected(network, 2, activation="softmax")

    network = tflearn.regression(network,
                                 optimizer="adam",
                                 learning_rate=0.001,
                                 loss="categorical_crossentropy",
                                 name="target")

    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(X_train,
              Y_train,
              n_epoch=5,
              shuffle=True,
              validation_set=(X_test, Y_test),
              show_metric=True,
              batch_size=32)
Example #15
    def _build_c(self, s, a, scope, trainable):
        with tf.variable_scope(scope):
            inputs = tf.reshape(s, [-1, S_INFO, S_LEN])
            split_0 = tflearn.fully_connected(inputs[:, 0:1, :],
                                              FEATURE_NUM,
                                              activation='relu',
                                              trainable=trainable)
            split_1 = tflearn.fully_connected(inputs[:, 1:2, :],
                                              FEATURE_NUM,
                                              activation='relu',
                                              trainable=trainable)
            split_2 = tflearn.conv_1d(inputs[:, 2:3, :],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu',
                                      trainable=trainable)
            split_3 = tflearn.conv_1d(inputs[:, 3:4, :],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu',
                                      trainable=trainable)
            split_4 = tflearn.conv_1d(inputs[:, 4:5, :A_DIM],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu',
                                      trainable=trainable)
            split_5 = tflearn.fully_connected(inputs[:, 5:6, :],
                                              FEATURE_NUM,
                                              activation='relu',
                                              trainable=trainable)

            split_6 = tflearn.fully_connected(a,
                                              FEATURE_NUM,
                                              activation='relu',
                                              trainable=trainable)

            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)

            net = tf.stack([
                split_0, split_1, split_2_flat, split_3_flat, split_4_flat,
                split_5, split_6
            ],
                           axis=1)
            net = tflearn.fully_connected(net,
                                          FEATURE_NUM,
                                          activation='relu',
                                          trainable=trainable)
            net = tflearn.fully_connected(net,
                                          1,
                                          activation='linear',
                                          trainable=trainable)
            return net
Example #16
    def create_actor_network(self):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])

            split_0 = tflearn.fully_connected(inputs[:, 0:1, -1],
                                              128,
                                              activation='relu')
            split_1 = tflearn.fully_connected(inputs[:, 1:2, -1],
                                              128,
                                              activation='relu')
            split_2 = tflearn.conv_1d(inputs[:, 2:3, :],
                                      128,
                                      4,
                                      activation='relu')
            split_3 = tflearn.conv_1d(inputs[:, 3:4, :],
                                      128,
                                      4,
                                      activation='relu')
            split_4 = tflearn.conv_1d(inputs[:, 4:5, :A_DIM],
                                      128,
                                      4,
                                      activation='relu')
            split_5 = tflearn.fully_connected(inputs[:, 4:5, -1],
                                              128,
                                              activation='relu')

            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)

            merge_net = tflearn.merge([
                split_0, split_1, split_2_flat, split_3_flat, split_4_flat,
                split_5
            ], 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net,
                                                  128,
                                                  activation='relu')
            out = tflearn.fully_connected(dense_net_0,
                                          self.a_dim,
                                          activation='softmax')
            # print("inputs2",inputs[:,2:3,:])
            # print("split_0",split_0.shape)
            # print("split_1",split_1.shape)
            # print("split_2",split_2.shape,"split_2_flat",split_2_flat.shape)
            # print("split_3",split_3.shape,"split_3_flat",split_3_flat.shape)
            # print("split_4",split_4.shape,"split_4_flat",split_4_flat.shape)
            # print("split_5",split_5.shape)
            # print("merge_net",merge_net.shape)
            # exit()

            return inputs, out
Example #17
def cnn(eps, lr, shape):
    import tflearn
    inputs = tflearn.input_data(shape=shape)
    net = tflearn.conv_1d(inputs,
                          nb_filter=64,
                          filter_size=3,
                          strides=1,
                          padding='same',
                          activation='tanh')
    net = tflearn.avg_pool_1d(net, kernel_size=3)
    net = tflearn.conv_1d(net, 64, 3, 1, padding='same', activation='tanh')
    shape = net.get_shape().as_list()
    print(shape)
    net = tflearn.reshape(net, [-1, shape[1] * shape[2]])

    net = tflearn.normalization.batch_normalization(net)
    net = tflearn.fully_connected(net,
                                  512,
                                  activation='tanh',
                                  regularizer='L2')
    net = tflearn.dropout(net, 0.8)

    net = tflearn.normalization.batch_normalization(net)
    net = tflearn.fully_connected(net,
                                  1024,
                                  activation='tanh',
                                  regularizer='L2')
    net = tflearn.dropout(net, 0.8)

    net = tflearn.normalization.batch_normalization(net)
    net = tflearn.fully_connected(net,
                                  512,
                                  activation='tanh',
                                  regularizer='L2')

    softmax = tflearn.fully_connected(net, 20, activation='softmax')

    # sgd = tflearn.SGD(learning_rate=lr, lr_decay=0.96, decay_step=15)
    adam = tflearn.optimizers.adam(epsilon=eps, learning_rate=lr)
    regression = tflearn.regression(softmax,
                                    optimizer=adam,
                                    metric='accuracy',
                                    loss='categorical_crossentropy')

    model = tflearn.DNN(regression,
                        tensorboard_verbose=3,
                        tensorboard_dir='.',
                        best_val_accuracy=0.7,
                        best_checkpoint_path='./bestFinal/cnn',
                        checkpoint_path='./checkpoints/cnn',
                        max_checkpoints=10)
    print('Model created')
    return model
Example #18
    def __init__(self, sess, n_features, n_actions, lr=0.001):
        self.sess = sess

        self.s = tf.placeholder(tf.float32, [1, n_features[0], n_features[1]],
                                "state")
        self.a = tf.placeholder(tf.int32, None, "act")
        self.td_error = tf.placeholder(tf.float32, None, "td_error")
        self.lr = tf.placeholder(tf.float32, None, 'lr_ph')

        with tf.variable_scope('Actor_1'):
            split0 = tflearn.conv_1d(self.s[:, 0:1, :], 5, 1)
            split1 = tflearn.conv_1d(self.s[:, 1:2, :], 16, 1)
            a = split0[:, 0, :]
            b = split1[:, 0, :]
            c = self.s[:, 2, -1:]
            d = self.s[:, 3, -1:]
            e = self.s[:, 4, -1:]
            f = self.s[:, 5, -1:]
            g = self.s[:, 6, -1:]
            hidden0 = tf.concat([a, b, c, d, e, f, g], axis=1)

            l1 = tf.layers.dense(
                inputs=hidden0,
                units=128,
                activation=tf.nn.relu,
                kernel_initializer=tf.random_normal_initializer(0., .1),
                bias_initializer=tf.constant_initializer(0.1),
                name='l1')
            l2 = tf.layers.dense(
                inputs=l1,
                units=128,
                activation=tf.nn.relu,
                kernel_initializer=tf.random_normal_initializer(0., .1),
                bias_initializer=tf.constant_initializer(0.1),
                name='l2')

            self.acts_prob = tf.layers.dense(
                inputs=l2,
                units=n_actions,
                activation=tf.nn.softmax,
                kernel_initializer=tf.random_normal_initializer(0., .1),
                bias_initializer=tf.constant_initializer(0.1),
                name='acts_prob')

        with tf.variable_scope('exp_v'):
            log_prob = tf.log(self.acts_prob[0, self.a])
            self.exp_v = tf.reduce_mean(
                log_prob * self.td_error)  # advantage (TD_error) guided loss

        with tf.variable_scope('train'):
            self.train_op = tf.train.AdamOptimizer(self.lr).minimize(
                -self.exp_v)  # minimize(-exp_v) = maximize(exp_v)
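
Training maximizes log pi(a|s) * td_error by minimizing its negative, so one update step only needs the sampled action and the critic's TD error. A hedged sketch; actor, state, action_index, and td are hypothetical names:

    feed = {actor.s: state,          # [1, n_features[0], n_features[1]]
            actor.a: action_index,   # int, the action actually taken
            actor.td_error: td,      # scalar advantage estimate
            actor.lr: 1e-3}
    _, exp_v = sess.run([actor.train_op, actor.exp_v], feed_dict=feed)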
Example #19
    def create_network(self, s, scope='Actor/eval', trainable=True):
        with tf.variable_scope(scope):
            inputs = tf.reshape(s, [-1, S_INFO, S_LEN])
            split_0 = tflearn.fully_connected(inputs[:, 0:1, :],
                                              FEATURE_NUM,
                                              activation='relu',
                                              trainable=trainable)
            split_1 = tflearn.fully_connected(inputs[:, 1:2, :],
                                              FEATURE_NUM,
                                              activation='relu',
                                              trainable=trainable)
            split_2 = tflearn.conv_1d(inputs[:, 2:3, :],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu',
                                      trainable=trainable)
            split_3 = tflearn.conv_1d(inputs[:, 3:4, :],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu',
                                      trainable=trainable)
            split_4 = tflearn.conv_1d(inputs[:, 4:5, :A_DIM],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu',
                                      trainable=trainable)
            split_5 = tflearn.fully_connected(inputs[:, 5:6, :],
                                              FEATURE_NUM,
                                              activation='relu',
                                              trainable=trainable)

            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)

            net = tf.stack([
                split_0, split_1, split_2_flat, split_3_flat, split_4_flat,
                split_5
            ],
                           axis=1)
            net = tflearn.fully_connected(net,
                                          FEATURE_NUM,
                                          activation='relu',
                                          trainable=trainable)
            merge_net, alphas = self.attention(net, FEATURE_NUM)
            dense_net_0 = self.nac(merge_net, FEATURE_NUM, trainable=trainable)
            out = self.nac(dense_net_0, 2, trainable=trainable)
            out = tf.nn.sigmoid(out)
            #a = tflearn.fully_connected(
            #    net, 2, activation='sigmoid', trainable=trainable)
            return tf.multiply(out, 60., name='scaled_a'), alphas
Example #20
    def create_network(self):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(shape=[None, self.S_INFO, self.S_LEN])
            split_0 = tflearn.fully_connected(inputs[:, 0:1, -1],
                                              FEATURE_NUM,
                                              activation='relu')
            split_1 = tflearn.fully_connected(inputs[:, 1:2, -1],
                                              FEATURE_NUM,
                                              activation='relu')
            split_2 = tflearn.conv_1d(inputs[:, 2:3, :],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu')
            split_3 = tflearn.conv_1d(inputs[:, 3:4, :],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu')
            split_4 = tflearn.conv_1d(inputs[:, 4:5, :self.A_DIM],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu')
            split_5 = tflearn.conv_1d(inputs[:, 5:6, :self.A_DIM],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu')
            split_6 = tflearn.fully_connected(inputs[:, 6:7, -1],
                                              FEATURE_NUM,
                                              activation='relu')

            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)
            split_5_flat = tflearn.flatten(split_5)

            merge_net = tf.stack([
                split_0, split_1, split_2_flat, split_3_flat, split_4_flat,
                split_5_flat, split_6
            ],
                                 axis=-1)
            # transpose to [batch, splits, features] so the GRU reads the splits as a sequence
            merge_net = tf.transpose(merge_net, [0, 2, 1])
            dense_net_0 = tflearn.gru(merge_net,
                                      FEATURE_NUM,
                                      activation='relu')

            out = tflearn.fully_connected(dense_net_0,
                                          self.A_DIM,
                                          activation='softmax')

            return inputs, out
Example #21
File: NN.py Project: szsam/DSLearn
def build():
    network = input_data([None, Meta.max_string_len])
    network = embedding(network, input_dim=Meta.max_one_hot, output_dim=128)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = dropout(network, 0.5)
    network = lstm(network, 128)
    # network = fully_connected(network, 20)
    network = fully_connected(network, 2, activation='softmax')
    network = tflearn.regression(network, optimizer='adam', learning_rate=0.001, loss='categorical_crossentropy')
    model = tflearn.DNN(network, tensorboard_verbose=0)
    return model
Example #22
    def __init__(self, sess, n_features, lr=0.01):
        self.sess = sess

        self.s = tf.placeholder(tf.float32, [1, n_features[0], n_features[1]],
                                "state")
        self.v_ = tf.placeholder(tf.float32, [1, 1], "v_next")
        self.r = tf.placeholder(tf.float32, None, 'r')
        self.lr = tf.placeholder(tf.float32, None, 'lr_ph')

        with tf.variable_scope('Critic'):
            split0 = tflearn.conv_1d(self.s[:, 0:1, :], 5, 1)
            split1 = tflearn.conv_1d(self.s[:, 1:2, :], 16, 1)
            a = split0[:, 0, :]
            b = split1[:, 0, :]
            c = self.s[:, 2, -1:]
            d = self.s[:, 3, -1:]
            e = self.s[:, 4, -1:]
            f = self.s[:, 5, -1:]
            g = self.s[:, 6, -1:]
            hidden0 = tf.concat([a, b, c, d, e, f, g], axis=1)

            l1 = tf.layers.dense(
                inputs=hidden0,
                units=128,
                activation=tf.nn.relu,
                kernel_initializer=tf.random_normal_initializer(0., .1),
                bias_initializer=tf.constant_initializer(0.1),
                name='l1')
            l2 = tf.layers.dense(
                inputs=l1,
                units=128,
                activation=tf.nn.relu,
                kernel_initializer=tf.random_normal_initializer(0., .1),
                bias_initializer=tf.constant_initializer(0.1),
                name='l2')

            self.v = tf.layers.dense(
                inputs=l2,
                units=1,
                activation=None,
                kernel_initializer=tf.random_normal_initializer(0., .1),
                bias_initializer=tf.constant_initializer(0.1),
                name='V')

        with tf.variable_scope('squared_TD_error'):
            self.td_error = self.r + 0.9 * self.v_ - self.v
            self.loss = tf.square(
                self.td_error)  # TD_error = (r+gamma*V_next) - V_eval
        with tf.variable_scope('train'):
            self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss)
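
The critic bootstraps V(s') for its TD target, and the TD error it computes is exactly what the actor in Example #18 consumes. A hedged sketch with hypothetical names:

    v_next = sess.run(critic.v, {critic.s: next_state})
    td, _ = sess.run([critic.td_error, critic.train_op],
                     {critic.s: state, critic.v_: v_next,
                      critic.r: reward, critic.lr: 1e-2})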
Example #23
    def _build_a(self, s, scope, trainable):
        with tf.variable_scope(scope):
            inputs = tf.reshape(s, [-1, S_INFO, S_LEN])
            split_0 = tflearn.fully_connected(inputs[:, 0:1, :],
                                              FEATURE_NUM,
                                              activation='relu',
                                              trainable=trainable)
            split_1 = tflearn.fully_connected(inputs[:, 1:2, :],
                                              FEATURE_NUM,
                                              activation='relu',
                                              trainable=trainable)
            split_2 = tflearn.conv_1d(inputs[:, 2:3, :],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu',
                                      trainable=trainable)
            split_3 = tflearn.conv_1d(inputs[:, 3:4, :],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu',
                                      trainable=trainable)
            split_4 = tflearn.conv_1d(inputs[:, 4:5, :A_DIM],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu',
                                      trainable=trainable)
            split_5 = tflearn.fully_connected(inputs[:, 5:6, :],
                                              FEATURE_NUM,
                                              activation='relu',
                                              trainable=trainable)

            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)

            net = tf.stack([
                split_0, split_1, split_2_flat, split_3_flat, split_4_flat,
                split_5
            ],
                           axis=1)
            net = tflearn.fully_connected(net,
                                          FEATURE_NUM,
                                          activation='relu',
                                          trainable=trainable)
            a = tflearn.fully_connected(net,
                                        self.a_dim,
                                        activation='sigmoid',
                                        trainable=trainable)
            return tf.multiply(a, self.a_bound, name='scaled_a')
Example #24
    def create_network(self):
        with tf.variable_scope('nn'):
            inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1]])
            split_0 = tflearn.conv_1d(inputs[:, 0:1, :], 128, 4, activation='relu')
            split_1 = tflearn.conv_1d(inputs[:, 1:2, :], 128, 4, activation='relu')
            split_2 = tflearn.conv_1d(inputs[:, 2:3, :], 128, 4, activation='relu')
            split_3 = tflearn.conv_1d(inputs[:, 3:4, :], 128, 4, activation='relu')
            split_4 = tflearn.conv_1d(inputs[:, 4:5, :], 128, 4, activation='relu')

            merge_net = tflearn.merge([split_0, split_1, split_2, split_3, split_4], 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net, 100, activation='relu')
            out = tflearn.fully_connected(dense_net_0, self.a_dim, activation='linear')

            return inputs, out
Example #25
def hybrid_header(x, reuse=False):
    size = 3
    inputs_shape = x.get_shape().as_list()
    with tf.variable_scope('1d-cnn'):
        split_array = []
        for t in xrange(S_LEN):
            tmp_split = tflearn.conv_1d(
                x[:, t:t + 1, :], FEATURE_NUM, size, activation='relu')
            tmp_split_flat = tflearn.flatten(tmp_split)
            tmp_split_flat = tflearn.layers.normalization.batch_normalization(tmp_split_flat)
            split_array.append(tmp_split_flat)
        merge_net = tflearn.merge(split_array, 'concat')
        _count = merge_net.get_shape().as_list()[1]
        out_cnn = tf.reshape(
            merge_net, [-1, inputs_shape[1], _count // inputs_shape[1]])

    with tf.variable_scope('gru'):
        net = tflearn.gru(out_cnn, FEATURE_NUM, return_seq=True)
        out_gru = tflearn.gru(net, FEATURE_NUM)
        out_gru = tf.expand_dims(out_gru, 1)

    conv_1d_net = tflearn.conv_1d(out_gru, FEATURE_NUM, size, activation='relu')
    conv_1d_net_flat = tflearn.flatten(conv_1d_net)
    
    # with tf.name_scope('1d-cnn'):
    #     network_array = []
    #     for p in xrange(S_INFO - 1):
    #         branch_array = []
    #         for i in xrange(2,4):
    #             sp_branch = tflearn.conv_1d(x[:, :, p:p+1], FEATURE_NUM, i, padding='valid', activation='relu', regularizer="L2")
    #             branch_array.append(sp_branch)
    #         branch = tflearn.merge(branch_array, mode='concat', axis=1)
    #         branch = tf.expand_dims(branch, 2)
    #         branch = global_max_pool(branch)
    #         branch = tflearn.dropout(branch, 0.5)
    #         network_array.append(branch)
    #     out_cnn = tflearn.merge(network_array, 'concat')

    #with tf.name_scope('gru'):
    #    #net = tflearn.gru(x, FEATURE_NUM, return_seq=True)
    #    net = tflearn.gru(x, FEATURE_NUM)
    #    out_gru = tflearn.fully_connected(
    #        net, FEATURE_NUM, activation='relu')
    #    out_gru = tflearn.dropout(out_gru, 0.5)

    #merge_net = tflearn.merge([out_cnn, out_gru], 'concat')

    return conv_1d_net_flat
Example #26
    def create_actor_network(self):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])

            split_0 = tflearn.fully_connected(inputs[:, 0:1, -1],
                                              128,
                                              activation='relu')
            split_1 = tflearn.fully_connected(inputs[:, 1:2, -1],
                                              128,
                                              activation='relu')
            split_2 = tflearn.conv_1d(inputs[:, 2:3, :],
                                      128,
                                      4,
                                      activation='relu')
            split_3 = tflearn.conv_1d(inputs[:, 3:4, :],
                                      128,
                                      4,
                                      activation='relu')
            split_4 = tflearn.conv_1d(inputs[:, 4:5, :A_DIM],
                                      128,
                                      4,
                                      activation='relu')
            split_5 = tflearn.fully_connected(inputs[:, 5:6, -1],
                                              128,
                                              activation='relu')
            # split_5 = tflearn.fully_connected(inputs[:, 4:5, -1], 128, activation='relu')
            #split_6 = tflearn.fully_connected(inputs[:, 6:7, -1], 128, activation='relu')
            #split_7 = tflearn.fully_connected(inputs[:, 7:8, -1], 128, activation='relu')
            #split_8 = tflearn.fully_connected(inputs[:, 8:9, -1], 128, activation='relu')

            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)

            merge_net = tflearn.merge([
                split_0, split_1, split_2_flat, split_3_flat, split_4_flat,
                split_5
            ], 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net,
                                                  128,
                                                  activation='relu')
            out = tflearn.fully_connected(dense_net_0,
                                          self.a_dim,
                                          activation='softmax')

            return inputs, out
Example #27
File: a3c.py Project: yogeshVU/QARC
    def create_actor_network(self):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])
            split_array = []
            for i in xrange(self.s_dim[0] - 1):
                split = tflearn.conv_1d(inputs[:, i:i + 1, :],
                                        FEATURE_NUM,
                                        KERNEL,
                                        activation='relu')
                flat = tflearn.flatten(split)
                split_array.append(flat)

            dense_net = tflearn.fully_connected(inputs[:, -1:, :],
                                                FEATURE_NUM,
                                                activation='relu')
            split_array.append(dense_net)
            merge_net = tflearn.merge(split_array, 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net,
                                                  64,
                                                  activation='relu')
            # dense_net_0 = tflearn.dropout(dense_net_0, 0.8)
            out = tflearn.fully_connected(dense_net_0,
                                          self.a_dim,
                                          activation='softmax')

            return inputs, out
Example #28
def build_hybrid_net(x):
    inputs = tflearn.input_data(placeholder=x)
    with tf.name_scope('1d-cnn'):
        network_array = []
        for p in xrange(S_INFO):
            branch = tflearn.conv_1d(inputs[:, :, p:p + 1],
                                     FEATURE_NUM,
                                     3,
                                     activation='relu',
                                     regularizer="L2")
            branch = tflearn.flatten(branch)
            network_array.append(branch)
        out_cnn = tflearn.merge(network_array, 'concat')
    with tf.name_scope('gru'):
        net = tflearn.gru(inputs, FEATURE_NUM, return_seq=True)
        out_gru = tflearn.gru(net, FEATURE_NUM)

    header = tflearn.merge([out_cnn, out_gru], 'concat')
    dense_net = tflearn.fully_connected(header,
                                        FEATURE_NUM * 2,
                                        activation='relu',
                                        regularizer="L2")
    dense_net = tflearn.fully_connected(dense_net,
                                        FEATURE_NUM * 1,
                                        activation='relu',
                                        regularizer="L2")
    out = tflearn.fully_connected(dense_net,
                                  1,
                                  activation='sigmoid',
                                  regularizer="L2")
    return out, header
Example #29
    def create_critic_network(self):
        with tf.variable_scope('critic'):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])  #(None, 7, 91)
            '''
            split_0 = tflearn.fully_connected(inputs[:, 0:1, -1], 128, activation='relu')
            split_1 = tflearn.fully_connected(inputs[:, 1:2, -1], 128, activation='relu')
            split_2 = tflearn.conv_1d(inputs[:, 2:3, :], 128, 4, activation='relu')
            split_3 = tflearn.conv_1d(inputs[:, 3:4, :], 128, 4, activation='relu')
            split_4 = tflearn.conv_1d(inputs[:, 4:5, :A_DIM], 128, 4, activation='relu')
            split_5 = tflearn.fully_connected(inputs[:, 4:5, -1], 128, activation='relu')
            
            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)
            merge_net = tflearn.merge([split_0, split_1, split_2_flat, split_3_flat, split_4_flat, split_5], 'concat')

            '''
            split = tflearn.conv_1d(inputs, 128, 1,
                                    activation='relu')  #(None, 7, 128)
            split_flat = tflearn.flatten(split)

            #dense_net_0 = tflearn.fully_connected(split_flat, 1024, activation='relu')
            #dense_net_1 = tflearn.fully_connected(dense_net_0, 512, activation='relu')
            dense_net_2 = tflearn.fully_connected(split_flat,
                                                  256,
                                                  activation='relu')
            out = tflearn.fully_connected(dense_net_2, 1, activation='linear')

            return inputs, out
Example #30
    def CNN_Core(self, x, reuse=False):
        with tf.variable_scope(self.scope + '-cnn_core', reuse=reuse):
            tmp = tflearn.conv_1d(x,
                                  FEATURE_NUM // 4,
                                  KERNEL,
                                  activation='relu')
            tmp = tflearn.batch_normalization(tmp)
            tmp = tflearn.flatten(tmp)
            return tmp
Example #31
    def CreateTarget(self, inputs, name):
        with tf.variable_scope(name):
            split_0 = tflearn.fully_connected(inputs[:, 0:1, -1],
                                              FEATURE_NUM,
                                              activation='relu')
            split_1 = tflearn.fully_connected(inputs[:, 1:2, -1],
                                              FEATURE_NUM,
                                              activation='relu')
            split_2 = tflearn.conv_1d(inputs[:, 2:3, :],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu')
            split_3 = tflearn.conv_1d(inputs[:, 3:4, :],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu')
            split_4 = tflearn.conv_1d(inputs[:, 4:5, :self.a_dim],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu')
            split_5 = tflearn.fully_connected(inputs[:, 5:6, -1],
                                              FEATURE_NUM,
                                              activation='relu')

            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)

            merge_net = tflearn.merge([
                split_0, split_1, split_2_flat, split_3_flat, split_4_flat,
                split_5
            ], 'concat')

            net = tflearn.fully_connected(merge_net,
                                          FEATURE_NUM,
                                          activation='relu')

            value = tflearn.fully_connected(net,
                                            self.a_dim,
                                            activation='linear')

            return value