Example #1
    def create_actor_network(self):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1]])

            split_0 = tflearn.fully_connected(inputs[:, 0:1, -1], 64, activation='relu')
            split_1 = tflearn.fully_connected(inputs[:, 1:2, -1], 64, activation='relu')
            split_2 = tflearn.fully_connected(inputs[:, 4:5, -1], 64, activation='relu')

            reshape_0 = tflearn.reshape(inputs[:, 2:4, :], [-1, 2, self.s_dim[1], 1])
            split_3 = tflearn.conv_2d(reshape_0, 128, 3, activation='relu')

            split_4 = tflearn.conv_1d(inputs[:, 5:6, :], 128, 4, activation='relu')
            split_5 = tflearn.conv_1d(inputs[:, 6:7, :], 128, 4, activation='relu')

            flatten_0 = tflearn.flatten(split_3)
            flatten_1 = tflearn.flatten(split_4)
            flatten_2 = tflearn.flatten(split_5)

            merge_net = tflearn.merge([split_0, split_1, split_2, flatten_0, flatten_1, flatten_2], 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net, 128, activation='relu')

            # for multiple video, mask out the invalid actions
            linear_out = tflearn.fully_connected(dense_net_0, self.a_dim, activation='linear')
            linear_out = tf.transpose(linear_out)  # [None, a_dim] -> [a_dim, None]
            mask_out = tf.boolean_mask(linear_out, self.mask)  # [a_dim, None] -> [masked, None]
            mask_out = tf.transpose(mask_out)  # [masked, None] -> [None, masked]
            softmax_out = tf.nn.softmax(mask_out)

            return inputs, softmax_out
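Note: the transpose/boolean_mask pattern above works because tf.boolean_mask filters along the first axis, so the logits are flipped to [a_dim, None], masked, and flipped back before the softmax. A minimal standalone sketch of the same trick (the mask values here are made up for illustration):

import tensorflow as tf

logits = tf.constant([[1.0, 2.0, 3.0, 4.0]])   # [batch=1, a_dim=4]
mask = tf.constant([True, False, True, True])  # one flag per action
masked = tf.transpose(tf.boolean_mask(tf.transpose(logits), mask))
probs = tf.nn.softmax(masked)                  # [1, 3], rows sum to 1

with tf.Session() as sess:
    print(sess.run(probs))  # distribution over the 3 valid actions only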
Example #2
File: a3c.py Project: xgw/proj
    def create_actor_network(self):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])

            split_0 = tflearn.fully_connected(inputs[:, 0:1, -1],
                                              128,
                                              activation='relu')
            split_1 = tflearn.conv_1d(inputs[:, 1:2, :],
                                      128,
                                      4,
                                      activation='relu')
            split_2 = tflearn.conv_1d(inputs[:, 2:3, :NUM_OF_TRACKS],
                                      128,
                                      4,
                                      activation='relu')
            split_3 = tflearn.fully_connected(inputs[:, 3:4, -1],
                                              128,
                                              activation='relu')

            split_1_flat = tflearn.flatten(split_1)
            split_2_flat = tflearn.flatten(split_2)

            merge_net = tflearn.merge(
                [split_0, split_1_flat, split_2_flat, split_3], 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net,
                                                  128,
                                                  activation='relu')
            out = tflearn.fully_connected(dense_net_0,
                                          self.a_dim,
                                          activation='softmax')

            return inputs, out
Example #3
File: a3c_raw.py Project: wushilan/arsenal
    def create_actor_network(self, reuse=tf.AUTO_REUSE):
        with tf.variable_scope('actor', reuse=reuse):
            # tf.Graph()

            inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1],self.s_dim[2]], name = "actor_input")     # input layer
            # input_loss = tflearn.layers.normalization.batch_normalization(inputs[:, 0:1, :, :])
            # input_dealy_interval = tflearn.layers.normalization.batch_normalization(inputs[:, 1:2, :, :])

            # print input_loss.shape
            # print input_dealy_interval.shape

            # split_1 = tflearn.conv_2d(inputs[:, 0:1, :, :], 64, 3, activation='LeakyReLU',restore = False)
            # split_2 = tflearn.conv_2d(inputs[:, 1:2, :, :], 64, 3, activation='LeakyReLU')

            # # split_2 = tflearn.fully_connected(inputs[:, 1:2, -1], 128, activation='relu', reuse=tf.AUTO_REUSE, scope = 'fc2')

            split_1_flat = tflearn.flatten(inputs[:, 0:1, :, :])
            split_2_flat = tflearn.flatten(inputs[:, 1:2, :, :])

            # merge_net = tflearn.merge([split_1_flat, split_2_flat], 'concat')

            dense_net_0 = tflearn.fully_connected(split_1_flat, 64, activation='LeakyReLU')
            dense_net_1 = tflearn.fully_connected(split_2_flat, 64, activation='LeakyReLU')
            merge_net = tflearn.merge([dense_net_0, dense_net_1], 'concat')

            out = tflearn.fully_connected(merge_net, self.a_dim, activation='softmax', name="actor_output")     # output layer
            return inputs, out
Example #4
File: a3c_raw.py Project: wushilan/arsenal
    def create_critic_network(self):
        with tf.variable_scope('critic'):
            inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1],self.s_dim[2]], name = 'critic_input')

            # split_1 = tflearn.conv_2d(inputs[:, 0:1,:, :], 128, 4, activation='relu')
            # split_2 = tflearn.conv_2d(inputs[:, 1:2,:, :], 128, 4, activation='relu')

            # split_1_flat_1 = tflearn.flatten(split_1)
            # split_1_flat_2 = tflearn.flatten(split_2)

            # merge_net = tflearn.merge([split_1_flat_1, split_1_flat_2], 'concat')

            # dense_net_0 = tflearn.fully_connected(merge_net, 128, activation='relu')
            split_1_flat = tflearn.flatten(inputs[:, 0:1, :, :])
            split_2_flat = tflearn.flatten(inputs[:, 1:2, :, :])

            # merge_net = tflearn.merge([split_1_flat, split_2_flat], 'concat')

            dense_net_0 = tflearn.fully_connected(split_1_flat, 64, activation='LeakyReLU')
            dense_net_1 = tflearn.fully_connected(split_2_flat, 64, activation='LeakyReLU')
            merge_net = tflearn.merge([dense_net_0, dense_net_1], 'concat')

            out = tflearn.fully_connected(merge_net, 1, activation='linear', name = 'critic_output')

            return inputs, out
Example #5
File: a3c.py Project: wushilan/arsenal
    def create_critic_network(self):
        with tf.variable_scope('critic'):
            inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1],self.s_dim[2]])
            print('self.s_dim[0]', self.s_dim[0])
            # split_0 = tflearn.fully_connected(inputs[:, 0:1, -1], 128, activation='relu', reuse=tf.AUTO_REUSE, scope = 'critic_fc1')
            print('inputs[:, 0:1, :].shape:', inputs[:, 0:1, :].shape)
            print('inputs[:, 1:2, :].shape:', inputs[:, 1:2, :].shape)
            x_reshape1 = tflearn.reshape(inputs[:, 0:1, :], [-1, self.s_dim[1], self.s_dim[2], 1])
            x_reshape2 = tflearn.reshape(inputs[:, 1:2, :], [-1, self.s_dim[1], self.s_dim[2], 1])
            print('x_reshape1.shape:', x_reshape1.shape)
            print('x_reshape2.shape:', x_reshape2.shape)
            split_1 = tflearn.conv_2d(x_reshape1, 128, 4, activation='relu', scope='critic_conv1_1')
            print('split_1.shape:', split_1.shape)
            # split_2 = tflearn.fully_connected(inputs[:, 1:2, -1], 128, activation='relu', reuse=tf.AUTO_REUSE, scope = 'critic_fc2')
            split_2 = tflearn.conv_2d(x_reshape2, 128, 4, activation='relu', scope='critic_conv1_2')
            print('split_2.shape:', split_2.shape)

            split_1_flat_1 = tflearn.flatten(split_1)
            split_1_flat_2 = tflearn.flatten(split_2)

            merge_net = tflearn.merge([split_1_flat_1, split_1_flat_2], 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net, 128, activation='relu', scope = 'critic_fc3')
            out = tflearn.fully_connected(dense_net_0, 1, activation='linear', scope = 'critic_fc4')

            return inputs, out
Example #6
File: dualppo.py Project: zchao520/Zwei
    def CreateNetwork(self, inputs):
        with tf.variable_scope('actor'):
            split_0 = tflearn.fully_connected(
                inputs[:, 0:1, -1], FEATURE_NUM, activation='relu')
            split_1 = tflearn.fully_connected(
                inputs[:, 1:2, -1], FEATURE_NUM, activation='relu')
            split_2 = tflearn.conv_1d(
                inputs[:, 2:3, :], FEATURE_NUM, 4, activation='relu')
            split_3 = tflearn.conv_1d(
                inputs[:, 3:4, :], FEATURE_NUM, 4, activation='relu')
            split_4 = tflearn.conv_1d(
                inputs[:, 4:5, :self.a_dim], FEATURE_NUM, 4, activation='relu')
            split_5 = tflearn.fully_connected(
                inputs[:, 5:6, -1], FEATURE_NUM, activation='relu')

            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)

            merge_net = tflearn.merge(
                [split_0, split_1, split_2_flat, split_3_flat, split_4_flat, split_5], 'concat')

            pi_net = tflearn.fully_connected(
                merge_net, FEATURE_NUM, activation='relu')
            pi = tflearn.fully_connected(pi_net, self.a_dim, activation='softmax') 
            val_net = tflearn.fully_connected(
                merge_net, FEATURE_NUM, activation='relu')
            val = tflearn.fully_connected(val_net, 1, activation='tanh') 
            return pi, val
Example #7
def stock_predictor(inputs, feature_number, predictor_type, use_batch_norm, activation_function, weight_decay):
    window_length = inputs.get_shape()[2]
    assert predictor_type in ['cnn', 'lstm'], 'type must be either cnn or lstm'
    if predictor_type == 'cnn':
        net = tflearn.conv_2d(inputs, 32, (1, 3), padding='valid', weights_init = 'xavier',\
                              regularizer='L2', weight_decay = weight_decay)
        if use_batch_norm:
            net = tflearn.layers.normalization.batch_normalization(net)
        # net = tflearn.activations.relu(net)
        net = activation_net(net, activation_function)

        net = tflearn.conv_2d(net, 32, (1, window_length - 2), padding='valid', weights_init = 'xavier',\
                              regularizer='L2', weight_decay = weight_decay)
        if use_batch_norm:
            net = tflearn.layers.normalization.batch_normalization(net)
        # net = tflearn.activations.relu(net)
        net = activation_net(net, activation_function)

        #################################################
        net = tflearn.conv_2d(net, 1, (1, 1), padding='valid', weights_init = 'xavier',\
                              regularizer='L2', weight_decay = weight_decay)
        if use_batch_norm:
            net = tflearn.layers.normalization.batch_normalization(net)
        # net = tflearn.activations.relu(net)
        net = activation_net(net, activation_function)
        ##################################################
        
        if DEBUG:
            print('After conv2d:', net.shape)
        net = tflearn.flatten(net)
        if DEBUG:
            print('Output:', net.shape)
    elif predictor_type == 'lstm':
        num_stocks = inputs.get_shape()[1]
        hidden_dim = 32
        net = tflearn.reshape(inputs, new_shape=[-1, window_length, feature_number])
        if DEBUG:
            print('Reshaped input:', net.shape)
        net = tflearn.lstm(net, hidden_dim, activation = activation_function,  weights_init = 'xavier')
        if DEBUG:
            print('After LSTM:', net.shape)
        net = tflearn.reshape(net, new_shape=[-1, num_stocks, hidden_dim,1]) ## reshape for conv2d in the next step
        if DEBUG:
            print('After reshape:', net.shape)

        #################################################
        net = tflearn.conv_2d(net, 1, (1, hidden_dim), padding='valid', weights_init = 'xavier',\
                              regularizer='L2', weight_decay = weight_decay)
        if use_batch_norm:
            net = tflearn.layers.normalization.batch_normalization(net)
        # net = tflearn.activations.relu(net)
        net = activation_net(net, activation_function)
        ##################################################
        net = tflearn.flatten(net)
        if DEBUG:
            print('Output:', net.shape)
    else:
        raise NotImplementedError
    return net
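Note: stock_predictor relies on an activation_net helper that is defined elsewhere in its project. A plausible minimal version, assuming activation_function is a tflearn activation name such as 'relu' or 'tanh':

def activation_net(net, activation_function):
    # Presumed helper: apply the named activation via tflearn's generic
    # activation layer, which resolves the string to an activation op.
    return tflearn.activation(net, activation=activation_function)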
Example #8
    def create_actor_network(self):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])

            split_0 = tflearn.fully_connected(inputs[:, 0:1, -1],
                                              128,
                                              activation='relu')
            split_1 = tflearn.fully_connected(inputs[:, 1:2, -1],
                                              128,
                                              activation='relu')
            split_2 = tflearn.conv_1d(inputs[:, 2:3, -self.s_len:],
                                      128,
                                      4,
                                      activation='relu')
            split_3 = tflearn.conv_1d(inputs[:, 3:4, -self.s_len:],
                                      128,
                                      4,
                                      activation='relu')
            split_4 = tflearn.conv_1d(inputs[:, 4:5, :A_DIM],
                                      128,
                                      4,
                                      activation='relu')
            split_5 = tflearn.fully_connected(inputs[:, 4:5, -1],
                                              128,
                                              activation='relu')

            # extra input channel (only used in the actor network):
            # test_6 = tflearn.fully_connected(inputs[:, 5:6, -1], 128, activation='relu')

            # other agents' data: [buffer1, bitrate1, buffer2, bitrate2, ...]

            otherAgentData = tflearn.conv_1d(inputs[:, 5:6, :],
                                             128,
                                             2,
                                             strides=2,
                                             activation='relu')

            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)
            otherAgentData_flat = tflearn.flatten(otherAgentData)

            merge_net = tflearn.merge([
                split_0, split_1, split_2_flat, split_3_flat, split_4_flat,
                split_5, otherAgentData_flat
            ], 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net,
                                                  128,
                                                  activation='relu')
            out = tflearn.fully_connected(dense_net_0,
                                          self.a_dim,
                                          activation='softmax')

            return inputs, out
Example #9
    def _build_c(self, s, a, scope, trainable):
        with tf.variable_scope(scope):
            inputs = tf.reshape(s, [-1, S_INFO, S_LEN])
            split_0 = tflearn.fully_connected(inputs[:, 0:1, :],
                                              FEATURE_NUM,
                                              activation='relu',
                                              trainable=trainable)
            split_1 = tflearn.fully_connected(inputs[:, 1:2, :],
                                              FEATURE_NUM,
                                              activation='relu',
                                              trainable=trainable)
            split_2 = tflearn.conv_1d(inputs[:, 2:3, :],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu',
                                      trainable=trainable)
            split_3 = tflearn.conv_1d(inputs[:, 3:4, :],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu',
                                      trainable=trainable)
            split_4 = tflearn.conv_1d(inputs[:, 4:5, :A_DIM],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu',
                                      trainable=trainable)
            split_5 = tflearn.fully_connected(inputs[:, 5:6, :],
                                              FEATURE_NUM,
                                              activation='relu',
                                              trainable=trainable)

            split_6 = tflearn.fully_connected(a,
                                              FEATURE_NUM,
                                              activation='relu',
                                              trainable=trainable)

            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)

            net = tf.stack([
                split_0, split_1, split_2_flat, split_3_flat, split_4_flat,
                split_5, split_6
            ],
                           axis=1)
            net = tflearn.fully_connected(net,
                                          FEATURE_NUM,
                                          activation='relu',
                                          trainable=trainable)
            net = tflearn.fully_connected(net,
                                          1,
                                          activation='linear',
                                          trainable=trainable)
            return net
Example #10
    def create_actor_network(self):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])

            split_0 = tflearn.fully_connected(inputs[:, 0:1, -1],
                                              128,
                                              activation='relu')
            split_1 = tflearn.fully_connected(inputs[:, 1:2, -1],
                                              128,
                                              activation='relu')
            split_2 = tflearn.conv_1d(inputs[:, 2:3, :],
                                      128,
                                      4,
                                      activation='relu')
            split_3 = tflearn.conv_1d(inputs[:, 3:4, :],
                                      128,
                                      4,
                                      activation='relu')
            split_4 = tflearn.conv_1d(inputs[:, 4:5, :A_DIM],
                                      128,
                                      4,
                                      activation='relu')
            split_5 = tflearn.fully_connected(inputs[:, 4:5, -1],
                                              128,
                                              activation='relu')

            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)

            merge_net = tflearn.merge([
                split_0, split_1, split_2_flat, split_3_flat, split_4_flat,
                split_5
            ], 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net,
                                                  128,
                                                  activation='relu')
            out = tflearn.fully_connected(dense_net_0,
                                          self.a_dim,
                                          activation='softmax')
            # print("inputs2",inputs[:,2:3,:])
            # print("split_0",split_0.shape)
            # print("split_1",split_1.shape)
            # print("split_2",split_2.shape,"split_2_flat",split_2_flat.shape)
            # print("split_3",split_3.shape,"split_3_flat",split_3_flat.shape)
            # print("split_4",split_4.shape,"split_4_flat",split_4_flat.shape)
            # print("split_5",split_5.shape)
            # print("merge_net",merge_net.shape)
            # exit()

            return inputs, out
Example #11
File: model.py Project: godka/epilogue
    def create_network(self, s, scope='Actor/eval', trainable=True):
        with tf.variable_scope(scope):
            inputs = tf.reshape(s, [-1, S_INFO, S_LEN])
            split_0 = tflearn.fully_connected(inputs[:, 0:1, :],
                                              FEATURE_NUM,
                                              activation='relu',
                                              trainable=trainable)
            split_1 = tflearn.fully_connected(inputs[:, 1:2, :],
                                              FEATURE_NUM,
                                              activation='relu',
                                              trainable=trainable)
            split_2 = tflearn.conv_1d(inputs[:, 2:3, :],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu',
                                      trainable=trainable)
            split_3 = tflearn.conv_1d(inputs[:, 3:4, :],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu',
                                      trainable=trainable)
            split_4 = tflearn.conv_1d(inputs[:, 4:5, :A_DIM],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu',
                                      trainable=trainable)
            split_5 = tflearn.fully_connected(inputs[:, 5:6, :],
                                              FEATURE_NUM,
                                              activation='relu',
                                              trainable=trainable)

            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)

            net = tf.stack([
                split_0, split_1, split_2_flat, split_3_flat, split_4_flat,
                split_5
            ],
                           axis=1)
            net = tflearn.fully_connected(net,
                                          FEATURE_NUM,
                                          activation='relu',
                                          trainable=trainable)
            merge_net, alphas = self.attention(net, FEATURE_NUM)
            dense_net_0 = self.nac(merge_net, FEATURE_NUM, trainable=trainable)
            out = self.nac(dense_net_0, 2, trainable=trainable)
            out = tf.nn.sigmoid(out)  # tf.nn.sigmoid takes no trainable argument
            #a = tflearn.fully_connected(
            #    net, 2, activation='sigmoid', trainable=trainable)
            return tf.multiply(out, 60., name='scaled_a'), alphas
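Note: self.nac above is a neural accumulator (NAC, Trask et al., 2018) layer defined elsewhere in the epilogue project; self.attention is likewise external. A minimal sketch of a standard NAC cell, with names chosen here for illustration:

def nac(x, out_units, trainable=True, name='nac'):
    # Standard NAC cell: the effective weight W = tanh(W_hat) * sigmoid(M_hat)
    # is biased toward {-1, 0, 1}, so the layer learns additions and
    # subtractions of its inputs rather than arbitrary scalings.
    in_units = int(x.get_shape()[-1])
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        w_hat = tf.get_variable('w_hat', [in_units, out_units], trainable=trainable)
        m_hat = tf.get_variable('m_hat', [in_units, out_units], trainable=trainable)
        w = tf.tanh(w_hat) * tf.sigmoid(m_hat)
        return tf.matmul(x, w)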
Example #12
    def create_network(self):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(shape=[None, self.S_INFO, self.S_LEN])
            split_0 = tflearn.fully_connected(inputs[:, 0:1, -1],
                                              FEATURE_NUM,
                                              activation='relu')
            split_1 = tflearn.fully_connected(inputs[:, 1:2, -1],
                                              FEATURE_NUM,
                                              activation='relu')
            split_2 = tflearn.conv_1d(inputs[:, 2:3, :],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu')
            split_3 = tflearn.conv_1d(inputs[:, 3:4, :],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu')
            split_4 = tflearn.conv_1d(inputs[:, 4:5, :self.A_DIM],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu')
            split_5 = tflearn.conv_1d(inputs[:, 5:6, :self.A_DIM],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu')
            split_6 = tflearn.fully_connected(inputs[:, 6:7, -1],
                                              FEATURE_NUM,
                                              activation='relu')

            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)
            split_5_flat = tflearn.flatten(split_5)

            merge_net = tf.stack([
                split_0, split_1, split_2_flat, split_3_flat, split_4_flat,
                split_5_flat, split_6
            ],
                                 axis=-1)
            # transpose to [batch, time, features] to fit the gru layer
            merge_net = tf.transpose(merge_net, [0, 2, 1])
            dense_net_0 = tflearn.gru(merge_net,
                                      FEATURE_NUM,
                                      activation='relu')

            out = tflearn.fully_connected(dense_net_0,
                                          self.A_DIM,
                                          activation='softmax')

            return inputs, out
Example #13
    def _build_a(self, s, scope, trainable):
        with tf.variable_scope(scope):
            inputs = tf.reshape(s, [-1, S_INFO, S_LEN])
            split_0 = tflearn.fully_connected(inputs[:, 0:1, :],
                                              FEATURE_NUM,
                                              activation='relu',
                                              trainable=trainable)
            split_1 = tflearn.fully_connected(inputs[:, 1:2, :],
                                              FEATURE_NUM,
                                              activation='relu',
                                              trainable=trainable)
            split_2 = tflearn.conv_1d(inputs[:, 2:3, :],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu',
                                      trainable=trainable)
            split_3 = tflearn.conv_1d(inputs[:, 3:4, :],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu',
                                      trainable=trainable)
            split_4 = tflearn.conv_1d(inputs[:, 4:5, :A_DIM],
                                      FEATURE_NUM,
                                      4,
                                      activation='relu',
                                      trainable=trainable)
            split_5 = tflearn.fully_connected(inputs[:, 5:6, :],
                                              FEATURE_NUM,
                                              activation='relu',
                                              trainable=trainable)

            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)

            net = tf.stack([
                split_0, split_1, split_2_flat, split_3_flat, split_4_flat,
                split_5
            ],
                           axis=1)
            net = tflearn.fully_connected(net,
                                          FEATURE_NUM,
                                          activation='relu',
                                          trainable=trainable)
            a = tflearn.fully_connected(net,
                                        self.a_dim,
                                        activation='sigmoid',
                                        trainable=trainable)
            return tf.multiply(a, self.a_bound, name='scaled_a')
Example #14
def hybrid_header(x, reuse=False):
    size = 3
    inputs_shape = x.get_shape().as_list()
    with tf.variable_scope('1d-cnn'):
        split_array = []
        for t in range(S_LEN):
            tmp_split = tflearn.conv_1d(
                x[:, t:t + 1, :], FEATURE_NUM, size, activation='relu')
            tmp_split_flat = tflearn.flatten(tmp_split)
            tmp_split_flat = tflearn.layers.normalization.batch_normalization(tmp_split_flat)
            split_array.append(tmp_split_flat)
        merge_net = tflearn.merge(split_array, 'concat')
        _count = merge_net.get_shape().as_list()[1]
        out_cnn = tf.reshape(
            merge_net, [-1, inputs_shape[1], _count // inputs_shape[1]])

    with tf.variable_scope('gru'):
        net = tflearn.gru(out_cnn, FEATURE_NUM, return_seq=True)
        out_gru = tflearn.gru(net, FEATURE_NUM)
        out_gru = tf.expand_dims(out_gru, 1)

    conv_1d_net = tflearn.conv_1d(out_gru, FEATURE_NUM, size, activation='relu')
    conv_1d_net_flattern = tflearn.flatten(conv_1d_net)
    
    # with tf.name_scope('1d-cnn'):
    #     network_array = []
    #     for p in xrange(S_INFO - 1):
    #         branch_array = []
    #         for i in xrange(2,4):
    #             sp_branch = tflearn.conv_1d(x[:, :, p:p+1], FEATURE_NUM, i, padding='valid', activation='relu', regularizer="L2")
    #             branch_array.append(sp_branch)
    #         branch = tflearn.merge(branch_array, mode='concat', axis=1)
    #         branch = tf.expand_dims(branch, 2)
    #         branch = global_max_pool(branch)
    #         branch = tflearn.dropout(branch, 0.5)
    #         network_array.append(branch)
    #     out_cnn = tflearn.merge(network_array, 'concat')

    #with tf.name_scope('gru'):
    #    #net = tflearn.gru(x, FEATURE_NUM, return_seq=True)
    #    net = tflearn.gru(x, FEATURE_NUM)
    #    out_gru = tflearn.fully_connected(
    #        net, FEATURE_NUM, activation='relu')
    #    out_gru = tflearn.dropout(out_gru, 0.5)

    #merge_net = tflearn.merge([out_cnn, out_gru], 'concat')

    return conv_1d_net_flattern
Example #15
    def create_actor_network(self):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])

            split_0 = tflearn.fully_connected(inputs[:, 0:1, -1],
                                              128,
                                              activation='relu')
            split_1 = tflearn.fully_connected(inputs[:, 1:2, -1],
                                              128,
                                              activation='relu')
            split_2 = tflearn.conv_1d(inputs[:, 2:3, :],
                                      128,
                                      4,
                                      activation='relu')
            split_3 = tflearn.conv_1d(inputs[:, 3:4, :],
                                      128,
                                      4,
                                      activation='relu')
            split_4 = tflearn.conv_1d(inputs[:, 4:5, :A_DIM],
                                      128,
                                      4,
                                      activation='relu')
            split_5 = tflearn.fully_connected(inputs[:, 5:6, -1],
                                              128,
                                              activation='relu')
            # split_5 = tflearn.fully_connected(inputs[:, 4:5, -1], 128, activation='relu')
            #split_6 = tflearn.fully_connected(inputs[:, 6:7, -1], 128, activation='relu')
            #split_7 = tflearn.fully_connected(inputs[:, 7:8, -1], 128, activation='relu')
            #split_8 = tflearn.fully_connected(inputs[:, 8:9, -1], 128, activation='relu')

            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)

            merge_net = tflearn.merge([
                split_0, split_1, split_2_flat, split_3_flat, split_4_flat,
                split_5
            ], 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net,
                                                  128,
                                                  activation='relu')
            out = tflearn.fully_connected(dense_net_0,
                                          self.a_dim,
                                          activation='softmax')

            return inputs, out
Example #16
def vqn_model(x):
    with tf.variable_scope('vqn'):
        inputs = tflearn.input_data(placeholder=x)
        _split_array = []

        for i in range(INPUT_SEQ):
            tmp_network = tf.reshape(inputs[:, i:i + 1, :, :, :],
                                     [-1, INPUT_H, INPUT_W, INPUT_D])
            if i == 0:
                _split_array.append(CNN_Core(tmp_network))
            else:
                _split_array.append(CNN_Core(tmp_network, True))

        merge_net = tflearn.merge(_split_array, 'concat')
        merge_net = tflearn.flatten(merge_net)
        _count = merge_net.get_shape().as_list()[1]

        with tf.variable_scope('full-lstm'):
            net = tf.reshape(merge_net, [-1, INPUT_SEQ, _count // INPUT_SEQ])
            net = tflearn.gru(net, DENSE_SIZE, return_seq=True)
            out_gru = tflearn.gru(net, DENSE_SIZE, dropout=0.8)
            gru_result = tflearn.fully_connected(out_gru,
                                                 DENSE_SIZE,
                                                 activation='relu')

        out = tflearn.fully_connected(gru_result,
                                      OUTPUT_DIM,
                                      activation='sigmoid')

        return out
Example #17
    def create_actor_network(self):
        with tf.variable_scope(self.scope + '-actor'):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])
            split_array = []
            for i in range(self.s_dim[0]):
                tmp = tf.reshape(inputs[:, i:i + 1, :], (-1, self.s_dim[1], 1))
                split = tflearn.conv_1d(tmp,
                                        FEATURE_NUM // 4,
                                        KERNEL,
                                        activation='relu')
                split = tflearn.avg_pool_1d(split, 2)
                split = tflearn.conv_1d(split,
                                        FEATURE_NUM // 2,
                                        KERNEL,
                                        activation='relu')
                split = tflearn.avg_pool_1d(split, 2)
                split = tflearn.conv_1d(split,
                                        FEATURE_NUM,
                                        KERNEL,
                                        activation='relu')
                #split = tflearn.avg_pool_1d(split, 2)
                flattern = tflearn.flatten(split)
                split_array.append(flattern)
            dense_net_0 = tflearn.merge(split_array, 'concat')
            dense_net_0 = tflearn.fully_connected(dense_net_0,
                                                  FEATURE_NUM,
                                                  activation='relu')
            out = tflearn.fully_connected(dense_net_0,
                                          self.a_dim,
                                          activation='softmax')

            return inputs, out
Example #18
def CNN_Core(x, reuse=False):
    with tf.variable_scope('cnn_core', reuse=reuse):
        network = tflearn.conv_2d(x,
                                  KERNEL,
                                  3,
                                  activation='relu',
                                  regularizer="L2",
                                  weight_decay=0.0001)
        network = tflearn.max_pool_2d(network, 2)
        network = tflearn.conv_2d(network,
                                  KERNEL,
                                  2,
                                  activation='relu',
                                  regularizer="L2",
                                  weight_decay=0.0001)
        network = tflearn.max_pool_2d(network, 2)
        network = tflearn.conv_2d(network,
                                  KERNEL,
                                  2,
                                  activation='relu',
                                  regularizer="L2",
                                  weight_decay=0.0001)
        network = tflearn.max_pool_2d(network, 2)

        split_flat = tflearn.flatten(network)
        return split_flat
Example #19
def net_alzheimer_cnn(image_dims):
    net = layers.core.input_data(shape=[None, image_dims[0], image_dims[1], image_dims[2], image_dims[3]], dtype=tf.float32)
    
    #3d convolution layers
    net = layers.conv.conv_3d(net, 8, 3, strides=1, activation='relu')
    net = layers.conv.max_pool_3d(net, [1,2,2,2,1], strides=[1,2,2,2,1])

    net = layers.conv.conv_3d(net, 8, 3, strides=1, activation='relu')
    net = layers.conv.max_pool_3d(net, [1,2,2,2,1], strides=[1,2,2,2,1])

    net = layers.conv.conv_3d(net, 8, 3, strides=1, activation='relu')
    net = layers.conv.max_pool_3d(net, [1,2,2,2,1], strides=[1,2,2,2,1])
    
    #fully connected layers
    net = tflearn.flatten(net)
    net = layers.core.fully_connected(net, 2000, activation='relu')
    net = layers.core.dropout(net, 0.8)
    net = layers.core.fully_connected(net, 500, activation='relu')
    net = layers.core.dropout(net, 0.8)
    
    #classification layer
    net = layers.core.fully_connected(net, 2, activation='softmax')
    
    net = layers.estimator.regression(net, optimizer='adam',
                                      loss='categorical_crossentropy',
                                      learning_rate=0.001)
    return net
Example #20
def build_hybrid_net(x):
    inputs = tflearn.input_data(placeholder=x)
    with tf.name_scope('1d-cnn'):
        network_array = []
        for p in range(S_INFO):
            branch = tflearn.conv_1d(inputs[:, :, p:p + 1],
                                     FEATURE_NUM,
                                     3,
                                     activation='relu',
                                     regularizer="L2")
            branch = tflearn.flatten(branch)
            network_array.append(branch)
        out_cnn = tflearn.merge(network_array, 'concat')
    with tf.name_scope('gru'):
        net = tflearn.gru(inputs, FEATURE_NUM, return_seq=True)
        out_gru = tflearn.gru(net, FEATURE_NUM)

    header = tflearn.merge([out_cnn, out_gru], 'concat')
    dense_net = tflearn.fully_connected(header,
                                        FEATURE_NUM * 2,
                                        activation='relu',
                                        regularizer="L2")
    dense_net = tflearn.fully_connected(dense_net,
                                        FEATURE_NUM * 1,
                                        activation='relu',
                                        regularizer="L2")
    out = tflearn.fully_connected(dense_net,
                                  1,
                                  activation='sigmoid',
                                  regularizer="L2")
    return out, header
Example #21
def create_conv_part(net_inputs):
    """Creates an input stream from depth image."""
    net = tflearn.conv_2d(incoming=net_inputs,
                          nb_filter=32,
                          filter_size=5,
                          strides=5,
                          padding='valid',
                          activation='relu')
    net = tflearn.max_pool_2d(incoming=net, kernel_size=2, strides=2)
    net = tflearn.conv_2d(incoming=net,
                          nb_filter=32,
                          filter_size=2,
                          strides=2,
                          padding='valid',
                          activation='relu')
    net = tflearn.max_pool_2d(incoming=net, kernel_size=2, strides=2)
    net = tflearn.conv_2d(incoming=net,
                          nb_filter=64,
                          filter_size=2,
                          strides=2,
                          padding='valid',
                          activation='relu')
    net = tflearn.max_pool_2d(incoming=net, kernel_size=2, strides=2)
    net = tflearn.flatten(incoming=net)
    return net
Example #22
File: a3c.py Project: yogeshVU/QARC
    def create_actor_network(self):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])
            split_array = []
            for i in range(self.s_dim[0] - 1):
                split = tflearn.conv_1d(inputs[:, i:i + 1, :],
                                        FEATURE_NUM,
                                        KERNEL,
                                        activation='relu')
                flattern = tflearn.flatten(split)
                split_array.append(flattern)

            dense_net = tflearn.fully_connected(inputs[:, -1:, :],
                                                FEATURE_NUM,
                                                activation='relu')
            split_array.append(dense_net)
            merge_net = tflearn.merge(split_array, 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net,
                                                  64,
                                                  activation='relu')
            # dense_net_0 = tflearn.dropout(dense_net_0, 0.8)
            out = tflearn.fully_connected(dense_net_0,
                                          self.a_dim,
                                          activation='softmax')

            return inputs, out
Example #23
File: gray.py Project: yogeshVU/QARC
def vqn_model(x):
    with tf.variable_scope('vqn'):
        inputs = tflearn.input_data(placeholder=x)
        _split_array = []
        _cnn_array = []

        for i in range(INPUT_SEQ):
            tmp_network = tf.reshape(inputs[:, i:i + 1, :, :, :],
                                     [-1, INPUT_H, INPUT_W, INPUT_D])
            if i == 0:
                _tmp_split, _tmp_cnn = CNN_Core(tmp_network)
            else:
                _tmp_split, _tmp_cnn = CNN_Core(tmp_network, True)
            _split_array.append(_tmp_split)
            _cnn_array.append(_tmp_cnn)

        merge_net = tflearn.merge(_split_array, 'concat')
        merge_net = tflearn.flatten(merge_net)
        _count = merge_net.get_shape().as_list()[1]

        with tf.variable_scope('full-lstm'):
            net = tf.reshape(merge_net, [-1, INPUT_SEQ, _count // INPUT_SEQ])
            net = tflearn.gru(net, HIDDEN_UNIT, return_seq=True)
            net = tflearn.gru(net, HIDDEN_UNIT, return_seq=True)
            net, alphas = attention(net, HIDDEN_UNIT)
            out = tflearn.fully_connected(net,
                                          OUTPUT_DIM,
                                          activation='sigmoid')

        return out, tf.stack(_cnn_array, axis=0), alphas
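Note: the attention helper used above is not shown in this snippet. A common additive (Bahdanau-style) formulation that returns both the context vector and the weights, matching the (net, alphas) call signature, might look like this (all variable names here are assumptions):

def attention(inputs, attention_size):
    # tflearn.gru with return_seq=True yields a list of per-step tensors,
    # so stack them into [batch, time, hidden] first.
    if isinstance(inputs, list):
        inputs = tf.stack(inputs, axis=1)
    hidden_size = int(inputs.get_shape()[2])
    w = tf.get_variable('att_w', [hidden_size, attention_size])
    b = tf.get_variable('att_b', [attention_size])
    u = tf.get_variable('att_u', [attention_size])
    v = tf.tanh(tf.tensordot(inputs, w, axes=1) + b)  # [batch, time, att]
    vu = tf.tensordot(v, u, axes=1)                   # [batch, time]
    alphas = tf.nn.softmax(vu)                        # attention weights
    output = tf.reduce_sum(inputs * tf.expand_dims(alphas, -1), 1)
    return output, alphas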
Example #24
    def create_critic_network(self):
        with tf.variable_scope('critic'):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])  #(None, 7, 91)
            '''
            split_0 = tflearn.fully_connected(inputs[:, 0:1, -1], 128, activation='relu')
            split_1 = tflearn.fully_connected(inputs[:, 1:2, -1], 128, activation='relu')
            split_2 = tflearn.conv_1d(inputs[:, 2:3, :], 128, 4, activation='relu')
            split_3 = tflearn.conv_1d(inputs[:, 3:4, :], 128, 4, activation='relu')
            split_4 = tflearn.conv_1d(inputs[:, 4:5, :A_DIM], 128, 4, activation='relu')
            split_5 = tflearn.fully_connected(inputs[:, 4:5, -1], 128, activation='relu')
            
            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)
            merge_net = tflearn.merge([split_0, split_1, split_2_flat, split_3_flat, split_4_flat, split_5], 'concat')

            '''
            split = tflearn.conv_1d(inputs, 128, 1,
                                    activation='relu')  #(None, 7, 128)
            split_flat = tflearn.flatten(split)

            #dense_net_0 = tflearn.fully_connected(split_flat, 1024, activation='relu')
            #dense_net_1 = tflearn.fully_connected(dense_net_0, 512, activation='relu')
            dense_net_2 = tflearn.fully_connected(split_flat,
                                                  256,
                                                  activation='relu')
            out = tflearn.fully_connected(dense_net_2, 1, activation='linear')

            return inputs, out
Example #25
	def build_network(self, num_classes, input_shape, model):
		network = tflearn.input_data(shape=[None, input_shape[0], input_shape[1], input_shape[2]])
		if model == 'DeepFace':
			conv_1 = tflearn.relu(tflearn.conv_2d(network, 32, 11, strides=1, padding='VALID', name='Conv2d_1'))
			maxpool_1 = tflearn.max_pool_2d(conv_1, 3, strides=2, padding='VALID', name='MaxPool_1')
			conv_2 = tflearn.relu(tflearn.conv_2d(maxpool_1, 32, 9, strides=1, padding='VALID', name='Conv2d_2'))

			local_1 = tflearn.relu(self.local(conv_2, 16, 9, 1, 'Local_1'))
			local_2 = tflearn.relu(self.local(local_1, 16, 7, 1, 'Local_2'))
			local_3 = tflearn.relu(self.local(local_2, 16, 5, 1, 'Local_3'))

			flatterned = tflearn.flatten(local_3)
			full_1 = tflearn.dropout(tflearn.relu(tflearn.fully_connected(flatterned, 4096, name='Fully_Connected_1')), 0.5)
			output = tflearn.fully_connected(full_1, num_classes, activation='softmax', name='Output')

		elif model == 'Song':
			conv_1 = tflearn.relu(tflearn.conv_2d(network, 64, 5, strides=1, padding='VALID', name='Conv_1'))
			maxpool_1 = tflearn.max_pool_2d(conv_1, 3, strides=2, padding='VALID', name='MaxPool_1')
			conv_2 = tflearn.relu(tflearn.conv_2d(maxpool_1, 64 , 5, strides=1, padding='VALID', name='Conv_2'))
			maxpool_2 = tflearn.max_pool_2d(conv_2, 3, strides=2, padding='VALID', name='MaxPool_2')

			local_1 = tflearn.dropout(tflearn.relu(self.local(maxpool_2, 32, 3, 1, 'Local_1')), 1)
			local_2 = tflearn.dropout(tflearn.relu(self.local(local_1, 32, 3, 1, 'Local_2')), 1)
			flatterned = tflearn.flatten(local_2)
			output = tflearn.fully_connected(flatterned, num_classes, activation='softmax', name='Output')

		else:
			conv_1 = tflearn.relu(tflearn.conv_2d(network, 64, 7, strides=2, bias=True, padding='VALID', name='Conv2d_1'))
			maxpool_1 = tflearn.batch_normalization(tflearn.max_pool_2d(conv_1, 3, strides=2, padding='VALID', name='MaxPool_1'))

			conv_2a = tflearn.relu(tflearn.conv_2d(maxpool_1, 96, 1, strides=1, padding='VALID', name='Conv_2a_FX1'))
			maxpool_2a = tflearn.max_pool_2d(maxpool_1, 3, strides=1, padding='VALID', name='MaxPool_2a_FX1')
			conv_2b = tflearn.relu(tflearn.conv_2d(conv_2a, 208, 3, strides=1, padding='VALID', name='Conv_2b_FX1'))
			conv_2c = tflearn.relu(tflearn.conv_2d(maxpool_2a, 64, 1, strides=1, padding='VALID', name='Conv_2c_FX1'))
			FX1_out = tflearn.merge([conv_2b, conv_2c], mode='concat', axis=3, name='FX1_out')

			conv_3a = tflearn.relu(tflearn.conv_2d(FX1_out, 96, 1, strides=1, padding='VALID', name='Conv_3a_FX2'))
			maxpool_3a = tflearn.max_pool_2d(FX1_out, 3, strides=1, padding='VALID', name='MaxPool_3a_FX2')
			conv_3b = tflearn.relu(tflearn.conv_2d(conv_3a, 208, 3, strides=1, padding='VALID', name='Conv_3b_FX2'))
			conv_3c = tflearn.relu(tflearn.conv_2d(maxpool_3a, 64, 1, strides=1, padding='VALID', name='Conv_3c_FX2'))
			FX2_out = tflearn.merge([conv_3b, conv_3c], mode='concat', axis=3, name='FX2_out')
			net = tflearn.flatten(FX2_out)
			output = tflearn.fully_connected(net, num_classes, activation='softmax', name='Output')

		return tflearn.regression(output, optimizer='Adam', loss='categorical_crossentropy', learning_rate=0.000001)
Example #26
    def CNN_Core(self, x, reuse=False):
        with tf.variable_scope(self.scope + '-cnn_core', reuse=reuse):
            tmp = tflearn.conv_1d(x,
                                  FEATURE_NUM // 4,
                                  KERNEL,
                                  activation='relu')
            tmp = tflearn.batch_normalization(tmp)
            tmp = tflearn.flatten(tmp)
            return tmp
Example #27
    def build_dqn(self):
        '''Defines the value function model'''

        with tf.variable_scope(self.scope):
            self.inputs = tf.placeholder(tf.float32, [None] + list(INPUT_SHAPE))
            net = tflearn.flatten(self.inputs)
            net = tflearn.fully_connected(net,NUM_HIDDEN_UNITS,activation='relu')
            self.qvals = tflearn.fully_connected(net, self.num_actions)
        return
Example #28
    def __init__(self, h_size=512, n_actions=4, learning_rate=1e-4):
        input_x = tf.placeholder(dtype=tf.float32, shape=[None, 84, 84, 3], name="Input")
        conv1 = tflearn.conv_2d(input_x, nb_filter=32, filter_size=8, strides=4,
                                padding='VALID', weights_init='xavier',
                                activation='relu', name="Conv1")
        conv2 = tflearn.conv_2d(conv1, nb_filter=64, filter_size=4, strides=2,
                                padding='VALID', weights_init='xavier',
                                activation='relu', name="Conv2")
        conv3 = tflearn.conv_2d(conv2, nb_filter=64, filter_size=3, strides=1,
                                padding='VALID', weights_init='xavier',
                                activation='relu', name="Conv3")
        conv4 = tflearn.conv_2d(conv3, nb_filter=h_size, filter_size=7, strides=1,
                                padding='VALID', weights_init='xavier',
                                activation='relu', name="Conv4")

        # We take the output from the final convolutional layer and split it into separate advantage and value streams.
        streamAC, streamVC = tf.split(conv4, 2, 3)
        streamA = tflearn.flatten(streamAC, name="streamA")
        streamV = tflearn.flatten(streamVC, name="streamV")

        Advantage = tflearn.fully_connected(streamA, n_units=n_actions, activation='linear',
                                            weights_init='xavier',  name="Advantage")
        Value = tflearn.fully_connected(streamV, n_units=1, activation='linear',
                                        weights_init='xavier', name="Value")
        Qout = tf.add(Value, tf.subtract(Advantage, tf.reduce_mean(Advantage, axis=1, keep_dims=True)), name="Qout")
        predict = tf.argmax(Qout, 1, name="Predict")

        targetQ = tf.placeholder(shape=[None], dtype=tf.float32)
        actions = tf.placeholder(shape=[None], dtype=tf.int32)
        actions_onehot = tf.one_hot(actions, n_actions, dtype=tf.float32)
        Q = tf.reduce_sum(tf.multiply(Qout, actions_onehot), axis=1)

        td_error = tf.square(targetQ - Q)
        loss = tf.reduce_mean(td_error)
        trainer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        update_model = trainer.minimize(loss)

        self.input_x = input_x
        self.predict = predict
        self.loss = loss
        self.Qout = Qout
        self.targetQ = targetQ
        self.actions = actions
        self.update_model = update_model
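Note: a minimal usage sketch for the dueling network above, assuming the class is named DuelingDQN (the class name is not shown in the snippet) and feeding a random batch purely for illustration:

import numpy as np
import tensorflow as tf

net = DuelingDQN(h_size=512, n_actions=4)
states = np.random.rand(32, 84, 84, 3).astype(np.float32)
targets = np.random.rand(32).astype(np.float32)  # bootstrapped Q targets
acts = np.random.randint(0, 4, size=32)          # actions taken in the batch

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, loss = sess.run([net.update_model, net.loss],
                       feed_dict={net.input_x: states,
                                  net.targetQ: targets,
                                  net.actions: acts})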
Example #29
    def create_generate_network(self):
        with tf.variable_scope(self.scope + '-gan-g', reuse=self.reuse_gan):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])
            gan_inputs = tflearn.input_data(shape=[None, GAN_CORE])
            _input = tflearn.flatten(inputs)
            _com = tflearn.merge([_input, gan_inputs], 'concat')
            _com = tflearn.flatten(_com)
            net = tflearn.fully_connected(_com,
                                          FEATURE_NUM,
                                          activation='leakyrelu')
            net = tflearn.batch_normalization(net)
            net = tflearn.fully_connected(net,
                                          FEATURE_NUM // 2,
                                          activation='leakyrelu')
            net = tflearn.batch_normalization(net)
            out = tflearn.fully_connected(net, GAN_CORE, activation='sigmoid')
            self.reuse_gan = True
            return inputs, gan_inputs, out
Example #30
def Network(data_input, training = True):
    x = tflearn.conv_2d(data_input, 32, 3, strides = 1, activation='prelu', weights_init = 'xavier')
    x = tflearn.conv_2d(x, 32, 3, strides = 2, activation='prelu', weights_init = 'xavier')
    x = tflearn.conv_2d(x, 64, 3, strides = 1, activation='prelu', weights_init = 'xavier')
    x = tflearn.conv_2d(x, 64, 3, strides = 2, activation='prelu', weights_init = 'xavier')
    x = tflearn.conv_2d(x, 128, 3, strides = 1, activation='prelu', weights_init = 'xavier')
    x = tflearn.conv_2d(x, 128, 3, strides = 2, activation='prelu', weights_init = 'xavier')
    x = tflearn.flatten(x)
    feat = tflearn.fully_connected(x, 2, weights_init = 'xavier')
    return feat
Example #31
    def test_core_layers(self):

        X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
        Y_nand = [[1.], [1.], [1.], [0.]]
        Y_or = [[0.], [1.], [1.], [1.]]

        # Graph definition
        with tf.Graph().as_default():
            # Building a network with 2 optimizers
            g = tflearn.input_data(shape=[None, 2])

            # Nand operator definition
            g_nand = tflearn.fully_connected(g, 32, activation='linear')
            g_nand = tflearn.fully_connected(g_nand, 32, activation='linear')
            g_nand = tflearn.fully_connected(g_nand, 1, activation='sigmoid')
            g_nand = tflearn.regression(g_nand, optimizer='sgd',
                                        learning_rate=2.,
                                        loss='binary_crossentropy')
            # Or operator definition
            g_or = tflearn.fully_connected(g, 32, activation='linear')
            g_or = tflearn.fully_connected(g_or, 32, activation='linear')
            g_or = tflearn.fully_connected(g_or, 1, activation='sigmoid')
            g_or = tflearn.regression(g_or, optimizer='sgd',
                                      learning_rate=2.,
                                      loss='binary_crossentropy')
            # XOR merging Nand and Or operators
            g_xor = tflearn.merge([g_nand, g_or], mode='elemwise_mul')

            # Training
            m = tflearn.DNN(g_xor)
            m.fit(X, [Y_nand, Y_or], n_epoch=400, snapshot_epoch=False)

            # Testing
            self.assertLess(m.predict([[0., 0.]])[0][0], 0.01)
            self.assertGreater(m.predict([[0., 1.]])[0][0], 0.9)
            self.assertGreater(m.predict([[1., 0.]])[0][0], 0.9)
            self.assertLess(m.predict([[1., 1.]])[0][0], 0.01)

        # Bulk Tests
        with tf.Graph().as_default():
            net = tflearn.input_data(shape=[None, 2])
            net = tflearn.flatten(net)
            net = tflearn.reshape(net, new_shape=[-1])
            net = tflearn.activation(net, 'relu')
            net = tflearn.dropout(net, 0.5)
            net = tflearn.single_unit(net)
Example #32
def encoder(inputs, hidden_layer):
    net = tflearn.conv_2d(inputs, 16, 3, strides=2)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print "========================"
    print "enc-L1",net.get_shape()
    print "========================"

    net = tflearn.conv_2d(net, 16, 3, strides=1)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print "========================"
    print "enc-L2",net.get_shape()
    print "========================"

    net = tflearn.conv_2d(net, 32, 3, strides=2)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print "========================"
    print "enc-L3",net.get_shape()
    print "========================"
    net = tflearn.conv_2d(net, 32, 3, strides=1)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print "========================"
    print "enc-L4",net.get_shape()
    print "========================"
    net = tflearn.flatten(net)
    #net = tflearn.fully_connected(net, nb_feature,activation="sigmoid")
    net = tflearn.fully_connected(net, nb_feature)
    hidden_layer = net
    net = tflearn.batch_normalization(net)
    net = tflearn.sigmoid(net)
    print "========================"
    print "hidden",net.get_shape()
    print "========================"

    return [net,hidden_layer]
Example #33
    def _build_network(self, layers):
        network = tf.transpose(self.input_tensor, [0, 2, 3, 1])
        # [batch, assets, window, features]
        network = network / network[:, :, -1, 0, None, None]
        for layer_number, layer in enumerate(layers):
            if layer["type"] == "DenseLayer":
                network = tflearn.layers.core.fully_connected(network,
                                                              int(layer["neuron_number"]),
                                                              layer["activation_function"],
                                                              regularizer=layer["regularizer"],
                                                              weight_decay=layer["weight_decay"] )
            elif layer["type"] == "DropOut":
                network = tflearn.layers.core.dropout(network, layer["keep_probability"])
            elif layer["type"] == "EIIE_Dense":
                width = network.get_shape()[2]
                network = tflearn.layers.conv_2d(network, int(layer["filter_number"]),
                                                 [1, width],
                                                 [1, 1],
                                                 "valid",
                                                 layer["activation_function"],
                                                 regularizer=layer["regularizer"],
                                                 weight_decay=layer["weight_decay"])
            elif layer["type"] == "ConvLayer":
                network = tflearn.layers.conv_2d(network, int(layer["filter_number"]),
                                                 allint(layer["filter_shape"]),
                                                 allint(layer["strides"]),
                                                 layer["padding"],
                                                 layer["activation_function"],
                                                 regularizer=layer["regularizer"],
                                                 weight_decay=layer["weight_decay"])
            elif layer["type"] == "MaxPooling":
                network = tflearn.layers.conv.max_pool_2d(network, layer["strides"])
            elif layer["type"] == "AveragePooling":
                network = tflearn.layers.conv.avg_pool_2d(network, layer["strides"])
            elif layer["type"] == "LocalResponseNormalization":
                network = tflearn.layers.normalization.local_response_normalization(network)
            elif layer["type"] == "EIIE_Output":
                width = network.get_shape()[2]
                network = tflearn.layers.conv_2d(network, 1, [1, width], padding="valid",
                                                 regularizer=layer["regularizer"],
                                                 weight_decay=layer["weight_decay"])
                network = network[:, :, 0, 0]
                btc_bias = tf.ones((self.input_num, 1))
                network = tf.concat([btc_bias, network], 1)
                network = tflearn.layers.core.activation(network, activation="softmax")
            elif layer["type"] == "Output_WithW":
                network = tflearn.flatten(network)
                network = tf.concat([network,self.previous_w], axis=1)
                network = tflearn.fully_connected(network, self._rows+1,
                                                  activation="softmax",
                                                  regularizer=layer["regularizer"],
                                                  weight_decay=layer["weight_decay"])
            elif layer["type"] == "EIIE_Output_WithW":
                width = network.get_shape()[2]
                height = network.get_shape()[1]
                features = network.get_shape()[3]
                network = tf.reshape(network, [self.input_num, int(height), 1, int(width*features)])
                w = tf.reshape(self.previous_w, [-1, int(height), 1, 1])
                network = tf.concat([network, w], axis=3)
                network = tflearn.layers.conv_2d(network, 1, [1, 1], padding="valid",
                                                 regularizer=layer["regularizer"],
                                                 weight_decay=layer["weight_decay"])
                network = network[:, :, 0, 0]
                #btc_bias = tf.zeros((self.input_num, 1))
                btc_bias = tf.get_variable("btc_bias", [1, 1], dtype=tf.float32,
                                       initializer=tf.zeros_initializer)
                btc_bias = tf.tile(btc_bias, [self.input_num, 1])
                network = tf.concat([btc_bias, network], 1)
                self.voting = network
                network = tflearn.layers.core.activation(network, activation="softmax")

            elif layer["type"] == "EIIE_LSTM" or\
                            layer["type"] == "EIIE_RNN":
                network = tf.transpose(network, [0, 2, 3, 1])
                resultlist = []
                reuse = False
                for i in range(self._rows):
                    if i > 0:
                        reuse = True
                    if layer["type"] == "EIIE_LSTM":
                        result = tflearn.layers.lstm(network[:, :, :, i],
                                                     int(layer["neuron_number"]),
                                                     dropout=layer["dropouts"],
                                                     scope="lstm"+str(layer_number),
                                                     reuse=reuse)
                    else:
                        result = tflearn.layers.simple_rnn(network[:, :, :, i],
                                                           int(layer["neuron_number"]),
                                                           dropout=layer["dropouts"],
                                                           scope="rnn"+str(layer_number),
                                                           reuse=reuse)
                    resultlist.append(result)
                network = tf.stack(resultlist)
                network = tf.transpose(network, [1, 0, 2])
                network = tf.reshape(network, [-1, self._rows, 1, int(layer["neuron_number"])])
            else:
                raise ValueError("the layer {} not supported.".format(layer["type"]))
        return network
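Note: allint is a small helper not shown above; judging from how it is applied to layer["filter_shape"] and layer["strides"], it presumably casts each element of a config list to int, roughly:

def allint(l):
    # Presumed helper: cast config entries (often strings from JSON) to int.
    return [int(i) for i in l]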