Example #1
def run_combo_XOR():
    X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
    Y_nand = [[1.], [1.], [1.], [0.]]
    Y_or = [[0.], [1.], [1.], [1.]]

    g = tflearn.input_data(shape=[None, 2])

    # Nand graph
    g_nand = tflearn.fully_connected(g, 32, activation='linear')
    g_nand = tflearn.fully_connected(g_nand, 32, activation='linear')
    g_nand = tflearn.fully_connected(g_nand, 1, activation='sigmoid')
    g_nand = tflearn.regression(
        g_nand, optimizer='sgd', learning_rate=2., loss='binary_crossentropy')

    # Or graph
    g_or = tflearn.fully_connected(g, 32, activation='linear')
    g_or = tflearn.fully_connected(g_or, 32, activation='linear')
    g_or = tflearn.fully_connected(g_or, 1, activation='sigmoid')
    g_or = tflearn.regression(
        g_or, optimizer='sgd', learning_rate=2., loss='binary_crossentropy')

    g_xor = tflearn.merge([g_nand, g_or], mode='elemwise_mul')

    m = train_model(g_xor, X, [Y_nand, Y_or])
    # sess = tf.Session()  # separate from DNN session
    sess = m.session  # reuse the model's own session rather than a separate one
    print(
        sess.run(tflearn.merge([Y_nand, Y_or], mode='elemwise_mul')))
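Note: `train_model` is not shown in this example. A minimal sketch consistent with the test in Example #34 below (the epoch count is an assumption):

def train_model(net, X, Y_list):
    # Hypothetical helper: wrap the merged graph in a DNN and fit it
    # against one target list per regression op.
    m = tflearn.DNN(net)
    m.fit(X, Y_list, n_epoch=400, snapshot_epoch=False)
    return m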
Example #2
    def create_network(self):
        input_state = tflearn.input_data(shape=[None, self._dim_state],
                                         name='input_state')
        input_goal = tflearn.input_data(shape=[None, self._dim_goal],
                                        name='input_goal')

        input_memory = tflearn.input_data(
            shape=[None, MAX_STEPS, self._dim_state + self._dim_action])

        input_ff = tflearn.merge([input_goal, input_state], 'concat')

        ff_branch = tflearn.fully_connected(input_ff, UNITS)
        ff_branch = tflearn.activations.relu(ff_branch)

        # recurrent_branch = tflearn.fully_connected(input_memory, UNITS)
        # recurrent_branch = tflearn.activations.relu(recurrent_branch)
        recurrent_branch = tflearn.lstm(input_memory, UNITS, dynamic=True)

        merged_branch = tflearn.merge([ff_branch, recurrent_branch], 'concat')
        merged_branch = tflearn.fully_connected(merged_branch, UNITS)
        merged_branch = tflearn.activations.relu(merged_branch)

        merged_branch = tflearn.fully_connected(merged_branch, UNITS)
        merged_branch = tflearn.activations.relu(merged_branch)

        weights_init = tflearn.initializations.uniform(minval=-0.003,
                                                       maxval=0.003)
        out = tflearn.fully_connected(merged_branch,
                                      self._dim_action,
                                      activation='tanh',
                                      weights_init=weights_init)
        # Scale output to [-action_bound, action_bound]
        scaled_out = tf.multiply(out, self._action_bound)
        return [input_state, input_goal, input_memory], out, scaled_out
Example #3
 def create_dual_network(self, inputs, s_dim):
     with tf.variable_scope(self.scope + '-dual', reuse=self.reuse):
         split_array = []
         for i in range(s_dim[0]):
             tmp = tf.reshape(inputs[:, i:i + 1, :], (-1, s_dim[1], 1))
             branch1 = tflearn.conv_1d(tmp,
                                       FEATURE_NUM,
                                       3,
                                       activation='relu')
             branch2 = tflearn.conv_1d(tmp,
                                       FEATURE_NUM,
                                       4,
                                       activation='relu')
             branch3 = tflearn.conv_1d(tmp,
                                       FEATURE_NUM,
                                       5,
                                       activation='relu')
             network = tflearn.merge([branch1, branch2, branch3],
                                     mode='concat',
                                     axis=1)
             network = tf.expand_dims(network, 2)
             network = tflearn.global_avg_pool(network)
             split_array.append(network)
         #out, _ = self.attention(split_array, FEATURE_NUM)
         out = tflearn.merge(split_array, 'concat')
         self.reuse = True
         return out
Example #4
def run_combo_XOR():
    X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
    Y_nand = [[1.], [1.], [1.], [0.]]
    Y_or = [[0.], [1.], [1.], [1.]]

    g = tflearn.input_data(shape=[None, 2])

    # Nand graph
    g_nand = tflearn.fully_connected(g, 32, activation='linear')
    g_nand = tflearn.fully_connected(g_nand, 32, activation='linear')
    g_nand = tflearn.fully_connected(g_nand, 1, activation='sigmoid')
    g_nand = tflearn.regression(g_nand,
                                optimizer='sgd',
                                learning_rate=2.,
                                loss='binary_crossentropy')

    # Or graph
    g_or = tflearn.fully_connected(g, 32, activation='linear')
    g_or = tflearn.fully_connected(g_or, 32, activation='linear')
    g_or = tflearn.fully_connected(g_or, 1, activation='sigmoid')
    g_or = tflearn.regression(g_or,
                              optimizer='sgd',
                              learning_rate=2.,
                              loss='binary_crossentropy')

    g_xor = tflearn.merge([g_nand, g_or], mode='elemwise_mul')

    m = train_model(g_xor, X, [Y_nand, Y_or])
    # sess = tf.Session()  # separate from DNN session
    sess = m.session  # reuse the model's own session rather than a separate one
    print(sess.run(tflearn.merge([Y_nand, Y_or], mode='elemwise_mul')))
Example #5
def build_hybrid_net(x):
    inputs = tflearn.input_data(placeholder=x)
    with tf.name_scope('1d-cnn'):
        network_array = []
        for p in xrange(S_INFO):
            branch = tflearn.conv_1d(inputs[:, :, p:p + 1],
                                     FEATURE_NUM,
                                     3,
                                     activation='relu',
                                     regularizer="L2")
            branch = tflearn.flatten(branch)
            network_array.append(branch)
        out_cnn = tflearn.merge(network_array, 'concat')
    with tf.name_scope('gru'):
        net = tflearn.gru(inputs, FEATURE_NUM, return_seq=True)
        out_gru = tflearn.gru(net, FEATURE_NUM)

    header = tflearn.merge([out_cnn, out_gru], 'concat')
    dense_net = tflearn.fully_connected(header,
                                        FEATURE_NUM * 2,
                                        activation='relu',
                                        regularizer="L2")
    dense_net = tflearn.fully_connected(dense_net,
                                        FEATURE_NUM * 1,
                                        activation='relu',
                                        regularizer="L2")
    out = tflearn.fully_connected(dense_net,
                                  1,
                                  activation='sigmoid',
                                  regularizer="L2")
    return out, header
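This builder assumes an externally created placeholder `x` plus the `S_INFO` and `FEATURE_NUM` constants. A hedged sketch of the surrounding setup (all shapes and values below are assumptions, not from the original project):

import tensorflow as tf
import tflearn

S_INFO = 6        # assumed number of state features
S_LEN = 8         # assumed history length
FEATURE_NUM = 64  # assumed branch width

# [batch, history length, features]
x = tf.placeholder(tf.float32, shape=[None, S_LEN, S_INFO])
out, header = build_hybrid_net(x)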
Example #6
def hybrid_header(x, reuse=False):
    # size = 3
    # inputs_shape = x.get_shape().as_list()
    # with tf.variable_scope('1d-cnn'):
    #     split_array = []
    #     for t in xrange(S_LEN - 1):
    #         tmp_split = tflearn.conv_1d(
    #             x[:, t:t + 1, :], FEATURE_NUM, size, activation='relu')
    #         tmp_split_flat = tflearn.flatten(tmp_split)
    #         tmp_split_flat = tflearn.layers.normalization.batch_normalization(tmp_split_flat)
    #         split_array.append(tmp_split_flat)
    #     merge_net = tflearn.merge(split_array, 'concat')
    #     _count = merge_net.get_shape().as_list()[1]
    #     out_cnn = tf.reshape(
    #         merge_net, [-1, inputs_shape[1], _count / inputs_shape[1]])

    # with tf.variable_scope('gru'):
    #     net = tflearn.gru(out_cnn, FEATURE_NUM, return_seq=True)
    #     out_gru = tflearn.gru(net, FEATURE_NUM)
    #     out_gru = tf.expand_dims(out_gru, 1)

    #conv_1d_net = tflearn.conv_1d(out_gru, FEATURE_NUM, size, activation='relu')
    #conv_1d_net_flattern = tflearn.flatten(conv_1d_net)

    with tf.name_scope('1d-cnn'):
        network_array = []
        for p in xrange(S_INFO - 1):
            branch_array = []
            for i in xrange(2, 4):
                sp_branch = tflearn.conv_1d(x[:, :, p:p + 1],
                                            FEATURE_NUM,
                                            i,
                                            padding='valid',
                                            activation='relu',
                                            regularizer="L2")
                branch_array.append(sp_branch)
            branch = tflearn.merge(branch_array, mode='concat', axis=1)
            branch = tf.expand_dims(branch, 2)
            branch = global_max_pool(branch)
            #branch = tflearn.dropout(branch, 0.5)
            network_array.append(branch)
        out_cnn = tflearn.merge(network_array, 'concat')

    #with tf.name_scope('gru'):
    #    #net = tflearn.gru(x, FEATURE_NUM, return_seq=True)
    #    net = tflearn.gru(x, FEATURE_NUM)
    #    out_gru = tflearn.fully_connected(
    #        net, FEATURE_NUM, activation='relu')
    #    out_gru = tflearn.dropout(out_gru, 0.5)

    #merge_net = tflearn.merge([out_cnn, out_gru], 'concat')

    return out_cnn
Example #7
File: vqn-cnn.py  Project: yogeshVU/QARC
def vqn_model(x):
    with tf.variable_scope('vqn'):
        inputs = tflearn.input_data(placeholder=x)
        _split_array = []

        for i in range(INPUT_SEQ):
            tmp_network = tf.reshape(inputs[:, i:i + 1, :, :, :],
                                     [-1, INPUT_H, INPUT_W, INPUT_D])
            if i == 0:
                _split_array.append(CNN_Core(tmp_network))
            else:
                _split_array.append(CNN_Core(tmp_network, True))

        merge_net = tflearn.merge(_split_array, 'concat')
        merge_net = tflearn.flatten(merge_net)
        _count = merge_net.get_shape().as_list()[1]

        with tf.variable_scope('full-cnn'):
            net = tf.reshape(merge_net, [-1, INPUT_SEQ, _count / INPUT_SEQ, 1])
            network = tflearn.conv_2d(net,
                                      KERNEL,
                                      5,
                                      activation='relu',
                                      regularizer="L2",
                                      weight_decay=0.0001)
            network = tflearn.max_pool_2d(network, 3)
            network = tflearn.layers.normalization.batch_normalization(network)
            network = tflearn.conv_2d(network,
                                      KERNEL,
                                      3,
                                      activation='relu',
                                      regularizer="L2",
                                      weight_decay=0.0001)
            network = tflearn.max_pool_2d(network, 2)
            network = tflearn.layers.normalization.batch_normalization(network)
            CNN_result = tflearn.fully_connected(network,
                                                 DENSE_SIZE,
                                                 activation='relu')
            #CNN_result = tflearn.fully_connected(CNN_result, OUTPUT_DIM, activation='sigmoid')

        # with tf.variable_scope('full-gru'):
        #    net = tf.reshape(merge_net, [-1, INPUT_SEQ, _count / INPUT_SEQ])
        #    net = tflearn.gru(net, DENSE_SIZE, return_seq=True)
        #    out_gru = tflearn.gru(net, DENSE_SIZE,dropout=0.8)
        #    gru_result = tflearn.fully_connected(out_gru, DENSE_SIZE, activation='relu')
        #gru_result = tflearn.fully_connected(gru_result, OUTPUT_DIM, activation='sigmoid')

        # merge_net = tflearn.merge([gru_result, CNN_result], 'concat')  # gru_result's branch is commented out above
        out = tflearn.fully_connected(CNN_result,
                                      OUTPUT_DIM,
                                      activation='sigmoid')

        return out
Example #8
    def create_critic_network(self):
        """The critic has 2 flows, depth image and vector of state."""
        s_inputs = tflearn.input_data(shape=[None, self.s_dim])
        observation = tflearn.merge([self._image_encoder.embedding, s_inputs],
                                    mode='concat',
                                    axis=1)

        action = tflearn.input_data(shape=[None, self.a_dim])
        net = tflearn.merge([observation, action], mode='concat', axis=1)

        net = create_fc_part(net)
        out = tflearn.fully_connected(incoming=net, n_units=1)

        return s_inputs, action, out
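`create_fc_part` and `self._image_encoder` are defined elsewhere in that project. A purely hypothetical sketch of such a helper (the layer sizes are assumptions, in the spirit of common DDPG critics):

def create_fc_part(net):
    # hypothetical fully connected trunk for the critic
    net = tflearn.fully_connected(net, 400, activation='relu')
    net = tflearn.fully_connected(net, 300, activation='relu')
    return net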
Example #9
File: gray.py  Project: yogeshVU/QARC
def vqn_model(x):
    with tf.variable_scope('vqn'):
        inputs = tflearn.input_data(placeholder=x)
        _split_array = []
        _cnn_array = []

        for i in range(INPUT_SEQ):
            tmp_network = tf.reshape(inputs[:, i:i + 1, :, :, :],
                                     [-1, INPUT_H, INPUT_W, INPUT_D])
            if i == 0:
                _tmp_split, _tmp_cnn = CNN_Core(tmp_network)
            else:
                _tmp_split, _tmp_cnn = CNN_Core(tmp_network, True)
            _split_array.append(_tmp_split)
            _cnn_array.append(_tmp_cnn)

        merge_net = tflearn.merge(_split_array, 'concat')
        merge_net = tflearn.flatten(merge_net)
        _count = merge_net.get_shape().as_list()[1]

        with tf.variable_scope('full-lstm'):
            net = tf.reshape(merge_net, [-1, INPUT_SEQ, _count / INPUT_SEQ])
            net = tflearn.gru(net, HIDDEN_UNIT, return_seq=True)
            net = tflearn.gru(net, HIDDEN_UNIT, return_seq=True)
            net, alphas = attention(net, HIDDEN_UNIT)
            out = tflearn.fully_connected(net,
                                          OUTPUT_DIM,
                                          activation='sigmoid')

        return out, tf.stack(_cnn_array, axis=0), alphas
Example #10
def def_model(obj_shape, obj_features_tensor, word_emb_net, obj_coord_tensor,
              coord_shape):
    # get dense representation of object features pairwise. the output will be of size 128

    obj_features = tf.split(obj_features_tensor, obj_shape[1] / 2, axis=1)
    obj_coord = tf.split(obj_coord_tensor, coord_shape[1] / 2, axis=1)
    obj_fc_pair = []
    for feature_pair, coord_pair in zip(obj_features, obj_coord):
        concat_feature = tf.reshape(feature_pair, [-1, 2 * 4096])
        concat_coord = tf.reshape(coord_pair, [-1, 2 * 4])

        input_obj1 = tflearn.input_data(shape=[None, 2 * 4096])
        fc_obj1 = tflearn.fully_connected(input_obj1, 128, activation="tanh")
        '''
        fc_obj_word_1 = tflearn.merge([fc_obj1, word_emb_net], mode="concat")
        fc_obj_word_2 = tflearn.fully_connected(fc_obj_word_1, 128, activation="tanh")
        '''
        input_coord1 = tflearn.input_data(shape=[None, 2 * 4])  # matches concat_coord's [-1, 2 * 4]
        fc_coord1 = tflearn.fully_connected(input_coord1,
                                            128,
                                            activation="tanh")

        fc_obj_word_coord_1 = tflearn.merge([fc_obj1, fc_coord1],
                                            mode="concat")
        fc_obj_word_coord_2 = tflearn.fully_connected(fc_obj_word_coord_1,
                                                      128,
                                                      activation="tanh")

        obj_fc_pair.append(fc_obj_word_coord_2)
    print("length of obj_fc_pair list: %d" % (len(obj_fc_pair)))
    obj_fc_pair = tf.stack(obj_fc_pair)

    print(obj_fc_pair.get_shape().as_list())
    obj_fc_pair = tf.transpose(obj_fc_pair, perm=[1, 0, 2])
    return obj_fc_pair
Example #11
    def create_critic_network(self):
        with tf.variable_scope('critic'):
            inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1],self.s_dim[2]], name = 'critic_input')

            # split_1 = tflearn.conv_2d(inputs[:, 0:1,:, :], 128, 4, activation='relu')
            # split_2 = tflearn.conv_2d(inputs[:, 1:2,:, :], 128, 4, activation='relu')

            # split_1_flat_1 = tflearn.flatten(split_1)
            # split_1_flat_2 = tflearn.flatten(split_2)

            # merge_net = tflearn.merge([split_1_flat_1, split_1_flat_2], 'concat')

            # dense_net_0 = tflearn.fully_connected(merge_net, 128, activation='relu')
            split_1_flat = tflearn.flatten(inputs[:, 0:1, :, :])
            split_2_flat = tflearn.flatten(inputs[:, 1:2, :, :])

            # merge_net = tflearn.merge([split_1_flat, split_2_flat], 'concat')

            dense_net_0 = tflearn.fully_connected(split_1_flat, 64, activation='LeakyReLU')
            dense_net_1 = tflearn.fully_connected(split_2_flat, 64, activation='LeakyReLU')
            merge_net = tflearn.merge([dense_net_0, dense_net_1], 'concat')

            out = tflearn.fully_connected(merge_net, 1, activation='linear', name = 'critic_output')

            return inputs, out
Example #12
def create_convolutional(max_sequence_length, dict_size):
    net = tflearn.input_data([None, max_sequence_length])
    net = tflearn.embedding(net, input_dim=dict_size + 1, output_dim=128)
    branch1 = tflearn.conv_1d(net,
                              128,
                              3,
                              padding='valid',
                              activation='relu',
                              regularizer='L2')
    branch2 = tflearn.conv_1d(net,
                              128,
                              4,
                              padding='valid',
                              activation='relu',
                              regularizer='L2')
    branch3 = tflearn.conv_1d(net,
                              128,
                              5,
                              padding='valid',
                              activation='relu',
                              regularizer='L2')
    net = tflearn.merge([branch1, branch2, branch3], mode='concat', axis=1)
    net = tf.expand_dims(net, 2)
    net = tflearn.layers.conv.global_max_pool(net)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net,
                             optimizer='sgd',
                             learning_rate=0.1,
                             loss='categorical_crossentropy')

    return tflearn.DNN(net,
                       tensorboard_verbose=0,
                       tensorboard_dir='../tensorboard/tensorboard_conv')
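This is the familiar multi-kernel-size text CNN: parallel conv_1d branches merged along the time axis, then global max pooling. A hedged usage sketch using tflearn's own data utilities; the corpus parameters and the `train_token_ids`/`train_labels` arrays are assumptions:

from tflearn.data_utils import pad_sequences, to_categorical

MAX_LEN, DICT_SIZE = 100, 10000  # assumed corpus parameters
trainX = pad_sequences(train_token_ids, maxlen=MAX_LEN, value=0.)
trainY = to_categorical(train_labels, nb_classes=2)

model = create_convolutional(MAX_LEN, DICT_SIZE)
model.fit(trainX, trainY, n_epoch=5, shuffle=True, show_metric=True)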
Example #13
    def create_actor_network(self, reuse=tf.AUTO_REUSE):
        with tf.variable_scope('actor', reuse=tf.AUTO_REUSE):
            # tf.Graph()

            inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1],self.s_dim[2]], name = "actor_input")     # input layer
            # input_loss = tflearn.layers.normalization.batch_normalization(inputs[:, 0:1, :, :])
            # input_dealy_interval = tflearn.layers.normalization.batch_normalization(inputs[:, 1:2, :, :])

            # print input_loss.shape
            # print input_dealy_interval.shape

            # split_1 = tflearn.conv_2d(inputs[:, 0:1, :, :], 64, 3, activation='LeakyReLU',restore = False)
            # split_2 = tflearn.conv_2d(inputs[:, 1:2, :, :], 64, 3, activation='LeakyReLU')

            # # split_2 = tflearn.fully_connected(inputs[:, 1:2, -1], 128, activation='relu', reuse=tf.AUTO_REUSE, scope = 'fc2')

            split_1_flat = tflearn.flatten(inputs[:, 0:1, :, :])
            split_2_flat = tflearn.flatten(inputs[:, 1:2, :, :])

            # merge_net = tflearn.merge([split_1_flat, split_2_flat], 'concat')

            dense_net_0 = tflearn.fully_connected(split_1_flat, 64, activation='LeakyReLU')
            dense_net_1 = tflearn.fully_connected(split_2_flat, 64, activation='LeakyReLU')
            merge_net = tflearn.merge([dense_net_0, dense_net_1], 'concat')

            out = tflearn.fully_connected(merge_net, self.a_dim, activation='softmax', name="actor_output")     # output layer
            return inputs, out
Example #14
def RCNN1(network, prev_activation=None, scale=False):
    if prev_activation is None:
        prev_activation = tf.zeros([1, 2500])

    if scale is True:
        network = tf.transpose(
            tf.reshape(network, [-1, num_rows * num_cols * num_channels]))
        mean, var = tf.nn.moments(network, [0])
        network = tf.transpose((network - mean) / (tf.sqrt(var) + 1e-6))
        network = tf.reshape(network, [-1, num_rows, num_cols, num_channels])

    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 2500, activation='tanh')
    network = dropout(network, drop_prob)
    feat_layer = fully_connected(network, 2500, activation='tanh')
    network = merge([feat_layer, prev_activation], 'concat', axis=1)
    network = lstm(network, 250, dropout=drop_prob, activation='relu')
    network = tflearn.fully_connected(network, 4, activation='softmax')

    return network, feat_layer
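This example calls layer functions without the `tflearn.` prefix, so it presumably relies on direct imports. A plausible import block, plus the undefined globals it assumes (the values are placeholders, not from the original project):

import tensorflow as tf
import tflearn
from tflearn.layers.core import fully_connected, dropout
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.merge_ops import merge
from tflearn.layers.recurrent import lstm

num_rows, num_cols, num_channels = 227, 227, 3  # assumed input geometry
drop_prob = 0.5                                 # assumed keep probability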
Example #15
    def create_actor_network(self):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1]])

            split_0 = tflearn.fully_connected(inputs[:, 0:1, -1], 64, activation='relu')
            split_1 = tflearn.fully_connected(inputs[:, 1:2, -1], 64, activation='relu')
            split_2 = tflearn.fully_connected(inputs[:, 4:5, -1], 64, activation='relu')

            reshape_0 = tflearn.reshape(inputs[:, 2:4, :], [-1, 2, self.s_dim[1], 1])
            split_3 = tflearn.conv_2d(reshape_0, 128, 3, activation='relu')

            split_4 = tflearn.conv_1d(inputs[:, 5:6, :], 128, 4, activation='relu')
            split_5 = tflearn.conv_1d(inputs[:, 6:7, :], 128, 4, activation='relu')

            flatten_0 = tflearn.flatten(split_3)
            flatten_1 = tflearn.flatten(split_4)
            flatten_2 = tflearn.flatten(split_5)

            merge_net = tflearn.merge([split_0, split_1, split_2, flatten_0, flatten_1, flatten_2], 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net, 128, activation='relu')

            # for multiple video, mask out the invalid actions
            linear_out = tflearn.fully_connected(dense_net_0, self.a_dim, activation='linear')
            linear_out = tf.transpose(linear_out)  # [None, a_dim] -> [a_dim, None]
            mask_out = tf.boolean_mask(linear_out, self.mask)  # [a_dim, None] -> [masked, None]
            mask_out = tf.transpose(mask_out)  # [masked, None] -> [None, masked]
            softmax_out = tf.nn.softmax(mask_out)

            return inputs, softmax_out
Example #16
def build_nce_model(num_words, num_docs, doc_embedding_size=doc_embedding_size, word_embedding_size=word_embedding_size):
    X1 = tflearn.input_data(shape=[None, 1])
    X2 = tflearn.input_data(shape=[None, 3])
    
    Y = tf.placeholder(tf.float32, [None, 1])

    d1, = tflearn.embedding(X1, input_dim=num_docs, output_dim=doc_embedding_size)
    w1, w2, w3 = tflearn.embedding(X2, input_dim=num_words, output_dim=word_embedding_size)

    embedding_layer = tflearn.merge([d1, w1, w2, w3], mode='concat')

    num_classes = num_words
    dim = doc_embedding_size + 3*word_embedding_size
        
    with tf.variable_scope("NCELoss"):
        weights = tflearn.variables.variable('W', [num_classes, dim])
        biases  = tflearn.variables.variable('b', [num_classes])

        batch_loss = tf.nn.nce_loss(weights, biases, embedding_layer, Y, num_sampled=100, num_classes=num_classes)
        loss = tf.reduce_mean(batch_loss)

    optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
    
    trainop = tflearn.TrainOp(loss=loss, optimizer=optimizer,
                          metric=None, batch_size=32)

    trainer = tflearn.Trainer(train_ops=trainop, tensorboard_verbose=0, checkpoint_path='embedding_model_nce')
    return trainer, X1, X2, Y
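Since this returns a raw `tflearn.Trainer`, training feeds the returned placeholders directly. A hedged usage sketch (the id arrays are assumptions):

trainer, X1, X2, Y = build_nce_model(num_words, num_docs)
# doc_ids: [N, 1], context_ids: [N, 3], target_ids: [N, 1]
trainer.fit({X1: doc_ids, X2: context_ids, Y: target_ids}, n_epoch=10)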
Example #17
File: a3c.py  Project: xgw/proj
    def create_actor_network(self):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])

            split_0 = tflearn.fully_connected(inputs[:, 0:1, -1],
                                              128,
                                              activation='relu')
            split_1 = tflearn.conv_1d(inputs[:, 1:2, :],
                                      128,
                                      4,
                                      activation='relu')
            split_2 = tflearn.conv_1d(inputs[:, 2:3, :NUM_OF_TRACKS],
                                      128,
                                      4,
                                      activation='relu')
            split_3 = tflearn.fully_connected(inputs[:, 3:4, -1],
                                              128,
                                              activation='relu')

            split_1_flat = tflearn.flatten(split_1)
            split_2_flat = tflearn.flatten(split_2)

            merge_net = tflearn.merge(
                [split_0, split_1_flat, split_2_flat, split_3], 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net,
                                                  128,
                                                  activation='relu')
            out = tflearn.fully_connected(dense_net_0,
                                          self.a_dim,
                                          activation='softmax')

            return inputs, out
Example #18
 def create_critic_network(self, weights_initial, layer_sizes):
     inputs = tflearn.input_data(shape=[None, self.state_size])
     action_all = tflearn.input_data(
         shape=[None, self.action_dim + self.action_param_dim])
     #action_input = tflearn.input_data(shape=[None, self.action_dim])
     #action_params = tflearn.input_data(shape=[None, self.action_param_dim])
     # merged_input = tflearn.merge([inputs, action_input, action_params], 'concat', 1)
     net = tflearn.merge([inputs, action_all], 'concat', 1)
     for i in xrange(1, len(layer_sizes) - 1):
         net = tflearn.fully_connected(net,
                                       layer_sizes[i],
                                       weights_init=weights_initial[i - 1])
         net = tflearn.activation(tflearn.activations.leaky_relu(net, 0.01))
     '''
     net = tflearn.fully_connected(net, 1024, weights_init = weights_initial[0])
     net = tflearn.activation(tflearn.activations.leaky_relu(net, 0.01))
     
     net = tflearn.fully_connected(net, 512, weights_init = weights_initial[1])
     net = tflearn.activation(tflearn.activations.leaky_relu(net, 0.01))
     
     net = tflearn.fully_connected(net, 256, weights_init = weights_initial[2])
     net = tflearn.activation(tflearn.activations.leaky_relu(net, 0.01))
     
     net = tflearn.fully_connected(net, 128, weights_init = weights_initial[3])
     net = tflearn.activation(tflearn.activations.leaky_relu(net, 0.01))
     '''
     # linear layer connected to 1 output representing Q(s,a)
     # Weights are init to normal(0, 0.01)
     out = tflearn.fully_connected(
         net, 1, weights_init=weights_initial[len(weights_initial) - 1])
     #return inputs, action_input, action_params, out
     return inputs, action_all, out
Example #19
def create_network(s_lengths):
    '''
    Create the neural network.

    :a_dim: array containing the dimension space for each action
    :s_len: array containing the length of each metric information
    :last_layer: activation mode for the output neurons
    '''
    inputs = tflearn.input_data(shape=[None, len(s_lengths), max(s_lengths)])
    splits = list()

    # Add a convolution layer for each input vector
    for i, s_len in enumerate(s_lengths):
        splits.append(
            tflearn.conv_1d(inputs[:, i:i + 1, :s_len],
                            128,
                            4,
                            activation='relu',
                            name='Input%s' % i))

    # Merge all initial convolution layers
    dense_net = tflearn.merge(splits, 'concat', name='MergeNet')

    # Hidden layers
    for i in range(NETWORK_DEPTH):
        dense_net = tflearn.conv_1d(dense_net,
                                    128,
                                    4,
                                    activation='relu',
                                    name='Dense%s' % i)

    return inputs, dense_net
Example #20
def create_multi_digit_model(model_file='', digit_count=DIGIT_COUNT):
    input_layer, last_cnn_layer = create_cnn_layers()

    outputs = []
    for index in range(0, digit_count):
        # h = Dense(256, activation='relu')(last_cnn_layer)
        h = fully_connected(last_cnn_layer, 256, activation='relu')
        # h = Dropout(0.5)(h)
        h = dropout(h, 1 - 0.5)
        out_name = OUT_PUT_NAME_FORMAT % index
        # output = Dense(CLASS_COUNT, activation='softmax', name=out_name)(h)
        h = fully_connected(h, CLASS_COUNT, activation='softmax')
        output = regression(h,
                            optimizer=OPTIMIZER,
                            loss='categorical_crossentropy',
                            name=out_name,
                            op_name=out_name)
        outputs.append(output)

    network = tflearn.merge(outputs, 'concat')

    # model = Model(input=input_layer, output=outputs)
    model = tflearn.DNN(network,
                        tensorboard_verbose=3,
                        tensorboard_dir='./logs/')

    return model
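Because the model merges one regression op per digit position, `fit` takes one target array per output, as in the multi-target XOR examples above. A hedged usage sketch (the array names are assumptions):

model = create_multi_digit_model(digit_count=4)
# one one-hot target array of shape [N, CLASS_COUNT] per digit
model.fit(images, [y0, y1, y2, y3], n_epoch=20, show_metric=True)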
Example #21
File: gray.py  Project: yogeshVU/QARC
def attention(inputs, attention_size):
    _inputs = tflearn.merge(inputs, 'concat')
    #print _inputs.get_shape().as_list()
    inputs = tf.reshape(_inputs, (-1, INPUT_SEQ, HIDDEN_UNIT))
    # the length of sequences processed in the antecedent RNN layer
    sequence_length = inputs.get_shape()[1].value
    hidden_size = inputs.get_shape()[2].value  # hidden size of the RNN layer

    # Attention mechanism
    W_omega = tf.Variable(
        tf.random_normal([hidden_size, attention_size], stddev=0.1))
    b_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
    u_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))

    v = tf.tanh(
        tf.matmul(tf.reshape(inputs, [-1, hidden_size]), W_omega) +
        tf.reshape(b_omega, [1, -1]))
    vu = tf.matmul(v, tf.reshape(u_omega, [-1, 1]))
    #alphas = tf.nn.softmax(vu)
    exps = tf.reshape(tf.exp(vu), [-1, sequence_length])
    alphas = exps / tf.reshape(tf.reduce_sum(exps, 1), [-1, 1])

    # Output of Bi-RNN is reduced with attention vector
    output = tf.reduce_sum(
        inputs * tf.reshape(alphas, [-1, sequence_length, 1]), 1)

    return output, alphas
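Written out, with h_t the t-th GRU state and T the sequence length, the code above computes:

$$v_t = \tanh(W_\omega h_t + b_\omega), \qquad \alpha_t = \frac{\exp(v_t^\top u_\omega)}{\sum_{s=1}^{T} \exp(v_s^\top u_\omega)}, \qquad \text{output} = \sum_{t=1}^{T} \alpha_t h_t$$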
Example #22
    def npi_core(self):
        """
        Build the NPI LSTM core, feeding the program embedding and state encoding to a multi-layered
        LSTM, returning the h-state of the final LSTM layer.

        References: Reed, de Freitas [2]
        """
        s_in = self.state_encoding  # Shape: [bsz, state_dim]
        p_in = self.program_embedding  # Shape: [bsz, 1, program_dim]

        # Reshape state_in
        s_in = tflearn.reshape(
            s_in, [-1, 1, self.state_dim])  # Shape: [bsz, 1, state_dim]

        # Concatenate s_in, p_in
        c = tflearn.merge([s_in, p_in], 'concat',
                          axis=2)  # Shape: [bsz, 1, state + prog]

        # Feed through Multi-Layer LSTM
        for i in range(self.npi_core_layers):
            c, [self.h_states[i]] = self.lstm(c,
                                              self.npi_core_dim,
                                              return_seq=True,
                                              initial_state=self.h_states[i],
                                              return_states=True)

        # Return Top-Most LSTM H-State
        top_state = tf.split(self.h_states[-1], 2, 1)[1]
        return top_state  # Shape: [bsz, npi_core_dim]
Example #23
def xor_operation():
    # Function to simulate XOR operation using graph combo of NAND and OR
    X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
    Y_nand = [[1.], [1.], [1.], [0.]]
    Y_or = [[0.], [1.], [1.], [1.]]

    with tf.Graph().as_default():
        graph = tflearn.input_data(shape=[None, 2])
        graph_nand = tflearn.fully_connected(graph, 32, activation='linear')
        graph_nand = tflearn.fully_connected(graph_nand, 32, activation='linear')
        graph_nand = tflearn.fully_connected(graph_nand, 1, activation='sigmoid')
        graph_nand = tflearn.regression(graph_nand, optimizer='sgd', learning_rate=2., loss='binary_crossentropy')

        graph_or = tflearn.fully_connected(graph, 32, activation='linear')
        graph_or = tflearn.fully_connected(graph_or, 32, activation='linear')
        graph_or = tflearn.fully_connected(graph_or, 1, activation='sigmoid')
        graph_or = tflearn.regression(graph_or, optimizer='sgd', learning_rate=2., loss='binary_crossentropy')

        graph_xor = tflearn.merge([graph_nand, graph_or], mode='elemwise_mul')

        # Model training
        model = tflearn.DNN(graph_xor)

        model.fit(X, [Y_nand, Y_or], n_epoch=100, snapshot_epoch=False)
        prediction = model.predict([[0., 1.]])
        print("Prediction: ", prediction)
Example #24
File: ddpg.py  Project: wnstlr/rl_project
    def create_critic_network(self):
        inputs = tflearn.input_data(shape=[None, self.state_size])
        action_all = tflearn.input_data(
            shape=[None, self.action_dim + self.action_param_dim])
        #action_input = tflearn.input_data(shape=[None, self.action_dim])
        #action_params = tflearn.input_data(shape=[None, self.action_param_dim])
        # merged_input = tflearn.merge([inputs, action_input, action_params], 'concat', 1)
        merged_input = tflearn.merge([inputs, action_all], 'concat', 1)
        w_init = tflearn.initializations.normal(stddev=0.01)
        net = tflearn.fully_connected(merged_input, 1024, weights_init=w_init)
        net = tflearn.activation(tflearn.activations.leaky_relu(net, 0.01))

        w_init = tflearn.initializations.normal(stddev=0.01)
        net = tflearn.fully_connected(net, 512, weights_init=w_init)
        net = tflearn.activation(tflearn.activations.leaky_relu(net, 0.01))

        w_init = tflearn.initializations.normal(stddev=0.01)
        net = tflearn.fully_connected(net, 256, weights_init=w_init)
        net = tflearn.activation(tflearn.activations.leaky_relu(net, 0.01))

        w_init = tflearn.initializations.normal(stddev=0.01)
        net = tflearn.fully_connected(net, 128, weights_init=w_init)
        net = tflearn.activation(tflearn.activations.leaky_relu(net, 0.01))

        # linear layer connected to 1 output representing Q(s,a)
        # Weights are init to normal(0, 0.01)
        w_init = tflearn.initializations.normal(stddev=0.01)
        out = tflearn.fully_connected(net, 1, weights_init=w_init)
        #return inputs, action_input, action_params, out
        return inputs, action_all, out
Example #25
def vqn_model(x):
    with tf.variable_scope('vqn'):
        inputs = tflearn.input_data(placeholder=x)
        _split_array = []

        for i in range(INPUT_SEQ):
            tmp_network = tf.reshape(inputs[:, i:i + 1, :, :, :],
                                     [-1, INPUT_H, INPUT_W, INPUT_D])
            if i == 0:
                _split_array.append(CNN_Core(tmp_network))
            else:
                _split_array.append(CNN_Core(tmp_network, True))

        merge_net = tflearn.merge(_split_array, 'concat')
        merge_net = tflearn.flatten(merge_net)
        _count = merge_net.get_shape().as_list()[1]

        with tf.variable_scope('full-lstm'):
            net = tf.reshape(merge_net, [-1, INPUT_SEQ, _count / INPUT_SEQ])
            net = tflearn.gru(net, DENSE_SIZE, return_seq=True)
            out_gru = tflearn.gru(net, DENSE_SIZE, dropout=0.8)
            gru_result = tflearn.fully_connected(out_gru,
                                                 DENSE_SIZE,
                                                 activation='relu')

        out = tflearn.fully_connected(gru_result,
                                      OUTPUT_DIM,
                                      activation='sigmoid')

        return out
Example #26
File: a3c.py  Project: yogeshVU/QARC
    def create_actor_network(self):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])
            split_array = []
            for i in xrange(self.s_dim[0] - 1):
                split = tflearn.conv_1d(inputs[:, i:i + 1, :],
                                        FEATURE_NUM,
                                        KERNEL,
                                        activation='relu')
                flattened = tflearn.flatten(split)
                split_array.append(flattened)

            dense_net = tflearn.fully_connected(inputs[:, -1:, :],
                                                FEATURE_NUM,
                                                activation='relu')
            split_array.append(dense_net)
            merge_net = tflearn.merge(split_array, 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net,
                                                  64,
                                                  activation='relu')
            # dense_net_0 = tflearn.dropout(dense_net_0, 0.8)
            out = tflearn.fully_connected(dense_net_0,
                                          self.a_dim,
                                          activation='softmax')

            return inputs, out
Example #27
File: dualppo.py  Project: zchao520/Zwei
    def CreateNetwork(self, inputs):
        with tf.variable_scope('actor'):
            split_0 = tflearn.fully_connected(
                inputs[:, 0:1, -1], FEATURE_NUM, activation='relu')
            split_1 = tflearn.fully_connected(
                inputs[:, 1:2, -1], FEATURE_NUM, activation='relu')
            split_2 = tflearn.conv_1d(
                inputs[:, 2:3, :], FEATURE_NUM, 4, activation='relu')
            split_3 = tflearn.conv_1d(
                inputs[:, 3:4, :], FEATURE_NUM, 4, activation='relu')
            split_4 = tflearn.conv_1d(
                inputs[:, 4:5, :self.a_dim], FEATURE_NUM, 4, activation='relu')
            split_5 = tflearn.fully_connected(
                inputs[:, 5:6, -1], FEATURE_NUM, activation='relu')

            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)

            merge_net = tflearn.merge(
                [split_0, split_1, split_2_flat, split_3_flat, split_4_flat, split_5], 'concat')

            pi_net = tflearn.fully_connected(
                merge_net, FEATURE_NUM, activation='relu')
            pi = tflearn.fully_connected(pi_net, self.a_dim, activation='softmax') 
            val_net = tflearn.fully_connected(
                merge_net, FEATURE_NUM, activation='relu')
            val = tflearn.fully_connected(val_net, 1, activation='tanh') 
            return pi, val
Example #28
File: a3c.py  Project: wushilan/arsenal
    def create_critic_network(self):
        with tf.variable_scope('critic'):
            inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1],self.s_dim[2]])
            print 'self.s_dim[0]',self.s_dim[0]
            # split_0 = tflearn.fully_connected(inputs[:, 0:1, -1], 128, activation='relu', reuse=tf.AUTO_REUSE, scope = 'critic_fc1')
            print 'inputs[:, 0:1, :].shape:', inputs[:, 0:1, :].shape
            print 'inputs[:, 1:2, :].shape:', inputs[:, 1:2, :].shape
            x_reshape1 = tflearn.reshape(inputs[:, 0:1, :], [-1, self.s_dim[1],self.s_dim[2],1])
            x_reshape2 = tflearn.reshape(inputs[:, 1:2, :], [-1, self.s_dim[1],self.s_dim[2],1])
            print 'x_reshape1.shape:', x_reshape1.shape
            print 'x_reshape2.shape:', x_reshape2.shape
            split_1 = tflearn.conv_2d(x_reshape1, 128, 4, activation='relu', scope = 'critic_conv1_1')
            print 'split_1.shape:', split_1.shape
            # split_2 = tflearn.fully_connected(inputs[:, 1:2, -1], 128, activation='relu', reuse=tf.AUTO_REUSE, scope = 'critic_fc2')
            split_2 = tflearn.conv_2d(x_reshape2, 128, 4, activation='relu', scope = 'critic_conv1_2')
            print 'split_2.shape:', split_2.shape

            split_1_flat_1 = tflearn.flatten(split_1)
            split_1_flat_2 = tflearn.flatten(split_2)

            merge_net = tflearn.merge([split_1_flat_1, split_1_flat_2], 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net, 128, activation='relu', scope = 'critic_fc3')
            out = tflearn.fully_connected(dense_net_0, 1, activation='linear', scope = 'critic_fc4')

            return inputs, out
Example #29
    def create_actor_network(self):
        with tf.variable_scope(self.scope + '-actor'):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])
            split_array = []
            for i in range(self.s_dim[0]):
                tmp = tf.reshape(inputs[:, i:i + 1, :], (-1, self.s_dim[1], 1))
                split = tflearn.conv_1d(tmp,
                                        FEATURE_NUM // 4,
                                        KERNEL,
                                        activation='relu')
                split = tflearn.avg_pool_1d(split, 2)
                split = tflearn.conv_1d(split,
                                        FEATURE_NUM // 2,
                                        KERNEL,
                                        activation='relu')
                split = tflearn.avg_pool_1d(split, 2)
                split = tflearn.conv_1d(split,
                                        FEATURE_NUM,
                                        KERNEL,
                                        activation='relu')
                #split = tflearn.avg_pool_1d(split, 2)
                flattened = tflearn.flatten(split)
                split_array.append(flattened)
            dense_net_0 = tflearn.merge(split_array, 'concat')
            dense_net_0 = tflearn.fully_connected(dense_net_0,
                                                  FEATURE_NUM,
                                                  activation='relu')
            out = tflearn.fully_connected(dense_net_0,
                                          self.a_dim,
                                          activation='softmax')

            return inputs, out
Example #30
    def npi_core(self):
        """
        Build the NPI LSTM core, feeding the program embedding and state encoding to a multi-layered
        LSTM, returning the h-state of the final LSTM layer.

        References: Reed, de Freitas [2]
        """
        s_in = self.state_encoding  # Shape: [bsz, state_dim]
        p_in = self.program_embedding  # Shape: [bsz, 1, program_dim]

        # Reshape state_in
        s_in = tflearn.reshape(
            s_in, [-1, 1, self.state_dim])  # Shape: [bsz, 1, state_dim]

        # Concatenate s_in, p_in
        c = tflearn.merge([s_in, p_in], 'concat',
                          axis=2)  # Shape: [bsz, 1, state + prog]

        # Feed through Multi-Layer LSTM
        # print('-'*100)
        net, state = tflearn.layers.recurrent.lstm(c,
                                                   self.npi_core_dim,
                                                   return_seq=True,
                                                   return_state=True)
        net, state = tflearn.layers.recurrent.lstm(net,
                                                   self.npi_core_dim,
                                                   return_seq=True,
                                                   return_state=True)
        # print('*'*100)
        # print(state)
        top_state = state.c
        return top_state
Example #31
	def build_network(self, num_classes, input_shape, model):
		network = tflearn.input_data(shape=[None, input_shape[0], input_shape[1], input_shape[2]])
		if model == 'DeepFace':
			conv_1 = tflearn.relu(tflearn.conv_2d(network, 32, 11, strides=1, padding='VALID', name='Conv2d_1'))
			maxpool_1 = tflearn.max_pool_2d(conv_1, 3, strides=2, padding='VALID', name='MaxPool_1')
			conv_2 = tflearn.relu(tflearn.conv_2d(maxpool_1, 32, 9, strides=1, padding='VALID', name='Conv2d_2'))

			local_1 = tflearn.relu(self.local(conv_2, 16, 9, 1, 'Local_1'))
			local_2 = tflearn.relu(self.local(local_1, 16, 7, 1, 'Local_2'))
			local_3 = tflearn.relu(self.local(local_2, 16, 5, 1, 'Local_3'))

			flattened = tflearn.flatten(local_3)
			full_1 = tflearn.dropout(tflearn.relu(tflearn.fully_connected(flattened, 4096, name='Fully_Connected_1')), 0.5)
			output = tflearn.fully_connected(full_1, num_classes, activation='softmax', name='Output')

		elif model == 'Song':
			conv_1 = tflearn.relu(tflearn.conv_2d(network, 64, 5, strides=1, padding='VALID', name='Conv_1'))
			maxpool_1 = tflearn.max_pool_2d(conv_1, 3, strides=2, padding='VALID', name='MaxPool_1')
			conv_2 = tflearn.relu(tflearn.conv_2d(maxpool_1, 64 , 5, strides=1, padding='VALID', name='Conv_2'))
			maxpool_2 = tflearn.max_pool_2d(conv_2, 3, strides=2, padding='VALID', name='MaxPool_2')

			local_1 = tflearn.dropout(tflearn.relu(self.local(maxpool_2, 32, 3, 1, 'Local_1')), 1)
			local_2 = tflearn.dropout(tflearn.relu(self.local(local_1, 32, 3, 1, 'Local_2')), 1)
			flattened = tflearn.flatten(local_2)
			output = tflearn.fully_connected(flattened, num_classes, activation='softmax', name='Output')

		else:
			conv_1 = tflearn.relu(tflearn.conv_2d(network, 64, 7, strides=2, bias=True, padding='VALID', name='Conv2d_1'))
			maxpool_1 = tflearn.batch_normalization(tflearn.max_pool_2d(conv_1, 3, strides=2, padding='VALID', name='MaxPool_1'))

			conv_2a = tflearn.relu(tflearn.conv_2d(maxpool_1, 96, 1, strides=1, padding='VALID', name='Conv_2a_FX1'))
			maxpool_2a = tflearn.max_pool_2d(maxpool_1, 3, strides=1, padding='VALID', name='MaxPool_2a_FX1')
			conv_2b = tflearn.relu(tflearn.conv_2d(conv_2a, 208, 3, strides=1, padding='VALID', name='Conv_2b_FX1'))
			conv_2c = tflearn.relu(tflearn.conv_2d(maxpool_2a, 64, 1, strides=1, padding='VALID', name='Conv_2c_FX1'))
			FX1_out = tflearn.merge([conv_2b, conv_2c], mode='concat', axis=3, name='FX1_out')

			conv_3a = tflearn.relu(tflearn.conv_2d(FX1_out, 96, 1, strides=1, padding='VALID', name='Conv_3a_FX2'))
			maxpool_3a = tflearn.max_pool_2d(FX1_out, 3, strides=1, padding='VALID', name='MaxPool_3a_FX2')
			conv_3b = tflearn.relu(tflearn.conv_2d(conv_3a, 208, 3, strides=1, padding='VALID', name='Conv_3b_FX2'))
			conv_3c = tflearn.relu(tflearn.conv_2d(maxpool_3a, 64, 1, strides=1, padding='VALID', name='Conv_3c_FX2'))
			FX2_out = tflearn.merge([conv_3b, conv_3c], mode='concat', axis=3, name='FX2_out')
			net = tflearn.flatten(FX2_out)
			output = tflearn.fully_connected(net, num_classes, activation='softmax', name='Output')

		return tflearn.regression(output, optimizer='Adam', loss='categorical_crossentropy', learning_rate=0.000001)
Example #32
    def create_actor_network(self):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])

            split_0 = tflearn.fully_connected(inputs[:, 0:1, -1],
                                              128,
                                              activation='relu')
            split_1 = tflearn.fully_connected(inputs[:, 1:2, -1],
                                              128,
                                              activation='relu')
            split_2 = tflearn.conv_1d(inputs[:, 2:3, -self.s_len:],
                                      128,
                                      4,
                                      activation='relu')
            split_3 = tflearn.conv_1d(inputs[:, 3:4, -self.s_len:],
                                      128,
                                      4,
                                      activation='relu')
            split_4 = tflearn.conv_1d(inputs[:, 4:5, :A_DIM],
                                      128,
                                      4,
                                      activation='relu')
            split_5 = tflearn.fully_connected(inputs[:, 4:5, -1],
                                              128,
                                              activation='relu')

            # Extra input (success flag), used only in the actor network:
            # test_6 = tflearn.fully_connected(inputs[:, 5:6, -1], 128, activation='relu')

            # [buffer1,bitrate1,buffer2,bitrate2,...]

            otherAgentData = tflearn.conv_1d(inputs[:, 5:6, :],
                                             128,
                                             2,
                                             strides=2,
                                             activation='relu')

            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)
            otherAgentData_flat = tflearn.flatten(otherAgentData)

            merge_net = tflearn.merge([
                split_0, split_1, split_2_flat, split_3_flat, split_4_flat,
                split_5, otherAgentData_flat
            ], 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net,
                                                  128,
                                                  activation='relu')
            out = tflearn.fully_connected(dense_net_0,
                                          self.a_dim,
                                          activation='softmax')

            return inputs, out
Example #33
def Network_LSTM(S_LEN, Input_LEN, length, class_num):

    data_len = Input_LEN * (S_LEN * 2 + 1)
    x = tf.placeholder(tf.float32, shape=[None, length, data_len], name='x')
    y_ = tf.placeholder(tf.int32, shape=[
        None,
    ], name='y_')
    # x_reshape = tflearn.reshape(x, [-1, length,data_len])

    # print x_reshape.shape
    split_1 = tflearn.lstm(x[:, :, 0:Input_LEN * S_LEN],
                           64,
                           activation='tanh',
                           inner_activation='LeakyReLU',
                           return_seq=False,
                           name='LSTM_1')
    split_2 = tflearn.lstm(x[:, :, Input_LEN * S_LEN:Input_LEN * S_LEN * 2],
                           64,
                           activation='tanh',
                           inner_activation='LeakyReLU',
                           return_seq=False,
                           name='LSTM_2')
    split_3 = tflearn.lstm(x[:, :, Input_LEN * S_LEN * 2:data_len],
                           64,
                           activation='tanh',
                           inner_activation='LeakyReLU',
                           return_seq=False,
                           name='LSTM_3')

    # split_1 = tflearn.conv_2d(x_reshape[:,:,0:Input_LEN*S_LEN,:], 64, 3, activation='LeakyReLU',restore = False)
    # split_2 = tflearn.conv_2d(x_reshape[:,:,Input_LEN*S_LEN:Input_LEN*S_LEN*2,:], 64, 3, activation='LeakyReLU')
    # split_3 = tflearn.conv_2d(x_reshape[:,:,Input_LEN*S_LEN*2:data_len,:], 64, 3, activation='LeakyReLU')
    # print split_1.shape
    # print split_2.shape
    # print split_3.shape

    # dense_concat = tf.concat([split_1[:,:,:,np.newaxis],split_2[:,:,:,np.newaxis],split_3[:,:,:,np.newaxis]],axis = 2)
    dense_concat = tflearn.merge([split_1, split_2, split_3], 'concat', axis=1)
    # print dense_concat.shape

    # cov=tflearn.conv_2d(dense_concat, 128, 3, activation='relu')
    # #print type(cov)
    # cov = tflearn.flatten(cov)
    #print cov.shape
    logits = tf.layers.dense(
        inputs=dense_concat,
        units=256,
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
    logits = tf.layers.dense(
        inputs=logits,
        units=class_num,
        activation=None,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
    return logits, x, y_
Example #34
    def test_core_layers(self):

        X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
        Y_nand = [[1.], [1.], [1.], [0.]]
        Y_or = [[0.], [1.], [1.], [1.]]

        # Graph definition
        with tf.Graph().as_default():
            # Building a network with 2 optimizers
            g = tflearn.input_data(shape=[None, 2])

            # Nand operator definition
            g_nand = tflearn.fully_connected(g, 32, activation='linear')
            g_nand = tflearn.fully_connected(g_nand, 32, activation='linear')
            g_nand = tflearn.fully_connected(g_nand, 1, activation='sigmoid')
            g_nand = tflearn.regression(g_nand, optimizer='sgd',
                                        learning_rate=2.,
                                        loss='binary_crossentropy')
            # Or operator definition
            g_or = tflearn.fully_connected(g, 32, activation='linear')
            g_or = tflearn.fully_connected(g_or, 32, activation='linear')
            g_or = tflearn.fully_connected(g_or, 1, activation='sigmoid')
            g_or = tflearn.regression(g_or, optimizer='sgd',
                                      learning_rate=2.,
                                      loss='binary_crossentropy')
            # XOR merging Nand and Or operators
            g_xor = tflearn.merge([g_nand, g_or], mode='elemwise_mul')

            # Training
            m = tflearn.DNN(g_xor)
            m.fit(X, [Y_nand, Y_or], n_epoch=400, snapshot_epoch=False)

            # Testing
            self.assertLess(m.predict([[0., 0.]])[0][0], 0.01)
            self.assertGreater(m.predict([[0., 1.]])[0][0], 0.9)
            self.assertGreater(m.predict([[1., 0.]])[0][0], 0.9)
            self.assertLess(m.predict([[1., 1.]])[0][0], 0.01)

        # Bulk Tests
        with tf.Graph().as_default():
            net = tflearn.input_data(shape=[None, 2])
            net = tflearn.flatten(net)
            net = tflearn.reshape(net, new_shape=[-1])
            net = tflearn.activation(net, 'relu')
            net = tflearn.dropout(net, 0.5)
            net = tflearn.single_unit(net)
Example #35
 def build_simple_model(self):
     """Build a simple model for test
     Returns:
         DNN, [ (input layer name, input placeholder, input data) ], Target data
     """
     inputPlaceholder1, inputPlaceholder2 = \
         tf.placeholder(tf.float32, (1, 1), name = "input1"), tf.placeholder(tf.float32, (1, 1), name = "input2")
     input1 = tflearn.input_data(placeholder = inputPlaceholder1)
     input2 = tflearn.input_data(placeholder = inputPlaceholder2)
     network = tflearn.merge([ input1, input2 ], "sum")
     network = tflearn.reshape(network, (1, 1))
     network = tflearn.fully_connected(network, 1)
     network = tflearn.regression(network)
     return (
         tflearn.DNN(network),
         [ ("input1:0", inputPlaceholder1, self.INPUT_DATA_1), ("input2:0", inputPlaceholder2, self.INPUT_DATA_2) ],
         self.TARGET,
     )
Example #36
def build_model(num_words, num_docs,
		doc_embedding_size=doc_embedding_size, word_embedding_size=word_embedding_size):
    input_layer1 = tflearn.input_data(shape=[None, 1])
    input_layer2 = tflearn.input_data(shape=[None, 3])

    d1, = tflearn.embedding(input_layer1, input_dim=num_docs, output_dim=doc_embedding_size)
    w1, w2, w3 = tflearn.embedding(input_layer2, input_dim=num_words, output_dim=word_embedding_size)

    embedding_layer = tflearn.merge([d1, w1, w2, w3], mode='concat')
    softmax = tflearn.fully_connected(embedding_layer, num_words, activation='softmax')

    optimizer = tflearn.optimizers.Adam(learning_rate=0.001)
    # optimizer = tflearn.optimizers.SGD(learning_rate=0.1)

    metric = tflearn.metrics.Accuracy()
    net = tflearn.regression(softmax, optimizer=optimizer, metric=metric, batch_size=16,
                             loss='categorical_crossentropy')

    model = tflearn.DNN(net, tensorboard_verbose=0, checkpoint_path='embedding_model')
    return model
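With two `input_data` layers, `DNN.fit` takes a list of input arrays, one per layer. A hedged usage sketch (the array names are assumptions):

model = build_model(num_words, num_docs)
# doc_ids: [N, 1], context_ids: [N, 3], targets: one-hot [N, num_words]
model.fit([doc_ids, context_ids], targets, n_epoch=10, show_metric=True)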
Example #37
def create_multi_digit_model(model_file='', digit_count=DIGIT_COUNT):
    input_layer, last_cnn_layer = create_cnn_layers()

    outputs = []
    for index in range(0, digit_count):
        # h = Dense(256, activation='relu')(last_cnn_layer)
        h = fully_connected(last_cnn_layer, 256, activation='relu')
        # h = Dropout(0.5)(h)
        h = dropout(h, 1-0.5)
        out_name = OUT_PUT_NAME_FORMAT % index
        # output = Dense(CLASS_COUNT, activation='softmax', name=out_name)(h)
        h = fully_connected(h, CLASS_COUNT, activation='softmax')
        output = regression(h, optimizer=OPTIMIZER,
                     loss='categorical_crossentropy', name=out_name, op_name=out_name)
        outputs.append(output)

    network = tflearn.merge(outputs, 'concat')

    # model = Model(input=input_layer, output=outputs)
    model = tflearn.DNN(network, tensorboard_verbose=3, tensorboard_dir='./logs/')

    return model
Example #38
File: logical.py  Project: 21hub/tflearn
with tf.Graph().as_default():
    # Building a network with 2 optimizers
    g = tflearn.input_data(shape=[None, 2])
    # Nand operator definition
    g_nand = tflearn.fully_connected(g, 32, activation='linear')
    g_nand = tflearn.fully_connected(g_nand, 32, activation='linear')
    g_nand = tflearn.fully_connected(g_nand, 1, activation='sigmoid')
    g_nand = tflearn.regression(g_nand, optimizer='sgd',
                                learning_rate=2.,
                                loss='binary_crossentropy')
    # Or operator definition
    g_or = tflearn.fully_connected(g, 32, activation='linear')
    g_or = tflearn.fully_connected(g_or, 32, activation='linear')
    g_or = tflearn.fully_connected(g_or, 1, activation='sigmoid')
    g_or = tflearn.regression(g_or, optimizer='sgd',
                              learning_rate=2.,
                              loss='binary_crossentropy')
    # XOR merging Nand and Or operators
    g_xor = tflearn.merge([g_nand, g_or], mode='elemwise_mul')

    # Training
    m = tflearn.DNN(g_xor)
    m.fit(X, [Y_nand, Y_or], n_epoch=400, snapshot_epoch=False)

    # Testing
    print("Testing XOR operator")
    print("0 xor 0:", m.predict([[0., 0.]]))
    print("0 xor 1:", m.predict([[0., 1.]]))
    print("1 xor 0:", m.predict([[1., 0.]]))
    print("1 xor 1:", m.predict([[1., 1.]]))