Code Example #1
File: a3c.py Project: wushilan/arsenal
    def create_critic_network(self):
        with tf.variable_scope('critic'):
            inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1],self.s_dim[2]])
            print('self.s_dim[0]', self.s_dim[0])
            # split_0 = tflearn.fully_connected(inputs[:, 0:1, -1], 128, activation='relu', reuse=tf.AUTO_REUSE, scope = 'critic_fc1')
            print('inputs[:, 0:1, :].shape:', inputs[:, 0:1, :].shape)
            print('inputs[:, 1:2, :].shape:', inputs[:, 1:2, :].shape)
            x_reshape1 = tflearn.reshape(inputs[:, 0:1, :], [-1, self.s_dim[1],self.s_dim[2],1])
            x_reshape2 = tflearn.reshape(inputs[:, 1:2, :], [-1, self.s_dim[1],self.s_dim[2],1])
            print('x_reshape1.shape:', x_reshape1.shape)
            print('x_reshape2.shape:', x_reshape2.shape)
            split_1 = tflearn.conv_2d(x_reshape1, 128, 4, activation='relu', scope = 'critic_conv1_1')
            print('split_1.shape:', split_1.shape)
            # split_2 = tflearn.fully_connected(inputs[:, 1:2, -1], 128, activation='relu', reuse=tf.AUTO_REUSE, scope = 'critic_fc2')
            split_2 = tflearn.conv_2d(x_reshape2, 128, 4, activation='relu', scope = 'critic_conv1_2')
            print('split_2.shape:', split_2.shape)

            split_1_flat_1 = tflearn.flatten(split_1)
            split_1_flat_2 = tflearn.flatten(split_2)

            merge_net = tflearn.merge([split_1_flat_1, split_1_flat_2], 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net, 128, activation='relu', scope = 'critic_fc3')
            out = tflearn.fully_connected(dense_net_0, 1, activation='linear', scope = 'critic_fc4')

            return inputs, out
Code Example #2
File: test_layers.py Project: wvg-ds/tflearn
    def test_conv_layers(self):

        X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
        Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]

        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2, activation='relu')
            g = tflearn.max_pool_2d(g, 2)
            g = tflearn.fully_connected(g, 2, activation='softmax')
            g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

            m = tflearn.DNN(g)
            m.fit(X, Y, n_epoch=100, snapshot_epoch=False)
            self.assertGreater(m.predict([[1., 0., 0., 0.]])[0][0], 0.9)

        # Bulk Tests
        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2)
            g = tflearn.conv_2d(g, 4, 1)
            g = tflearn.conv_2d_transpose(g, 4, 2, [2, 2])
            g = tflearn.max_pool_2d(g, 2)
Code Example #3
 def sam_dan_2(self):
     imgprep = tflearn.data_preprocessing.ImagePreprocessing()
     imgprep.add_featurewise_zero_center()
     imgprep.add_featurewise_stdnorm()
     network = tflearn.layers.core.input_data([None, 128, 128], dtype=np.float32)  # ,data_preprocessing=imgprep)
     network = tflearn.reshape(network, new_shape=[-1])
     network = tflearn.reshape(network, new_shape=[-1, 128, 128, 1])
     network = tflearn.layers.conv.conv_2d(network, 32, 5, strides=2, activation='relu', regularizer="L2")
     network = tflearn.layers.conv.max_pool_2d(network, 2)
     # network = tflearn.layers.local_response_normalization(network)
     #network = tflearn.layers.conv.conv_2d(network, 64, 5, activation='relu', regularizer="L2")
     network = tflearn.layers.conv.conv_2d(network, 32, 3, activation='relu', regularizer="L2")
     network = tflearn.layers.conv.max_pool_2d(network, 2)
     #network = tflearn.layers.core.fully_connected(network, 128, activation='relu')
     network = tflearn.layers.core.dropout(network, 0.5)
     network = tflearn.layers.core.fully_connected(network, 10, activation='softmax', regularizer="L1")
     network = tflearn.layers.estimator.regression(network, optimizer='adam', loss='categorical_crossentropy',
                                                   learning_rate=.00001)
     self.network = network
     return 0
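     # NOTE: the lines below never run because of the early "return 0" above.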
     model = tflearn.DNN(network, tensorboard_verbose=0, checkpoint_path='first_test.tf1_2.ckpt')
     # model.load("Models/sam_dan.tfl")
     #print(self.trainlabels)
     model.fit(self.dataset, self.datalabels, n_epoch=100, shuffle=None,
               validation_set=.3, show_metric=True, batch_size=100, snapshot_epoch=True,
               run_id='First_test')
     model.save("Models/sam_dan_2.tfl")
Code Example #4
File: test_layers.py Project: EddywardoFTW/tflearn
    def test_conv_layers(self):

        X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
        Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]

        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2, activation='relu')
            g = tflearn.max_pool_2d(g, 2)
            g = tflearn.fully_connected(g, 2, activation='softmax')
            g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

            m = tflearn.DNN(g)
            m.fit(X, Y, n_epoch=100, snapshot_epoch=False)
            # TODO: Fix test
            #self.assertGreater(m.predict([[1., 0., 0., 0.]])[0][0], 0.5)

        # Bulk Tests
        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2)
            g = tflearn.conv_2d(g, 4, 1)
            g = tflearn.conv_2d_transpose(g, 4, 2, [2, 2])
            g = tflearn.max_pool_2d(g, 2)
Code Example #5
def stock_predictor(inputs, feature_number, predictor_type, use_batch_norm, activation_function, weight_decay):
    window_length = inputs.get_shape()[2]
    assert predictor_type in ['cnn', 'lstm'], 'type must be either cnn or lstm'
    if predictor_type == 'cnn':
        net = tflearn.conv_2d(inputs, 32, (1, 3), padding='valid', weights_init = 'xavier',\
                              regularizer='L2', weight_decay = weight_decay)
        if use_batch_norm:
            net = tflearn.layers.normalization.batch_normalization(net)
        # net = tflearn.activations.relu(net)
        net = activation_net(net, activation_function)

        net = tflearn.conv_2d(net, 32, (1, window_length - 2), padding='valid', weights_init = 'xavier',\
                              regularizer='L2', weight_decay = weight_decay)
        if use_batch_norm:
            net = tflearn.layers.normalization.batch_normalization(net)
        # net = tflearn.activations.relu(net)
        net = activation_net(net, activation_function)

        #################################################
        net = tflearn.conv_2d(net, 1, (1, 1), padding='valid', weights_init = 'xavier',\
                              regularizer='L2', weight_decay = weight_decay)
        if use_batch_norm:
            net = tflearn.layers.normalization.batch_normalization(net)
        # net = tflearn.activations.relu(net)
        net = activation_net(net, activation_function)
        ##################################################
        
        if DEBUG:
            print('After conv2d:', net.shape)
        net = tflearn.flatten(net)
        if DEBUG:
            print('Output:', net.shape)
    elif predictor_type == 'lstm':
        num_stocks = inputs.get_shape()[1]
        hidden_dim = 32
        net = tflearn.reshape(inputs, new_shape=[-1, window_length, feature_number])
        if DEBUG:
            print('Reshaped input:', net.shape)
        net = tflearn.lstm(net, hidden_dim, activation = activation_function,  weights_init = 'xavier')
        if DEBUG:
            print('After LSTM:', net.shape)
        net = tflearn.reshape(net, new_shape=[-1, num_stocks, hidden_dim,1]) ## reshape for conv2d in the next step
        if DEBUG:
            print('After reshape:', net.shape)

        #################################################
        net = tflearn.conv_2d(net, 1, (1, hidden_dim), padding='valid', weights_init = 'xavier',\
                              regularizer='L2', weight_decay = weight_decay)
        if use_batch_norm:
            net = tflearn.layers.normalization.batch_normalization(net)
        # net = tflearn.activations.relu(net)
        net = activation_net(net, activation_function)
        ##################################################
        net = tflearn.flatten(net)
        if DEBUG:
            print('Output:', net.shape)
    else:
        raise NotImplementedError
    return net
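
The two tflearn.reshape calls in the LSTM branch above first fold the stock axis into the batch axis, so each stock is fed to the LSTM as an independent [window_length, feature_number] sequence, and then regroup the per-stock hidden states for the final (1, hidden_dim) convolution. A minimal NumPy sketch of that shape arithmetic (the sizes below are hypothetical, not taken from the original project):

import numpy as np

batch, num_stocks, window_length, feature_number, hidden_dim = 2, 5, 10, 3, 32
inputs = np.zeros((batch, num_stocks, window_length, feature_number))

# tflearn.reshape(inputs, [-1, window_length, feature_number]):
# every stock becomes its own LSTM sequence.
seqs = inputs.reshape(-1, window_length, feature_number)
assert seqs.shape == (batch * num_stocks, window_length, feature_number)

# After the LSTM there is one hidden vector per (batch, stock) pair;
# tflearn.reshape(net, [-1, num_stocks, hidden_dim, 1]) regroups them into a
# 4-D tensor for the following conv_2d.
lstm_out = np.zeros((batch * num_stocks, hidden_dim))
regrouped = lstm_out.reshape(-1, num_stocks, hidden_dim, 1)
assert regrouped.shape == (batch, num_stocks, hidden_dim, 1)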
Code Example #6
    def npi_core(self):
        """
        Build the NPI LSTM core, feeding the program embedding and state encoding to a multi-layered
        LSTM, returning the h-state of the final LSTM layer.

        References: Reed, de Freitas [2]
        """
        s_in = self.state_encoding  # Shape: [bsz, state_dim]
        p_in = self.program_embedding  # Shape: [bsz, 1, program_dim]

        # Reshape state_in
        s_in = tflearn.reshape(
            s_in, [-1, 1, self.state_dim])  # Shape: [bsz, 1, state_dim]

        # Concatenate s_in, p_in
        c = tflearn.merge([s_in, p_in], 'concat',
                          axis=2)  # Shape: [bsz, 1, state + prog]

        # Feed through Multi-Layer LSTM
        for i in range(self.npi_core_layers):
            c, [self.h_states[i]] = self.lstm(c,
                                              self.npi_core_dim,
                                              return_seq=True,
                                              initial_state=self.h_states[i],
                                              return_states=True)

        # Return Top-Most LSTM H-State
        top_state = tf.split(self.h_states[-1], 2, 1)[1]
        return top_state  # Shape: [bsz, npi_core_dim]
Code Example #7
def CNN_decoder(z, tsamples, nsamples, reuse=False):
    with tf.variable_scope("CNN_decoder", reuse=reuse):
        x = tflearn.fully_connected(z, 38 * 32, activation='tanh')
        x = tf.reshape(x, shape=[-1, 1, 38, 32])
        decoder = tflearn.conv_2d(x, 32, [3, 3], activation='tanh')
        print(decoder.get_shape())
        #decoder = tflearn.batch_normalization(decoder)
        decoder = tflearn.upsample_2d(decoder, [2, 2])
        print(decoder.get_shape())
        decoder = tflearn.conv_2d(decoder, 64, [3, 3], activation='tanh')
        print(decoder.get_shape())
        #decoder = tflearn.batch_normalization(decoder)
        #decoder= tflearn.upsample_2d(decoder, [3,2])
        decoder = tflearn.upsample_2d(decoder, [3, 2])
        print(decoder.get_shape())
        decoder = tflearn.conv_2d(decoder, 64, [3, 3], activation='tanh')
        print(decoder.get_shape())
        #decoder = tflearn.batch_normalization(decoder)
        decoder = tflearn.upsample_2d(decoder, [1, 2])
        print(decoder.get_shape())
        y = tflearn.conv_2d(decoder,
                            1, [3, 3],
                            activation='tanh',
                            name="sigout")
        print(y.get_shape())
        #y = tflearn.conv_2d(x, 1, [2,2], activation='relu', name="sigout0")
        y = tflearn.fully_connected(y,
                                    tsamples * nsamples,
                                    activation="linear",
                                    name="sigout")
        #y = tf.reshape(y,[-1,tsamples,nsamples])
        #x = tf.sigmoid(y)
        x = y
        x = tflearn.reshape(x, [-1, tsamples, nsamples], name="reshaped")
    return x, y
Code Example #8
File: test_layers.py Project: nagyist/tflearn
    def test_feed_dict_no_None(self):

        X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.],
             [1., 1., 1., 0.]]
        Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]

        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4], name="X_in")
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2)
            g = tflearn.conv_2d(g, 4, 1)
            g = tflearn.max_pool_2d(g, 2)
            g = tflearn.fully_connected(g, 2, activation='softmax')
            g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

            m = tflearn.DNN(g)

            def do_fit():
                m.fit({
                    "X_in": X,
                    'non_existent': X
                },
                      Y,
                      n_epoch=30,
                      snapshot_epoch=False)

            self.assertRaisesRegexp(
                Exception,
                "Feed dict asks for variable named 'non_existent' but no such variable is known to exist",
                do_fit)
Code Example #9
    def create_actor_network(self):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1]])

            split_0 = tflearn.fully_connected(inputs[:, 0:1, -1], 64, activation='relu')
            split_1 = tflearn.fully_connected(inputs[:, 1:2, -1], 64, activation='relu')
            split_2 = tflearn.fully_connected(inputs[:, 4:5, -1], 64, activation='relu')

            reshape_0 = tflearn.reshape(inputs[:, 2:4, :], [-1, 2, self.s_dim[1], 1])
            split_3 = tflearn.conv_2d(reshape_0, 128, 3, activation='relu')

            split_4 = tflearn.conv_1d(inputs[:, 5:6, :], 128, 4, activation='relu')
            split_5 = tflearn.conv_1d(inputs[:, 6:7, :], 128, 4, activation='relu')

            flatten_0 = tflearn.flatten(split_3)
            flatten_1 = tflearn.flatten(split_4)
            flatten_2 = tflearn.flatten(split_5)

            merge_net = tflearn.merge([split_0, split_1, split_2, flatten_0, flatten_1, flatten_2], 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net, 128, activation='relu')

            # for multiple video, mask out the invalid actions
            linear_out = tflearn.fully_connected(dense_net_0, self.a_dim, activation='linear')
            linear_out = tf.transpose(linear_out)  # [None, a_dim] -> [a_dim, None]
            mask_out = tf.boolean_mask(linear_out, self.mask)  # [a_dim, None] -> [masked, None]
            mask_out = tf.transpose(mask_out)  # [masked, None] -> [None, masked]
            softmax_out = tf.nn.softmax(mask_out)

            return inputs, softmax_out
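
The transpose / boolean_mask / transpose sequence above works around the fact that tf.boolean_mask filters along the first axis: the [None, a_dim] logits are flipped to [a_dim, None], rows for invalid actions are dropped, and the result is flipped back before the softmax. A minimal NumPy sketch of the same trick (mask and sizes are hypothetical):

import numpy as np

batch, a_dim = 2, 6
linear_out = np.arange(batch * a_dim, dtype=np.float32).reshape(batch, a_dim)
mask = np.array([True, False, True, True, False, True])  # hypothetical validity mask

masked = linear_out.T[mask].T          # [batch, a_dim] -> [batch, num_valid]
assert masked.shape == (batch, int(mask.sum()))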
Code Example #10
    def create_network(self):
        # TODO try convolutional RNN and LSTM shapes
        net = tflearn.input_data(shape=[None, len(self.string_to_number)])

        # self.net = tflearn.fully_connected(self.net, 32)
        # self.net = tflearn.fully_connected(self.net, 64)

        # usually we need a 3D-Tensor
        net = tflearn.reshape(net, (-1, len(self.string_to_number), 1))

        # get the "good" parts
        net = tflearn.conv_1d(net, 64, 3, activation='relu', regularizer="L2")
        net = tflearn.max_pool_1d(net, 2)
        net = tflearn.batch_normalization(net)
        net = tflearn.conv_1d(net, 128, 3, activation='relu', regularizer="L2")
        net = tflearn.max_pool_1d(net, 2)
        net = tflearn.batch_normalization(net)

        # remember the meanings
        # net = tflearn.lstm(net, 64)
        # net = tflearn.dropout(net, 0.5)

        # net = tflearn.simple_rnn(net, len(self.string_to_number) * 2)

        # map to next value
        net = tflearn.fully_connected(net, 256, activation='tanh')
        net = tflearn.dropout(net, 0.5)
        net = tflearn.fully_connected(net,
                                      len(self.string_to_number),
                                      activation='softmax')
        self.net = tflearn.regression(net, optimizer='adam')
        self.model = tflearn.DNN(self.net)
Code Example #11
    def npi_core(self):
        """
        Build the NPI LSTM core, feeding the program embedding and state encoding to a multi-layered
        LSTM, returning the h-state of the final LSTM layer.

        References: Reed, de Freitas [2]
        """
        s_in = self.state_encoding  # Shape: [bsz, state_dim]
        p_in = self.program_embedding  # Shape: [bsz, 1, program_dim]

        # Reshape state_in
        s_in = tflearn.reshape(
            s_in, [-1, 1, self.state_dim])  # Shape: [bsz, 1, state_dim]

        # Concatenate s_in, p_in
        c = tflearn.merge([s_in, p_in], 'concat',
                          axis=2)  # Shape: [bsz, 1, state + prog]

        # Feed through Multi-Layer LSTM
        # print('-'*100)
        net, state = tflearn.layers.recurrent.lstm(c,
                                                   self.npi_core_dim,
                                                   return_seq=True,
                                                   return_state=True)
        net, state = tflearn.layers.recurrent.lstm(net,
                                                   self.npi_core_dim,
                                                   return_seq=True,
                                                   return_state=True)
        # print('*'*100)
        # print(state)
        top_state = state.c
        return top_state
Code Example #12
File: npi.py Project: AnuragGolla/npi
    def npi_core(self):
        """
        Build the NPI LSTM core, feeding the program embedding and state encoding to a multi-layered
        LSTM, returning the h-state of the final LSTM layer.

        References: Reed, de Freitas [2]
        """
        s_in = self.state_encoding  # Shape: [bsz, state_dim]
        p_in = self.program_embedding  # Shape: [bsz, 1, program_dim]

        # Reshape state_in
        s_in = tflearn.reshape(
            s_in, [-1, 1, self.state_dim])  # Shape: [bsz, 1, state_dim]

        # Concatenate s_in, p_in
        c = tflearn.merge([s_in, p_in], 'concat',
                          axis=2)  # Shape: [bsz, 1, state + prog]

        # # Feed through Multi-Layer LSTM
        # for i in range(self.npi_core_layers):
        #     c, [self.h_states[i]] = tflearn.lstm(c, self.npi_core_dim, return_seq=True,
        #                                          initial_state=self.h_states[i], return_state=True)

        # Feed through Multi-Layer LSTM
        for i in range(self.npi_core_layers):
            # print("H STATE before: ", self.h_states[i])
            c, self.h_states[i] = tflearn.lstm(
                c, self.npi_core_dim, return_seq=True,
                return_state=True)  ######CHECK! initial_state=TRUE
            # print("H STATE after: ", self.h_states[i])

        # # Layer 1 Feed through
        # c, [self.h_state_l1] = tflearn.lstm(c, self.npi_core_dim, return_seq=True,
        #                                      initial_state=self.h_state_l1, return_state=True)
        #
        # # Layer 2 Feed through
        # c, [self.h_state_l2] = tflearn.lstm(c, self.npi_core_dim, return_seq=True,
        #                                     initial_state=self.h_state_l2, return_state=True)

        # zero_state = tf.zeros([self.bsz, self.npi_core_dim])
        # c, self= tflearn.lstm(c, self.npi_core_dim, return_seq=True, return_state=True) ######CHECK! initial_state=TRUE
        # print("PRINT A, B: ", a, b)
        # print("TF PRINT TEST: ")
        # tf.Print(test)

        # Return Top-Most LSTM H-State
        # print("H STATE L2: ", self.h_states[-1])
        other_state, top_state = tf.split(self.h_states[-1],
                                          num_or_size_splits=2,
                                          axis=0)
        # print("o state: ", other_state)
        # print("top state: ", top_state)
        # time.sleep(5)

        return top_state  # Shape: [bsz, npi_core_dim]
Code Example #13
def get_network_wide(frames, height, width, num_classes):
    """Create a one-layer LSTM"""
    net = tflearn.input_data(shape=[None, frames, height, width, 1])
    net = tflearn.reshape(net, [-1, height, width, 1])
    # (?, 18, 32, 8)
    net = tflearn.conv_2d(net, 8, [20, 20], 5)
    # (?, 6, 8, 16)
    net = tflearn.conv_2d(net, 16, [6, 8], [3, 4])
    # 768
    net = tflearn.reshape(net, [-1, frames, 8 * 6 * 16])
    # 128
    net = tflearn.lstm(net, 128, dropout=0.2)
    # 10
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net,
                             optimizer='adam',
                             learning_rate=0.0005,
                             loss='categorical_crossentropy',
                             name='output1')
    return net
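
The two reshapes above first fold the frame axis into the batch axis so the 2-D convolutions run on one frame at a time, then regroup the per-frame feature vectors into sequences for the LSTM. A minimal NumPy sketch of the same arithmetic; the 90 x 160 frame size is an assumption chosen so the intermediate shapes match the comments in the code ((?, 18, 32, 8) and (?, 6, 8, 16)):

import numpy as np

batch, frames, height, width = 2, 10, 90, 160   # hypothetical sizes
video = np.zeros((batch, frames, height, width, 1))

# tflearn.reshape(net, [-1, height, width, 1]): conv_2d sees single frames.
per_frame = video.reshape(-1, height, width, 1)
assert per_frame.shape == (batch * frames, height, width, 1)

# After conv_2d(..., 8, [20, 20], 5) and conv_2d(..., 16, [6, 8], [3, 4]) the
# spatial size is 6 x 8 with 16 filters, i.e. 8 * 6 * 16 = 768 features per frame.
conv_out = np.zeros((batch * frames, 6, 8, 16))

# tflearn.reshape(net, [-1, frames, 8 * 6 * 16]): back to sequences for the LSTM.
seqs = conv_out.reshape(-1, frames, 8 * 6 * 16)
assert seqs.shape == (batch, frames, 768)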
Code Example #14
File: Util.py Project: LilMarc0/KaggleHARDataset
def cnn(eps, lr, shape):
    import tflearn
    inputs = tflearn.input_data(shape=shape)
    net = tflearn.conv_1d(inputs,
                          nb_filter=64,
                          filter_size=3,
                          strides=1,
                          padding='same',
                          activation='tanh')
    net = tflearn.avg_pool_1d(net, kernel_size=3)
    net = tflearn.conv_1d(net, 64, 3, 1, padding='same', activation='tanh')
    shape = net.get_shape().as_list()
    print(shape)
    net = tflearn.reshape(net, [-1, shape[1] * shape[2]])

    net = tflearn.normalization.batch_normalization(net)
    net = tflearn.fully_connected(net,
                                  512,
                                  activation='tanh',
                                  regularizer='L2')
    net = tflearn.dropout(net, 0.8)

    net = tflearn.normalization.batch_normalization(net)
    net = tflearn.fully_connected(net,
                                  1024,
                                  activation='tanh',
                                  regularizer='L2')
    net = tflearn.dropout(net, 0.8)

    net = tflearn.normalization.batch_normalization(net)
    net = tflearn.fully_connected(net,
                                  512,
                                  activation='tanh',
                                  regularizer='L2')

    softmax = tflearn.fully_connected(net, 20, activation='softmax')

    # sgd = tflearn.SGD(learning_rate=lr, lr_decay=0.96, decay_step=15)
    adam = tflearn.optimizers.adam(epsilon=eps, learning_rate=lr)
    regression = tflearn.regression(softmax,
                                    optimizer=adam,
                                    metric='accuracy',
                                    loss='categorical_crossentropy')

    model = tflearn.DNN(regression,
                        tensorboard_verbose=3,
                        tensorboard_dir='.',
                        best_val_accuracy=0.7,
                        best_checkpoint_path='./bestFinal/cnn',
                        checkpoint_path='./checkpoints/cnn',
                        max_checkpoints=10)
    print('Model created')
    return model
Code Example #15
    def create_actor_network(self, target):
        """
        self.s_dim: a list specifying the shape
        """
        nb_classes, window_length = self.s_dim
        assert nb_classes == self.a_dim[0]
        assert window_length > 2, 'This architecture only supports a window length larger than 2.'
        inputs = tflearn.input_data(shape=[None] + self.s_dim + [1],
                                    name='input')

        portfolio_inputs = None
        portfolio_reshaped = None
        if self.use_previous:
            portfolio_inputs = tflearn.input_data(shape=[None] + self.a_dim,
                                                  name='portfolio_input')
            portfolio_reshaped = tflearn.reshape(portfolio_inputs,
                                                 new_shape=[-1] + self.a_dim +
                                                 [1, 1])

        net, auxil = stock_predictor_actor(inputs, self.predictor_type,
                                           self.use_batch_norm,
                                           self.use_previous,
                                           portfolio_reshaped,
                                           self.auxiliary_prediction, target)
        out = tf.nn.softmax(net)
        scaled_out = tf.multiply(out, self.action_bound)

        # net = tflearn.fully_connected(net, 64)
        # if self.use_batch_norm:
        #     net = tflearn.layers.normalization.batch_normalization(net)
        # # net = tflearn.layers.normalization.batch_normalization(net)
        # net = tflearn.activations.relu(net)
        # net = tflearn.fully_connected(net, 64)
        # if self.use_batch_norm:
        #     net = tflearn.layers.normalization.batch_normalization(net)
        # # net = tflearn.layers.normalization.batch_normalization(net)
        # net = tflearn.activations.relu(net)
        # # Final layer weights are init to Uniform[-3e-3, 3e-3]
        # w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        # out = tflearn.fully_connected(net, self.a_dim[0], activation='softmax', weights_init=w_init)
        # # Scale output to -action_bound to action_bound
        # scaled_out = tf.multiply(out, self.action_bound)

        loss = None
        future_y_inputs = None
        if self.auxiliary_prediction > 0:
            print("HERE")
            future_y_inputs = tflearn.input_data(shape=[None] + self.a_dim,
                                                 name='portfolio_input')
            loss = tf.reduce_mean(
                tf.reduce_sum(tf.square(auxil - future_y_inputs), axis=-1))

        return inputs, out, scaled_out, portfolio_inputs, loss, future_y_inputs
Code Example #16
 def get_model():
     x = tflearn.input_data(shape=[None, 28, 28],
                            dtype=tf.float32,
                            name='img')
     x = tflearn.reshape(x, [-1, 28, 28, 1])
     # First Convolutional Layer
     net = tflearn.conv_2d(x,
                           32,
                           5,
                           strides=[1, 1, 1, 1],
                           activation='relu',
                           name='conv1')
     net = tflearn.max_pool_2d(net,
                               kernel_size=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               name='maxpool1')
     # Second Convolutional Layer
     net = tflearn.conv_2d(net, 64, 5, activation='relu', name='conv2')
     net = tflearn.max_pool_2d(net,
                               kernel_size=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               name='maxpool2')
     # Densely Connected Layer
     net = tflearn.reshape(net, [-1, 7 * 7 * 64])
     net = tflearn.fully_connected(net, 1024, activation='relu')
     # Dropout
     net = tflearn.dropout(net, 0.5)
     # Readout Layer
     W_fc2 = CaptchaModel.weight_variable([1024, 36])
     b_fc2 = CaptchaModel.bias_variable([36])
     net = tf.matmul(net, W_fc2) + b_fc2
     regression = tflearn.regression(
         net,
         name='label',
         learning_rate=0.0001,
         loss='softmax_categorical_crossentropy')
     model = tflearn.DNN(network=regression,
                         tensorboard_dir='tmp/tf.log',
                         tensorboard_verbose=1)
     return model
Code Example #17
def stock_predictor(inputs, predictor_type, use_batch_norm):
    """This is the deep neural network for the policy gradient.
    TODO: change this to use keras in TF
    """
    window_length = inputs.get_shape()[2]
    assert predictor_type in ['cnn', 'lstm'], 'type must be either cnn or lstm'
    if predictor_type == 'cnn':
        net = tflearn.conv_2d(inputs, 32, (1, 3), padding='valid')
        if use_batch_norm:
            net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.activations.relu(net)
        net = tflearn.conv_2d(net, 32, (1, window_length - 2), padding='valid')
        if use_batch_norm:
            net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.activations.relu(net)
        if DEBUG:
            print('After conv2d:', net.shape)
        net = tflearn.flatten(net)
        if DEBUG:
            print('Output:', net.shape)
    elif predictor_type == 'lstm':
        num_stocks = inputs.get_shape()[1]
        hidden_dim = 32
        net = tflearn.reshape(inputs, new_shape=[-1, window_length, 4]) #changed 1 to 4 for default normalizer
        if DEBUG:
            print('Reshaped input:', net.shape)
        net = tflearn.lstm(net, hidden_dim)
        if DEBUG:
            print('After LSTM:', net.shape)
        net = tflearn.reshape(net, new_shape=[-1, num_stocks, hidden_dim])
        if DEBUG:
            print('After reshape:', net.shape)
        net = tflearn.flatten(net)
        if DEBUG:
            print('Output:', net.shape)
    else:
        raise NotImplementedError

    return net
Code Example #18
 def make_core_network(network):
     network = tflearn.reshape(network, [-1, 28, 28, 1], name='reshape')
     network = conv_2d(network, 32, 3, activation='relu', regularizer='L2')
     network = max_pool_2d(network, 2)
     network = local_response_normalization(network)
     network = conv_2d(network, 64, 3, activation='relu', regularizer='L2')
     network = max_pool_2d(network, 2)
     network = local_response_normalization(network)
     network = fully_connected(network, 128, activation='tanh')
     network = dropout(network, 0.8)
     network = fully_connected(network, 256, activation='tanh')
     network = dropout(network, 0.8)
     network = fully_connected(network, 10, activation='softmax')
     return network
Code Example #19
 def make_core_network(network):
     network = tflearn.reshape(network, [-1, 28, 28, 1], name="reshape")
     network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
     network = max_pool_2d(network, 2)
     network = local_response_normalization(network)
     network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
     network = max_pool_2d(network, 2)
     network = local_response_normalization(network)
     network = fully_connected(network, 128, activation='tanh')
     network = dropout(network, 0.8)
     network = fully_connected(network, 256, activation='tanh')
     network = dropout(network, 0.8)
     network = fully_connected(network, 10, activation='softmax')
     return network
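
make_core_network only builds the convolutional body, so it has to be wrapped in an input layer, a regression layer and a DNN before training. A hedged sketch of that wiring, assuming flat 784-dimensional (28 x 28) input vectors; everything outside the call to make_core_network itself is an assumption, not part of the original snippet:

import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import dropout, fully_connected, input_data
from tflearn.layers.normalization import local_response_normalization

network = input_data(shape=[None, 784], name='input')   # flat MNIST-style vectors
network = make_core_network(network)                     # reshaped to 28x28x1 inside
network = tflearn.regression(network, optimizer='adam',
                             loss='categorical_crossentropy',
                             learning_rate=0.001, name='target')
model = tflearn.DNN(network, tensorboard_verbose=0)
# model.fit({'input': X}, {'target': Y}, n_epoch=10)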
Code Example #20
def Network_Conv(S_LEN, Input_LEN, length, class_num):

    data_len = Input_LEN * (S_LEN * 2 + 1)
    x = tf.placeholder(tf.float32, shape=[None, length, data_len], name='x')
    y_ = tf.placeholder(tf.int32, shape=[
        None,
    ], name='y_')
    x_reshape = tflearn.reshape(x, [-1, length, data_len, 1])

    print(x_reshape.shape)
    split_1 = tflearn.conv_2d(x_reshape[:, :, 0:Input_LEN * S_LEN, :],
                              64,
                              3,
                              activation='LeakyReLU',
                              restore=False)
    split_2 = tflearn.conv_2d(x_reshape[:, :, Input_LEN * S_LEN:Input_LEN *
                                        S_LEN * 2, :],
                              64,
                              3,
                              activation='LeakyReLU')
    split_3 = tflearn.conv_2d(x_reshape[:, :,
                                        Input_LEN * S_LEN * 2:data_len, :],
                              64,
                              3,
                              activation='LeakyReLU')
    print(split_1.shape)
    print(split_2.shape)
    print(split_3.shape)

    # dense_concat = tf.concat([split_1[:,:,:,np.newaxis],split_2[:,:,:,np.newaxis],split_3[:,:,:,np.newaxis]],axis = 2)
    dense_concat = tflearn.merge([split_1, split_2, split_3], 'concat', axis=2)
    # print dense_concat.shape

    cov = tflearn.conv_2d(dense_concat, 128, 3, activation='relu')
    #print type(cov)
    cov = tflearn.flatten(cov)
    #print cov.shape
    logits = tf.layers.dense(
        inputs=cov,
        units=256,
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
    logits = tf.layers.dense(
        inputs=logits,
        units=class_num,
        activation=None,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
    return logits, x, y_
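
Network_Conv returns raw logits together with the x / y_ placeholders, and y_ holds integer class ids, which suggests a sparse softmax cross-entropy objective. A minimal TF1-style training sketch (not part of the original snippet; all sizes are hypothetical):

import tensorflow as tf

logits, x, y_ = Network_Conv(S_LEN=8, Input_LEN=4, length=10, class_num=5)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_, logits=logits))
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)
predictions = tf.argmax(logits, axis=1, output_type=tf.int32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predictions, y_), tf.float32))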
Code Example #21
def stock_predictor(inputs, predictor_type, use_batch_norm):
    window_length = inputs.get_shape()[2]
    assert predictor_type in ['cnn', 'lstm'], 'type must be either cnn or lstm'
    if predictor_type == 'cnn':
        net = tflearn.conv_2d(inputs, 32, (1, 3), padding='valid')
        if use_batch_norm:
            net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.activations.relu(net)
        net = tflearn.conv_2d(net, 32, (1, window_length - 2), padding='valid')
        if use_batch_norm:
            net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.activations.relu(net)
        if DEBUG:
            print('After conv2d:', net.shape)
        net = tflearn.flatten(net)
        if DEBUG:
            print('Output:', net.shape)
    elif predictor_type == 'lstm':
        num_stocks = inputs.get_shape()[1]
        hidden_dim = 32
        net = tflearn.reshape(inputs, new_shape=[-1, window_length, 1])
        if DEBUG:
            print('Reshaped input:', net.shape)
        net = tflearn.lstm(net, hidden_dim)
        if DEBUG:
            print('After LSTM:', net.shape)
        net = tflearn.reshape(net, new_shape=[-1, num_stocks, hidden_dim])
        if DEBUG:
            print('After reshape:', net.shape)
        net = tflearn.flatten(net)
        if DEBUG:
            print('Output:', net.shape)
    else:
        raise NotImplementedError

    return net
Code Example #22
File: a3c.py Project: wushilan/arsenal
    def create_actor_network(self,reuse = True):
        with tf.variable_scope('actor'):
            inputs = tflearn.input_data(shape=[None, self.s_dim[0], self.s_dim[1],self.s_dim[2]])     # input layer
            print('self.s_dim[0]', self.s_dim[0])
            # split_0 = tflearn.fully_connected(inputs[:, 0:1, -1], 128, activation='relu', reuse=tf.AUTO_REUSE, scope = 'critic_fc1')
            print('inputs[:, 0:1, :].shape:', inputs[:, 0:1, :].shape)
            print('inputs[:, 1:2, :].shape:', inputs[:, 1:2, :].shape)
            x_reshape1 = tflearn.reshape(inputs[:, 0:1, :], [-1, self.s_dim[1],self.s_dim[2],1])
            x_reshape2 = tflearn.reshape(inputs[:, 1:2, :], [-1, self.s_dim[1],self.s_dim[2],1])
            # split_0 = tflearn.fully_connected(inputs[:, 0:1, :], 128, activation='relu', reuse=tf.AUTO_REUSE, scope = 'fc1')
            split_1 = tflearn.conv_2d(x_reshape1, 128, 4, activation='relu', scope = 'conv1_1')
            split_2 = tflearn.conv_2d(x_reshape2, 128, 4, activation='relu', scope = 'conv1_2')

            # split_2 = tflearn.fully_connected(inputs[:, 1:2, -1], 128, activation='relu', reuse=tf.AUTO_REUSE, scope = 'fc2')

            split_1_flat = tflearn.flatten(split_1)
            split_2_flat = tflearn.flatten(split_2)

            merge_net = tflearn.merge([split_1_flat, split_2_flat], 'concat')

            dense_net_0 = tflearn.fully_connected(merge_net, 128, activation='relu', scope = 'fc3')
            out = tflearn.fully_connected(dense_net_0, self.a_dim, activation='softmax', scope = 'fc4')     # output layer

            return inputs, out
Code Example #23
File: test_layers.py Project: nagyist/tflearn
    def test_core_layers(self):

        X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
        Y_nand = [[1.], [1.], [1.], [0.]]
        Y_or = [[0.], [1.], [1.], [1.]]

        # Graph definition
        with tf.Graph().as_default():
            # Building a network with 2 optimizers
            g = tflearn.input_data(shape=[None, 2])

            # Nand operator definition
            g_nand = tflearn.fully_connected(g, 32, activation='linear')
            g_nand = tflearn.fully_connected(g_nand, 32, activation='linear')
            g_nand = tflearn.fully_connected(g_nand, 1, activation='sigmoid')
            g_nand = tflearn.regression(g_nand,
                                        optimizer='sgd',
                                        learning_rate=2.,
                                        loss='binary_crossentropy')
            # Or operator definition
            g_or = tflearn.fully_connected(g, 32, activation='linear')
            g_or = tflearn.fully_connected(g_or, 32, activation='linear')
            g_or = tflearn.fully_connected(g_or, 1, activation='sigmoid')
            g_or = tflearn.regression(g_or,
                                      optimizer='sgd',
                                      learning_rate=2.,
                                      loss='binary_crossentropy')
            # XOR merging Nand and Or operators
            g_xor = tflearn.merge([g_nand, g_or], mode='elemwise_mul')

            # Training
            m = tflearn.DNN(g_xor)
            m.fit(X, [Y_nand, Y_or], n_epoch=400, snapshot_epoch=False)

            # Testing
            self.assertLess(m.predict([[0., 0.]])[0][0], 0.01)
            self.assertGreater(m.predict([[0., 1.]])[0][0], 0.9)
            self.assertGreater(m.predict([[1., 0.]])[0][0], 0.9)
            self.assertLess(m.predict([[1., 1.]])[0][0], 0.01)

        # Bulk Tests
        with tf.Graph().as_default():
            net = tflearn.input_data(shape=[None, 2])
            net = tflearn.flatten(net)
            net = tflearn.reshape(net, new_shape=[-1])
            net = tflearn.activation(net, 'relu')
            net = tflearn.dropout(net, 0.5)
            net = tflearn.single_unit(net)
Code Example #24
File: multinomal.py Project: skaftanis/DiplomaThesis
def model_lstm(output=1):

    network = input_data(shape=[None, 100, 250, 6])

    part_one = network[..., :3]
    part_two = network[..., 3:]

    network1 = conv_2d(part_one, 24, 5, strides=5, activation='elu')
    network2 = conv_2d(part_two, 24, 5, strides=5, activation='elu')

    network = tflearn.merge([network1, network2], 'concat')

    #from basic network
    #network = conv_2d(network, 24, 5, strides=5, activation='elu' )
    network = conv_2d(network, 36, 5, strides=5, activation='elu')
    network = conv_2d(network, 48, 5, strides=5, activation='elu')
    network = conv_2d(network, 64, 3, strides=3, activation='elu')
    network = conv_2d(network, 64, 3, strides=3, activation='elu')

    network = dropout(network, 0.5)

    network = fully_connected(network, 100, activation='elu')
    network = fully_connected(network, 50, activation='elu')
    network = fully_connected(network, 10, activation='elu')

    network = dropout(network, 0.5)
    network = tflearn.reshape(network, [-1, 1, 10])
    network = tflearn.lstm(network, 128, return_seq=True)
    network = tflearn.lstm(network, 128)

    network = fully_connected(network, output, activation='linear')

    #momentum =  tflearn.Momentum(learning_rate=0.00001, lr_decay=0.96, decay_step=1500)
    #network = tflearn.regression(network, optimizer=momentum, loss='huber_loss', metric=None)

    network = tflearn.regression(network,
                                 optimizer='adam',
                                 loss='huber_loss',
                                 metric=None,
                                 learning_rate=0.0001)

    model = tflearn.DNN(network,
                        checkpoint_path='model_spirosnet',
                        max_checkpoints=1,
                        tensorboard_verbose=2,
                        tensorboard_dir='log')

    return model
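
Note that the reshape just before the LSTM layers turns each 10-unit fully connected output into a length-1 sequence, so the stacked LSTMs see a single time step per sample and effectively act as additional non-linear layers rather than modelling a temporal dimension. A tiny NumPy sketch of that shape change (batch size is hypothetical):

import numpy as np

batch = 4                            # hypothetical
fc_out = np.zeros((batch, 10))       # output of fully_connected(network, 10)
seq = fc_out.reshape(-1, 1, 10)      # what tflearn.reshape(network, [-1, 1, 10]) produces
assert seq.shape == (batch, 1, 10)   # one time step of 10 features per sample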
Code Example #25
def alexnet2backup(width, height, lr, output=3, timesteps=50):
    network = input_data(shape=[None, width, height, 3], name='input')
    network = tflearn.reshape(network, [-1, width, height, 3])
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = conv_2d(network, 256, 5, activation='relu')
    #network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = conv_2d(network, 128, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2048, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2048, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2048, activation='tanh')
    network = dropout(network, 0.5)
    ###
    #network = tflearn.reshape(network, [-1, timesteps, 2048])
    #network = tflearn.lstm(network, 128, dropout=0.8)
    ###
    network = fully_connected(network, output, activation='softmax')
    network = regression(network,
                         optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr,
                         name='targets')

    model = tflearn.DNN(network,
                        checkpoint_path='model_alexnet',
                        max_checkpoints=1,
                        tensorboard_verbose=0,
                        tensorboard_dir='log')

    return model
Code Example #26
def decoder(inputs, decode_layer):
    net = tflearn.fully_connected(inputs, (side // 2**2)**2 * 32,
                                  name='DecFC1')
    d = tf.transpose(net.W)
    print("Decoder Weights shape", d.get_shape())
    net = tflearn.batch_normalization(net, name='DecBN1')
    net = tflearn.elu(net)
    print("========================")
    print("dec-L1", net.get_shape())
    print("========================")

    net = tflearn.reshape(net, (-1, side // 2**2, side // 2**2, 32))
    net = tflearn.conv_2d(net, 32, 3, name='DecConv1')
    net = tflearn.batch_normalization(net, name='DecBN2')
    net = tflearn.elu(net)
    print("========================")
    print("dec-L2", net.get_shape())
    print("========================")
    net = tflearn.conv_2d_transpose(net,
                                    16,
                                    3, [side // 2, side // 2],
                                    strides=2,
                                    padding='same',
                                    name='DecConvT1')
    net = tflearn.batch_normalization(net, name='DecBN3')
    net = tflearn.elu(net)
    print("========================")
    print("dec-L3", net.get_shape())
    print("========================")
    net = tflearn.conv_2d(net, 16, 3, name='DecConv2')
    net = tflearn.batch_normalization(net, name='DecBN4')
    net = tflearn.elu(net)
    print("========================")
    print("dec-L4", net.get_shape())
    print("========================")
    net = tflearn.conv_2d_transpose(net,
                                    channel,
                                    3, [side, side],
                                    strides=2,
                                    padding='same',
                                    activation='sigmoid',
                                    name='DecConvT2')

    print("========================")
    print("output layer", net.get_shape())
    print("========================")
    return [net, d]
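
The DecFC1 layer emits (side // 2**2)**2 * 32 units per sample, and the reshape arranges them as a (side // 4, side // 4, 32) feature map that the two stride-2 transposed convolutions then upsample back to (side, side). A NumPy sketch of the shape bookkeeping with a hypothetical side of 32:

import numpy as np

batch, side = 2, 32                                  # hypothetical sizes
fc_out = np.zeros((batch, (side // 2**2)**2 * 32))   # DecFC1 output: 8 * 8 * 32 = 2048 units
fmap = fc_out.reshape(-1, side // 2**2, side // 2**2, 32)
assert fmap.shape == (batch, 8, 8, 32)               # two stride-2 deconvs restore 32 x 32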
Code Example #27
File: test_layers.py Project: EddywardoFTW/tflearn
    def test_core_layers(self):

        X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
        Y_nand = [[1.], [1.], [1.], [0.]]
        Y_or = [[0.], [1.], [1.], [1.]]

        # Graph definition
        with tf.Graph().as_default():
            # Building a network with 2 optimizers
            g = tflearn.input_data(shape=[None, 2])

            # Nand operator definition
            g_nand = tflearn.fully_connected(g, 32, activation='linear')
            g_nand = tflearn.fully_connected(g_nand, 32, activation='linear')
            g_nand = tflearn.fully_connected(g_nand, 1, activation='sigmoid')
            g_nand = tflearn.regression(g_nand, optimizer='sgd',
                                        learning_rate=2.,
                                        loss='binary_crossentropy')
            # Or operator definition
            g_or = tflearn.fully_connected(g, 32, activation='linear')
            g_or = tflearn.fully_connected(g_or, 32, activation='linear')
            g_or = tflearn.fully_connected(g_or, 1, activation='sigmoid')
            g_or = tflearn.regression(g_or, optimizer='sgd',
                                      learning_rate=2.,
                                      loss='binary_crossentropy')
            # XOR merging Nand and Or operators
            g_xor = tflearn.merge([g_nand, g_or], mode='elemwise_mul')

            # Training
            m = tflearn.DNN(g_xor)
            m.fit(X, [Y_nand, Y_or], n_epoch=400, snapshot_epoch=False)

            # Testing
            self.assertLess(m.predict([[0., 0.]])[0][0], 0.01)
            self.assertGreater(m.predict([[0., 1.]])[0][0], 0.9)
            self.assertGreater(m.predict([[1., 0.]])[0][0], 0.9)
            self.assertLess(m.predict([[1., 1.]])[0][0], 0.01)

        # Bulk Tests
        with tf.Graph().as_default():
            net = tflearn.input_data(shape=[None, 2])
            net = tflearn.flatten(net)
            net = tflearn.reshape(net, new_shape=[-1])
            net = tflearn.activation(net, 'relu')
            net = tflearn.dropout(net, 0.5)
            net = tflearn.single_unit(net)
Code Example #28
File: test_inputs.py Project: yutoc/tflearn
 def build_simple_model(self):
     """Build a simple model for test
     Returns:
         DNN, [ (input layer name, input placeholder, input data) ], Target data
     """
     inputPlaceholder1, inputPlaceholder2 = \
         tf.placeholder(tf.float32, (1, 1), name = "input1"), tf.placeholder(tf.float32, (1, 1), name = "input2")
     input1 = tflearn.input_data(placeholder = inputPlaceholder1)
     input2 = tflearn.input_data(placeholder = inputPlaceholder2)
     network = tflearn.merge([ input1, input2 ], "sum")
     network = tflearn.reshape(network, (1, 1))
     network = tflearn.fully_connected(network, 1)
     network = tflearn.regression(network)
     return (
         tflearn.DNN(network),
         [ ("input1:0", inputPlaceholder1, self.INPUT_DATA_1), ("input2:0", inputPlaceholder2, self.INPUT_DATA_2) ],
         self.TARGET,
     )
Code Example #29
File: test_inputs.py Project: EddywardoFTW/tflearn
 def build_simple_model(self):
     """Build a simple model for test
     Returns:
         DNN, [ (input layer name, input placeholder, input data) ], Target data
     """
     inputPlaceholder1, inputPlaceholder2 = \
         tf.placeholder(tf.float32, (1, 1), name = "input1"), tf.placeholder(tf.float32, (1, 1), name = "input2")
     input1 = tflearn.input_data(placeholder = inputPlaceholder1)
     input2 = tflearn.input_data(placeholder = inputPlaceholder2)
     network = tflearn.merge([ input1, input2 ], "sum")
     network = tflearn.reshape(network, (1, 1))
     network = tflearn.fully_connected(network, 1)
     network = tflearn.regression(network)
     return (
         tflearn.DNN(network),
         [ ("input1:0", inputPlaceholder1, self.INPUT_DATA_1), ("input2:0", inputPlaceholder2, self.INPUT_DATA_2) ],
         self.TARGET,
     )
Code Example #30
File: RCAE.py Project: kiminh/AMAD
	def __init__(self,instance_dim,hidden_dim):
		
		self.instance_dim = instance_dim
		self.hidden_dim = hidden_dim 
		
		hidden_layer = None
		decode_layer = None
		# Building the autoencoder model
		net = tflearn.input_data(shape=[None,self.instance_dim], name="data")
		net = tflearn.reshape(net,[-1,1,1,net.shape[1]])#turn to 4 D
		[net,hidden_layer] = self.encoder(net,hidden_layer)
		[net,decode_layer] = self.decoder(net,decode_layer)
		mue = 0.1
		net = tflearn.regression_RobustAutoencoder(net,mue,hidden_layer,decode_layer, optimizer='adam', learning_rate=0.001,
						loss='rPCA_autoencoderLoss', metric=None,name="vanilla_autoencoder")
		#rPCA_autoencoderLoss_FobsquareLoss
		#rPCA_autoencoderLoss
		#net = tflearn.regression(net, optimizer='adam', loss='mean_square', metric=None)
		model = tflearn.DNN(net, tensorboard_verbose=0, tensorboard_dir='tensorboard/')
Code Example #31
File: test_layers.py Project: EddywardoFTW/tflearn
    def test_feed_dict_no_None(self):

        X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
        Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]

        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4], name="X_in")
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2)
            g = tflearn.conv_2d(g, 4, 1)
            g = tflearn.max_pool_2d(g, 2)
            g = tflearn.fully_connected(g, 2, activation='softmax')
            g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

            m = tflearn.DNN(g)

            def do_fit():
                m.fit({"X_in": X, 'non_existent': X}, Y, n_epoch=30, snapshot_epoch=False)
            self.assertRaisesRegexp(Exception, "Feed dict asks for variable named 'non_existent' but no such variable is known to exist", do_fit)
Code Example #32
    def create_critic_network(self, target):
        inputs = tflearn.input_data(shape=[None] + self.s_dim + [1])
        action = tflearn.input_data(shape=[None] + self.a_dim)

        portfolio_inputs = None
        portfolio_reshaped = None
        if self.use_previous:
            portfolio_inputs = tflearn.input_data(shape=[None] + self.a_dim,
                                                  name='portfolio_input')
            portfolio_reshaped = tflearn.reshape(portfolio_inputs,
                                                 new_shape=[-1] + self.a_dim +
                                                 [1, 1])

        net, auxil = stock_predictor_critic(inputs, self.predictor_type,
                                            self.use_batch_norm,
                                            self.use_previous,
                                            portfolio_reshaped,
                                            auxil_commission, target)

        loss = 0
        future_y_inputs = None
        if self.auxiliary_commission > 0:
            future_y_inputs = tflearn.input_data(shape=[None] + self.a_dim,
                                                 name='portfolio_input')
            loss = tf.reduce_mean(
                tf.reduce_sum(tf.square(auxil - future_y_inputs), axis=-1))

        # Add the action tensor in the 2nd hidden layer
        # Use two temp layers to get the corresponding weights and biases
        t1 = tflearn.fully_connected(net, 64)
        t2 = tflearn.fully_connected(action, 64)

        net = tf.add(t1, t2)
        if self.use_batch_norm:
            net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.activations.relu(net)

        # linear layer connected to 1 output representing Q(s,a)
        # Weights are init to Uniform[-3e-3, 3e-3]
        w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        out = tflearn.fully_connected(net, 1, weights_init=w_init)
        return inputs, action, out, portfolio_inputs, loss, future_y_inputs
Code Example #33
def LSTM_decoder(z, tsamples, nsamples, reuse=False):
    with tf.variable_scope("LSTM_decoder", reuse=reuse):
        x = tflearn.fully_connected(z,
                                    nsamples * tsamples,
                                    activation='linear')
        x = tflearn.activations.leaky_relu(x, alpha=0.01)
        x = tf.reshape(x, shape=[-1, tsamples, nsamples])
        y = tflearn.lstm(x,
                         nsamples,
                         return_seq=True,
                         name="lstm1",
                         activation="linear")
        #x = tf.reshape(x, shape=[-1, tsamples, nsamples,1])
        #x = tflearn.conv_2d(x, 128, [2,9], activation='linear')
        #x = tflearn.activations.leaky_relu(x, alpha=0.01)
        #x = tflearn.conv_2d(x, 1, [1,3], activation='sigmoid')
        #x = tf.sigmoid(y)
        x = y
        x = tflearn.reshape(x, [-1, tsamples, nsamples], name="reshaped")
    return x, y
Code Example #34
File: a3c.py Project: TheRisingStar/pensieve
    def create_critic_network(self):
        with tf.variable_scope('critic'):
            inputs = tflearn.input_data(
                shape=[None, self.s_dim[0], self.s_dim[1]])
            split_0 = tflearn.fully_connected(inputs[:, 0:1, -1],
                                              64,
                                              activation='relu')
            split_1 = tflearn.fully_connected(inputs[:, 1:2, -1],
                                              64,
                                              activation='relu')
            split_2 = tflearn.fully_connected(inputs[:, 4:5, -1],
                                              64,
                                              activation='relu')

            reshape_0 = tflearn.reshape(inputs[:, 2:4, :],
                                        [-1, 2, self.s_dim[1], 1])
            split_3 = tflearn.conv_2d(reshape_0, 128, 3, activation='relu')

            split_4 = tflearn.conv_1d(inputs[:, 5:6, :],
                                      128,
                                      4,
                                      activation='relu')
            split_5 = tflearn.conv_1d(inputs[:, 6:7, :],
                                      128,
                                      4,
                                      activation='relu')

            flatten_0 = tflearn.flatten(split_3)
            flatten_1 = tflearn.flatten(split_4)
            flatten_2 = tflearn.flatten(split_5)

            merge_net = tflearn.merge(
                [split_0, split_1, split_2, flatten_0, flatten_1, flatten_2],
                'concat')

            dense_net_0 = tflearn.fully_connected(merge_net,
                                                  100,
                                                  activation='relu')
            out = tflearn.fully_connected(dense_net_0, 1, activation='linear')

            return inputs, out
Code Example #35
File: Util.py Project: LilMarc0/KaggleHARDataset
def cnn_lstm(eps, lr, shape):
    import tflearn
    inputs = tflearn.input_data(shape=shape)

    net = tflearn.time_distributed(inputs, tflearn.conv_2d,
                                   [32, 3, 1, 'same', 'tanh'])
    net = tflearn.time_distributed(net, tflearn.conv_2d,
                                   [32, 3, 1, 'same', 'tanh'])
    net = tflearn.time_distributed(net, tflearn.dropout, [0.8])

    net = tflearn.time_distributed(net, tflearn.avg_pool_2d, [2])

    print(net.get_shape().as_list())

    net = tflearn.reshape(net, [
        -1,
        net.get_shape().as_list()[1],
        net.get_shape().as_list()[2] * net.get_shape().as_list()[3] *
        net.get_shape().as_list()[4]
    ])
    net = tflearn.lstm(net, 256, activation='tanh')
    net = tflearn.dropout(net, 0.8)

    net = tflearn.fully_connected(net,
                                  256,
                                  activation='tanh',
                                  regularizer='L2')
    softmax = tflearn.fully_connected(net, 20, activation='softmax')

    # sgd = tflearn.SGD(learning_rate=lr, lr_decay=0.96, decay_step=15)
    adam = tflearn.optimizers.adam(epsilon=eps, learning_rate=lr)
    regression = tflearn.regression(softmax, optimizer=adam, metric='accuracy')

    model = tflearn.DNN(regression,
                        tensorboard_verbose=3,
                        tensorboard_dir='.',
                        best_val_accuracy=0.6,
                        checkpoint_path='./checkpoints2/cnn2',
                        max_checkpoints=1)
    return model
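
After the time-distributed convolution and pooling the tensor is [batch, timesteps, h, w, c]; the reshape above flattens each step's feature map into a vector so the LSTM receives [batch, timesteps, h * w * c]. A minimal NumPy sketch with hypothetical sizes:

import numpy as np

batch, timesteps, h, w, c = 2, 6, 14, 14, 32   # hypothetical sizes
net = np.zeros((batch, timesteps, h, w, c))
seqs = net.reshape(-1, net.shape[1], net.shape[2] * net.shape[3] * net.shape[4])
assert seqs.shape == (batch, timesteps, h * w * c)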
Code Example #36
def decoder(inputs):
    net = tflearn.fully_connected(inputs, 1200 * 32, name='DecFC1')
    net = tflearn.batch_normalization(net, name='DecBN1')
    net = tflearn.elu(net)
    print("========================")
    print("dec-L1", net.get_shape())
    print("========================")

    net = tflearn.reshape(net, (-1, side1 // 2**2, side2 // 2**2, 32))
    net = tflearn.conv_2d(net, 32, 3, name='DecConv1')
    net = tflearn.batch_normalization(net, name='DecBN2')
    net = tflearn.elu(net)
    print("========================")
    print("dec-L2", net.get_shape())
    print("========================")

    net = tflearn.conv_2d_transpose(net, 16, 3, [side1 // 2, side2 // 2],
                                        strides=2, padding='same', name='DecConvT1')
    net = tflearn.batch_normalization(net, name='DecBN3')
    net = tflearn.elu(net)
    print("========================")
    print("dec-L3", net.get_shape())
    print("========================")

    net = tflearn.conv_2d(net, 16, 3, name='DecConv2')
    net = tflearn.batch_normalization(net, name='DecBN4')
    net = tflearn.elu(net)
    print("========================")
    print("dec-L4", net.get_shape())
    print("========================")

    net = tflearn.conv_2d_transpose(net, channel, 3, [side1, side2],
                                        strides=2, padding='same', activation='sigmoid',
                                        name='DecConvT2')
    decode_layer = net
    print("========================")
    print("output layer", net.get_shape())
    print("========================")

    return [net,decode_layer]
Code Example #37
# # Define the convoluted ae architecture
# net = tflearn.input_data(shape=[None, d])
# #net = tflearn.fully_connected(net, 256)
# hidden_layer = tflearn.fully_connected(net, nb_feature)
# #net = tflearn.fully_connected(hidden_layer, 256)
# decoder = tflearn.fully_connected(hidden_layer, d, activation='sigmoid')


# Define the convoluted ae architecture another hidden layer
encode_1 = tflearn.input_data(shape=[None, side1,side2,1])
encode_2 = tflearn.fully_connected(encode_1, 256)
hidden_layer = tflearn.fully_connected(encode_2, nb_feature)
decode_1 = tflearn.fully_connected(hidden_layer, 256)
decoder_layer = tflearn.fully_connected(decode_1, d,activation="sigmoid")
decoder = tflearn.reshape(decoder_layer, (-1, side1, side2, 1))




# net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
#                          loss='mean_square', metric=None)

net = tflearn.regression_RobustAutoencoder(decoder,mue,hidden_layer,decoder_layer, optimizer='adam', learning_rate=0.001,
                         loss='rPCA_autoencoderLoss', metric=None,name="vanilla_autoencoder")
model = tflearn.DNN(net, tensorboard_verbose=0)




#define lamda set
Code Example #38
testX = np.asarray(testX)
testY = np.asarray(testY)
side = X.shape[1]
channel = X.shape[3]
noise_factor = 0.1
mue = 0.1
d = 3072
lamda_in_cost = 0.01
N_to_costfunc = np.zeros((200,d ))
# Define the convoluted ae architecture
net = tflearn.input_data(shape=[None, 32, 32, 3])
net = tflearn.fully_connected(net, 256)
hidden_layer = tflearn.fully_connected(net, nb_feature)
net = tflearn.fully_connected(hidden_layer, 256)
decoder = tflearn.fully_connected(net, 32*32*3,activation='sigmoid')
net = tflearn.reshape(decoder, (-1, 32, 32, 3))

# net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
#                          loss='mean_square', metric=None)

mue = 0.1
net = tflearn.regression_RobustAutoencoder(net,mue,hidden_layer,decoder, optimizer='adam', learning_rate=0.001,
                         loss='rPCA_autoencoderLoss_FobsquareLoss', metric=None,name="vanilla_autoencoder")

model = tflearn.DNN(net, tensorboard_verbose=0)



def addNoise(original, noise_factor):
    noisy = original + np.random.normal(loc=0.0, scale=noise_factor, size=original.shape)
    return np.clip(noisy, 0., 1.)