Code example #1
File: oop_tflearn.py Project: jmrinaldi/mhcPreds
    def model(self,
              type=None,
              mode="train",
              num_layers=2,
              state_size=32,
              learning_rate=0.001,
              tensorboard_verbose=3):

        net = tflearn.input_data(shape=[None, 9])
        net = tflearn.embedding(net,
                                input_dim=21,
                                output_dim=32,
                                weights_init='xavier')

        if type == 'bi_rnn':
            out_rnn = tflearn.bidirectional_rnn(net, tflearn.BasicLSTMCell(32),
                                                tflearn.BasicLSTMCell(32))

        elif type == 'basic_lstm':
            for i in range(4):
                net = tflearn.lstm(net, n_units=40, return_seq=True)
            #net = tflearn.lstm(net, n_units=40, return_seq=True)
            out_rnn = tflearn.lstm(net, n_units=40, return_seq=False)

        elif type == 'basic_rnn':
            out_rnn = tflearn.simple_rnn(net, 40)

        else:
            out_rnn = net

        net = tflearn.fully_connected(out_rnn, 100, activation='prelu')
        net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.dropout(net, 0.1)
        net = tflearn.fully_connected(net, 1, activation='sigmoid')
        """
        single_cell = getattr(tf.contrib.rnn, cell_type)(cell_size, state_is_tuple=True)

        if num_layers == 1:
            cell = single_cell
        else:
            cell = tf.contrib.rnn.MultiRNNCell([single_cell] * num_layers)
        """

        # Placeholder for the target variable (i.e. the trainY input)
        with tf.name_scope("TargetsData"):
            targetY = tf.placeholder(shape=[None, 1],
                                     dtype=tf.float32,
                                     name="Y")

        network = tflearn.regression(net,
                                     placeholder=targetY,
                                     optimizer=self.optimizer(learning_rate),
                                     learning_rate=learning_rate,
                                     loss=tflearn.mean_square(net, targetY),
                                     metric=self.accuracy(net, targetY),
                                     name='no_rnn')  # TF op names cannot contain spaces

        model = tflearn.DNN(network, tensorboard_verbose=tensorboard_verbose)
        return model
Code example #2
def layers(network, structre_array, model_ID, layer_number=0):
    layer_name = "layer_" + str(layer_number)

    if structre_array[0][0] == "conv":
        network = tflearn.conv_2d(network,
                                  structre_array[0][1],
                                  3,
                                  activation=structre_array[0][2],
                                  regularizer="L2",
                                  name=layer_name)
        model_ID += "c" + str(structre_array[0][1]) + "."

    elif structre_array[0][0] == "ann":
        network = tflearn.fully_connected(network,
                                          structre_array[0][1],
                                          activation=structre_array[0][2],
                                          bias=True,
                                          bias_init="Normal",
                                          name=layer_name)
        model_ID += "a" + str(structre_array[0][1]) + "."
        if len(structre_array[0]) > 3 and structre_array[0][3] == "True":
            network = tflearn.dropout(network, 0.8)

    elif structre_array[0][0] == "maxpool":
        network = tflearn.max_pool_2d(network,
                                      structre_array[0][1],
                                      name=layer_name)
        model_ID += "m" + str(structre_array[0][1]) + "."

    elif structre_array[0][0] == "rnn":
        network = tflearn.simple_rnn(network,
                                     structre_array[0][1],
                                     activation=structre_array[0][2],
                                     bias=True,
                                     name=layer_name)
        model_ID += "r" + str(structre_array[0][1]) + "."

    elif structre_array[0][0] == "lstm":
        if len(structre_array[0]) > 3 and structre_array[0][3] == "True":
            network = tflearn.lstm(network,
                                   structre_array[0][1],
                                   activation=structre_array[0][2],
                                   dropout=0.8,
                                   return_seq=True,
                                   name=layer_name)
        else:
            network = tflearn.lstm(network,
                                   structre_array[0][1],
                                   activation=structre_array[0][2],
                                   return_seq=False,
                                   name=layer_name)
        model_ID += "l" + str(structre_array[0][1]) + "."

    if len(structre_array) > 1:
        network, model_ID = layers(network,
                                   structre_array[1:],
                                   model_ID,
                                   layer_number=layer_number + 1)

    return network, model_ID
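
A minimal usage sketch (not part of the original project): judging from the branches above, each entry in structre_array is a tuple of (layer kind, size, activation, optional dropout flag). The structure list and input shape below are illustrative assumptions.

# Hypothetical driver for layers(); the tuple format is inferred from the branches above.
import tflearn

structure = [
    ("conv", 32, "relu"),          # conv_2d with 32 filters
    ("maxpool", 2),                # 2x2 max pooling
    ("ann", 128, "relu", "True"),  # fully connected, then dropout
]

net = tflearn.input_data(shape=[None, 28, 28, 1])
net, model_id = layers(net, structure, "")
print(model_id)  # -> "c32.m2.a128."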
Code example #3
    def create_network(self):
        self.net = tflearn.input_data(shape=[
            None, self.lenPre + self.lenSuff,
            len(self.string_to_number) + 1
        ])
        self.net = tflearn.simple_rnn(self.net, 128)
        self.net = tflearn.fully_connected(self.net, 128)
        self.net = tflearn.fully_connected(self.net, 128)
        self.net = tflearn.fully_connected(
            self.net, (len(self.string_to_number) + 1) * self.max_hole_size,
            activation='sigmoid')
        self.net = tflearn.regression(self.net)
        self.model = tflearn.DNN(self.net)
Code example #4
def LayerMaker(network, structreArray, layerNumber=0):
    layerName = "layer" + str(layerNumber)

    layerInfo = structreArray[layerNumber]

    if layerInfo[0] == "conv":
        network = tflearn.conv_2d(network,
                                  layerInfo[1],
                                  3,
                                  activation=layerInfo[2],
                                  regularizer="L2",
                                  name=layerName)

    elif layerInfo[0] == "ann":
        network = tflearn.fully_connected(network,
                                          layerInfo[1],
                                          activation=layerInfo[2],
                                          name=layerName)
        #network = tflearn.dropout(network, 0.8)

    elif layerInfo[0] == "maxpool":
        network = tflearn.max_pool_2d(network, layerInfo[1], name=layerName)

    elif layerInfo[0] == "rnn":
        network = tflearn.simple_rnn(network,
                                     layerInfo[1],
                                     activation=layerInfo[2],
                                     bias=True,
                                     name=layerName)

    elif layerInfo[0] == "lstm":
        if len(layerInfo) > 3 and layerInfo[3] == "True":
            network = tflearn.lstm(network,
                                   layerInfo[1],
                                   activation=layerInfo[2],
                                   dropout=0.8,
                                   return_seq=True,
                                   name=layerName)
        else:
            network = tflearn.lstm(network,
                                   layerInfo[1],
                                   activation=layerInfo[2],
                                   return_seq=False,
                                   name=layerName)

    if len(structreArray) > layerNumber + 1:
        network = LayerMaker(network,
                             structreArray,
                             layerNumber=layerNumber + 1)

    return network
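
A comparable sketch for LayerMaker (again hypothetical): it consumes the same kind of structure list, but indexes it by layerNumber instead of recursing on a slice, and returns only the network.

# Hypothetical call; the tuple format is inferred from the branches above.
import tflearn

structure = [
    ("lstm", 64, "tanh", "True"),  # return_seq=True feeds a sequence to the next LSTM
    ("lstm", 64, "tanh"),          # the final LSTM collapses the sequence
    ("ann", 10, "softmax"),
]

net = tflearn.input_data(shape=[None, 20, 8])  # batch x time steps x features
net = LayerMaker(net, structure)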
Code example #5
    def add_deep_layers(self, net, model_type, out_embedding_dim, layer_size, n_layers):

        if model_type == 'embedding_rnn':
            out_rnn = tflearn.embedding(net, input_dim=21, output_dim=out_embedding_dim, weights_init='xavier')

        elif model_type == 'bi_rnn':
            out_rnn = tflearn.bidirectional_rnn(net, tflearn.BasicLSTMCell(layer_size), tflearn.BasicLSTMCell(layer_size))

        elif model_type == 'deep_rnn':
            for i in range(n_layers):
                net = tflearn.lstm(net, n_units=layer_size, return_seq=True)
            out_rnn = tflearn.lstm(net, layer_size)

        elif model_type == 'basic_rnn':
            out_rnn = tflearn.simple_rnn(net, layer_size)

        else:
            out_rnn = net

        return out_rnn
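
Because the 'bi_rnn' and 'deep_rnn' branches expect a 3-D sequence tensor, a 2-D integer input would typically pass through the 'embedding_rnn' branch first, mirroring code example #1. A minimal sketch; builder is an assumed instance of the enclosing class:

# Hypothetical driver; `builder` stands in for an instance of the class defining add_deep_layers.
import tflearn

net = tflearn.input_data(shape=[None, 9])
net = builder.add_deep_layers(net, 'embedding_rnn', out_embedding_dim=32,
                              layer_size=32, n_layers=2)  # 2-D ints -> 3-D sequence
net = builder.add_deep_layers(net, 'deep_rnn', out_embedding_dim=32,
                              layer_size=32, n_layers=2)  # stacked LSTMs
net = tflearn.fully_connected(net, 1, activation='sigmoid')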
Code example #6
# Setting batches
validation_batch = batch_generator.mfcc_batch_generator(
    gv.validation_batch_size, gv.height)
x, y, z = next(validation_batch)
testX, testY = x, y

training_batch = batch_generator.mfcc_batch_generator(gv.training_batch_size,
                                                      gv.height,
                                                      exclude=z)

# Network building
net = tflearn.input_data([None, gv.width, gv.height])
if mode == 'gru':
    net = tflearn.gru(net, 128, dropout=0.8)
elif mode == 'rnn':
    net = tflearn.simple_rnn(net, 128, dropout=0.8)
else:
    net = tflearn.lstm(net, 128, dropout=0.8)
net = tflearn.fully_connected(net, gv.classes, activation='softmax')
net = tflearn.regression(net,
                         optimizer='adam',
                         learning_rate=gv.learning_rate,
                         loss='categorical_crossentropy')

# Training
model = tflearn.DNN(net, tensorboard_verbose=gv.tensorboard_verbosity)
for times in range(gv.training_iters):
    X, Y, Z = next(training_batch)
    trainX, trainY = X, Y
    model.fit(trainX,
              trainY)  # remaining fit arguments truncated in the source excerpt
Code example #7
import os

import h5py
import numpy as np
import tflearn


def load_h5(filename):
    h5f = h5py.File(filename, 'r')
    X = h5f['X']
    Y = h5f['Y']
    return X, Y


os.environ['CUDA_VISIBLE_DEVICES'] = '2'
X, Y = load_h5('train_720p_vmaf.h5')
testX, testY = load_h5('test_720p_vmaf.h5')
X = np.reshape(X, (-1, 25, 36 * 64 * 3))
testX = np.reshape(testX, (-1, 25, 36 * 64 * 3))

input_layer = tflearn.input_data(shape=[None, 25, 36 * 64 * 3])
lstm1 = tflearn.simple_rnn(input_layer, 64)
out = tflearn.fully_connected(lstm1, 5, activation='sigmoid')

net = tflearn.regression(out,
                         optimizer='adam',
                         loss='mean_square',
                         learning_rate=1e-3)

model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(X,
          Y,
          n_epoch=100,
          validation_set=(testX, testY),
          show_metric=False,
          run_id="rnn_model")
Code example #8
    def add_deep_layers(net, layer_size):
        return tflearn.simple_rnn(net, layer_size)
Code example #9
def main():
    mode = 'k_time_data1'.upper()

    print "Running training in the {} setting".format(mode)
    data_dir = mode

    add_train = np.load(os.path.join(data_dir, 'train.additions.npy'))
    q_train = np.load(os.path.join(data_dir, 'train.questions.npy'))
    a_train = np.load(os.path.join(data_dir, 'train.answers.npy'))
    y_train = np.load(os.path.join(data_dir, 'train.labels.npy'))
    qids_train = np.load(os.path.join(data_dir, 'train.qids.npy'))

    add_dev = np.load(os.path.join(data_dir, 'dev.additions.npy'))
    q_dev = np.load(os.path.join(data_dir, 'dev.questions.npy'))
    a_dev = np.load(os.path.join(data_dir, 'dev.answers.npy'))
    y_dev = np.load(os.path.join(data_dir, 'dev.labels.npy'))
    qids_dev = np.load(os.path.join(data_dir, 'dev.qids.npy'))

    add_test = np.load(os.path.join(data_dir, 'test.additions.npy'))
    q_test = np.load(os.path.join(data_dir, 'test.questions.npy'))
    a_test = np.load(os.path.join(data_dir, 'test.answers.npy'))
    y_test = np.load(os.path.join(data_dir, 'test.labels.npy'))
    qids_test = np.load(os.path.join(data_dir, 'test.qids.npy'))

    # Load word2vec embeddings
    fname = os.path.join(data_dir, 'emb_vectors.skip.1124.4m.10w.npy')

    print "Loading word embeddings from", fname
    vocab_emb = np.load(fname)
    print "word embedding shape: " + str(vocab_emb.shape)

    #(vocabulary_size, embedding_size) = vocab_emb.shape
    #embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
    #embed = tf.nn.embedding_lookup(embeddings, train_inputs)


    X_train, Y_train = form_xy(q_train, a_train, y_train, add_train, qids_train, vocab_emb)
    X_dev, Y_dev = form_xy(q_dev, a_dev, y_dev, add_dev, qids_dev, vocab_emb)
    X_test, Y_test = form_xy(q_test, a_test, y_test, add_test, qids_test, vocab_emb)

    print "train size " + str(X_train.shape)
    print "dev size " + str(X_dev.shape)
    print "test size " + str(X_test.shape)

    model_name = "./" + mode + "-rnn.model"
    seq_dim = position_num + 1
    feature_dim = vocab_emb.shape[1] + add_feature_num
    net = tflearn.input_data(shape=[None, seq_dim, feature_dim])
    #net = tflearn.lstm(net, 32, return_seq=True)
    net = tflearn.simple_rnn(net, 512)
    net = tflearn.fully_connected(net, 512, activation='tanh')
    net = tflearn.fully_connected(net, position_num, activation='sigmoid')
    net = tflearn.regression(net, optimizer='SGD', loss='categorical_crossentropy', name="output1")
    model = tflearn.DNN(net, tensorboard_verbose=3)

    # Train the model
    model.fit(X_train, Y_train, n_epoch=15, validation_set=(X_dev, Y_dev), show_metric=True, snapshot_step=1000)
    model.save(model_name)
    #model.load(model_name)

    # Test the model
    predict_Y_test = batch_predict(model, X_test)
    perp, perp_str = perplexity_score(y_test, predict_Y_test)
    print(perp_str)

    output_file_name = "./" + mode + "-rnn.predict.result"
    save_predict(y_test, predict_Y_test, output_file_name)
Code example #10
import tflearn
from tflearn.data_utils import to_categorical, pad_sequences

from data_hydraulic import *

trainX, trainY, testX, testY = getHydraulicData()

# Hyperparameters
neurons_num = 128     # Number of neurons in the RNN layer
keep_prob = 0.7       # Keep probability for dropout regularization
learning_rate = 0.01  # Learning rate for the Adam optimizer
batch_size = 32       # Batch size
n_epoch = 1000        # Number of epochs


# Data preprocessing: pad sequences and convert labels to one-hot vectors
trainX = pad_sequences(trainX, maxlen=60, value=0.)
testX = pad_sequences(testX, maxlen=60, value=0.)
trainY = to_categorical(trainY, 2)
testY = to_categorical(testY, 2)

# Build the network
net = tflearn.input_data([None, 60])
net = tflearn.embedding(net, input_dim=10000, output_dim=128)
net = tflearn.simple_rnn(net, neurons_num, dropout=keep_prob)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net,
                         optimizer='adam',
                         learning_rate=learning_rate,
                         loss='categorical_crossentropy')

model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(trainX,
          trainY,
          validation_set=(testX, testY),
          show_metric=True,
          batch_size=batch_size,
          n_epoch=n_epoch)

model.save('./model.tfl')
Code example #11
    def _create(self, prefix=''):
        shape = [None] + [i for i in self.i_dim]
        inputs = tflearn.input_data(shape=shape)
        layer = inputs

        #        init_state = tf.get_variable('{}Initstate'.format(prefix), [1, 6],
        #                                 initializer=tf.constant_initializer(0.0))
        #        init_state = tf.tile(init_state, [2, 1])

        for i in range(self.num_hidden_layers):
            weights_init = tflearn.initializations.uniform(
                minval=-1 / sqrt(self.layer_size[i]),
                maxval=1 / sqrt(self.layer_size[i]))

            if 'dropout' in self.layer_other[i + 1]:
                dropout = self.dropout
            else:
                dropout = None

            if self.layer_type[i + 1] == 'fc':
                new_layer = tflearn.fully_connected(layer,
                                                    self.layer_size[i + 1],
                                                    name="{}Layer{}".format(
                                                        prefix, i),
                                                    weights_init=weights_init)
            elif self.layer_type[i + 1] == 'rnn':
                new_layer = tflearn.simple_rnn(
                    layer,
                    self.layer_size[i + 1],
                    name="{}Layer{}".format(prefix, i),
                    weights_init=weights_init,
                    return_seq=False,
                    activation='linear',
                    dropout=dropout,
                    #initial_state=init_state,
                    dynamic=True)
            elif self.layer_type[i + 1] == 'gru':
                new_layer = tflearn.gru(
                    layer,
                    self.layer_size[i + 1],
                    name="{}Layer{}".format(prefix, i),
                    weights_init=weights_init,
                    return_seq=False,
                    activation='linear',
                    dropout=dropout,
                    #initial_state=init_state,
                    dynamic=True)
            elif self.layer_type[i + 1] == 'lstm':
                new_layer = tflearn.lstm(layer,
                                         self.layer_size[i + 1],
                                         name="{}Layer{}".format(prefix, i),
                                         weights_init=weights_init,
                                         return_seq=False,
                                         activation='linear',
                                         dropout=dropout,
                                         dynamic=True)
            else:
                raise ValueError('Unsupported layer {}'.format(i))

            if self.batch_norm:
                new_layer = tflearn.layers.normalization.batch_normalization(
                    new_layer, name="{}Layer{}_norm".format(prefix, i))

            if self.layer_activation[i + 1] == 'linear':
                new_layer = tflearn.activations.linear(new_layer)
            elif self.layer_activation[i + 1] == 'relu':
                new_layer = tflearn.activations.relu(new_layer)
            elif self.layer_activation[i + 1] == 'tanh':
                new_layer = tflearn.activations.tanh(new_layer)
            elif self.layer_activation[i + 1] == 'sigmoid':
                new_layer = tflearn.activations.sigmoid(new_layer)

            if i < self.num_hidden_layers - 1:
                layer = new_layer
        return inputs, new_layer
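
The i + 1 indexing above implies the per-layer configuration lists carry an extra entry at index 0 describing the input. A hypothetical configuration consistent with that indexing (every name and value here is an assumption, not taken from the project):

# Index 0 describes the input and is skipped by the loop;
# layer_size[i] sizes the weight init while layer_size[i + 1] sizes the new layer.
self.layer_type = ['input', 'rnn', 'fc']
self.layer_size = [state_dim, 64, action_dim]   # state_dim/action_dim are placeholders
self.layer_activation = ['linear', 'tanh', 'tanh']
self.layer_other = [[], ['dropout'], []]
self.num_hidden_layers = 2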
Code example #12
    def _create_critic(self, prefix=''):
        inputs_shape = [None] + [i for i in self.i_dim]
        inputs = tflearn.input_data(shape=inputs_shape)

        action_shape = [None] + [i for i in self.a_dim]
        action = tflearn.input_data(shape=action_shape)

        layer = inputs
        for i in range(self.num_hidden_layers):
            weights_init = tflearn.initializations.uniform(
                minval=-1 / sqrt(self.layer_size[i]),
                maxval=1 / sqrt(self.layer_size[i]))

            if 'dropout' in self.layer_other[i + 1]:
                dropout = self.dropout
            else:
                dropout = None

            if self.layer_type[i + 1] == 'fc':
                new_layer = tflearn.fully_connected(layer,
                                                    self.layer_size[i + 1],
                                                    name="{}Layer{}".format(
                                                        prefix, i),
                                                    weights_init=weights_init)
            elif self.layer_type[i + 1] == 'rnn':
                new_layer = tflearn.simple_rnn(layer,
                                               self.layer_size[i + 1],
                                               name="{}Layer{}".format(
                                                   prefix, i),
                                               weights_init=weights_init,
                                               return_seq=False,
                                               activation='linear',
                                               dropout=dropout,
                                               dynamic=True)
            else:
                raise ValueError('Unsupported layer {}'.format(i))

            if i == self.num_hidden_layers - 2:  # stop: the final layer below merges in the action input
                break

            if self.batch_norm:
                new_layer = tflearn.layers.normalization.batch_normalization(
                    new_layer, name="{}Layer{}_norm".format(prefix, i))

            if self.layer_activation[i + 1] == 'linear':
                new_layer = tflearn.activations.linear(new_layer)
            elif self.layer_activation[i + 1] == 'relu':
                new_layer = tflearn.activations.relu(new_layer)
            elif self.layer_activation[i + 1] == 'tanh':
                new_layer = tflearn.activations.tanh(new_layer)
            elif self.layer_activation[i + 1] == 'sigmoid':
                new_layer = tflearn.activations.sigmoid(new_layer)

            if i < self.num_hidden_layers - 1:
                layer = new_layer

        action_init = tflearn.initializations.uniform(
            minval=-1 / sqrt(self.layer_size[-3]),
            maxval=1 / sqrt(self.layer_size[-3]))
        if self.layer_type[-1] == 'fc':
            action_layer = tflearn.fully_connected(
                action,
                self.layer_size[-1],
                name="{}LayerAction".format(prefix),
                weights_init=action_init)
        else:
            raise ValueError('Unsupported actor layer')

        if self.layer_activation[-1] == 'relu':
            net = tflearn.activation(tf.matmul(layer, new_layer.W) +
                                     tf.matmul(action, action_layer.W) +
                                     action_layer.b,
                                     activation='relu')
        else:
            raise ValueError('Unsupported actor activation')

        # linear layer connected to 1 output representing Q(s,a)
        # Weights are init to Uniform[-3e-3, 3e-3]
        w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        new_layer = tflearn.fully_connected(net,
                                            1,
                                            weights_init=w_init,
                                            name="{}Output".format(prefix))

        return inputs, action, new_layer
Code example #13
File: test_sin.py Project: martinrises/Toothless-Six
seq = []
next_val = []

for i in range(0, len(x) - steps_of_history, steps_in_future):
    seq.append(x[i:i + steps_of_history])
    next_val.append(x[i + steps_of_history])

seq = np.reshape(seq, [-1, steps_of_history, 1])
next_val = np.reshape(next_val, [-1, 1])
print(np.shape(seq))

trainX = np.array(seq)
trainY = np.array(next_val)

# Network building
net = tflearn.input_data(shape=[None, steps_of_history, 1])
net = tflearn.simple_rnn(net, n_units=32, return_seq=False)
net = tflearn.fully_connected(net, 1, activation='linear')
net = tflearn.regression(net,
                         optimizer='sgd',
                         loss='mean_square',
                         learning_rate=0.1)

# Training
model = tflearn.DNN(net, clip_gradients=0.0, tensorboard_verbose=0)
model.fit(trainX, trainY, n_epoch=15, validation_set=0.1, batch_size=128)

# Testing
x = np.sin(np.arange(20 * math.pi, 24 * math.pi, step_radians))

seq = []
Code example #14
File: main_imdb.py Project: x0rb0t/FRU
def train(params): 

    # fix random seed 
    np.random.seed(params.random_seed)
    # set random seed before build the graph 
    tf.set_random_seed(params.random_seed)

    # IMDB Dataset loading
    os.system("mkdir -p datasets")
    train, valid, test = imdb.load_data(path='datasets/imdb.pkl', n_words=10000,
                                        valid_portion=0)
    trainX, trainY = train
    validX, validY = valid
    testX, testY = test

    total_len = 0 
    max_len = 0
    for x in trainX: 
        total_len += len(x)
        max_len = max(max_len, len(x))
    print("average sequence length = ", total_len/len(trainX))
    print("max sequence length = ", max_len)

    maxlen = int(params.dataset[len("imdb."):])
    # Data preprocessing
    # Sequence padding
    trainX = pad_sequences(trainX, maxlen=maxlen, value=0.)
    validX = pad_sequences(validX, maxlen=maxlen, value=0.)
    testX = pad_sequences(testX, maxlen=maxlen, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY)
    validY = to_categorical(validY)
    testY = to_categorical(testY)

    print("trainX.shape = ", len(trainX))
    print("trainY.shape = ", len(trainY))
    print("testX.shape = ", len(testX))
    print("testY.shape = ", len(testY))

    params.time_steps = maxlen
    params.input_size = 1
    params.output_size = 2
    params.regression_flag = False 
    #params.freqs = np.logspace(np.log2(0.25), np.log2(params.time_steps/3), 5-1, base=2).tolist()
    #params.freqs.append(0.0)
    #params.freqs.sort()
    #params.freqs = np.linspace(0, maxlen, 40).tolist()

    # Network building
    net = tflearn.input_data([None, maxlen])
    net = tflearn.embedding(net, input_dim=10000, output_dim=params.num_units)

    if params.cell == "RNN": 
        net = tflearn.simple_rnn(net, n_units=params.num_units, dropout=params.dropout_keep_rate)
    elif params.cell == "LSTM": 
        net = tflearn.lstm(net, n_units=params.num_units, dropout=params.dropout_keep_rate)
    elif params.cell == "SRU": 
        params.phi_size = 200 # only for SRU 
        net = tflearn.sru(net, 
                num_stats=params.phi_size, 
                mavg_alphas=tf.get_variable('alphas', initializer=tf.constant(params.alphas), trainable=False), 
                output_dims=params.num_units, 
                recur_dims=params.r_size, 
                dropout=params.dropout_keep_rate
                )
    elif params.cell == "FRU": 
        params.phi_size = 10 # only for FRU 
        net = tflearn.fru(net, 
                num_stats=params.phi_size, 
                freqs=params.freqs, 
                freqs_mask=params.freqs_mask, 
                seq_len=params.time_steps, 
                output_dims=params.num_units, 
                recur_dims=params.r_size, 
                dropout=params.dropout_keep_rate
                )
    else:
        assert 0, "unsupported cell %s" % (params.cell)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net,
                             optimizer=AdamDecay(learning_rate=params.initial_learning_rate,
                                                 lr_decay=params.lr_decay,
                                                 decay_step=1000),
                             learning_rate=params.initial_learning_rate,
                             loss='categorical_crossentropy')

    print("parameters = ", params)

    print("trainable_variables = ", '\n'.join([str(v) for v in tf.trainable_variables()]))
    # summarize #vars 
    num_vars = 0 
    for var in tf.trainable_variables(): 
        num = 1
        for dim in var.get_shape(): 
            num *= dim.value 
        num_vars += num 
    print("# trainable_variables = ", num_vars)

    # Training
    best_checkpoint_path = None
    if validX: 
        assert 0 
        dt = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        best_checkpoint_path = "results/%s/" % (dt)
        os.mkdir(best_checkpoint_path)
        validation_set = (validX, validY)
    else:
        validation_set = (testX, testY)
    model = tflearn.DNN(net, tensorboard_verbose=0, best_checkpoint_path=best_checkpoint_path)
    model.fit(trainX, trainY, validation_set=validation_set, n_epoch=params.num_epochs, show_metric=True,
              batch_size=params.batch_size, snapshot_step=100, validation_batch_size=params.batch_size*4)
    if validX: 
        # load best checkpoint 
        best_checkpoint = None
        with open(best_checkpoint_path+"/checkpoint", "r") as f: 
            for line in f: 
                line = line.strip()
                if line.startswith("model_checkpoint_path:"):
                    best_checkpoint = line.split()[1].strip()[1:-1]
                    break 
        print("best_checkpoint = ", best_checkpoint)
        model.load(best_checkpoint)
        print("best validation = ", model.evaluate(validX, validY))
    print("final evaluation = ", model.evaluate(testX, testY))
Code example #15
import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
from data.data_glass import *

trainX, trainY, testX, testY = getGlassData()

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=10, value=0.)
testX = pad_sequences(testX, maxlen=10, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY, 6)
testY = to_categorical(testY, 6)

net = tflearn.input_data([None, 10])
net = tflearn.embedding(net, input_dim=10000, output_dim=128)
net = tflearn.simple_rnn(net, 32, dropout=0.5)
net = tflearn.fully_connected(net, 6, activation='softmax')
net = tflearn.regression(net,
                         optimizer='adam',
                         learning_rate=0.0001,
                         loss='categorical_crossentropy')

model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(trainX,
          trainY,
          validation_set=(testX, testY),
          show_metric=True,
          batch_size=32,
          n_epoch=35)

model.save('./saved/tf/rnn/model.tfl')