Example #1
def test_sequential_model_saving_2():
    # test with funkier config
    model = Sequential()
    model.add(Dense(2, input_dim=3))
    model.add(RepeatVector(3))
    model.add(TimeDistributed(Dense(3)))
    model.compile(loss=objectives.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy],
                  sample_weight_mode='temporal')
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    fname = 'tmp_' + str(np.random.randint(10000)) + '.h5'
    save_model(model, fname)

    new_model = load_model(fname)
    os.remove(fname)

    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test that new updates are the same with both models
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)
    new_model.train_on_batch(x, y)
    out = model.predict(x)
    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
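# The saving tests in these examples assume imports along these lines
# (a sketch; Example #1's `objectives` is the pre-1.0 Keras name for what
# later became `losses`):
import os
import tempfile

import numpy as np
from numpy.testing import assert_allclose

from keras import losses, metrics, optimizers
from keras.models import Sequential, save_model, load_model
from keras.layers import Dense, RepeatVector, TimeDistributed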
Example #2
def test_sequential_model_saving():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    model.add(RepeatVector(3))
    model.add(TimeDistributed(Dense(3)))
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy],
                  sample_weight_mode='temporal')
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    new_model = load_model(fname)
    os.remove(fname)

    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test that new updates are the same with both models
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)
    new_model.train_on_batch(x, y)
    out = model.predict(x)
    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
Example #3
def test_merge_overlap():
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, left], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss < 0.7)
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)

    fname = 'test_merge_overlap_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss == nloss)
Example #4
def test_nested_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2, validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test, verbose=0)

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
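# _get_test_data is not shown here; a minimal sketch, assuming random data
# shaped by the module-level input_dim / num_class globals:
def _get_test_data(num_train=500, num_test=100):
    from keras.utils.np_utils import to_categorical
    x_train = np.random.random((num_train, input_dim))
    x_test = np.random.random((num_test, input_dim))
    y_train = to_categorical(np.random.randint(num_class, size=num_train))
    y_test = to_categorical(np.random.randint(num_class, size=num_test))
    return (x_train, y_train), (x_test, y_test)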
Example #5
def test_sequential_model_saving():
    model = Sequential()
    model.add(Dense(2, input_dim=3))
    model.add(Dense(3))
    model.compile(loss='mse', optimizer='rmsprop', metrics=['acc'])

    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    fname = 'tmp_' + str(np.random.randint(10000)) + '.h5'
    save_model(model, fname)

    new_model = load_model(fname)

    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test that new updates are the same with both models
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)
    new_model.train_on_batch(x, y)
    out = model.predict(x)
    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test load_weights on model file
    model.load_weights(fname)
    os.remove(fname)
Example #6
def test_recursive():
    # test layer-like API

    graph = containers.Graph()
    graph.add_input(name='input1', input_shape=(32,))
    graph.add_node(Dense(16), name='dense1', input='input1')
    graph.add_node(Dense(4), name='dense2', input='input1')
    graph.add_node(Dense(4), name='dense3', input='dense1')
    graph.add_output(name='output1', inputs=['dense2', 'dense3'],
                     merge_mode='sum')

    seq = Sequential()
    seq.add(Dense(32, input_shape=(32,)))
    seq.add(graph)
    seq.add(Dense(4))

    seq.compile('rmsprop', 'mse')

    seq.fit(X_train_graph, y_train_graph, batch_size=10, nb_epoch=10)
    loss = seq.evaluate(X_test_graph, y_test_graph)
    assert(loss < 2.5)

    loss = seq.evaluate(X_test_graph, y_test_graph, show_accuracy=True)
    seq.predict(X_test_graph)
    seq.get_config(verbose=1)
Example #7
def save_bottleneck_features():
    model = Sequential()
    model.add(Convolution2D(32, 3, 3, input_shape=(1, img_width, img_height)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.1))

    model.add(Convolution2D(64, 2, 2))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))

    model.add(Convolution2D(128, 2, 2))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.3))

    assert os.path.exists(weights_path), 'Model weights not found (see "weights_path" variable in script).'
    f = h5py.File(weights_path, 'r')
    layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]
    weight_value_tuples = []
    for k, name in enumerate(layer_names):
        if k >= len(model.layers):
            break
        g = f[name]
        weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
        if len(weight_names):
            weight_values = [g[weight_name] for weight_name in weight_names]
            layer = model.layers[k]
            symbolic_weights = layer.trainable_weights + layer.non_trainable_weights
            if len(weight_values) != len(symbolic_weights):
                raise Exception('Layer #' + str(k) +
                                ' (named "' + layer.name +
                                '" in the current model) was found to '
                                'correspond to layer ' + name +
                                ' in the save file. '
                                'However the new layer ' + layer.name +
                                ' expects ' + str(len(symbolic_weights)) +
                                ' weights, but the saved weights have ' +
                                str(len(weight_values)) +
                                ' elements.')
            weight_value_tuples += zip(symbolic_weights, weight_values)
    K.batch_set_value(weight_value_tuples)
    f.close()
    print('Model loaded.')

    X, y = load2d()
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
    X_flipped, y_flipped = flip_image(X_train, y_train)

    X_train = np.vstack((X_train, X_flipped))
    y_train = np.vstack((y_train, y_flipped))

    bottleneck_features_train = model.predict(X_train)
    np.save('bottleneck_features_train.npy', bottleneck_features_train)
    np.save('label_train.npy', y_train)

    bottleneck_features_validation = model.predict(X_val)
    np.save('bottleneck_features_validation.npy', bottleneck_features_validation)
    np.save('label_validation.npy', y_val)
Example #8
def test_autoencoder_advanced():
    encoder = containers.Sequential([core.Dense(5, input_shape=(10,))])
    decoder = containers.Sequential([core.Dense(10, input_shape=(5,))])
    X_train = np.random.random((100, 10))
    X_test = np.random.random((100, 10))

    model = Sequential()
    model.add(core.Dense(output_dim=10, input_dim=10))
    autoencoder = core.AutoEncoder(encoder=encoder, decoder=decoder,
                                   output_reconstruction=True)
    model.add(autoencoder)

    # training the autoencoder:
    model.compile(optimizer='sgd', loss='mse')
    assert autoencoder.output_reconstruction

    model.fit(X_train, X_train, nb_epoch=1, batch_size=32)

    # predicting compressed representations of inputs:
    autoencoder.output_reconstruction = False  # the autoencoder has to be recompiled after modifying this property
    assert not autoencoder.output_reconstruction
    model.compile(optimizer='sgd', loss='mse')
    representations = model.predict(X_test)
    assert representations.shape == (100, 5)

    # the model is still trainable, although it now expects compressed representations as targets:
    model.fit(X_test, representations, nb_epoch=1, batch_size=32)

    # to keep training against the original inputs, just switch back output_reconstruction to True:
    autoencoder.output_reconstruction = True
    model.compile(optimizer='sgd', loss='mse')
    model.fit(X_train, X_train, nb_epoch=1)

    reconstructions = model.predict(X_test)
    assert reconstructions.shape == (100, 10)
Example #9
class QNetwork:
    def __init__(self, learning_rate=0.01, state_size=2, action_size=3, hidden_size=10):
        self.model = Sequential()
        self.model.add(Dense(hidden_size, activation='relu', input_dim=state_size))
        self.model.add(Dense(hidden_size, activation='relu'))
        self.model.add(Dense(action_size, activation='linear'))
        self.optimizer = Adam(lr=learning_rate)  # Adam is used to reduce the error
        # self.model.compile(loss='mse', optimizer=self.optimizer)
        self.model.compile(loss=huberloss, optimizer=self.optimizer)

    # Learn the weights
    def replay(self, memory, batch_size, gamma, targetQN):
        inputs = np.zeros((batch_size, 2))
        targets = np.zeros((batch_size, 3))
        mini_batch = memory.sample(batch_size)

        for i, (state_b, action_b, reward_b, next_state_b) in enumerate(mini_batch):
            inputs[i:i + 1] = state_b
            target = reward_b

            if not (next_state_b == np.zeros(state_b.shape)).all(axis=1):
                # Value calculation (the action-selection Q-network and the
                # value-estimation Q-network are kept separate so DDQN is also supported)
                retmainQs = self.model.predict(next_state_b)[0]
                next_action = np.argmax(retmainQs)  # choose the action with the highest predicted reward
                target = reward_b + gamma * targetQN.model.predict(next_state_b)[0][next_action]

            targets[i] = self.model.predict(state_b)    # Q-network output
            targets[i][action_b] = target               # training target

        # Per advice from shiglay, fit() is called once, outside the for loop
        self.model.fit(inputs, targets, epochs=1, verbose=0)  # epochs: passes over the batch; verbose=0: no logging
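# `huberloss` above is not defined in this snippet; a minimal sketch,
# assuming a standard Huber loss (delta = 1.0) written with the Keras backend:
import keras.backend as K

def huberloss(y_true, y_pred):
    err = y_true - y_pred
    quadratic = 0.5 * K.square(err)
    linear = K.abs(err) - 0.5
    return K.mean(K.switch(K.abs(err) < 1.0, quadratic, linear))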
Example #10
def _test_smoke(channel_order=None):

    from kfs.layers.convolutional import Convolution2DEnergy_TemporalCorrelation
    from keras.models import Sequential
    #from keras.layers import Flatten, Dense
    input_shape = (12, 3, 64, 64)
    if channel_order is None:
        channel_order = K.image_data_format()
    if channel_order == 'channels_last':
        input_shape = (12, 64, 64, 3)

    rng = np.random.RandomState(42)
    datums = rng.randn(6, 12, 3, 64, 64).astype('float32')
    if channel_order == 'channels_last':
        datums = datums.transpose(0, 1, 3, 4, 2)

    nn2 = Sequential()
    nn2.add(Convolution2DEnergy_TemporalCorrelation(8, 16, 4, (5, 5), 7,
                                            padding='same',
                                            temporal_kernel_size=5,
                                            input_shape=input_shape))
    nn2.compile(loss='mse', optimizer='sgd')

    pred2 = nn2.predict(datums)

    return nn2, pred2
Example #11
model.add(Dense(200, activation='relu'))
model.add(Dense(1))

print(model.summary())

adam = keras.optimizers.Adam(lr=0.001,
                             beta_1=0.9,
                             beta_2=0.999,
                             epsilon=None,
                             decay=0.0,
                             amsgrad=False)
model.compile(loss='mean_absolute_error', optimizer=adam, metrics=['accuracy'])
model.fit(X_train, Y_train, epochs=30, batch_size=2048)

print("Predicting ratings...")

Y_predict = model.predict(X_train)
Y_predict = Y_predict.reshape(-1)  # flatten to 1-D

MAE = np.sum(np.abs(Y_predict - Y_train)) / Y_predict.shape[0]

print("The Mean Absolute Error for Nerual Network Is: " + str(MAE))

i = int(float(X_train.shape[1]) / 2.0)
j = int(X_train.shape[1])

Y_predict2 = np.sum((X_train[:, :i] * X_train[:, i:j]), axis=1)
MAE2 = np.sum(np.abs(Y_predict2 - Y_train)) / Y_predict.shape[0]

print("The Mean Absolute Error for Simple Dot Product: " + str(MAE2))
Example #12
# Predicting the dog or cat

#from keras.preprocessing import image
#import numpy as np
#
#test_image = image.load_img('dataset/test_set/dogs/dog.4002.jpg', target_size=(64, 64))
#test_image = image.img_to_array(test_image)
#test_image = np.expand_dims(test_image, axis=0)
#
#classes = classifier.predict(test_image)
#print(classes[0][0])
#if classes[0][0] >= 0.5:
#    print("dog")
#else:
#    print("cat")

from keras.preprocessing import image
import numpy as np

test_image = image.load_img('dataset/test_set/dogs/dog.4002.jpg',
                            target_size=(64, 64))
test_image = image.img_to_array(test_image)
test_image = np.array(test_image).astype('float32') / 255
test_image = np.expand_dims(test_image, axis=0)
classes = classifier.predict(test_image)
print(classes[0][0])
if classes[0][0] >= 0.5:
    print("dog")
else:
    print("cat")
Example #13
         inner_init='glorot_normal',
         activation='sigmoid'))
model.compile(loss='cosine_proximity', optimizer='adam', metrics=['accuracy'])

model.fit(x_train, y_train, nb_epoch=500, validation_data=(x_test, y_test))
model.save('LSTM500.h5')
model.fit(x_train, y_train, nb_epoch=500, validation_data=(x_test, y_test))
model.save('LSTM1000.h5')
model.fit(x_train, y_train, nb_epoch=500, validation_data=(x_test, y_test))
model.save('LSTM1500.h5')
model.fit(x_train, y_train, nb_epoch=500, validation_data=(x_test, y_test))
model.save('LSTM2000.h5')
model.fit(x_train, y_train, nb_epoch=500, validation_data=(x_test, y_test))
model.save('LSTM2500.h5')
model.fit(x_train, y_train, nb_epoch=500, validation_data=(x_test, y_test))
model.save('LSTM3000.h5')
model.fit(x_train, y_train, nb_epoch=500, validation_data=(x_test, y_test))
model.save('LSTM3500.h5')
model.fit(x_train, y_train, nb_epoch=500, validation_data=(x_test, y_test))
model.save('LSTM4000.h5')
model.fit(x_train, y_train, nb_epoch=500, validation_data=(x_test, y_test))
model.save('LSTM4500.h5')
model.fit(x_train, y_train, nb_epoch=500, validation_data=(x_test, y_test))
model.save('LSTM5000.h5')
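# The ten fit/save rounds above could be written as one loop (a sketch; the
# filename suffix is read as the cumulative epoch count implied by the names):
# for total_epochs in range(500, 5001, 500):
#     model.fit(x_train, y_train, nb_epoch=500, validation_data=(x_test, y_test))
#     model.save('LSTM%d.h5' % total_epochs)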

#from keras.models import load_model
#model = load_model('LSTM1000.h5')
predictions = model.predict(x_test)
mod = gensim.models.Word2Vec.load('word2vec.bin')
[mod.most_similar([predictions[10][i]])[0] for i in range(15)]
Example #14
class Model():
    
    def __init__(self, configs):
        self.model = Sequential()
        self.configs = configs
        
    def build_model(self, input_dim):

        for layer in self.configs['model']['layers']:
        
            neurons = layer['neurons'] if 'neurons' in layer else None
            dropout_rate = layer['rate'] if 'rate' in layer else None
            activation = layer['activation'] if 'activation' in layer else None
            return_seq = layer['return_seq'] if 'return_seq' in layer else None
            input_timesteps = layer['input_timesteps'] if 'input_timesteps' in layer else None
            
            if layer['type'] == 'dense':
                self.model.add(Dense(neurons, activation=activation))
            if layer['type'] == 'lstm':
                self.model.add(LSTM(neurons, input_shape=(input_timesteps, input_dim), return_sequences=return_seq))
            if layer['type'] == 'dropout':
                self.model.add(Dropout(dropout_rate))
            
        self.model.compile(loss=self.configs['model']['loss'], optimizer=self.configs['model']['optimizer'])
        
        print('[Model] Model Compiled')
        
        
    def train(self, x, y, epochs, batch_size):
        
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size' % (epochs, batch_size))

        save_fname = os.path.join(self.configs["data"]["save_dir"], '%s-e%s.h5' % (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        callbacks = [
            EarlyStopping(monitor='val_loss', patience=2),
            ModelCheckpoint(filepath=save_fname, monitor='val_loss', save_best_only=True)
        ]
        
        
        # try this
        #y = y.reshape((y.shape[0], y.shape[1], 1))
        
        self.model.fit(
            x,
            y,
            epochs=epochs,
            batch_size=batch_size,
            callbacks=callbacks
        )
        
        self.model.save(save_fname)

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        
        
    def load_h5_model(self, model_name):
        self.model = load_model(model_name)
        
    def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch):
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size, %s batches per epoch' % (epochs, batch_size, steps_per_epoch))

        save_fname = os.path.join(self.configs["data"]["save_dir"], '%s-e%s.h5' % (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        callbacks = [
            ModelCheckpoint(filepath=save_fname, monitor='loss', save_best_only=True)
        ]
        
        self.model.fit_generator(
            data_gen,
            steps_per_epoch=steps_per_epoch,
            epochs=epochs,
            callbacks=callbacks,
            workers=1
        )

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        
    
    def predict_point_by_point(self, data):
        # Predict each timestep given the last sequence of true data, in effect only predicting 1 step ahead each time
        print('[Model] Predicting Point-by-Point...')
        predicted = self.model.predict(data)
        predicted = np.reshape(predicted, (predicted.size,))
        return predicted
    
    
    def predict_sequences_multiple(self, data, window_size, prediction_len):
        #Predict sequence of 50 steps before shifting prediction run forward by 50 steps
        print('[Model] Predicting Sequences Multiple...')
        prediction_seqs = []
        for i in range(int(len(data)/prediction_len)):
            curr_frame = data[i*prediction_len]
            predicted = []
            for j in range(prediction_len):
                predicted.append(self.model.predict(curr_frame[newaxis,:,:])[0,0])
                curr_frame = curr_frame[1:]
                curr_frame = np.insert(curr_frame, [window_size-2], predicted[-1], axis=0)
            prediction_seqs.append(predicted)
        return prediction_seqs

    def predict_sequence_full(self, data, window_size):
        #Shift the window by 1 new prediction each time, re-run predictions on new window
        print('[Model] Predicting Sequences Full...')
        curr_frame = data[0]
        predicted = []
        for i in range(len(data)):
            predicted.append(self.model.predict(curr_frame[newaxis,:,:])[0,0])
            curr_frame = curr_frame[1:]
            curr_frame = np.insert(curr_frame, [window_size-2], predicted[-1], axis=0)
        return predicted
Example #15
# In[40]:

# Fit Train data with 10% data used for model inferencing.
model.fit(X_train,
          y_train,
          validation_split=0.1,
          epochs=50,
          batch_size=250,
          callbacks=[checkpointer],
          verbose=1)

# Load best weights from saved model checkpoint
model.load_weights('weights.best.DeepNN.hdf5')

# predict on test data
y_pred = model.predict(X_test)

# In[39]:

# Compute MAE from predicted and actual loss
print("Mean Absolute error: %.2f" %
      performance_metric(np.exp(y_test), np.exp(y_pred)))

# In[48]:

test_ids = df_test['id']

# In[53]:


def save_predictions(ids=None, predictions=None, file=None):
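# The body of save_predictions is truncated in the source; a plausible
# sketch (assumption: write an id/prediction CSV with pandas):
#     import pandas as pd
#     pd.DataFrame({'id': ids, 'loss': predictions.ravel()}).to_csv(file, index=False)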
Example #16
disc_model.trainable = False
adv_input = Input(shape=(100, ))
x = generator_model(adv_input)
adv_output = disc_model(x)
adv_model = Model(inputs=adv_input, outputs=adv_output)
adv_model.compile(loss='binary_crossentropy', optimizer='adam')
adv_model.summary()

################################    train   ##################################
print("On commence l'entrainement")
A_loss = []
for i in range(5000):
    print("nb -> " + str(i))
    noise = np.random.uniform(-1, 1, size=[256, 100])
    image_fake = generator_model.predict(noise)
    imageBatch = X_train[np.random.randint(0, X_train.shape[0], size=256)]

    y_true = np.ones(256)
    y_true *= 0.9
    y_false = np.zeros(256)

    x = np.concatenate([imageBatch, image_fake])
    y = np.concatenate([y_true, y_false])

    disc_model.trainable = True
    d_loss = disc_model.train_on_batch(x, y)

    noise = np.random.uniform(-1, 1, size=[256, 100])
    y = np.ones(256)
Example #17
# Initialising the ANN
classifier = Sequential()

# Adding the input layer and the first hidden layer
classifier.add(
    Dense(output_dim=6, init='uniform', activation='relu', input_dim=11))

# Adding the second hidden layer
classifier.add(Dense(output_dim=6, init='uniform', activation='relu'))

# Adding the output layer
classifier.add(Dense(output_dim=1, init='uniform', activation='sigmoid'))

# Compiling the ANN
classifier.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])

# Fitting the ANN to the Training set
classifier.fit(X_train, y_train, batch_size=10, nb_epoch=100)

# Part 3 - Making the predictions and evaluating the model

# Predicting the Test set results
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
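# A quick accuracy readout from the confusion matrix (sketch):
accuracy = (cm[0, 0] + cm[1, 1]) / cm.sum()
print('Test accuracy: %.3f' % accuracy)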
Example #18
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import RMSprop, adam
import numpy as np

x_data = [[73., 80., 75.], [93., 88., 93.], [89., 91., 90.], [96., 98., 100.],
          [73., 66., 70.]]
#shape(5,3)

y_data = [[152.], [185.], [180.], [196.], [142.]]

model = Sequential()
model.add(Dense(input_dim=3, units=1))
# input_dim is the number of inputs; units is the shape of the output

model.add(Activation('linear'))
# linear activation function
rmsprop = adam(lr=0.01)
# An optimizer that remedies Adagrad's shortcomings (note: despite the
# variable name `rmsprop`, this is an Adam instance)

model.compile(loss='mse', optimizer=rmsprop)
model.fit(x_data, y_data, epochs=10000)  # fit = training

y_predict = model.predict(np.array([[95., 100., 80]]))
print(y_predict)
Example #19
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(32, input_shape=(look_back, 1)))
# model.add(LSTM(16, return_sequences=True))
# model.add(LSTM(16))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['acc'])
model.fit(trainX,
          trainY,
          epochs=200,
          batch_size=1,
          verbose=2,
          callbacks=[EarlyStopping(monitor='loss', patience=2), tensorboard])
# make predictions

trainPredict = model.predict(trainX)
testPredict = model.predict(testX)

# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0]))
print('Test Score: %.2f RMSE' % (testScore))
# shift train predictions for plotting
trainPredictPlot = np.empty_like(dataset)
trainPredictPlot[:, :] = np.nan
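# For reference, the windowing helper this snippet assumes for trainX/trainY
# (a sketch of the usual look_back pattern):
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        dataX.append(dataset[i:i + look_back, 0])
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)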
Example #20
        'validation_data': (X_test, Y_test)
    })

# Fitting the model
model.fit(
    **keras_dict
)

yhat = []

if (train_test_split > 0):
    # Getting the last n time series
    _, X_test, _, _ = create_train_test()

    # Making the prediction list
    yhat = [y[0] for y in model.predict(X_test)]
    print(X_test)


if len(yhat) > 0:

    # Constructing the forecast dataframe
    fc = data.tail(len(yhat)).copy()
    fc.reset_index(inplace=True)
    fc['forecast'] = yhat

    # Plotting the forecasts
    plt.figure(figsize=(12, 8))
    for dtype in ['Sales', 'forecast']:
        plt.plot(
            'Date',
Example #21


# Part 3 - Making the predictions and visualising the results

# Getting the real stock price of 2017
dataset_test = pd.read_csv('Google_Stock_Price_Test.csv')
real_stock_price = dataset_test.iloc[:, 1:2].values

# Getting the predicted stock price of 2017
dataset_total = pd.concat((dataset_train['Open'], dataset_test['Open']), axis = 0)
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 60:].values
inputs = inputs.reshape(-1,1)
inputs = sc.transform(inputs)
X_test = []
for i in range(60, 80):
    X_test.append(inputs[i-60:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_stock_price = regressor.predict(X_test)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)

# Visualising the results
plt.plot(real_stock_price, color = 'red', label = 'Real Google Stock Price')
plt.plot(predicted_stock_price, color = 'blue', label = 'Predicted Google Stock Price')
plt.title('Google Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('Google Stock Price')
plt.legend()
plt.show()
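# A follow-up not in the original snippet (sketch): quantify the gap between
# the two curves with RMSE, assuming sklearn is available.
import math
from sklearn.metrics import mean_squared_error
rmse = math.sqrt(mean_squared_error(real_stock_price, predicted_stock_price))
print('Test RMSE: %.2f' % rmse)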
Example #22
max_words = 200
input_length = 10

model = Sequential()
model.add(Embedding(max_words, embedding_dim, input_length=input_length))
model.add(Convolution1D(128, kernel_size=3,
                        activation='relu'))  # 10 - 3 + 1 = 8
model.add(Convolution1D(64, kernel_size=3,
                        activation='relu'))  # 10 - 3 + 1 = 6
model.add(Convolution1D(32, kernel_size=3,
                        activation='relu'))  # 10 - 3 + 1 = 4
model.add(Flatten())  # 128 = 32 * 4
model.add(Dropout(0.2))
model.add(Dense(128, activation='sigmoid'))  # W = 128 x 128
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.summary()

model.compile(loss='mse', optimizer='adam')

input_array = np.random.randint(n_in, size=(mb, input_length))

output_array = model.predict(input_array)
assert output_array.shape == (mb, 1)

print(
    "Saving model with embedding into several Conv1D layers into Flatten and Dense for backend {} and keras major version {}"
    .format(backend, major_version))
model.save("{}embedding_conv1d_extended_{}_{}.h5".format(
    base_path, backend, major_version))
Example #23
def run_gru(s):

    x_dims = len(x_cols[s.dataSet]) if s.dataSet in x_cols else s.lookback
    random.seed(6)
    np.random.seed(6)
    rnn = Sequential()
    rnn.add(
        GRU(s.nodes,
            input_shape=(None, x_dims),
            kernel_initializer='he_uniform',
            stateful=False))

    #rnn.add(Dropout(0.15))
    rnn.add(Dense(1, kernel_initializer='he_uniform'))
    opt = adam(lr=s.lr, decay=0.0)  #1e-3)
    rnn.compile(loss='mae', optimizer=opt)

    # prepare dataset as pyBrain sequential dataset
    sequence = readDataSet(s.dataSet, s.dataSetDetailed, s)
    if s.limit_to:
        sequence = sequence[:s.limit_to]

    dp = DataProcessor()
    # standardize data by subtracting mean and dividing by std
    #(meanSeq, stdSeq) = dp.normalize('data', sequence)

    dp.windowed_normalize(sequence)

    for key in sequence.keys():
        if key != "data":
            dp.normalize(key, sequence)

    predictedInput = np.zeros((len(sequence), ))
    targetInput = np.zeros((len(sequence), ))
    trueData = np.zeros((len(sequence), ))

    if s.dataSet in differenceSets:
        predictedInputNodiff = np.zeros((len(sequence), ))
        targetInputNodiff = np.zeros((len(sequence), ))

    if s.dataSet in differenceSets:
        backup_sequence = sequence
        sequence = dp.difference(sequence, s.lookback)

    allX = getX(sequence, s)
    allY = np.array(sequence['data'])
    for i in range(60):
        print i, sequence['data'][i]

    allX = allX[28:]
    allY = allY[48:]
    #if s.dataSet not in x_cols:
    #    allY = allY[s.lookback:]
    trainX = allX[0:s.nTrain]
    trainY = allY[s.predictionStep:s.nTrain + s.predictionStep]
    print trainX[0]
    print trainY[0]
    trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
    curBatch = 1.0
    callback = LossCallback()
    temp_set = np.array(sequence['data'])[:48 + s.nTrain + 5]
    configure_batches(48, s.batch_size,
                      np.reshape(temp_set, (temp_set.shape[0], 1, 1)))
    rnn.fit(trainX,
            trainY,
            epochs=s.epochs,
            batch_size=s.batch_size,
            verbose=min(s.max_verbosity, 2),
            callbacks=[callback])
    for i in xrange(0, s.nTrain):
        targetInput[i] = allY[i + s.predictionStep]

    for i in tqdm(xrange(s.nTrain + s.predictionStep, len(allX)),
                  disable=s.max_verbosity == 0):
        if i % s.retrain_interval == 0 and i > s.numLags + s.nTrain and s.online:
            trainX = allX[i - s.nTrain - s.predictionStep:i - s.predictionStep]
            trainY = allY[i - s.nTrain:i]
            trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
            temp_set = np.array(sequence['data'])[i - s.nTrain -
                                                  s.predictionStep - 48:i]
            configure_batches(48, s.batch_size,
                              np.reshape(temp_set, (temp_set.shape[0], 1, 1)))
            rnn.fit(trainX,
                    trainY,
                    epochs=s.epochs,
                    batch_size=s.batch_size,
                    verbose=2,
                    callbacks=[callback])

        targetInput[i] = allY[i + s.predictionStep]
        predictedInput[i] = rnn.predict(np.reshape(allX[i], (1, 1, x_dims)))
        if i == 12546:
            print allX[i]
            print targetInput[i]
        if s.dataSet in differenceSets:
            predictedInputNodiff[i] = predictedInput[i]
            targetInputNodiff[i] = targetInput[i]
            predictedInput[i] = dp.inverse_difference(backup_sequence['data'],
                                                      predictedInput[i], i - 1)
            targetInput[i] = dp.inverse_difference(backup_sequence['data'],
                                                   targetInput[i], i - 1)
        predictedInput[0] = 0
        trueData[i] = sequence['data'][i]

    #predictedInput = dp.denormalize(predictedInput, meanSeq, stdSeq)
    #targetInput = dp.denormalize(targetInput, meanSeq, stdSeq)
    dp.windowed_denormalize(predictedInput, targetInput)
    if s.dataSet in differenceSets:

        # predictedInputNodiff = dp.denormalize(predictedInputNodiff)
        # targetInputNodiff = dp.denormalize(targetInputNodiff)
        pass
    #trueData = (trueData * stdSeq) + meanSeq

    dp.saveResultToFile(s.dataSet, predictedInput, targetInput, 'gru',
                        s.predictionStep, s.max_verbosity)
    skipTrain = error_ignore_first[s.dataSet]
    from plot import computeSquareDeviation
    squareDeviation = computeSquareDeviation(predictedInput, targetInput)
    squareDeviation[:skipTrain] = None
    nrmse = np.sqrt(np.nanmean(squareDeviation)) / np.nanstd(targetInput)
    if s.max_verbosity > 0:
        print "", s.nodes, "NRMSE {}".format(nrmse)
    mae = np.nanmean(np.abs(targetInput - predictedInput))
    if s.max_verbosity > 0:
        print "MAE {}".format(mae)

    if s.dataSet in differenceSets:
        dp.saveResultToFile(s.dataSet, predictedInputNodiff, targetInputNodiff,
                            'gru_nodiff', s.predictionStep, s.max_verbosity)
        squareDeviation = computeSquareDeviation(predictedInputNodiff,
                                                 targetInputNodiff)
        squareDeviation[:skipTrain] = None
        nrmse = np.sqrt(
            np.nanmean(squareDeviation)) / np.nanstd(targetInputNodiff)
        if s.max_verbosity > 0:
            print "", s.nodes, "NRMSE {}".format(nrmse)
        mae = np.nanmean(np.abs(targetInputNodiff - predictedInputNodiff))
        if s.max_verbosity > 0:
            print "MAE {}".format(mae)
        mase = errors.get_mase(predictedInput, targetInput,
                               np.roll(targetInput, 24))
        if s.max_verbosity > 0:
            print "MASE {}".format(mase)
    return nrmse
Example #24
model = Sequential()
model.add(
    Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

# tb = TensorBoard(log_dir='./logs', histogram_freq=0)
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

model.fit(x_train,
          y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_split=0.1)
score = model.evaluate(x_test, y_test, verbose=0)
pred = model.predict(x_test)
numpy.savetxt('pred.csv', pred, delimiter=',')
model.save("my_model.h5")
print('Test loss:', score[0])
print('Test accuracy:', score[1])
model.save('cnn.h5')
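# Round-trip check (sketch): reload one of the saved files and confirm its
# predictions match the in-memory model.
from keras.models import load_model
reloaded = load_model('cnn.h5')
numpy.testing.assert_allclose(pred, reloaded.predict(x_test), atol=1e-5)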
Example #25
# Train the model using SGD
print('[INFO]: Training....')
sgd = SGD(0.01)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
H = model.fit(train_x,
              train_y,
              validation_data=(test_x, test_y),
              epochs=100,
              batch_size=128)

# Test the network
print('[INFO]: Testing....')
predictions = model.predict(test_x, batch_size=128)
print(
    classification_report(test_y.argmax(axis=1),
                          predictions.argmax(axis=1),
                          target_names=[str(x) for x in lb.classes_]))

plt.style.use('ggplot')
plt.figure()
plt.plot(np.arange(0, 100), H.history['loss'], label='train_loss')
plt.plot(np.arange(0, 100), H.history['val_loss'], label='val_loss')
plt.plot(np.arange(0, 100), H.history['acc'], label='train_acc')
plt.plot(np.arange(0, 100), H.history['val_acc'], label='val_acc')
plt.title('Training Loss & Accuracy')
plt.xlabel('Epoch #')
plt.ylabel('Loss/Accuracy')
plt.legend()
Example #26
# Evaluation on the test data
test_files_names= test_set.filenames
os.getcwd()
os.chdir("D:\\NUS\\Semester1\\CI\\Neural Network\\Assignments_NN\\datasets\\test")


training_set_arch1.class_indices
predicted_labels_arch1=[]
predicted_category_arch1=[]
for i in range(len(test_files_names)):
    test_image = image.load_img(test_files_names[i], target_size=(32, 32))
    test_image = image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)

    result_arch1 = classmodel.predict(test_image)
    predicted_labels_arch1.append(result_arch1[0][0])

    if result_arch1[0][0] == 1:
        prediction = 'woman'
    else:
        prediction = 'man'
    predicted_category_arch1.append(prediction)

actual_category_arch1=[]
for i in range(len(test_files_names)):
    actual_category_arch1.append(test_files_names[i].split("\\")[0])
    
confusion_matrix(actual_category_arch1,predicted_category_arch1)
tn1, fp1, fn1, tp1 = confusion_matrix(actual_category_arch1,predicted_category_arch1).ravel()
Example #27
model.add(Dense(1))
model.compile(optimizer='adam', loss='mae')
model.summary()
model.fit_generator(generator, epochs=100)
model.history.history.keys()
loss_per_epoch = model.history.history['loss']
plt.plot(range(len(loss_per_epoch)), loss_per_epoch)

first_eval_batch = scaled_train[-90:]
first_eval_batch
first_eval_batch = first_eval_batch.reshape((1, 90, n_features))
test_predictions = []
first_eval_batch = scaled_train[-n_input:]
current_batch = first_eval_batch.reshape((1, n_input, n_features))
for i in range(len(test)):
    current_pred = model.predict(current_batch)[0]
    test_predictions.append(current_pred)
    current_batch = np.append(current_batch[:, 1:, :], [[current_pred]],
                              axis=1)

true_predictions = scaler.inverse_transform(test_predictions)
true_predictions
test['Predictions'] = true_predictions
Y_true = df.iloc[train_len:]
Y_pred = test["Predictions"]

from sklearn.metrics import mean_squared_error, mean_absolute_error
MSE = mean_squared_error(Y_true, Y_pred)
MAE = mean_absolute_error(Y_true, Y_pred)
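# The error metrics are computed but never displayed; print them (sketch):
print('Test MSE: %.3f  MAE: %.3f' % (MSE, MAE))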

plt.plot(test["Predictions"], label="Pred", color="black", zorder=1)
Example #28
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense

# Define training data, i.e. NOT-gate inputs and outputs (1 -> 0, 0 -> 1)
input = np.array([[1], [0]])
output = np.array([[0], [1]])

# Model Definition
model = Sequential()
model.add(Dense(16, input_dim=1, activation='linear'))
model.add(Dense(1, activation='linear'))

model.compile(loss='mean_squared_error',
              optimizer='adam',
              metrics=['binary_accuracy'])

# Train Model
model.fit(input, output, nb_epoch=100, verbose=2)

# Validation
print("Round off Values: \n", model.predict(input).round())
print("Actual Values: \n", model.predict(input))
Example #29
classifier.add(Dense(128, activation='relu'))
classifier.add(Dense(10, activation='softmax'))

#compiling CNN
classifier.compile(optimizer='rmsprop',
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])

classifier.fit(X_train,
               y_train,
               batch_size=32,
               epochs=30,
               validation_data=(X_test, y_test))

#predict values from validation set
Y_pred = classifier.predict(X_test)
# Convert predictions classes to one hot vectors
Y_pred_classes = np.argmax(Y_pred, axis=1)
# Convert validation observations to one hot vectors
Y_true = np.argmax(y_test, axis=1)
#plot confusion matrix
from sklearn.metrics import confusion_matrix
conf_matrix = confusion_matrix(Y_true, Y_pred_classes)

#predicting test set
results = classifier.predict(test)
results = np.argmax(results, axis=1)
results = pd.Series(results, name="label")

g = plt.imshow(test[2][:, :, 0])
Example #30
y = np_utils.to_categorical(dataY)
# define the LSTM model
model = Sequential()
model.add(
    LSTM(256, input_shape=(X.shape[1], X.shape[2]), return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(256))
model.add(Dropout(0.2))
model.add(Dense(y.shape[1], activation='softmax'))
# load the network weights
filename = "chinese_shiji_lstm_model_20161123"
model.load_weights(filename)
model.compile(loss='categorical_crossentropy', optimizer='adam')
# pick a random seed
start = numpy.random.randint(0, len(dataX) - 1)
pattern = dataX[start]
print "Seed:"
print "\"", ''.join([int_to_char[value] for value in pattern]), "\""
# generate characters
for i in range(1000):
    x = numpy.reshape(pattern, (1, len(pattern), 1))
    x = x / float(n_vocab)
    prediction = model.predict(x, verbose=0)
    index = numpy.argmax(prediction)
    result = int_to_char[index]
    seq_in = [int_to_char[value] for value in pattern]
    sys.stdout.write(result)
    pattern.append(index)
    pattern = pattern[1:len(pattern)]
print "\nDone."
Example #31
Pool_Valid_Loss = Valid_Loss

print('Evaluating Test Accuracy Without Acquisition')
score, acc = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)

all_accuracy = acc

print('Starting Active Learning')

for i in range(acquisition_iterations):
    print('POOLING ITERATION', i)

    score_All = np.zeros(shape=(X_Pool.shape[0], nb_classes))
    for d in range(dropout_iterations):
        print('Dropout Iteration', d)
        dropout_score = model.predict(X_Pool, batch_size=batch_size, verbose=1)
        np.save(
            '/Users/Riashat/Documents/Cambridge_THESIS/Code/Experiments/keras/active_learning/Acquisition_Functions/Bayesian_Active_Learning/GPU/Max_Entropy/Dropout_Scores/'
            + 'Dropout_Score_' + str(d) + '.npy', dropout_score)
        score_All = score_All + dropout_score

    Avg_Pi = np.divide(score_All, dropout_iterations)
    Log_Avg_Pi = np.log2(Avg_Pi)
    Entropy_Avg_Pi = -np.multiply(Avg_Pi, Log_Avg_Pi)
    Entropy_Average_Pi = np.sum(Entropy_Avg_Pi, axis=1)

    U_X = Entropy_Average_Pi

    # This selects the indices of the highest-entropy pool points:
    # a_1d = U_X.flatten()
    # x_pool_index = a_1d.argsort()[-Queries:]
Example #32
test_datagen = ImageDataGenerator(rescale = 1./255)

training_set = train_datagen.flow_from_directory('dataset/training_set',
                                                 target_size = (64, 64),
                                                 batch_size = 32,
                                                 class_mode = 'binary')

test_set = test_datagen.flow_from_directory('dataset/test_set',
                                            target_size = (64, 64),
                                            batch_size = 32,
                                            class_mode = 'binary')

classifier.fit_generator(training_set,
                         steps_per_epoch = 8000,
                         epochs = 25,
                         validation_data = test_set,
                         validation_steps = 2000)

# Part 3 - Making new predictions

import numpy as np
from keras.preprocessing import image
test_image = image.load_img('dataset/single_prediction/cat_or_dog_1.jpg', target_size = (64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
result = classifier.predict(test_image)
training_set.class_indices
if result[0][0] == 1:
    prediction = 'dog'
else:
    prediction = 'cat'
Example #33
X_test = np.array(X_test)
X_test.shape

y_test = np.array(y_test)
y_test.shape

classifier.fit(X_train,
               y_train,
               batch_size=1000,
               epochs=10,
               validation_data=(X_test, y_test))

score, accuracy = classifier.evaluate(X_test, y_test)
print('Test Score = ', score)
print('Test Accuracy = ', accuracy)

############################ Predict any random value ###############################

name = ["jagat jyoti mishra"]
input_data = []

for i in name:
    tmp = [set_flag(char_index[j]) for j in str(i)]
    for k in range(0, 30 - len(str(i))):
        tmp.append(set_flag(char_index["END"]))
    input_data.append(tmp)

input_data = np.array(input_data)

classifier.predict(input_data)
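# set_flag and char_index are helpers assumed from earlier in this script;
# a minimal sketch (one-hot encoding over the character vocabulary):
# def set_flag(i):
#     flag = np.zeros(len(char_index))
#     flag[i] = 1
#     return flag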
Example #34
x_train = x_train.reshape(x_train.shape[0],x_train.shape[1],1)
print("x_train.shape : ", x_train.shape)

# Complete the LSTM model

# 2. Build the model
model = Sequential()
model.add(LSTM(17,activation='relu',input_shape=(4,1)))
model.add(Dense(42))
model.add(Dense(39))
model.add(Dense(41))
model.add(Dense(1))
model.summary()


# 3. Run
from keras.callbacks import EarlyStopping
els = EarlyStopping(monitor='loss', patience=8, mode='auto')
model.compile(optimizer='adam',loss = 'mse')
model.fit(x_train,y_train,epochs=20,batch_size=1,callbacks=[]) 


# 4. Test
x_predict = np.array([7,8,9,10])
print("x_predict.shape : ", x_predict.shape)
x_predict = x_predict.reshape(1, x_predict.shape[0], 1)  # (batch, timesteps, features) for the LSTM
print("x_predict.shape : ", x_predict.shape)
# x_predict = x_predict.reshape(x_predict[1],x_predict[0],1)
print(x_predict,"\n",x_predict.shape) 
y_predict = model.predict(x_predict)
print(y_predict)
Example #35
              optimizer='adam',
              metrics=['mean_absolute_error'])
model.summary()

#DEFINE THE CHECKPOINT
from keras.callbacks import ModelCheckpoint

checkpoint_name = 'Weights-{epoch:03d}--{val_loss:.5f}.hdf5'
checkpoint = ModelCheckpoint(checkpoint_name,
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             mode='auto')
callbacks_list = [checkpoint]

model.fit(img_flat,
          df_y,
          epochs=10,
          batch_size=32,
          validation_split=0.2,
          callbacks=callbacks_list)

#Save the good weights into a file
weights_file = 'Weights-006--34.30850.hdf5'  # choose the best checkpoint based on the name
model.load_weights(weights_file)  # load it
model.compile(loss='mean_absolute_error',
              optimizer='adam',
              metrics=['mean_absolute_error'])

predictions = model.predict(img_flat_test)
Example #36
                         batch_size=512)

#plot of training and validation loss with each epoch
plt.clf()
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'g', label='Training loss')
plt.plot(epochs, val_loss, 'y', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

#plot of training and validation accuracy with each epoch
plt.clf()
acc = history.history['acc']
val_acc = history.history['val_acc']
plt.plot(epochs, acc, 'g', label='Training acc')
plt.plot(epochs, val_acc, 'y', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

#predicting the test set results
y_pred = model_lstm.predict(X_test)
y_pred = np.argmax(y_pred, axis=1)
Example #37
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Dense(num_labels))
model.add(Activation('softmax'))
model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

history = model.fit(x_train,
                    y_train,
                    batch_size=batch_size,
                    epochs=30,
                    verbose=1,
                    validation_split=0.1)

score = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=1)

print('Test accuracy:', score[1])

text_labels = encoder.classes_

for i in range(10):
    prediction = model.predict(np.array([x_test[i]]))
    print(prediction)
    predicted_label = text_labels[np.argmax(prediction[0])]

    print('Actual Class:' + Y_test[i])
    print("Predicted Class: " + predicted_label)
Example #38
    image_arr = cv2.imread(image, cv2.IMREAD_UNCHANGED)
    image_arr = cv2.cvtColor(image_arr, cv2.COLOR_BGR2RGB )
    T.append(cv2.resize(image_arr ,  (width, height), interpolation=cv2.INTER_AREA))
  return T

test = read_process_images(test_images)
# test
x_test = np.array(test)

x_test.shape

test_datagen = ImageDataGenerator(rescale=1./255)

test_generator = test_datagen.flow(x_test, batch_size=batch_size)

y_pred = np.argmax(model.predict(x_test), axis=-1)

y_pred

"""JSON FILE GENERATION

"""

# initializing lists 
image_list = []
import os
# 1. Get file names from directory
image_list = os.listdir(r"/content/drive/My Drive/NewData/test_anonymous/")
image_list.sort()
print(image_list)
Example #39
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# "Loss"
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()

#---------------------------------------------------------------------------------
performace = classifier.evaluate(X_test,y_test)

y_pred = classifier.predict_classes(X_test)

result = classifier.predict(X_test)

prob = classifier.predict_proba(X_test)

np.savetxt("prob.csv",prob,fmt="%s",delimiter=",")

from keras.models import load_model

classifier.save('FCN.h5')

model = load_model('FCN.h5')
Example #40
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy',mcor,precision,recall, f1])

    history = model.fit(x_train, y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=1,
                        validation_split=0.1,callbacks=[early_stopping])
    score = model.evaluate(x_val, y_val,
                           batch_size=batch_size, verbose=1)
    score1 = score[0]
    acc1 = score[1]
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
    predictions = model.predict(x_val,batch_size,1)
    # prec = precision(y_val,predictions)
    # r = recall(y_val,predictions)
    # f1_score = f1(y_val,predictions)
    TP = y_val*predictions

    TP_sum = 0
    FP_sum = 0
    FN_sum = 0
    i = 0
    for pred in predictions:
        print "Prediction: "+str(pred)
        print "Y valuation: "+str(y_val[i])
        if pred[0] > 0.5 and y_val[i][0] == 1:
            TP_sum = TP_sum + 1
        if pred[0] > 0.5 and y_val[i][0]==0: