def train_model_input_predicted_convbase():
    # Extract conv-base features once, then train a dense classifier on them.
    train_features, train_labels = extract_features(train_dir, 2000)
    validation_features, validation_labels = extract_features(validation_dir, 1000)
    test_features, test_labels = extract_features(test_dir, 1000)
    train_features = np.reshape(train_features, (2000, 4 * 4 * 512))
    validation_features = np.reshape(validation_features, (1000, 4 * 4 * 512))
    test_features = np.reshape(test_features, (1000, 4 * 4 * 512))
    model = make_model()
    history = model.fit(train_features, train_labels,
                        batch_size=batch_size,
                        epochs=epochs,
                        validation_data=(validation_features, validation_labels))
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    plot_history(acc, val_acc, epochs, 'Accuracy')
    plot_history(loss, val_loss, epochs, 'Loss')
    return model
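# `extract_features` is used above but not defined in this file. A minimal
# sketch, assuming a pretrained VGG16 `conv_base` (4x4x512 output for 150x150
# inputs) and a module-level `batch_size`; one plausible implementation, not
# necessarily this project's exact code:
def extract_features(directory, sample_count):
    datagen = ImageDataGenerator(rescale=1. / 255)
    features = np.zeros((sample_count, 4, 4, 512))
    labels = np.zeros((sample_count,))
    gen = datagen.flow_from_directory(directory,
                                      target_size=(150, 150),
                                      batch_size=batch_size,
                                      class_mode='binary')
    i = 0
    for inputs_batch, labels_batch in gen:
        # Push each batch through the frozen conv base and cache the output.
        features[i * batch_size:(i + 1) * batch_size] = conv_base.predict(inputs_batch)
        labels[i * batch_size:(i + 1) * batch_size] = labels_batch
        i += 1
        if i * batch_size >= sample_count:
            break  # the generator loops forever, so stop explicitly
    return features, labels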
def try_conv1d_weather():
    train_gen = tuto11.train_gen
    val_gen = tuto11.val_gen
    train_steps = tuto11.train_steps
    val_steps = tuto11.val_steps

    model = Sequential()
    model.add(layers.Conv1D(32, 5, activation='relu',
                            input_shape=(None, tuto11.float_data.shape[-1])))
    model.add(layers.MaxPooling1D(3))
    model.add(layers.Conv1D(32, 5, activation='relu'))
    model.add(layers.MaxPooling1D(3))
    model.add(layers.Conv1D(32, 5, activation='relu'))
    model.add(layers.GlobalMaxPooling1D())
    model.add(layers.Dense(1))
    model.summary()

    model.compile(optimizer=RMSprop(), loss='mae')
    history = model.fit(train_gen,
                        steps_per_epoch=train_steps,
                        epochs=20,
                        validation_data=val_gen,
                        validation_steps=val_steps)
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    plot_history(loss, val_loss, 20, 'loss')
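# `plot_history` is a shared helper whose definition does not appear in this
# file. A minimal sketch matching the call sites above (assumed to be a simple
# matplotlib train-vs-validation plot):
import matplotlib.pyplot as plt

def plot_history(train_values, val_values, epochs, metric='loss'):
    epochs_range = range(1, epochs + 1)
    plt.plot(epochs_range, train_values, 'bo', label='Training ' + metric)
    plt.plot(epochs_range, val_values, 'b', label='Validation ' + metric)
    plt.title('Training and validation ' + metric)
    plt.xlabel('Epochs')
    plt.legend()
    plt.show()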
def test_model():
    # Relies on the module-level model and data splits defined in this file.
    history = model.fit(x_train, y_train,
                        epochs=20,
                        batch_size=128,
                        validation_data=(x_valid, y_valid))
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    plot_history(loss, val_loss, 20, 'loss')
def start_training(train_generator, validation_generator, batch_size=32,
                   len_train=2000, len_valid=1000, epochs=30,
                   model_name='cats_and_dogs_small_generic.h5'):
    print(len_train, batch_size)
    # fit_generator is deprecated; model.fit accepts generators directly.
    history = model.fit(train_generator,
                        steps_per_epoch=len_train // batch_size,
                        epochs=epochs,
                        validation_data=validation_generator,
                        validation_steps=len_valid // batch_size)
    model.save(model_name)
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    plot_history(acc, val_acc, epochs, metric='Accuracy')
    plot_history(loss, val_loss, epochs, metric='Loss')
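# Example call, assuming a compiled module-level `model` and the usual
# cats-vs-dogs directory layout (paths and sizes are assumptions):
#
#     datagen = ImageDataGenerator(rescale=1. / 255)
#     train_generator = datagen.flow_from_directory(
#         train_dir, target_size=(150, 150), batch_size=32, class_mode='binary')
#     validation_generator = datagen.flow_from_directory(
#         validation_dir, target_size=(150, 150), batch_size=32, class_mode='binary')
#     start_training(train_generator, validation_generator)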
def train_model_full_pipeline_base_frozen():
    # Augment the training set only; validation images are just rescaled.
    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       rotation_range=40,
                                       width_shift_range=.2,
                                       height_shift_range=.2,
                                       shear_range=.2,
                                       zoom_range=.2,
                                       horizontal_flip=True,
                                       fill_mode='nearest')
    validation_datagen = ImageDataGenerator(rescale=1. / 255)
    train_generator = train_datagen.flow_from_directory(train_dir,
                                                        target_size=(150, 150),
                                                        batch_size=batch_size,
                                                        class_mode='binary')
    validation_generator = validation_datagen.flow_from_directory(
        validation_dir,
        target_size=(150, 150),
        batch_size=batch_size,
        class_mode='binary')
    model = make_model(conv=True)
    history = model.fit(train_generator,
                        steps_per_epoch=2000 // batch_size,
                        epochs=epochs,
                        validation_data=validation_generator,
                        validation_steps=1000 // batch_size)
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    plot_history(acc, val_acc, epochs, 'Accuracy')
    plot_history(loss, val_loss, epochs, 'Loss')
    return model
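# `make_model` is assumed to build the classifier: with conv=True it stacks
# the frozen VGG16 conv base under a small dense head, otherwise a dense-only
# head for the pre-extracted 4*4*512 feature vectors. A hedged sketch of one
# plausible implementation, not this project's exact code:
def make_model(conv=False):
    model = models.Sequential()
    if conv:
        conv_base.trainable = False  # keep pretrained weights fixed at this stage
        model.add(conv_base)
        model.add(layers.Flatten())
        model.add(layers.Dense(256, activation='relu'))
    else:
        model.add(layers.Dense(256, activation='relu', input_dim=4 * 4 * 512))
        model.add(layers.Dropout(.5))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer=optimizers.RMSprop(learning_rate=2e-5),
                  loss='binary_crossentropy',
                  metrics=['acc'])
    return model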
target = df['target']
x_train, y_train = data[:200000], target[:200000]
x_valid, y_valid = data[200000:250000], target[200000:250000]
x_test, y_test = data[250000:], target[250000:]

model = Sequential()
model.add(Dense(128, activation='relu', input_shape=(14, )))
model.add(Dense(64, activation='relu'))
model.add(Dropout(.5))
model.add(Dense(32, activation='relu'))
model.add(Dropout(.3))
model.add(Dense(32))
model.add(Dense(1))
model.compile(optimizer='rmsprop', loss='mse')
history = model.fit(x_train, y_train,
                    epochs=20,
                    batch_size=128,
                    validation_data=(x_valid, y_valid))
loss = history.history['loss']
val_loss = history.history['val_loss']
plot_history(loss, val_loss, 20, 'loss')

data_test = df_test[cols[1:-1]]
test_submit = df_test[['id']].copy()
test_submit['target'] = model.predict(data_test)
test_submit.to_csv('test_submit_02.csv', index=False)
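# `df`, `df_test`, `cols`, and `data` are assumed to come from a Kaggle-style
# CSV pair loaded earlier; a hedged sketch of that missing prep (file names
# and the id/features/target column layout are assumptions):
#
#     df = pd.read_csv('train.csv')
#     df_test = pd.read_csv('test.csv')
#     cols = list(df.columns)   # assumed: 'id', 14 feature columns, 'target'
#     data = df[cols[1:-1]]     # drop id and target, keep the 14 features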
def try_conv1d_preprocessing():
    float_data = tuto11.float_data
    step = 3
    lookback = 720
    delay = 144
    batch_size = 128
    train_gen = generator(float_data, lookback, delay, 0, 200000,
                          shuffle=True, step=step, batch_size=batch_size)
    val_gen = generator(float_data, lookback, delay, 200001, 300000,
                        step=step, batch_size=batch_size)
    test_gen = generator(float_data, lookback, delay, 300001, None,
                         step=step, batch_size=batch_size)
    train_steps = (200000 - lookback) // batch_size
    val_steps = (300000 - 200001 - lookback) // batch_size
    test_steps = (len(float_data) - 300001 - lookback) // batch_size
    print('float data shape[-1]:', float_data.shape[-1])

    # Conv1D + pooling as a cheap downsampling step before a GRU.
    model = Sequential()
    model.add(layers.Conv1D(32, 5, activation='relu',
                            input_shape=(None, float_data.shape[-1])))
    model.add(layers.MaxPooling1D(3))
    model.add(layers.Conv1D(32, 5, activation='relu'))
    model.add(layers.GRU(32, dropout=.1, recurrent_dropout=.5))
    model.add(layers.Dense(1))
    model.summary()

    model.compile(optimizer=RMSprop(), loss='mae')
    history = model.fit(train_gen,
                        steps_per_epoch=train_steps,
                        epochs=20,
                        validation_data=val_gen,
                        validation_steps=val_steps)
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    plot_history(loss, val_loss, 20, 'loss')  # P255
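# `generator` is assumed to be the usual timeseries windowing generator; a
# sketch consistent with the call signature above (using column 1 as the
# prediction target is an assumption carried over from the weather example):
def generator(data, lookback, delay, min_index, max_index,
              shuffle=False, batch_size=128, step=6):
    if max_index is None:
        max_index = len(data) - delay - 1
    i = min_index + lookback
    while True:
        if shuffle:
            rows = np.random.randint(min_index + lookback, max_index,
                                     size=batch_size)
        else:
            if i + batch_size >= max_index:
                i = min_index + lookback  # wrap around
            rows = np.arange(i, min(i + batch_size, max_index))
            i += len(rows)
        samples = np.zeros((len(rows), lookback // step, data.shape[-1]))
        targets = np.zeros((len(rows),))
        for j, row in enumerate(rows):
            indices = range(row - lookback, row, step)
            samples[j] = data[indices]
            targets[j] = data[row + delay][1]  # assumed target column
        yield samples, targets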
def vectorize_sequences(sequences, dimension=10000):
    # Multi-hot encode each sequence of word indices into a fixed-size vector.
    results = np.zeros((len(sequences), dimension))
    for i, sequence in enumerate(sequences):
        results[i, sequence] = 1
    return results


x_train, x_test = vectorize_sequences(train_data), vectorize_sequences(test_data)
y_train, y_test = (np.asarray(train_labels).astype('float32'),
                   np.asarray(test_labels).astype('float32'))

epochs = 5
model = models.Sequential()
model.add(layers.Dense(16, input_shape=(10000,), activation='relu',
                       kernel_regularizer=regularizers.l2(.001)))
model.add(layers.Dense(16, activation='relu',
                       kernel_regularizer=regularizers.l2(.001)))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])

x_val, y_val = x_train[:10000], y_train[:10000]
partial_x_train, partial_y_train = x_train[10000:], y_train[10000:]
history = model.fit(partial_x_train, partial_y_train,
                    epochs=epochs,
                    batch_size=512,
                    validation_data=(x_val, y_val))
history_dict = history.history
print(history_dict.keys())
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
acc_values = history_dict['acc']
val_acc_values = history_dict['val_acc']
util_func.plot_history(loss_values, val_loss_values, epochs, metric='loss')
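# `train_data`/`train_labels` above are assumed to come from the Keras IMDB
# dataset, capped at the 10,000 most frequent words to match input_shape:
#
#     from keras.datasets import imdb
#     (train_data, train_labels), (test_data, test_labels) = \
#         imdb.load_data(num_words=10000)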
def train_model_full_pipeline_top_unfrozen(dense_train_model):
    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       rotation_range=40,
                                       width_shift_range=.2,
                                       height_shift_range=.2,
                                       shear_range=.2,
                                       zoom_range=.2,
                                       horizontal_flip=True,
                                       fill_mode='nearest')
    validation_datagen = ImageDataGenerator(rescale=1. / 255)
    train_generator = train_datagen.flow_from_directory(train_dir,
                                                        target_size=(150, 150),
                                                        batch_size=batch_size,
                                                        class_mode='binary')
    validation_generator = validation_datagen.flow_from_directory(
        validation_dir,
        target_size=(150, 150),
        batch_size=batch_size,
        class_mode='binary')
    test_generator = validation_datagen.flow_from_directory(
        test_dir,
        target_size=(150, 150),
        batch_size=batch_size,
        class_mode='binary')

    # Unfreeze only the top convolutional block (block5) for fine-tuning.
    conv_base.trainable = True
    set_trainable = False
    for layer in conv_base.layers:
        if layer.name == 'block5_conv1':
            set_trainable = True
        layer.trainable = set_trainable

    # A very low learning rate limits how far the unfrozen layers move.
    dense_train_model.compile(loss='binary_crossentropy',
                              optimizer=optimizers.RMSprop(learning_rate=1e-5),
                              metrics=['acc'])
    history = dense_train_model.fit(train_generator,
                                    steps_per_epoch=2000 // batch_size,
                                    epochs=epochs,
                                    validation_data=validation_generator,
                                    validation_steps=1000 // batch_size)
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    plot_history(smooth_curve(acc, factor=.8), smooth_curve(val_acc, factor=.8),
                 epochs, 'Accuracy')
    plot_history(smooth_curve(loss, factor=.8), smooth_curve(val_loss, factor=.8),
                 epochs, 'Loss')
    test_loss, test_acc = dense_train_model.evaluate(test_generator,
                                                     steps=1000 // batch_size)
    print('test acc: ', test_acc)
    return dense_train_model
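# `smooth_curve` is assumed to be an exponential moving average that makes the
# noisy fine-tuning curves readable; a minimal sketch:
def smooth_curve(points, factor=.8):
    smoothed_points = []
    for point in points:
        if smoothed_points:
            previous = smoothed_points[-1]
            smoothed_points.append(previous * factor + point * (1 - factor))
        else:
            smoothed_points.append(point)
    return smoothed_points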
epochs = 9
model = models.Sequential()
model.add(layers.Dense(256, activation='relu', input_shape=(10000, )))
model.add(layers.Dropout(.3))
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dropout(.3))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dropout(.2))
model.add(layers.Dense(46, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
              metrics=['acc'])
history = model.fit(partial_x_train, partial_y_train_onehot,
                    epochs=epochs,
                    batch_size=256,
                    validation_data=(x_val, y_val_onehot))
history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
acc_values = history_dict['acc']
val_acc_values = history_dict['val_acc']
util_func.plot_history(loss_values, val_loss_values, epochs, metric='loss')
util_func.plot_history(acc_values, val_acc_values, epochs, metric='accuracy')  # p 108
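# The splits and one-hot labels above are assumed to be prepared from the
# Keras Reuters dataset in the usual way (the split point of 1000 is an
# assumption):
#
#     from keras.datasets import reuters
#     from keras.utils import to_categorical
#     (train_data, train_labels), _ = reuters.load_data(num_words=10000)
#     x_train = vectorize_sequences(train_data)   # same multi-hot helper as above
#     labels_onehot = to_categorical(train_labels)
#     x_val, partial_x_train = x_train[:1000], x_train[1000:]
#     y_val_onehot = labels_onehot[:1000]
#     partial_y_train_onehot = labels_onehot[1000:]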