def _train_svi(self, train_loader, epochs, lr, device):
    """Train the Bayesian network with Pyro stochastic variational inference.

    Runs `epochs` passes of SVI over `train_loader`, logging the per-sample
    ELBO loss and training accuracy each epoch, then saves the model and a
    loss/accuracy plot.

    Args:
        train_loader: iterable of (x_batch, y_batch) pairs; y_batch is
            one-hot encoded (labels are recovered with argmax).
        epochs: number of training epochs.
        lr: Adam learning rate for the variational parameters.
        device: torch device the batches and model run on.
    """
    self.device = device
    print("\n == SVI training ==")

    optimizer = pyro.optim.Adam({"lr": lr})
    elbo = TraceMeanField_ELBO()
    svi = SVI(self.model, self.guide, optimizer, loss=elbo)

    loss_list = []
    accuracy_list = []
    start = time.time()

    for epoch in range(epochs):
        loss = 0.0
        correct_predictions = 0.0

        for x_batch, y_batch in train_loader:
            x_batch = x_batch.to(device)
            y_batch = y_batch.to(device)
            # y_batch is one-hot; recover integer class labels.
            labels = y_batch.argmax(-1)

            # svi.step returns the (summed) ELBO loss for this batch.
            loss += svi.step(x_data=x_batch, y_data=labels)

            # Training accuracy from a fresh forward pass with 10 posterior
            # samples (this is an extra forward on top of the SVI step).
            outputs = self.forward(x_batch, n_samples=10)
            predictions = outputs.argmax(dim=-1)
            correct_predictions += (predictions == labels).sum().item()

            if DEBUG:
                # NOTE(review): placement inside the batch loop is assumed —
                # the original formatting was lost; predictions/labels are
                # per-batch, so this prints once per batch.
                print("\n", pyro.get_param_store()["model.0.weight_loc"][0][:5])
                print("\n", predictions[:10], "\n", labels[:10])

        # Normalize by dataset size so loss/accuracy are per-sample figures.
        total_loss = loss / len(train_loader.dataset)
        accuracy = 100 * correct_predictions / len(train_loader.dataset)

        print(
            f"\n[Epoch {epoch + 1}]\t loss: {total_loss:.2f} \t accuracy: {accuracy:.2f}",
            end="\t")

        # Fix: record the same normalized loss that is printed above.
        # The original appended the raw summed `loss`, so the plotted curve
        # disagreed with the logged values.
        loss_list.append(total_loss)
        accuracy_list.append(accuracy)

    execution_time(start=start, end=time.time())
    self.save()
    plot_loss_accuracy(dict={
        'loss': loss_list,
        'accuracy': accuracy_list
    }, path=TESTS + self.name + "/" + self.name + "_training.png")
mode='auto') save_weights = ModelCheckpoint('model.h5', monitor='val_loss', save_best_only=True) history = model.fit_generator(train_generator, steps_per_epoch=nb_train_samples // batch_size, epochs=epochs, validation_data=validation_generator, validation_steps=nb_validation_samples // batch_size, callbacks=[save_weights, early_stopping]) historydf = pd.DataFrame(history.history, index=history.epoch) utils.plot_loss_accuracy(history) test_datagen = ImageDataGenerator(rescale=1. / 255) test_generator = test_datagen.flow_from_directory(test_dir, target_size=(img_width, img_height), batch_size=batch_size, class_mode=None, shuffle=False) #print(test_generator.filenames) probabilities = model.predict_generator(test_generator, nb_test_samples // batch_size) mapper = {} i = 0 for file in test_generator.filenames:
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split

import utils

# Synthetic 2-feature binary classification problem, one cluster per class.
# NOTE(review): make_classification / Sequential / Dense are presumably
# imported earlier in the file — confirm if this chunk is run standalone.
X, y = make_classification(n_samples=1000, n_features=2, n_redundant=0,
                           n_informative=2, random_state=0,
                           n_clusters_per_class=1)
print(X.shape)

# Hold out a third of the samples for validation.
X_train, X_validation, y_train, y_validation = train_test_split(
    X, y, test_size=0.33, random_state=100)
print(X_train.shape)
print(X_validation.shape)
utils.plot_data(X_train, y_train)

# Perceptron for binary classification: a single sigmoid unit trained
# with SGD on mean squared error.
model = Sequential()
model.add(Dense(units=1, input_shape=(2,), activation='sigmoid'))
model.compile(optimizer='sgd', loss='mean_squared_error',
              metrics=['accuracy'])
history = model.fit(x=X_train, y=y_train, verbose=3, epochs=1000,
                    validation_data=(X_validation, y_validation),
                    batch_size=10)
print(model.summary())
print(model.get_weights())

# Tabulate the training history and plot the loss/accuracy curves.
historydf = pd.DataFrame(history.history, index=history.epoch)
utils.plot_loss_accuracy(history)

# Hard class predictions over the full dataset.
y_pred = model.predict_classes(X, verbose=0)