def k_fold_predict(n_splits):
    """Run k-fold cross-validated training and return averaged TTA predictions.

    For each fold, trains on the inner-train split, reloads the best saved
    checkpoint, and accumulates test-time-augmentation (TTA) predictions for
    both the main and the additional test sets.

    Args:
        n_splits: number of cross-validation folds.

    Returns:
        Tuple ``(overall_pred, overall_pred_add)``: predictions for the two
        test sets, averaged element-wise across folds (numpy arrays).
    """
    overall_pred, overall_pred_add = [], []
    # NOTE(review): the model is created once and re-trained across folds, so
    # weights carry over from one fold to the next -- confirm this is
    # intended; a fresh model per fold is the usual k-fold setup.
    model = Resnet34(num_classes=17).cuda()
    # Bug fix: `n_splits` was ignored and KFold was hard-coded to 4 folds.
    kf = KFold(n_splits=n_splits)
    for i, (train_index, val_index) in enumerate(kf.split(train_IDs)):
        logger.info("Fold %d" % (i + 1))
        inner_train_IDs = list(np.array(train_IDs)[train_index])
        val_IDs = list(np.array(train_IDs)[val_index])
        partition = {'inner_train': inner_train_IDs, 'validation': val_IDs}
        train_ds = PlanetDataset(os.path.join(DATA_DIR, 'train-jpg'),
                                 partition['inner_train'],
                                 os.path.join(DATA_DIR, 'train_v2.csv'),
                                 True)
        val_ds = PlanetDataset(os.path.join(DATA_DIR, 'train-jpg'),
                               partition['validation'],
                               os.path.join(DATA_DIR, 'train_v2.csv'))
        train_dl = DataLoader(train_ds, batch_size=batch_size, num_workers=4,
                              pin_memory=True)
        val_dl = DataLoader(val_ds, batch_size=batch_size, num_workers=4,
                            pin_memory=True)
        best_model_path = train(model, 0.01, 30, train_dl, val_dl)
        logger.info("Training complete")
        # Reload the best checkpoint produced during this fold's training.
        best_model = Resnet34(num_classes=17).cuda()
        load_model(best_model, best_model_path)
        logger.info("Loading best model")
        logger.info("Making TTA predictions")
        tta_pred = make_tta_prediction(best_model, test_dl, test_dl_aug, 4)
        # Bug fix: the additional test set was predicted with the in-training
        # `model` instead of the reloaded best checkpoint.
        tta_add_pred = make_tta_prediction(best_model, test_add_dl,
                                           test_add_dl_aug, 4)
        logger.info("TTA predictions complete")
        overall_pred.append(tta_pred)
        overall_pred_add.append(tta_add_pred)
    # Average the per-fold prediction arrays element-wise.
    overall_pred = np.mean(overall_pred, axis=0)
    overall_pred_add = np.mean(overall_pred_add, axis=0)
    return overall_pred, overall_pred_add
def main():
    """Train a flower classifier from command-line arguments and save a checkpoint."""
    # Measures total program runtime by collecting start time
    start_time = time()
    # Creates & retrieves command-line arguments, then validates them
    in_arg = func.get_training_input_args()
    func.check_command_line_args(in_arg)
    # Load and transform the training / validation / test data
    dataloaders, dataloaders_valid, dataloaders_test, image_datasets = \
        load_and_preprocess_data(in_arg.data_dir)
    # Load the pretrained backbone and freeze its feature weights
    model, input_size = func.load_model(in_arg.arch)
    for param in model.parameters():
        param.requires_grad = False
    # Replace the classifier head with a fresh network (102 flower classes)
    flower_classifer = func.FlowerNetwork(input_size, 102,
                                          in_arg.hidden_units, in_arg.dropout)
    model.classifier = flower_classifer
    criterion = nn.NLLLoss()
    # Only the new classifier's parameters are optimized
    optimizer = optim.Adam(model.classifier.parameters(),
                           lr=in_arg.learning_rate)
    # Idiom fix: test the flag's truthiness directly instead of `== True`
    device = torch.device(
        "cuda" if torch.cuda.is_available() and in_arg.gpu else "cpu")
    print_every = 40
    # Train the network
    func.trainer(device, model, dataloaders, print_every, criterion,
                 optimizer, in_arg.epochs, dataloaders_valid)
    # Measure total program runtime and print it in hh:mm:ss format
    end_time = time()
    func.duration(start_time, end_time)
    # Save the checkpoint
    func.save_checkpoint(model, in_arg.arch, in_arg.save_dir,
                         image_datasets, optimizer)
# NOTE(review): Python 2 script; this chunk starts mid-file -- `epoch`,
# `population`, `train_accuracy` and `dev_accuracy` are defined before this
# view.
print '****** EPOCH SUMMARY', epoch + 1, train_accuracy, dev_accuracy
print '****** saving population...'
# Persist every model in the population under an indexed filename.
count = 0
for m in population:
    save_model(m, '../saved_models/ga200_p2after_m' + str(count) + '.mdl')
    count += 1


if __name__ == '__main__':
    models = []
    population_size = 100
    if len(sys.argv) > 1:
        # Any extra CLI argument means "resume from saved population".
        for i in range(population_size):
            # NOTE(review): `i < population_size` is always true inside
            # `range(population_size)`, so the else branch below is dead code.
            if i < population_size:
                model = load_model('../saved_models/ga200_p2after_m' + str(i) + '.mdl')
            else:
                model = ga_mdl.Genetic_NNModel([28 * 28, 128, 10])
            models.append(model)
    else:
        # Fresh start: build a new population of 784-128-10 networks.
        for i in range(population_size):
            model = ga_mdl.Genetic_NNModel([28 * 28, 128, 10])
            models.append(model)
    train_set, dev_set = load_mnist('../mnist_data')
    # Selection roulette wheel: index i is appended
    # int(sqrt(population_size - i)) times, so lower indices are drawn more
    # often when sampling from the wheel.
    roulette_wheel = []
    for i in range(population_size):
        for k in range(int((population_size - i)**0.5)):
            roulette_wheel.append(i)
    # NOTE(review): loop body continues beyond this chunk.
    for i in range(7):
from keras import backend as K
from timeit import default_timer as timer
from progress.bar import Bar
from helper_functions import save_model, load_model

#tbCallBack = TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)

# Choose the input tensor layout to match the configured Keras backend.
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)

starting_epoch = 0
try:
    # Resume from a saved checkpoint; model_details[1] holds the last
    # completed epoch, so training restarts on the next one.
    model, model_details = load_model('models/{}'.format(model_directory))
    starting_epoch = int(model_details[1]) + 1
# NOTE(review): bare `except:` silently swallows every failure (including
# KeyboardInterrupt and typos inside the try block) and falls back to
# building a fresh model -- consider narrowing to the actual load errors.
except:
    # No checkpoint: build a fresh stacked Conv/ReLU/MaxPool network.
    model = Sequential()
    model.add(Conv2D(64, (3, 3), input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (3, 3)))
    model.add(Activation('relu'))
    # NOTE(review): chunk is truncated here; more layers likely follow
    # outside this view.
    model.add(MaxPooling2D(pool_size=(2, 2)))
action='store_true', help='For using GPU for prediction') in_arg = parser.parse_args() input_image = ''.join(in_arg.input_image) k = in_arg.top_k mapping = ''.join(in_arg.category_names) path = ''.join(in_arg.checkpoint) if in_arg.gpu and torch.cuda.is_available(): device = 'cuda' elif in_arg.gpu and ~torch.cuda.is_available(): print("GPU is not available, so we will do prediction on CPU.") device = 'cpu' else: device = 'cpu' model, criterion, optimizer = helper_functions.load_model(path) # print("Model is loaded successfully") with open(mapping, 'r') as json_file: cat_to_name = json.load(json_file) prob, classes = helper_functions.predict(input_image, model, k, device) # print("Prediction is done successfully") category = [cat_to_name[str(cls)] for cls in classes] for i in range(k): print("Rank {}: Predicted flower category {} with a probability of {}.". format(i + 1, category[i], prob[i])) # print("Prediction Completed")
# Number of whole batches in the validation set (integer division).
validation_batches = val_samples // batch_size

# Validation images are only rescaled to [0, 1]; no augmentation.
test_datagen = ImageDataGenerator(rescale=1. / 255)
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary',
    shuffle=True)

model_dir = 'conv-64-64-128-128'
# Load the best-accuracy checkpoint for this architecture (epoch unspecified).
model = load_model('models/{}'.format(model_dir),
                   epoch=None,
                   best_acc=True,
                   model_to_load=None)

#result = model.evaluate_generator(validation_generator, steps=validation_batches)
#result = model.predict_generator(validation_generator, steps=validation_batches)
#print(result)

# Single-image sanity check on one validation dog photo.
fname = 'data/validation/dogs/dog.10010.jpg'
img = load_img(
    fname,
    grayscale=False,  # based on colour_mode - rgb generally
    target_size=(img_width, img_height))
#interpolation='nearest')  # Defaults to this
x = img_to_array(img, data_format=K.image_data_format())  # This is set by default
# Apply one random training-style transform to the loaded image array.
x = test_datagen.random_transform(x)
# NOTE(review): Python 2 script; chunk starts mid-file -- `epoch`,
# `population`, `train_set` and `dev_set` come from before this view.
# Evaluate population[0] (presumably the fittest individual -- confirm the
# population ordering).
train_accuracy = accuracy_on_dataset(train_set, population[0])
dev_accuracy = accuracy_on_dataset(dev_set, population[0])
print '****** EPOCH SUMMARY', epoch + 1, train_accuracy, dev_accuracy
print '****** saving population...'
# Persist every model in the population under an indexed filename.
count = 0
for m in population:
    save_model(m, '../saved_models/ga200' + str(count) + '.mdl')
    count += 1


if __name__ == '__main__':
    models = []
    population_size = 50
    if len(sys.argv) > 1:
        # Any extra CLI argument means "resume from saved population".
        for i in range(population_size):
            model = load_model('../saved_models/ga200' + str(i) + '.mdl')
            models.append(model)
    else:
        # Fresh start: random 784-256-128-10 networks.
        for i in range(population_size):
            model = nn_mdl.NNModel()
            model.initialize(28 * 28, 256, 128, 10)
            models.append(model)
    train_set, dev_set = load_mnist('../mnist_data')
    # Selection roulette wheel: index i is appended
    # int(sqrt(population_size - i)) times, so lower indices are drawn more
    # often when sampling from the wheel.
    roulette_wheel = []
    for i in range(population_size):
        for k in range(int((population_size - i)**0.5)):
            roulette_wheel.append(i)
    # NOTE(review): loop body continues beyond this chunk.
    for i in range(7):
        rand.shuffle(roulette_wheel)
        # Periodic progress report inside the training loop.
        # NOTE(review): chunk starts mid-function; `count`, `took`,
        # `avg_loss` and `total_loss` are defined before this view.  Under
        # Python 2, `count / len(train_set)` is integer division, so this
        # percentage prints 0 until the epoch completes -- probably meant to
        # be float division.
        print '\t', (count / len(train_set)) * 100, '% complete.', 'took:', took,\
            'seconds. average loss <-', avg_loss / 5000
        avg_loss = 0
    # End-of-epoch statistics.
    train_loss = total_loss / len(train_set)
    train_accuracy = accuracy_on_dataset(train_set, model)
    dev_accuracy = accuracy_on_dataset(dev_set, model)
    print 'epoch:', epoch + 1, 'loss:', train_loss, 'train accuracy:', train_accuracy, 'dev accuracy:', dev_accuracy
    # Checkpoint only when dev accuracy improves (keep-best saving).
    if dev_accuracy > best_dev_accuracy:
        best_dev_accuracy = dev_accuracy
        print 'saving model....'
        save_model(model, model_file)


if __name__ == '__main__':
    model_file = '../saved_models/nn2classifier.mdl'
    train_set, dev_set = load_mnist('../mnist_data')
    # Any extra CLI argument means "resume from the saved checkpoint".
    if len(sys.argv) > 1:
        model = load_model(model_file)
    else:
        # Fresh 784-128-128-10 network.
        model = nn_mdl.NNModel()
        model.initialize(28 * 28, 128, 128, 10)
    train_classifier(train_set, dev_set, num_iterations=20,
                     learning_rate=0.001, model=model, regularization=1e-6,
                     model_file=model_file)
print('total_epoch:', total_epoch)

# Distinguish checkpoints by architecture family.
model_ver = 'resnet' if type(model).__name__ == 'ResNet' else 'paper'

# Save directories; each run is keyed by architecture, init scheme, seed,
# deficit-removal epoch and the reset-parameters flag.
work_dir = '/home/saiperi/critical-learning/fim_results'
save_dir = '/home/saiperi/critical-learning/fim_results(' + model_ver + ')_init(' + w_init + \
    ')_v(' + str(SEED) + ')_defrem(' + str(blur_stop_epoch) + \
    ')_reset_param(' + str(args.reset_parameters_flag) + ')/'
model_save_dir = save_dir + 'models/'

# Load model
if args.model_load_flag:
    model_load_path = model_save_dir
    model_file_name = ('model(' + model_ver + ')_init(' + w_init + ')_v(' +
                       str(SEED) + ')_defrem(' + str(blur_stop_epoch) + ')_' +
                       'epoch_' + str(args.model_load_epoch) + '.pt')
    # Bug fix: this previously passed the undefined name
    # `model_save_file_name` (a NameError at runtime); the variable built
    # above is `model_file_name`.
    model = load_model(model, model_load_path, model_file_name)

# Use single gpu
if cuda:
    model.cuda(DEVICE_ID)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=args.learning_rate,
                      momentum=args.momentum, weight_decay=args.weight_decay)
# optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
# optimizer = optim.Adam(model.parameters(), lr=args.learning_rate,weight_decay=args.weight_decay)
# step_size = decay the LR every nth epoch by `gamma`.
scheduler = StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
@conference: MICCAI-19
"""

# In-repo imports
from eye_dataset import EyeDatasetTest
from helper_functions import load_model
from adaptive_attack import AdaptiveSegmentationMaskAttack


if __name__ == '__main__':
    # Glaucoma dataset
    eye_dataset = EyeDatasetTest('../data/image_samples', '../data/mask_samples')
    # GPU parameters
    DEVICE_ID = 0
    # Load model, change it to where you download the model to
    model = load_model('../models/eye_pretrained_model.pt')
    model.eval()
    # Move weights to CPU first, then onto the selected GPU.
    # NOTE(review): the intermediate .cpu() call looks redundant -- confirm
    # whether the checkpoint format requires it.
    model.cpu()
    model.cuda(DEVICE_ID)
    # Attack parameters
    tau = 1e-7
    beta = 1e-6
    # Read images
    im_name1, im1, mask1 = eye_dataset[0]
    im_name2, im2, mask2 = eye_dataset[1]
    # Perform attack
    # NOTE(review): the attack object is constructed here but not yet
    # invoked; the script likely continues beyond this view.
    adaptive_attack = AdaptiveSegmentationMaskAttack(DEVICE_ID, model, tau, beta)
from helper_functions import load_model

# Directory layout for data, logs and checkpoints.
DATA_DIR = os.path.abspath('data/')
LOG_DIR = './logs'
MODEL_DIR = './models'

# Pre-computed dataset split (ID lists keyed 'test' / 'test_add').
# NOTE(review): pickle.load on a local file -- only safe for trusted data.
with open(os.path.join(DATA_DIR, 'partition.p'), 'rb') as f:
    partition = pickle.load(f)

batch_size = 64

# set model for prediction
model = Net(num_classes=17).cuda()
# model = Resnet34(num_classes=17).cuda()
model_name = 'model_net_89'
# Load the saved weights for the chosen checkpoint into `model` in place.
load_model(model, os.path.join(MODEL_DIR, model_name + '.pth'))

# One timestamped log per prediction run.
run_name = time.strftime("%Y-%m-%d_%H%M-") + model_name + "prediction"
logger = setup_logs(LOG_DIR, run_name)

# create datasets and loaders
test_ds = PlanetDataset(os.path.join(DATA_DIR, 'test-jpg'),
                        partition['test'],
                        os.path.join(DATA_DIR, 'sample_submission_v2.csv'))
test_add_ds = PlanetDataset(os.path.join(DATA_DIR, 'test-jpg-additional'),
                            partition['test_add'],
                            os.path.join(DATA_DIR, 'sample_submission_v2.csv'))
# NOTE(review): chunk is truncated mid-call below; the remaining arguments
# continue outside this view.
test_ds_aug = PlanetDataset(os.path.join(DATA_DIR, 'test-jpg'),
                            partition['test'],