def __init__(self, conv_base_model=None):
    # Set up the convolutional network and its sub-networks; the sub-networks are
    # used later to visualize the features of a given layer.
    self.conv_base_model = conv_base_model if conv_base_model else alexnet_model()
    self.conv_sub_models = [None] + [AlexNet(i, self.conv_base_model) for i in (1, 2, 3, 4, 5)]  # Make it 1-based

    # Get deconvolutional layers from a DeconvLayers instance
    deconv_layers_instance = DeconvLayers(self.conv_base_model)
    self.deconv_layers = deconv_layers_instance.deconv_layers
    self.bias3D = deconv_layers_instance.bias3D

    # These attributes will be filled by the 'project_down' method
    self.array = None               # Tensor being projected down from feature space to image space
    self.activation_maxpool = None  # Activations of max-pool layers 1 and 2, needed for the switches
    self.current_layer = None       # Changes as the array is passed on
    self.f = None                   # Filter whose activation is projected down
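# A minimal usage sketch. The class name DeconvVisualizer and the keyword arguments of
# project_down are assumptions for illustration only; the attribute comments above only
# tell us that a 'project_down' method exists and fills these attributes.
visualizer = DeconvVisualizer()                           # builds the default AlexNet base model
reconstruction = visualizer.project_down(layer=5, f=12)   # hypothetical: project filter 12 of conv layer 5 back to image space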
def getAlexNet(self, train_images, train_labels, load_saved_model, model_save_path,
               use_pretraining, pretrained_weights_path, train_dir, val_dir, fine_tuning_method,
               batch_size, num_epochs, optimizer, loss, initial_epoch, sample, lr=None):
    """
    :param load_saved_model: boolean (whether to just load the model from the weights path)
    :param model_save_path: final model weights path (used if load_saved_model is true)
    :param pretrained_weights_path: if load_saved_model is false and use_pretraining is true,
        the path of the weights to load for pre-training
    :param train_dir: training data directory
    :param val_dir: validation data directory
    :param use_pretraining: boolean, whether to use pre-training or train from scratch
    :param fine_tuning_method: whether to use end-to-end pre-training or phase-by-phase pre-training
    :param batch_size: batch size to use while fitting the model
    :param num_epochs: number of epochs to train the model
    :param optimizer: type of optimizer to use (sgd|adagrad)
    :param loss: type of loss to use (mse|l1)
    :param initial_epoch: epoch to start training from
    :return: the AlexNet model built according to the parameters provided
    """
    print(get_time_string() + 'Creating AlexNet model..')

    img_rows, img_cols = 227, 227  # Resolution of inputs
    channels = 3

    if load_saved_model:
        if model_save_path is None:
            raise Exception('Unable to load trained model as model_save_path is None!')
        print(get_time_string() + 'Loading saved model weights from ' + model_save_path + '..')
        model = alexnet_model(img_rows=img_rows, img_cols=img_cols, channels=channels,
                              num_classes=NUM_CLASSES_YEARBOOK, use_pretraining=use_pretraining,
                              pretrained_weights_path=pretrained_weights_path,
                              optimizer=optimizer, loss=loss,
                              fine_tuning_method=fine_tuning_method,
                              weights_path=model_save_path)
    else:
        model = alexnet_model(img_rows=img_rows, img_cols=img_cols, channels=channels,
                              num_classes=NUM_CLASSES_YEARBOOK, use_pretraining=use_pretraining,
                              pretrained_weights_path=pretrained_weights_path,
                              optimizer=optimizer, loss=loss,
                              fine_tuning_method=fine_tuning_method)

    if initial_epoch >= num_epochs:
        print(get_time_string() + 'Not fitting the model since initial_epoch is >= num_epochs. Returning model..')
        return model

    # Start fine-tuning
    print(get_time_string() + 'Fitting the model..')
    for e in range(initial_epoch, num_epochs):
        print_line()
        print('Starting epoch ' + str(e))
        print_line()

        completed = 0
        for x_chunk, y_chunk in chunks(train_images, train_labels, batch_size, ALEXNET_ARCHITECTURE):
            print(get_time_string() + 'Fitting model for chunk of size ' + str(len(x_chunk)) + '...')
            model.fit(x_chunk, y_chunk, batch_size=batch_size, nb_epoch=1, verbose=1)
            completed += len(x_chunk)
            print(get_time_string() + str(completed) + ' of ' + str(len(train_images)) + ' complete. ')

        # Not saving the full model since there's a bug while loading from a saved model
        # file_name = self.getCheckpointFileName(base_model_save_path=model_save_path, epoch=e)
        # print(get_time_string() + 'Saving model to ' + file_name)
        # model.save(file_name)

        file_name = self.getWeightCheckpointFileName(base_model_save_path=model_save_path, epoch=e)
        print(get_time_string() + 'Saving model weights to ' + file_name)
        model.save_weights(file_name)

        print(get_time_string() + 'Epoch ' + str(e) + ' complete. Evaluating on validation set..')
        evaluateYearbookFromModel(model=model, architecture=ALEXNET_ARCHITECTURE, sample=sample)
        print_line()

    print(get_time_string() + 'Fitting complete. Returning model..')

    if model_save_path is not None:
        print(get_time_string() + 'Saving final model to ' + model_save_path + '..')
        model.save(model_save_path)

    return model
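# A minimal call sketch, assuming getAlexNet lives on a trainer/classifier class
# (YearbookClassifier is a hypothetical name) and that train_images/train_labels have
# already been loaded; the weight paths shown are placeholders, not files in this repo.
classifier = YearbookClassifier()
model = classifier.getAlexNet(train_images, train_labels,
                              load_saved_model=False,
                              model_save_path='alexnet_yearbook.h5',
                              use_pretraining=True,
                              pretrained_weights_path='alexnet_pretrained_weights.h5',
                              train_dir=None, val_dir=None,
                              fine_tuning_method='end-to-end',
                              batch_size=128, num_epochs=10,
                              optimizer='sgd', loss='mse',
                              initial_epoch=0, sample=False)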
from PIL import Image
from torchvision import transforms
import torch
import urllib.request

import alexnet

# Download an example image
url, filename = ("https://github.com/pytorch/hub/raw/master/dog.jpg", "dog.jpg")
try:
    urllib.URLopener().retrieve(url, filename)
except:
    urllib.request.urlretrieve(url, filename)

# Preprocess the image the way the ImageNet-trained model expects
input_image = Image.open(filename)
preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
input_tensor = preprocess(input_image)
input_batch = input_tensor.unsqueeze(0)  # create a mini-batch as expected by the model

alex_model = alexnet.alexnet_model(pretrained=True)
alex_model.eval()  # inference mode: disables dropout

with torch.no_grad():
    output = alex_model(input_batch)

# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
print(output[0])
# The output has unnormalized scores. To get probabilities, run a softmax on it.
print(torch.nn.functional.softmax(output[0], dim=0))
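# Optional follow-up sketch: report the top-5 predicted classes. This assumes an
# 'imagenet_classes.txt' file with one ImageNet class name per line (as used in the
# PyTorch hub examples); the file is an assumption, not part of this repo.
probabilities = torch.nn.functional.softmax(output[0], dim=0)
top5_prob, top5_catid = torch.topk(probabilities, 5)
with open("imagenet_classes.txt") as f:
    categories = [line.strip() for line in f]
for i in range(top5_prob.size(0)):
    print(categories[top5_catid[i]], top5_prob[i].item())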
from alexnet import alexnet_model
from alexnet import freeze_layer

model = alexnet_model(n_classes=2, freeze=[1, 1, 1, 1, 1, 1, 1, 1, 1])
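# A rough sketch of what a freeze mask like the one above typically does in a
# PyTorch-style model: a 1 freezes the corresponding block's parameters so they are
# not updated during fine-tuning. This helper and the assumption that the model's
# top-level children line up one-to-one with the 9 mask entries are illustrative,
# not the repo's actual freeze_layer implementation.
def apply_freeze_mask(model, freeze):
    for flag, child in zip(freeze, model.children()):
        if flag:
            for p in child.parameters():
                p.requires_grad = False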