def __init__(self, dropout_rate, feat_length=512, archi_type='resnet18'):
    """Build the CIFAR-10 feature extractor.

    Args:
        dropout_rate: kept for interface compatibility; the dropout stages
            of the 'default' stack are currently disabled (see below).
        feat_length: feature dimension forwarded to the ResNet constructors.
        archi_type: 'default' (small 3-stage conv net), 'resnet18',
            'resnet50', or 'resnet152'.

    Raises:
        NotImplementedError: for any unrecognized ``archi_type``.
    """
    super(CIFAR10FeatureLayer, self).__init__()
    self.archi_type = archi_type
    self.feat_length = feat_length
    if self.archi_type == 'default':
        # Three conv -> BN -> ReLU -> maxpool stages: 3->32, 32->32, 32->64.
        channel_plan = [(3, 32), (32, 32), (32, 64)]
        for stage, (c_in, c_out) in enumerate(channel_plan, start=1):
            self.add_module('conv%d' % stage,
                            nn.Conv2d(c_in, c_out, kernel_size=3, padding=1))
            self.add_module('bn%d' % stage, nn.BatchNorm2d(c_out))
            self.add_module('relu%d' % stage, nn.ReLU())
            self.add_module('pool%d' % stage, nn.MaxPool2d(kernel_size=2))
            # Dropout after each stage is intentionally disabled (was
            # commented out in the original):
            # self.add_module('drop%d' % stage, nn.Dropout(dropout_rate))
    elif self.archi_type == 'resnet18':
        self.add_module('resnet18', resnet.ResNet18(feat_length))
    elif self.archi_type == 'resnet50':
        self.add_module('resnet50', resnet.ResNet50(feat_length))
    elif self.archi_type == 'resnet152':
        self.add_module('resnet152', resnet.ResNet152(feat_length))
    else:
        raise NotImplementedError
def create_model():
    """Build a multi-task classifier on a frozen ImageNet ResNet152 backbone.

    Four independent heads (Flatten -> Dense 1024 -> Dropout 0.5 ->
    Dense 1024 -> softmax) branch from the shared backbone output; head ``i``
    has ``class_outputs[i]`` classes and is named ``output_names[i]``.

    Returns:
        A compiled Keras ``Model`` (SGD, categorical cross-entropy).
    """
    input_tensor = Input(shape=(250, 250, 3))
    backbone = resnet.ResNet152(weights='imagenet', include_top=False,
                                input_tensor=input_tensor)
    # Freeze the backbone so only the heads are trained.
    for frozen_layer in backbone.layers:
        frozen_layer.trainable = False
    features = backbone.output

    def _head(task_idx):
        # Each task gets its own (unshared) stack of layers.
        h = Flatten()(features)
        h = Dense(1024, activation='relu')(h)
        h = Dropout(0.5)(h)
        h = Dense(1024, activation='relu')(h)
        return Dense(class_outputs[task_idx], activation='softmax',
                     name=output_names[task_idx])(h)

    outputs = [_head(i) for i in range(4)]
    model = Model(input_tensor, outputs, name='resnet152_mtfl')
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.SGD(lr=0.001, momentum=0.9),
                  metrics=['accuracy'])
    return model
def get_model(model_name, parameters):
    """Create a student network by name and move it to the available device.

    Args:
        model_name: one of 'resnet18', 'resnet34', 'resnet50', 'resnet101',
            'resnet152' (a ResNet is built), or 'vgg16' (unimplemented stub).
        parameters: forwarded as the first positional argument to the
            resnet constructor.

    Returns:
        A torch.nn.Module on CUDA if available (else CPU). The legacy
        sentinels 0 (vgg16 stub) and -1 (unknown name) are preserved so
        existing callers keep working.
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    # Lazy constructors: resnet.* is only evaluated for a matching name.
    resnet_builders = {
        'resnet18': lambda: resnet.ResNet18(parameters, num_classes=10),
        'resnet34': lambda: resnet.ResNet34(parameters, num_classes=10),
        'resnet50': lambda: resnet.ResNet50(parameters, num_classes=10),
        'resnet101': lambda: resnet.ResNet101(parameters, num_classes=10),
        'resnet152': lambda: resnet.ResNet152(parameters, num_classes=10),
    }
    if model_name in resnet_builders:
        net = resnet_builders[model_name]().to(device)
    elif model_name == 'vgg16':
        # TODO: VGG16 student is not implemented; 0 is the historical stub.
        net = 0
    else:
        # Typo fixed in the message ("compatibale" -> "compatible").
        print("Entered student model is not compatible currently!\n")
        net = -1
    return net
def get_model(device):
    """
    Build the network named in config["model"] for config["dataset"].

    :param device: instance of torch.device (a device string also works)
    :return: An instance of torch.nn.Module moved to *device*
    """
    # Class count per dataset; 2 is the fallback for anything unlisted.
    num_classes = 2
    if config["dataset"] == "Cifar100":
        num_classes = 100
    elif config["dataset"] == "Cifar10":
        num_classes = 10
    elif config["dataset"] == "15-Scene":
        num_classes = 15
    elif config["dataset"] == "MNIST":
        num_classes = 10
    # Lazy factories so only the requested architecture is constructed.
    model = {
        "resnet10": lambda: resnet.ResNet10(num_classes=num_classes),
        "resnet18": lambda: resnet.ResNet18(num_classes=num_classes),
        "resnet34": lambda: resnet.ResNet34(num_classes=num_classes),
        "resnet50": lambda: resnet.ResNet50(num_classes=num_classes),
        "resnet101": lambda: resnet.ResNet101(num_classes=num_classes),
        "resnet152": lambda: resnet.ResNet152(num_classes=num_classes),
        "bert": lambda: modeling_bert_appendix.BertImage(config, num_classes=num_classes),
    }[config["model"]]()
    model.to(device)
    # BUG FIX: the original tested `device == "cuda"`, which is False when
    # callers pass a torch.device such as torch.device('cuda:0') (as the
    # docstring requires), so cuDNN autotuning was never enabled. Normalize
    # with torch.device() and compare the device *type* instead.
    if torch.device(device).type == "cuda":
        # model = torch.nn.DataParallel(model)  # multiple GPUs not available
        # for free on Google Colab -EU
        torch.backends.cudnn.benchmark = True
    return model
import resnet
import numpy as np
from keras.preprocessing.image import load_img, img_to_array
from keras_applications.imagenet_utils import decode_predictions

# -------------------------------------
# Pre-trained ImageNet classifiers at three ResNet depths
# -------------------------------------
resnet50 = resnet.ResNet50(weights='imagenet')
resnet101 = resnet.ResNet101(weights='imagenet')
resnet152 = resnet.ResNet152(weights='imagenet')


# -------------------------------------
# Helper functions
# -------------------------------------
def path_to_tensor(image_path, target_size):
    """Load an image from disk and return it as a (1, H, W, C) batch array."""
    pil_image = load_img(image_path, target_size=target_size)
    return np.expand_dims(img_to_array(pil_image), axis=0)


# -------------------------------------
# Run one image through the 50-layer model
# -------------------------------------
image_path = 'images/dog.jpeg'
image_tensor = path_to_tensor(image_path, (224, 224))
pred_resnet50 = resnet50.predict(image_tensor)
net_names = ["resnet18", "resnet34", "resnet50", "resnet101", "resnet152"]
model_name = "models/" + net_names[-1] + "/digists"
data_path = "../data_img/MNIST/train/"

# Step 0: Global Parameters
epochs = 2
lr_rate = 0.0001
batch_size = 32

# Step 1: Create Model — ResNet152 here; the shallower variants
# (ResNet18/34/50/101) share the same constructor signature and can be
# swapped in directly.
model = resnet.ResNet152((None, height, width, channel),
                         classes=n_outputs, filters=6)

# Step 2: Define Metrics
print(model.summary())

if sys.argv[1] == "train":
    # Step 3: Load data
    # NOTE(review): positional args presumably mean (path, width, height,
    # shuffle?, split-ratio 0.8, ...) — confirm against loader.load_light.
    X_train, Y_train, X_valid, Y_valid = loader.load_light(
        data_path, width, height, True, 0.8, True)
    # Step 3: Loads the weights
    # model.load_weights(model_name)
    # Step 4: Training
'a', 'b', 'd', 'e', 'f', 'g', 'h', 'n', 'q', 'r', 't',) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(device) if layers == 18: net = resnet.ResNet18(num_class=num_class, channels=channels).to(device) elif layers == 34: net = resnet.ResNet34(num_class=num_class, channels=channels).to(device) elif layers == 50: net = resnet.ResNet50(num_class=num_class, channels=channels).to(device) elif layers == 101: net = resnet.ResNet101(num_class=num_class, channels=channels).to(device) else: net = resnet.ResNet152(num_class=num_class, channels=channels).to(device) print(net) criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) #scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[3, 7], gamma=0.1) best_score = 0.0 for epoch in range(num_epoch): # エポック数 net.train() running_loss = 0.0 for i, data in enumerate(trainloader, 0):