def create_model():
    input_tensor = Input(shape=(250, 250, 3))
    resnet101 = resnet.ResNet101(weights='imagenet',
                                 include_top=False,
                                 input_tensor=input_tensor)

    for layer in resnet101.layers:
        layer.trainable = False

    resnet_output = resnet101.output

    outputs = []
    for i in range(4):
        x = Flatten()(resnet_output)
        x = Dense(1024, activation='relu')(x)
        x = Dropout(0.5)(x)
        x = Dense(1024, activation='relu')(x)
        outputs.append(
            Dense(class_outputs[i], activation='softmax',
                  name=output_names[i])(x))

    model = Model(input_tensor, outputs, name='resnet101_mtfl')
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.SGD(lr=0.001, momentum=0.9),
                  metrics=['accuracy'])

    return model
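The snippet leans on module-level class_outputs and output_names lists; a minimal sketch of that assumed setup, with illustrative task names and class counts (e.g. four MTFL attribute heads):

# Hypothetical setup assumed by create_model(); names and counts are illustrative.
from keras.layers import Input, Flatten, Dense, Dropout
from keras.models import Model
from keras import optimizers
import resnet  # local module exposing a Keras-Applications-style ResNet101

output_names = ['gender', 'smile', 'glasses', 'pose']  # one softmax head per task
class_outputs = [2, 2, 2, 5]                            # classes per head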
Example #2
def build_backbone(backbone, output_stride, BatchNorm, detach=False):
    if backbone == 'resnet50':
        return resnet.ResNet50(output_stride, BatchNorm, detach=detach)
    elif backbone == 'resnet101':
        return resnet.ResNet101(output_stride, BatchNorm, detach=detach)
#    elif backbone == 'xception':
#        return xception.AlignedXception(output_stride, BatchNorm)
#    elif backbone == 'drn':
#        return drn.drn_d_54(BatchNorm)
#    elif backbone == 'mobilenet':
#        return mobilenet.MobileNetV2(output_stride, BatchNorm)
    else:
        raise NotImplementedError("unsupported backbone: {}".format(backbone))
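A hypothetical call, passing nn.BatchNorm2d as the norm-layer factory (argument values are illustrative):

import torch.nn as nn

# Hypothetical usage of build_backbone().
backbone = build_backbone('resnet101', output_stride=16, BatchNorm=nn.BatchNorm2d)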
Example #3
def resnetmodel(config):
    patchheight, patchwidth, depth, channel = (
        config['height'], config['width'], config['depth'], config['channel'])
    resnet101 = resnet.ResNet101()  #weights='imagenet'
    input_img = Input(shape=(patchheight, patchwidth, depth, channel),
                      name='patchimg')
    # collapse the channel axis so the 2-D ResNet can consume the patch
    # (the reshape only works when channel == 1)
    input_imgr = Reshape((patchheight, patchwidth, depth),
                         name='reshapeinput')(input_img)
    fcn1 = resnet101(input_imgr)

    fcn1 = Dropout(0.2)(fcn1)
    regr = Dense(patchheight * 2, name='aux_outputr')(fcn1)
    regr = Reshape((patchheight, 2), name='reg')(regr)

    cnn = Model(inputs=input_img, outputs=regr)
    return cnn
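The config dict is assumed to carry the patch geometry; an illustrative value and call (channel must be 1 for the reshape above to succeed):

# Hypothetical config for resnetmodel(); values are illustrative.
config = {'height': 64, 'width': 64, 'depth': 3, 'channel': 1}
cnn = resnetmodel(config)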
Example #4
    def __init__(self, n_channels, n_classes, backbone='resnet50'):
        super(UNet, self).__init__()
        # self.backbone = build_backbone(backbone, output_stride=16, BatchNorm=nn.BatchNorm2d)  # without sync_bn
        self.backbone = resnet.ResNet101(output_stride=16, BatchNorm=nn.BatchNorm2d)
        # [, 64, 128, 128]
        # self.inc = inconv(n_channels, 64)
        # self.down1 = down(64, 128)
        # self.down2 = down(128, 256)
        # self.down3 = down(256, 512)
        # self.down4 = down(512, 512)
        self.up1 = up(1024, 512)
        self.up2 = up(512, 256)
        self.up3 = sp_up(256, 64)
        self.up4 = up(64, 64)
        self.up5 = nn.ConvTranspose2d(64, 64, 2, stride=2)
        self.outc = outconv(64, n_classes)
Example #5
def get_model(model_name, parameters):
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    if model_name == 'resnet18':
        net = resnet.ResNet18(parameters, num_classes=10).to(device)
    elif model_name == 'resnet34':
        net = resnet.ResNet34(parameters, num_classes=10).to(device)
    elif model_name == 'resnet50':
        net = resnet.ResNet50(parameters, num_classes=10).to(device)
    elif model_name == 'resnet101':
        net = resnet.ResNet101(parameters, num_classes=10).to(device)
    elif model_name == 'resnet152':
        net = resnet.ResNet152(parameters, num_classes=10).to(device)
    elif model_name == 'vgg16':
        net = 0  # placeholder: VGG16 is not wired up yet
    else:
        print("The requested student model is not currently supported!\n")
        net = -1  # sentinel for an unrecognized model name

    return net
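A hypothetical call; parameters is whatever hyperparameter object the project's local resnet constructors expect:

# Hypothetical usage; 'parameters' is project-specific and not defined here.
net = get_model('resnet18', parameters)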
Example #6
def set_model(network, num_classes, include_top=False, input_shape=(224, 224, 3)):
    
    if network == 'resnet50':
        base_model = resnet.ResNet50(include_top=include_top, input_shape=input_shape)
    elif network == 'resnet101':
        base_model = resnet.ResNet101(include_top=include_top, input_shape=input_shape)
    else:
        raise ValueError("Oops, wrong network, {} doesn't exist".format(network))
        
    x = base_model.output
    x = Flatten()(x)
    x = Dense(1024, activation='relu', name='fc1')(x)
    x = Dropout(0.7, name='dropout1')(x)
    x = Dense(128, activation='relu', name='fc2')(x)
    x = Dropout(0.5, name='dropout2')(x)
    x = Dense(num_classes, activation='sigmoid')(x)
    
    model = Model(inputs=base_model.input, outputs=x, name=network)
    
    return model
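The sigmoid head suggests multi-label classification; a hedged follow-up that pairs it with binary cross-entropy (num_classes is illustrative):

# Hypothetical usage of set_model().
model = set_model('resnet101', num_classes=20)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()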
Example #7
    def __init__(self, n_channels, n_classes, backbone='resnet50', scale=4):
        super(UNet, self).__init__()
        # self.backbone = build_backbone(backbone, output_stride=16, BatchNorm=nn.BatchNorm2d)  # without sync_bn
        self.backbone = resnet.ResNet101(output_stride=16,
                                         BatchNorm=nn.BatchNorm2d)
        # [, 64, 128, 128]
        # self.inc = inconv(n_channels, 64)
        # self.down1 = down(64, 128)
        # self.down2 = down(128, 256)
        # self.down3 = down(256, 512)
        # self.down4 = down(512, 512)
        self.up1 = up(1024, 512)
        self.up2 = up(512, 256)
        self.up3 = sp_up(256, 64)
        self.up4 = up(64, 64)
        self.up5 = nn.ConvTranspose2d(64, 64, 2, stride=2)
        self.outc = outconv(64 + (64 + 32 + 8) * int(scale), n_classes)

        # NB: a 1x1 kernel with padding=1 grows each spatial dim by 2
        self.conv_1x1_4 = nn.Conv2d(512, 64 * int(scale), 1, padding=1)
        self.conv_1x1_3 = nn.Conv2d(256, 32 * int(scale), 1, padding=1)
        self.conv_1x1_2 = nn.Conv2d(64, 8 * int(scale), 1, padding=1)
Example #8
def get_model(device):
    """
	:param device: instance of torch.device
	:return: An instance of torch.nn.Module
	"""
    num_classes = 2
    if config["dataset"] == "Cifar100":
        num_classes = 100
    elif config["dataset"] == "Cifar10":
        num_classes = 10
    elif config["dataset"] == "15-Scene":
        num_classes = 15
    elif config["dataset"] == "MNIST":
        num_classes = 10

    model = {
        "resnet10": lambda: resnet.ResNet10(num_classes=num_classes),
        "resnet18": lambda: resnet.ResNet18(num_classes=num_classes),
        "resnet34": lambda: resnet.ResNet34(num_classes=num_classes),
        "resnet50": lambda: resnet.ResNet50(num_classes=num_classes),
        "resnet101": lambda: resnet.ResNet101(num_classes=num_classes),
        "resnet152": lambda: resnet.ResNet152(num_classes=num_classes),
        "bert": lambda: modeling_bert_appendix.BertImage(config, num_classes=num_classes),
    }[config["model"]]()

    model.to(device)
    if device == "cuda":
        # model = torch.nn.DataParallel(model)  # multiple GPUs not available
        # for free on Google Colab -EU
        torch.backends.cudnn.benchmark = True

    return model
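get_model reads a module-level config mapping; an illustrative value and call:

import torch

# Hypothetical config; keys mirror those read by get_model().
config = {"dataset": "Cifar10", "model": "resnet101"}
model = get_model(torch.device("cuda" if torch.cuda.is_available() else "cpu"))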
Example #9
import resnet
import numpy as np
from keras.preprocessing.image import load_img, img_to_array
from keras_applications.imagenet_utils import decode_predictions

# -------------------------------------
#   Load pre-trained models
# -------------------------------------
resnet50 = resnet.ResNet50(weights='imagenet')
resnet101 = resnet.ResNet101(weights='imagenet')
resnet152 = resnet.ResNet152(weights='imagenet')

# -------------------------------------
#   Helper functions
# -------------------------------------


def path_to_tensor(image_path, target_size):
    image = load_img(image_path, target_size=target_size)
    tensor = img_to_array(image)
    tensor = np.expand_dims(tensor, axis=0)
    return tensor


# -------------------------------------
#   Make predictions
# -------------------------------------

image_path = 'images/dog.jpeg'
image_tensor = path_to_tensor(image_path, (224, 224))
pred_resnet50 = resnet50.predict(image_tensor)
Example #10
import resnet

model = resnet.ResNet101(input_shape=(224, 224, 3),
                         attention_input_shape=(224, 224, 3))
model.summary()
Example #11
def main():
    global args, best_prec
    args = parser.parse_args()

    best_prec = 0

    #check if save directory exist or not
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    #check if gpu or cpu; device must stay defined because it is used below
    #when moving the model (torch.cuda.set_device returns None, so do not
    #assign its result to device)
    if torch.cuda.is_available():
        device = 'cuda'
        torch.cuda.set_device(0)
        print("Using GPU: ", torch.cuda.current_device())
        print('Use GPU')
    else:
        device = 'cpu'
        print('Use CPU')

    #    if args.evaluate:
    #        print('evaluate')
    #    else:
    #        print('not evaluate')
    #
    #load model ResNet101 to device
    #model = resnet_deform.ResNet101()
    #model = resnet.ResNet(101, 10)
    model = resnet.ResNet101()
    #model = resnet.ResNet152()
    #model = resnet2.ResNet101()
    #model = testnetwork.DeformNet()
    #model = testnetwork.PlainNet()
    #model = resnet.ResNet50()
    #model = resnet.ResNet18()
    #model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet101', pretrained=False)

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
    # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs

    model = nn.DataParallel(model)

    model = model.to(device)

    #print("Using Model DeformNet", file = f)
    #print("Using Model DeformNet")
    print(
        "Using Model Resnet 101 With Deformable Convolution in conv2_x with image size 224 x 224"
    )
    #print("Using Model Plain Resnet 50")

    #can add code to use multi GPU here
    #print('Loaded model to device', file = f)
    print('Loaded model to device', device)

    #add code here to resume from a checkpoint

    #preparing CIFAR 10 dataset
    #    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
    #                                     std=[0.229, 0.224, 0.225])

    normalize = transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
                                     std=[0.2023, 0.1994, 0.2010])

    #setting for training dataset
    #/media/commlab/TenTB/home/laisc/resnet-deform-pytorch/data
    train_loader = torch.utils.data.DataLoader(
        #datasets.CIFAR10(root = './data', train = True, transform = transforms.Compose([
        datasets.CIFAR10(
            root='/media/commlab/TenTB/home/laisc/resnet-deform-pytorch/data',
            train=True,
            transform=transforms.Compose([
                transforms.RandomHorizontalFlip(),
                transforms.RandomCrop(32, 4),
                transforms.Resize(224),
                transforms.ToTensor(),
                normalize,
            ]),
            download=True),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True)

    #setting for validation dataset
    val_loader = torch.utils.data.DataLoader(
        #datasets.CIFAR10(root = './data', train = False, transform = transforms.Compose([
        datasets.CIFAR10(
            root='/media/commlab/TenTB/home/laisc/resnet-deform-pytorch/data',
            train=False,
            transform=transforms.Compose([
                transforms.Resize(224),
                transforms.ToTensor(),
                normalize,
            ])),
        batch_size=32,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True)

    #define loss function and optimizer
    criterion = nn.CrossEntropyLoss()

    #define optimizer used
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    #lr_scheduler - methods to adjust the learning rate based on the number of epochs.
    #MultiStepLR - Decays the learning rate of each parameter group by gamma once the number
    #of epoch reaches one of the milestones.
    #original:
    #lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones = [100, 150], last_epoch = args.start_epoch - 1)

    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=6,
                                                   gamma=0.5)

    #for validation process
    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    #print start time for training
    starttime = datetime.datetime.now()
    #print ('start time: ', str(starttime), file = f)
    print('start time: ', str(starttime))

    #for training process
    for epoch in range(args.start_epoch, args.epochs):
        print("Start Training")
        #for each epoch
        #print('current lr {:.5e}'.format(optimizer.param_groups[0]['lr']), file = f)
        print('current lr {:.5e}'.format(optimizer.param_groups[0]['lr']))
        print('Start Training Epoch: ', epoch)
        print('Epoch ', epoch, 'start time: ', str(datetime.datetime.now()))
        train(train_loader, model, criterion, optimizer, epoch)
        print('Epoch ', epoch, 'end time: ', str(datetime.datetime.now()))
        lr_scheduler.step()

        #after one epoch, evaluate using validation set
        print('Start Validation for Epoch: ', epoch)
        prec = validate(val_loader, model, criterion)
        #print("prec for epoch ", epoch, " is: ", prec)

        #save the best prec
        is_best_prec = prec > best_prec
        best_prec = max(prec, best_prec)

        #save checkpoint
        if epoch > 0 and epoch % args.save_every == 0:
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_prec': best_prec,
                },
                is_best_prec,
                filename=os.path.join(args.save_dir,
                                      'checkpoint_resnet101.th'))

        save_checkpoint(
            {
                'state_dict': model.state_dict(),
                'best_prec': best_prec,
            },
            is_best_prec,
            filename=os.path.join(args.save_dir, 'model_resnet101.th'))

    #print end time for training
    endtime = datetime.datetime.now()
    #print ('end time: ', str(endtime), file = f)
    print('end time: ', str(endtime))
Example #12
           'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
           'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
           'a', 'b', 'd', 'e', 'f', 'g', 'h', 'n', 'q', 'r', 't',)


device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)

if   layers == 18:
    net = resnet.ResNet18(num_class=num_class, channels=channels).to(device)
elif layers == 34:
    net = resnet.ResNet34(num_class=num_class, channels=channels).to(device)
elif layers == 50:
    net = resnet.ResNet50(num_class=num_class, channels=channels).to(device)
elif layers == 101:
    net = resnet.ResNet101(num_class=num_class, channels=channels).to(device)
else:
    net = resnet.ResNet152(num_class=num_class, channels=channels).to(device)
print(net)



criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
#scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[3, 7], gamma=0.1)

best_score = 0.0

for epoch in range(num_epoch):  # number of epochs

    net.train()
Example #13
def build_backbone(backbone, output_stride, BatchNorm):
    if backbone == 'resnet':
        return resnet.ResNet101(output_stride, BatchNorm)
    else:
        raise NotImplementedError("unsupported backbone: {}".format(backbone))
Example #14
import densenet
import dpn
import preact_resnet

cifar10_networks = {
    'lenet': lenet.LeNet(),
    'simplenet9': simplenet.SimpleNet9(),
    'simplenet9_thin': simplenet.SimpleNet9_thin(),
    'simplenet9_mobile': simplenet.SimpleNet9_mobile(),
    'simplenet7': simplenet.SimpleNet7(),
    'simplenet7_thin': simplenet.SimpleNet7_thin(),
    'resnet18NNFC1': resnet_with_compression.ResNet18NNFC1(),
    'resnet18EH0': resnet_with_compression.ResNet18EH(layer=0, quantizer=20),
    'resnet18EH1': resnet_with_compression.ResNet18EH(layer=1, quantizer=6),
    'resnet18EH2': resnet_with_compression.ResNet18EH(layer=2, quantizer=5),
    'resnet18EH3': resnet_with_compression.ResNet18EH(layer=3, quantizer=3),
    'resnet18EH4': resnet_with_compression.ResNet18EH(layer=4, quantizer=10),
    'resnet18JPEG90': resnet_with_compression.ResNet18JPEG(quantizer=90),
    'resnet18JPEG87': resnet_with_compression.ResNet18JPEG(quantizer=87),
    'resnet18AVC': resnet_with_compression.ResNet18AVC(layer=2, quantizer=24),
    'resnet18': resnet.ResNet18(),
    'resnet101': resnet.ResNet101(),
    'mobilenetslimplus': mobilenet.MobileNetSlimPlus(),
    'mobilenetslim': mobilenet.MobileNetSlim(),
    'mobilenet': mobilenet.MobileNet(),
    'mobilenetv2': mobilenetv2.MobileNetV2(),
    'densenet121': densenet.DenseNet121(),
    'dpn92': dpn.DPN92(),
    'preact_resnet18': preact_resnet.PreActResNet18(),
}
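The registry instantiates every network eagerly at module import, so selecting one is a plain dict lookup:

# Hypothetical usage; fetches the pre-built ResNet-101 instance.
net = cifar10_networks['resnet101']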
Example #15
def main():
    global args, best_prec
    args = parser.parse_args()
    
    best_prec = 0
    
    #check if save directory exist or not
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
        
    #check if gpu or cpu
    if torch.cuda.is_available():
        device = 'cuda'
        print('Use GPU', file = f)
    else:
        device = 'cpu'
        print('Use CPU', file = f)
        
#    if args.evaluate:
#        print('evaluate')
#    else:
#        print('not evaluate')   
#        
    #load model ResNet101 to device
    model = resnet.ResNet101()
    model = model.to(device)
   
    #can add code to use multi GPU here
    print('Loaded model to device', file = f)
    
    #add code here to resume from a checkpoint
        
    #preparing CIFAR 10 dataset
#    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
#                                     std=[0.229, 0.224, 0.225])
    
    normalize = transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
                                     std=[0.2023, 0.1994, 0.2010])
    
    #setting for training dataset
    train_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10(root = './data', train = True, transform = transforms.Compose([
                    transforms.RandomHorizontalFlip(),
                    transforms.RandomCrop(32, 4),
                    transforms.ToTensor(),
                    normalize,
                    ]), download = True),
            batch_size = args.batch_size, shuffle = True,
            num_workers = args.workers, pin_memory = True)
        
    #setting for validation dataset
    val_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10(root = './data', train = False, transform = transforms.Compose([
                    transforms.ToTensor(),
                    normalize,
                    ])),
        batch_size = 128, shuffle = False,
        num_workers = args.workers, pin_memory = True)
        
        
    #define loss function and optimizer
    criterion = nn.CrossEntropyLoss()
    
    #define optimizer used
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum = args.momentum, weight_decay = args.weight_decay)
    
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones = [100, 150], last_epoch = args.start_epoch - 1)
    
    
    
    #for validation process
    if args.evaluate:
        validate(val_loader, model, criterion)
        return
    
    
    #print start time for training
    starttime = datetime.datetime.now()
    print ('start time: ', str(starttime), file = f)
    
    #for training process
    for epoch in range(args.start_epoch, args.epochs):
        
        #for each epoch
        print('current lr {:.5e}'.format(optimizer.param_groups[0]['lr']), file = f)
        print('Start Training Epoch: ', epoch)
        print ('Epoch ', epoch, 'start time: ', str(datetime.datetime.now()))
        train(train_loader, model, criterion, optimizer, epoch)
        print ('Epoch ', epoch, 'end time: ', str(datetime.datetime.now()))
        lr_scheduler.step()
        
        #after one epoch, evaluate using validation set
        print('Start Validation for Epoch: ', epoch)
        prec = validate(val_loader, model, criterion)
        
        #save the best prec 
        is_best_prec = prec > best_prec
        best_prec = max(prec, best_prec)
        
        #save checkpoint
        if epoch > 0 and epoch % args.save_every == 0:
            save_checkpoint({'epoch': epoch + 1,
                             'state_dict': model.state_dict(),
                             'best_prec': best_prec,},
                            is_best_prec,
                            filename = os.path.join(args.save_dir, 'checkpoint_resnet101.th'))

        save_checkpoint({'state_dict': model.state_dict(),
                         'best_prec': best_prec,},
                        is_best_prec,
                        filename = os.path.join(args.save_dir, 'model_resnet101.th'))
        
    #print end time for training
    endtime = datetime.datetime.now()
    print ('end time: ', str(endtime), file = f)