Example #1
def create_model(type, input_size, num_classes):
    if type == "resnet":
        model = ResNet(num_classes=num_classes)
    elif type in ["seresnext50", "seresnext101", "seresnet50", "seresnet101", "seresnet152", "senet154"]:
        model = SeNet(type=type, num_classes=num_classes)
    elif type == "alexnet":
        model = AlexNetWrapper(num_classes=num_classes)
    elif type == "nasnet":
        model = NasNet(num_classes=num_classes)
    elif type == "cnn":
        model = SimpleCnn(num_classes=num_classes)
    elif type == "residual_cnn":
        model = ResidualCnn(num_classes=num_classes)
    elif type == "fc_cnn":
        model = FcCnn(num_classes=num_classes)
    elif type == "hc_fc_cnn":
        model = HcFcCnn(num_classes=num_classes)
    elif type == "mobilenetv2":
        model = MobileNetV2(input_size=input_size, n_class=num_classes)
    elif type in ["drn_d_38", "drn_d_54", "drn_d_105"]:
        model = Drn(type=type, num_classes=num_classes)
    elif type == "seresnext50_cs":
        model = SeResNext50Cs(num_classes=num_classes)
    elif type == "stack":
        model = StackNet(num_classes=num_classes)
    else:
        raise Exception("Unsupported model type: '{}'".format(type))

    return nn.DataParallel(model)
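For context, a minimal usage sketch of the factory above (the batch size, input size, and class count are illustrative assumptions, and the model classes are assumed to be importable from the surrounding project):

import torch

model = create_model("mobilenetv2", input_size=224, num_classes=10)
dummy = torch.randn(2, 3, 224, 224)   # illustrative input batch
logits = model(dummy)                 # expected shape: (2, 10)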
Example #2
def init_model():
    print('==> Building model..'+args.model)
    if args.model == 'ResNet18':
        net = ResNet18(num_classes=num_classes)
    elif args.model == 'MLP':
        # 4-layer MLP
        input_dim = 3072 if ('CIFAR' in args.dataset) else 784
        width = args.width
        net = torch.nn.Sequential(OrderedDict([
                                 ('flatten', torch.nn.Flatten()),
                                 ('linear0', torch.nn.Linear(input_dim, width)),
                                 ('relu0', torch.nn.ReLU()),
                                 ('linear1', torch.nn.Linear(width, width)),
                                 ('relu1', torch.nn.ReLU()),
                                 ('linear2', torch.nn.Linear(width, width)),
                                 ('relu2', torch.nn.ReLU()),
                                 ('linear3', torch.nn.Linear(width, num_classes))]))
    elif args.model == 'DenseNet':
        net = densenet_cifar(num_classes=num_classes)
    elif args.model == 'MobileNetV2':
        net = MobileNetV2(num_classes=num_classes)
    elif args.model == 'ResNet20_FIXUP':
        net = fixup_resnet20(num_classes=num_classes)
    else:
        raise ValueError('Unknown args.model name: {}'.format(args.model))
    net = net.to(device)

    if device == 'cuda':
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True
    return net
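As a quick sanity check on the 4-layer MLP built above, its parameter count follows directly from the layer sizes; a small sketch (width and num_classes are whatever the script is run with):

def mlp_param_count(input_dim, width, num_classes):
    # linear0, linear1, linear2, linear3: weights plus biases
    weights = input_dim * width + 2 * width * width + width * num_classes
    biases = 3 * width + num_classes
    return weights + biases

print(mlp_param_count(3072, 512, 10))  # 2103818 for a CIFAR input with width=512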
Example #3
 def __init__(self, mlp_kwargs, n1_kwargs, n2_kwargs, n3_kwargs=None):
     super(Medusa, self).__init__()
     self.name = 'medusa'
     self.n1 = CNN1D(**n1_kwargs)
     self.n2 = MobileNetV2(**n2_kwargs)
     self.n3 = None
     if n3_kwargs:
         self.n3 = CNN1D(**n3_kwargs)
     self.mlp = MLP(**mlp_kwargs)
Example #4
def export2caffe(weights, num_classes, img_size):
    model = MobileNetV2(num_classes)
    weights = torch.load(weights, map_location='cpu')
    model.load_state_dict(weights['model'])
    model.eval()
    fuse(model)
    name = 'MobileNetV2'
    dummy_input = torch.ones([1, 3, img_size[1], img_size[0]])
    pytorch2caffe.trans_net(model, dummy_input, name)
    pytorch2caffe.save_prototxt('{}.prototxt'.format(name))
    pytorch2caffe.save_caffemodel('{}.caffemodel'.format(name))
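A hypothetical invocation of export2caffe (the weights path is a placeholder); note that dummy_input is built as [1, 3, img_size[1], img_size[0]], so img_size is interpreted as (width, height):

export2caffe('weights/best.pt', num_classes=10, img_size=(224, 224))
# expected to write MobileNetV2.prototxt and MobileNetV2.caffemodel in the working directory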
Example #5
def build_internal_nn(model_name, *args, **kwargs):
    if model_name == "VGG19":
        return VGG('VGG19')
    elif model_name == "GoogLeNet":
        return GoogLeNet(*args, **kwargs)
    elif model_name == "MobileNetV2":
        return MobileNetV2(*args, **kwargs)
    elif model_name == "SENet18":
        return SENet18(*args, **kwargs)
    else:
        raise ValueError("Unknown model name : {}".format(model_name))
Example #6
def export2caffe(weights, num_classes, img_size):
    model = MobileNetV2(num_classes)
    weights = torch.load(weights, map_location='cpu')
    model.load_state_dict(weights['model'])
    model.eval()
    fuse(model)
    dummy_input = torch.ones([1, 3, img_size[1], img_size[0]])
    torch.onnx.export(model,
                      dummy_input,
                      'MobileNetV2.onnx',
                      input_names=['input'],
                      output_names=['output'],
                      opset_version=7)
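After the export, the resulting graph can be sanity-checked with onnxruntime; this is an optional verification step, not part of the original function, and the input shape below assumes img_size=(224, 224):

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('MobileNetV2.onnx')
dummy = np.ones((1, 3, 224, 224), dtype=np.float32)  # match the export's dummy shape
(probs,) = sess.run(['output'], {'input': dummy})
print(probs.shape)  # (1, num_classes)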
Example #7
 def cfg2fitness(cfg):
     if args.local_rank == 0:
         print(str(cfg))
     if str(cfg) in cfg2fit_dict.keys():
         return cfg2fit_dict[str(cfg)]
     elif cfg == run_manager.net.module.config['cfg_base']:
         return 0.
     else:
         run_manager.run_config.n_epochs = run_manager.run_config.search_epoch
         if args.model == 'resnet18':
             run_manager.reset_model(
                 ResNet_ImageNet(num_classes=1000, cfg=cfg, depth=18),
                 net_origin.cpu())
         elif args.model == 'resnet34':
             run_manager.reset_model(
                 ResNet_ImageNet(num_classes=1000, cfg=cfg, depth=34),
                 net_origin.cpu())
         elif args.model == 'resnet50':
             run_manager.reset_model(
                 ResNet_ImageNet(num_classes=1000, cfg=cfg, depth=50),
                 net_origin.cpu())
         elif args.model == 'mobilenet':
             run_manager.reset_model(MobileNet(num_classes=1000, cfg=cfg),
                                     net_origin.cpu())
         elif args.model == 'mobilenetv2':
             run_manager.reset_model(MobileNetV2(num_classes=1000, cfg=cfg),
                                     net_origin.cpu())
         elif args.model == 'vgg':
             run_manager.reset_model(
                 VGG_CIFAR(cfg=cfg, cutout=False, num_classes=10),
                 net_origin.cpu())
         elif args.model == 'resnet56':
             run_manager.reset_model(
                 ResNet_CIFAR(cfg=cfg,
                              depth=56,
                              num_classes=10,
                              cutout=False), net_origin.cpu())
         elif args.model == 'resnet110':
             run_manager.reset_model(
                 ResNet_CIFAR(cfg=cfg,
                              depth=110,
                              num_classes=10,
                              cutout=False), net_origin.cpu())
         run_manager.start_epoch = 0
         run_manager.train()
         _, acc1, _ = run_manager.validate(is_test=False, return_top5=True)
         cfg2fit_dict[str(cfg)] = acc1.item()
         return acc1.item()
Example #8
def getStudentModel():
    student_model = parser.get('student', 'MODEL')

    if student_model == 'dcase_small':
        student = DCASE_Small()
        student = student.cuda()

    elif student_model == 'cnn_lstm':
        student = CNN_LSTM()
        student = student.cuda()

    elif student_model == 'mobilenetv2':
        width_mult = parser.getint('models', 'width_mult')
        student = MobileNetV2(width_mult, 8)

    else:
        raise ValueError('Unknown student MODEL: {}'.format(student_model))

    return student
Example #9
def run(img_dir, output_csv, weights, img_size, num_classes, rect):
    results = []
    model = MobileNetV2(num_classes)
    state_dict = torch.load(weights, map_location='cpu')
    model.load_state_dict(state_dict['model'])
    model = model.to(device)
    model.eval()
    names = [n for n in os.listdir(img_dir) if osp.splitext(n)[1] in IMG_EXT]
    for name in tqdm(names):
        path = osp.join(img_dir, name)
        img = cv2.imread(path)
        pred = inference(model, img, img_size, rect=rect)
        idx = pred.argmax()
        results.append('%s %d' % (path, idx))
    with open(output_csv, 'w') as f:
        f.write('\n'.join(results))
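A hypothetical invocation of the routine above; all paths and sizes are placeholders:

run(img_dir='data/test_images',
    output_csv='predictions.csv',
    weights='weights/best.pt',
    img_size=(224, 224),
    num_classes=10,
    rect=False)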
Example #10
def build_model(device, model_name, num_classes=10):
    """构建模型:vgg、vggnonorm、resnet、preactresnet、googlenet、densenet、
                resnext、mobilenet、mobilenetv2、dpn、shufflenetg2、senet、shufflenetv2

    :param device: 'cuda' if you have a GPU, 'cpu' otherwise
    :param model_name: One of the models available in the folder 'models'
    :param num_classes: 10 or 100 depending on the chosen dataset
    :return: The model architecture
    """
    print('==> Building model..')
    model_name = model_name.lower()
    if model_name == 'vgg':
        net = VGG('VGG19', num_classes=num_classes)
    elif model_name == 'vggnonorm':
        net = VGG('VGG19', num_classes=num_classes, batch_norm=False)
    elif model_name == 'resnet':
        net = ResNet18(num_classes=num_classes)
    elif model_name == 'preactresnet':
        net = PreActResNet18()
    elif model_name == 'googlenet':
        net = GoogLeNet()
    elif model_name == 'densenet':
        net = DenseNet121()
    elif model_name == 'resnext':
        net = ResNeXt29_2x64d()
    elif model_name == 'mobilenet':
        net = MobileNet()
    elif model_name == 'mobilenetv2':
        net = MobileNetV2()
    elif model_name == 'dpn':
        net = DPN92()
    elif model_name == 'shufflenetg2':
        net = ShuffleNetG2()
    elif model_name == 'senet':
        net = SENet18()
    elif model_name == 'shufflenetv2':
        net = ShuffleNetV2(1)
    else:
        raise ValueError('Error: the specified model is incorrect ({})'.format(model_name))

    net = net.to(device)
    if device == 'cuda':
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True
    return net
Example #11
def main():
    # Initializing Configs
    folder_init(opt)
    net = None

    # Initialize model
    try:
        if opt.MODEL_NAME == 'MobileNetV2':
            net = MobileNetV2(opt)
        elif opt.MODEL_NAME == 'PeleeNet':
            net = PeleeNet(opt)
        else:
            raise KeyError('Your model is not found.')
    except KeyError:
        exit(0)
    log("Model initialized successfully.")

    if opt.START_PREDICT or opt.START_VOTE_PREDICT:
        if opt.START_VOTE_PREDICT:
            net.load(model_type="temp_model.dat")
            net = prep_net(net)
            val_loader = load_regular_data(opt, net, val_loader_type=SixBatch)
            vote_val(net, val_loader)
        net.load(model_type="temp_model.dat")
        net = prep_net(net)
        _, val_loader = load_regular_data(opt,
                                          net,
                                          val_loader_type=ImageFolder)
        predict(net, val_loader)
    else:
        if opt.LOAD_SAVED_MOD:
            net.load()
        net = prep_net(net)
        if net.opt.DATALOADER_TYPE == "SamplePairing":
            train_loader, val_loader = load_regular_data(
                opt, net, train_loader_type=SamplePairing)
            log("SamplePairing datasets are generated successfully.")
        elif net.opt.DATALOADER_TYPE == "ImageFolder":
            train_loader, val_loader = load_regular_data(
                opt, net, train_loader_type=ImageFolder)
            log("All datasets are generated successfully.")
        else:
            raise KeyError("Your DATALOADER_TYPE doesn't exist!")
        fit(net, train_loader, val_loader)
Example #12
                                           train=False,
                                           transform=Compose([
                                               Sampler(107),
                                               FilterDimensions([0, 1, 2]),
                                               Flatten(),
                                           ]))
    test_loader_inertial = DataLoader(dataset=test_dataset_inertial,
                                      batch_size=batch_size,
                                      shuffle=False,
                                      drop_last=True)
    models_list.append(model_inertial)
    data_loaders_list.append(test_loader_inertial)
    inertial_included = True

if len(sys.argv) > 2:
    model_rgb = MobileNetV2(num_classes).to(device)
    model_rgb.load_state_dict(torch.load(sys.argv[2]))

    test_dataset_rgb = UtdMhadDataset(modality='sdfdi',
                                      train=False,
                                      transform=Compose([
                                          torchvision.transforms.Resize(224),
                                          torchvision.transforms.ToTensor()
                                      ]))
    test_loader_rgb = DataLoader(dataset=test_dataset_rgb,
                                 batch_size=batch_size,
                                 shuffle=False,
                                 drop_last=True)
    models_list.append(model_rgb)
    data_loaders_list.append(test_loader_rgb)
    rgb_included = True
Example #13
File: train.py  Project: paperscodes/CAP
            net_origin = nn.DataParallel(ResNet_ImageNet(depth=18, num_classes=run_config.data_provider.n_classes))
    elif args.model=="resnet34":
        assert args.dataset=='imagenet', 'resnet34 only supports imagenet dataset'
        net = ResNet_ImageNet(depth=34, num_classes=run_config.data_provider.n_classes, cfg=eval(args.cfg))
        if args.base_path!=None:
            weight_path = args.base_path+'/checkpoint/model_best.pth.tar'
            net_origin = nn.DataParallel(ResNet_ImageNet(depth=34, num_classes=run_config.data_provider.n_classes))
    elif args.model=="resnet50":
        assert args.dataset=='imagenet', 'resnet50 only supports imagenet dataset'
        net = ResNet_ImageNet(depth=50, num_classes=run_config.data_provider.n_classes, cfg=eval(args.cfg))
        if args.base_path!=None:
            weight_path = args.base_path+'/checkpoint/model_best.pth.tar'
            net_origin = nn.DataParallel(ResNet_ImageNet(depth=50, num_classes=run_config.data_provider.n_classes))
    elif args.model=="mobilenetv2":
        assert args.dataset=='imagenet', 'mobilenetv2 only supports imagenet dataset'
        net = MobileNetV2(num_classes=run_config.data_provider.n_classes, cfg=eval(args.cfg))
        if args.base_path!=None:
            weight_path = args.base_path+'/checkpoint/model_best.pth.tar'
            net_origin = nn.DataParallel(MobileNetV2(num_classes=run_config.data_provider.n_classes))
    elif args.model=="mobilenet":
        assert args.dataset=='imagenet', 'mobilenet only supports imagenet dataset'
        net = MobileNet(num_classes=run_config.data_provider.n_classes, cfg=eval(args.cfg))
        if args.base_path!=None:
            weight_path = args.base_path+'/checkpoint/model_best.pth.tar'
            net_origin = nn.DataParallel(MobileNet(num_classes=run_config.data_provider.n_classes))

    # build run manager
    run_manager = RunManager(args.path, net, run_config)
    if args.local_rank == 0:
        run_manager.save_config(print_info=True)
Example #14
def main():
    global args, best_prec1
    args = parser.parse_args()

    # create model
    if args.arch == 'mobilenetv1':
        model = torch.nn.DataParallel(MobileNetv1(args.save_grad))
        model.load_state_dict(torch.load("trained_weights/mobilenet_sgd_rmsprop_69.526.tar")['state_dict'])
        if type(model) == torch.nn.DataParallel and args.save_grad:
            model = model.module
    elif args.arch == 'mobilenetv2':
        model = MobileNetV2(width_mult=1)
        state_dict = torch.load("trained_weights/mobilenetv2_1.0-f2a8633.pth.tar")
        model.load_state_dict(state_dict)
    else:
        raise "Model arch not supported"

    if args.quant or args.clamp:
        transformer = TorchTransformer()
        transformer.register(torch.nn.Conv2d, QConv2d)
        transformer.register(torch.nn.Linear, QLinear)
        model = transformer.trans_layers(model, True)
        if args.quant:
            transformer.register(torch.nn.ReLU, CGPACTLayer)
            model = transformer.trans_layers(model, False)
            set_module_bits(model, 4)

    model = model.cuda()
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    lr_schedular = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs, eta_min=0, last_epoch=-1)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    if args.lmdb:
        from dataset import ImagenetLMDBDataset
        train_dataset = ImagenetLMDBDataset(args.lmdbdir, transforms.Compose([
                transforms.RandomResizedCrop(224, scale=(0.2, 1.0)),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]), ['data', 'label'])
        val_dataset = ImagenetLMDBDataset(args.lmdbdir, transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                normalize,
            ]), ['vdata', 'vlabel'])
    else:
        train_dataset = datasets.ImageFolder(traindir, transforms.Compose([
                transforms.RandomResizedCrop(224, scale=(0.2, 1.0)),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]))
        val_dataset = datasets.ImageFolder(valdir, transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                normalize,
            ]))

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    if args.evaluate:
        validate(val_loader, model, criterion, 0, None)
        return

    if not os.path.exists(args.logdir):
        os.makedirs(args.logdir)
    writer = SummaryWriter(args.logdir)

    for epoch in range(args.start_epoch, args.epochs):
        # adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, writer)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, epoch, writer)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer' : optimizer.state_dict(),
        }, is_best, filename=os.path.join(args.savedir, 'checkpoint.pth.tar'))

        lr_schedular.step()

    os.system("echo \"training done.\" | mail -s \"Desktop Notify\" [email protected]")
Example #15
# coding: utf-8
# Author: Zhongyang Zhang
# Email : [email protected]

import sys
sys.path.append('..')
import torch
from torch.autograd import Variable
from torchviz import make_dot
from models import MobileNetV2
from config import Config

opt = Config()

x = Variable(torch.randn(
    128, 3, 256, 256))  # change 3 to the channel number of the network input
model = MobileNetV2.MobileNetV2()
y = model(x)
g = make_dot(y)
g.view()
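Variable has been a no-op wrapper since PyTorch 0.4, so on recent versions the same graph can be produced from a plain tensor; a minimal sketch using the same MobileNetV2 and make_dot imports as above:

x = torch.randn(1, 3, 256, 256)        # plain tensor, no Variable needed
y = MobileNetV2.MobileNetV2()(x)
make_dot(y).render('mobilenetv2_graph', format='pdf')  # writes mobilenetv2_graph.pdf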
Example #16
## datasets
#load training dataset with self-defined dataloader
train_data, num_classes = LoadImageData('/userhome/30/yfyang/fyp_data/')
trainloader = torch.utils.data.DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=0) 
#load testing data with default dataloader
test_data = datasets.ImageFolder('/userhome/30/yfyang/fyp_data/test/images/', transform=transform_test)
testloader = DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=False, num_workers=0)
print("data size: {} for training".format(len(trainloader.dataset)))
print("data size: {} for testing".format(len(testloader.dataset)))

# class
classes = {0: 'CDRom', 1: 'HardDrive', 2: 'PowerSupply'}

if MobileNet:
    print("BackBone: MobileNetV2")
    net = MobileNetV2(num_classes=3).cuda()
    net._modules.get('features')[-1].register_forward_hook(hook_feature)
else:
    print("BackBone: ResNet18")
    net = ResNet(num_classes=3).cuda()
    net._modules.get('features')[-2].register_forward_hook(hook_feature)

optimizer = torch.optim.SGD(net.parameters(), lr=LEARNING_RATE, momentum=0.9, weight_decay=5e-4)


# load checkpoint
if RESUME:
    # epoch38-acc99.24812316894531-1586176538.pt
    print("===> Resuming from checkpoint.")
    assert os.path.isfile('checkpoint/epoch50-acc99.24812316894531-1586534447.pt'), 'Error: no checkpoint found!'
    net.load_state_dict(torch.load('checkpoint/epoch50-acc99.24812316894531-1586534447.pt'))
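hook_feature is defined elsewhere in this script; for this kind of CAM-style setup it is typically a forward hook that stashes the feature map, along the lines of the following sketch (an assumption, not the file's actual code):

features_blobs = []

def hook_feature(module, input, output):
    # Keep the feature map from the hooked layer for later CAM computation.
    features_blobs.append(output.detach().cpu().numpy())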
Example #17
        net = ResNet_ImageNet(depth=18,
                              num_classes=run_config.data_provider.n_classes,
                              cfg=eval(args.cfg))
    elif args.model == "resnet34":
        assert args.dataset == 'imagenet', 'resnet34 only supports imagenet dataset'
        net = ResNet_ImageNet(depth=34,
                              num_classes=run_config.data_provider.n_classes,
                              cfg=eval(args.cfg))
    elif args.model == "resnet50":
        assert args.dataset == 'imagenet', 'resnet50 only supports imagenet dataset'
        net = ResNet_ImageNet(depth=50,
                              num_classes=run_config.data_provider.n_classes,
                              cfg=eval(args.cfg))
    elif args.model == "mobilenetv2":
        assert args.dataset == 'imagenet', 'mobilenetv2 only supports imagenet dataset'
        net = MobileNetV2(num_classes=run_config.data_provider.n_classes,
                          cfg=eval(args.cfg))
    elif args.model == "mobilenet":
        assert args.dataset == 'imagenet', 'mobilenet only supports imagenet dataset'
        net = MobileNet(num_classes=run_config.data_provider.n_classes,
                        cfg=eval(args.cfg))

    # build run manager
    run_manager = RunManager(args.path, net, run_config)

    # load checkpoints
    best_model_path = '%s/checkpoint/model_best.pth.tar' % args.path
    assert os.path.isfile(best_model_path), 'wrong path'
    if torch.cuda.is_available():
        checkpoint = torch.load(best_model_path)
    else:
        checkpoint = torch.load(best_model_path, map_location='cpu')
Example #18
## show images
#torchvision.utils.save_image(images[17],'test.png')
#img = cv2.imread('test.png')
#flat = img.reshape(1, img.size)
#np.savetxt('test.txt',flat,'%d',delimiter=',')

# Model
print('==> Building model..')
# net = VGG('VGG19')
# net = ResNet18()
# net = PreActResNet18()
# net = GoogLeNet()
# net = DenseNet121()
# net = ResNeXt29_2x64d()
# net = MobileNet()
net = MobileNetV2()
# net = DPN92()
# net = ShuffleNetG2()
# net = SENet18()
# net = ShuffleNetV2(1)
# net = EfficientNetB0()
# net = RegNetX_200MF()
net = net.to(device)
if device == 'cuda':
    net = torch.nn.DataParallel(net)
    cudnn.benchmark = True

#if args.resume:
#    # Load checkpoint.
#    print('==> Resuming from checkpoint..')
#    assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
Example #19
def main(args):
    input_shape = (32, 32, 3)
    num_classes = 10
    batch_size = int(args.batch_size)
    epochs = int(args.epochs)

    # Load cifar10 data
    (X_train, y_train), (X_test, y_test) = load_cifar10()

    # Define model
    model = MobileNetV2(input_shape=input_shape,
                        nb_class=num_classes,
                        include_top=True).build()
    MODEL_NAME = "mobilenetv2__" + datetime.now().strftime("%Y-%m%d-%H%M%S")

    # Path & Env. settings -------------------------------------------------------------
    LOG_DIR = os.path.join("./log", MODEL_NAME)
    if not os.path.exists(LOG_DIR):
        os.makedirs(LOG_DIR)

    shutil.copyfile(os.path.join(os.getcwd(), 'train.sh'),
                    os.path.join(LOG_DIR, 'train.sh'))
    shutil.copyfile(os.path.join(os.getcwd(), 'train.py'),
                    os.path.join(LOG_DIR, 'train.py'))
    shutil.copyfile(os.path.join(os.getcwd(), 'models.py'),
                    os.path.join(LOG_DIR, 'models.py'))

    MODEL_WEIGHT_CKP_PATH = os.path.join(LOG_DIR, "best_weights.h5")
    MODEL_TRAIN_LOG_CSV_PATH = os.path.join(LOG_DIR, "train_log.csv")
    # ----------------------------------------------------------------------------------

    # Compile model
    model.summary()
    model.compile(
        optimizer=keras.optimizers.SGD(lr=2e-2,
                                       momentum=0.9,
                                       decay=0.0,
                                       nesterov=False),
        loss='categorical_crossentropy',
        loss_weights=[
            1.0
        ],  # Loss weight for the model output (excluding regularization loss); set to 0.0 to validate only the regularization factor.
        metrics=['accuracy'])

    # Load initial weights from pre-trained model
    if args.trans_learn:
        model.load_weights(str(args.weights_path), by_name=False)
        print("Load model init weights from", MODEL_INIT_WEIGHTS_PATH)

    print("Produce training results in", LOG_DIR)

    # Set learning rate
    learning_rates = []
    for i in range(5):
        learning_rates.append(2e-2)
    for i in range(50 - 5):
        learning_rates.append(1e-2)
    for i in range(100 - 50):
        learning_rates.append(8e-3)
    for i in range(150 - 100):
        learning_rates.append(4e-3)
    for i in range(200 - 150):
        learning_rates.append(2e-3)
    for i in range(300 - 200):
        learning_rates.append(1e-3)

    # Set model callbacks
    callbacks = []
    callbacks.append(
        ModelCheckpoint(MODEL_WEIGHT_CKP_PATH,
                        monitor='val_loss',
                        save_best_only=True,
                        save_weights_only=True))
    callbacks.append(CSVLogger(MODEL_TRAIN_LOG_CSV_PATH))
    callbacks.append(
        LearningRateScheduler(lambda epoch: float(learning_rates[epoch])))

    # Data generator with data augmentation
    datagen = keras.preprocessing.image.ImageDataGenerator(
        featurewise_center=False,
        featurewise_std_normalization=False,
        rotation_range=0.0,
        width_shift_range=0.2,
        height_shift_range=0.2,
        vertical_flip=False,
        horizontal_flip=True)
    datagen.fit(X_train)

    # Train model
    history = model.fit_generator(datagen.flow(X_train,
                                               y_train,
                                               batch_size=batch_size),
                                  steps_per_epoch=len(X_train) / batch_size,
                                  epochs=epochs,
                                  verbose=1,
                                  callbacks=callbacks,
                                  validation_data=(X_test, y_test))

    # Validation
    val_loss, val_acc = model.evaluate(X_test, y_test, verbose=1)
    print("--------------------------------------")
    print("model name : ", MODEL_NAME)
    print("validation loss     : {:.5f}".format(val_loss))
    print("validation accuracy : {:.5f}".format(val_acc))

    # Save model as "instance"
    ins_name = 'model_instance'
    ins_path = os.path.join(LOG_DIR, ins_name) + '.h5'
    model.save(ins_path)

    # Save model as "architecture"
    arch_name = 'model_fin_architechture'
    arch_path = os.path.join(LOG_DIR, arch_name) + '.json'
    json_string = model.to_json()
    with open(arch_path, 'w') as f:
        f.write(json_string)
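Since ModelCheckpoint above saves only the best weights, a typical follow-up (a sketch, not part of the original script) is to reload that checkpoint before reporting final numbers:

# Restore the best checkpoint written by ModelCheckpoint, then re-evaluate.
model.load_weights(MODEL_WEIGHT_CKP_PATH)
best_loss, best_acc = model.evaluate(X_test, y_test, verbose=0)
print("best-checkpoint accuracy: {:.5f}".format(best_acc))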
Example #20
        param = param.data
        own_state[name].copy_(param)

    names = [
        'layer1_conv1.0.weight', 'layer1_conv1.0.bias',
        'layer3_conv2.0.weight', 'layer3_conv2.0.bias',
        'layer5_conv3_1.0.weight', 'layer5_conv3_1.0.bias',
        'layer6_conv3_2.0.weight', 'layer6_conv3_2.0.bias'
    ]
    for name, param in teacher_model.named_parameters():
        if name in names:
            param.requires_grad = False

else:
    width_mult = parser.getfloat('teacher', 'WIDTH_MULT')
    teacher_model = MobileNetV2(width_mult, 8)
    teacher_model = teacher_model.cuda()

epochs = parser.getint('teacher', 'EPOCHS')

lr = parser.getfloat('teacher', 'LEARNING_RATE')
criterion_teacher = nn.BCELoss()
optimiser_teacher = optim.Adam(teacher_model.parameters(), lr=lr)

max_acc = 0.0

for i in range(epochs):

    predictions = pd.DataFrame(columns=[
        "audio_filename", "1_engine", "2_machinery-impact",
        "3_non-machinery-impact", "4_powered-saw", "5_alert-signal", "6_music",
Example #21
    parser.add_argument('--weights', type=str, default='')
    parser.add_argument('--rect', action='store_true')
    parser.add_argument('-s',
                        '--img_size',
                        type=int,
                        nargs=2,
                        default=[224, 224])
    parser.add_argument('-bs', '--batch-size', type=int, default=64)
    parser.add_argument('--num-workers', type=int, default=4)

    opt = parser.parse_args()

    val_data = ClsDataset(opt.val,
                          img_size=opt.img_size,
                          augments=None,
                          rect=opt.rect)
    val_loader = DataLoader(
        val_data,
        batch_size=opt.batch_size,
        pin_memory=True,
        num_workers=opt.num_workers,
    )
    val_fetcher = Fetcher(val_loader, post_fetch_fn=val_data.post_fetch_fn)
    model = MobileNetV2(len(val_data.classes))
    model = model.to(device)
    if opt.weights:
        state_dict = torch.load(opt.weights, map_location='cpu')
        model.load_state_dict(state_dict['model'])
    metrics = test(model, val_fetcher)
    print('metrics: %8g' % (metrics))
Example #22
def train(data_dir, epochs, img_size, batch_size, accumulate, lr, adam, resume,
          weights, num_workers, multi_scale, rect, mixed_precision, notest,
          nosave):
    train_dir = osp.join(data_dir, 'train.txt')
    val_dir = osp.join(data_dir, 'valid.txt')

    train_data = ClsDataset(train_dir,
                            img_size=img_size,
                            multi_scale=multi_scale,
                            rect=rect)
    train_loader = DataLoader(
        train_data,
        batch_size=batch_size,
        shuffle=not (dist.is_initialized()),
        sampler=DistributedSampler(train_data, dist.get_world_size(),
                                   dist.get_rank())
        if dist.is_initialized() else None,
        pin_memory=True,
        num_workers=num_workers,
    )
    train_fetcher = Fetcher(train_loader, train_data.post_fetch_fn)
    if not notest:
        val_data = ClsDataset(val_dir,
                              img_size=img_size,
                              augments=None,
                              rect=rect)
        val_loader = DataLoader(
            val_data,
            batch_size=batch_size,
            shuffle=not (dist.is_initialized()),
            sampler=DistributedSampler(val_data, dist.get_world_size(),
                                       dist.get_rank())
            if dist.is_initialized() else None,
            pin_memory=True,
            num_workers=num_workers,
        )
        val_fetcher = Fetcher(val_loader, post_fetch_fn=val_data.post_fetch_fn)

    model = MobileNetV2(num_classes=len(train_data.classes))

    trainer = Trainer(model,
                      train_fetcher,
                      loss_fn=compute_loss,
                      workdir='weights',
                      accumulate=accumulate,
                      adam=adam,
                      lr=lr,
                      weights=weights,
                      resume=resume,
                      mixed_precision=mixed_precision)
    while trainer.epoch < epochs:
        trainer.step()
        best = False
        if not notest:
            metrics = test(trainer.model, val_fetcher)
            if metrics > trainer.metrics:
                best = True
                print('save best, acc: %g' % metrics)
                trainer.metrics = metrics
        if not nosave:
            trainer.save(best)
Example #23
     net_origin = nn.DataParallel(
         ResNet_ImageNet(depth=34, num_classes=1000))
 elif args.model == 'resnet50':
     assert args.dataset == 'imagenet', 'resnet50 only supports imagenet dataset'
     net = ResNet_ImageNet(num_classes=1000, cfg=None, depth=50)
     weight_path = 'Exp_base/resnet50_base/checkpoint/model_best.pth.tar'
     net_origin = nn.DataParallel(
         ResNet_ImageNet(depth=50, num_classes=1000))
 elif args.model == 'mobilenet':
     assert args.dataset == 'imagenet', 'mobilenet only supports imagenet dataset'
     net = MobileNet(num_classes=1000, cfg=None)
     weight_path = 'Exp_base/mobilenet_base/checkpoint/model_best.pth.tar'
     net_origin = nn.DataParallel(MobileNet(num_classes=1000))
 elif args.model == 'mobilenetv2':
     assert args.dataset == 'imagenet', 'mobilenetv2 only supports imagenet dataset'
     net = MobileNetV2(num_classes=1000, cfg=None)
     weight_path = 'Exp_base/mobilenetv2_base/checkpoint/model_best.pth.tar'
     net_origin = nn.DataParallel(MobileNetV2(num_classes=1000))
 elif args.model == 'vgg':
     assert args.dataset == 'cifar10', 'vgg only supports cifar10 dataset'
     net = VGG_CIFAR(cfg=None, cutout=False)
     weight_path = 'Exp_base/vgg_base/checkpoint/model_best.pth.tar'
     net_origin = nn.DataParallel(VGG_CIFAR(cutout=False))
 elif args.model == "resnet56":
     assert args.dataset == 'cifar10', 'resnet56 only supports cifar10 dataset'
     net = ResNet_CIFAR(cfg=None, depth=56, num_classes=10, cutout=False)
     weight_path = 'Exp_base/resnet56_base/checkpoint/model_best.pth.tar'
     net_origin = nn.DataParallel(
         ResNet_CIFAR(depth=56, num_classes=10, cutout=False))
 elif args.model == "resnet110":
     assert args.dataset == 'cifar10', 'resnet110 only supports cifar10 dataset'