예제 #1
0
def load_pretrained_model(network):
    """Instantiate the named architecture initialised with pretrained weights.

    Supported names: 'inception_resnetv2', 'resnet_50', 'resnet_18',
    'alexnet', 'my_res_50'.  Raises ValueError for any other name.
    """
    # Lambdas defer construction so nothing is built for an unknown name.
    builders = {
        'inception_resnetv2': lambda: inceptionresnetv2(num_classes=1000),
        'resnet_50': lambda: models.resnet50(pretrained=True),
        'resnet_18': lambda: models.resnet18(pretrained=True),
        'alexnet': lambda: models.alexnet(pretrained=True),
        'my_res_50': lambda: myr.resnet50(pretrained=True),
    }
    build = builders.get(network)
    if build is None:
        raise ValueError('invalid network name')
    return build()
예제 #2
0
def load_scratch_model(network):
    """Instantiate the named architecture with randomly initialised weights.

    Same names as load_pretrained_model plus the small CIFAR-10 resnets
    ('resnet_14_cifar_10', 'resnet_32_cifar_10').  Raises ValueError for an
    unrecognised name.
    """
    # Lambdas keep construction lazy; only the selected model is ever built.
    builders = {
        'inception_resnetv2':
        lambda: inceptionresnetv2(num_classes=1000, pretrained=None),
        'resnet_50': lambda: models.resnet50(pretrained=False),
        'resnet_18': lambda: models.resnet18(pretrained=False),
        'alexnet': lambda: models.alexnet(pretrained=False),
        'my_res_50': lambda: myr.resnet50(pretrained=False),
        'resnet_14_cifar_10': lambda: res_cifar.resnet14(),
        'resnet_32_cifar_10': lambda: res_cifar.resnet32(),
    }
    build = builders.get(network)
    if build is None:
        raise ValueError('invalid network name')
    return build()
예제 #3
0
    def __init__(self, FLAGS, data_type='rgb'):
        """Build the model, loss and optimizer described by FLAGS.

        Args:
            FLAGS: parsed command-line flags; must provide .model
                ('resnet18' or 'resnet50'), .max_gradient_norm and .lr.
            data_type: 'rgb' uses the model as-is; 'flow' runs it through
                self.to_flow_model (presumably adapting the input layer for
                optical flow — confirm against that helper).

        Raises:
            ValueError: for an unknown model name or data_type.
        """
        self.FLAGS = FLAGS
        if FLAGS.model == 'resnet18':
            self.model = resnet18(pretrained=True)
        elif FLAGS.model == 'resnet50':
            self.model = resnet50(pretrained=True)
        else:
            # BUG FIX: `raise('msg', x)` raises TypeError ("exceptions must
            # derive from BaseException"), masking the real problem.
            raise ValueError('Error model type: {}'.format(FLAGS.model))

        if data_type == 'flow':
            self.to_flow_model(self.model)
        elif data_type == 'rgb':
            pass
        else:
            # BUG FIX: same broken raise-a-tuple pattern as above.
            raise ValueError('Error data_type: {}'.format(data_type))
        self.data_type = data_type
        self.max_gradient_norm = FLAGS.max_gradient_norm
        self.loss = nn.CrossEntropyLoss()
        self.learning_rate = FLAGS.lr
        self.set_optimizer(self.learning_rate, 0.1)
        # Move model and loss to GPU when one is available (no-op otherwise).
        if torch.cuda.is_available():
            self.model = self.model.cuda()
            self.loss = self.loss.cuda()
예제 #4
0
    # Continuation of a branch whose `if` header is above this excerpt:
    # restore the trained weights for the model built there.
    net.load_state_dict(torch.load(model_path), strict=True)

elif args.model == 'resnet':
    # Build an untrained resnet50 and load the checkpoint weights into it.
    net = resnet.resnet50(pretrained=False, num_classes=args.num_classes)
    net.load_state_dict(torch.load(model_path), strict=True)
    # Feature maps captured by the forward hook accumulate here.
    features_blob = []
    params = list(net.parameters())
    # Last two parameters are presumably the final fc layer's weight and
    # bias, so params[-2] is the classifier weight (used e.g. for CAM) —
    # TODO confirm the model ends with a single fc layer.
    fc_weight = params[-2]

    def hook_feature(module, input, output):
        # Stash the raw activation tensor of the hooked module.
        features_blob.append(output.data)

    # Capture the output of the last residual stage on every forward pass.
    net._modules.get('layer4').register_forward_hook(hook_feature)

elif args.model == 'my_resnet':
    # Same setup as the 'resnet' branch, but using the local my_resnet
    # implementation instead of the stock one.
    net = my_resnet.resnet50(pretrained=False, num_classes=args.num_classes)
    net.load_state_dict(torch.load(model_path), strict=True)
    features_blob = []
    params = list(net.parameters())
    fc_weight = params[-2]

    def hook_feature(module, input, output):
        features_blob.append(output.data)

    net._modules.get('layer4').register_forward_hook(hook_feature)

elif args.model == 'my_resnet3':
    # my_resnet3 variant: no feature hook here, just load and inspect
    # its seg2label_pool attribute.
    net = my_resnet3.resnet50(pretrained=False, num_classes=args.num_classes)
    net.load_state_dict(torch.load(model_path), strict=True)
    print(net.seg2label_pool)
예제 #5
0
    # Tail of a training function whose definition is above this excerpt:
    # report the best validation loss and return the best-scoring weights.
    print('Best val Loss: {:4f}'.format(best_loss))
    # load best model weights
    model.load_state_dict(best_model_wts)
    #f.close()  # close the file (left disabled)
    return model


# Pick the backbone: a my_resnet variant with ImageNet-pretrained weights
# (the commented-out original used torchvision's models.resnet18 directly).
# Unknown names — and 'resnet18' itself — fall back to resnet18.
_backbones = {
    'resnet34': my_resnet.resnet34,
    'resnet50': my_resnet.resnet50,
    'resnet101': my_resnet.resnet101,
}
model_ft = _backbones.get(model, my_resnet.resnet18)(pretrained=True)

# Swap the ImageNet 1000-way classifier head for a 7-class linear layer
# (y = Ax + b) fed by the backbone's feature vector.
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 7)

model_ft = model_ft.to(device)

criterion = nn.CrossEntropyLoss()

# Observe that all parameters are being optimized
예제 #6
0
                # Snapshot the weights of the best epoch seen so far.
                best_model_wts = copy.deepcopy(model.state_dict())

        print()
    # Tail of a training function whose definition is above this excerpt:
    # summarise the run and return the best-scoring weights.
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Loss: {:4f}'.format(best_loss), 'in epoch', num_epoch_loss)
    print('Best train Acc: {:4f}'.format(best_acc), 'in epoch', num_epoch_acc)
    # load best model weights
    model.load_state_dict(best_model_wts)
    #f.close()  # close the file (left disabled)
    return model


# Build a resnet50 backbone with ImageNet-pretrained weights.
model_ft = my_resnet.resnet50(
    pretrained=True
)  # load a model with weights from a previous training run; the weights are downloaded
num_ftrs = model_ft.fc.in_features  # number of input features of the fc head
model_ft.fc = nn.Linear(
    num_ftrs, 7
)  # applies the linear transform y = Ax + b; new 7-class head (ImageNet default is 1000 classes)

model_ft = model_ft.to(device)

# Class-weighted loss; w_loss is presumably a per-class weight tensor defined
# elsewhere — confirm its length matches the 7 output classes.
criterion = nn.CrossEntropyLoss(
    weight=(w_loss.cuda())
)  # combines nn.LogSoftmax() and nn.NLLLoss() in a single class; useful with a fixed number of classes — this is how the loss is computed

# Observe that all parameters are being optimized
# (this optim.Adam(...) call continues beyond this excerpt)
optimizer_ft = optim.Adam(
    model_ft.parameters(), lr=1e-6
예제 #7
0
def main():
    """Fine-tune a pretrained resnet50, resuming from a checkpoint if given,
    and run the training loop with periodic testing and LR halving."""
    args = parser.parse_args()
    print('creat model')
    # Restrict visible GPUs before any CUDA initialisation.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device
    model_tmp = resnet50(pretrained=True)
    best_prec1 = 0.0
    if args.resume:
        # read_checkpoint returns (found_flag, checkpoint_dict).
        is_exist, checkpoint = read_checkpoint(args.resume)
        if is_exist:
            # print("=> loading checkpoint '{}'".format(args.resume))
            # checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model_tmp.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint (epoch {})".format(
                checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # cudnn.benchmark = True # ???

    # Load data
    print('Loading dataset')
    train_set = DataSet('train', batch_size=args.batch_size)
    test_set = DataSet('test')
    test_example = test_set.num_example
    train_set.epoch = args.start_epoch

    if torch.cuda.is_available():
        model_tmp.cuda()
        criterion = nn.CrossEntropyLoss().cuda()
    else:
        criterion = nn.CrossEntropyLoss()

    lr = args.lr
    # optimizer = torch.optim.SGD(model_tmp.parameters(), lr, momentum=args.momentum,
    #                             weight_decay=args.weight_decay)

    # Two parameter groups: the new fc layer trains at full lr, the
    # pretrained backbone at lr * 0.1.
    ignored_params = list(map(id, model_tmp.fc_new.parameters()))
    base_params = filter(lambda p: id(p) not in ignored_params,
                         model_tmp.parameters())
    optimizer = torch.optim.SGD([{
        'params': base_params
    }, {
        'params': model_tmp.fc_new.parameters(),
        'lr': lr
    }],
                                lr=lr * 0.1,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    #device_ids = [int(x) for x in args.device.split(',')]
    # model = torch.nn.DataParallel(model_tmp, device_ids=device_ids) # use parallel training
    model = torch.nn.DataParallel(model_tmp, [0])  # use parallel training

    if args.resume:
        if is_exist:
            print('Last checkpoint testing:')
            test_acc = test_acc_func(model, test_set, train_set.epoch,
                                     args.batch_size)

    print('start training')
    step = 0
    # train_set.epoch is advanced by the DataSet itself as batches are
    # consumed — TODO confirm against the DataSet implementation.
    while train_set.epoch < args.epochs:
        start_time = time.time()
        train_img, train_label = train_set.next_batch_train_parallel()
        # NHWC -> NCHW for PyTorch convolutions.
        train_img = train_img.transpose((0, 3, 1, 2))
        train_img, train_label = torch.from_numpy(train_img), torch.from_numpy(
            train_label)
        train_img, train_label = train_img.float(), train_label.float()
        # NOTE(review): Variable is the legacy pre-0.4 autograd wrapper.
        train_img_var, train_label_var = Variable(train_img), Variable(
            train_label)
        if torch.cuda.is_available():
            train_img_var, train_label_var = train_img_var.cuda(
            ), train_label_var.cuda()

        # Labels appear to be one-hot along dim 1 (argmax recovers the class
        # index) — TODO confirm against DataSet's label format.
        _, gt = torch.max(train_label_var, 1)
        middle_time = time.time()
        model.train()
        train_out = model.forward(train_img_var)
        loss = criterion(train_out, gt[:, 0])

        _, pred = torch.max(train_out, 1)

        correct = pred.eq(gt)
        accuracy = correct.float().sum(0).mul(100.0 / train_img_var.size(0))
        losses = loss.float()

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        end_time = time.time()

        if step % args.print_freq == 0:
            # NOTE(review): `.data[0]` indexing is legacy pre-0.4 Tensor API;
            # modern PyTorch would use `.item()`.
            print('Epoch: {},'
                  'Step: {},'
                  'Loading Time: {:.3f} s'
                  'Time: {:.3f} s'
                  'Base Lr: {:.4f}'
                  'Training Acc: {:.3f}%,'
                  'Loss: {:.4f}'.format(train_set.epoch, step,
                                        middle_time - start_time,
                                        end_time - middle_time, lr,
                                        accuracy.data[0, 0], losses.data[0]))

        # NOTE(review): precedence bug — this parses as
        # ((step % 20) * args.print_freq) == 0, i.e. it tests every step
        # divisible by 20; the intent was almost certainly
        # step % (20 * args.print_freq) == 0.
        if step % 20 * args.print_freq == 0 and step > 10:
            test_acc = test_acc_func(model, test_set, train_set.epoch,
                                     args.batch_size)
            if test_acc > best_prec1:
                is_best = 1
                best_prec1 = test_acc
                # Checkpoint the underlying (non-DataParallel) module.
                save_checkpoint(
                    {
                        'epoch': train_set.epoch + 1,
                        'state_dict': model_tmp.state_dict(),
                        'best_prec1': best_prec1,
                    }, is_best, args.data)

        # Manual LR schedule: halve the lr at steps 10000 and 30000 by
        # rebuilding the optimizer with the same two parameter groups.
        # NOTE(review): `base_params` is a filter iterator already consumed
        # by the first optimizer, so it is empty here — the backbone group
        # would get no parameters; confirm and materialise it as a list.
        if step == 10000:
            lr = lr / 2
            optimizer = torch.optim.SGD(
                [{
                    'params': base_params
                }, {
                    'params': model_tmp.fc_new.parameters(),
                    'lr': lr
                }],
                lr=lr * 0.1,
                momentum=args.momentum,
                weight_decay=args.weight_decay)
        if step == 30000:
            lr = lr / 2
            optimizer = torch.optim.SGD(
                [{
                    'params': base_params
                }, {
                    'params': model_tmp.fc_new.parameters(),
                    'lr': lr
                }],
                lr=lr * 0.1,
                momentum=args.momentum,
                weight_decay=args.weight_decay)

        step += 1