Code Example #1
def createPinNet(input_shape=(224, 224, 3)):
    input_layer = Input(input_shape)
    x = ResNet34(input_layer, with_head=False)
    x = GlobalAveragePooling2D()(x)
    out_probs = Dense(4, activation='softmax')(x)
    out_alias = Dense(4, activation='linear')(x)
    model = Model(inputs=input_layer, outputs=[out_probs, out_alias])
    return model
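# Hedged usage sketch (not from the original project): compiling the two-headed
# model above with one loss per output; the loss choices and weights are assumptions.
model = createPinNet()
model.compile(optimizer='adam',
              loss=['categorical_crossentropy', 'mse'],
              loss_weights=[1.0, 1.0])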
Code Example #2
def get_resnet34():
    res34 = ResNet34(100)
    res34.load_state_dict(torch.load("pretrained/resnet_cifar100.pth"))
    class_to_train = 101
    num_ftrs = res34.linear.in_features
    res34.linear = nn.Linear(num_ftrs, class_to_train)

    return res34
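# Hedged fine-tuning sketch (optimizer settings are illustrative assumptions):
# freeze the CIFAR-100 pretrained backbone and train only the newly attached head.
model = get_resnet34()
for param in model.parameters():
    param.requires_grad = False
for param in model.linear.parameters():
    param.requires_grad = True
optimizer = torch.optim.SGD(model.linear.parameters(), lr=0.01, momentum=0.9)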
Code Example #3
def build_model():
    model = ResNet34(10 if args.dataset == 'cifar10' else 100)

    if torch.cuda.is_available():
        model.cuda()
        torch.backends.cudnn.benchmark = True

    return model
Code Example #4
def load_model(model_path):
    assert model_path.endswith(('.pt', '.pth'))
    checkpoint = torch.load(model_path)
    if torch.typename(checkpoint) == 'OrderedDict':
        # a bare state_dict was saved: rebuild the architecture, then load the weights
        model = ResNet34()
        model.load_state_dict(checkpoint)
    else:
        # the whole model object was serialized
        model = checkpoint

    model.eval()
    if torch.cuda.is_available():
        model.cuda()

    return model
Code Example #5
File: train.py  Project: wcy1122/SYSU_Undergraduate
def get_model(model_name):
    if model_name=='CNN': return CNN()
    if model_name=='CNN_GAP': return CNN(GAP=True)
    if model_name=='VGG16': return VGG16(batch_norm=False)
    if model_name=='VGG11_BN': return VGG11(batch_norm=True)
    if model_name=='VGG13_BN': return VGG13(batch_norm=True)
    if model_name=='VGG16_BN': return VGG16(batch_norm=True)
    if model_name=='VGG11_GAP': return VGG11(batch_norm=True, GAP=True)
    if model_name=='VGG13_GAP': return VGG13(batch_norm=True, GAP=True)
    if model_name=='VGG16_GAP': return VGG16(batch_norm=True, GAP=True)
    if model_name=='ResNet18': return ResNet18()
    if model_name=='ResNet34': return ResNet34()
    if model_name=='ResNet50': return ResNet50()
    if model_name=='ResNet101': return ResNet101()
    raise NotImplementedError('Model has not been implemented.')
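# A sketch of the same dispatch written as a lookup table (assumes the same model
# constructors are importable); behaviour matches the if-chain above for the ResNet names.
RESNETS = {
    'ResNet18': ResNet18,
    'ResNet34': ResNet34,
    'ResNet50': ResNet50,
    'ResNet101': ResNet101,
}

def get_resnet(model_name):
    if model_name not in RESNETS:
        raise NotImplementedError('Model has not been implemented.')
    return RESNETS[model_name]()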
Code Example #6
exe = fluid.Executor(place)

# create the programs
first_program = fluid.Program()  # main program
fluid.io.load_params(executor=exe,
                     main_program=first_program,
                     dirname=params_path)
startup = fluid.Program()  # default startup program

# build the network
with fluid.program_guard(main_program=first_program, startup_program=startup):
    image = fluid.layers.data(name="image",
                              shape=[C, IMG_H, IMG_W],
                              dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    net_x = ResNet34().net(input=image, class_dim=class_dim)
    net_x = fluid.layers.fc(input=net_x, size=2, act="softmax")
    # define the loss function
    cost = fluid.layers.cross_entropy(net_x, label)
    avg_cost = fluid.layers.mean(cost)
    # compute top-1 accuracy
    acc_1 = fluid.layers.accuracy(input=net_x, label=label, k=1)
    # clone a test program (before the optimizer ops are added)
    testProgram = first_program.clone(for_test=True)
    # define the optimizer
    adam_optimizer = fluid.optimizer.Adam(learning_rate=LEARNING_RATE)
    adam_optimizer.minimize(avg_cost)

# data feeding setup

train_reader = paddle.batch(reader=paddle.reader.shuffle(data_reader(), 1000),
Code Example #7
File: train.py  Project: YIWEI-CHEN/darts
def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    random.seed(args.seed)
    np.random.seed(
        args.data_seed)  # cutout and load_corrupted_data use np.random
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = False
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    cudnn.deterministic = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    if args.arch == 'resnet':
        model = ResNet18(CIFAR_CLASSES).cuda()
        args.auxiliary = False
    elif args.arch == 'resnet50':
        model = ResNet50(CIFAR_CLASSES).cuda()
        args.auxiliary = False
    elif args.arch == 'resnet34':
        model = ResNet34(CIFAR_CLASSES).cuda()
        args.auxiliary = False
    else:
        genotype = eval("genotypes.%s" % args.arch)
        model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                        args.auxiliary, genotype)
        model = model.cuda()

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    train_transform, test_transform = utils._data_transforms_cifar10(args)

    # Load dataset
    if args.dataset == 'cifar10':
        noisy_train_data = CIFAR10(root=args.data,
                                   train=True,
                                   gold=False,
                                   gold_fraction=0.0,
                                   corruption_prob=args.corruption_prob,
                                   corruption_type=args.corruption_type,
                                   transform=train_transform,
                                   download=True,
                                   seed=args.data_seed)
        gold_train_data = CIFAR10(root=args.data,
                                  train=True,
                                  gold=True,
                                  gold_fraction=1.0,
                                  corruption_prob=args.corruption_prob,
                                  corruption_type=args.corruption_type,
                                  transform=train_transform,
                                  download=True,
                                  seed=args.data_seed)
        test_data = dset.CIFAR10(root=args.data,
                                 train=False,
                                 download=True,
                                 transform=test_transform)
    elif args.dataset == 'cifar100':
        noisy_train_data = CIFAR100(root=args.data,
                                    train=True,
                                    gold=False,
                                    gold_fraction=0.0,
                                    corruption_prob=args.corruption_prob,
                                    corruption_type=args.corruption_type,
                                    transform=train_transform,
                                    download=True,
                                    seed=args.data_seed)
        gold_train_data = CIFAR100(root=args.data,
                                   train=True,
                                   gold=True,
                                   gold_fraction=1.0,
                                   corruption_prob=args.corruption_prob,
                                   corruption_type=args.corruption_type,
                                   transform=train_transform,
                                   download=True,
                                   seed=args.data_seed)
        test_data = dset.CIFAR100(root=args.data,
                                  train=False,
                                  download=True,
                                  transform=test_transform)

    num_train = len(gold_train_data)
    indices = list(range(num_train))
    split = int(np.floor(args.train_portion * num_train))

    if args.gold_fraction == 1.0:
        train_data = gold_train_data
    else:
        train_data = noisy_train_data
    train_queue = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True,
        num_workers=0)

    if args.clean_valid:
        valid_data = gold_train_data
    else:
        valid_data = noisy_train_data

    valid_queue = torch.utils.data.DataLoader(
        valid_data,
        batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:]),
        pin_memory=True,
        num_workers=0)

    test_queue = torch.utils.data.DataLoader(test_data,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             pin_memory=True,
                                             num_workers=2)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))

    if args.loss_func == 'cce':
        criterion = nn.CrossEntropyLoss().cuda()
    elif args.loss_func == 'rll':
        criterion = utils.RobustLogLoss(alpha=args.alpha).cuda()
    elif args.loss_func == 'forward_gold':
        corruption_matrix = train_data.corruption_matrix
        criterion = utils.ForwardGoldLoss(corruption_matrix=corruption_matrix)
    else:
        assert False, "Invalid loss function '{}' given. Must be one of {{'cce', 'rll', 'forward_gold'}}".format(
            args.loss_func)

    for epoch in range(args.epochs):
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)

        valid_acc, valid_obj = infer_valid(valid_queue, model, criterion)
        logging.info('valid_acc %f', valid_acc)

        test_acc, test_obj = infer(test_queue, model, criterion)
        logging.info('test_acc %f', test_acc)

        utils.save(model, os.path.join(args.save, 'weights.pt'))
Code Example #8
        classifier_id.load_state_dict(
            torch.load(
                'validation/autoencoder_checkpoints/final_classification_model-id.pkl'
            ))

        autoencoder_alcoholism.eval()
        autoencoder_stimulus.eval()
        autoencoder_id.eval()

    else:
        if opt.classifier == 'ResNet18':
            classifier_alcoholism = ResNet18(num_classes_alc)
            classifier_stimulus = ResNet18(num_classes_stimulus)
            classifier_id = ResNet18(num_classes_id)
        elif opt.classifier == 'ResNet34':
            classifier_alcoholism = ResNet34(num_classes_alc)
            classifier_stimulus = ResNet34(num_classes_stimulus)
            classifier_id = ResNet34(num_classes_id)
        elif opt.classifier == 'ResNet50':
            classifier_alcoholism = ResNet50(num_classes_alc)
            classifier_stimulus = ResNet50(num_classes_stimulus)
            classifier_id = ResNet50(num_classes_id)

        classifier_alcoholism = classifier_alcoholism.to(device)
        classifier_stimulus = classifier_stimulus.to(device)
        classifier_id = classifier_id.to(device)

        if device == 'cuda':
            classifier_alcoholism = torch.nn.DataParallel(
                classifier_alcoholism)
            classifier_stimulus = torch.nn.DataParallel(classifier_stimulus)
Code Example #9
def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    random.seed(args.seed)

    if args.arch == 'resnet':
        model = ResNet18(CIFAR_CLASSES).cuda()
        args.auxiliary = False
    elif args.arch == 'resnet50':
        model = ResNet50(CIFAR_CLASSES).cuda()
        args.auxiliary = False
    elif args.arch == 'resnet34':
        model = ResNet34(CIFAR_CLASSES).cuda()
        args.auxiliary = False
    else:
        genotype = eval("genotypes.%s" % args.arch)
        model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                        args.auxiliary, genotype)
        model = model.cuda()

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    train_transform, test_transform = utils._data_transforms_cifar10(args)

    train_data = CIFAR10(root=args.data,
                         train=True,
                         gold=False,
                         gold_fraction=0.0,
                         corruption_prob=args.corruption_prob,
                         corruption_type=args.corruption_type,
                         transform=train_transform,
                         download=True,
                         seed=args.seed)
    gold_train_data = CIFAR10(root=args.data,
                              train=True,
                              gold=True,
                              gold_fraction=1.0,
                              corruption_prob=args.corruption_prob,
                              corruption_type=args.corruption_type,
                              transform=train_transform,
                              download=True,
                              seed=args.seed)

    num_train = len(train_data)
    indices = list(range(num_train))
    split = int(np.floor(args.train_portion * num_train))

    clean_train_queue = torch.utils.data.DataLoader(
        gold_train_data,
        batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True,
        num_workers=2)

    noisy_train_queue = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True,
        num_workers=2)

    # clean_train_list = []
    # for input, target in clean_train_queue:
    #   input = Variable(input).cuda()
    #   target = Variable(target).cuda(async=True)
    #   clean_train_list.append((input, target))
    #
    # noisy_train_list = []
    # for input, target in noisy_train_queue:
    #   input = Variable(input).cuda()
    #   target = Variable(target).cuda(async=True)
    #   noisy_train_list.append((input, target))

    clean_valid_queue = torch.utils.data.DataLoader(
        gold_train_data,
        batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:]),
        pin_memory=True,
        num_workers=2)

    noisy_valid_queue = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:]),
        pin_memory=True,
        num_workers=2)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))

    if args.loss_func == 'cce':
        criterion = nn.CrossEntropyLoss().cuda()
    elif args.loss_func == 'rll':
        criterion = utils.RobustLogLoss(alpha=args.alpha).cuda()
    elif args.loss_func == 'forward_gold':
        corruption_matrix = train_data.corruption_matrix
        criterion = utils.ForwardGoldLoss(corruption_matrix=corruption_matrix)
    else:
        assert False, "Invalid loss function '{}' given. Must be one of {{'cce', 'rll', 'forward_gold'}}".format(
            args.loss_func)

    global gain
    for epoch in range(args.epochs):
        if args.random_weight:
            logging.info('Epoch %d, Randomly assign weights', epoch)
            model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
            clean_obj, noisy_obj = infer_random_weight(clean_train_queue,
                                                       noisy_train_queue,
                                                       model, criterion)
            logging.info('clean loss %f, noisy loss %f', clean_obj, noisy_obj)
            gain = np.random.randint(1, args.grad_clip, size=1)[0]
        else:
            scheduler.step()
            logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
            model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

            train_acc, train_obj, another_obj = train(clean_train_queue,
                                                      noisy_train_queue, model,
                                                      criterion, optimizer)
            if args.clean_train:
                logging.info('train_acc %f, clean_loss %f, noisy_loss %f',
                             train_acc, train_obj, another_obj)
            else:
                logging.info('train_acc %f, clean_loss %f, noisy_loss %f',
                             train_acc, another_obj, train_obj)

            utils.save(model, os.path.join(args.save, 'weights.pt'))

        clean_valid_acc, clean_valid_obj = infer_valid(clean_valid_queue,
                                                       model, criterion)
        logging.info('clean_valid_acc %f, clean_valid_loss %f',
                     clean_valid_acc, clean_valid_obj)

        noisy_valid_acc, noisy_valid_obj = infer_valid(noisy_valid_queue,
                                                       model, criterion)
        logging.info('noisy_valid_acc %f, noisy_valid_loss %f',
                     noisy_valid_acc, noisy_valid_obj)
Code Example #10
testset = torchvision.datasets.CIFAR10(root='./data',
                                       train=False,
                                       download=True,
                                       transform=transform_test)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=100,
                                         shuffle=False,
                                         num_workers=2)

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck')

# Model
print('==> Building model..')
net = ResNet34()  # Used in FSSD paper

net = net.to(device)
if device == 'cuda':
    net = torch.nn.DataParallel(net)
    cudnn.benchmark = True

if args.resume:
    # Load checkpoint.
    print('==> Resuming from checkpoint..')
    assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
    checkpoint = torch.load('./checkpoint/ckpt.pth')
    net.load_state_dict(checkpoint['net'])
    best_acc = checkpoint['acc']
    start_epoch = checkpoint['epoch']
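# Hedged sketch of the matching save side (the helper name is an assumption); it
# writes only the keys the resume branch above reads: 'net', 'acc' and 'epoch'.
def save_checkpoint(net, acc, epoch):
    state = {'net': net.state_dict(), 'acc': acc, 'epoch': epoch}
    os.makedirs('checkpoint', exist_ok=True)
    torch.save(state, './checkpoint/ckpt.pth')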
Code Example #11
File: main.py  Project: vnepveu/resnet_pipeline
import torch

from pipeline import Pipeline
from constants import MODEL_DIR
from resnet import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152
from report import Report

if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                        level=logging.INFO,
                        datefmt='%Y-%m-%d %H:%M:%S')
    if not os.path.exists(MODEL_DIR):
        os.mkdir(MODEL_DIR)
    res18 = (ResNet18(), "18")
    res34 = (ResNet34(), "34")
    res50 = (ResNet50(), "50")
    res101 = (ResNet101(), "101")
    res152 = (ResNet152(), "152")
    # all_resnets = [res18, res34, res50, res101, res152]
    all_resnets = [res18]
    all_epochs = [100]
    all_lr = [0.01]

    for model, architecture in all_resnets:
        for lr in all_lr:
            for n_epoch in all_epochs:
                name = f"ResNet{architecture}_{lr}_{n_epoch}epoch"
                pipeline = Pipeline(model, lr, n_epoch, name)
                pipeline.run()
Code Example #12
def key_transform(key):
    key = key.replace(".downsample.0.", ".shortcut.conv.")
    key = key.replace(".downsample.1.", ".shortcut.bn.")
    return key
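

# Illustrative check of the remapping above; the sample keys assume
# torchvision-style ResNet naming.
assert key_transform("layer1.0.downsample.0.weight") == "layer1.0.shortcut.conv.weight"
assert key_transform("layer1.0.downsample.1.running_mean") == "layer1.0.shortcut.bn.running_mean"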


# functions to show an image
def imshow(img):
    img = img / 2 + 0.5  # unnormalize
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()


# load model
res34 = ResNet34()

# pretrained model taken from - https://github.com/huyvnphan/PyTorch_CIFAR10
pretrained_dict = torch.load("./state_dicts/resnet34.pt")
new_dict = OrderedDict()
for key, value in pretrained_dict.items():
    new_key = key_transform(key)
    new_dict[new_key] = value

model_dict = res34.state_dict()
model_dict.update(new_dict)
res34.load_state_dict(model_dict)

# only update the parameters corresponding to hashing
update_params = []
for module in res34.modules():
Code Example #13
if args.feature == 'alcoholism':
    num_classes = 2
elif args.feature == 'stimulus':
    num_classes = 5
elif args.feature == 'id':
    num_classes = 122
else:
    raise ValueError("feature [%s] not recognized." % args.feature)

#### load specified model ####
print('==> Building model..')

if args.model == 'ResNet18':
    net = ResNet18(num_classes)
elif args.model == 'ResNet34':
    net = ResNet34(num_classes)
elif args.model == 'ResNet50':
    net = ResNet50(num_classes)
elif args.model == 'ResNet101':
    net = ResNet101(num_classes)
elif args.model == 'ResNet152':
    net = ResNet152(num_classes)
else:
    raise NotImplementedError('model [%s] not implemented.' % args.model)

net = net.to(device)
if device == 'cuda':
    net = torch.nn.DataParallel(net)
    cudnn.benchmark = True

best_acc = 0  # best test accuracy
Code Example #14
def main():
    args = get_args()
    
    device           = args.gpu
    
    load_model       = args.load_model
    model_dir        = args.model_dir

    architecture     = args.architecture
    similarity       = args.similarity
    loss_type        = args.loss_type
    
    data_dir         = args.data_dir
    data_name        = args.out_dataset
    batch_size       = args.batch_size
    
    train            = args.train
    weight_decay     = args.weight_decay
    epochs           = args.epochs

    test             = args.test
    noise_magnitudes = args.magnitudes

    # Create necessary directories
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)

    if architecture == 'densenet':
        underlying_net = DenseNet3(depth = 100, num_classes = 10)
    elif architecture == 'resnet':
        underlying_net = ResNet34()
    elif architecture == 'wideresnet':
        underlying_net = WideResNet(depth = 28, num_classes = 10, widen_factor = 10)
    
    underlying_net.to(device)
    
    # Construct g, h, and the composed deconf net
    baseline = (similarity == 'baseline')
    
    if baseline:
        h = InnerDeconf(underlying_net.output_size, 10)
    else:
        h = h_dict[similarity](underlying_net.output_size, 10)

    h.to(device)

    deconf_net = DeconfNet(underlying_net, underlying_net.output_size, 10, h, baseline)
    
    deconf_net.to(device)

    parameters = []
    h_parameters = []
    for name, parameter in deconf_net.named_parameters():
        if name == 'h.h.weight' or name == 'h.h.bias':
            h_parameters.append(parameter)
        else:
            parameters.append(parameter)

    optimizer = optim.SGD(parameters, lr = 0.1, momentum = 0.9, weight_decay = weight_decay)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones = [int(epochs * 0.5), int(epochs * 0.75)], gamma = 0.1)
    
    h_optimizer = optim.SGD(h_parameters, lr = 0.1, momentum = 0.9) # No weight decay
    h_scheduler = optim.lr_scheduler.MultiStepLR(h_optimizer, milestones = [int(epochs * 0.5), int(epochs * 0.75)], gamma = 0.1)
    
    # Load the model (capable of resuming training or inference)
    # from the checkpoint file

    if load_model:
        checkpoint = torch.load(f'{model_dir}/checkpoint.pth')
        
        epoch_start = checkpoint['epoch']
        optimizer.load_state_dict(checkpoint['optimizer'])
        h_optimizer.load_state_dict(checkpoint['h_optimizer'])
        deconf_net.load_state_dict(checkpoint['deconf_net'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        h_scheduler.load_state_dict(checkpoint['h_scheduler'])
        epoch_loss = checkpoint['epoch_loss']
    else:
        epoch_start = 0
        epoch_loss = None

    #get outlier data
    train_data, val_data, test_data, open_data = get_datasets(data_dir, data_name, batch_size)
  
    criterion = losses_dict[loss_type]

    # Train the model
    if train:
        deconf_net.train()
        
        num_batches = len(train_data)
        epoch_bar = tqdm(total = num_batches * epochs, initial = num_batches * epoch_start)
        
        for epoch in range(epoch_start, epochs):
            total_loss = 0.0
            for batch_idx, (inputs, targets) in enumerate(train_data):
                if epoch_loss is None:
                    epoch_bar.set_description(f'Training | Epoch {epoch + 1}/{epochs} | Batch {batch_idx + 1}/{num_batches}')
                else:
                    epoch_bar.set_description(f'Training | Epoch {epoch + 1}/{epochs} | Epoch loss = {epoch_loss:0.2f} | Batch {batch_idx + 1}/{num_batches}')
                inputs = inputs.to(device)
                targets = targets.to(device)
                h_optimizer.zero_grad()
                optimizer.zero_grad()
                
                logits, _, _ = deconf_net(inputs)
                loss = criterion(logits, targets)
                loss.backward()
                
                optimizer.step()
                h_optimizer.step()
                total_loss += loss.item()
                
                epoch_bar.update()
            
            epoch_loss = total_loss
            h_scheduler.step()
            scheduler.step()
            
            checkpoint = {
                'epoch': epoch + 1,
                'optimizer': optimizer.state_dict(),
                'h_optimizer': h_optimizer.state_dict(),
                'deconf_net': deconf_net.state_dict(),
                'scheduler': scheduler.state_dict(),
                'h_scheduler': h_scheduler.state_dict(),
                'epoch_loss': epoch_loss,
            }
            torch.save(checkpoint, f'{model_dir}/checkpoint.pth') # For continuing training or inference
            torch.save(deconf_net.state_dict(), f'{model_dir}/model.pth') # For exporting / sharing / inference only
        
        if epoch_loss is None:
            epoch_bar.set_description(f'Training | Epoch {epochs}/{epochs} | Batch {num_batches}/{num_batches}')
        else:
            epoch_bar.set_description(f'Training | Epoch {epochs}/{epochs} | Epoch loss = {epoch_loss:0.2f} | Batch {num_batches}/{num_batches}')
        epoch_bar.close()

    if test:
        deconf_net.eval()
        best_val_score = None
        best_auc = None
        
        for score_func in ['h', 'g', 'logit']:
            print(f'Score function: {score_func}')
            for noise_magnitude in noise_magnitudes:
                print(f'Noise magnitude {noise_magnitude:.5f}         ')
                validation_results =  np.average(testData(deconf_net, device, val_data, noise_magnitude, criterion, score_func, title = 'Validating'))
                print('ID Validation Score:',validation_results)
                
                id_test_results = testData(deconf_net, device, test_data, noise_magnitude, criterion, score_func, title = 'Testing ID') 
                
                ood_test_results = testData(deconf_net, device, open_data, noise_magnitude, criterion, score_func, title = 'Testing OOD')
                auroc = calc_auroc(id_test_results, ood_test_results)*100
                tnrATtpr95 = calc_tnr(id_test_results, ood_test_results)
                print('AUROC:', auroc, 'TNR@TPR95:', tnrATtpr95)
                if best_auc is None:
                    best_auc = auroc
                else:
                    best_auc = max(best_auc, auroc)
                if best_val_score is None or validation_results > best_val_score:
                    best_val_score = validation_results
                    best_val_auc = auroc
                    best_tnr = tnrATtpr95
        
        print('supposedly best auc: ', best_val_auc, ' and tnr@tpr95 ', best_tnr)
        print('true best auc:'      , best_auc)
Code Example #15
def train(dataset='Fish4Knowledge',
          cnn='resnet50',
          batch_size=32,
          epochs=50,
          image_size=32,
          eps=0.01):
    """
    Train one checkpoint with data augmentation: random padding+cropping and horizontal flip
    :param args: 
    :return:
    """
    print(
        'Dataset: %s, CNN: %s, loss: adv, epsilon: %.3f batch: %s, epochs: %s'
        % (dataset, cnn, eps, batch_size, epochs))

    IMAGE_SIZE = image_size
    INPUT_SHAPE = (image_size, image_size, 3)

    # find image folder: images are distributed in class subfolders
    if dataset == 'Fish4Knowledge':
        # image_path = '/home/xingjun/datasets/Fish4Knowledge/fish_image'
        image_path = '/data/cephfs/punim0619/Fish4Knowledge/fish_image'
        images, labels, images_val, labels_val = get_Fish4Knowledge(
            image_path, train_ratio=0.8)
    elif dataset == 'QUTFish':
        # image_path = '/home/xingjun/datasets/QUT_fish_data'
        image_path = '/data/cephfs/punim0619/QUT_fish_data'
        images, labels, images_val, labels_val = get_QUTFish(image_path,
                                                             train_ratio=0.8)
    elif dataset == 'WildFish':
        # image_pathes = ['/home/xingjun/datasets/WildFish/WildFish_part1',
        #             '/home/xingjun/datasets/WildFish/WildFish_part2',
        #             '/home/xingjun/datasets/WildFish/WildFish_part3',
        #             '/home/xingjun/datasets/WildFish/WildFish_part4']
        image_pathes = [
            '/data/cephfs/punim0619/WildFish/WildFish_part1',
            '/data/cephfs/punim0619/WildFish/WildFish_part2',
            '/data/cephfs/punim0619/WildFish/WildFish_part3',
            '/data/cephfs/punim0619/WildFish/WildFish_part4'
        ]
        images, labels, images_val, labels_val = get_WildFish(image_pathes,
                                                              train_ratio=0.8)

    # images, labels, images_val, labels_val = get_imagenet_googlesearch_data(image_path, num_class=NUM_CLASS)
    num_classes = len(np.unique(labels))
    num_images = len(images)
    num_images_val = len(images_val)
    print('Train: classes: %s, images: %s, val images: %s' %
          (num_classes, num_images, num_images_val))

    global current_index
    current_index = 0

    # dynamically load a batch of training data
    def get_batch():
        index = 1

        global current_index

        B = np.zeros(shape=(batch_size, IMAGE_SIZE, IMAGE_SIZE, 3))
        L = np.zeros(shape=(batch_size))
        while index < batch_size:
            try:
                img = load_img(images[current_index],
                               target_size=(IMAGE_SIZE, IMAGE_SIZE))
                img = img_to_array(img)
                img /= 255.
                # if cnn == 'ResNet50': # imagenet pretrained
                #     mean = np.array([0.485, 0.456, 0.406])
                #     std = np.array([0.229, 0.224, 0.225])
                #     img = (img - mean)/std
                ## data augmentation
                # random width and height shift
                img = random_shift(img, 0.2, 0.2)
                # random rotation
                img = random_rotation(img, 10)
                # random horizontal flip
                flip_horizontal = (np.random.random() < 0.5)
                if flip_horizontal:
                    img = flip_axis(img, axis=1)
                # # random vertical flip
                # flip_vertical = (np.random.random() < 0.5)
                # if flip_vertical:
                #     img = flip_axis(img, axis=0)
                # #cutout
                # eraser = get_random_eraser(v_l=0, v_h=1, pixel_level=False)
                # img = eraser(img)

                B[index] = img
                L[index] = labels[current_index]
                index = index + 1
                current_index = current_index + 1
            except:
                traceback.print_exc()
                # print("Ignore image {}".format(images[current_index]))
                current_index = current_index + 1
        # B = np.rollaxis(B, 3, 1)
        return B, np_utils.to_categorical(L, num_classes)

    global val_current_index
    val_current_index = 0

    # dynamically load a batch of validation data
    def get_val_batch():
        index = 1
        B = np.zeros(shape=(batch_size, IMAGE_SIZE, IMAGE_SIZE, 3))
        L = np.zeros(shape=(batch_size))

        global val_current_index

        while index < batch_size:
            try:
                img = load_img(images_val[val_current_index],
                               target_size=(IMAGE_SIZE, IMAGE_SIZE))
                img = img_to_array(img)
                img /= 255.
                # if cnn == 'ResNet50': # imagenet pretrained
                #     mean = np.array([0.485, 0.456, 0.406])
                #     std = np.array([0.229, 0.224, 0.225])
                #     img = (img - mean)/std
                B[index] = img
                L[index] = labels_val[val_current_index]
                index = index + 1
                val_current_index = val_current_index + 1
            except:
                traceback.print_exc()
                # print("Ignore image {}".format(images[val_current_index]))
                val_current_index = val_current_index + 1
        # B = np.rollaxis(B, 3, 1)
        return B, np_utils.to_categorical(L, num_classes)

    # build the base model
    if cnn == 'ResNet18':
        base_model = ResNet18(input_shape=INPUT_SHAPE,
                              classes=num_classes,
                              include_top=False)
    elif cnn == 'ResNet34':
        base_model = ResNet34(input_shape=INPUT_SHAPE,
                              classes=num_classes,
                              include_top=False)
    elif cnn == 'ResNet50':
        base_model = ResNet50(include_top=False,
                              weights='imagenet',
                              input_shape=INPUT_SHAPE)
    elif cnn == 'EfficientNetB1':
        base_model = EfficientNetB1(input_shape=INPUT_SHAPE,
                                    classes=num_classes,
                                    include_top=False,
                                    backend=keras.backend,
                                    layers=keras.layers,
                                    models=keras.models,
                                    utils=keras.utils)
    elif cnn == 'EfficientNetB2':
        base_model = EfficientNetB2(input_shape=INPUT_SHAPE,
                                    classes=num_classes,
                                    include_top=False,
                                    backend=keras.backend,
                                    layers=keras.layers,
                                    models=keras.models,
                                    utils=keras.utils)
    elif cnn == 'EfficientNetB3':
        base_model = EfficientNetB3(input_shape=INPUT_SHAPE,
                                    classes=num_classes,
                                    include_top=False,
                                    backend=keras.backend,
                                    layers=keras.layers,
                                    models=keras.models,
                                    utils=keras.utils)
    elif cnn == 'EfficientNetB4':
        base_model = EfficientNetB4(input_shape=INPUT_SHAPE,
                                    classes=num_classes,
                                    include_top=False,
                                    backend=keras.backend,
                                    layers=keras.layers,
                                    models=keras.models,
                                    utils=keras.utils)
    else:
        warnings.warn("Error: unrecognized CNN architecture!")
        return

    x = base_model.output
    x = Flatten()(x)
    x = Dense(num_classes, name='dense')(x)
    output = Activation('softmax')(x)
    model = Model(inputs=base_model.input, outputs=output, name=cnn)
    # model.summary()

    loss = cross_entropy

    base_lr = 1e-2
    sgd = SGD(lr=base_lr, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss=loss, optimizer=sgd, metrics=['accuracy'])

    # AdaFGSM attack for AdvFish training
    attack = AdaFGSM(model,
                     epsilon=float(eps),
                     random_start=True,
                     loss_func='xent',
                     clip_min=0.,
                     clip_max=1.)
    # PGD attack for AdvFish training; it reduces to FGSM when nb_iter=1
    # attack = LinfPGDAttack(model,
    #                      epsilon=float(eps),
    #                      eps_iter=float(eps),
    #                      nb_iter=1,
    #                      random_start=True,
    #                      loss_func='xent',
    #                      clip_min=0.,
    #                      clip_max=1.)

    # always save your weights after training or during training
    # create folder if not exist
    if not os.path.exists('models/'):
        os.makedirs('models/')
    log_path = 'log/%s' % dataset
    if not os.path.exists(log_path):
        os.makedirs(log_path)

    ## scan the weights folder and load the latest weight file to continue training
    model_prefix = '%s_%s_%s_%.4f_' % (dataset, cnn, 'adv', eps)
    w_files = os.listdir('models/')
    existing_ep = 0
    # for fl in w_files:
    #     if model_prefix in fl:
    #         ep = re.search(model_prefix+"(.+?).h5", fl).group(1)
    #         if int(ep) > existing_ep:
    #             existing_ep = int(ep)
    #
    # if existing_ep > 0:
    #     weight_file = 'models/' + model_prefix + str(existing_ep) + ".h5"
    #     print("load previous model weights from: ", weight_file)
    #     model.load_weights(weight_file)
    #
    #     log = np.load(os.path.join(log_path, 'train_log_%s_%s_%.3f.npy' % (cnn, 'adv', eps)))
    #
    #     train_loss_log = log[0, :existing_ep+1].tolist()
    #     train_acc_log = log[1, :existing_ep+1].tolist()
    #     val_loss_log = log[2, :existing_ep+1].tolist()
    #     val_acc_log = log[3, :existing_ep+1].tolist()
    # else:
    train_loss_log = []
    train_acc_log = []
    val_loss_log = []
    val_acc_log = []

    # dynamic training
    for ep in range(epochs - existing_ep):
        # cosine learning rate annealing
        eta_min = 1e-5
        eta_max = base_lr
        lr = eta_min + (eta_max -
                        eta_min) * (1 + math.cos(math.pi * ep / epochs)) / 2
        K.set_value(model.optimizer.lr, lr)
        # # step-wise learning rate annealing
        # if ep in [int(epochs*0.5), int(epochs*0.75)]:
        #     lr = K.get_value(model.optimizer.lr)
        #     K.set_value(model.optimizer.lr, lr*.1)
        #     print("lr decayed to {}".format(lr*.1))

        current_index = 0
        n_step = int(num_images / batch_size)
        pbar = tqdm(range(n_step))
        for stp in pbar:
            b, l = get_batch()
            # craft adversarial examples for this batch
            b_adv = attack.perturb(K.get_session(), b, l, batch_size)
            train_loss, train_acc = model.train_on_batch(b_adv, l)
            pbar.set_postfix(acc='%.4f' % train_acc, loss='%.4f' % train_loss)

        ## validation acc and loss at each epoch
        val_current_index = 0
        y_pred = []
        y_true = []
        while val_current_index + batch_size < num_images_val:
            b, l = get_val_batch()
            pred = model.predict(b)
            y_pred.extend(pred.tolist())
            y_true.extend(l.tolist())

        y_pred = np.clip(np.array(y_pred), 1e-7, 1.)
        correct_pred = (np.argmax(y_pred, axis=1) == np.argmax(y_true, axis=1))
        val_acc = np.mean(correct_pred)
        val_loss = -np.sum(np.mean(y_true * np.log(y_pred),
                                   axis=1)) / val_current_index

        train_loss_log.append(train_loss)
        train_acc_log.append(train_acc)
        val_loss_log.append(val_loss)
        val_acc_log.append(val_acc)
        log = np.stack((np.array(train_loss_log), np.array(train_acc_log),
                        np.array(val_loss_log), np.array(val_acc_log)))

        # save training log
        np.save(
            os.path.join(log_path,
                         'train_log_%s_%s_%.4f.npy' % (cnn, 'adv', eps)), log)

        pbar.set_postfix(acc='%.4f' % train_acc,
                         loss='%.4f' % train_loss,
                         val_acc='%.4f' % val_acc,
                         val_loss='%.4f' % val_loss)
        print(
            "Epoch %s - loss: %.4f - acc: %.4f - val_loss: %.4f - val_acc: %.4f"
            % (ep, train_loss, train_acc, val_loss, val_acc))
        images, labels = shuffle(images, labels)
        if ((ep + existing_ep + 1) % 5
                == 0) or (ep == (epochs - existing_ep - 1)):
            model_file = 'models/%s_%s_%s_%.4f_%s.h5' % (dataset, cnn, 'adv',
                                                         eps, ep + existing_ep)
            model.save_weights(model_file)
Code Example #16
File: lfw_eval.py  Project: zhangxu0307/mobile-face
    fig2 = hist.get_figure()
    fig2.savefig(os.path.join(savePath, "neg.jpg"))


if __name__ == '__main__':

    lfwPath = 'data/LFW/lfw.zip'
    modelPath = 'model_file/resnet34_webface_align_m05.pt'
    lfwLandmarkPath = 'data/LFW/lfw_landmark.txt'
    lfwPairsPath = 'data/LFW/pairs.txt'
    histSavePath = "data/"

    modelName = modelPath.replace('.', '/').split('/')[1]
    print("model:", modelName)

    net = ResNet34(feature=True)
    net.load_state_dict(torch.load(modelPath))
    net.cuda()
    net.eval()
    net.feature = True
    print("load model finished")

    predicts = runNetonLFW(lfwPath, lfwLandmarkPath, lfwPairsPath)
    print("predict finished!")

    plotSimliarityHist(predicts, savePath=histSavePath)

    accuracy = []
    thd = []
    folds = KFold(n=6000, n_folds=10, shuffle=False)
    thresholds = np.arange(-1.0, 1.0, 0.005)
Code Example #17
    epoch += 1

    if epoch >= 90:
        lr *= 5e-2
    elif epoch >= 60:
        lr *= 1e-1
    elif epoch >= 30:
        lr *= 5e-1

    print('Learning rate: ', lr)
    return lr

if n == 18:
    model = ResNet18(input_shape=input_shape, depth=depth)
else:
    model = ResNet34(input_shape=input_shape, depth=depth)

model.compile(loss='categorical_crossentropy',
              optimizer=LAMBOptimizer(lr=lr_schedule(0),
                                      weight_decay=weight_decay),
              metrics=['accuracy'])
model.summary()
print(model_type)

# Prepare model model saving directory.
save_dir = os.path.join(os.getcwd(), 'weights')
model_name = 'cifar10_%s_model.h5' % model_type
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)

if not os.path.exists('logs/'):
Code Example #18
# constant for classes
classes = (
    "plane",
    "car",
    "bird",
    "cat",
    "deer",
    "dog",
    "frog",
    "horse",
    "ship",
    "truck",
)

net = ResNet34()

criterion = nn.CrossEntropyLoss()
mse = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=1e-3)

net.to(DEVICE)

loss_epoch = []
loss_batch = []
loss_epoch_hash = []
loss_batch_hash = []
for epoch in range(EPOCH):  # loop over the dataset multiple times

    # running_loss = 0.0
    running_criterion = 0.0