Example #1
0
def get_net(args):
    """Build a PolyNet-backed multi-head classifier.

    ImageNet1k-pretrained weights are used when ``args.net_init`` is
    ``'pretrained'`` and no checkpoint path is given.
    """
    use_imagenet_weights = args.net_init == 'pretrained' and args.checkpoint == ""
    if use_imagenet_weights:
        print(yellow('Loading a net, pretrained on ImageNet1k.'))

    backbone = polynet(
        num_classes=1000,
        pretrained='imagenet' if use_imagenet_weights else None)

    # Everything up to and including stage C serves as the extractor.
    feature_extractor = nn.Sequential(
        backbone.stem,
        backbone.stage_a,
        backbone.reduction_a,
        backbone.stage_b,
        backbone.reduction_b,
        backbone.stage_c,
    )

    # One classification head per comma-separated class count.
    head_sizes = [int(n) for n in args.num_classes.split(',')]
    predictor = nn.Sequential(
        nn.Dropout(args.dropout_p),
        MultiHead(in_features=backbone.last_linear.in_features,
                  num_classes=head_sizes),
    )

    return BaseModel(feature_extractor, predictor)
Example #2
0
def get_net(args):
    """Build a multi-head classifier whose backbone is named by ``args.arch``.

    ``args.arch`` must be a constructor defined in this module.
    ImageNet1k-pretrained weights are used when ``args.net_init`` is
    ``'pretrained'`` and no checkpoint path is given.
    """
    use_imagenet_weights = args.net_init == 'pretrained' and args.checkpoint == ""
    if use_imagenet_weights:
        print(yellow('Loading a net, pretrained on ImageNet1k.'))

    # Resolve the architecture constructor by name from this module.
    arch_ctor = sys.modules[__name__].__dict__[args.arch]
    backbone = arch_ctor(
        num_classes=1000,
        pretrained='imagenet' if use_imagenet_weights else None)

    head_sizes = [int(n) for n in args.num_classes.split(',')]

    # Extractor: the ResNet-style stages of the backbone.
    feature_extractor = nn.Sequential(backbone.layer0, backbone.layer1,
                                      backbone.layer2, backbone.layer3,
                                      backbone.layer4)

    # Predictor: dropout feeding one linear head per class count.
    predictor = nn.Sequential(
        nn.Dropout(args.dropout_p),
        MultiHead(in_features=backbone.last_linear.in_features,
                  num_classes=head_sizes),
    )

    return BaseModel(feature_extractor, predictor)
Example #3
0
def get_net(args):
    """Build an SE-ResNeXt101-FT model.

    Returns the full fine-tuned model unless ``args.load_only_extractor``
    is set, in which case weights are restored from a fixed experiment
    checkpoint and only the feature extractor is kept, topped with a
    fresh dropout + multi-head predictor.
    """
    use_imagenet_weights = args.net_init == 'pretrained' and args.checkpoint == ""
    if use_imagenet_weights:
        print(yellow('Loading a net, pretrained on ImageNet1k.'))

    full_model = SE_ResNeXt101FT(num_classes=340, pretrained=use_imagenet_weights)

    if not args.load_only_extractor:
        return full_model

    # Restore the fine-tuned weights before stripping the model down to
    # its feature extractor.
    checkpoint = torch.load(
        'extensions/qd/data/experiments/se_resnext_n01z3/checkpoints/se_resnext101_n.pth'
    )
    full_model.load_state_dict(checkpoint['state_dict'])

    head_sizes = [int(n) for n in args.num_classes.split(',')]

    predictor = nn.Sequential(
        nn.Dropout(args.dropout_p),
        MultiHead(in_features=2048, num_classes=head_sizes))

    return BaseModel(full_model.features, predictor)
Example #4
0
def get_net(args):

    load_pretrained = args.net_init == 'pretrained' and args.checkpoint == ""
    if load_pretrained:
        print(yellow('Loading a net, pretrained on ImageNet1k.'))

    model = se_resnext50(num_classes=1000, pretrained=load_pretrained)

    # if not args.load_only_extractor:
    # return model

    # model.load_state_dict(torch.load('extensions/qd/data/experiments/se_resnext_n01z3/checkpoints/se_resnext101_n.pth')['state_dict'])

    if args.num_input_channels != 3:
        # if args.num_input_channels % 3 != 0:
        #     assert False

        conv1_ = model.conv1
        model.conv1 = torch.nn.Conv2d(args.num_input_channels,
                                      conv1_.out_channels,
                                      kernel_size=conv1_.kernel_size,
                                      stride=conv1_.stride,
                                      padding=conv1_.padding,
                                      bias=False)

        for i in range(int(args.num_input_channels / 3)):
            model.conv1.weight.data[:, i * 3:(i + 1) *
                                    3] = conv1_.weight.data / (int(
                                        args.num_input_channels / 3))

        if args.num_input_channels % 3 > 0:
            model.conv1.weight.data[:, -(args.num_input_channels %
                                         3):] = conv1_.weight.data[:, -(
                                             args.num_input_channels % 3):]

    feature_extractor = nn.Sequential(model.conv1, model.bn1, model.relu,
                                      model.maxpool, model.layer1,
                                      model.layer2, model.layer3, model.layer4)

    num_classes = [int(x) for x in args.num_classes.split(',')]

    predictor = nn.Sequential(
        nn.Dropout(args.dropout_p),
        MultiHead(in_features=2048, num_classes=num_classes))

    model = BaseModel(feature_extractor, predictor)

    return model
Example #5
0
def get_net(args):
    """Build an attention ResidualNet multi-head classifier.

    ``args.depth`` and ``args.att_type`` select the ResidualNet variant;
    ImageNet1k-pretrained weights are used when ``args.net_init`` is
    ``'pretrained'`` and no checkpoint path is given.
    """
    use_imagenet_weights = args.net_init == 'pretrained' and args.checkpoint == ""
    if use_imagenet_weights:
        print(yellow('Loading a net, pretrained on ImageNet1k.'))

    backbone = ResidualNet('ImageNet',
                           args.depth,
                           1000,
                           args.att_type,
                           pretrained=use_imagenet_weights)

    head_sizes = [int(n) for n in args.num_classes.split(',')]

    # The predictor is sized off the original fc layer before it is
    # removed from the backbone below.
    predictor = nn.Sequential(
        nn.Dropout(args.dropout_p),
        MultiHead(in_features=backbone.fc.in_features,
                  num_classes=head_sizes))

    # Drop the original classification layer; only features are needed.
    backbone.fc = None

    return BaseModel(backbone, predictor)
Example #6
0
def get_net(args):
    """Build an Inception-ResNet-v2 multi-head classifier.

    ImageNet1k-pretrained weights are used when ``args.net_init`` is
    ``'pretrained'`` and no checkpoint path is given.
    """
    use_imagenet_weights = args.net_init == 'pretrained' and args.checkpoint == ""
    if use_imagenet_weights:
        print(yellow('Loading a net, pretrained on ImageNet1k.'))

    backbone = inceptionresnetv2(
        num_classes=1000,
        pretrained='imagenet' if use_imagenet_weights else None)

    # All convolutional stages of Inception-ResNet-v2, in order.
    feature_extractor = nn.Sequential(
        backbone.conv2d_1a,
        backbone.conv2d_2a,
        backbone.conv2d_2b,
        backbone.maxpool_3a,
        backbone.conv2d_3b,
        backbone.conv2d_4a,
        backbone.maxpool_5a,
        backbone.mixed_5b,
        backbone.repeat,
        backbone.mixed_6a,
        backbone.repeat_1,
        backbone.mixed_7a,
        backbone.repeat_2,
        backbone.block8,
        backbone.conv2d_7b,
    )

    # One classification head per comma-separated class count.
    head_sizes = [int(n) for n in args.num_classes.split(',')]
    predictor = nn.Sequential(
        nn.Dropout(args.dropout_p),
        MultiHead(in_features=backbone.last_linear.in_features,
                  num_classes=head_sizes),
    )

    return BaseModel(feature_extractor, predictor)
    trainloader = DataLoader(dataset=trainset,
                             batch_size=args.batch_size,
                             shuffle=(train_sampler is None),
                             sampler=train_sampler,
                             num_workers=args.num_workers,
                             pin_memory=True)

    valloader = DataLoader(dataset=valset,
                           batch_size=args.batch_size,
                           shuffle=False,
                           num_workers=args.num_workers,
                           pin_memory=True)

    # model
    model = BaseModel(model_name=args.model_name,
                      num_classes=args.num_classes,
                      pretrained=args.pretrained)

    if args.resume:
        state = torch.load(args.resume)
        print('Resume from:{}'.format(args.resume))
        model.load_state_dict(state['net'], strict=False)
        best_acc = state['acc']
        start_epoch = state['epoch'] + 1
        if 'ols' in args.loss:
            criterion['ols'].matrix = state['ols'].cuda()

    # sync_bn
    if args.sync_bn and multi_gpus:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        print('Using SyncBatchNorm')
Example #8
0
def drop(db=DATABASE_URI):
    """Connect to *db* and drop every table known to BaseModel."""
    # Establish the connection first; drop_tables operates on it.
    connect(db)
    BaseModel.drop_tables()
Example #9
0
def create(db=DATABASE_URI):
    """Connect to *db* and create every table known to BaseModel."""
    # Establish the connection first; create_tables operates on it.
    connect(db)
    BaseModel.create_tables()
Example #10
0
    mode = 'best'

    parser = argparse.ArgumentParser()
    parser.add_argument('--savepath', default='./Base224L2/eff-b3', type=str)
    parser.add_argument('--last', action='store_true')
    args = parser.parse_args()

    path = args.savepath
    if args.last:
        mode = 'last'

    args = get_setting(path)
    # print(args)

    # model
    model = BaseModel(model_name=args['model_name'], num_classes=int(args['num_classes']), \
                    pretrained=int(args['pretrained']), pool_type=args['pool_type'], down=int(args['down']))

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # device = torch.device('cpu')
    model = model.to(device)
    load_pretrained_model(path, model, mode=mode)

    # data
    testset = Testset(root='./data/test')
    testloader = DataLoader(dataset=testset,
                            batch_size=128,
                            shuffle=False,
                            num_workers=8,
                            pin_memory=True)

    submit = {}