def __init__(self, 
                 layers,
                 in_channels=192,
                 strides=(1, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 pretrained=False,
                 deep_base=False) -> None:
        super(ResNetDCT_345, self).__init__()
        if layers == 18:
            resnet = resnet18(pretrained, deep_base, strides=strides, dilations=dilations)
        elif layers == 34:
            resnet = resnet34(pretrained, deep_base, strides=strides, dilations=dilations)
        elif layers == 50:
            resnet = resnet50(pretrained, deep_base, strides=strides, dilations=dilations)
        elif layers == 101:
            resnet = resnet101(pretrained, deep_base, strides=strides, dilations=dilations)
        self.layer2, self.layer3, self.layer4, self.avgpool, self.fc = \
            resnet.layer2, resnet.layer3, resnet.layer4, resnet.avgpool, resnet.fc
        self.relu = nn.ReLU(inplace=True)

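        # Rebuild the first conv of layer2 (and, just below, its 1x1 downsample projection) so
        # the network takes `in_channels` DCT-domain inputs (192 by default) directly at the
        # layer2 stage; this variant keeps only layer2-layer4 and drops the RGB stem and layer1.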
        out_ch = self.layer2[0].conv1.out_channels
        ks = self.layer2[0].conv1.kernel_size
        stride = self.layer2[0].conv1.stride
        padding = self.layer2[0].conv1.padding
        self.layer2[0].conv1 = nn.Conv2d(in_channels, out_ch, kernel_size=ks, stride=stride, padding=padding, bias=False)
        init_weight(self.layer2[0].conv1)
        
        out_ch = self.layer2[0].downsample[0].out_channels
        self.layer2[0].downsample[0] = nn.Conv2d(in_channels, out_ch, kernel_size=1, stride=2, bias=False)
        init_weight(self.layer2[0].downsample[0])
Example #2
def test():
    ##
    config = getConfig()
    # define transform image
    transform_test = transforms.Compose([
        transforms.Resize((config.image_size, config.image_size)),
        # transforms.CenterCrop(config.input_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])

    net = resnet101(pretrained=True, use_bap=False)
    in_features = net.fc_new.in_features
    new_linear = torch.nn.Linear(in_features=in_features, out_features=25)
    net.fc_new = new_linear

    # load checkpoint
    checkpoint_path = os.path.join(config.checkpoint_path,
                                   'model_best.pth.tar')
    load_state_dict = torch.load(checkpoint_path,
                                 map_location='cpu')['state_dict']
    new_state_dict = {}
    for key, value in load_state_dict.items():
        new_key = key.replace('module.', '')
        new_state_dict[new_key] = value
    net.load_state_dict(new_state_dict)
    img_dir = config.image
    image = Image.open(img_dir).convert('RGB')
    image = transform_test(image)
    preds, _, _ = net(image.unsqueeze(0))
    print(torch.sigmoid(preds))
Example #3
def get_model(model_name, pho_size=299, num_classes=110):
    if model_name == "vgg16":
        model = VGG(num_classes=num_classes, pho_size=299)
    elif model_name == "resnet101":
        model = resnet101(num_classes=num_classes)
    elif model_name == "resnet152":
        model = resnet152(num_classes=num_classes)
    elif model_name == "densenet":
        model = DenseNet(growth_rate=12,
                         block_config=[(100 - 4) // 6 for _ in range(3)],
                         num_classes=num_classes,
                         small_inputs=False,
                         efficient=True,
                         pho_size=pho_size)
    elif model_name == "InceptionResNetV2":
        model = InceptionResNetV2(num_classes=num_classes)
    elif model_name == "InceptionV4":
        model = InceptionV4(num_classes=num_classes)
    elif model_name == "Inception3":
        model = Inception3(num_classes=num_classes)
    elif model_name == "denoise":
        model = get_denoise()
    elif model_name == "Mymodel":
        model = Mymodel()
    elif model_name == 'Comdefend':
        model = ComDefend()
    elif model_name == 'Rectifi':
        model = Rectifi()
    return model
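A minimal usage sketch for the factory above (a hypothetical call; it assumes the referenced model classes are importable alongside this function):

model = get_model("resnet101", pho_size=299, num_classes=110)  # builds a 110-way ResNet-101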
Example #4
    def __init__(self, layers=18, bins=(1, 2, 3, 6), dropout=0.1, classes=2, zoom_factor=8, use_ppm=True,
                 criterion=nn.CrossEntropyLoss(ignore_index=255), BatchNorm=nn.BatchNorm2d, flow=False, sd=False,
                 pretrained=True):
        super(PSPNet, self).__init__()
        assert layers in [18, 50, 101, 152]
        assert 512 % len(bins) == 0
        assert classes > 1
        assert zoom_factor in [1, 2, 4, 8]
        self.zoom_factor = zoom_factor
        self.use_ppm = use_ppm
        self.flow = flow
        self.sd = sd
        self.criterion = criterion
        models.BatchNorm = BatchNorm

        if layers == 50:
            resnet = models.resnet50(pretrained=pretrained)
        elif layers == 18:
            resnet = models.resnet18(deep_base=False, pretrained=pretrained)
        elif layers == 101:
            resnet = models.resnet101(pretrained=pretrained)
        else:
            resnet = models.resnet152(pretrained=pretrained)
        self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool)
        self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4

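        # Replace the stride-2 downsampling in layer3/layer4 with dilated 3x3 convolutions
        # (rates 2 and 4), so the backbone keeps a 1/8-resolution feature map for the PSP head.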
        for n, m in self.layer3.named_modules():
            if 'conv2' in n:
                m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
            elif 'conv1' in n:
                m.stride = (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)
        for n, m in self.layer4.named_modules():
            if 'conv2' in n:
                m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)
            elif 'conv1' in n:
                m.stride = (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)

        fea_dim = 512
        if use_ppm:
            self.ppm = PPM(fea_dim, int(fea_dim / len(bins)), bins, BatchNorm)
            fea_dim *= 2
        self.cls = nn.Sequential(
            nn.Conv2d(fea_dim, 256, kernel_size=3, padding=1, bias=False),
            BatchNorm(256),
            nn.ReLU(inplace=True),
            nn.Dropout2d(p=dropout),
            nn.Conv2d(256, classes, kernel_size=1)
        )
        if self.training:
            self.aux = nn.Sequential(
                nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
                BatchNorm(256),
                nn.ReLU(inplace=True),
                nn.Dropout2d(p=dropout),
                nn.Conv2d(256, classes, kernel_size=1)
            )
Example #5
    def __init__(self,
                 model,
                 modality='rgb',
                 inp=3,
                 num_classes=150,
                 input_size=224,
                 input_segments=8,
                 dropout=0.5):
        super(tsn_model, self).__init__()

        if modality == 'flow':
            inp = 10

        self.num_classes = num_classes
        self.inp = inp
        self.input_segments = input_segments
        self._enable_pbn = False
        if model == 'resnet18':
            self.model = resnet.resnet18(inp=inp, pretrained=True)
        elif model == 'resnet34':
            self.model = resnet.resnet34(inp=inp, pretrained=True)
        elif model == 'resnet50':
            self.model = resnet.resnet50(inp=inp, pretrained=True)
        elif model == 'resnet101':
            self.model = resnet.resnet101(inp=inp, pretrained=True)
        elif model == 'bn_inception':
            self.model = bn_inception.bninception(inp=inp)

        self.modality = modality
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(p=dropout)
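        # Discard the backbone's own classifier and attach a fresh fc head; per-segment scores
        # are meant to be fused by the 'avg' ConsensusModule (standard TSN-style averaging).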
        in_channels = self.model.fc.in_features
        self.model.fc = None
        self.fc = nn.Linear(in_channels, num_classes)
        self.consensus = basic_ops.ConsensusModule('avg')
Example #6
    def __init__(self,
                 embedding_size,
                 num_classes,
                 backbone='resnet18',
                 mode='t'):
        super(background_resnet, self).__init__()
        self.trainMode = mode
        self.backbone = backbone
        # copying modules from pretrained models
        if backbone == 'resnet50':
            self.pretrained = resnet.resnet50(pretrained=False)
        elif backbone == 'resnet101':
            self.pretrained = resnet.resnet101(pretrained=False)
        elif backbone == 'resnet152':
            self.pretrained = resnet.resnet152(pretrained=False)
        elif backbone == 'resnet18':
            self.pretrained = resnet.resnet18(pretrained=False)
        elif backbone == 'resnet34':
            self.pretrained = resnet.resnet34(pretrained=False)
        else:
            raise RuntimeError('unknown backbone: {}'.format(backbone))

        self.fc0 = nn.Linear(128, embedding_size[0])

        # task specific layers for task 1
        self.fc1 = nn.Linear(128, embedding_size[1])
        self.bn1 = nn.BatchNorm1d(embedding_size[1])
        self.relu1 = nn.ReLU()
        self.last1 = nn.Linear(embedding_size[1], num_classes)

        # task specific layers for task 2
        self.fc2 = nn.Linear(128, embedding_size[2])
        self.bn2 = nn.BatchNorm1d(embedding_size[2])
        self.relu2 = nn.ReLU()
        self.last2 = nn.Linear(embedding_size[2], num_classes)
Example #7
def validate():
    ##
    engine = Engine()
    config = getConfig()
    device = torch.device("cuda:" + str(config.device))
    # define dataset
    transform_test = transforms.Compose([
        transforms.Resize((config.image_size, config.image_size)),
        # transforms.CenterCrop(config.input_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    val_dataset = CustomDataset('data/movie_val.csv',
                                'data/movie/images',
                                transform=transform_test)
    val_loader = DataLoader(val_dataset,
                            batch_size=config.batch_size,
                            shuffle=False,
                            num_workers=config.workers,
                            pin_memory=True)

    net = resnet101(pretrained=True, use_bap=False)
    in_features = net.fc_new.in_features
    new_linear = torch.nn.Linear(in_features=in_features, out_features=25)
    net.fc_new = new_linear

    # load checkpoint
    use_gpu = torch.cuda.is_available() and config.use_gpu
    if use_gpu:
        net = net.to(device)
    gpu_ids = [int(r) for r in config.gpu_ids.split(',')]
    if use_gpu and config.multi_gpu:
        net = torch.nn.DataParallel(net, device_ids=gpu_ids)
    checkpoint_path = os.path.join(config.checkpoint_path,
                                   'model_best.pth.tar')
    load_state_dict = torch.load(checkpoint_path,
                                 map_location=device)['state_dict']
    new_state_dict = {}
    for key, value in load_state_dict.items():
        new_key = key.replace('module.', '')
        new_state_dict[new_key] = value
    net.load_state_dict(new_state_dict)

    # define loss
    criterion = torch.nn.BCEWithLogitsLoss()
    if use_gpu:
        criterion = criterion.cuda()
    state = {
        'model': net,
        'val_loader': val_loader,
        'criterion': criterion,
        'config': config,
        'device': device,
        'step': 0,
        'lr': config.lr
    }
    prec1, fprec, val_loss = engine.validate(state)
    print(prec1)
Example #8
    def __init__(self,
                 layers=50,
                 bins=(1, 2, 3, 6),
                 dropout=0.1,
                 classes=2,
                 zoom_factor=8,
                 use_ppm=True,
                 BatchNorm=nn.BatchNorm2d,
                 pretrained=False):
        """ """
        super(Backbone, self).__init__()
        assert layers in [50, 101, 152]
        assert 2048 % len(bins) == 0
        assert classes > 1
        assert zoom_factor in [1, 2, 4, 8]
        self.zoom_factor = zoom_factor
        self.use_ppm = use_ppm
        models.BatchNorm = BatchNorm

        if layers == 50:
            resnet = models.resnet50(pretrained=pretrained)
        elif layers == 101:
            resnet = models.resnet101(pretrained=pretrained)
        else:
            resnet = models.resnet152(pretrained=pretrained)
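        # layer0 chains the "deep base" stem of this resnet implementation (three 3x3
        # conv/BN/ReLU stages instead of a single 7x7 conv) before the max-pool.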
        self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                    resnet.conv2, resnet.bn2, resnet.relu,
                                    resnet.conv3, resnet.bn3, resnet.relu,
                                    resnet.maxpool)
        self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4

        for n, m in self.layer3.named_modules():
            if 'conv2' in n:
                m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)
        for n, m in self.layer4.named_modules():
            if 'conv2' in n:
                m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)

        fea_dim = 2048
        if use_ppm:
            self.ppm = PPM(fea_dim, int(fea_dim / len(bins)), bins, BatchNorm)
            fea_dim *= 2
        self.cls = nn.Sequential(
            nn.Conv2d(fea_dim, 512, kernel_size=3, padding=1, bias=False),
            BatchNorm(512), nn.ReLU(inplace=True), nn.Dropout2d(p=dropout),
            nn.Conv2d(512, classes, kernel_size=1))
        if self.training:
            self.aux = nn.Sequential(
                nn.Conv2d(1024, 256, kernel_size=3, padding=1, bias=False),
                BatchNorm(256), nn.ReLU(inplace=True), nn.Dropout2d(p=dropout),
                nn.Conv2d(256, classes, kernel_size=1))
Example #9
def prepare_optimizee(args, sgd_in_names, obs_shape, hidden_size, actor_critic,
                      current_optimizee_step, prev_optimizee_step):
    prev_optimizee_step += current_optimizee_step
    current_optimizee_step = 0

    model = resnet101(pretrained=True)
    num_ftrs = model.fc.in_features
    fc_layers = nn.Sequential(
        nn.Linear(num_ftrs, 512),
        nn.ReLU(inplace=True),
        nn.Linear(512, args.num_class),
    )
    model.fc_new = fc_layers

    train_blocks = args.train_blocks.split('.')
    # default turn-off fc, turn-on fc_new
    for param in model.fc.parameters():
        param.requires_grad = False
    ##### Freeze several bottom layers (Optional) #####
    non_train_blocks = [
        'conv1', 'bn1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc'
    ]
    for name in train_blocks:
        try:
            non_train_blocks.remove(name)
        except Exception:
            print(
                "cannot find block name %s\nAvailable blocks are: conv1, bn1, layer1, layer2, layer3, layer4, fc"
                % name)
    for name in non_train_blocks:
        for param in getattr(model, name).parameters():
            param.requires_grad = False

    # Setup optimizer
    sgd_in = []
    for name in train_blocks:
        if name != 'fc':
            sgd_in.append({'params': get_params(model, [name]), 'lr': args.lr})
        else:
            sgd_in.append({
                'params': get_params(model, ["fc_new"]),
                'lr': args.lr
            })
    base_lrs = [group['lr'] for group in sgd_in]
    optimizer = SGD(sgd_in,
                    lr=args.lr,
                    momentum=args.momentum,
                    weight_decay=args.weight_decay)

    model = model.cuda()
    model.eval()
    return model, optimizer, current_optimizee_step, prev_optimizee_step
Example #10
def create_model(opt=None):
    assert opt.model in ['alexnet', 'googlenet', 'lenet', 'mobilenetv2', 'resnet34', 'resnet101', 'vgg11', 'vgg13', 'vgg16', 'vgg19', 'mobilenetv3small', 'mobilenetv3wen']
    if opt.model == 'mobilenetv2':
        model = mobilenet.MobileNetV2(num_classes=opt.n_classes, )
        print('net is mobilenetv2!')
        opt.model_save_dir='./weights/mobilenetv2'
    elif opt.model == 'alexnet':
        model = alexnet.AlexNet(num_classes=opt.n_classes, init_weights=True)
        print('net is alexnet!')
        opt.model_save_dir = './weights/alexnet'
    elif opt.model == 'googlenet':
        model = googlenet.GoogLeNet(num_classes=opt.n_classes, init_weights=True)
        print('net is googlenet!')
        opt.model_save_dir = './weights/googlenet'
    elif opt.model == 'lenet':
        model = lenet.LeNet(num_classes=opt.n_classes)
        print('net is lenet!')
        opt.model_save_dir = './weights/lenet'
    elif opt.model == 'resnet34':
        model = resnet.resnet34(num_classes=opt.n_classes)
        print('net is resnet34!')
        opt.model_save_dir = './weights/resnet34'
    elif opt.model == 'resnet101':
        model = resnet.resnet101(num_classes=opt.n_classes)
        print('net is resnet101!')
        opt.model_save_dir = './weights/resnet101'
    elif opt.model == 'vgg11':
        model = vgg.vgg(model_name="vgg11", num_classes=opt.n_classes, init_weights=True)
        print('net is vgg11!')
        opt.model_save_dir = './weights/vgg11'
    elif opt.model == 'vgg13':
        model = vgg.vgg(model_name="vgg13", num_classes=opt.n_classes, init_weights=True)
        print('net is vgg13!')
        opt.model_save_dir = './weights/vgg13'
    elif opt.model == 'vgg16':
        model = vgg.vgg(model_name="vgg16", num_classes=opt.n_classes, init_weights=True)
        print('net is vgg16!')
        opt.model_save_dir = './weights/vgg16'
    elif opt.model == 'vgg19':
        model = vgg.vgg(model_name="vgg19", num_classes=opt.n_classes, init_weights=True)
        print('net is vgg19!')
        opt.model_save_dir = './weights/vgg19'
    elif opt.model == 'mobilenetv3small':
        model = mobilenetv3_small.MobileNetV3_small(num_classes=opt.n_classes)
        print('net is mobilenetv3small!')
        opt.model_save_dir = './weights/mobilenetv3small'
    elif opt.model == 'mobilenetv3wen':
        model = mobilenetv3_wen.MobileNetV3_small(num_classes=opt.n_classes)
        print('net is mobilenetv3wen!')
        opt.model_save_dir = './weights/mobilenetv3wen'
    return opt, model
Example #11
    def __init__(self, layers=50, dropout=0.1, classes=1, zoom_factor=8, criterion=nn.CrossEntropyLoss(ignore_index=255), BatchNorm=nn.BatchNorm2d, pretrained=True, args=None):
        super(MGLNet, self).__init__()
        assert layers in [50, 101, 152]
        assert classes == 1
        assert zoom_factor in [1, 2, 4, 8]
        self.zoom_factor = zoom_factor
        self.criterion = criterion
        self.args = args
        models.BatchNorm = BatchNorm
        self.gamma = 1.0

        if layers == 50:
            resnet = models.resnet50(pretrained=pretrained)
        elif layers == 101:
            resnet = models.resnet101(pretrained=pretrained)
        else:
            resnet = models.resnet152(pretrained=pretrained)
        self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.conv2, resnet.bn2, resnet.relu, resnet.conv3, resnet.bn3, resnet.relu, resnet.maxpool)
        self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4

        for n, m in self.layer3.named_modules():
            if 'conv2' in n:
                m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)
        for n, m in self.layer4.named_modules():
            if 'conv2' in n:
                m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)

        self.dim = 512

        self.pred = nn.Sequential(
            nn.Conv2d(2048, self.dim, kernel_size=3, padding=1, bias=False),
            BatchNorm(self.dim),
            nn.ReLU(inplace=True),
            nn.Dropout2d(p=dropout),
            nn.Conv2d(self.dim, classes, kernel_size=1)
        )

        self.region_conv = self.pred[0:4] # 2048 -> 512
        self.edge_cat = ConcatNet(BatchNorm) # concat low-level feature map to predict edge

        # cascade mutual net
        self.mutualnet0 = MutualNet(BatchNorm, dim=self.dim, num_clusters=args.num_clusters, dropout=dropout)
        if args.stage == 1:
            self.mutualnets = nn.ModuleList([self.mutualnet0])
        elif args.stage == 2:
            self.mutualnet1 = MutualNet(BatchNorm, dim=self.dim, num_clusters=args.num_clusters, dropout=dropout)
            self.mutualnets = nn.ModuleList([self.mutualnet0, self.mutualnet1])
Example #12
    def __init__(self, input_size=(512, 1024)):
        super(FPN, self).__init__()
        self.input_size = input_size
        self.input_row = input_size[0]
        self.input_col = input_size[1]

        # self.base_net = InceptionResNetV2()
        self.base_net = resnet101()

        # !!!! Can be replaced with light-head/RFB/ASPP etc. to improve the receptive field !!!!
        self.c6 = nn.Conv2d(2048, 256, kernel_size=3, stride=2, padding=1)
        self.c5_down = nn.Conv2d(2048, 256, kernel_size=1, stride=1)
        self.c4_down = nn.Conv2d(1024, 256, kernel_size=1, stride=1)
        self.c3_down = nn.Conv2d(512, 256, kernel_size=1, stride=1)

        # !!!! I add another two dilated convolution to it to improve the receptive field !!!!
        # !!!! Of course they can be replaced by light-head/ASPP/RBF etc. !!!!
        # !!!! Of course they can also be replaced by Inception Block !!!!
        self.c2_up = nn.Conv2d(256, 256, kernel_size=1, stride=1)

        # !!!! Can be replaced with light-head/RFB/ASPP etc. to improve the receptive field !!!!
        # self.conv_fuse = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.p4_atrous = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=2,
                      dilation=2), SwitchNorm2d(256,
                                                using_moving_average=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0),
            SwitchNorm2d(256, using_moving_average=True),
            nn.ReLU(inplace=True))
        self.conv_fuse = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=2,
                      dilation=2))
        #                                nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=4, dilation=4),
        #                                nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=8, dilation=8))

        # downsample Conv
        self.conv_down = nn.Conv2d(256,
                                   256,
                                   kernel_size=3,
                                   stride=2,
                                   padding=1)
        self.conv1x1_cat = nn.Conv2d(512,
                                     256,
                                     kernel_size=1,
                                     stride=1,
                                     padding=0)
        self.sn = SwitchNorm2d(256, using_moving_average=True)
        self.bn = nn.BatchNorm2d(256, eps=0.001)
Example #13
def get_model(model_name):
    if model_name=="vgg16":
        model=VGG(num_classes=110)
    elif model_name=="resnet101":
        model=resnet101(num_classes=1000)
    elif model_name=="densenet":
        model=DenseNet(
            growth_rate=12,
            block_config=[(100 - 4) // 6 for _ in range(3)],
            num_classes=110,
            small_inputs=False,
            efficient=True,
        )
    else:
        model=None
    return model
Example #14
    def __init__(self, num_class=19):
        super(DFN, self).__init__()
        self.num_class = num_class
        self.resnet_features = resnet101(pretrained=False)
        self.layer0 = nn.Sequential(self.resnet_features.conv1, self.resnet_features.bn1,
                                    self.resnet_features.relu  #self.resnet_features.conv3,
                                    #self.resnet_features.bn3, self.resnet_features.relu3
                                    )
        self.layer1 = nn.Sequential(self.resnet_features.maxpool, self.resnet_features.layer1)
        self.layer2 = self.resnet_features.layer2
        self.layer3 = self.resnet_features.layer3
        self.layer4 = self.resnet_features.layer4

        # this is for smooth network
        self.out_conv = nn.Conv2d(2048,self.num_class,kernel_size=1,stride=1)
        self.global_pool = nn.AdaptiveAvgPool2d(1)
        self.cab1 = CAB(self.num_class*2,self.num_class)
        self.cab2 = CAB(self.num_class*2,self.num_class)
        self.cab3 = CAB(self.num_class*2,self.num_class)
        self.cab4 = CAB(self.num_class*2,self.num_class)

        self.rrb_d_1 = RRB(256, self.num_class)
        self.rrb_d_2 = RRB(512, self.num_class)
        self.rrb_d_3 = RRB(1024, self.num_class)
        self.rrb_d_4 = RRB(2048, self.num_class)

        self.upsample = nn.Upsample(scale_factor=2,mode="bilinear")
        self.upsample_4 = nn.Upsample(scale_factor=4, mode="bilinear")
        self.upsample_8 = nn.Upsample(scale_factor=8, mode="bilinear")

        self.rrb_u_1 = RRB(self.num_class,self.num_class)
        self.rrb_u_2 = RRB(self.num_class,self.num_class)
        self.rrb_u_3 = RRB(self.num_class,self.num_class)
        self.rrb_u_4 = RRB(self.num_class,self.num_class)


        ## this is for the border network
        self.rrb_db_1 = RRB(256, self.num_class)
        self.rrb_db_2 = RRB(512, self.num_class)
        self.rrb_db_3 = RRB(1024, self.num_class)
        self.rrb_db_4 = RRB(2048, self.num_class)

        self.rrb_trans_1 = RRB(self.num_class,self.num_class)
        self.rrb_trans_2 = RRB(self.num_class,self.num_class)
        self.rrb_trans_3 = RRB(self.num_class,self.num_class)
Example #15
 def __init__(self, opt):
     super(resnet_mil, self).__init__()
     import model.resnet as resnet
     resnet = resnet.resnet101()
     resnet.load_state_dict(
         torch.load('/media/jxgu/d2tb/model/resnet/resnet101.pth'))
     self.conv = torch.nn.Sequential()
     self.conv.add_module("conv1", resnet.conv1)
     self.conv.add_module("bn1", resnet.bn1)
     self.conv.add_module("relu", resnet.relu)
     self.conv.add_module("maxpool", resnet.maxpool)
     self.conv.add_module("layer1", resnet.layer1)
     self.conv.add_module("layer2", resnet.layer2)
     self.conv.add_module("layer3", resnet.layer3)
     self.conv.add_module("layer4", resnet.layer4)
     self.l1 = nn.Sequential(nn.Linear(2048, 1000), nn.ReLU(True),
                             nn.Dropout(0.5))
     self.att_size = 7
     self.pool_mil = nn.MaxPool2d(kernel_size=self.att_size, stride=0)
Example #16
 def __init__(self, opt):
     super(resnet_mil, self).__init__()
     import model.resnet as resnet
     resnet = resnet.resnet101()
     resnet.load_state_dict(torch.load('/media/jxgu/d2tb/model/resnet/resnet101.pth'))
     self.conv = torch.nn.Sequential()
     self.conv.add_module("conv1", resnet.conv1)
     self.conv.add_module("bn1", resnet.bn1)
     self.conv.add_module("relu", resnet.relu)
     self.conv.add_module("maxpool", resnet.maxpool)
     self.conv.add_module("layer1", resnet.layer1)
     self.conv.add_module("layer2", resnet.layer2)
     self.conv.add_module("layer3", resnet.layer3)
     self.conv.add_module("layer4", resnet.layer4)
     self.l1 = nn.Sequential(nn.Linear(2048, 1000),
                             nn.ReLU(True),
                             nn.Dropout(0.5))
     self.att_size = 7
     self.pool_mil = nn.MaxPool2d(kernel_size=self.att_size, stride=0)
Example #17
    def __init__(self, embedding_size, num_classes, backbone='resnet50'):
        super(background_resnet, self).__init__()
        self.backbone = backbone
        # copying modules from pretrained models
        if backbone == 'resnet50':
            self.pretrained = resnet.resnet50(pretrained=False)
        elif backbone == 'resnet101':
            self.pretrained = resnet.resnet101(pretrained=False)
        elif backbone == 'resnet152':
            self.pretrained = resnet.resnet152(pretrained=False)
        elif backbone == 'resnet18':
            self.pretrained = resnet.resnet18(pretrained=False)
        elif backbone == 'resnet34':
            self.pretrained = resnet.resnet34(pretrained=False)
        else:
            raise RuntimeError('unknown backbone: {}'.format(backbone))

        self.fc0 = nn.Linear(512, embedding_size)
        self.bn0 = nn.BatchNorm1d(embedding_size)
        self.relu = nn.ReLU()
        self.last = nn.Linear(embedding_size, num_classes)
Example #18
 def __init__(self, 
              layers,
              in_channels=192,
              strides=(1, 2, 2, 2),
              dilations=(1, 1, 1, 1),
              pretrained=False,
              deep_base=False) -> None:
     super(ResNetDCT_2345, self).__init__()
     self.layers = layers
     if layers == 18:
         resnet = resnet18(pretrained, deep_base, strides=strides, dilations=dilations)
     elif layers == 34:
         resnet = resnet34(pretrained, deep_base, strides=strides, dilations=dilations)
     elif layers == 50:
         resnet = resnet50(pretrained, deep_base, strides=strides, dilations=dilations)
     elif layers == 101:
         resnet = resnet101(pretrained, deep_base, strides=strides, dilations=dilations)
     self.layer1, self.layer2, self.layer3, self.layer4, self.avgpool, self.fc = \
         resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4, resnet.avgpool, resnet.fc
     self.relu = nn.ReLU(inplace=True)
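     # For basic-block backbones (18/34), a 1x1 projection maps the DCT input channels onto
     # layer1's expected width; for bottleneck backbones (50/101), layer1's first conv and its
     # downsample projection are rebuilt to accept `in_channels` directly.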
     if layers in [18, 34]:
         in_ch = self.layer1[0].conv1.in_channels
         self.down_layer = nn.Sequential(
             nn.Conv2d(in_channels, in_ch, kernel_size=1, stride=1, bias=False),
             nn.BatchNorm2d(in_ch),
             nn.ReLU(inplace=True)
         )
         # initialize the weight for only one layer
         for m in self.down_layer.modules():
             init_weight(m)
     else:
         out_ch = self.layer1[0].conv1.out_channels
         self.layer1[0].conv1 = nn.Conv2d(in_channels, out_ch, kernel_size=1, stride=1, bias=False)
         init_weight(self.layer1[0].conv1)
        
         out_ch = self.layer1[0].downsample[0].out_channels
         self.layer1[0].downsample[0] = nn.Conv2d(in_channels, out_ch, kernel_size=1, stride=1, bias=False)
         init_weight(self.layer1[0].downsample[0])
Example #19
def get_model(model_type='resnet50', num_classes=1000):
    # TODO: Add more backbones
    if model_type == 'resnet34':
        model = resnet.resnet34(pretrained=True)
    elif model_type == 'resnet50':
        model = resnet.resnet50(pretrained=True)
    elif model_type == 'resnet101':
        model = resnet.resnet101(pretrained=True)
    elif model_type == 'resnet152':
        model = resnet.resnet152(pretrained=True)
    elif model_type == 'resnext50_32x4d':
        model = resnet.resnext50_32x4d(pretrained=True)
    elif model_type == 'resnext101_32x8d':
        model = resnet.resnext101_32x8d(pretrained=True)
    elif model_type == 'res2net_v1b_50':
        model = res2net50_v1b_26w_4s(pretrained=True)
    elif model_type == 'res2net_v1b_101':
        model = res2net101_v1b_26w_4s(pretrained=True)
    elif model_type == 'res2net50_26w_4s':
        model = res2net50_26w_4s(pretrained=True)
    elif model_type == 'res2net101_26w_4s':
        model = res2net101_26w_4s(pretrained=True)
    elif model_type == 'res2next50':
        model = res2next50(pretrained=True)
    elif model_type == 'senet154':
        model = senet.senet154(num_classes=num_classes, pretrained='imagenet')
    elif model_type == 'resnest50':
        model = resnest50(pretrained=True)
    elif model_type == 'resnest101':
        model = resnest101(pretrained=True)
    elif model_type == 'resnest200':
        model = resnest200(pretrained=True)
    elif model_type == 'resnest269':
        model = resnest269(pretrained=True)
    else:
        model = resnet.resnet50(pretrained=True)

    return model
Example #20
def test():
    ##
    config = getConfig()
    device = torch.device("cuda:" + str(config.device))
    # define transform image
    transform_test = transforms.Compose([
        transforms.Resize((config.image_size, config.image_size)),
        # transforms.CenterCrop(config.input_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])

    net = resnet101(pretrained=True, use_bap=False)
    in_features = net.fc_new.in_features
    new_linear = torch.nn.Linear(in_features=in_features, out_features=25)
    net.fc_new = new_linear

    # load checkpoint
    use_gpu = torch.cuda.is_available() and config.use_gpu
    if use_gpu:
        net = net.to(device)
    gpu_ids = [int(r) for r in config.gpu_ids.split(',')]
    if use_gpu and config.multi_gpu:
        net = torch.nn.DataParallel(net, device_ids=gpu_ids)
    checkpoint_path = os.path.join(config.checkpoint_path,
                                   'model_best.pth.tar')
    load_state_dict = torch.load(checkpoint_path,
                                 map_location=device)['state_dict']
    new_state_dict = {}
    for key, value in load_state_dict.items():
        new_key = key.replace('module.', '')
        new_state_dict[new_key] = value
    net.load_state_dict(new_state_dict)
    img_dir = config.image
    image = Image.open(img_dir).convert('RGB')
    image = transform_test(image)
    preds, _, _ = net(image.unsqueeze(0).to(device))
    print(torch.sigmoid(preds))
Example #21
if args.layer == '18':
    net = resnet18(pretrained=False,
                   progress=True,
                   activate=activate,
                   hidden=hidden,
                   num_classes=10)
elif args.layer == '34':
    net = resnet34(pretrained=False,
                   progress=True,
                   activate=activate,
                   hidden=hidden,
                   num_classes=10)
elif args.layer == '50':
    net = resnet50(pretrained=False,
                   progress=True,
                   activate=activate,
                   hidden=hidden,
                   num_classes=10)
elif args.layer == '101':
    net = resnet101(pretrained=False,
                    progress=True,
                    activate=activate,
                    hidden=hidden,
                    num_classes=10)
elif args.layer == '152':
    net = resnet152(pretrained=False,
                    progress=True,
                    activate=activate,
                    hidden=hidden,
                    num_classes=10)
else:
    raise ValueError('layer should be 18 / 34 / 50 / 101 / 152')

net.to(device)
net.initialize_weights(zero_init_residual=True)

# ============================ step 3/6 loss function ============================
Example #22
parser.add_argument("--gamma_name", type=str)
parser.add_argument("--gamma_name_path", type=str)
parser.add_argument("--record_path", type=str)

args = parser.parse_args()

######## Choose whether to use CUDA
device = torch.device("cuda" if (
    args.use_cuda and torch.cuda.is_available()) else "cpu")
torch.cuda.empty_cache()

####### Pick the corresponding model
if args.model_name == 'res50':
    model = resnet.resnet50().to(device)
elif args.model_name == 'res101':
    model = resnet.resnet101().to(device)
elif args.model_name == 'res152':
    model = resnet.resnet152().to(device)
elif args.model_name == 'res34':
    model = resnet.resnet34().to(device)
elif args.model_name == 'res18':
    model = resnet.resnet18().to(device)
elif args.model_name == 'alexnet':
    model = alexnet.alexnet().to(device)
else:
    print('Wrong Model Name')

########### Whether to parallelize the model
if args.use_cuda:
    if args.parallel:
        model = nn.DataParallel(model)
Example #23
def train():
    # input params
    config = getConfig()
    torch.manual_seed(GLOBAL_SEED)
    torch.cuda.manual_seed(GLOBAL_SEED)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = True
    torch.cuda.set_device(config.device)
    best_prec1 = 0.
    rate = 0.875
    device = torch.device("cuda:" + str(config.device))
    # define train_dataset and loader
    transform_train = transforms.Compose([
        transforms.Resize(
            (int(config.input_size // rate), int(config.input_size // rate))),
        transforms.RandomCrop((config.input_size, config.input_size)),
        transforms.RandomVerticalFlip(),
        transforms.ColorJitter(brightness=32. / 255., saturation=0.5),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    train_dataset = CustomDataset('data/movie_train.csv',
                                  'data/movie/images',
                                  transform=transform_train)
    train_loader = DataLoader(train_dataset,
                              batch_size=config.batch_size,
                              shuffle=True,
                              num_workers=config.workers,
                              pin_memory=True,
                              worker_init_fn=_init_fn)

    transform_test = transforms.Compose([
        transforms.Resize((config.image_size, config.image_size)),
        transforms.CenterCrop(config.input_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    val_dataset = CustomDataset('data/movie_val.csv',
                                'data/movie/images',
                                transform=transform_test)
    val_loader = DataLoader(val_dataset,
                            batch_size=config.batch_size * 2,
                            shuffle=False,
                            num_workers=config.workers,
                            pin_memory=True,
                            worker_init_fn=_init_fn)
    # logging dataset info
    print('Train:[{train_num}], Val:[{val_num}]'.format(
        train_num=len(train_dataset), val_num=len(val_dataset)))
    print('Batch Size:[{0}], Total:::Train Batches:[{1}],Val Batches:[{2}]'.
          format(config.batch_size, len(train_loader), len(val_loader)))

    net = resnet101(pretrained=True, use_bap=False)
    in_features = net.fc_new.in_features
    new_linear = torch.nn.Linear(in_features=in_features,
                                 out_features=train_dataset.num_classes)
    net.fc_new = new_linear

    # gpu config
    use_gpu = torch.cuda.is_available() and config.use_gpu
    if use_gpu:
        net = net.to(device)

    gpu_ids = [int(r) for r in config.gpu_ids.split(',')]
    if use_gpu and config.multi_gpu:
        net = torch.nn.DataParallel(net, device_ids=gpu_ids)

    # define optimizer
    assert config.optim in ['sgd', 'adam'], 'optim name not found!'
    if config.optim == 'sgd':
        optimizer = torch.optim.SGD(net.parameters(),
                                    lr=config.lr,
                                    momentum=config.momentum,
                                    weight_decay=config.weight_decay)
    elif config.optim == 'adam':
        optimizer = torch.optim.Adam(net.parameters(),
                                     lr=config.lr,
                                     weight_decay=config.weight_decay)

    # define learning scheduler
    assert config.scheduler in ['plateau', 'step', 'cosine_annealing'
                                ], 'scheduler not supported!!!'
    if config.scheduler == 'plateau':
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                               'min',
                                                               patience=3,
                                                               factor=0.1)
    elif config.scheduler == 'step':
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                    step_size=2,
                                                    gamma=0.9)

    # define loss
    criterion = torch.nn.BCEWithLogitsLoss()

    # train val parameters dict
    state = {
        'model': net,
        'train_loader': train_loader,
        'val_loader': val_loader,
        'criterion': criterion,
        'config': config,
        'optimizer': optimizer,
        'device': device,
        'step': 0,
        'lr': config.lr
    }
    ## train and val
    engine = Engine()
    print(config)
    best_prec = 0
    for e in range(config.epochs):
        if config.scheduler == 'step':
            scheduler.step()
        lr_val = get_lr(optimizer)
        print("Start epoch %d ==========,lr=%f" % (e, lr_val))
        train_prec, train_loss = engine.train(state, e)
        prec, val_loss = engine.validate(state)
        is_best = prec > best_prec
        best_prec = max(prec, best_prec)
        print('Epoch: {}, Train-Loss: {:.4f}, Train-accuracy: {:.4f}, '
              'Test-accuracy: {:.4f}'.format(e + 1, train_loss, train_prec, prec))
        print('Best accuracy: {:.4f}'.format(best_prec))
        save_checkpoint(
            {
                'epoch': e + 1,
                'state_dict': net.state_dict(),
                'best_prec1': best_prec,  # save the tracked best accuracy (best_prec1 above is never updated)
                'optimizer': optimizer.state_dict(),
            }, is_best, config.output)
        if config.scheduler == 'plateau':
            scheduler.step(val_loss)
Example #24
trainset = celeba('./data/celebA/list_eval_partition.txt',
                  './data/celebA/list_attr_celeba.txt', '0',
                  './data/celebA/img_align_celeba/', transform_train)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=opt.batchSize,
                                          shuffle=True,
                                          num_workers=opt.workers)

valset = celeba('./data/celebA/list_eval_partition.txt',
                './data/celebA/list_attr_celeba.txt', '1',
                './data/celebA/img_align_celeba/', transform_val)
valloader = torch.utils.data.DataLoader(valset,
                                        batch_size=opt.batchSize,
                                        shuffle=True,
                                        num_workers=opt.workers)

resnet = resnet101(num_classes=40)
resnet.cuda()
criterion = nn.MSELoss(reduce=False)
optimizer = optim.SGD(resnet.parameters(),
                      lr=opt.lr,
                      momentum=0.9,
                      weight_decay=5e-4)
scheduler = StepLR(optimizer, step_size=3)
kk = 100
count = 0
weight = torch.FloatTensor(40).fill_(1.0).cuda()
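# Presumably a per-attribute weight vector for the 40 CelebA attributes, combined later with the
# element-wise (reduce=False) MSE loss defined above.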
print(type(weight))
val_loss = []


def validation():
Example #25
record_dir = 'record/%s/%s' % (args.dataset, args.method)
if not os.path.exists(record_dir):
    os.makedirs(record_dir)
record_file = os.path.join(
    record_dir, '%s_net_%s_%s_to_%s_num_%s' %
    (args.method, args.net, args.source, args.target, args.num))

torch.cuda.manual_seed(args.seed)
if args.net == 'resnet34':
    G = resnet34()
    inc = 512
elif args.net == 'resnet50':
    G = resnet50()
    inc = 2048
elif args.net == 'resnet101':
    G = resnet101()
    inc = 2048
elif args.net == "alexnet":
    G = AlexNetBase()
    inc = 4096
elif args.net == "vgg":
    G = VGGBase()
    inc = 4096
else:
    raise ValueError('Model cannot be recognized.')

params = []
for key, value in dict(G.named_parameters()).items():
    if value.requires_grad:
        if 'classifier' not in key:
            params += [{
Example #26
    def __init__(self,
                 layers=50,
                 bins=(1, 2, 3, 6),
                 dropout=0.1,
                 classes=2,
                 zoom_factor=8,
                 use_ppm=True,
                 criterion=nn.CrossEntropyLoss(ignore_index=255),
                 BatchNorm=nn.BatchNorm2d,
                 pretrained=True):
        super(PSPNet, self).__init__()
        assert layers in [18, 50, 101, 152]
        assert 2048 % len(bins) == 0
        assert classes > 1
        assert zoom_factor in [1, 2, 4, 8]
        self.zoom_factor = zoom_factor
        self.use_ppm = use_ppm
        self.criterion = criterion
        self.criterion_reg = nn.MSELoss(reduce=False)
        models.BatchNorm = BatchNorm

        if layers == 18:
            resnet = models_origin.resnet18(pretrained=True)
        elif layers == 50:
            resnet = models.resnet50(pretrained=pretrained)
        elif layers == 101:
            resnet = models.resnet101(pretrained=pretrained)
        else:
            resnet = models.resnet152(pretrained=pretrained)
        '''self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool)
        self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4

        for n, m in self.layer3.named_modules():
            if 'conv2' in n:
                m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)
        for n, m in self.layer4.named_modules():
            if 'conv2' in n:
                m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)

        fea_dim = 512
        if use_ppm:
            self.ppm = PPM(fea_dim, int(fea_dim/len(bins)), bins, BatchNorm)
            fea_dim *= 2'''

        self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                    resnet.conv2, resnet.bn2, resnet.relu,
                                    resnet.conv3, resnet.bn3, resnet.relu,
                                    resnet.maxpool)
        self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4

        for n, m in self.layer3.named_modules():
            if 'conv2' in n:
                m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)
        for n, m in self.layer4.named_modules():
            if 'conv2' in n:
                m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)

        fea_dim = 2048

        if use_ppm:
            self.ppm = PPM(fea_dim, int(fea_dim / len(bins)), bins, BatchNorm)
            fea_dim *= 2
        '''self.cls = nn.Sequential(
            nn.Conv2d(fea_dim, 512, kernel_size=3, padding=1, bias=False),
            BatchNorm(512),
            nn.ReLU(inplace=True),
            nn.Dropout2d(p=dropout),
            nn.Conv2d(512, classes, kernel_size=1)
        )'''

        self.cls3 = nn.Sequential(
            nn.Conv2d(fea_dim, 512, kernel_size=3, padding=1, bias=False),
            BatchNorm(512), nn.ReLU(inplace=True),
            nn.Conv2d(512, 96 * 2, kernel_size=1), BatchNorm(96 * 2),
            nn.ReLU(inplace=True))

        self.cls2 = nn.Sequential(
            nn.Conv2d(96 * 2 + 96, 96, kernel_size=3, padding=1, bias=False),
            BatchNorm(96), nn.ReLU(inplace=True), nn.Dropout2d(p=dropout),
            nn.Conv2d(96, classes, kernel_size=1))

        self.reg = nn.Sequential(
            nn.Conv2d(fea_dim, 512, kernel_size=3, padding=1, bias=False),
            BatchNorm(512), nn.ReLU(inplace=True),
            nn.Conv2d(512, 96, kernel_size=1))

        self.reg2 = nn.Sequential(
            nn.Conv2d(96, 96, kernel_size=3, padding=1, bias=False),
            BatchNorm(96), nn.ReLU(inplace=True),
            nn.Conv2d(96, 96, kernel_size=1), nn.ReLU(inplace=True))
        self.reg3 = nn.Sequential(
            nn.Conv2d(96, 96, kernel_size=3, padding=1, bias=False),
            BatchNorm(96), nn.ReLU(inplace=True),
            nn.Conv2d(96, 96, kernel_size=1), nn.ReLU(inplace=True))

        self.bn = BatchNorm(96)
        self.bn2 = BatchNorm(96)

        self.mean = torch.tensor(np.load('../data/meanvar/mean.npy'),
                                 requires_grad=False)
        self.var = torch.tensor(np.sqrt(np.load('../data/meanvar/var.npy')),
                                requires_grad=False)

        self.mean_2d = torch.tensor(np.load('../data/meanvar/mean_2d.npy'),
                                    requires_grad=False)  #.cuda().float()
        self.var_2d = torch.tensor(np.sqrt(
            np.load('../data/meanvar/var_2d.npy')),
                                   requires_grad=False)  #.cuda().float()
        self.var = self.var[0, :]

        self.mean = torch.unsqueeze(self.mean, 0)
        self.mean = torch.unsqueeze(self.mean, 2)
        self.mean = torch.unsqueeze(self.mean, 2)
        self.mean = self.mean.repeat(1, 1, 60, 60)
        self.var = torch.unsqueeze(self.var, 0)
        self.var = torch.unsqueeze(self.var, 2)
        self.var = torch.unsqueeze(self.var, 2)
        self.var = self.var.repeat(1, 1, 60, 60)
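        # The dataset mean / std (sqrt of the stored variance) are reshaped and tiled to
        # (1, C, 60, 60), presumably so they can be applied directly to 60x60 output maps.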
Example #27
        writer = SummaryWriter(tf_record)
    else:
        writer = None

    # batch size
    train_batchSize = [args.label_batch_size, args.unlabel_batch_size]

    # backbone architecture
    if args.arch == 'resnet18':
        backbone = resnet.resnet18(feature_len=args.feat_len)
    elif args.arch == 'resnet34':
        backbone = resnet.resnet34(feature_len=args.feat_len)
    elif args.arch == 'resnet50':
        backbone = resnet.resnet50(feature_len=args.feat_len)
    elif args.arch == 'resnet101':
        backbone = resnet.resnet101(feature_len=args.feat_len)
    elif args.arch == 'resnet152':
        backbone = resnet.resnet152(feature_len=args.feat_len)
    elif args.arch == 'usr':
        backbone = model_usr
    else:
        raise NameError(
            'Arch %s is not supported. Please choose from [resnet18, resnet34, resnet50, resnet101, resnet152, usr]'
            % args.arch)

    # head
    model_head = arc.ArcMarginProduct_virface(in_features=args.feat_len,
                                              out_features=args.num_ids,
                                              s=32,
                                              m=0.5,
                                              device='cuda')
Example #28
experiment_name = str(N_CLASSES) + '_classes' + '_1000_train_16'

model_path = os.path.join('model', 'resnet101-5d3b4d8f.pth')
#model_path = os.path.join(save_dir, experiment_name, MODEL_NAME)
if resume and os.path.isfile(model_path):
    model = torch.load(model_path)
    model.eval()
    with open(os.path.join(save_dir, experiment_name, MODEL_LOG_NAME),
              'r') as fp:
        words = fp.readline().split()
        starting_iter = int(words[2])
        starting_epoch = int(words[5])
    print("Resuming model at epoch %d and iter %d, with %d classes" %
          (starting_epoch, starting_iter, N_CLASSES))
else:
    model = resnet101(pretrained=pretrained, num_classes=N_CLASSES)
    model = nn.DataParallel(model)
    starting_iter = 0
    starting_epoch = 0
    print(
        "Training ResNet-101 from scratch with %d classes (initialized with pretrained weights)"
        % N_CLASSES)
model.cuda()
# Cross entropy loss takes the logits directly, so we don't need to apply softmax in our CNN
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(),
                      lr=0.1,
                      weight_decay=0.0001,
                      momentum=0.9)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                 factor=0.1,
Example #29
def main():
    args = get_args()
    PID = os.getpid()
    print("<< ============== JOB (PID = %d) %s ============== >>" %
          (PID, args.save_dir))
    prepare_seed(args.rand_seed)

    if args.timestamp == 'none':
        args.timestamp = "{:}".format(
            time.strftime('%h-%d-%C_%H-%M-%s', time.gmtime(time.time())))

    torch.set_num_threads(1)

    # Log outputs
    args.save_dir = args.save_dir + \
        "/Visda17-L2O.train.Res101-%s-train.%s-LR%.2E-epoch%d-batch%d-seed%d"%(
               "LWF" if args.lwf > 0 else "XE", args.train_blocks, args.lr, args.epochs, args.batch_size, args.rand_seed) + \
        "%s/%s"%('/'+args.resume if args.resume != 'none' else '', args.timestamp)
    logger = prepare_logger(args)

    best_prec1 = 0

    #### preparation ###########################################
    data_transforms = {
        'train':
        transforms.Compose([
            transforms.Resize(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ]),
        'val':
        transforms.Compose([
            transforms.Resize(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ]),
    }

    kwargs = {'num_workers': 20, 'pin_memory': True}
    trainset = VisDA17(txt_file=os.path.join(args.data,
                                             "train/image_list.txt"),
                       root_dir=os.path.join(args.data, "train"),
                       transform=data_transforms['train'])
    valset = VisDA17(txt_file=os.path.join(args.data,
                                           "validation/image_list.txt"),
                     root_dir=os.path.join(args.data, "validation"),
                     transform=data_transforms['val'],
                     label_one_hot=True)
    train_loader = DataLoader(trainset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              **kwargs)
    val_loader = DataLoader(valset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            **kwargs)
    train_loader_iter = iter(train_loader)
    current_optimizee_step, prev_optimizee_step = 0, 0

    model_old = None
    if args.lwf > 0:
        # create a fixed model copy for Life-long learning
        model_old = resnet101(pretrained=True)
        for param in model_old.parameters():
            param.requires_grad = False
        model_old.eval()
        model_old.cuda()
    ############################################################

    ### Agent Settings ########################################
    RANDOM = False  # False | True | 'init'
    action_space = np.arange(0, 1.1, 0.1)
    obs_avg = True
    _window_size = 1
    window_size = 1 if obs_avg else _window_size
    window_shrink_size = 20  # larger: controller will be updated more frequently
    sgd_in_names = [
        "conv1", "bn1", "layer1", "layer2", "layer3", "layer4", "fc_new"
    ]
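    # These are the parameter groups whose learning rates the controller adjusts; each sampled
    # action indexes into `action_space` (multipliers 0.0-1.0) and rescales args.lr per group.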
    coord_size = len(sgd_in_names)
    ob_name_lstm = ["loss", "loss_kl", "step", "fc_mean", "fc_std"]
    ob_name_scalar = []
    obs_shape = (len(ob_name_lstm) * window_size + len(ob_name_scalar) +
                 coord_size, )
    _hidden_size = 20
    hidden_size = _hidden_size * len(ob_name_lstm)
    actor_critic = Policy(coord_size,
                          input_size=(len(ob_name_lstm), len(ob_name_scalar)),
                          action_space=len(action_space),
                          hidden_size=_hidden_size,
                          window_size=window_size)
    actor_critic.cuda()
    actor_critic.eval()

    partial = torch.load(args.agent_load_dir,
                         map_location=lambda storage, loc: storage)
    state = actor_critic.state_dict()
    pretrained_dict = {k: v for k, v in partial.items()}
    state.update(pretrained_dict)
    actor_critic.load_state_dict(state)

    ################################################################

    _min_iter = 10
    # reset the optimizee
    model, optimizer, current_optimizee_step, prev_optimizee_step = prepare_optimizee(
        args, sgd_in_names, obs_shape, hidden_size, actor_critic,
        current_optimizee_step, prev_optimizee_step)
    epoch_size = len(train_loader)
    total_steps = epoch_size * args.epochs
    bar_format = '{desc}[{elapsed}<{remaining},{rate_fmt}]'
    pbar = tqdm(range(int(epoch_size * args.epochs)),
                file=sys.stdout,
                bar_format=bar_format,
                ncols=100)
    _window_size = max(
        _min_iter,
        current_optimizee_step + prev_optimizee_step // window_shrink_size)
    train_loader_iter, obs, loss, loss_kl, fc_mean, fc_std = train_step(
        args,
        _window_size,
        train_loader_iter,
        train_loader,
        model,
        optimizer,
        obs_avg,
        args.lr,
        pbar,
        current_optimizee_step + prev_optimizee_step,
        total_steps,
        model_old=model_old)
    logger.writer.add_scalar("loss/ce", loss,
                             current_optimizee_step + prev_optimizee_step)
    logger.writer.add_scalar("loss/kl", loss_kl,
                             current_optimizee_step + prev_optimizee_step)
    logger.writer.add_scalar("loss/total", loss + loss_kl,
                             current_optimizee_step + prev_optimizee_step)
    logger.writer.add_scalar("fc/mean", fc_mean,
                             current_optimizee_step + prev_optimizee_step)
    logger.writer.add_scalar("fc/std", fc_std,
                             current_optimizee_step + prev_optimizee_step)
    current_optimizee_step += _window_size
    pbar.update(_window_size)
    prev_obs = obs.unsqueeze(0)
    prev_hidden = torch.zeros(actor_critic.net.num_recurrent_layers, 1,
                              hidden_size).cuda()
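    # first observation and a zero-initialized recurrent state seed the controller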
    for epoch in range(args.epochs):
        print("\n===== Epoch %d / %d =====" % (epoch + 1, args.epochs))
        print("<< ============== JOB (PID = %d) %s ============== >>" %
              (PID, args.save_dir))
        while current_optimizee_step < epoch_size:
            # Sample actions
            with torch.no_grad():
                if not RANDOM:
                    value, action, action_log_prob, recurrent_hidden_states, distribution = actor_critic.act(
                        prev_obs, prev_hidden, deterministic=False)
                    action = action.squeeze()
                    action_log_prob = action_log_prob.squeeze()
                    value = value.squeeze()
                    for idx in range(len(action)):
                        logger.writer.add_scalar(
                            "action/%s" % sgd_in_names[idx], action[idx],
                            current_optimizee_step + prev_optimizee_step)
                        logger.writer.add_scalar(
                            "entropy/%s" % sgd_in_names[idx],
                            distribution.distributions[idx].entropy(),
                            current_optimizee_step + prev_optimizee_step)
                        optimizer.param_groups[idx]['lr'] = float(
                            action_space[action[idx]]) * args.lr
                        logger.writer.add_scalar(
                            "LR/%s" % sgd_in_names[idx],
                            optimizer.param_groups[idx]['lr'],
                            current_optimizee_step + prev_optimizee_step)
                else:
                    if RANDOM is True or RANDOM == 'init':
                        for idx in range(coord_size):
                            optimizer.param_groups[idx]['lr'] = float(
                                choice(action_space)) * args.lr
                    if RANDOM == 'init':
                        RANDOM = 'done'
                    for idx in range(coord_size):
                        logger.writer.add_scalar(
                            "LR/%s" % sgd_in_names[idx],
                            optimizer.param_groups[idx]['lr'],
                            current_optimizee_step + prev_optimizee_step)

            # Observe reward and next obs
            _window_size = max(
                _min_iter, (current_optimizee_step +
                            prev_optimizee_step) // window_shrink_size)
            _window_size = min(_window_size,
                               epoch_size - current_optimizee_step)
            train_loader_iter, obs, loss, loss_kl, fc_mean, fc_std = train_step(
                args,
                _window_size,
                train_loader_iter,
                train_loader,
                model,
                optimizer,
                obs_avg,
                args.lr,
                pbar,
                current_optimizee_step + prev_optimizee_step,
                total_steps,
                model_old=model_old)
            logger.writer.add_scalar(
                "loss/ce", loss, current_optimizee_step + prev_optimizee_step)
            logger.writer.add_scalar(
                "loss/kl", loss_kl,
                current_optimizee_step + prev_optimizee_step)
            logger.writer.add_scalar(
                "loss/total", loss + loss_kl,
                current_optimizee_step + prev_optimizee_step)
            logger.writer.add_scalar(
                "fc/mean", fc_mean,
                current_optimizee_step + prev_optimizee_step)
            logger.writer.add_scalar(
                "fc/std", fc_std, current_optimizee_step + prev_optimizee_step)
            current_optimizee_step += _window_size
            pbar.update(_window_size)
            prev_obs = obs.unsqueeze(0)
            if not RANDOM:
                prev_hidden = recurrent_hidden_states
        prev_optimizee_step += current_optimizee_step
        current_optimizee_step = 0

        # evaluate on validation set
        prec1 = validate(val_loader, model, args)
        logger.writer.add_scalar("prec", prec1, epoch)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            args.save_dir, {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
            }, is_best)

        logging.info('Best accuracy: {prec1:.3f}'.format(prec1=best_prec1))
Example #30
0
transform_test = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])

testset = CustomData('./data/test1',
                     transform=transform_test,
                     train=False,
                     test=True)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=opt.batchSize,
                                         shuffle=False,
                                         num_workers=opt.num_workers)
model = resnet101(pretrained=True)
model.fc = nn.Linear(2048, 2)
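# replace the 1000-way ImageNet head with a 2-class head so it matches the fine-tuned checkpoint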
model.load_state_dict(torch.load('ckp/model.pth'))
model.cuda()
model.eval()
results = []

with torch.no_grad():
    for image, label in testloader:
        image = image.cuda()
        out = model(image)
        label = label.numpy().tolist()
        _, predicted = torch.max(out.data, 1)
        predicted = predicted.data.cpu().numpy().tolist()
        # store the ground-truth label and the predicted class index for each image
        results.extend([[i, str(j)]
                        for (i, j) in zip(label, predicted)])

trainset = celeba('./data/celebA/list_eval_partition.txt',
                  './data/celebA/list_attr_celeba.txt', '0',
                  './data/celebA/img_align_celeba/', transform_train)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=opt.batchSize,
                                          shuffle=True,
                                          num_workers=opt.workers)

valset = celeba('./data/celebA/list_eval_partition.txt',
                './data/celebA/list_attr_celeba.txt', '1',
                './data/celebA/img_align_celeba/', transform_val)
valloader = torch.utils.data.DataLoader(valset,
                                        batch_size=opt.batchSize,
                                        shuffle=True,
                                        num_workers=opt.workers)

resnet = resnet101(pretrained=True, num_classes=40)
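# 40 outputs, one per CelebA face attribute; the attribute labels are regressed with MSE below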
resnet.cuda()
criterion = nn.MSELoss(reduction='mean')  # average squared error over all attributes ('reduce' is deprecated)
optimizer = optim.SGD(resnet.parameters(),
                      lr=opt.lr,
                      momentum=0.9,
                      weight_decay=5e-4)
scheduler = StepLR(optimizer, step_size=3)
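# decays the learning rate by the default factor gamma=0.1 every 3 epochs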


def train(epoch):
    print('\nEpoch: %d' % epoch)
    scheduler.step()
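    # note: since PyTorch 1.1 the recommended order is optimizer.step() during the epoch,
    # then scheduler.step() once at the end of the epoch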
    resnet.train()
    for batch_idx, (images, attrs) in enumerate(trainloader):
        images = Variable(images.cuda())
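        # torch.autograd.Variable is deprecated; images.cuda() alone would suffice here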