コード例 #1
0
    def __init__(self, num_classes, input_size, pretrained=True):
        """Build the GCN segmentation head on a ResNet-152 backbone.

        Args:
            num_classes: number of output segmentation classes.
            input_size: spatial size of the network input (stored for later use).
            pretrained: when True, load ResNet-152 weights from ``res152_path``.
        """
        super(GCN, self).__init__()
        self.input_size = input_size

        backbone = models.resnet152()
        if pretrained:
            backbone.load_state_dict(torch.load(res152_path))

        # Split the backbone into five stages so intermediate feature maps
        # can be fed to the GCM/BRM modules.
        self.layer0 = nn.Sequential(backbone.conv1, backbone.bn1, backbone.relu)
        self.layer1 = nn.Sequential(backbone.maxpool, backbone.layer1)
        self.layer2 = backbone.layer2
        self.layer3 = backbone.layer3
        self.layer4 = backbone.layer4

        # One global-convolution module per backbone stage, deepest first.
        for idx, channels in enumerate((2048, 1024, 512, 256), start=1):
            setattr(self, 'gcm%d' % idx, _GlobalConvModule(channels, num_classes, (7, 7)))

        # Nine boundary-refinement modules used along the decoder path.
        for idx in range(1, 10):
            setattr(self, 'brm%d' % idx, _BoundaryRefineModule(num_classes))

        initialize_weights(self.gcm1, self.gcm2, self.gcm3, self.gcm4, self.brm1, self.brm2, self.brm3,
                           self.brm4, self.brm5, self.brm6, self.brm7, self.brm8, self.brm9)
コード例 #2
0
    def __init__(self, num_classes, pretrained=True):
        """ResNet-152 trunk with hybrid dilated convolutions and a DUC head.

        Args:
            num_classes: number of output classes for the DUC module.
            pretrained: when True, load ResNet-152 weights from ``res152_path``.
        """
        super(ResNetDUCHDC, self).__init__()
        trunk = models.resnet152()
        if pretrained:
            trunk.load_state_dict(torch.load(res152_path))
        self.layer0 = nn.Sequential(trunk.conv1, trunk.bn1, trunk.relu, trunk.maxpool)
        self.layer1 = trunk.layer1
        self.layer2 = trunk.layer2
        self.layer3 = trunk.layer3
        self.layer4 = trunk.layer4

        # Remove striding in the last two stages so the feature map stays dense.
        for stage in (self.layer3, self.layer4):
            for name, module in stage.named_modules():
                if 'conv2' in name or 'downsample.0' in name:
                    module.stride = (1, 1)

        # Hybrid dilated convolution: cycle the dilation rates across blocks.
        layer3_rates = [1, 2, 5, 9]
        for idx, block in enumerate(self.layer3):
            rate = layer3_rates[idx % 4]
            block.conv2.dilation = (rate, rate)
            block.conv2.padding = (rate, rate)
        layer4_rates = [5, 9, 17]
        for idx, block in enumerate(self.layer4):
            rate = layer4_rates[idx]
            block.conv2.dilation = (rate, rate)
            block.conv2.padding = (rate, rate)

        self.duc = _DenseUpsamplingConvModule(8, 2048, num_classes)
コード例 #3
0
    def __init__(self, num_classes, pretrained=True):
        """ResNet-152 trunk with fixed per-stage dilation and a DUC head.

        Args:
            num_classes: number of output classes for the DUC module.
            pretrained: when True, load ResNet-152 weights from ``res152_path``.
        """
        super(ResNetDUC, self).__init__()
        trunk = models.resnet152()
        if pretrained:
            trunk.load_state_dict(torch.load(res152_path))
        self.layer0 = nn.Sequential(trunk.conv1, trunk.bn1, trunk.relu, trunk.maxpool)
        self.layer1 = trunk.layer1
        self.layer2 = trunk.layer2
        self.layer3 = trunk.layer3
        self.layer4 = trunk.layer4

        # Dilate layer3 (rate 2) and layer4 (rate 4) while removing their
        # strides, so spatial resolution is preserved for dense prediction.
        for stage, rate in ((self.layer3, 2), (self.layer4, 4)):
            for name, module in stage.named_modules():
                if 'conv2' in name:
                    module.dilation = (rate, rate)
                    module.padding = (rate, rate)
                    module.stride = (1, 1)
                elif 'downsample.0' in name:
                    module.stride = (1, 1)

        self.duc = _DenseUpsamplingConvModule(8, 2048, num_classes)
コード例 #4
0
def resnet152(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-152 model.

    Args:
        num_classes: size of the final classification layer.
        pretrained: key into ``pretrained_settings['resnet152']``
            (e.g. 'imagenet'), or None for randomly initialised weights.
    """
    model = models.resnet152(pretrained=False)
    if pretrained is None:
        return model
    settings = pretrained_settings['resnet152'][pretrained]
    return load_pretrained(model, num_classes, settings)
コード例 #5
0
 def __init__(self, embed_size):
     """Load the pretrained ResNet-152 and replace top fc layer.

     Args:
         embed_size: dimensionality of the output image embedding.
     """
     super(EncoderCNN, self).__init__()
     resnet = models.resnet152(pretrained=True)
     modules = list(resnet.children())[:-1]      # delete the last fc layer.
     self.resnet = nn.Sequential(*modules)
     # Project the pooled ResNet feature (resnet.fc.in_features wide) down
     # to the requested embedding size.
     self.linear = nn.Linear(resnet.fc.in_features, embed_size)
     # Low momentum keeps the running statistics stable across batches.
     self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)
コード例 #6
0
ファイル: main.py プロジェクト: rt416/grad-cam-pytorch
def demo2(image_paths, output_dir, cuda):
    """
    Generate Grad-CAM at different layers of ResNet-152

    Args:
        image_paths: iterable of input image file paths.
        output_dir: directory that receives the rendered Grad-CAM PNGs.
        cuda: truthy to run on GPU (forwarded to get_device).
    """

    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Model
    model = models.resnet152(pretrained=True)
    model.to(device)
    model.eval()

    # The stem relu plus the four residual layers
    target_layers = ["relu", "layer1", "layer2", "layer3", "layer4"]
    target_class = 243  # "bull mastiff"

    # Images
    images = []
    raw_images = []
    print("Images:")
    for i, image_path in enumerate(image_paths):
        print("\t#{}: {}".format(i, image_path))
        image, raw_image = preprocess(image_path)
        images.append(image)
        raw_images.append(raw_image)
    images = torch.stack(images).to(device)

    # Forward once, then backprop w.r.t. the same target class for every image.
    gcam = GradCAM(model=model)
    probs, ids = gcam.forward(images)
    ids_ = torch.LongTensor([[target_class]] * len(images)).to(device)
    gcam.backward(ids=ids_)

    for target_layer in target_layers:
        print("Generating Grad-CAM @{}".format(target_layer))

        # Grad-CAM
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            # NOTE(review): float(probs[ids == target_class]) assumes the mask
            # selects exactly one element; with several images whose top-k ids
            # include the target class this could fail — confirm upstream.
            print(
                "\t#{}: {} ({:.5f})".format(
                    j, classes[target_class], float(probs[ids == target_class])
                )
            )

            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(
                        j, "resnet152", target_layer, classes[target_class]
                    ),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )
コード例 #7
0
ファイル: extractCNN.py プロジェクト: gedaye11/fluent_change
    def __init__(self, descriptor_name):
        """Build a feature-extraction backbone selected by ``descriptor_name``.

        Supported names: 'vgg16', 'vgg19', 'resnet50', 'resnet101',
        'resnet152'.  VGG variants are rebuilt as a flat ``nn.Sequential``;
        ResNet variants use the torchvision model directly (resnet50 and
        resnet152 get a 10-way fc head).
        """
        super(Net, self).__init__()

        if descriptor_name == 'vgg16':
            self.select = ['30']
            self.vgg16 = models.vgg16(pretrained=True)
            layers = list(self.vgg16.features._modules.values())
            # Keep classifier layers up to, but excluding, the final fc ('6').
            for name, layer in self.vgg16.classifier._modules.items():
                if name == '6':
                    break
                layers.append(layer)
            # Replace the original 1000-way head with a 10-way one.
            layers.append(nn.Linear(4096, 10))
            self.sequence = layers
            self.model = nn.Sequential(*layers)

        elif descriptor_name == 'vgg19':
            self.select = ['36']
            self.vgg19 = models.vgg19(pretrained=True)
            layers = list(self.vgg19.features._modules.values())
            # Append only the first classifier layer.
            layers.append(next(iter(self.vgg19.classifier._modules.values())))
            self.sequence = layers
            self.model = nn.Sequential(*layers)

        elif descriptor_name == 'resnet50':
            self.select = ['avgpool']
            self.model = models.resnet50(pretrained=True)
            self.model.fc = nn.Linear(2048, 10)

        elif descriptor_name == 'resnet101':
            # NOTE(review): unlike resnet50/resnet152, the fc head is left at
            # its original 1000 classes here — confirm this is intentional.
            self.select = ['avgpool']
            self.model = models.resnet101(pretrained=True)

        elif descriptor_name == 'resnet152':
            self.select = ['avgpool']
            self.model = models.resnet152(pretrained=True)
            self.model.fc = nn.Linear(2048, 10)
コード例 #8
0
ファイル: network.py プロジェクト: jimchenhub/Xlearn
 def __init__(self):
   """Expose each stage of a pretrained ResNet-152 as an attribute.

   The final fc layer is not copied; its input width is stashed in the
   name-mangled ``self.__in_features`` for downstream classifier heads.
   """
   super(ResNet152Fc, self).__init__()
   model_resnet152 = models.resnet152(pretrained=True)
   self.conv1 = model_resnet152.conv1
   self.bn1 = model_resnet152.bn1
   self.relu = model_resnet152.relu
   self.maxpool = model_resnet152.maxpool
   self.layer1 = model_resnet152.layer1
   self.layer2 = model_resnet152.layer2
   self.layer3 = model_resnet152.layer3
   self.layer4 = model_resnet152.layer4
   self.avgpool = model_resnet152.avgpool
   self.__in_features = model_resnet152.fc.in_features
コード例 #9
0
ファイル: feature_net.py プロジェクト: Suluo/Kaggle
 def __init__(self, model):
     """Build a headless feature extractor from a torchvision backbone.

     Args:
         model: one of 'vgg', 'inceptionv3' or 'resnet152'.
     """
     super(feature_net, self).__init__()
     if model == 'vgg':
         vgg = models.vgg19(pretrained=True)
         # BUG FIX: was `self.feature == nn.Sequential(...)` — a comparison,
         # which discarded the module and left self.feature unset.
         self.feature = nn.Sequential(*list(vgg.children())[:-1])
         self.feature.add_module('global average', nn.AvgPool2d(9))
     elif model == 'inceptionv3':
         inception = models.inception_v3(pretrained=True)
         self.feature = nn.Sequential(*list(inception.children())[:-1])
         # Drop the auxiliary classifier branch.
         self.feature._modules.pop('13')
         # BUG FIX: nn.Module has add_module (singular); `add_modules`
         # raised AttributeError.
         self.feature.add_module('global average', nn.AvgPool2d(35))
     elif model == 'resnet152':
         resnet = models.resnet152(pretrained=True)
         self.feature = nn.Sequential(*list(resnet.children())[:-1])
コード例 #10
0
def get_model(num_classes, model_type='resnet50'):
    """Return a CUDA model of the requested architecture with its
    classification head resized to ``num_classes``.

    ResNet variants expose the head as ``fc``; DenseNet variants as
    ``classifier``.
    """
    resnet_ctors = {
        'resnet50': resnet50,
        'resnet101': resnet101,
        'resnet152': resnet152,
    }
    densenet_ctors = {
        'densenet121': densenet121,
        'densenet161': densenet161,
        'densenet201': densenet201,
    }
    if model_type in resnet_ctors:
        model = resnet_ctors[model_type](pretrained=True).cuda()
        model.fc = nn.Linear(model.fc.in_features, num_classes).cuda()
    elif model_type in densenet_ctors:
        model = densenet_ctors[model_type](pretrained=True).cuda()
        model.classifier = nn.Linear(model.classifier.in_features, num_classes).cuda()
    return model
コード例 #11
0
    def __init__(self, requires_grad=False, pretrained=True, num=18):
        """Wrap a torchvision ResNet and expose its stages as attributes.

        Args:
            requires_grad: kept for interface compatibility (not read here).
            pretrained: forwarded to the torchvision constructor.
            num: ResNet depth; one of 18, 34, 50, 101, 152.
        """
        super(resnet, self).__init__()
        constructors = {
            18: models.resnet18,
            34: models.resnet34,
            50: models.resnet50,
            101: models.resnet101,
            152: models.resnet152,
        }
        # Like the original if/elif chain, an unsupported depth simply leaves
        # self.net unset (failing later at attribute access).
        ctor = constructors.get(num)
        if ctor is not None:
            self.net = ctor(pretrained=pretrained)
        self.N_slices = 5

        self.conv1 = self.net.conv1
        self.bn1 = self.net.bn1
        self.relu = self.net.relu
        self.maxpool = self.net.maxpool
        self.layer1 = self.net.layer1
        self.layer2 = self.net.layer2
        self.layer3 = self.net.layer3
        self.layer4 = self.net.layer4
コード例 #12
0
 def __init__(self, pretrained=True, layers=[1, 2, 3, 4]):
     # Configure the base class with a ResNet-152 backbone, its per-stage
     # channel counts and downsampling factors, and the stages to expose.
     # NOTE(review): `layers` uses a mutable default list — safe only if the
     # superclass never mutates it; confirm against the base class.
     super().__init__(resnet152(pretrained=pretrained),
                      [64, 256, 512, 1024, 2048], [4, 4, 8, 16, 32], layers)
コード例 #13
0
    def __init__(self, backbone, expert_dims, use_ce, mimic_ce_dims, concat_experts, concat_mix_experts,
                 vlad_clusters, attr_fusion_name, attr_vocab_size, same_dim=512):
        """Multi-expert embedding model with a selectable image backbone.

        Args:
            backbone: 'resnet', 'densenet', or one of the pretrainedmodels
                architectures listed below; anything else raises ValueError.
            expert_dims: dict mapping modality name -> (input_dim, agg_dim).
            use_ce: enable the collaborative-experts reasoning pathway.
            mimic_ce_dims: ablation — CE-style dimension reduction only.
            concat_experts: ablation — concatenate expert features directly.
            concat_mix_experts: ablation — concatenate then mix via one GEU.
            vlad_clusters: dict of NetVLAD cluster counts ('keypoint', 'attr').
            attr_fusion_name: 'attrmlb' selects AttrMLB; any other value
                selects TIRG with this name.
            attr_vocab_size: vocabulary size for the attribute embedding.
            same_dim: common reduced dimensionality for experts.
        """
        super().__init__()

        modalities = list(expert_dims.keys())
        self.expert_dims = expert_dims
        self.modalities = modalities
        self.use_ce = use_ce
        self.mimic_ce_dims = mimic_ce_dims
        self.concat_experts = concat_experts
        self.concat_mix_experts = concat_mix_experts
        self.attr_fusion_name = attr_fusion_name
        self.backbone_name = backbone

        # Per-modality input and aggregated dimensions, in modality order.
        in_dims = [expert_dims[mod][0] for mod in modalities]
        agg_dims = [expert_dims[mod][1] for mod in modalities]
        use_bns = [True for modality in self.modalities]

        # Both CE and its mimic ablation reduce every expert to `same_dim`.
        if self.use_ce or self.mimic_ce_dims:
            dim_reducers = [ReduceDim(in_dim, same_dim) for in_dim in in_dims]
            self.video_dim_reduce = nn.ModuleList(dim_reducers)

        if self.use_ce:
            # Pairwise reasoning (g) and aggregation (f) networks of CE.
            self.g_reason_1 = nn.Linear(same_dim * 2, same_dim)
            self.g_reason_2 = nn.Linear(same_dim, same_dim)

            self.f_reason_1 = nn.Linear(same_dim, same_dim)
            self.f_reason_2 = nn.Linear(same_dim, same_dim)

            gated_vid_embds = [GatedEmbeddingUnitReasoning(same_dim) for _ in in_dims]

        elif self.mimic_ce_dims:  # ablation study
            gated_vid_embds = [MimicCEGatedEmbeddingUnit(same_dim, same_dim, use_bn=True) for _ in modalities]

        elif self.concat_mix_experts:  # ablation study
            # One gated unit over the concatenation of all experts.
            in_dim, out_dim = sum(in_dims), sum(agg_dims)
            gated_vid_embds = [GatedEmbeddingUnit(in_dim, out_dim, use_bn=True)]

        elif self.concat_experts:  # ablation study
            # Plain concatenation: no gated units at all.
            gated_vid_embds = []

        else:
            # Default: one gated embedding unit per modality.
            gated_vid_embds = [GatedEmbeddingUnit(in_dim, dim, use_bn) for
                               in_dim, dim, use_bn in zip(in_dims, agg_dims, use_bns)]

        self.video_GU = nn.ModuleList(gated_vid_embds)

        # Image backbone; torchvision models are truncated to their
        # convolutional trunks, pretrainedmodels ones are used whole.
        if backbone == 'resnet':
            resnet = models.resnet152(pretrained=True)
            modules = list(resnet.children())[:-2]
            self.backbone = nn.Sequential(*modules)
        elif backbone == 'densenet':
            densenet = models.densenet169(pretrained=True)
            modules = list(densenet.children())[:-1]
            self.backbone = nn.Sequential(*modules)
        elif backbone in ['inceptionresnetv2', 'pnasnet5large', 'nasnetalarge', 'senet154', 'polynet']:
            self.backbone = pretrainedmodels.__dict__[backbone](num_classes=1000, pretrained='imagenet')
        else:
            raise ValueError
        self.dropout = nn.Dropout(p=0.2)
        self.avg_pooling = nn.AdaptiveAvgPool2d((1, 1))

        # self.video_multi_encoding = VideoMultilevelEncoding(in_dim=in_dims[-1], out_dim=in_dims[-1])

        # Optional keypoint expert: EffNet features pooled by NetVLAD.
        if 'keypoint' in self.expert_dims.keys():
            self.effnet = EffNet()
            self.keypoint_pooling = NetVLAD(
                feature_size=512,
                cluster_size=vlad_clusters['keypoint'],
            )

        # Optional attribute experts: shared embedding, six NetVLAD poolers,
        # and a fusion module chosen by attr_fusion_name.
        if 'attr0' in self.expert_dims.keys():
            self.attr_embed = nn.Embedding(attr_vocab_size, 300, padding_idx=0)
            attr_pooling_list = [NetVLAD(feature_size=300, cluster_size=vlad_clusters['attr']) for _ in range(6)]
            self.attr_pooling = nn.ModuleList(attr_pooling_list)

            if attr_fusion_name == 'attrmlb':
                self.attr_fusion = AttrMLB()
            else:
                self.attr_fusion = TIRG(attr_fusion_name, embed_dim=same_dim)
コード例 #14
0
    def _get_model_and_layer(self, model_name, layer):
        """ Internal method for getting layer from model
        :param model_name: model name such as 'resnet-18'
        :param layer: layer as a string for resnet-18 or int for alexnet
        :returns: pytorch model, selected layer
        """
        # ResNet variants share one lookup scheme; only the constructor and
        # the default output width differ.
        resnet_variants = {
            'resnet-18': (models.resnet18, 512),
            'resnet-50': (models.resnet50, 2048),
            'resnet-101': (models.resnet101, 2048),
            'resnet-152': (models.resnet152, 2048),
        }

        if model_name in resnet_variants:
            constructor, default_size = resnet_variants[model_name]
            model = constructor(pretrained=True)
            if layer == 'default':
                layer = model._modules.get('avgpool')
                self.layer_output_size = default_size
            else:
                layer = model._modules.get(layer)
            return model, layer

        if model_name == 'alexnet':
            model = models.alexnet(pretrained=True)
            if layer == 'default':
                # Second-to-last classifier layer: the 4096-d fc.
                layer = model.classifier[-3]
                self.layer_output_size = 4096
            else:
                layer = model.classifier[-layer]
            return model, layer

        if model_name == 'inception-v3':
            model = models.inception_v3(pretrained=True)
            # Always taps the final fc layer, regardless of `layer`.
            layer = model.fc
            self.layer_output_size = 1000
            return model, layer

        raise KeyError('Model %s was not found' % model_name)
コード例 #15
0
from torch.autograd import Variable


# Target size for resizing inputs before feature extraction.
TARGET_IMAGE_SIZE = [448, 448]
# Preprocessing: resize, convert to tensor, then per-channel normalisation
# with the project-wide IMAGE_COLOR_MEAN / IMAGE_COLOR_STD constants.
data_transforms = transforms.Compose(
    [
        transforms.Resize(TARGET_IMAGE_SIZE),
        transforms.ToTensor(),
        transforms.Normalize(IMAGE_COLOR_MEAN, IMAGE_COLOR_STD),
    ]
)

use_cuda = torch.cuda.is_available()

# NOTE feat path "https://download.pytorch.org/models/resnet152-b121ed2d.pth"
# Module-level singleton: the pretrained network is loaded once at import
# time and shared; eval() freezes dropout/batch-norm behaviour.
RESNET152_MODEL = models.resnet152(pretrained=True)
RESNET152_MODEL.eval()

if use_cuda:
    RESNET152_MODEL = RESNET152_MODEL.cuda()


class ResNet152FeatModule(nn.Module):
    """Convolutional feature extractor: the shared pretrained ResNet-152
    with its final average-pool and fc layers stripped off."""

    def __init__(self):
        super().__init__()
        # Drop the last two children (avgpool, fc) to keep spatial features.
        trunk = list(RESNET152_MODEL.children())[:-2]
        self.feature_module = nn.Sequential(*trunk)

    def forward(self, x):
        return self.feature_module(x)
コード例 #16
0
        outputs = model(inputs)
        _, preds = torch.max(outputs.data, 1)

        for j in range(inputs.size()[0]):
            images_so_far += 1
            ax = plt.subplot(num_images // 2, 2, images_so_far)
            ax.axis('off')
            ax.set_title('predicted: {}'.format(class_names[preds[j]]))
            imshow(inputs.cpu().data[j])

            if images_so_far == num_images:
                return


# Build ResNet-152 (positional arg = pretrained) and resize the head to 80
# classes before loading the fine-tuned checkpoint.
model_ft = models.resnet152(True)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 80)

if use_gpu:
    model_ft = nn.DataParallel(model_ft, [0]).cuda()

# NOTE(review): the checkpoint is loaded after the optional DataParallel
# wrap; if it was saved from a DataParallel model, loading without use_gpu
# would fail on the 'module.' key prefix — confirm the intended setup.
model_ft.load_state_dict(torch.load('best_model_wts_resnet152.pkl'))
# torch.save(model_ft, 'model_pretrained_resnet152.pkl')


criterion = nn.CrossEntropyLoss()

# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# optimizer_ft = optim.Adam(model_ft.parameters(), lr=0.01)
コード例 #17
0
ファイル: train.py プロジェクト: xungeer29/kaggle_idesigner
def train():
    """Train the classifier selected by ``config.model`` on iDesigner data.

    Builds the backbone, optionally freezes its early layers, trains with
    focal loss (or OHEM / label smoothing), logs progress to ./log/log.txt
    and visdom, and saves best/latest checkpoints under ./checkpoints.
    """
    # model
    if config.model == 'ResNet18':
        backbone = models.resnet18(pretrained=True)
        model = ResNet18(backbone, num_classes=config.num_classes)
    elif config.model == 'ResNet34':
        backbone = models.resnet34(pretrained=True)
        model = ResNet34(backbone, num_classes=config.num_classes)
    elif config.model == 'ResNet50':
        backbone = models.resnet50(pretrained=True)
        model = ResNet50(backbone, num_classes=config.num_classes)
    elif config.model == 'ResNet101':
        backbone = models.resnet101(pretrained=True)
        model = ResNet101(backbone, num_classes=config.num_classes)
    elif config.model == 'ResNet152':
        backbone = models.resnet152(pretrained=True)
        model = ResNet152(backbone, num_classes=config.num_classes)
    elif config.model == 'se_resnet50':
        backbone = pretrainedmodels.__dict__['se_resnet50'](
            pretrained='imagenet')
        model = se_resnet50(backbone, num_classes=config.num_classes)
    else:
        print('ERROR: No model {}!!!'.format(config.model))
    # BUG FIX: was the Python 2 statement `print model`; the call form is
    # valid on both Python 2 and 3 (the rest of this function already uses it).
    print(model)
    # model = torch.nn.DataParallel(model)
    model.cuda()

    # freeze layers
    if config.freeze:
        for p in model.backbone.layer1.parameters():
            p.requires_grad = False
        for p in model.backbone.layer2.parameters():
            p.requires_grad = False
        for p in model.backbone.layer3.parameters():
            p.requires_grad = False
        #for p in model.backbone.layer4.parameters(): p.requires_grad = False

    # loss
    # criterion = nn.CrossEntropyLoss().cuda()
    criterion = FocalLoss(config.num_classes,
                          alpha=None,
                          gamma=2,
                          size_average=True)
    # criterion = LabelSmoothing(config.num_classes, 0, 0.1)

    # train data: augmentation plus a class-balancing weighted sampler
    transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(0.05, 0.05, 0.05),
        transforms.RandomRotation(10),
        transforms.Resize((config.width, config.height)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    dst_train = iDesignerDataset('./data/train.txt', transform=transform)
    weights = create_weights(dst_train, './data/label_list.txt')
    sampler = WeightedRandomSampler(weights,
                                    num_samples=len(dst_train),
                                    replacement=True)
    dataloader_train = DataLoader(dst_train,
                                  shuffle=False,
                                  batch_size=config.batch_size,
                                  num_workers=config.num_workers,
                                  sampler=sampler)

    # validation data
    transform = transforms.Compose([
        transforms.Resize((config.width, config.height)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    dst_valid = iDesignerDataset('./data/valid.txt', transform=transform)
    # BUG FIX: DataLoader requires an int batch size; `/` yields a float on
    # Python 3, so use floor division.
    dataloader_valid = DataLoader(dst_valid,
                                  shuffle=False,
                                  batch_size=config.batch_size // 2,
                                  num_workers=config.num_workers)

    # log
    if not os.path.exists('./log'):
        os.makedirs('./log')
    log = open('./log/log.txt', 'a')

    log.write('-' * 30 + '\n')
    log.write(
        'model:{}\nnum_classes:{}\nnum_epoch:{}\nim_width:{}\nim_height:{}\niter_smooth:{}\n'
        .format(config.model, config.num_classes, config.num_epochs,
                config.width, config.height, config.iter_smooth))

    # load checkpoint
    if config.resume:
        # BUG FIX: was the Python 2 statement `print '...'`.
        print('resume checkpoint...')
        model = torch.load(os.path.join('./checkpoints', config.checkpoint))

    # visdom
    vis = visdom.Visdom(env='kaggle_idesigner')

    # optimizer
    # optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.999), weight_decay=0.0002)
    # optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
    #                              lr=0.00001, betas=(0.9, 0.999), weight_decay=0.0002)
    # optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()),
    #                       lr=lr, momentum=1e-1, weight_decay=1e-4)

    # adjust lr
    # lr = half_lr(config.lr, epoch)
    # lr = step_lr(epoch)
    # lr_scheduler = torch.optim.lr_scheduler.
    # lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')
    # lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [40, 100, 150, 200], gamma=0.1)

    cudnn.benchmark = True

    # train
    num_batches = 0  # renamed from `sum` to avoid shadowing the builtin
    train_loss_sum = 0
    train_top1_sum = 0
    max_val_top1_acc = 0
    iters = 0
    for epoch in range(config.num_epochs):
        ep_start = time.time()
        # NOTE(review): `lr` is computed but the optimizer below uses a
        # hard-coded lr=0.01 — confirm which is intended.
        lr = step_lr(epoch)
        optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                            model.parameters()),
                                     lr=0.01,
                                     betas=(0.9, 0.999),
                                     weight_decay=0.0002)
        model.train()
        for i, (ims, label) in enumerate(dataloader_train):
            input = Variable(ims).cuda()
            target = Variable(label).cuda().long()

            output = model(input)

            if config.smooth_label:
                smoothed_target = label_smoothing(output, target).cuda()
                loss = F.kl_div(output, smoothed_target).cuda()

            # OHEM: online hard example mining
            if not config.OHEM and not config.smooth_label:
                loss = criterion(output, target)
            elif config.OHEM:
                if epoch < 50:
                    loss = criterion(output, target)
                else:
                    # Keep only the hardest examples' losses.
                    loss = F.cross_entropy(output, target, reduce=False).cuda()
                    OHEM, _ = loss.topk(int(config.num_classes *
                                            config.OHEM_ratio),
                                        dim=0,
                                        largest=True,
                                        sorted=True)
                    loss = OHEM.mean()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            #lr_scheduler.step()

            acc = accuracy(output.data, target.data, topk=(1, ))
            train_loss_sum += loss.data.cpu().numpy()
            train_top1_sum += acc[0]
            num_batches += 1

            if (i + 1) % config.iter_smooth == 0:
                iters += 1
                vis.line(X=torch.FloatTensor([iters]),
                         Y=torch.FloatTensor([train_loss_sum / num_batches]),
                         win='train_loss',
                         update='append')
                vis.line(X=torch.FloatTensor([iters]),
                         Y=torch.FloatTensor([train_top1_sum / num_batches]),
                         win='train_acc_top1',
                         update='append')

                print('Epoch [%d/%d], Iter [%d/%d], Loss: %.4f, top1: %.4f' %
                      (epoch + 1, config.num_epochs, i + 1,
                       len(dst_train) // config.batch_size,
                       train_loss_sum / num_batches,
                       train_top1_sum / num_batches))
                log.write(
                    'Epoch [%d/%d], Iter [%d/%d], Loss: %.4f, top1: %.4f\n' %
                    (epoch + 1, config.num_epochs, i + 1,
                     len(dst_train) // config.batch_size,
                     train_loss_sum / num_batches,
                     train_top1_sum / num_batches))
                num_batches = 0
                train_loss_sum = 0
                train_top1_sum = 0

        epoch_time = (time.time() - ep_start) / 60.
        if epoch % 1 == 0 and epoch < config.num_epochs:
            # eval
            val_time_start = time.time()
            val_loss, val_top1 = eval(model, dataloader_valid, criterion)
            val_time = (time.time() - val_time_start) / 60.

            vis.line(X=torch.FloatTensor([epoch]),
                     Y=torch.FloatTensor([val_loss]),
                     win='val_loss',
                     update='append')
            vis.line(X=torch.FloatTensor([epoch]),
                     Y=torch.FloatTensor([val_top1]),
                     win='val_acc_top1',
                     update='append')

            print(
                'Epoch [%d/%d], Val_Loss: %.4f, Val_top1: %.4f, best_top1: %.4f'
                % (epoch + 1, config.num_epochs, val_loss, val_top1,
                   max_val_top1_acc))
            print('epoch time: {} min'.format(epoch_time))
            if val_top1[0].data > max_val_top1_acc:
                max_val_top1_acc = val_top1[0].data
                print('Taking top1 snapshot...')
                if not os.path.exists('./checkpoints'):
                    os.makedirs('./checkpoints')
                torch.save(model, '{}/{}.pth'.format('checkpoints',
                                                     config.model))

            log.write(
                'Epoch [%d/%d], Val_Loss: %.4f, Val_top1: %.4f, best_top1: %.4f\n'
                % (epoch + 1, config.num_epochs, val_loss, val_top1,
                   max_val_top1_acc))
        torch.save(model, '{}/{}_last.pth'.format('checkpoints', config.model))

    log.write('-' * 30 + '\n')
    log.close()
コード例 #18
0
ファイル: build_model.py プロジェクト: chan8616/PoAI
def get_model(model_name, in_ch, num_classes, pretrained):
    """Build a torchvision classifier and adapt it to the task at hand.

    Args:
        model_name: human-readable architecture name (see ``builders`` below).
        in_ch: number of input channels; 1 (grayscale) or 3 (RGB).
        num_classes: size of the replacement classification head.
        pretrained: load ImageNet weights; when True (and the input is RGB)
            the backbone is frozen and only the new head is trainable.

    Returns:
        The adapted ``torch.nn.Module``.

    Raises:
        ValueError: for an unsupported ``in_ch`` or unknown ``model_name``.
    """
    if in_ch == 1:
        gray = True
    elif in_ch == 3:
        gray = False
    else:
        # The original code left `gray` undefined here and crashed later
        # with a NameError.
        raise ValueError('in_ch must be 1 (grayscale) or 3 (RGB), got %r' % in_ch)

    # Dispatch table: display name -> torchvision constructor.
    builders = {
        'SqueezeNet 1.0': models.squeezenet1_0,
        'SqueezeNet 1.1': models.squeezenet1_1,
        'VGG11': models.vgg11,
        'VGG11 with batch normalization': models.vgg11_bn,
        'VGG13': models.vgg13,
        'VGG13 with batch normalization': models.vgg13_bn,
        'VGG16': models.vgg16,
        'VGG16 with batch normalization': models.vgg16_bn,
        'VGG19': models.vgg19,
        'VGG19 with batch normalization': models.vgg19_bn,
        'ResNet-18': models.resnet18,
        'ResNet-34': models.resnet34,
        'ResNet-50': models.resnet50,
        'ResNet-101': models.resnet101,
        'ResNet-152': models.resnet152,
        'DenseNet-121': models.densenet121,
        'DenseNet-161': models.densenet161,
        'DenseNet-169': models.densenet169,
        'DenseNet-201': models.densenet201,
        'AlexNet': models.alexnet,
        'Inception v3': models.inception_v3,
        'GoogLeNet': models.googlenet,
        'ShuffleNet v2': models.shufflenet_v2_x1_0,
        'MobileNet v2': models.mobilenet_v2,
        'MNASNet 1.0': models.mnasnet1_0,
        'ResNeXt-50-32x4d': models.resnext50_32x4d,
        'ResNeXt-101-32x8d': models.resnext101_32x8d,
        'Wide ResNet-50-2': models.wide_resnet50_2,
        'Wide ResNet-101-2': models.wide_resnet101_2,
    }
    if model_name not in builders:
        # Previously `model` was simply left unbound for unknown names.
        raise ValueError('unknown model name: %r' % model_name)
    model = builders[model_name](pretrained=pretrained)

    # Architectures with a `features` trunk and a `classifier` head.
    if ('VGG' in model_name) or ('Dense' in model_name) or (
            'Squeeze' in model_name) or (model_name in [
                'AlexNet', 'MobileNet v2', 'MNASNet 1.0'
            ]):
        if pretrained and not gray:  # layer freeze (unless gray-scale image)
            for param in model.features.parameters():
                param.requires_grad = False
        if gray:
            # Swap the stem conv for a 1-channel version; its weights are
            # re-initialized, so the whole net stays trainable in this case.
            out_channels = model.features[0].out_channels
            kernel_size = model.features[0].kernel_size[0]
            stride = model.features[0].stride[0]
            padding = model.features[0].padding[0]
            model.features[0] = nn.Conv2d(1, out_channels, kernel_size, stride,
                                          padding)

        if 'Squeeze' in model_name:
            in_channels = model.classifier[1].in_channels
            # SqueezeNet classifies with a 1x1 conv. The original call omitted
            # the mandatory kernel_size argument and raised a TypeError.
            model.classifier[1] = nn.Conv2d(in_channels, num_classes,
                                            kernel_size=1)
        else:
            in_features = model.classifier[-1].in_features
            model.classifier[-1] = nn.Linear(in_features, num_classes)

    # Architectures with `conv1` stem and `fc` head (ResNet-style).
    else:
        if pretrained and not gray:  # layer freeze (unless gray-scale image)
            for param in model.parameters():
                param.requires_grad = False
        # NOTE(review): models such as Inception v3 expose a different stem
        # attribute than `conv1`; grayscale adaptation assumes ResNet layout.
        if gray:
            out_channels = model.conv1.out_channels
            kernel_size = model.conv1.kernel_size[0]
            stride = model.conv1.stride[0]
            padding = model.conv1.padding[0]
            model.conv1 = nn.Conv2d(1, out_channels, kernel_size, stride,
                                    padding)
        in_features = model.fc.in_features
        model.fc = nn.Linear(in_features, num_classes)

    print('\nLoad model :', model_name)
    print(model, '\n')
    return model
コード例 #19
0
 def __init__(self):
     """Build a ResNet-152 feature extractor: the convolutional trunk only."""
     super(FeaturesExtractor, self).__init__()
     backbone = models.resnet152(pretrained=True)
     # Drop the final two children (avgpool and fc), keep the conv trunk.
     self.features = nn.Sequential(*list(backbone.children())[:-2])
コード例 #20
0
ファイル: extract_model.py プロジェクト: Wang-Yujue/MMdnn
#  Licensed under the MIT License. See License.txt in the project root for license information.
#----------------------------------------------------------------------------------------------

import argparse
import os
from six import text_type as _text_type
from mmdnn.conversion.examples.imagenet_test import TestKit
import torch
import torchvision.models as models


# Factory table: CLI network name -> zero-argument constructor returning the
# corresponding ImageNet-pretrained torchvision model. Lambdas defer the
# (slow) weight download until the selected model is actually built.
NETWORKS_MAP = {
    'inception_v3'      : lambda : models.inception_v3(pretrained=True),
    'vgg16'             : lambda : models.vgg16(pretrained=True),
    'vgg19'             : lambda : models.vgg19(pretrained=True),
    'resnet152'         : lambda : models.resnet152(pretrained=True),
    'densenet'          : lambda : models.densenet201(pretrained=True),
    'squeezenet'        : lambda : models.squeezenet1_1(pretrained=True)
}


def _main():
    """Parse the command line: required network name plus optional image path."""
    arg_parser = argparse.ArgumentParser()

    # Restrict -n/--network to the models we know how to construct.
    arg_parser.add_argument('-n', '--network',
                            type=_text_type, help='Model Type', required=True,
                            choices=NETWORKS_MAP.keys())

    arg_parser.add_argument('-i', '--image', type=_text_type,
                            help='Test Image Path')

    args = arg_parser.parse_args()
コード例 #21
0
import torch
import torch.nn as nn
import time
import torchvision.models as models
import os
import numpy as np
from efficientnet_pytorch import EfficientNet

print("loading backbone network")
# Wall-clock timer for the (potentially slow) pretrained-weight download.
start = time.time()
#model=EfficientNet.from_pretrained("efficientnet-b0")
# ImageNet-pretrained ResNet-152 cast to float32; used below as the CNN trunk.
model = models.resnet152(pretrained=True, progress=True).float()
#model=models.resnet50(pretrained=True,progress=True).float()
print("loading done")


class Flatten(nn.Module):
    """Reshape a batched tensor to 2-D: (batch, everything-else)."""

    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, x):
        # Keep the batch dimension, collapse all remaining dims into one.
        batch = x.size(0)
        return x.view(batch, -1)


class cnn_lstm(nn.Module):
    def __init__(self):
        super(cnn_lstm, self).__init__()
        #self.pretrain =model#####Efficientnet#####
        self.pretrain = nn.Sequential(
            *list(model.children()))[0:7]  ### Resnet #####
        self.prelstm = nn.Sequential(
コード例 #22
0
ファイル: model.py プロジェクト: bairw660606/VSGNet
    def __init__(self):
        """Build the VSGNet heads (117 HOI classes).

        NOTE(review): this rewrite restores a consistent 4-space indentation —
        the original mixed tabs and spaces and contained stray non-code text,
        which is a SyntaxError in Python 3 — without changing any layer
        definition. ``lin_size`` is a module-level constant defined elsewhere.
        """
        super(VSGNet, self).__init__()

        model = models.resnet152(pretrained=True)
        self.flat = Flatten()

        # Shared ResNet-152 trunk through layer3 (children 0..6) -> 1024 ch.
        self.Conv_pretrain = nn.Sequential(*list(model.children())[0:7])

        # Convolutional blocks for human, object and context features:
        # a 1024 -> 512 -> 512 -> 1024 stack of 1x1 convs with BatchNorm.
        def _branch():
            return nn.Sequential(
                nn.Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False),
                nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
                nn.Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1), bias=False),
                nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
                nn.Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False),
                nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
                nn.ReLU(inplace=False),
            )

        self.Conv_people = _branch()
        self.Conv_objects = _branch()
        self.Conv_context = _branch()

        # Attention feature model over the 2-channel spatial map.
        self.conv_sp_map = nn.Sequential(
            nn.Conv2d(2, 64, kernel_size=(5, 5)),
            nn.MaxPool2d(kernel_size=(2, 2)),
            nn.Conv2d(64, 32, kernel_size=(5, 5)),
            nn.MaxPool2d(kernel_size=(2, 2)),
            nn.AvgPool2d((13, 13), padding=0, stride=(1, 1)),
        )
        self.spmap_up = nn.Sequential(
            nn.Linear(32, 512),
            nn.ReLU(),
        )

        # Prediction head for the attention features.
        self.lin_spmap_tail = nn.Sequential(
            nn.Linear(512, 117),
        )

        # Graph model basic structure: learned people<->object projections.
        self.peo_to_obj_w = nn.Sequential(
            nn.Linear(1024, 1024),
            nn.ReLU(),
        )
        self.obj_to_peo_w = nn.Sequential(
            nn.Linear(1024, 1024),
            nn.ReLU(),
        )

        # Interaction (binary) prediction model for visual features.
        self.lin_single_head = nn.Sequential(
            nn.Linear(lin_size * 3 + 4, 1024),
            nn.Linear(1024, 512),
            nn.ReLU(),
        )
        self.lin_single_tail = nn.Sequential(
            nn.Linear(512, 1),
        )

        # Multi-class prediction model for visual features.
        self.lin_visual_head = nn.Sequential(
            nn.Linear(lin_size * 3 + 4, 1024),
            nn.Linear(1024, 512),
            nn.ReLU(),
        )
        self.lin_visual_tail = nn.Sequential(
            nn.Linear(512, 117),
        )

        # Multi-class prediction model for graph features.
        self.lin_graph_head = nn.Sequential(
            nn.Linear(lin_size * 2, 1024),
            nn.Linear(1024, 512),
            nn.ReLU(),
        )
        self.lin_graph_tail = nn.Sequential(
            nn.Linear(512, 117),
        )

        self.sigmoid = nn.Sigmoid()
コード例 #23
0
    def __init__(self,
                 model_name='vgg',
                 weight_path=None,
                 is_bn=True,
                 is_conv=True,
                 is_deconv=False,
                 trainable=False,
                 is_summary=True,
                 use_cuda=True,
                 last_layer=''):
        """Build the feature-extraction backbone.

        Args:
            model_name: 'vgg', 'resnet101', 'resnet152' (pretrained torchvision
                backbones truncated at ``last_layer``) or 'self-defined' (a
                conv/deconv stack configured by the class attributes
                ``n_conv_channels`` / ``n_deconv_channels`` etc.).
            weight_path: optional checkpoint path; keys are loaded after
                stripping a leading 'model.' prefix.
            is_bn: insert BatchNorm layers in the self-defined model.
            is_conv: include the convolutional stack (self-defined model).
            is_deconv: include the deconv stack plus a final sigmoid conv
                (self-defined model); also makes checkpoint loading strict.
            trainable: when False, all parameters are frozen.
            is_summary: print a model summary for input size (1, 2, 1500).
            use_cuda: move the model to the GPU.
            last_layer: last pretrained layer to keep; defaults to 'pool4'
                for vgg and 'layer4' for the resnets.
        """
        super(FeatureExtraction, self).__init__()

        main_log.debug('create feature extraction model %s ...' % model_name)

        if model_name == 'vgg':
            self.model = models.vgg16(pretrained=True)
            # keep feature extraction network up to indicated layer
            vgg_feature_layers = [
                'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
                'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
                'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3', 'conv4_1',
                'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'pool4',
                'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
                'relu5_3', 'pool5'
            ]
            if last_layer == '':
                last_layer = 'pool4'
            last_layer_idx = vgg_feature_layers.index(last_layer)
            self.model = nn.Sequential(
                *list(self.model.features.children())[:last_layer_idx + 1])
        elif model_name == 'resnet101':
            self.model = models.resnet101(pretrained=True)
            # Named top-level stages of a torchvision ResNet, in order.
            resnet_feature_layers = [
                'conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2',
                'layer3', 'layer4'
            ]
            if last_layer == '':
                last_layer = 'layer4'

            last_layer_idx = resnet_feature_layers.index(last_layer)

            resnet_module_list = [
                self.model.conv1, self.model.bn1, self.model.relu,
                self.model.maxpool, self.model.layer1, self.model.layer2,
                self.model.layer3, self.model.layer4
            ]

            # Truncate: keep everything up to and including last_layer.
            self.model = nn.Sequential(*resnet_module_list[:last_layer_idx +
                                                           1])
        elif model_name == 'resnet152':
            # change the input layer ??
            self.model = models.resnet152(pretrained=True)
            resnet_feature_layers = [
                'conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2',
                'layer3', 'layer4'
            ]
            if last_layer == '':
                last_layer = 'layer4'

            last_layer_idx = resnet_feature_layers.index(last_layer)

            resnet_module_list = [
                self.model.conv1, self.model.bn1, self.model.relu,
                self.model.maxpool, self.model.layer1, self.model.layer2,
                self.model.layer3, self.model.layer4
            ]

            self.model = nn.Sequential(*resnet_module_list[:last_layer_idx +
                                                           1])
        elif model_name == 'self-defined':
            # Hand-built stack: optional input BN, then conv blocks, then
            # (optionally) deconv blocks ending in a 1-channel sigmoid conv.
            self.model = nn.Sequential()

            layer_counter = 0

            if is_bn:
                self.model.add_module(
                    'bn_' + str(layer_counter),
                    nn.BatchNorm2d(FeatureExtraction.n_conv_channels[0]))

            layer_counter += 1

            if is_conv:
                for i in range(FeatureExtraction.n_conv_layer):
                    # self.model.add_module(nn.Conv2d(FeatureExtraction.n_conv_layer[0], FeatureExtraction.n_conv_layer[1],
                    # kernel_size=(2, 2), strides=(1, 1), )
                    self.model.add_module(
                        'conv_' + str(layer_counter),
                        conv2d_same_padding(
                            FeatureExtraction.n_conv_channels[i],
                            FeatureExtraction.n_conv_channels[i + 1],
                            FeatureExtraction.kernel_size,
                            height=FeatureExtraction.height,
                            width=FeatureExtraction.width))

                    if is_bn:
                        self.model.add_module(
                            'bn_' + str(layer_counter),
                            nn.BatchNorm2d(
                                FeatureExtraction.n_conv_channels[i + 1]))

                    self.model.add_module('act_' + str(layer_counter),
                                          nn.ReLU())

                    layer_counter += 1

            if is_deconv:
                for i in range(FeatureExtraction.n_deconv_layer):
                    self.model.add_module(
                        'deconv_' + str(layer_counter),
                        conv2d_transpose_same_padding(
                            FeatureExtraction.n_deconv_channels[i],
                            FeatureExtraction.n_deconv_channels[i + 1],
                            FeatureExtraction.kernel_size,
                            height=FeatureExtraction.height,
                            width=FeatureExtraction.width))

                    if is_bn:
                        self.model.add_module(
                            'bn_' + str(layer_counter),
                            nn.BatchNorm2d(
                                FeatureExtraction.n_deconv_channels[i + 1]))

                    self.model.add_module('act_' + str(layer_counter),
                                          nn.ReLU())

                    layer_counter += 1

                # Final 1-channel projection squashed through a sigmoid.
                self.model.add_module(
                    'conv_' + str(layer_counter),
                    conv2d_same_padding(FeatureExtraction.n_deconv_channels[
                        FeatureExtraction.n_deconv_layer],
                                        1, (2, 1),
                                        height=FeatureExtraction.height,
                                        width=FeatureExtraction.width))

                if is_bn:
                    self.model.add_module('bn_' + str(layer_counter),
                                          nn.BatchNorm2d(1))

                self.model.add_module('act_' + str(layer_counter),
                                      nn.Sigmoid())
        else:
            main_log.error(
                'Feature_extraction_model build error: unexpected method = %s'
                % model_name)
            return

        # load the pre-trained weights
        if weight_path and os.path.isfile(weight_path):
            main_log.info('model weight exists')

            # map_location keeps tensors on CPU so loading works without a GPU.
            checkpoint = torch.load(weight_path,
                                    map_location=lambda storage, loc: storage)
            # Strip the 'model.' prefix the training wrapper prepends to keys.
            checkpoint = OrderedDict([(k.replace('model.', ''), v)
                                      for k, v in checkpoint.items()])
            # self.model.load_state_dict(checkpoint, strict=False)
            if is_deconv:
                self.model.load_state_dict(checkpoint, strict=True)
            else:
                self.model.load_state_dict(checkpoint, strict=False)

        # freeze parameters
        if not trainable:
            for param in self.model.parameters():
                param.requires_grad = False

        # move to GPU
        if use_cuda:
            self.model.cuda()
            # self.model = nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count()))
            # cudnn.benchmark = True

        # summary
        if is_summary:
            # input_size = (C, H, W)
            # summary(self.model, input_size=(1, 512, 512))
            summary(self.model,
                    input_size=(1, 2, 1500),
                    dtype=torch.get_default_dtype())

        main_log.debug('creating feature extraction model is completed')
コード例 #24
0
ファイル: backbones.py プロジェクト: lzcn/torchutils
def resnet152(pretrained=True, **kwargs):
    """Return a ResNet-152 feature backbone and its feature dimensionality.

    The classification head is replaced with ``nn.Identity`` so the network
    emits the 2048-d pooled features directly.
    """
    backbone = models.resnet152(pretrained, **kwargs)
    backbone.fc = nn.Identity()
    return backbone, 2048
コード例 #25
0
        [[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0], [0, -1, 0], [0, 0, -1]],
        dtype=np.float)
    set_base_poses = torch.from_numpy(set_base_poses.T).float()
    print('Base poses set to unit vectors')
elif base_poses == 'gaussian':
    set_base_poses = np.random.normal(size=(3, feature_dim))
    set_base_poses = torch.from_numpy(set_base_poses).float()
    print('Base poses sampled from gaussian')
else:
    print('Standard initialization for base poses')
if backbone_model == 'ResNet-34':
    feature_extractor = pretrainedmodels.resnet34(pretrained=True)
elif backbone_model == 'ResNet-101':
    feature_extractor = pretrainedmodels.resnet101(pretrained=True)
elif backbone_model == 'ResNet-152':
    feature_extractor = pretrainedmodels.resnet152(pretrained=True)
#elif backbone_model == 'ResNext-101':
#    feature_extractor = pretrainedmodels.resnext101_32x8d(pretrained=True)
elif backbone_model == 'InceptionV3':
    feature_extractor = pretrainedmodels.inception_v3(pretrained=True)
else:
    raise NotImplementedError('Required model not implemented yet')
print('Use {:s} as backbone'.format(backbone_model))
posenet = PoseNet(feature_extractor,
                  droprate=dropout,
                  pretrained=True,
                  filter_nans=(args.model == 'mapnet++'),
                  feat_dim=feature_dim,
                  freeze_feature_extraction=freeze,
                  activation_function=af,
                  set_base_poses=set_base_poses)
コード例 #26
0
def main():
    """Fine-tune a pretrained 'big' ResNet-18 on 4 classes (or resume it from
    a checkpoint), validate it, then train the 'small' student model.

    Relies on module-level ``args``/``parser``, ``ori_path``,
    ``get_datasets``, ``validate`` and ``train`` defined elsewhere in the file.
    """
    global args
    args = parser.parse_args()

    BIG_TRAIN_PATH = os.path.join(ori_path, 'data4/train/')
    # BUG FIX: os.path.join discards every component before an absolute one,
    # so the original '/train/' argument made this resolve to just '/train/'.
    SMALL_TRAIN_PATH = os.path.join(ori_path, args.data, 'train/')
    #TRAIN_PATH = "/lfs/raiders3/1/ddkang/imagenet/ilsvrc2012/ILSVRC2012_img_train"
    BIG_VAL_PATH = os.path.join(ori_path, 'data4/val/')
    SMALL_VAL_PATH = os.path.join(ori_path, args.data, 'val/')
    #VAL_PATH = "/lfs/raiders3/1/ddkang/imagenet/ilsvrc2012/ILSVRC2012_img_val"

    # Lazy constructors: only the selected backbone is downloaded/built
    # (the original eagerly instantiated all five pretrained ResNets).
    pytorch_models = {
        'resnet18': lambda: models.resnet18(pretrained=True),
        'resnet34': lambda: models.resnet34(pretrained=True),
        'resnet50': lambda: models.resnet50(pretrained=True),
        'resnet101': lambda: models.resnet101(pretrained=True),
        'resnet152': lambda: models.resnet152(pretrained=True)
    }

    model_params = [('trn2', []), ('trn4', [1]), ('trn6', [1, 1]),
                    ('trn8', [1, 1, 1]), ('trn10', [1, 1, 1, 1]),
                    ('trn18', [2, 2, 2, 2]), ('trn34', [3, 4, 6, 3])]
    name_to_params = dict(model_params)

    big_model = pytorch_models['resnet18']()

    # Freeze the teacher's backbone; only its new fc head is trained below.
    for p in big_model.parameters():
        p.requires_grad = False
        p.cuda(args.gpu)

    if args.model.startswith('trn'):
        small_model = pytorch_resnet.rn_builder(name_to_params[args.model],
                                                num_classes=4,
                                                conv1_size=3,
                                                conv1_pad=1,
                                                nbf=16,
                                                downsample_start=False)
    elif args.model.startswith('lenet'):
        #small_model = lenet.lenet_builder()
        pass
    else:
        small_model = models.__dict__[args.model]()

    if args.gpu is not None:
        # Single-GPU path: move models and swap in 4-class heads.
        big_model = big_model.cuda(args.gpu)
        num_ftrs = big_model.fc.in_features
        big_model.fc = nn.Linear(num_ftrs, 4)

        if args.model.startswith('alexnet') or args.model.startswith('vgg'):
            small_model = small_model.cuda(args.gpu)
            small_model.cuda()
            num_ftrs = small_model.classifier[6].in_features
            small_model.classifier[6] = nn.Linear(num_ftrs, 4)
        elif args.model.startswith('resnet'):
            small_model = small_model.cuda(args.gpu)
            small_model.cuda()
            num_ftrs = small_model.fc.in_features
            small_model.fc = nn.Linear(num_ftrs, 4)
        else:
            small_model = small_model.cuda(args.gpu)

    else:
        # Multi-GPU path: wrap in DataParallel before swapping the heads.
        big_model = torch.nn.DataParallel(big_model).cuda()
        num_ftrs = big_model.module.fc.in_features
        big_model.module.fc = nn.Linear(num_ftrs, 4)

        if args.model.startswith('alexnet') or args.model.startswith('vgg'):
            small_model.features = torch.nn.DataParallel(small_model.features)
            small_model.cuda()
            num_ftrs = small_model.classifier[6].in_features
            small_model.classifier[6] = nn.Linear(num_ftrs, 4)
        elif args.model.startswith('resnet'):
            small_model = small_model.cuda(args.gpu)
            small_model.cuda()
            num_ftrs = small_model.fc.in_features
            small_model.fc = nn.Linear(num_ftrs, 4)
        else:
            small_model = torch.nn.DataParallel(small_model).cuda()

    print("architecture of small_model:")
    print(small_model)

    ##################
    #fine tune big model for 4 classes
    train_loader, val_loader = get_datasets()  #train_fnames, val_fnames)
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)

    if args.resume is not None:
        checkpoint_path = os.path.join(ori_path, 'bigmodel.tar')
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            checkpoint_path), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(checkpoint_path).get('state_dict')

        if args.gpu is not None:
            # Strip the DataParallel "module." prefix from each key.
            new_checkpoint = OrderedDict()
            for k, v in checkpoint.items():
                name = k.replace("module.", "")
                new_checkpoint[name] = v

            big_model.load_state_dict(new_checkpoint)
            big_model.cuda(args.gpu)

        else:
            big_model.load_state_dict(checkpoint)
            big_model.cuda(args.gpu)

    else:
        # No checkpoint: fine-tune the teacher head from scratch.
        optimizer = optim.Adam(big_model.parameters(), lr=0.001)

        big_model.train(True)
        big_model.cuda(args.gpu)
        for epoch in range(0, args.epochs):
            print("===epoc===%d" % epoch)
            for i, (data, y) in enumerate(train_loader):
                data = Variable(data, requires_grad=True)
                #y=Variable(y,requires_grad=True)

                #if args.gpu is not None:
                data = data.cuda(args.gpu, non_blocking=True)
                y = y.cuda(args.gpu, non_blocking=True)

                out = big_model(data)
                loss = criterion(out, y)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                print('loss:', loss, loss.item())
        big_model.train(False)
        torch.save({'state_dict': big_model.state_dict()}, 'bigmodel.tar')

    test_acc0 = validate(val_loader, big_model, criterion)

    ##################

    train(big_model, small_model, args)
コード例 #27
0
ファイル: main.py プロジェクト: gitting-guud/MVA
# Validation loader over the ImageFolder dataset (no shuffling for eval).
val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
    args.data + '/val_images', transform=data_transforms['val']),
                                         batch_size=args.batch_size,
                                         shuffle=False,
                                         num_workers=0)

# Neural network and optimizer
# We define neural net in model.py so that it can be reused by the evaluate.py script
#from model import Net
#model = Net()
from torchvision.models import resnet34
from torchvision.models import resnet18
from torchvision.models import resnet152
from torchvision.models import vgg16

model = resnet152(pretrained=True)
#model = resnet34(pretrained=True)
#model = vgg16(pretrained=True)
ct = 0
# Freeze the first 7 top-level children of ResNet-152 (conv1, bn1, relu,
# maxpool, layer1-3); only layer4, avgpool and the new fc stay trainable.
for name, child in model.named_children():
    ct += 1
    if ct <= 7:
        for name2, params in child.named_parameters():
            params.requires_grad = False
# Replace the 1000-class ImageNet head with a 20-class linear layer.
model.fc = nn.Linear(2048, 20)

if use_cuda:
    print('Using GPU')
    model.cuda()
else:
    print('Using CPU')
コード例 #28
0
    def __init__(self, num_classes):
        """Build the segmentation head.

        Combines a ResNet-152 stem, an atrous ResNet backbone, 1x1 lateral
        convs reducing each backbone stage to 256 channels, a bank of dilated
        3x3 convs for multi-scale context, and a 21-class output conv.

        Args:
            num_classes: number of target classes (stored on the instance;
                note the output convs below are hard-wired to 21 channels).
        """
        super(extension_1, self).__init__()

        self.num_classes = num_classes

        resnet = models.resnet152(pretrained=True)

        # Project-local feature-fusion module over 256-channel maps.
        self.feature_fuse = fusion(256)
        self.conv = nn.Conv2d(2048,
                              256,
                              kernel_size=3,
                              stride=1,
                              dilation=1,
                              padding=1,
                              bias=False)
        # Reuse the pretrained ResNet stem conv for the RGB input.
        self.conv1 = resnet.conv1
        self.conv2 = nn.Conv2d(3,
                               256,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               dilation=1)

        # 1x1 lateral convs: reduce each backbone stage (2048/1024/512/256
        # channels) to a common 256-channel width.
        self.conv3 = nn.Conv2d(2048,
                               256,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               dilation=1)
        self.conv4 = nn.Conv2d(1024,
                               256,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               dilation=1)
        self.conv5 = nn.Conv2d(512,
                               256,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               dilation=1)
        self.conv6 = nn.Conv2d(256,
                               256,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               dilation=1)

        # Dilated 3x3 convs (rates 3, 5, 7, 11, 13): multi-scale context with
        # padding chosen to keep the spatial size unchanged.
        self.conv7 = nn.Conv2d(256,
                               256,
                               kernel_size=3,
                               stride=1,
                               padding=3,
                               dilation=3)
        self.conv8 = nn.Conv2d(256,
                               256,
                               kernel_size=3,
                               stride=1,
                               padding=5,
                               dilation=5)
        self.conv9 = nn.Conv2d(256,
                               256,
                               kernel_size=3,
                               stride=1,
                               padding=7,
                               dilation=7)
        self.conv10 = nn.Conv2d(256,
                                256,
                                kernel_size=3,
                                stride=1,
                                padding=11,
                                dilation=11)
        self.conv11 = nn.Conv2d(256,
                                256,
                                kernel_size=3,
                                stride=1,
                                padding=13,
                                dilation=13)

        # Fuse three concatenated 256-channel branches (768 in) back to 256.
        self.conv12 = nn.Conv2d(768,
                                256,
                                kernel_size=1,
                                stride=1,
                                padding=0,
                                dilation=1)

        # Per-pixel class scores (hard-coded to 21 = PASCAL VOC classes).
        self.conv_end = nn.Conv2d(256,
                                  21,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0,
                                  dilation=1)
        self.fc2 = nn.Linear(256, 21)

        # self.bn1 = nn.BatchNorm2d(256)
        # GroupNorm is a project-local normalization over 256 channels.
        self.bn1 = GroupNorm(256)
        self.relu1 = nn.ReLU(inplace=True)

        # Dilated-convolution ResNet backbone (project-local).
        self.good_resnet = Atrous_ResNet_features(pretrained=True)
        self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
                                             nn.Conv2d(256, 256, 1, stride=1))
        self.dropout = torch.nn.Dropout(0.1)
コード例 #29
0
def resnet_truncated(pretrained=True):
    """Return a ResNet-152 truncated after ``layer3``.

    ``children()`` of a torchvision ResNet yields, in order: conv1, bn1,
    relu, maxpool, layer1..layer4, avgpool, fc — so ``[:-3]`` drops
    layer4, avgpool and the fc head, leaving a convolutional feature
    extractor.

    Args:
        pretrained: load ImageNet weights when True (default preserves the
            original hard-coded behavior).

    Returns:
        nn.Sequential containing the retained ResNet-152 stages.
    """
    model = models.resnet152(pretrained=pretrained)
    model = nn.Sequential(*list(model.children())[:-3])
    return model
コード例 #30
0
import torchvision.models as models
from torchvision import models


import torch.nn as nn
import torch

# Build a headless ResNet-152 and trace the intermediate tensor shapes.
resnet = models.resnet152(pretrained=False)
modules = list(resnet.children())[:-1]      # delete the last fc layer.

resnet_1 = nn.Sequential(*modules)

# BUG FIX: torch.Tensor(1, 3, 300, 300) returns *uninitialized* memory
# (arbitrary values, possibly NaN/inf). Use randn for a well-defined
# dummy input instead.
x = torch.randn(1, 3, 300, 300)

with torch.no_grad():  # inference only — skip autograd bookkeeping
    output = resnet_1(x)

    # Re-run layer by layer to print each intermediate feature-map size.
    for layer in resnet_1:
        x = layer(x)
        print(x.size())


コード例 #31
0
def main():
    """Fine-tune ResNet-152 on an ImageFolder dataset and save a checkpoint.

    All hyper-parameters come from the module-level ``config`` object.
    Optionally resumes from ``config.checkpoint/final.pth.tar`` and writes
    the final weights back to that same file.
    """
    # Standard ImageNet preprocessing; 'train' adds random crop/flip
    # augmentation, 'val' uses a deterministic center crop.
    data_transforms = {
        'train':
        transforms.Compose([
            transforms.Resize(256),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'val':
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }

    data_dir = config.data_dir
    image_datasets = {
        x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
        for x in ['train', 'val']
    }
    dataloaders = {
        x: torch.utils.data.DataLoader(image_datasets[x],
                                       batch_size=config.batch_size,
                                       shuffle=True,
                                       num_workers=4)
        for x in ['train', 'val']
    }
    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}

    # criterion is a list so train_model can combine multiple losses.
    if config.use_OLE:
        print('using CrossEntropyloss with OLEloss')
        criterion = [nn.CrossEntropyLoss()] + [OLELoss()]
    else:
        print('using CrossEntropyloss')
        criterion = [nn.CrossEntropyLoss()]

    # Single call instead of an if/else that duplicated the constructor.
    model = models.resnet152(pretrained=config.use_pretrained)

    # Replace the 1000-way ImageNet head with one sized for this dataset.
    num_ftrs = model.fc.in_features
    model.fc = nn.Linear(num_ftrs, config.class_num)

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)

    torch.backends.cudnn.benchmark = True
    model = model.to(device)
    start_epoch = 0
    best_acc = 0
    lr = config.initial_lr

    if config.is_resume:
        print('==> Resuming from checkpoint..')
        weight_dir = os.path.join(config.checkpoint, 'final.pth.tar')
        checkpoint = torch.load(weight_dir)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        # Fast-forward the LR to where the step schedule would have
        # decayed it by start_epoch.
        lr = pow(config.decay_scalar, int(
            start_epoch // config.decay_step)) * lr

    optimizer_ft = optim.SGD(model.parameters(), lr=lr, momentum=0.9)

    # Decay LR by config.decay_scalar every config.decay_step epochs.
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft,
                                           step_size=config.decay_step,
                                           gamma=config.decay_scalar)

    best_acc = train_model(dataloaders,
                           dataset_sizes,
                           start_epoch,
                           best_acc,
                           model,
                           criterion,
                           optimizer_ft,
                           exp_lr_scheduler,
                           num_epochs=config.max_epoch)
    filename = os.path.join(config.checkpoint, 'final.pth.tar')
    torch.save(
        {
            'epoch': config.max_epoch + start_epoch,
            'state_dict': model.state_dict(),
            'best_acc': best_acc,
        }, filename)
    print('Best val Acc: {:4f}'.format(best_acc), 'save at %s' % filename)
コード例 #32
0
                ax.axis('off')
                ax.set_title('predicted: {}'.format(class_names[preds[j]]))
                imshow(inputs.cpu().data[j])

                if images_so_far == num_images:
                    model.train(mode=was_training)
                    return
        model.train(mode=was_training)


######################################################################
# Finetuning the convnet transfer from resnet to MoleMap
# ----------------------
#
# Load a pretrained model and reset final fully connected layer.
model_ft = models.resnet152(pretrained=True)
# Replace the 1000-way ImageNet head with one sized for this dataset.
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, len(class_names_m))
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# (Removed a bare `model_ft.parameters()` expression statement: it built
# the parameter generator and immediately discarded it — a no-op.)
# Observe that all parameters are being optimized
# optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.0001, momentum=0.9)
optimizer_ft = torch.optim.Adam(model_ft.parameters(), lr=init_learning_rate)

# Decay LR by learning_rate_decay_factor every num_epochs_decay epochs.
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft,
                                       step_size=num_epochs_decay,
                                       gamma=learning_rate_decay_factor)

######################################################################
import torch
import torch.nn as nn
import torchvision.models as models
from torch.autograd import Variable
import skvideo.io
import time
import math

# Select GPU 0 when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

batch_size = 1  # Extract Resnet features in a tensor of batch_size videos.

# NOTE(review): relative path to resized MSR-VTT videos — confirm layout.
video_path = '../../../dataset/MSR-VTT/videos_224/'

# Download pre-trained weights. NOTE(review): the original comment said
# "Resnet101" and the variable is named `resnet50`, but the model actually
# constructed is ResNet-152 — the names are misleading.
resnet50 = models.resnet152(pretrained=True)
modules = list(resnet50.children())[:-1]  # drop the final fc layer
resnet50 = nn.Sequential(*modules)
# Freeze every weight: the network is used as a fixed feature extractor.
for p in resnet50.parameters():
    p.requires_grad = False
resnet50 = resnet50.to(device)


# Convert id's list to dictionary with key as id, value as id numpy array
def video_array(batch_ids):
    batch_array = {}
    try:
        video = skvideo.io.vread(video_path + batch_ids[0] + '.mp4')
        batch_array.update({batch_ids[0]: video})
        return batch_array
    except:
コード例 #34
0
import torch
import torch.nn as nn
import torchvision.models as models
from torch.autograd import Variable
import skvideo.io
import time
import math

# Select GPU 0 when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

batch_size = 1  # Extract Resnet features in a tensor of batch_size videos.

# NOTE(review): relative path to resized MSR-VTT videos — confirm layout.
video_path = '../../../dataset/MSR-VTT/videos_224/'

# Download pre-trained weights. NOTE(review): the original comment said
# "Resnet101" and the variable is named `resnet50`, but the model actually
# constructed is ResNet-152 — the names are misleading.
resnet50 = models.resnet152(pretrained=True)
modules = list(resnet50.children())[:-1]  # drop the final fc layer
resnet50 = nn.Sequential(*modules)
# Freeze every weight: the network is used as a fixed feature extractor.
for p in resnet50.parameters():
    p.requires_grad = False
resnet50 = resnet50.to(device)


# Convert id's list to dictionary with key as id, value as id numpy array
def video_array(batch_ids):
    batch_array = {}
    try:
        video = skvideo.io.vread(video_path + batch_ids[0] + '.mp4')
        batch_array.update({batch_ids[0]:video})
        return batch_array
    except:
コード例 #35
0
def initialize_model(model_name,
                     num_classes,
                     feature_extract,
                     use_pretrained=True):
    """Build a torchvision model with its classifier head resized.

    Args:
        model_name: one of 'resnet', 'alexnet', 'vgg', 'squeezenet',
            'densenet', 'inception'.
        num_classes: output size of the replacement classifier layer.
        feature_extract: when True, ``set_parameter_requires_grad`` freezes
            the backbone; the freshly created head — added *after* the
            freeze — keeps ``requires_grad=True``, so only it is trained.
        use_pretrained: load ImageNet weights.

    Returns:
        ``(model_ft, input_size)`` where ``input_size`` is the expected
        square input resolution (224, or 299 for inception).

    Raises:
        ValueError: on an unrecognized ``model_name``. (The original
        printed a message and called ``exit()``, killing the whole
        process; raising lets callers handle the error.)
    """
    if model_name == 'resnet':
        model_ft = models.resnet152(pretrained=use_pretrained)  #50 or 152

        set_parameter_requires_grad(model_ft, feature_extract)

        # New fc layer is created after the freeze, so it stays trainable.
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == 'alexnet':
        model_ft = models.alexnet(pretrained=use_pretrained)

        set_parameter_requires_grad(model_ft, feature_extract)

        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == 'vgg':
        model_ft = models.vgg11_bn(pretrained=use_pretrained)

        set_parameter_requires_grad(model_ft, feature_extract)

        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == 'squeezenet':
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)

        set_parameter_requires_grad(model_ft, feature_extract)

        # SqueezeNet classifies via a 1x1 conv, not a Linear layer.
        model_ft.classifier[1] = nn.Conv2d(512,
                                           num_classes,
                                           kernel_size=(1, 1),
                                           stride=(1, 1))
        model_ft.num_classes = num_classes
        input_size = 224

    elif model_name == 'densenet':
        model_ft = models.densenet121(pretrained=use_pretrained)

        set_parameter_requires_grad(model_ft, feature_extract)

        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == 'inception':
        # Inception v3 has an auxiliary classifier used during training;
        # both heads must be resized.
        model_ft = models.inception_v3(pretrained=use_pretrained)

        set_parameter_requires_grad(model_ft, feature_extract)

        # Aux net
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)

        # Primary net
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)

        input_size = 299

    else:
        raise ValueError('invalid model name: {}'.format(model_name))

    return model_ft, input_size