Example #1
def define_F(opt, use_bn=False):
    gpu_ids = opt['gpu_ids']
    device = torch.device('cuda' if gpu_ids else 'cpu')

    if opt.get('unet'):
        # Use a UNet as the feature extractor.
        netF = UNet(n_channels=3, n_classes=10)
        unet_model_path = opt['unet_model']
        netF.load_state_dict(torch.load(unet_model_path, map_location=device))
        if gpu_ids:
            netF = nn.DataParallel(netF)
    elif opt.get('pnasnet'):
        netF = arch.PNasNetFeatureExtractor(use_input_norm=True, device=device)
    else:
        # pytorch pretrained VGG19-54, before ReLU.
        if use_bn:
            feature_layer = 49
        else:
            feature_layer = 34
        netF = arch.VGGFeatureExtractor(feature_layer=feature_layer,
                                        use_bn=use_bn,
                                        use_input_norm=True,
                                        device=device)
        # netF = arch.ResNet101FeatureExtractor(use_input_norm=True, device=device)
        if gpu_ids:
            netF = nn.DataParallel(netF)
    netF.eval()  # No need to train
    return netF
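
All of these snippets assume the caller has already imported torch, torch.nn, and the project's architecture module; none of the examples show their imports. A minimal usage sketch under those assumptions (the models.modules.architecture import path and the opt keys are guesses based on the BasicSR/ESRGAN layout, and Example #1 additionally assumes a UNet class is importable):

import torch
import torch.nn as nn
import models.modules.architecture as arch  # assumed import path (BasicSR layout)

opt = {'gpu_ids': [0]}           # hypothetical config fragment
netF = define_F(opt)             # frozen VGG19 feature extractor (layer 34)
x = torch.rand(1, 3, 128, 128, device='cuda' if opt['gpu_ids'] else 'cpu')
with torch.no_grad():            # netF is eval-only; used for perceptual loss
    feat = netF(x)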
Example #2
def define_F(opt, use_bn=False):
    gpu_ids = opt['gpu_ids']
    # Legacy pattern: pass a tensor type instead of a torch.device.
    tensor = torch.cuda.FloatTensor if gpu_ids else torch.FloatTensor
    # pytorch pretrained VGG19-54, before ReLU.
    if use_bn:
        feature_layer = 49
    else:
        feature_layer = 34
    netF = arch.VGGFeatureExtractor(feature_layer=feature_layer,
                                    use_bn=use_bn,
                                    use_input_norm=True,
                                    tensor=tensor)
    if gpu_ids:
        netF = nn.DataParallel(netF).cuda()
    netF.eval()  # No need to train
    return netF
Example #3
def define_F(opt, use_bn=False):
    gpu_ids = opt['gpu_ids']
    device = torch.device('cuda' if gpu_ids else 'cpu')
    # pytorch pretrained VGG19-54, before ReLU.
    if use_bn:
        feature_layer = 49
    else:
        feature_layer = 34
    netF = arch.VGGFeatureExtractor(feature_layer=feature_layer,
                                    use_bn=use_bn,
                                    use_input_norm=True,
                                    device=device)
    if gpu_ids:
        netF = nn.DataParallel(netF)
    netF.eval()  # No need to train
    return netF
Example #4
def define_F(opt, use_bn=False):
    gpu_ids = opt["gpu_ids"]
    device = torch.device("cuda" if gpu_ids else "cpu")
    # pytorch pretrained VGG19-54, before ReLU.
    if use_bn:
        feature_layer = 49
    else:
        feature_layer = 34
    netF = arch.VGGFeatureExtractor(feature_layer=feature_layer,
                                    use_bn=use_bn,
                                    use_input_norm=True,
                                    device=device)
    # netF = arch.ResNet101FeatureExtractor(use_input_norm=True, device=device)
    if gpu_ids:
        netF = nn.DataParallel(netF)
    netF.eval()  # No need to train
    return netF
Example #5
def define_F(opt, use_bn=False):
    gpu_ids = opt['gpu_ids']
    device = torch.device('cuda' if gpu_ids else 'cpu')
    # pytorch pretrained VGG19-54, before ReLU.
    if use_bn:
        feature_layer = 49
    else:
        feature_layer = 34
    # Pick the feature-extractor backbone from the config; default to VGG19.
    if 'network_F' not in opt:
        mode = 'VGG19'
    else:
        mode = opt['network_F']['mode']

    if mode == 'VGG19':
        netF = arch.VGGFeatureExtractor(feature_layer=feature_layer, use_bn=use_bn,
                                        use_input_norm=True, device=device)
    elif mode == 'VGG16':
        feature_layer = 28
        netF = arch.VGG16FeatureExtractor(feature_layer=feature_layer, use_bn=use_bn,
                                          use_input_norm=True, device=device)
    elif mode == 'VGG16-MINC':
        feature_layer = 28
        model_path = opt['network_F']['path']
        netF = arch.VGG16MINCFeatureExtractor(feature_layer=feature_layer, model_path=model_path,
                                              use_bn=use_bn, use_input_norm=True, device=device)
    elif mode == 'Sphere20a':
        feature_layer = 28
        model_path = opt['network_F']['path']
        norm = opt['network_F'].get('norm', True)
        netF = arch.Sphere20aFeatureExtractor(feature_layer=feature_layer, model_path=model_path,
                                              use_bn=use_bn, use_input_norm=norm, device=device)
    else:
        raise NotImplementedError('Feature extractor mode [{:s}] is not recognized.'.format(mode))
    # netF = arch.ResNet101FeatureExtractor(use_input_norm=True, device=device)
    # if 'distributed' in opt:
    #     assert torch.cuda.is_available()
    #     netF = nn.parallel.DistributedDataParallel(netF)

    if gpu_ids:
        assert torch.cuda.is_available()
        netF = nn.DataParallel(netF)
    netF.eval()  # No need to train
    return netF
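
This variant selects the backbone from a network_F block in the options. A hypothetical config fragment that would exercise the Sphere20a branch (key names are taken from the code above; the model path is illustrative only):

opt = {
    'gpu_ids': [0],
    'network_F': {
        'mode': 'Sphere20a',
        'path': 'pretrained/sphere20a.pth',  # illustrative path
        'norm': True,                        # optional; defaults to True
    },
}
netF = define_F(opt)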
Example #6
def define_F(opt, use_bn=False, **kwargs):
    gpu_ids = opt['gpu_ids']
    device = torch.device('cuda' if gpu_ids else 'cpu')
    # pytorch pretrained VGG19-54, before ReLU.
    if 'arch' in kwargs and 'vgg11' in kwargs['arch']:
        # e.g. arch='vgg11_20' -> VGG11 backbone, feature layer 20.
        feature_layer = int(kwargs['arch'][len('vgg11_'):])
        kwargs['arch'] = 'vgg11'
    else:
        if use_bn:
            feature_layer = 49
        else:
            feature_layer = 34
    netF = arch.VGGFeatureExtractor(feature_layer=feature_layer, use_bn=use_bn,
                                    use_input_norm=True, device=device, **kwargs)
    # netF = arch.ResNet101FeatureExtractor(use_input_norm=True, device=device)
    if gpu_ids:
        netF = nn.DataParallel(netF)
    netF.eval()  # No need to train
    return netF
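
Here the backbone and feature layer can be packed into a single arch kwarg: anything after the 'vgg11_' prefix is parsed as the layer index, and the remaining kwargs are forwarded to the extractor. A hedged usage sketch (assuming arch.VGGFeatureExtractor accepts an arch keyword, as the **kwargs forwarding implies):

netF_vgg11 = define_F(opt, arch='vgg11_20')  # hypothetical value: VGG11, layer 20
netF_vgg19 = define_F(opt)                   # default: VGG19, layer 34 (49 with BN)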