# else:
    #     num_videos = 1

    # for idx in range(num_videos):
    #     domain_nets.append(ADNetDomainSpecific(num_classes=num_classes, num_history=num_history))

    #     scal = torch.Tensor([0.01])

    #     if trained_file and not random_initialize_domain_specific:
    #         domain_nets[idx].load_weights(trained_file, idx)
    #     else:
    #         # fc 6
    #         nn.init.normal_(domain_nets[idx].fc6.weight.data)
    #         domain_nets[idx].fc6.weight.data = domain_nets[idx].fc6.weight.data * scal.expand_as(domain_nets[idx].fc6.weight.data)
    #         domain_nets[idx].fc6.bias.data.fill_(0)
    #         # fc 7
    #         nn.init.normal_(domain_nets[idx].fc7.weight.data)
    #         domain_nets[idx].fc7.weight.data = domain_nets[idx].fc7.weight.data * scal.expand_as(domain_nets[idx].fc7.weight.data)
    #         domain_nets[idx].fc7.bias.data.fill_(0)

    return adnet_model, domain_nets


if __name__ == "__main__":
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    base_network = vggm()
    base_network = base_network.features[0:15]
    # model = testStructure(base_network=base_network).to(device)
    model = ADNet(base_network=base_network, opts=None).to(device)

    summary(model, (3, 112, 112))
Example #2
def adnet(opts,
          base_network='vggm',
          trained_file=None,
          random_initialize_domain_specific=False,
          multidomain=True):
    """
    Args:
        base_network: (string)
        trained_file: (None or string) path to a saved checkpoint file
        random_initialize_domain_specific: (bool) if there is a trained file, whether to randomly
            initialize the domain-specific layers (True) or load their weights from the file (False).
            Has no effect when trained_file is None (the layers are always randomly initialized).
        multidomain: (bool) whether to keep a separate set of weights for each video. Default True: separate
    Returns:
        adnet_model: (ADNet)
        domain_nets: (list of ADNetDomainSpecific) length: #videos
    """
    assert base_network in ['vggm'], "Base network variant is unavailable"

    num_classes = opts['num_actions']
    num_history = opts['num_action_history']

    assert num_classes in [11], "num_classes must be 11"

    settings = pretrained_settings['adnet']

    if base_network == 'vggm':
        base_network = vggm()  # by default, load vggm's weights too
        base_network = base_network.features[0:10]

    else:  # change this part if adding more base network variant
        base_network = vggm()
        base_network = base_network.features[0:10]

    if trained_file:
        assert num_classes == settings['num_classes'], \
            "num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)

        print('Resuming training, loading {}...'.format(trained_file))

        adnet_model = ADNet(base_network=base_network,
                            opts=opts,
                            num_classes=num_classes,
                            num_history=num_history)
        # if use_gpu:
        #     adnet_model = nn.DataParallel(adnet_model)
        #     cudnn.benchmark = True
        #     adnet_model = adnet_model.cuda()

        adnet_model.load_weights(trained_file)

        adnet_model.input_space = settings['input_space']
        adnet_model.input_size = settings['input_size']
        adnet_model.input_range = settings['input_range']
        adnet_model.mean = settings['mean']
        adnet_model.std = settings['std']
    else:
        adnet_model = ADNet(base_network=base_network,
                            opts=opts,
                            num_classes=num_classes)

    # initialize domain-specific network
    domain_nets = []
    if multidomain:
        num_videos = opts['num_videos']
    else:
        num_videos = 1

    for idx in range(num_videos):
        domain_nets.append(
            ADNetDomainSpecific(num_classes=num_classes,
                                num_history=num_history))

        scal = torch.Tensor([0.01])

        if trained_file and not random_initialize_domain_specific:
            domain_nets[idx].load_weights(trained_file, idx)
        else:
            # fc 6
            nn.init.normal_(domain_nets[idx].fc6.weight.data)
            domain_nets[idx].fc6.weight.data = domain_nets[idx].fc6.weight.data * scal.expand_as(domain_nets[idx].fc6.weight.data)
            domain_nets[idx].fc6.bias.data.fill_(0)
            # fc 7
            nn.init.normal_(domain_nets[idx].fc7.weight.data)
            domain_nets[idx].fc7.weight.data = domain_nets[idx].fc7.weight.data * scal.expand_as(domain_nets[idx].fc7.weight.data)
            domain_nets[idx].fc7.bias.data.fill_(0)

    return adnet_model, domain_nets
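
A minimal usage sketch for the factory above, assuming opts is a plain dict. The keys mirror those read inside adnet(); the numeric values (other than num_actions, which the assert fixes at 11) are illustrative assumptions rather than values taken from the source:

opts = {
    'num_actions': 11,          # fixed at 11 by the assert in adnet()
    'num_action_history': 10,   # assumed action-history length
    'num_videos': 58,           # assumed number of training videos (one domain-specific net per video)
}
adnet_model, domain_nets = adnet(opts,
                                 base_network='vggm',
                                 trained_file=None,
                                 multidomain=True)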
Example #3
import torch
from torch.optim import SGD
from torchvision import transforms

from models.vggm import vggm
from iceberg import *

lr = 0.0005
mom = 0.9
wd = 1e-4
bsize = 256

model = vggm(num_classes=1000, pretrained=None)
model = torch.nn.DataParallel(model).cuda()
optim = SGD(model.parameters(), lr, momentum=mom, weight_decay=wd)
iceberg = Iceberg(
    './data/train.json',
    './data/vggm_lr5e-4_mom9e-1_wd1e-4_bs{}_model.pth'.format(bsize),
    model, optim, 10000, bsize)
iceberg.run(
    transforms.Compose([
        transforms.Resize(256),
        transforms.RandomCrop(221),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()
    ]))
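
For evaluation, one would typically swap the stochastic augmentations above for deterministic ones. A minimal sketch follows; the 221-pixel CenterCrop matches the training crop size, but how (or whether) Iceberg accepts a separate evaluation transform is not shown in the example, so this is only an assumption:

eval_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(221),   # deterministic counterpart of RandomCrop(221)
    transforms.ToTensor()
])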