Example #1
0
                            # NOTE(review): fragment starts mid-call — these are the
                            # trailing kwargs of a dataset constructor (presumably the
                            # validation split, given train=False); confirm in full file.
                            num_classes=num_classes,
                            train=False,
                            transform=transform)
    # Log the combined size of both splits, not just one.
    logger.info("Dataset samples: {}".format(len(train_set) + len(val_set)))
    # Training loader: shuffled each epoch.
    # NOTE(review): num_workers=24 is hard-coded here but a num_workers
    # variable is used elsewhere in this file — consider unifying.
    train_loader = DataLoader(train_set,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=24,
                              pin_memory=True)
    # Validation loader: deterministic order (no shuffle).
    val_loader = DataLoader(val_set,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=24,
                            pin_memory=True)
    # Create model
    # NOTE(review): pretrained=True downloads ImageNet/Kinetics weights that are
    # immediately overwritten by the checkpoint load below — wasted work unless
    # the checkpoint only partially covers the state dict; confirm intent.
    model = r2plus1d_18(pretrained=True, num_classes=226)
    # load pretrained
    # NOTE(review): hard-coded relative path — fails unless run from the repo root.
    checkpoint = torch.load('final_models/val_rgb_final.pth')

    # Strip the DataParallel 'module.' prefix from every checkpoint key.
    new_state_dict = OrderedDict()
    for k, v in checkpoint.items():
        name = k[7:]  # remove 'module.' — NOTE(review): unconditional slice corrupts keys that lack the prefix; safer: k.removeprefix('module.')
        new_state_dict[name] = v
    model.load_state_dict(new_state_dict)
    # if phase == 'Train':
    #     model.fc1 = nn.Linear(model.fc1.in_features, num_classes)
    print(model)

    model = model.to(device)
    # Run the model parallelly
    # NOTE(review): fragment is cut off here — the multi-GPU branch body is not visible.
    if torch.cuda.device_count() > 1:
Example #2
0
                         # NOTE(review): fragment starts mid-call — trailing kwargs of an
                         # earlier model-constructor branch (cut off above).
                         sample_duration=sample_duration,
                         num_classes=num_classes).to(device)
    # Select backbone architecture from the CLI flag; all 3D-ResNet variants
    # share the same constructor signature, r2plus1d takes fewer kwargs.
    elif args.model == '3dresnet34':
        model = resnet34(pretrained=True,
                         progress=True,
                         sample_size=sample_size,
                         sample_duration=sample_duration,
                         num_classes=num_classes).to(device)
    elif args.model == '3dresnet50':
        model = resnet50(pretrained=True,
                         progress=True,
                         sample_size=sample_size,
                         sample_duration=sample_duration,
                         num_classes=num_classes).to(device)
    elif args.model == 'r2plus1d':
        model = r2plus1d_18(pretrained=True,
                            num_classes=num_classes).to(device)
    # Run the model parallelly
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    # Load model
    # NOTE(review): load happens AFTER the optional DataParallel wrap, so the
    # checkpoint's key prefixes ('module.' or not) must match the GPU count at
    # load time — loading a single-GPU checkpoint on a multi-GPU host (or vice
    # versa) will raise a key-mismatch error; confirm checkpoint provenance.
    model.load_state_dict(torch.load(model_path))

    # Test the model
    model.eval()
    # Accumulators for labels/predictions across the whole test set
    # (presumably concatenated later for metrics; that code is cut off below).
    all_label = []
    all_pred = []

    # Inference only: disable autograd for speed and memory.
    with torch.no_grad():
        for batch_idx, data in enumerate(test_loader):
            # get the inputs and labels
            # NOTE(review): assumes each batch is a dict with 'data'/'label'
            # keys — confirm against the Dataset implementation.
            inputs, labels = data['data'].to(device), data['label'].to(device)
                            # NOTE(review): fragment starts mid-call — trailing kwargs of a
                            # dataset constructor (validation split, given train=False).
                            num_classes=num_classes,
                            train=False,
                            transform=transform)
    # Log the combined size of both splits.
    logger.info("Dataset samples: {}".format(len(train_set) + len(val_set)))
    # Training loader: shuffled; worker count comes from config here (unlike the
    # hard-coded 24 used in a sibling snippet of this file).
    train_loader = DataLoader(train_set,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=num_workers,
                              pin_memory=True)
    # Validation loader: deterministic order.
    val_loader = DataLoader(val_set,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=num_workers,
                            pin_memory=True)
    # Create model
    # pretrained=False: all weights come from the finetuned checkpoint below.
    model = r2plus1d_18(pretrained=False, num_classes=226)
    # load pretrained
    # NOTE(review): hard-coded relative path — fails unless run from the repo root.
    checkpoint = torch.load('final_models_finetuned/hha_final_finetuned.pth')
    # Strip the DataParallel 'module.' prefix from every checkpoint key.
    new_state_dict = OrderedDict()
    for k, v in checkpoint.items():
        name = k[7:]  # remove 'module.' — NOTE(review): unconditional slice corrupts keys that lack the prefix; safer: k.removeprefix('module.')
        new_state_dict[name] = v
    model.load_state_dict(new_state_dict)
    # if phase == 'Train':
    #     model.fc1 = nn.Linear(model.fc1.in_features, num_classes)
    print(model)

    model = model.to(device)
    # Run the model parallelly
    # NOTE(review): fragment is cut off after the log line — the DataParallel
    # wrap presumably follows but is not visible here.
    if torch.cuda.device_count() > 1:
        logger.info("Using {} GPUs".format(torch.cuda.device_count()))
Example #4
0
                            # NOTE(review): fragment starts mid-call — trailing kwargs of the
                            # validation dataset constructor (cut off above).
                            mode='val',
                            transform=val_transform)

    training_dataloader = DataLoader(train_data,
                                     batch_size=batch_size,
                                     shuffle=True,
                                     num_workers=16,
                                     pin_memory=True)
    # NOTE(review): shuffle=True on the VALIDATION loader is unusual — it makes
    # per-batch logging non-reproducible across runs. Validation loaders are
    # normally shuffle=False; confirm this is intentional.
    validation_dataloader = DataLoader(val_data,
                                       batch_size=batch_size,
                                       shuffle=True,
                                       num_workers=16,
                                       pin_memory=True)

    # Select backbone from the CLI flag.
    # NOTE(review): num_classes is hard-coded per branch (500, 500, 100) rather
    # than taken from the dataset — switching --model silently changes the
    # output dimensionality; confirm these match the intended datasets.
    if args.model == 'r2plus1d_18':
        # NOTE(review): 'cnn3d' alias appears unused in this fragment — the
        # double assignment is likely leftover and could be dropped.
        model = cnn3d = r2plus1d_18(pretrained=False, num_classes=500)
    elif args.model == 'r2plus1d_18_new':
        model = r2plus1d_18_new(pretrained=False, progress=False)
    elif args.model == '3dresnet18':
        model = resnet18(pretrained=False,
                         sample_size=sample_size,
                         sample_duration=16,
                         num_classes=500)
    elif args.model == 'ResCRNN':
        model = ResCRNN(sample_size=112,
                        sample_duration=16,
                        num_classes=100,
                        arch="resnet18")
    # NOTE(review): device_ids=[0, 1, 2, 3] is hard-coded — raises on hosts with
    # fewer than 4 GPUs; omitting device_ids lets DataParallel use all visible
    # devices. Also: no final else above, so an unknown --model value makes
    # 'model' undefined here (NameError).
    model = nn.DataParallel(model, device_ids=[0, 1, 2, 3], dim=0).to(device)
    criterion = torch.nn.CrossEntropyLoss()
    # L2 regularization via weight_decay; lr comes from config.
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.0001)