Example #1
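# Imports assumed by these examples; the project-specific modules and globals
# (c3d, ssl_net, UntrimmedVideoDataset, CoviarData, get_augmentation, FinetuneData,
# train, validation, test, params, save_path, device_ids) are defined elsewhere
# in the repository.
import os
import random
import time

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter  # or: from tensorboardX import SummaryWriter
from tqdm import tqdm
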
def main():
    base = c3d.C3D(with_classifier=False)
    model = ssl_net.SSLNET(base, with_classifier=True, num_classes=12)

    start_epoch = 1
    # pretrain_weight = loadcontinur_weights(pretrain_path)

    # model.load_state_dict(pretrain_weight, strict=False)
    # train
    train_dataset = UntrimmedVideoDataset(params['root'], mode="train")
    if params['data'] == 'UCF-101':
        val_size = 800
    elif params['data'] == 'hmdb':
        val_size = 400
    elif params['data'] == 'Thumos14':
        val_size = 400
    else:
        raise ValueError("unknown dataset: {}".format(params['data']))
    train_dataset, val_dataset = random_split(
        train_dataset, (len(train_dataset) - val_size, val_size))

    print("num_works:{:d}".format(params['num_workers']))
    print("batch_size:{:d}".format(params['batch_size']))
    train_loader = DataLoader(train_dataset,
                              batch_size=params['batch_size'],
                              shuffle=True,
                              num_workers=params['num_workers'])
    val_loader = DataLoader(val_dataset,
                            batch_size=params['batch_size'],
                            shuffle=True,
                            num_workers=params['num_workers'])
    model = nn.DataParallel(model)  # multi-GPU
    model = model.cuda()
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = optim.SGD(model.parameters(),
                          lr=params['learning_rate'],
                          momentum=params['momentum'],
                          weight_decay=params['weight_decay'])
    #scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.1)

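    # drop the learning rate by 10x whenever the validation loss plateaus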
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     'min',
                                                     min_lr=1e-5,
                                                     patience=50,
                                                     factor=0.1)

    #pretrain_model = pretrain_path.split('/')[-1].split('.')[0] + 'pth'

    model_save_dir = os.path.join(save_path,
                                  '_' + time.strftime('%m-%d-%H-%M'))
    writer = SummaryWriter(model_save_dir)

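    # log one sample batch of clips and labels to TensorBoard as a sanity check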
    for data in train_loader:
        clip, label = data
        writer.add_video('train/clips', clip, 0, fps=8)
        writer.add_text('train/idx', str(label.tolist()), 0)
        clip = clip.cuda()
        #writer.add_graph(model, (clip, clip));
        break
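    # record the initial parameter distributions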
    for name, param in model.named_parameters():
        writer.add_histogram('params/{}'.format(name), param, 0)

    if not os.path.exists(model_save_dir):
        os.makedirs(model_save_dir)
    prev_best_val_loss = float('inf')
    prev_best_loss_model_path = None
    prev_best_acc_model_path = None
    best_acc = 0
    best_epoch = 0
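    # train and validate each epoch, keeping the best-accuracy and best-loss checkpoints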
    for epoch in tqdm(range(start_epoch, start_epoch + params['epoch_num'])):
        train(train_loader, model, criterion, optimizer, epoch, writer)
        val_loss, top1_avg = validation(val_loader, model, criterion,
                                        optimizer, epoch)
        if top1_avg >= best_acc:
            best_acc = top1_avg
            best_epoch = epoch
            model_path = os.path.join(
                model_save_dir, 'best_acc_model_{}.pth.tar'.format(epoch))
            torch.save(model.state_dict(), model_path)

            prev_best_acc_model_path = model_path
        if val_loss < prev_best_val_loss:
            model_path = os.path.join(
                model_save_dir, 'best_loss_model_{}.pth.tar'.format(epoch))
            torch.save(model.state_dict(), model_path)
            prev_best_val_loss = val_loss

            prev_best_loss_model_path = model_path
        scheduler.step(val_loss)
        if epoch % 20 == 0:
            checkpoints = os.path.join(model_save_dir, str(epoch) + ".pth.tar")
            torch.save(model.state_dict(), checkpoints)
            print("save_to:", checkpoints)
    print("best is :", best_acc, best_epoch)
Example #2
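# The same imports and globals as in Example #1 are assumed here.
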
def main2():
    model = c3d.C3D(with_classifier=True, num_classes=101)

    start_epoch = 1
    # load 16 compressed video frames
    train_dataset = CoviarData(
        '/data2/fb/project/pytorch-coviar-master/data/ucf101/mpeg4_videos',
        'ucf101',
        '/data2/fb/project/pytorch-coviar-master/data/datalists/ucf101_split1_train.txt',
        'residual', get_augmentation(), 4, 1, True)

    val_size = 400
    train_dataset, val_dataset = random_split(
        train_dataset, (len(train_dataset) - val_size, val_size))

    print("num_workers:{:d}".format(params['num_workers']))
    print("batch_size:{:d}".format(params['batch_size']))
    train_loader = DataLoader(train_dataset,
                              batch_size=params['batch_size'],
                              shuffle=True,
                              num_workers=params['num_workers'])
    val_loader = DataLoader(val_dataset,
                            batch_size=params['batch_size'],
                            shuffle=True,
                            num_workers=params['num_workers'])

    model = model.cuda()
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = optim.SGD(model.parameters(),
                          lr=params['learning_rate'],
                          momentum=params['momentum'],
                          weight_decay=params['weight_decay'])
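    # drop the learning rate by 10x whenever the validation loss plateaus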
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     'min',
                                                     min_lr=1e-5,
                                                     patience=20,
                                                     factor=0.1)

    model_save_dir = os.path.join(save_path, '_' + time.strftime('%m-%d-%H-%M'))
    writer = SummaryWriter(model_save_dir)

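    # log one sample batch of clips and labels to TensorBoard as a sanity check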
    for data in train_loader:
        clip, label = data
        writer.add_video('train/clips', clip, 0, fps=8)
        writer.add_text('train/idx', str(label.tolist()), 0)
        clip = clip.cuda()
        break
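    # record the initial parameter distributions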
    for name, param in model.named_parameters():
        writer.add_histogram('params/{}'.format(name), param, 0)

    if not os.path.exists(model_save_dir):
        os.makedirs(model_save_dir)
    prev_best_val_loss = float('inf')
    prev_best_loss_model_path = None
    prev_best_acc_model_path = None
    best_acc = 0
    best_epoch = 0
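    # train and validate each epoch, keeping the best-accuracy and best-loss checkpoints;
    # train_coviar is assumed to be the CoViAR-specific training helper defined elsewhere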
    for epoch in tqdm(range(start_epoch, start_epoch + params['epoch_num'])):
        train_coviar(train_loader, model, criterion, optimizer, epoch,
                     params['learning_rate'])
        val_loss, top1_avg = validation(val_loader, model, criterion,
                                        optimizer, epoch)
        if top1_avg >= best_acc:
            best_acc = top1_avg
            best_epoch = epoch
            model_path = os.path.join(
                model_save_dir, 'best_acc_model_{}.pth.tar'.format(epoch))
            torch.save(model.state_dict(), model_path)

            prev_best_acc_model_path = model_path
        if val_loss < prev_best_val_loss:
            model_path = os.path.join(
                model_save_dir, 'best_loss_model_{}.pth.tar'.format(epoch))
            torch.save(model.state_dict(), model_path)
            prev_best_val_loss = val_loss

            prev_best_loss_model_path = model_path
        scheduler.step(val_loss)
        if epoch % 20 == 0:
            checkpoints = os.path.join(model_save_dir, str(epoch) + ".pth.tar")
            torch.save(model.state_dict(), checkpoints)
            print("save_to:", checkpoints)
    print("best is :", best_acc, best_epoch)
Example #3
def test_model(model, pretrain_path):
    # NOTE: the code that loads the pretrained weights from pretrain_path is not shown in this excerpt
    params['root'] = '/data2/data/video_data/UCF-101'
    test_dataset = FinetuneData(params['root'], mode="test")
    test_loader = DataLoader(test_dataset,
                             batch_size=params['batch_size'],
                             shuffle=False,
                             num_workers=params['num_workers'])
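    # wrap the model with DataParallel when more than one GPU is listed in device_ids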
    if len(device_ids) > 1:
        print(torch.cuda.device_count())
        model = nn.DataParallel(model)
    # model = nn.DataParallel(model, device_ids=[4,5,6,7])

    model = model.cuda()
    criterion = nn.CrossEntropyLoss().cuda()
    test(test_loader, model, criterion)


if __name__ == '__main__':
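    # fix all random seeds for reproducibility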
    seed = 632
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    model = c3d.C3D(with_classifier=True, num_classes=101)

    pretrain_path ="/data2/fb/save_model" \
                   "/finetune_model_UCF101/_01-02-16-32" \
                   "/best_acc_model_112.pth.tar"

    test_model(model, pretrain_path)