# Beispiel #1
# 0
def main(args):
    """Train an UntrimmedNet classifier on UCF-101 and periodically validate.

    Args:
        args: parsed CLI namespace. Fields used here: restore, start_epoch,
            epochs, batch_size, lr, eval_freq, model, exp_name.
    """
    num_class = 101
    feature_length = 800  # only relevant to the commented-out Classifier below
    # clf = Classifier(feature_length, num_class, isbn=False)
    # fe = I3DFeatureExtractor()
    # I3DClassifier = torch.nn.Sequential(
    #     fe,
    #     clf
    # )
    from models.unet import UntrimmedNet
    I3DClassifier = UntrimmedNet(num_class)
    # Hard-coded 4-GPU setup — TODO confirm this matches the target machine.
    I3DClassifier = torch.nn.DataParallel(I3DClassifier,
                                          device_ids=[0, 1, 2, 3]).cuda()

    if args.restore:
        if os.path.isfile(args.restore):
            print("=> loading checkpoint '{}'".format(args.restore))
            checkpoint = torch.load(args.restore)
            args.start_epoch = checkpoint['epoch']
            I3DClassifier.load_state_dict(checkpoint['state_dict'])
            # BUG FIX: report the checkpoint actually loaded (args.restore);
            # the original printed args.evaluate by copy-paste mistake.
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.restore, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.restore))

    ds = build_video_dataset("ucf", train=True, test_rate=0.1)['dataset']
    eval_ds = build_video_dataset("ucf", train=False, test_rate=0.1)['dataset']

    loader = torch.utils.data.DataLoader(ds,
                                         batch_size=args.batch_size,
                                         shuffle=True,
                                         num_workers=16,
                                         pin_memory=True)

    eval_loader = torch.utils.data.DataLoader(eval_ds,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=20,
                                              pin_memory=True)

    criterion = torch.nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.Adam(I3DClassifier.parameters(), lr=args.lr)

    best_prec1 = 0
    best_prec5 = 0

    for epoch in range(args.start_epoch, args.epochs):
        # train for one epoch
        train(loader, I3DClassifier, criterion, optimizer, epoch)

        # evaluate on validation set
        if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:
            prec1, prec5 = validate(eval_loader, I3DClassifier, criterion,
                                    (epoch + 1) * len(loader))

            # remember best prec@1 and save checkpoint
            is_best = (prec1 > best_prec1) or (prec5 > best_prec5)

            best_prec1 = max(prec1, best_prec1)
            best_prec5 = max(prec5, best_prec5)
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.model,
                    'state_dict': I3DClassifier.state_dict(),
                    # 'fuse_type': args.fuse_type,
                    'best_prec1': best_prec1,
                    'best_prec5': best_prec5
                },
                is_best,
                args.exp_name,
                path='result')

    # BUG FIX: removed a stray, mis-indented line here
    # (``q = torch.dot(n, q)``) that referenced undefined names and was an
    # IndentationError — almost certainly an extraction artifact.
    print(
        'Experiment {} finished! Best Accu@1 is {:.6f}, Best Accu@5 is {:.6f}.'
        .format(args.exp_name, best_prec1, best_prec5))




def save(db, q, path):
    """Persist the feature database and the Q matrix under *path*.

    Args:
        db: database object (torch-serializable).
        q: Q matrix (torch-serializable).
        path: directory in which the two files are written.

    BUG FIX: the original wrote both objects to the same file
    (``os.path.join(path, 'database')``), so the Q matrix silently
    overwrote the database. They now go to separate files.
    """
    db_path = os.path.join(path, 'database')
    q_path = os.path.join(path, 'q_matrix')
    torch.save(db, db_path)
    torch.save(q, q_path)
    print("Database and Q matrix has been saved at: ", path)


if __name__ == '__main__':
    # Build the RGB and optical-flow models from their checkpoints.
    model_rgb = build_model(ckpt_rgb_path, 'rgb')
    model_flow = build_model(ckpt_flow_path, 'flow')

    # Build the dataset: single-clip batches over the THUMOS validation split.
    dataset = build_video_dataset(
        'thumos_validation', False,
        single=True, single_batch_size=batch_size)['dataset']

    # Extract features into the database, refine it, then derive the Q matrix.
    database = build_database(model_rgb, model_flow, dataset)
    database = refine_database(database, 1000)
    q = get_q(database)

    # Persist both artifacts and report where they went.
    save(database, q, save_path)
    print("Database and Q Matrix are generated, save at {}".format(save_path))
# Beispiel #3
# 0
if __name__ == '__main__':
    # NOTE(review): this script appears truncated — the loop that consumes
    # the accumulator lists set up below is not present in this chunk.

    ######################### HYPER PARAMETER ##############################

    # i3d_model_checkpoint = "models/0809_ucf_act.pth.tar"
    # i3d_model_checkpoint = "result/0811_epoch8_act_model_ckpt.pth.tar"    # Act clf
    index_file = 'ucf_index.txt'
    dataset = 'ucf'  # thumos_test is in fact the testing set.
    num_class = 101

    # num_class = 2

    ######################### HYPER PARAMETER ##############################
    from ops.utils import AverageMeter
    # Evaluation split of the chosen dataset; only the dataset object is kept.
    ds = build_video_dataset(dataset, train=False)
    # ds, annotation_root = ds["dataset"], ds["annotation_root"]
    ds = ds["dataset"]

    # Output directory for extracted features — presumably must already
    # exist, since the mkdir below is commented out; verify before running.
    prefix = "features/kinetics/{}/".format(dataset)
    # prefix = "features/{}_act/".format(dataset)
    # if not os.path.exists(prefix):
    #     os.mkdir(prefix)
    st = time.time()  # start time for the (missing) extraction loop
    # feature_length = 800
    # Accumulators, presumably filled per-video by the extraction loop that
    # follows in the full script — not visible in this chunk.
    path_list = []
    file_list = []
    label_list = []
    vid = []
    annotation_list = []
    # annotation_dict = build_annotation_dict(annotation_root=annotation_root, index_file=index_file)
def main(args):
    """Train an UntrimmedNet on THUMOS validation, testing on THUMOS test.

    Args:
        args: parsed CLI namespace. Fields used here: restore, start_epoch,
            epochs, batch_size, num_worker, lr, eval_freq, model, exp_name.
    """
    num_class = 101
    feature_length = 800  # unused here; kept for parity with sibling scripts
    from models.unet import UntrimmedNet
    I3DClassifier = UntrimmedNet(num_class)
    # Hard-coded 4-GPU setup — TODO confirm this matches the target machine.
    I3DClassifier = torch.nn.DataParallel(I3DClassifier,
                                          device_ids=[0, 1, 2, 3]).cuda()

    if args.restore:
        if os.path.isfile(args.restore):
            print("=> loading checkpoint '{}'".format(args.restore))
            checkpoint = torch.load(args.restore)
            # args.start_epoch = checkpoint['epoch']
            I3DClassifier.load_state_dict(checkpoint['state_dict'])
            # BUG FIX: report the checkpoint actually loaded (args.restore);
            # the original printed args.evaluate by copy-paste mistake.
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.restore, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.restore))

    ds = build_video_dataset("thumos_validation", train=True)['dataset']
    eval_ds = build_video_dataset("thumos_test", train=False)['dataset']

    loader = torch.utils.data.DataLoader(ds,
                                         batch_size=args.batch_size,
                                         shuffle=True,
                                         num_workers=args.num_worker,
                                         pin_memory=True)

    eval_loader = torch.utils.data.DataLoader(eval_ds,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=args.num_worker,
                                              pin_memory=True)

    ckpt_dir = 'result/' + args.exp_name
    # ROBUSTNESS: makedirs(exist_ok=True) also creates 'result/' if missing
    # and avoids a race between the existence check and the mkdir.
    os.makedirs(ckpt_dir, exist_ok=True)

    criterion = torch.nn.CrossEntropyLoss().cuda()
    multilabel_criterion = UnetLoss().cuda()
    optimizer = torch.optim.Adam(I3DClassifier.parameters(), lr=args.lr)

    best_prec1 = 0
    best_prec5 = 0

    for epoch in range(args.start_epoch, args.epochs):
        # train for one epoch (multi-label UNet loss)
        train(loader, I3DClassifier, multilabel_criterion, optimizer, epoch)

        # unconditionally snapshot every epoch
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.model,
                'state_dict': I3DClassifier.state_dict(),
            },
            False,
            args.exp_name + '_epoch{}'.format(epoch),
            path=ckpt_dir)

        # evaluate on validation set
        if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:

            prec1, prec5 = validate(eval_loader, I3DClassifier, criterion,
                                    (epoch + 1) * len(loader))

            # remember best prec@1 and save checkpoint
            is_best = (prec1 > best_prec1) or (prec5 > best_prec5)

            best_prec1 = max(prec1, best_prec1)
            best_prec5 = max(prec5, best_prec5)
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.model,
                    'state_dict': I3DClassifier.state_dict(),
                    'best_prec1': best_prec1,
                    'best_prec5': best_prec5
                },
                is_best,
                args.exp_name + '_epoch{}'.format(epoch),
                path=ckpt_dir)

    print(
        'Experiment {} finished! Best Accu@1 is {:.6f}, Best Accu@5 is {:.6f}.'
        .format(args.exp_name, best_prec1, best_prec5))
# Beispiel #5
# 0
def main(args):
    """Train an I3D-feature + linear-head classifier on the background set.

    Args:
        args: parsed CLI namespace. Fields used here: restore, evaluate,
            start_epoch, epochs, batch_size, num_workers, lr, eval_freq,
            model, tsn, exp_name.
    """
    num_class = 102  # presumably 101 actions + 1 background class — confirm
    feature_length = 800
    clf = Classifier(feature_length, num_class, isbn=True)
    fe = I3DFeatureExtractor()
    I3DClassifier = torch.nn.Sequential(fe, clf)

    if args.restore:
        if os.path.isfile(args.restore):
            # NOTE(review): the actual state-dict loading below is commented
            # out, so a valid --restore path currently loads NOTHING — the
            # import is a no-op. Confirm whether load_i3d_checkpoint should
            # be called here.
            from models.module import load_i3d_checkpoint
            # from collections import OrderedDict
            # print(("=> loading checkpoint '{}'".format(args.restore)))
            # checkpoint = torch.load(args.restore)
            # new_ckpt = OrderedDict()
            # for key in checkpoint['state_dict'].keys():
            #     new_ckpt[key[7:]] = checkpoint['state_dict'][key]
            # args.start_epoch = checkpoint['epoch']
            # I3DClassifier.load_state_dict(new_ckpt)
            # print(("=> loaded checkpoint '{}' (epoch {})"
            #        .format(args.evaluate, checkpoint['epoch'])))
        else:
            print("=> no checkpoint found at '{}'".format(args.restore))

    # Use every visible GPU.
    I3DClassifier = torch.nn.DataParallel(
        I3DClassifier,
        device_ids=list(range(torch.cuda.device_count()))).cuda()
    # I3DClassifier = I3DClassifier.cuda()

    ds = build_video_dataset("background", train=True, test_rate=0.05)['dataset']
    eval_ds = build_video_dataset("background", train=False, test_rate=0.05)['dataset']

    loader = torch.utils.data.DataLoader(
        ds, batch_size=args.batch_size, shuffle=True,
        num_workers=args.num_workers, pin_memory=True)

    eval_loader = torch.utils.data.DataLoader(
        eval_ds, batch_size=args.batch_size, shuffle=True,
        num_workers=args.num_workers, pin_memory=True)

    criterion = torch.nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.Adam(I3DClassifier.parameters(), lr=args.lr)

    best_prec1 = 0

    # Evaluation-only mode: validate once and return the top-1 accuracy.
    if args.evaluate:
        prec1 = validate(eval_loader, I3DClassifier, criterion)
        return prec1

    # (removed an unprofessional leftover debug print here)

    for epoch in range(args.start_epoch, args.epochs):
        # train for one epoch
        train(loader, I3DClassifier, criterion, optimizer, epoch)

        # evaluate on validation set
        if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:
            prec1 = validate(eval_loader, I3DClassifier, criterion, (epoch + 1) * len(loader))

            # remember best prec@1 and save checkpoint
            is_best = (prec1 > best_prec1)

            best_prec1 = max(prec1, best_prec1)
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.model,
                'tsn_like': args.tsn,
                'state_dict': I3DClassifier.state_dict(),
                'best_prec1': best_prec1,
            }, is_best, args.exp_name)

    print('Experiment {} finished! Best Accu@1 is {:.6f}'.format(args.exp_name, best_prec1))
def _reset_train_meters():
    """Reset the module-level progress meters (top1/top5/losses/timers)."""
    top1.reset()
    top5.reset()
    losses.reset()
    batch_time.reset()
    data_time.reset()


def _scale_lr(optimizer, divisor):
    """Divide the learning rate of the optimizer's last param group by *divisor*."""
    optimizer.param_groups[-1]['lr'] = optimizer.param_groups[-1]['lr'] / divisor


def main(args):
    """Train UntrimmedNetBack on pre-extracted features (or end-to-end with
    an I3D front end) and validate on the THUMOS test feature set.

    Args:
        args: parsed CLI namespace. Fields used here: fuse_type, front_end,
            clip_num, dataset, batch_size, num_worker, restore, evaluate,
            modality, lr, start_epoch, epochs, eval_freq, exp_name.
    """
    fuser = Fuser(fuse_type=args.fuse_type, s=16)
    if args.front_end:
        train_dataset = build_video_dataset(
            "ucf", train=True, unet=True,
            unet_clip_num=args.clip_num)['dataset']
    else:
        if args.dataset == 'ucf':
            train_dataset = FeatureDataset('features/ucf101/data.csv', fuser)
        elif args.dataset == 'thumos':
            train_dataset = FeatureDataset(
                'features/kinetics/thumos_validation/data.csv', fuser)
        else:
            raise ValueError
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.num_worker,
                                               pin_memory=True)

    # Evaluation uses unfused features and the dataset directly (no loader).
    eval_dataset = FeatureDataset('features/kinetics/thumos_test/data.csv',
                                  Fuser(fuse_type='none'))
    num_class = 101

    front = None
    if args.front_end:
        # Frozen feature extractor ahead of the trainable back end.
        front = I3DFeatureExtractor()
        front = torch.nn.DataParallel(
            front,
            device_ids=[i for i in range(torch.cuda.device_count())]).cuda()
        front.eval()

    model = UntrimmedNetBack(num_class)
    if args.restore:
        if os.path.isfile(args.restore):
            print("=> loading checkpoint '{}'".format(args.restore))
            checkpoint = torch.load(args.restore)
            # Strip the 'module.' DataParallel prefix and keep only keys the
            # bare model actually has.
            new = OrderedDict()
            for key in checkpoint['state_dict'].keys():
                if key[7:] in model.state_dict():
                    new[key[7:]] = checkpoint['state_dict'][key]
            model.load_state_dict(new)
            # BUG FIX: report the checkpoint actually loaded (args.restore);
            # the original printed args.evaluate by copy-paste mistake.
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.restore, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.restore))
    model = torch.nn.DataParallel(
        model,
        device_ids=[i for i in range(torch.cuda.device_count())]).cuda()

    criterion = UnetLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=0.0005)

    best_prec1 = 0
    best_prec5 = 0
    lastavg = 0

    if args.evaluate:
        best_prec1, best_prec5 = validate(eval_dataset, model, criterion,
                                          args.modality)
        print(
            'Experiment {} finished! Best Accu@1 is {:.6f}, Best Accu@5 is {:.6f}.'
            .format(args.exp_name, best_prec1, best_prec5))
        return

    for epoch in range(args.start_epoch, args.epochs):
        train(train_loader, model, criterion, optimizer, epoch, args.modality,
              front)

        # Per-dataset learning-rate schedule and meter warm-up reset.
        if args.dataset == 'thumos':
            if (epoch + 1) == 100:
                lastavg = losses.avg  # snapshot before clearing (unused downstream)
                _reset_train_meters()
            if (epoch + 1) == 1000:
                _scale_lr(optimizer, 10)
            if (epoch + 1) in (2000, 3000, 4000):
                _scale_lr(optimizer, 2)
        elif args.dataset == 'ucf':
            if (epoch + 1) == 10:
                lastavg = losses.avg
                _reset_train_meters()
            if (epoch + 1) == 400:
                _scale_lr(optimizer, 10)
            if (epoch + 1) == 800:
                _scale_lr(optimizer, 2)
        else:
            raise ValueError

        if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:
            lastavg = losses.avg
            _reset_train_meters()

            prefix = 'result/{}'.format(args.exp_name)
            if not os.path.exists(prefix):
                os.mkdir(prefix)
            prec1, prec5 = validate(eval_dataset, model, criterion,
                                    args.modality)

            is_best = (prec1 > best_prec1) or (prec5 > best_prec5)

            best_prec1 = max(prec1, best_prec1)
            best_prec5 = max(prec5, best_prec5)
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec1,
                    'best_prec5': best_prec5
                }, is_best, args.exp_name + "_epoch{}".format(epoch), prefix)
    print(args)
    print(
        'Experiment {} finished! Best Accu@1 is {:.6f}, Best Accu@5 is {:.6f}. Saved@ {}'
        .format(
            args.exp_name, best_prec1, best_prec5,
            'result/{}/{}_epoch{}'.format(args.exp_name, args.exp_name,
                                          epoch)))
def main(args):
    """Train UntrimmedNet on UCF-101 or THUMOS validation clips and
    validate on the THUMOS test split.

    Args:
        args: parsed CLI namespace. Fields used here: fuse_type, dataset,
            clip_num, modality, batch_size, num_worker, restore, evaluate,
            optimizer, lr, lr_policy, start_epoch, epochs, eval_freq,
            exp_name.
    """
    fuser = Fuser(fuse_type=args.fuse_type, s=4)
    if args.dataset == 'ucf':
        train_dataset = build_video_dataset(
            "ucf", train=True, unet=True, unet_clip_num=args.clip_num,
            modality=args.modality)['dataset']
    elif args.dataset == 'thumos':
        train_dataset = build_video_dataset(
            "thumos_validation", train=True, unet=True,
            unet_clip_num=args.clip_num, modality=args.modality)['dataset']
    else:
        raise ValueError
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.num_worker, pin_memory=True)
    # ds = build_video_dataset('thumos_validation', False, single=True, single_batch_size=batch_size)['dataset']

    # BUG FIX: the original passed ``train==False`` — a comparison against
    # the module-level ``train`` function (always False) — instead of the
    # keyword argument ``train=False``. Same value, but only by accident.
    # Informal test split — not the official evaluation protocol.
    eval_dataset = build_video_dataset(
        "thumos_test", train=False, single=True, modality=args.modality,
        single_batch_size=args.batch_size)['dataset']
    eval_loader = eval_dataset
    # eval_loader = torch.utils.data.DataLoader(eval_dataset, batch_size=args.batch_size, shuffle=False,
    #                                            num_workers=args.num_worker,
    #                                            pin_memory=True)
    num_class = 101
    model = UntrimmedNet(num_class, args.modality, reduce=True)
    if args.restore:
        if os.path.isfile(args.restore):
            print("=> loading checkpoint '{}'".format(args.restore))
            checkpoint = torch.load(args.restore)
            # Strip the 'module.' DataParallel prefix and keep only keys the
            # bare model actually has. (Removed a dead ``new = checkpoint``
            # assignment that was immediately overwritten.)
            new = OrderedDict()
            for key in checkpoint['state_dict'].keys():
                if key[7:] in model.state_dict():
                    new[key[7:]] = checkpoint['state_dict'][key]
            model.load_state_dict(new)
            # print(("=> loaded checkpoint '{}' (epoch {})"
            #        .format(args.evaluate, checkpoint['epoch'])))
        else:
            print("=> no checkpoint found at '{}'".format(args.restore))

    model = torch.nn.DataParallel(
        model,
        device_ids=[i for i in range(torch.cuda.device_count())]).cuda()

    criterion = UnetLoss().cuda()

    # Only optimize parameters that are not frozen.
    para = filter(lambda p: p.requires_grad, model.parameters())
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(para, lr=args.lr, momentum=0.9,
                                    weight_decay=0.0005)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(para, lr=args.lr, weight_decay=0.0005)
    else:
        # BUG FIX: previously fell through silently and crashed later with
        # NameError on ``optimizer``.
        raise ValueError('unsupported optimizer: {}'.format(args.optimizer))

    best_prec1 = 0
    best_prec5 = 0

    prefix = 'result/{}'.format(args.exp_name)
    if not os.path.exists(prefix):
        os.mkdir(prefix)

    if args.evaluate:
        best_prec1, best_prec5 = validate(eval_loader, model, criterion, args.modality)
        print('Experiment {} finished! Best Accu@1 is {:.6f}, Best Accu@5 is {:.6f}.'.format(args.exp_name, best_prec1,
                                                                                             best_prec5))
        return

    for epoch in range(args.start_epoch, args.epochs):
        train(train_loader, model, criterion, optimizer, epoch, args.modality)

        # Snapshot every epoch for short runs, every 10 epochs otherwise.
        if args.epochs < 20 or (epoch + 1) % 10 == 0:
            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
            }, False, args.exp_name + "_epoch{}".format(epoch), prefix)

        # Periodically clear the global progress meters.
        if (epoch + 1) % 5 == 0:
            top1.reset()
            top5.reset()
            lastavg = losses.avg
            losses.reset()
            batch_time.reset()
            data_time.reset()

        # Learning-rate schedules (named policies, not tied to the dataset).
        if args.lr_policy == 'thumos':
            if (epoch + 1) % 100 == 0:
                optimizer.param_groups[-1]['lr'] = optimizer.param_groups[-1]['lr'] / 2
        elif args.lr_policy == 'ucf2':
            if (epoch + 1) == 4:
                optimizer.param_groups[-1]['lr'] = optimizer.param_groups[-1]['lr'] / 5
            # NOTE(review): the /2 step fires twice at epoch 8 (net /4); the
            # second condition was probably meant for a later epoch — confirm
            # before changing, behavior preserved as-is.
            if (epoch + 1) == 8:
                optimizer.param_groups[-1]['lr'] = optimizer.param_groups[-1]['lr'] / 2
            if (epoch + 1) == 8:
                optimizer.param_groups[-1]['lr'] = optimizer.param_groups[-1]['lr'] / 2
        elif args.lr_policy == 'ucf3':
            if (epoch + 1) == 1:
                optimizer.param_groups[-1]['lr'] = optimizer.param_groups[-1]['lr'] / 10
            if (epoch + 1) == 2:
                optimizer.param_groups[-1]['lr'] = optimizer.param_groups[-1]['lr'] / 2
        elif args.lr_policy == 'ucf':
            if (epoch + 1) == 1:
                top1.reset()
                top5.reset()
                lastavg = losses.avg
                losses.reset()
                batch_time.reset()
                data_time.reset()
            if (epoch + 1) == 6:
                optimizer.param_groups[-1]['lr'] = optimizer.param_groups[-1]['lr'] / 10
            if (epoch + 1) == 12:
                optimizer.param_groups[-1]['lr'] = optimizer.param_groups[-1]['lr'] / 2

        if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:
            top1.reset()
            top5.reset()
            lastavg = losses.avg
            losses.reset()
            batch_time.reset()
            data_time.reset()

            prec1, prec5 = validate(eval_loader, model, criterion, args.modality)

            is_best = (prec1 > best_prec1) or (prec5 > best_prec5)

            best_prec1 = max(prec1, best_prec1)
            best_prec5 = max(prec5, best_prec5)
            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'best_prec5': best_prec5
            }, is_best, args.exp_name + "_epoch{}".format(epoch), prefix)
    print('Experiment {} finished! Best Accu@1 is {:.6f}, Best Accu@5 is {:.6f}. Saved@ {}'.format(args.exp_name,
                                                                                                   best_prec1,
                                                                                                   best_prec5,
                                                                                                   'result/{}/{}_epoch{}'.format(
                                                                                                       args.exp_name,
                                                                                                       args.exp_name,
                                                                                                       epoch)))