Code example #1
def main():
    global opt, best_mAP
    opt = parse()
    tee.Tee(opt.cache + '/log_0724-valvideo.txt')
    #print(vars(opt))
    seed(opt.manual_seed)

    print('1. create_model')
    base_model, logits_model, criterion, base_optimizer, logits_optimizer = create_model(
        opt)
    if opt.resume:
        print('checkpoints load')
        best_mAP = checkpoints.load(opt, base_model, logits_model,
                                    base_optimizer, logits_optimizer)

    #print('base_model = InceptionI3D Networks') # InceptionI3D Networks
    #print(base_model)
    #print('logits_model = AsyncTFBase: Linear Networks') # AsyncTFBase: Linear Networks
    #print(logits_model)

    trainer = train.Trainer()

    print('2. get_dataset')
    train_loader, val_loader, valvideo_loader = get_dataset(opt)
    #print('train_loader')    # [56586, [25,img,s,v,o,meta]]
    #print(train_loader)      # 56586 = number of pairs
    #print('val_loader')      # [12676, [25,img,s,v,o,meta]]
    #print(val_loader)
    #print('valvideo_loader') # [1863, [25+1,img,s,v,o,meta]]
    #print(valvideo_loader)   # 1863 = number of videos

    if opt.evaluate:
        trainer.validate(val_loader, base_model, logits_model, criterion, -1,
                         opt)
        trainer.validate_video(valvideo_loader, base_model, logits_model,
                               criterion, -1, opt)
        return

    print('3.3 Validation Video')
    #if opt.distributed:
    #    trainer.train_sampler.set_epoch(epoch)

    # This script has no training loop, so run a single video-level validation
    # pass (epoch index -1, as in the evaluate branch above).
    sov_mAP, sov_rec_at_n, sov_mprec_at_n = trainer.validate_video(
        valvideo_loader, base_model, logits_model, criterion, -1, opt)

    is_best = sov_mAP > best_mAP
    best_mAP = max(sov_mAP, best_mAP)
    scores = {
        'mAP': sov_mAP,
        'sov_rec_at_n': sov_rec_at_n,
        'sov_mprec_at_n': sov_mprec_at_n
    }
    checkpoints.score_file(scores,
                           "{}/model_{}.txt".format(opt.cache, 'valvideo'))
Code example #2
def main():
    global opt, best_mAP
    opt = parse()
    tee.Tee(opt.cache+'/log.txt')
    print(vars(opt))
    seed(opt.manual_seed)

    model, criterion, optimizer = create_model(opt)
    if opt.resume: best_mAP = checkpoints.load(opt, model, optimizer)
    print(model)
    trainer = train.Trainer()
    train_loader, val_loader, valvideo_loader = get_dataset(opt)

    if opt.evaluate:
        #trainer.validate(val_loader, model, criterion, -1, opt)
        trainer.validate_video(valvideo_loader, model, -1, opt)
        return

    for epoch in range(opt.start_epoch, opt.epochs):
        if opt.distributed:
            trainer.train_sampler.set_epoch(epoch)
        top1,top5 = trainer.train(train_loader, model, criterion, optimizer, epoch, opt)
        top1val,top5val = trainer.validate(val_loader, model, criterion, epoch, opt)
        mAP = trainer.validate_video(valvideo_loader, model, epoch, opt)
        is_best = mAP > best_mAP
        best_mAP = max(mAP, best_mAP)
        scores = {'top1train':top1,'top5train':top5,'top1val':top1val,'top5val':top5val,'mAP':mAP}
        checkpoints.save(epoch, opt, model, optimizer, is_best, scores)
Code example #3
def main():
    global args, best_top1
    args = parse()
    if not args.no_logger:
        tee.Tee(args.cache + '/log.txt')
    print(vars(args))
    seed(args.manual_seed)

    model, criterion, optimizer = create_model(args)
    if args.resume:
        best_top1 = checkpoints.load(args, model, optimizer)
    print(model)
    trainer = train.Trainer()
    loaders = get_dataset(args)
    train_loader = loaders[0]
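    # the full loaders tuple is passed on to validate() below; only the train
    # loader is used directly in the epoch loop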

    if args.evaluate:
        scores = validate(trainer, loaders, model, criterion, args)
        checkpoints.score_file(scores, "{}/model_000.txt".format(args.cache))
        return

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            trainer.train_sampler.set_epoch(epoch)
        scores = {}
        scores.update(trainer.train(train_loader, model, criterion, optimizer, epoch, args))
        scores.update(validate(trainer, loaders, model, criterion, args, epoch))

        is_best = scores[args.metric] > best_top1
        best_top1 = max(scores[args.metric], best_top1)
        checkpoints.save(epoch, args, model, optimizer, is_best, scores, args.metric)
    if not args.nopdb:
        pdb.set_trace()
Code example #4
def main():
    best_score = 0
    args = parse()
    if not args.no_logger:
        tee.Tee(args.cache + '/log.txt')
    print(vars(args))
    print('experiment folder: {}'.format(experiment_folder()))
    print('git hash: {}'.format(get_script_dir_commit_hash()))
    seed(args.manual_seed)
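    # cudnn.benchmark lets cuDNN auto-tune convolution algorithms (fastest for
    # fixed input shapes); both knobs can be disabled from the command line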
    cudnn.benchmark = not args.disable_cudnn_benchmark
    cudnn.enabled = not args.disable_cudnn

    metrics = get_metrics(args.metrics)
    tasks = get_tasks(args.tasks)
    model, criterion = get_model(args)
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     args.lr,
                                     weight_decay=args.weight_decay)
    else:
        assert False, "invalid optimizer"

    if args.resume:
        best_score = checkpoints.load(args, model, optimizer)
    print(model)
    trainer = train.Trainer()
    train_loader, val_loader = get_dataset(args)

    if args.evaluate:
        scores = validate(trainer, val_loader, model, criterion, args, metrics,
                          tasks, -1)
        print(scores)
        score_file(scores, "{}/model_999.txt".format(args.cache))
        return

    if args.warmups > 0:
        for i in range(args.warmups):
            print('warmup {}'.format(i))
            trainer.validate(train_loader, model, criterion, -1, metrics, args)
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            trainer.train_sampler.set_epoch(epoch)
        scores = {}
        scores.update(
            trainer.train(train_loader, model, criterion, optimizer, epoch,
                          metrics, args))
        scores.update(
            validate(trainer, val_loader, model, criterion, args, metrics,
                     tasks, epoch))
        is_best = scores[args.metric] > best_score
        best_score = max(scores[args.metric], best_score)
        checkpoints.save(epoch, args, model, optimizer, is_best, scores,
                         args.metric)
Code example #5
File: main.py  Project: DLwbm123/GSTEG_CVPR_2019
def main():
    global opt, best_mAP
    opt = parse()
    tee.Tee(opt.cache + '/log.txt')
    print(vars(opt))
    seed(opt.manual_seed)

    base_model, logits_model, criterion, base_optimizer, logits_optimizer = create_model(
        opt)
    if opt.resume:
        best_mAP = checkpoints.load(opt, base_model, logits_model,
                                    base_optimizer, logits_optimizer)
    print(logits_model)
    trainer = train.Trainer()
    train_loader, val_loader, valvideo_loader = get_dataset(opt)

    if opt.evaluate:
        trainer.validate(val_loader, base_model, logits_model, criterion, -1,
                         opt)
        trainer.validate_video(valvideo_loader, base_model, logits_model,
                               criterion, -1, opt)
        return

    for epoch in range(opt.start_epoch, opt.epochs):
        if opt.distributed:
            trainer.train_sampler.set_epoch(epoch)
        s_top1, s_top5, o_top1, o_top5, v_top1, v_top5, sov_top1 = trainer.train(
            train_loader, base_model, logits_model, criterion, base_optimizer,
            logits_optimizer, epoch, opt)
        s_top1val, s_top5val, o_top1val, o_top5val, v_top1val, v_top5val, sov_top1val = trainer.validate(
            val_loader, base_model, logits_model, criterion, epoch, opt)
        sov_mAP, sov_rec_at_n, sov_mprec_at_n = trainer.validate_video(
            valvideo_loader, base_model, logits_model, criterion, epoch, opt)
        is_best = sov_mAP > best_mAP
        best_mAP = max(sov_mAP, best_mAP)
        scores = {
            's_top1': s_top1,
            's_top5': s_top5,
            'o_top1': o_top1,
            'o_top5': o_top5,
            'v_top1': v_top1,
            'v_top5': v_top5,
            'sov_top1': sov_top1,
            's_top1val': s_top1val,
            's_top5val': s_top5val,
            'o_top1val': o_top1val,
            'o_top5val': o_top5val,
            'v_top1val': v_top1val,
            'v_top5val': v_top5val,
            'sov_top1val': sov_top1val,
            'mAP': sov_mAP,
            'sov_rec_at_n': sov_rec_at_n,
            'sov_mprec_at_n': sov_mprec_at_n
        }
        checkpoints.save(epoch, opt, base_model, logits_model, base_optimizer,
                         logits_optimizer, is_best, scores)
Code example #6
def main():
    """Main function for training and testing."""
    # Parse command line arguments and cache
    opt = opts.Opts().args
    utils.savecmd(opt.resume, sys.argv)

    utils.print_color_msg("==> Setting up data loader")
    train_loader, val_loader, test_loader = dataloader.create(opt)

    # Load checkpoint if specified, None otherwise
    utils.print_color_msg("==> Checking checkpoints")
    checkpoint = checkpoints.load(opt)

    utils.print_color_msg("==> Setting up model and criterion")
    model, optim_state = init.setup(opt, checkpoint)
    loss_fn = criterion.setup(opt, checkpoint)

    utils.print_color_msg("==> Loading trainer")
    trainer = train.create_trainer(model, loss_fn, opt, optim_state)

    best_loss = float('Inf')
    val_loss = float('Inf')
    start_epoch = max([1, opt.epochNum])
    if checkpoint is not None:
        start_epoch = checkpoint['epoch'] + 1
        best_loss = checkpoint['loss']
        print("".ljust(4) + "Previous best loss: " +
              utils.color_msg('%.5f' % best_loss))

    if opt.valOnly:
        assert start_epoch > 1, "There must be at least one epoch"
        utils.print_color_msg("==> Validation:")
        print("".ljust(4) + "=> Epoch %i" % (start_epoch - 1))
        trainer.val(val_loader, start_epoch - 1)
        sys.exit()

    if opt.testOnly:
        assert start_epoch > 1, "There must be at least one epoch"
        utils.print_color_msg("==> Testing:")
        print("".ljust(4) + "=> Epoch %i" % (start_epoch - 1))
        _, prediction, reference, post = trainer.test(test_loader,
                                                      start_epoch - 1)
        if opt.loss == 'BCELogit':
            prediction = F.sigmoid(torch.Tensor(prediction)).numpy()
        nce = evaluation.nce(reference, prediction)
        precision, recall, area = evaluation.pr(reference, prediction)
        precision_bl, recall_bl, area_bl = evaluation.pr(reference, post)
        utils.print_color_msg(
            "".ljust(7) + "NCE: %.4f. AUC(PR): %.4f. AUC(BL): %.4f" \
            %(nce, area, area_bl))
        trainer.logger['test'].write('NCE: %f\nAUC(PR): %f\n' % (nce, area))
        evaluation.plot_pr([precision, precision_bl], [recall, recall_bl],
                           [area, area_bl], ['BiRNN', 'posterior'], opt.resume)
        np.savez(os.path.join(opt.resume, 'result.npz'),
                 prediction=prediction,
                 reference=reference,
                 posteriors=post)
        sys.exit()

    utils.print_color_msg("==> Training:")
    for epoch in range(start_epoch, opt.nEpochs + 1):
        print("".ljust(4) + "=> Epoch %i" % epoch)
        best_model = False
        _ = trainer.train(train_loader, epoch, val_loss)

        if not opt.debug:
            val_loss = trainer.val(val_loader, epoch)
            if val_loss < best_loss:
                best_model = True
                print("".ljust(4) + "** Best model: " +
                      utils.color_msg('%.4f' % val_loss))
                best_loss = val_loss
            checkpoints.save(epoch, trainer.model, loss_fn,
                             trainer.optim_state, best_model, val_loss, opt)

    if not opt.debug:
        utils.print_color_msg("==> Testing:")
        _, prediction, reference, _ = trainer.test(test_loader, opt.nEpochs)
        prediction = F.sigmoid(torch.Tensor(prediction)).numpy()
        nce = evaluation.nce(reference, prediction)
        precision, recall, area = evaluation.pr(reference, prediction)
        utils.print_color_msg("".ljust(7) + "NCE: %.4f. AUC(PR): %.4f" %
                              (nce, area))
        trainer.logger['test'].write('NCE: %f\nAUC(PR): %f\n' % (nce, area))
        evaluation.plot_pr([precision], [recall], [area], ['BiRNN'],
                           opt.resume)

        # Flush write out and reset pointer
        for open_file in trainer.logger.values():
            open_file.flush()
            open_file.seek(0)
        plot.plot(opt.resume, opt.onebest)
Code example #7
File: predict_vgg16.py  Project: vnigade/pytorch-i3d
def run(max_steps=64e3,
        arch='vgg16',
        mode='rgb',
        root='/ssd2/charades/Charades_v1_rgb',
        split='charades/charades.json',
        batch_size=1,
        load_model='',
        save_dir=''):
    # setup dataset
    # test_transforms = transforms.Compose([videotransforms.CenterCrop(224)])
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    inputsize = 224
    test_transforms = transforms.Compose([
        transforms.Resize(int(256. / 224 * inputsize)),
        transforms.CenterCrop(inputsize),
        transforms.ToTensor(), normalize
    ])

    dataloaders = {}
    datasets = {}
    phases = ['train']
    if 'train' in phases:
        dataset = Dataset(split,
                          'training',
                          root,
                          mode,
                          test_transforms,
                          num=-1,
                          save_dir=save_dir,
                          rescale=False,
                          model='vgg16')
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=batch_size,
                                                 shuffle=True,
                                                 num_workers=4,
                                                 pin_memory=True)
        dataloaders['train'] = dataloader
        datasets['train'] = dataset

    if 'val' in phases:
        val_dataset = Dataset(split,
                              'testing',
                              root,
                              mode,
                              test_transforms,
                              num=-1,
                              save_dir=save_dir,
                              rescale=False,
                              model='vgg16')
        val_dataloader = torch.utils.data.DataLoader(val_dataset,
                                                     batch_size=batch_size,
                                                     shuffle=True,
                                                     num_workers=4,
                                                     pin_memory=True)
        dataloaders['val'] = val_dataloader
        datasets['val'] = val_dataset

    # dataloaders = {'train': dataloader, 'val': val_dataloader}
    # datasets = {'train': dataset, 'val': val_dataset}

    # setup the model
    model, criterion, optimizer = create_model(arch,
                                               nclass=args.nclass,
                                               pretrained=True,
                                               distributed=False)
    checkpoints.load(load_model, model, optimizer)
    print("Loaded model from checkpoint {0}".format(load_model))
    print(model)

    # phases = ['train', 'val']
    test_transforms = transforms.Compose([transforms.ToTensor(), normalize])
    for phase in phases:
        model.train(False)  # Set model to evaluate mode

        tot_loss = 0.0
        tot_loc_loss = 0.0
        tot_cls_loss = 0.0

        # Iterate over data.
        print("Phase is {}".format(phase))
        spacing = np.linspace(0, _WINDOW_SIZE, _GAP, dtype=int, endpoint=False)
        for data in dataloaders[phase]:
            # get the inputs
            inputs, labels, name = data
            if args.overwrite == 0 and os.path.exists(
                    os.path.join(save_dir, name[0])):
                continue

            b, c, t, h, w = inputs.shape
            inputs = inputs.numpy()
            if t < _WINDOW_SIZE:
                # Append start frames to the input
                extra_frames = inputs[:, :, 0:(_WINDOW_SIZE - t), :, :]
                inputs = np.concatenate((inputs, extra_frames), axis=2)

            # process each window
            window_start = 0
            window_end = window_start + _WINDOW_SIZE
            output_dict = defaultdict(lambda: defaultdict(list))
            idx = 0
            while window_end < t:
                indexes = spacing + window_start
                input = np.squeeze(inputs[:, :, indexes], axis=0)
                input = np.transpose(input, (1, 2, 3, 0))
                # transform is performed on single image at a time
                # input_batch = []
                # for i in range(input.shape[0]):
                #   input_batch.append(test_transforms(input[i])) #.unsqueeze_(0)
                # input_batch = torch.stack(input_batch)
                input_batch = input

                with torch.no_grad():
                    ip = Variable(torch.from_numpy(input_batch).cuda())
                    # features = i3d(ip)
                    features = model(ip)
                    # Perform softmax and then average the score for the video/window level prediction
                    features = torch.nn.Softmax(dim=1)(features)
                    features = torch.mean(features, dim=0)
                output_dict["window_" + str(idx)]["scores"] = features.cpu(
                ).detach().numpy().flatten().tolist()
                idx += 1
                window_start += _WINDOW_STRIDE
                window_end = window_start + _WINDOW_SIZE
            with open(save_dir + "/" + name[0], 'w') as outfile:
                json.dump(output_dict, outfile)
                print("{0} Scores saved for {1}".format(
                    len(output_dict), name[0]))
Code example #8
def main():
    """Main function for training and testing."""
    # Parse command line arguments and cache
    opt = opts.Opts().args
    utils.savecmd(opt.resume, sys.argv)

    utils.print_color_msg("==> Setting up data loader")
    train_loader, val_loader, test_loader = dataloader.create(opt)

    # Load checkpoint if specified, None otherwise
    utils.print_color_msg("==> Checking checkpoints")
    checkpoint = checkpoints.load(opt)

    utils.print_color_msg("==> Setting up model and criterion")
    model, optim_state = init.setup(opt, checkpoint)
    loss_fn = criterion.setup(opt, checkpoint)

    utils.print_color_msg("==> Loading trainer")
    trainer = train.create_trainer(model, loss_fn, opt, optim_state)

    best_loss = float('Inf')
    val_loss = float('Inf')
    start_epoch = max([1, opt.epochNum])
    if checkpoint is not None:
        start_epoch = checkpoint['epoch'] + 1
        best_loss = checkpoint['loss']
        print("".ljust(4) + "Previous best loss: " +
              utils.color_msg('%.5f' % best_loss))

    if opt.valOnly:
        assert start_epoch > 1, "There must be at least one epoch"
        utils.print_color_msg("==> Validation:")
        print("".ljust(4) + "=> Epoch %i" % (start_epoch - 1))
        trainer.val(val_loader, start_epoch - 1)
        sys.exit()

    if opt.testOnly:
        assert start_epoch > 1, "There must be at least one epoch"
        utils.print_color_msg("==> Testing:")
        print("".ljust(4) + "=> Epoch %i" % (start_epoch - 1))
        _, prediction, reference, post, seq_length = trainer.test(
            test_loader, start_epoch - 1)

        prediction = F.sigmoid(torch.Tensor(prediction)).numpy()
        nce = evaluation.nce(reference, prediction)
        precision, recall, area, threshold = evaluation.pr(
            reference, prediction)
        precision_bl, recall_bl, area_bl, _ = evaluation.pr(reference, post)
        f1, f1_precision, f1_recall, f1_threshold = evaluation.f1(
            precision, recall, threshold)
        tpr, fpr, roc_area = evaluation.roc(reference, prediction)

        # Calculate stats for sequences binned by the posterior
        limits = np.linspace(0, 1, 11).tolist()
        utils.print_color_msg('\n\nEffect of Input Posterior on Performance')
        for i in range(len(limits) - 1):
            ref, pred, p = evaluation.bin_results(reference, prediction, post, measure=post, \
                                                  lower_limit=limits[i], upper_limit=limits[i+1])
            if ref.size:
                nce_post = evaluation.nce(ref, pred)
                nce_post_bl = evaluation.nce(ref, p)
                precision_post, recall_post, area_post, threshold_post = evaluation.pr(
                    ref, pred)
                precision_post_bl, recall_post_bl, area_post_bl, threshold_post_bl = evaluation.pr(
                    ref, p)
                f1_post, _, _, _ = evaluation.f1(precision_post, recall_post,
                                                 threshold_post)
                f1_post_bl, _, _, _ = evaluation.f1(precision_post_bl,
                                                    recall_post_bl,
                                                    threshold_post_bl)
                _, _, roc_area_post = evaluation.roc(ref, pred)
                print('%.1f. - %.1f. %d    Results (model/bl)     NCE: %.4f. , %.4f.    AUC(PR): %.4f. , %.4f.    F-1:  %.4f. , %.4f.    AUC(ROC): %.4f.'\
                      %(limits[i], limits[i+1], int(ref.size), nce_post, nce_post_bl, area_post, area_post_bl, f1_post, f1_post_bl, roc_area_post))
            else:
                print('%.1f. - %.1f. Empty' % (limits[i], limits[i + 1]))

        # Calculate stats for sequences binned by sequence length
        limits = [0, 2, 3, 6, 10, 20, 40]
        utils.print_color_msg('\n\nEffect of Sequence Length on Performance')
        for i in range(len(limits) - 1):
            ref, pred, p = evaluation.bin_results(reference, prediction, post, measure=seq_length, \
                                                  lower_limit=limits[i], upper_limit=limits[i+1])
            if ref.size:
                nce_len = evaluation.nce(ref, pred)
                nce_len_bl = evaluation.nce(ref, p)
                precision_len, recall_len, area_len, threshold_len = evaluation.pr(
                    ref, pred)
                precision_len_bl, recall_len_bl, area_len_bl, threshold_len_bl = evaluation.pr(
                    ref, p)
                f1_len, _, _, _ = evaluation.f1(precision_len, recall_len,
                                                threshold_len)
                f1_len_bl, _, _, _ = evaluation.f1(precision_len_bl,
                                                   recall_len_bl,
                                                   threshold_len_bl)
                _, _, roc_area_len = evaluation.roc(ref, pred)
                print('%d - %d  %d   Results (model/bl)    NCE: %.4f. , %.4f.    AUC: %.4f. , %.4f.    F-1:  %.4f. , %.4f.    AUC(ROC): %.4f.'\
                      %(limits[i], limits[i+1], int(ref.size), nce_len, nce_len_bl, area_len, area_len_bl, f1_len, f1_len_bl, roc_area_len))
            else:
                print('%d - %d Empty' % (limits[i], limits[i + 1]))

        # Calculate calibration stats
        limits = np.linspace(0, 1, 11).tolist()
        print('\n\nCalibration Stats')
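        # Expected Calibration Error (ECE): bin predictions by confidence and
        # accumulate |mean accuracy - mean confidence| weighted by bin size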
        ece = 0
        for i in range(len(limits) - 1):
            ref, pred, p = evaluation.bin_results(reference, prediction, post, measure=prediction, \
                                                  lower_limit=limits[i], upper_limit=limits[i+1])
            if ref.size:
                accuracy_bin = np.mean(ref)
                confidence_bin = np.mean(pred)
                posterior_bin = np.mean(p)
                ece += abs(accuracy_bin -
                           confidence_bin) * len(ref) / len(reference)
                print(
                    '%.1f. - %.1f. %d    Reference: %.4f. ,    Prediction: %.4f. ,    Posterior: %.4f.'
                    % (limits[i], limits[i + 1], int(ref.size), accuracy_bin,
                       confidence_bin, posterior_bin))
            else:
                print('%.1f. - %.1f. Empty' % (limits[i], limits[i + 1]))

        # Print Test Stats
        print('\n\nTest Stats')
        print(
            "".ljust(7) + "\nNCE: %.4f. \nAUC(PR): %.4f. \nF-1: %.4f. p: %.4f. r: %.4f. t: %.4f. \nAUC(ROC): %.4f. \nECE: %.4f. " \
            %(nce, area, f1, f1_precision, f1_recall, f1_threshold, roc_area, ece))

        trainer.logger['test'].write('NCE: %f\nAUC(PR): %f\n' % (nce, area))
        evaluation.plot_pr([precision, precision_bl], [recall, recall_bl],
                           [area, area_bl], ['BiLatticeRNN', 'posterior'],
                           opt.resume)
        np.savez(os.path.join(opt.resume, 'result.npz'),
                 prediction=prediction,
                 reference=reference,
                 posteriors=post)
        sys.exit()

    utils.print_color_msg("==> Training:")
    for epoch in range(start_epoch, opt.nEpochs + 1):
        print("".ljust(4) + "=> Epoch %i" % epoch)
        best_model = False
        _ = trainer.train(train_loader, epoch, val_loss)

        if not opt.debug:
            val_loss = trainer.val(val_loader, epoch)
            if val_loss < best_loss:
                best_model = True
                print("".ljust(4) + "** Best model: " +
                      utils.color_msg('%.4f' % val_loss))
                best_loss = val_loss
            checkpoints.save(epoch, trainer.model, loss_fn,
                             trainer.optim_state, best_model, val_loss, opt)

    if not opt.debug:
        utils.print_color_msg("==> Testing:")
        _, prediction, reference, _, _ = trainer.test(test_loader, opt.nEpochs)
        prediction = F.sigmoid(torch.Tensor(prediction)).numpy()
        nce = evaluation.nce(reference, prediction)
        precision, recall, area, _ = evaluation.pr(reference, prediction)
        utils.print_color_msg("".ljust(7) + "NCE: %.4f. AUC(PR): %.4f" %
                              (nce, area))
        trainer.logger['test'].write('NCE: %f\nAUC(PR): %f\n' % (nce, area))
        evaluation.plot_pr([precision], [recall], [area], ['BiLatticeRNN'],
                           opt.resume)

        # Flush write out and reset pointer
        for open_file in trainer.logger.values():
            open_file.flush()
            open_file.seek(0)
        plot.plot(opt.resume, opt.onebest)
Code example #9
File: main.py  Project: gotaku6629/GSTEG_Prediction
def main():
    global opt, best_mAP
    opt = parse()
    tee.Tee(opt.cache + '/log0819-t2f51.txt')
    #print(vars(opt))
    seed(opt.manual_seed)

    print('1. create_model')
    base_model, logits_model, criterion, base_optimizer, logits_optimizer = create_model(
        opt)
    if opt.resume:
        print('checkpoints load')
        best_mAP = checkpoints.load(opt, base_model, logits_model,
                                    base_optimizer, logits_optimizer)
        #checkpoints.load(opt, base_model, logits_model, base_optimizer, logits_optimizer)

    print('base_model = InceptionI3D Networks')
    #print(base_model)
    print('logits_model = AsyncTFBase: Linear Networks')
    #print(logits_model)

    trainer = train.Trainer()

    print('2. get_dataset')
    train_loader, val_loader, valvideo_loader = get_dataset(opt)
    #print('train_loader')    # [56586, [25, img, tuple]]
    #print(train_loader)      # 56586 (img, tuple) pairs
    #print('val_loader')      # [12676, [25, img, tuple]]
    #print(val_loader)
    #print('valvideo_loader') # [1863, [25, img, tuple]]
    #print(valvideo_loader)   # 1863 = number of videos

    if opt.evaluate:
        trainer.validate(val_loader, base_model, logits_model, criterion, -1,
                         opt)
        trainer.validate_video(valvideo_loader, base_model, logits_model,
                               criterion, -1, opt)
        return

    # write per-iteration training losses to a CSV log
    with open('train_log.csv', 'w', newline='') as csvfile:  # newline='' avoids blank rows on Windows
        csv_writer = csv.writer(csvfile)
        csv_writer.writerow(['i', 'loss', 's', 'v', 'o'])
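        # csv_writer is handed to trainer.train below so per-iteration losses
        # can be appended to train_log.csv during training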

        print('3. Train & Test (Validation)')
        for epoch in range(opt.start_epoch, opt.epochs):  # 0~20
            #print('epoch = ', epoch)
            if opt.distributed:
                trainer.train_sampler.set_epoch(epoch)

            print('3.1 Training')
            s_top1, s_top5, o_top1, o_top5, v_top1, v_top5, sov_top1 = trainer.train(
                train_loader, base_model, logits_model, criterion,
                base_optimizer, logits_optimizer, epoch, opt, csv_writer)

            print('3.2 Test (Validation)')
            s_top1val, s_top5val, o_top1val, o_top5val, v_top1val, v_top5val, sov_top1val = trainer.validate(
                val_loader, base_model, logits_model, criterion, epoch, opt)

            print('3.3 Test (Validation_Video)')
            sov_mAP, sov_rec_at_n, sov_mprec_at_n = trainer.validate_video(
                valvideo_loader, base_model, logits_model, criterion, epoch,
                opt)

            is_best = sov_mAP > best_mAP
            best_mAP = max(sov_mAP, best_mAP)
            scores = {
                's_top1': s_top1,
                's_top5': s_top5,
                'o_top1': o_top1,
                'o_top5': o_top5,
                'v_top1': v_top1,
                'v_top5': v_top5,
                'sov_top1': sov_top1,
                's_top1val': s_top1val,
                's_top5val': s_top5val,
                'o_top1val': o_top1val,
                'o_top5val': o_top5val,
                'v_top1val': v_top1val,
                'v_top5val': v_top5val,
                'sov_top1val': sov_top1val,
                'mAP': sov_mAP,
                'sov_rec_at_n': sov_rec_at_n,
                'sov_mprec_at_n': sov_mprec_at_n
            }
            #scores = {'s_top1':s_top1,'s_top5':s_top5,'o_top1':o_top1,'o_top5':o_top5,'v_top1':v_top1,'v_top5':v_top5,'sov_top1':sov_top1,'s_top1val':s_top1val,'s_top5val':s_top5val,'o_top1val':o_top1val,'o_top5val':o_top5val,'v_top1val':v_top1val,'v_top5val':v_top5val,'sov_top1val':sov_top1val}
            checkpoints.save(epoch, opt, base_model, logits_model,
                             base_optimizer, logits_optimizer, is_best, scores)