Exemplo n.º 1
0
import random
import numpy as np

# Fix both NumPy's and the stdlib's RNG seeds so recommender
# evaluation results are reproducible across runs.
np.random.seed(0)
random.seed(0)


def load_movielens():
    """Load MovieLens ratings and popularity ranks.

    Returns:
        tuple: ``(ml, data, rankings)`` — the MovieLens helper object,
        the loaded ratings data, and the popularity rankings used later
        to measure recommendation novelty.
    """
    ml = MovieLens()
    print("Loading movie ratings...")
    ratings = ml.load()
    banner = "\nComputing movie popularity ranks so we can measure novelty later..."
    print(banner)
    pop_ranks = ml.get_popularity_ranks()
    return ml, ratings, pop_ranks


if __name__ == '__main__':
    # Shared MovieLens data set used by every recommender below.
    lens, ratings, pop_ranks = load_movielens()

    # Build the evaluator and register the algorithms to compare;
    # random predictions serve as the baseline.
    evaluator = Evaluator(ratings, pop_ranks)
    evaluator.add_algorithm(ContentKNNAlgorithm(), "ContentKNN")
    evaluator.add_algorithm(NormalPredictor(), "Random")

    evaluator.evaluate(False)
    evaluator.sample_topn_recs(lens)
Exemplo n.º 2
0
    parser.add_argument('--config', type=str, default=None)

    args = parser.parse_args()

    return args


if __name__ == "__main__":
    # configuration
    configuration = get_configuration(AttentionMapNetConfigurator(),
                                      get_args())

    # model: a pretrained ResNet-34 backbone feeding the pose network
    feature_extractor = models.resnet34(pretrained=True)
    if configuration.model == "SEAttentionPoseNet":
        posenet = SEAttentionPoseNet(resnet=feature_extractor,
                                     config=configuration,
                                     drop_rate=configuration.dropout)
    elif configuration.model == "LearnGAPoseNet":
        posenet = LearnGAPoseNet(feature_extractor=feature_extractor,
                                 drop_rate=configuration.dropout)
    else:
        # Fail fast on an unrecognized model name instead of hitting a
        # confusing NameError on `posenet` further below.
        raise ValueError(
            "Unknown model: {!r}".format(configuration.model))

    dataloader = get_dataloader(configuration)

    evaluator = Evaluator(config=configuration,
                          model=posenet,
                          dataloader=dataloader)

    evaluator.run()
Exemplo n.º 3
0
def main():
    """Application entry point.

    Runs one or more detection models over a set of images, draws the
    predicted bounding boxes, and either displays each result on screen
    or writes it under ``args.save_to``. When ``args.images`` is a JSON
    markup file, ground-truth boxes are drawn side by side with the
    predictions and detection metrics are printed per model.
    """
    args = get_args()
    if args.save_to is not None:
        args.save_to.mkdir(exist_ok=True, parents=True)

    # A file argument is treated as JSON markup (annotated samples);
    # otherwise it is a directory of raw images and no metrics are kept.
    if args.images.is_file():
        markup = True
        evaluator = Evaluator()

        with open(str(args.images)) as f:
            samples = json.load(f)

    else:
        markup = False
        evaluator = None
        samples = [str(x) for x in args.images.glob('*')]

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # prepare models paths: a single checkpoint file, or every file in
    # the given directory
    if args.model_path.is_file():
        models_paths = [args.model_path]
    else:
        models_paths = list(args.model_path.glob('*'))

    for model_path in models_paths:
        model, vis_transform, to_tensor_transform = read_model(
            model_path, device)

        cpu_device = torch.device('cpu')
        with torch.no_grad():
            for sample in tqdm(samples, desc='Predicting'):
                # get and process output
                img_path = sample['img_path'] if markup else sample
                img = cv2.imread(img_path)
                # Normalize both input kinds to the same dict shape.
                # Assumes each markup entry has 'annotation' with
                # 'bbox'/'label' fields — TODO confirm against the
                # markup file format.
                if markup:
                    sample = {
                        'image': img,
                        'bboxes': [x['bbox'] for x in sample['annotation']],
                        'labels':
                        [int(x['label']) for x in sample['annotation']]
                    }
                else:
                    sample = {'image': img, 'bboxes': [], 'labels': []}

                # prepare image for visualization
                sample = vis_transform(**sample)
                img = sample['image'].copy()

                # prepare sample for model applying
                sample = to_tensor_transform(**sample)
                sample['image'] = sample['image'].to(device)

                # applying model
                output = model([sample['image']])

                # for fasterrcnn from torchscript it returns [losses, outputs]
                if len(output) == 2:
                    output = output[1]

                # move tensors back to the CPU before numpy conversion
                output = {
                    k: v.to(cpu_device).numpy()
                    for k, v in output[0].items()
                }
                output = postprocessing([output], iou_th=args.iou_th)[0]

                drawn_img = draw_bboxes(img.copy(),
                                        output['boxes'],
                                        output['labels'],
                                        shifted_labels=True)

                # evaluate if markup file was passed
                if markup:
                    # evaluator expects ground truth under 'boxes'
                    sample['boxes'] = sample['bboxes']
                    evaluator.collect_stats([output], [sample])

                    # ground truth on the left, predictions on the right
                    gt_drawn_img = draw_bboxes(img,
                                               sample['bboxes'],
                                               sample['labels'],
                                               shifted_labels=True)
                    drawn_img = np.concatenate([gt_drawn_img, drawn_img],
                                               axis=1)

                if args.save_to is None:
                    # interactive mode: show and wait for a keypress
                    cv2.namedWindow('img', cv2.WINDOW_NORMAL)
                    cv2.imshow('img', drawn_img)
                    cv2.waitKey()

                else:
                    # one output folder per model checkpoint
                    model_save_folder = args.save_to.joinpath(model_path.stem +
                                                              '_preds')
                    model_save_folder.mkdir(exist_ok=True, parents=True)

                    save_path = model_save_folder.joinpath(Path(img_path).name)
                    cv2.imwrite(str(save_path), drawn_img)

        # calculate metrics if markup file was passed
        # NOTE(review): `evaluator` is shared across the models_paths loop;
        # if calculate_metrics() does not reset the collected stats, later
        # models' metrics would include earlier models' stats — confirm.
        if markup:
            print('\nMetrics for model:', model_path.name)
            metrics = evaluator.calculate_metrics()
            for name, value in metrics.items():
                print('{}: {}'.format(name, value))
Exemplo n.º 4
0
    return args


if __name__ == "__main__":
    args = get_args()

    # configuration
    configuration = get_configuration(MapNetConfigurator(), args)

    # model: ResNet-34 backbone without ImageNet weights
    backbone = models.resnet34(pretrained=False)
    model = PoseNet(backbone, drop_rate=configuration.dropout)

    # data
    dataloader = get_dataloader(configuration)

    # mean and stdev used to un-normalize predicted poses
    stats_path = osp.join(configuration.preprocessed_data_path,
                          'pose_stats.txt')
    pose_m, pose_s = np.loadtxt(stats_path)

    configuration.dataset_length = len(dataloader.dataset)
    configuration.pose_m = pose_m
    configuration.pose_s = pose_s

    evaluator = Evaluator(config=configuration, model=model,
                          dataloader=dataloader)
    evaluator.run()
Exemplo n.º 5
0
import random
import numpy as np

# Fix both NumPy's and the stdlib's RNG seeds so recommender
# evaluation results are reproducible across runs.
np.random.seed(0)
random.seed(0)


def load_movielens():
    """Load MovieLens ratings and popularity ranks.

    Returns:
        tuple: ``(data, rankings)`` — the loaded ratings data and the
        popularity rankings used later to measure novelty.
    """
    lens = MovieLens()
    print("Loading movie ratings...")
    ratings = lens.load()
    print("\nComputing movie popularity ranks so we can measure novelty later...")
    pop_ranks = lens.get_popularity_ranks()
    return ratings, pop_ranks


if __name__ == '__main__':
    # Shared MovieLens data set used by every recommender below.
    ratings, pop_ranks = load_movielens()

    evaluator = Evaluator(ratings, pop_ranks)

    # SVD recommender with a fixed random state, plus a random baseline.
    evaluator.add_algorithm(SVD(random_state=10), "SVD")
    evaluator.add_algorithm(NormalPredictor(), "Random")

    evaluator.evaluate(True)