예제 #1
0
import random
import numpy as np

# Pin both NumPy's and the stdlib's RNG so results are reproducible run-to-run.
np.random.seed(0)
random.seed(0)


def load_movielens():
    """Load the MovieLens ratings and popularity ranks.

    Returns:
        tuple: (MovieLens helper, ratings dataset, popularity rankings).
    """
    lens = MovieLens()
    print("Loading movie ratings...")
    ratings = lens.load()
    print("\nComputing movie popularity ranks so we can measure novelty later...")
    ranks = lens.get_popularity_ranks()
    return lens, ratings, ranks


if __name__ == '__main__':
    # One shared MovieLens split keeps the comparison fair.
    lens, ratings, popularity = load_movielens()

    # Register each contender, then run a single evaluation pass.
    bench = Evaluator(ratings, popularity)
    contenders = (
        (ContentKNNAlgorithm(), "ContentKNN"),
        (NormalPredictor(), "Random"),  # random baseline
    )
    for algorithm, label in contenders:
        bench.add_algorithm(algorithm, label)

    bench.evaluate(False)
    bench.sample_topn_recs(lens)
예제 #2
0
from surprise import NormalPredictor
import random
import numpy as np

# Pin both NumPy's and the stdlib's RNG so results are reproducible run-to-run.
np.random.seed(0)
random.seed(0)


def load_movielens():
    """Fetch the MovieLens helper, its ratings, and its popularity ranks."""
    movielens = MovieLens()
    print("Loading movie ratings...")
    loaded = movielens.load()
    print("\nComputing movie popularity ranks so we can measure novelty later...")
    return movielens, loaded, movielens.get_popularity_ranks()


if __name__ == '__main__':
    # Common MovieLens data shared by every algorithm under test.
    lens, ratings, popularity = load_movielens()

    # AutoRec versus a random baseline on the same evaluator.
    bench = Evaluator(ratings, popularity)
    for algorithm, label in ((AutoRecAlgorithm(), "AutoRec"),
                             (NormalPredictor(), "Random")):
        bench.add_algorithm(algorithm, label)

    bench.evaluate(True)
    bench.sample_topn_recs(lens)
예제 #3
0
    parser.add_argument('--config', type=str, default=None)

    args = parser.parse_args()

    return args


if __name__ == "__main__":
    # Build the runtime configuration from CLI arguments.
    configuration = get_configuration(AttentionMapNetConfigurator(),
                                      get_args())

    # ResNet-34 backbone shared by both pose-network variants.
    feature_extractor = models.resnet34(pretrained=True)
    if configuration.model == "SEAttentionPoseNet":
        posenet = SEAttentionPoseNet(resnet=feature_extractor,
                                     config=configuration,
                                     drop_rate=configuration.dropout)
    elif configuration.model == "LearnGAPoseNet":
        posenet = LearnGAPoseNet(feature_extractor=feature_extractor,
                                 drop_rate=configuration.dropout)
    else:
        # Bug fix: previously an unrecognized model name fell through both
        # branches, leaving `posenet` unbound and raising a confusing
        # NameError at Evaluator(...) below. Fail fast with a clear message.
        raise ValueError(
            "Unsupported model: {!r}".format(configuration.model))

    dataloader = get_dataloader(configuration)

    evaluator = Evaluator(config=configuration,
                          model=posenet,
                          dataloader=dataloader)

    evaluator.run()
예제 #4
0
# Pin both NumPy's and the stdlib's RNG so results are reproducible run-to-run.
np.random.seed(0)
random.seed(0)


def load_movielens():
    """Return the MovieLens helper plus its ratings and popularity ranks."""
    source = MovieLens()
    print("Loading movie ratings...")
    dataset = source.load()
    print("\nComputing movie popularity ranks so we can measure novelty later...")
    popularity = source.get_popularity_ranks()
    return source, dataset, popularity


if __name__ == '__main__':
    # One shared MovieLens data set for all recommenders.
    lens, ratings, popularity = load_movielens()

    # Evaluator measures every registered algorithm on the same split.
    bench = Evaluator(ratings, popularity)
    lineup = (
        (SVD(), "SVD"),
        (SVDpp(), "SVD++"),
        (NormalPredictor(), "Random"),  # random baseline
    )
    for algorithm, label in lineup:
        bench.add_algorithm(algorithm, label)

    bench.evaluate(False)
    bench.sample_topn_recs(lens)
예제 #5
0
    # Shared MovieLens data for both the grid search and the final comparison.
    ml, data, rankings = load_movielens()

    # Cross-validated search over RBM hyper-parameters.
    print("Searching for best parameters...")
    search_space = {'hidden_dim': [20, 10], 'learning_rate': [0.1, 0.01]}
    search = GridSearchCV(RBMAlgorithm, search_space,
                          measures=['rmse', 'mae'], cv=3)
    search.fit(data)

    # Report the winning RMSE and the parameters that produced it.
    print("Best RMSE score attained: ", search.best_score['rmse'])
    print(search.best_params['rmse'])

    # Compare tuned RBM vs. default RBM vs. a random baseline.
    evaluator = Evaluator(data, rankings)
    best = search.best_params['rmse']
    evaluator.add_algorithm(
        RBMAlgorithm(hidden_dim=best['hidden_dim'],
                     learning_rate=best['learning_rate']),
        "RBM - Tuned")
    evaluator.add_algorithm(RBMAlgorithm(), "RBM - Untuned")
    evaluator.add_algorithm(NormalPredictor(), "Random")

    evaluator.evaluate(False)
    evaluator.sample_topn_recs(ml)
예제 #6
0
from common.evaluator import Evaluator
from .rbm_wrapper import RBMAlgorithm
from surprise import NormalPredictor
import random
import numpy as np

# Pin both NumPy's and the stdlib's RNG so results are reproducible run-to-run.
np.random.seed(0)
random.seed(0)


def load_movielens():
    """Load MovieLens ratings and popularity ranks for novelty metrics."""
    lens = MovieLens()
    print("Loading movie ratings...")
    ratings = lens.load()
    print("\nComputing movie popularity ranks so we can measure novelty later...")
    return lens, ratings, lens.get_popularity_ranks()


if __name__ == '__main__':
    # Shared MovieLens data set for both algorithms.
    lens, ratings, popularity = load_movielens()

    # RBM (20 training epochs) against a random baseline.
    bench = Evaluator(ratings, popularity)
    for algorithm, label in ((RBMAlgorithm(epochs=20), "RBM"),
                             (NormalPredictor(), "Random")):
        bench.add_algorithm(algorithm, label)

    bench.evaluate(True)
    bench.sample_topn_recs(lens)
예제 #7
0
def main():
    """Application entry point.

    Runs one or more detection models over a set of images, draws the
    predicted boxes, and either shows the result on screen or saves it to
    disk. If ``args.images`` is a JSON markup file, ground-truth boxes are
    drawn alongside predictions and detection metrics are reported per model.
    """
    args = get_args()
    if args.save_to is not None:
        args.save_to.mkdir(exist_ok=True, parents=True)

    # A file path means a JSON markup file with annotations; a directory
    # means raw images with no ground truth (and hence no metrics).
    if args.images.is_file():
        markup = True
        evaluator = Evaluator()

        with open(str(args.images)) as f:
            samples = json.load(f)

    else:
        markup = False
        evaluator = None
        samples = [str(x) for x in args.images.glob('*')]

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # prepare models paths
    # A single model file, or every file in a directory of models.
    if args.model_path.is_file():
        models_paths = [args.model_path]
    else:
        models_paths = list(args.model_path.glob('*'))

    # NOTE(review): one Evaluator instance is reused for every model in this
    # loop; if calculate_metrics() does not reset accumulated stats, metrics
    # for later models would include earlier ones — confirm in Evaluator.
    for model_path in models_paths:
        model, vis_transform, to_tensor_transform = read_model(
            model_path, device)

        cpu_device = torch.device('cpu')
        with torch.no_grad():
            for sample in tqdm(samples, desc='Predicting'):
                # get and process output
                # In markup mode each sample is a dict with 'img_path' and
                # 'annotation'; otherwise it is a plain path string.
                img_path = sample['img_path'] if markup else sample
                img = cv2.imread(img_path)
                if markup:
                    # Rebind `sample` to the transform input format.
                    sample = {
                        'image': img,
                        'bboxes': [x['bbox'] for x in sample['annotation']],
                        'labels':
                        [int(x['label']) for x in sample['annotation']]
                    }
                else:
                    sample = {'image': img, 'bboxes': [], 'labels': []}

                # prepare image for visualization
                sample = vis_transform(**sample)
                img = sample['image'].copy()

                # prepare sample for model applying
                sample = to_tensor_transform(**sample)
                sample['image'] = sample['image'].to(device)

                # applying model
                output = model([sample['image']])

                # for fasterrcnn from torchscript it returns [losses, outputs]
                if len(output) == 2:
                    output = output[1]

                # Move every output tensor to CPU as a NumPy array.
                output = {
                    k: v.to(cpu_device).numpy()
                    for k, v in output[0].items()
                }
                output = postprocessing([output], iou_th=args.iou_th)[0]

                drawn_img = draw_bboxes(img.copy(),
                                        output['boxes'],
                                        output['labels'],
                                        shifted_labels=True)

                # evaluate if markup file was passed
                if markup:
                    # Evaluator expects ground truth under 'boxes'.
                    sample['boxes'] = sample['bboxes']
                    evaluator.collect_stats([output], [sample])

                    # Side-by-side: ground truth on the left, prediction right.
                    gt_drawn_img = draw_bboxes(img,
                                               sample['bboxes'],
                                               sample['labels'],
                                               shifted_labels=True)
                    drawn_img = np.concatenate([gt_drawn_img, drawn_img],
                                               axis=1)

                # Show interactively when no output directory was given.
                if args.save_to is None:
                    cv2.namedWindow('img', cv2.WINDOW_NORMAL)
                    cv2.imshow('img', drawn_img)
                    cv2.waitKey()

                else:
                    # One sub-folder of predictions per model file.
                    model_save_folder = args.save_to.joinpath(model_path.stem +
                                                              '_preds')
                    model_save_folder.mkdir(exist_ok=True, parents=True)

                    save_path = model_save_folder.joinpath(Path(img_path).name)
                    cv2.imwrite(str(save_path), drawn_img)

        # calculate metrics if markup file was passed
        if markup:
            print('\nMetrics for model:', model_path.name)
            metrics = evaluator.calculate_metrics()
            for name, value in metrics.items():
                print('{}: {}'.format(name, value))
예제 #8
0
import random
import numpy as np

# Pin both NumPy's and the stdlib's RNG so results are reproducible run-to-run.
np.random.seed(0)
random.seed(0)


def load_movielens():
    """Load MovieLens ratings data along with popularity rankings.

    Returns:
        tuple: (ratings dataset, popularity rankings). Note: unlike other
        variants, the MovieLens helper itself is not returned.
    """
    lens = MovieLens()
    print("Loading movie ratings...")
    ratings = lens.load()
    print("\nComputing movie popularity ranks so we can measure novelty later...")
    return ratings, lens.get_popularity_ranks()


if __name__ == '__main__':
    # Shared MovieLens data for the algorithms under comparison.
    ratings, popularity = load_movielens()

    bench = Evaluator(ratings, popularity)

    # SVD with a pinned random state, plus a random baseline.
    for algorithm, label in ((SVD(random_state=10), "SVD"),
                             (NormalPredictor(), "Random")):
        bench.add_algorithm(algorithm, label)

    bench.evaluate(True)
예제 #9
0
파일: eval.py 프로젝트: zju3dv/RVL-Dynamic
    # ResNet-34 backbone; pretrained weights are not needed because the
    # checkpoint loaded for evaluation supplies them.
    feature_extractor = models.resnet34(pretrained=False)
    # mapnet/posenet share the plain PoseNet head; everything else uses the
    # squeeze-and-excitation attention variant.
    if configuration.model == "mapnet" or configuration.model == "posenet":
        model = PoseNet(feature_extractor, drop_rate=configuration.dropout)
    else:
        model = SEAttentionPoseNet(resnet=feature_extractor,
                                   config=configuration,
                                   drop_rate=configuration.dropout)

    # data
    dataloader = get_dataloader(configuration)

    # read mean and stdev for un-normalizing predictions
    pose_stats_file = osp.join(configuration.preprocessed_data_path,
                               'pose_stats.txt')
    pose_m, pose_s = np.loadtxt(pose_stats_file)  # mean and stdev

    # Stash dataset size and pose statistics on the config so the evaluator
    # can un-normalize its predictions.
    configuration.dataset_length = len(dataloader.dataset)
    configuration.pose_m = pose_m
    configuration.pose_s = pose_s

    # NOTE(review): VarEvaluator presumably adds uncertainty estimation when
    # --var is set — confirm against its definition.
    if not args.var:
        evaluator = Evaluator(config=configuration,
                              model=model,
                              dataloader=dataloader)
    else:
        evaluator = VarEvaluator(config=configuration,
                                 model=model,
                                 dataloader=dataloader)

    evaluator.run(False)
예제 #10
0

def load_movielens():
    """Build the MovieLens helper and return it with ratings and ranks."""
    helper = MovieLens()
    print("Loading movie ratings...")
    dataset = helper.load()
    print("\nComputing movie popularity ranks so we can measure novelty later...")
    ranks = helper.get_popularity_ranks()
    return helper, dataset, ranks


if __name__ == '__main__':
    # One shared MovieLens data set keeps the comparison fair.
    lens, ratings, popularity = load_movielens()

    bench = Evaluator(ratings, popularity)

    # Cosine-similarity KNN, once user-based and once item-based.
    for user_based, label in ((True, "User KNN"), (False, "Item KNN")):
        bench.add_algorithm(
            KNNBasic(sim_options={'name': 'cosine',
                                  'user_based': user_based}),
            label)

    # Just make random recommendations
    bench.add_algorithm(NormalPredictor(), "Random")
    bench.evaluate(False)
    bench.sample_topn_recs(lens)