Example #1
def main():
    logging.info('-' * 45 + ' BEGIN: ' + utils.get_time() + ' ' + '-' * 45)
    exclude = [
        'check_epoch', 'log_file', 'model_path', 'path', 'pin_memory',
        'regenerate', 'sep', 'train', 'verbose'
    ]
    logging.info(utils.format_arg_str(args, exclude_lst=exclude))  # format and log the training arguments

    # Random seed
    np.random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)
    torch.backends.cudnn.deterministic = True

    # GPU
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    logging.info('cuda available: {}'.format(torch.cuda.is_available()))
    logging.info('# cuda devices: {}'.format(torch.cuda.device_count()))

    # Read data
    corpus_path = os.path.join(args.path, args.dataset,
                               model_name.reader + '.pkl')
    corpus = pickle.load(open(corpus_path, 'rb'))

    # Define model
    model = model_name(args, corpus)
    logging.info(model)
    model.load_state_dict(torch.load(args.model_path, map_location="cpu"))
    model.eval()
    # Run model
    data_dict = dict()
    history_items = [509, 515, 666, 691]
    data_dict["test"] = {
        'user_id':
        torch.tensor([6185]),
        'item_id':
        torch.tensor(
            [sorted(list(set(corpus.all_df["item_id"].values.tolist())))]),
        'history_items':
        torch.tensor([history_items]),
        'lengths':
        torch.tensor([4]),
        'batch_size':
        1,
        'phase':
        'test'
    }

    runner = runner_name(args)
    prediction = model(data_dict["test"])
    result = torch.topk(prediction['prediction'], 10)[1].tolist()[0]  # indices of the top-10 scored items
    result_list = []
    for i in result:  # remove items the user has already purchased
        if i not in history_items:
            result_list.append(i)
    logging.info(f"Top-10 recommendation results: {result_list}")
Example #2
def main():
    logging.info('-' * 45 + ' BEGIN: ' + utils.get_time() + ' ' + '-' * 45)
    exclude = [
        'check_epoch', 'log_file', 'model_path', 'path', 'pin_memory',
        'regenerate', 'sep', 'train', 'verbose'
    ]
    logging.info(utils.format_arg_str(args, exclude_lst=exclude))  # format and log the training arguments

    # Random seed
    np.random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)
    torch.backends.cudnn.deterministic = True

    # GPU
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    logging.info('cuda available: {}'.format(torch.cuda.is_available()))
    logging.info('# cuda devices: {}'.format(torch.cuda.device_count()))

    # Read data
    corpus_path = os.path.join(args.path, args.dataset,
                               model_name.reader + '.pkl')
    if not args.regenerate and os.path.exists(corpus_path):
        logging.info('Load corpus from {}'.format(corpus_path))
        corpus = pickle.load(open(corpus_path, 'rb'))
    else:
        corpus = reader_name(args)
        logging.info('Save corpus to {}'.format(corpus_path))
        # Persist the corpus object; pickle can serialize and reload the class instance
        pickle.dump(corpus, open(corpus_path, 'wb'))

    # Define model
    model = model_name(args, corpus)
    logging.info(model)
    model.apply(model.init_weights)  # apply() runs init_weights on every submodule
    model.actions_before_train()
    model.to(model.device)

    # Run model
    data_dict = dict()
    for phase in ['train', 'dev', 'test']:
        data_dict[phase] = model_name.Dataset(model, corpus, phase)
    runner = runner_name(args)
    logging.info('Test Before Training: ' +
                 runner.print_res(model, data_dict['test']))
    if args.load > 0:
        model.load_model()
    if args.train > 0:
        runner.train(model, data_dict)
    logging.info(os.linesep + 'Test After Training: ' +
                 runner.print_res(model, data_dict['test']))

    model.actions_after_train()
    logging.info(os.linesep + '-' * 45 + ' END: ' + utils.get_time() + ' ' +
                 '-' * 45)
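Example #2 builds one model_name.Dataset per phase and lets the runner handle batching internally. If you need to iterate the batches yourself, a hedged sketch along the lines below would work, assuming each Dataset implements __len__/__getitem__; the batch size, worker count, and reliance on the default collate function are assumptions (the project's datasets may require their own collate_fn).

from torch.utils.data import DataLoader as TorchDataLoader

# Hedged sketch: manual batching of the per-phase datasets from Example #2.
# batch_size / num_workers values are illustrative; pass a custom collate_fn
# if the Dataset's samples are not plain tensors or dicts of tensors.
loaders = {
    phase: TorchDataLoader(
        data_dict[phase],
        batch_size=256,
        shuffle=(phase == 'train'),  # shuffle only the training split
        num_workers=0,
        pin_memory=args.pin_memory,
    )
    for phase in ['train', 'dev', 'test']
}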
Example #3
def main():
    logging.info('-' * 45 + ' BEGIN: ' + utils.get_time() + ' ' + '-' * 45)
    logging.info(utils.format_arg_str(args))

    # Random seed
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)
    np.random.seed(args.random_seed)

    # GPU
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    logging.info("# cuda devices: {}".format(torch.cuda.device_count()))

    # Load data
    corpus_path = os.path.join(args.path, args.dataset, 'Corpus.pkl')
    if not args.regenerate and os.path.exists(corpus_path):
        logging.info('Load corpus from {}'.format(corpus_path))
        corpus = pickle.load(open(corpus_path, 'rb'))
    else:
        corpus = loader_name(args)
        logging.info('Save corpus to {}'.format(corpus_path))
        pickle.dump(corpus, open(corpus_path, 'wb'))

    # Define model
    model = model_name(args, corpus)
    logging.info(model)
    model = model.double()
    model.apply(model.init_weights)
    model.actions_before_train()
    if torch.cuda.device_count() > 0:
        model = model.cuda()

    # Run model
    runner = runner_name(args)
    logging.info('Test Before Training: ' + runner.print_res(model, corpus))
    if args.load > 0:
        model.load_model()
    if args.train > 0:
        runner.train(model, corpus)
    logging.info(os.linesep + 'Test After Training: ' + runner.print_res(model, corpus))

    model.actions_after_train()
    logging.info(os.linesep + '-' * 45 + ' END: ' + utils.get_time() + ' ' + '-' * 45)
Example #4
def main(args):
    logging.info('-' * 45 + ' BEGIN: ' + utils.get_time() + ' ' + '-' * 45)
    exclude = [
        'check_epoch', 'log_file', 'model_path', 'path', 'pin_memory',
        'regenerate', 'sep', 'train', 'verbose'
    ]
    logging.info(utils.format_arg_str(args, exclude_lst=exclude))

    # Random seed
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)
    np.random.seed(args.random_seed)

    # GPU
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    logging.info("# cuda devices: {}".format(torch.cuda.device_count()))

    # Load data
    corpus = load_corpus(args)

    # Define model
    model = model_name(args, corpus)
    logging.info(model)
    model = model.double()
    model.apply(model.init_weights)
    model.actions_before_train()
    if torch.cuda.device_count() > 0:
        model = model.cuda()

    runner = runner_name(args)

    logging.info('Test Before Training: ' + runner.print_res(model, corpus))
    if args.load > 0:
        model.load_model()
    if args.train > 0:
        runner.train(model, corpus)
    logging.info('\nTest After Training: ' + runner.print_res(model, corpus))

    model.actions_after_train()
    logging.info(os.linesep + '-' * 45 + ' END: ' + utils.get_time() + ' ' +
                 '-' * 45)
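Example #4 calls a load_corpus(args) helper that is not shown here. A plausible sketch, mirroring the pickle-cache pattern of Examples #2 and #3, is given below; the loader_name fallback and the 'Corpus.pkl' filename are assumptions carried over from Example #3.

import logging
import os
import pickle

def load_corpus(args):
    # Hedged sketch of the missing helper, following the cache pattern of Example #3.
    corpus_path = os.path.join(args.path, args.dataset, 'Corpus.pkl')  # filename assumed
    if not args.regenerate and os.path.exists(corpus_path):
        logging.info('Load corpus from {}'.format(corpus_path))
        with open(corpus_path, 'rb') as f:
            corpus = pickle.load(f)
    else:
        corpus = loader_name(args)  # loader_name assumed, as in Example #3
        logging.info('Save corpus to {}'.format(corpus_path))
        with open(corpus_path, 'wb') as f:
            pickle.dump(corpus, f)
    return corpus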
Example #5
def main(args):
    logging.info('-' * 45 + ' BEGIN: ' + utils.get_time() + ' ' + '-' * 45)
    exclude = [
        'check_epoch', 'log_file', 'model_path', 'path', 'pin_memory',
        'regenerate', 'sep', 'train', 'verbose', 'load', 'buffer'
    ]
    logging.info(utils.format_arg_str(args, exclude_lst=exclude))

    # Random seed
    np.random.seed(args.random_seed)

    # Read Data
    dataloader = DataLoader.DataLoader(args)
    dataloader._load_data()

    # Define Model
    model = model_name(args)

    # Run Model
    evaluations_list = {}
    for i in range(5):
        model.fit(dataloader.train_feature[i], dataloader.train_label[i])
        evaluations = model.print_res(dataloader.test_feature[i],
                                      dataloader.test_label[i])
        evaluation_results = model.evaluate(dataloader.test_feature[i],
                                            dataloader.test_label[i])
        for key in evaluation_results:
            if key not in evaluations_list:
                evaluations_list[key] = []
            evaluations_list[key].append(evaluation_results[key])
        logging.info('Test Results at {} times: {}'.format(i, evaluations))
    evaluations_all = {}
    for key in evaluations_list:
        evaluations_all[key] = np.mean(evaluations_list[key])
    logging.info("Average results: {}".format(
        utils.format_metric(evaluations_all)))
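The per-metric accumulation in Example #5 can also be written with collections.defaultdict, which removes the explicit membership check. A minimal, self-contained sketch follows; the metric names and values are illustrative, not real results.

from collections import defaultdict

import numpy as np

# Illustrative per-fold results; in Example #5 these come from model.evaluate(...).
all_fold_results = [
    {'HR@10': 0.42, 'NDCG@10': 0.31},
    {'HR@10': 0.44, 'NDCG@10': 0.33},
]

evaluations_list = defaultdict(list)
for fold_results in all_fold_results:
    for key, value in fold_results.items():
        evaluations_list[key].append(value)

evaluations_all = {key: float(np.mean(values)) for key, values in evaluations_list.items()}
print(evaluations_all)  # approximately {'HR@10': 0.43, 'NDCG@10': 0.32}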