# Example #1
                    type=int,
                    default=4,
                    help="sample negative items for training")
def _str2bool(v):
    """Parse common textual booleans from the CLI.

    argparse's plain ``bool`` (or no ``type`` at all) treats ANY non-empty
    string as True, so ``--out False`` used to silently enable saving.
    """
    if isinstance(v, bool):
        return v
    return str(v).strip().lower() in ("true", "1", "yes", "y", "t")

parser.add_argument("--test_num_ng",
                    type=int,
                    default=99,
                    help="sample part of negative items for testing")
# Default stays True so existing invocations behave identically; the type
# fix only affects runs that explicitly pass a value such as "--out False".
parser.add_argument("--out",
                    type=_str2bool,
                    default=True,
                    help="save model or not")
parser.add_argument("--gpu", type=str, default="0", help="gpu card ID")
args = parser.parse_args()

# Restrict CUDA to the requested device(s) before any CUDA context is created.
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
# Let cuDNN auto-tune kernels; pays off when batch shapes are constant.
cudnn.benchmark = True

############################## PREPARE DATASET ##########################
# NOTE(review): load_all() takes no arguments here — presumably it reads a
# hard-coded data path; verify against data_utils.
train_data, test_data, user_num, item_num, train_mat = data_utils.load_all()

# construct the train and test datasets
# Training set presumably samples args.num_ng negatives per positive
# (trailing True = training mode); test set uses num_ng=0 / False —
# confirm the positional arguments against data_utils.BPRData.
train_dataset = data_utils.BPRData(train_data, item_num, train_mat,
                                   args.num_ng, True)
test_dataset = data_utils.BPRData(test_data, item_num, train_mat, 0, False)
train_loader = data.DataLoader(train_dataset,
                               batch_size=args.batch_size,
                               shuffle=True,
                               num_workers=16)
# Each test batch is sized test_num_ng + 1 so one batch holds one user's
# positive item plus its sampled negatives, in order (shuffle=False).
test_loader = data.DataLoader(test_dataset,
                              batch_size=args.test_num_ng + 1,
                              shuffle=False,
                              num_workers=0)

########################### CREATE MODEL #################################
# Example #2
                    help="compute metrics@top_k")
parser.add_argument("--gpu", type=str, default="1", help="gpu card ID")
args = parser.parse_args()

# Pin the visible GPU before CUDA initializes; cudnn.benchmark lets cuDNN
# auto-tune kernels for the (fixed) batch shapes used below.
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
cudnn.benchmark = True

# Dataset-dependent input and checkpoint locations.
data_path = '../data/{}/'.format(args.dataset)
model_path = './models/{}/'.format(args.dataset)
# Echo the full configuration so runs are identifiable in logs.
print("arguments: %s " % (args))
print("config model", args.model)
print("config data path", data_path)
print("config model path", model_path)

############################## PREPARE DATASET ##########################
# NOTE(review): user_neg presumably carries pre-generated per-user negative
# samples for evaluation — verify against data_utils.load_all.
train_data, test_data, user_num, item_num, train_mat, user_neg = data_utils.load_all(
    args.dataset, data_path)

# Evaluation-only dataset (the trailing 0 / False flags presumably disable
# negative sampling and training mode — confirm against data_utils.NCFData).
test_dataset = data_utils.NCFData(test_data, item_num, train_mat, user_neg,
                                  False, 0, False)
# batch_size = test_num_ng + 1 groups each user's positive item with its
# negatives, so one batch corresponds to one ranked list for metrics.
test_loader = data.DataLoader(test_dataset,
                              batch_size=args.test_num_ng + 1,
                              shuffle=False,
                              num_workers=0)
# test_data length is a multiple of (test_num_ng + 1); the division recovers
# the number of evaluated users.
print("data loaded! user_num:{}, item_num:{} test_data_len:{}".format(
    user_num, item_num,
    len(test_data) // (args.test_num_ng + 1)))

########################### CREATE MODEL #################################
# NOTE(review): torch.load unpickles a full model object — loading a
# checkpoint from an untrusted source can execute arbitrary code. Only load
# files this project produced itself.
test_model = torch.load('{}{}_{}.pth'.format(model_path, args.model,
                                             args.alpha))
test_model.cuda()
# Example #3
# Fix RNG seeds for reproducibility.
random.seed(2019) #random and transforms
# Force deterministic cuDNN kernel selection (trades speed for repeatability).
torch.backends.cudnn.deterministic=True # cudnn

def worker_init_fn(worker_id):
    """Give each DataLoader worker its own deterministic NumPy seed.

    Without this, forked workers would inherit the parent's RNG state and
    all draw identical random numbers.
    """
    seed = 2019 + worker_id
    np.random.seed(seed)

# Dataset-dependent input and checkpoint locations.
data_path = '../data/{}/'.format(args.dataset)
model_path = './models/{}/'.format(args.dataset)
# Echo the full configuration so runs are identifiable in logs.
print("arguments: %s " %(args))
print("config model", args.model)
print("config data path", data_path)
print("config model path", model_path)

############################## PREPARE DATASET ##########################

train_data, valid_data, test_data_pos, user_pos, user_num ,item_num, train_mat, train_data_noisy = data_utils.load_all(args.dataset, data_path)

# construct the train and test datasets
# NOTE(review): the positional flag after num_ng differs (0 for train, 1 for
# valid) — presumably a mode switch; the valid dataset also omits
# train_data_noisy. Verify against data_utils.NCFData.
train_dataset = data_utils.NCFData(
		train_data, item_num, train_mat, args.num_ng, 0, train_data_noisy)
valid_dataset = data_utils.NCFData(
		valid_data, item_num, train_mat, args.num_ng, 1)

# worker_init_fn re-seeds NumPy per worker so multi-worker sampling stays
# reproducible; pin_memory speeds up host-to-GPU transfers.
train_loader = data.DataLoader(train_dataset,
		batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
valid_loader = data.DataLoader(valid_dataset,
		batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)

print("data loaded! user_num:{}, item_num:{} train_data_len:{} test_user_num:{}".format(user_num, item_num, len(train_data), len(test_data_pos)))

########################### CREATE MODEL #################################
# Example #4
# paths
# All data and checkpoints live under the AdaptDL shared path so every
# replica sees the same files.
main_path = adaptdl.env.share_path()

# Rating/negative files follow the NCF naming convention:
# <dataset>.train.rating / <dataset>.test.rating / <dataset>.test.negative.
train_rating = os.path.join(main_path, '{}.train.rating'.format(dataset))
test_rating = os.path.join(main_path, '{}.test.rating'.format(dataset))
test_negative = os.path.join(main_path, '{}.test.negative'.format(dataset))

# Checkpoint locations for the three model variants (presumably GMF/MLP are
# used to warm-start NeuMF — verify against the model-creation code).
model_path = os.path.join(main_path, 'models')
GMF_model_path = os.path.join(model_path, 'GMF.pth')
MLP_model_path = os.path.join(model_path, 'MLP.pth')
NeuMF_model_path = os.path.join(model_path, 'NeuMF.pth')

############################## PREPARE DATASET ##########################
train_data, test_data, user_num, item_num, train_mat = \
    data_utils.load_all(main_path, train_rating, test_negative, dataset)

# construct the train and test datasets
# Training presumably samples args.num_ng negatives on the fly (trailing
# True = training mode); the test set uses num_ng=0 / False — confirm the
# positional arguments against data_utils.NCFData.
train_dataset = data_utils.NCFData(
        train_data, item_num, train_mat, args.num_ng, True)
test_dataset = data_utils.NCFData(
        test_data, item_num, train_mat, 0, False)
# AdaptiveDataLoader lets AdaptDL rescale batches across replicas;
# drop_last keeps every training batch full-sized.
train_loader = adl.AdaptiveDataLoader(
    train_dataset,
    batch_size=args.batch_size, shuffle=True, num_workers=4, drop_last=True)
# One evaluation batch = one user's positive item + test_num_ng negatives.
test_loader = adl.AdaptiveDataLoader(
    test_dataset,
    batch_size=args.test_num_ng+1, shuffle=False, num_workers=0)

if args.autoscale_bsz:
    train_loader.autoscale_batch_size(