def main(args):
    """Reproduce MF-family results on the test split and save a metrics table."""
    progress = WorkSplitter()

    table_path = 'tables/'

    test = load_numpy(path=args.path, name=args.dataset + args.test)

    # One row per configuration; `way` selects a model variant where applicable.
    df = pd.DataFrame({
        'model': [
            "BiasedMF", "BiasedMF", "BiasedMF", "PropensityMF",
            "InitFeatureEmbedMF", "InitFeatureEmbedMF", "InitFeatureEmbedMF",
            "AlterFeatureEmbedMF", "ConcatFeatureEmbedMF", "CausalSampleMF",
            "UnionSampleMF", "WRSampleMF", "BatchSampleMF", "BridgeLabelMF",
            "RefineLabelMF"
        ],
        'way': [
            None, "unif", "combine", None, "user", "item", "both", None, None,
            None, None, None, None, None, None
        ]
    })

    progress.subsection("Reproduce")
    frames = []
    for _, settings in df.iterrows():
        settings = settings.to_dict()
        settings['metric'] = ['NLL', 'AUC']
        settings['rank'] = 10
        frames.append(
            execute(test, settings, folder=args.model_folder + args.dataset))

    save_dataframe_csv(pd.concat(frames), table_path, args.name)
Exemplo n.º 2
0
def plrec(matrix_train, iteration=4, lamb=80, rank=200, seed=1, **unused):
    """
    Function used to achieve generalized projected lrec w/o item-attribute embedding
    :param matrix_train: user-item matrix with shape m*n
    :param iteration: number of power iterations in randomized svd
    :param lamb: parameter of penalty
    :param rank: latent dimension size
    :param seed: the seed of the pseudo random number generator to use when shuffling the data
    :return: prediction in sparse matrix
    """
    progress = WorkSplitter()

    progress.subsection("Randomized SVD")
    start_time = time.time()
    _, sigma, Qt = randomized_svd(matrix_train,
                                  n_components=rank,
                                  n_iter=iteration,
                                  random_state=seed)

    # Project ratings onto the singular-value-scaled right singular vectors.
    RQ = matrix_train.dot(sparse.csc_matrix(Qt.T * np.sqrt(sigma)))

    print("Elapsed: {}".format(inhour(time.time() - start_time)))

    progress.subsection("Closed-Form Linear Optimization")
    start_time = time.time()
    # Ridge-regularized closed form: Y = (RQ^T RQ + lamb*I)^-1 RQ^T R
    regularized_gram = RQ.T.dot(RQ) + lamb * sparse.identity(rank, dtype=np.float32)
    Y = sparse.linalg.inv(regularized_gram.tocsc()).dot(RQ.T).dot(matrix_train)
    print("Elapsed: {}".format(inhour(time.time() - start_time)))

    return np.array(RQ.todense()), np.array(Y.todense()), None
Exemplo n.º 3
0
def main(args):
    """Run the gain analysis for RestrictedBatchSampleMF variants and save a CSV."""
    progress = WorkSplitter()

    table_path = 'tables/'

    test = load_numpy(path=args.path, name=args.dataset + args.test)

    # Same model throughout; `way` selects which user/item subset is restricted.
    df = pd.DataFrame({
        'model': ['RestrictedBatchSampleMF'] * 5,
        'way': [None, 'head_users', 'tail_users', 'head_items', 'tail_items']
    })

    progress.subsection("Gain Analysis")
    frames = []
    for _, config in df.iterrows():
        config = config.to_dict()
        config['metric'] = ['NLL', 'AUC']
        config['rank'] = 10
        frames.append(
            execute(test, config, folder=args.model_folder + args.dataset))

    save_dataframe_csv(pd.concat(frames), table_path, args.name)
Exemplo n.º 4
0
def main(args):
    """Reproduce AE-family results on the test split and save a metrics table."""
    progress = WorkSplitter()

    table_path = 'tables/'

    test = load_numpy(path=args.path, name=args.dataset + args.test)

    # One row per configuration; `way` selects a model variant where applicable.
    df = pd.DataFrame({
        'model': [
            'AutoRec', 'AutoRec', 'AutoRec', 'InitFeatureEmbedAE',
            'InitFeatureEmbedAE', 'InitFeatureEmbedAE', 'AlterFeatureEmbedAE',
            'ConcatFeatureEmbedAE', 'UnionSampleAE', 'WRSampleAE',
            'BatchSampleAE', 'BridgeLabelAE', 'RefineLabelAE', 'DeepAutoRec',
            'DeepAutoRec', 'SoftLabelAE', 'HintAE'
        ],
        'way': [
            None, 'unif', 'combine', 'user', 'item', 'both', None, None, None,
            None, None, None, None, None, 'unif', None, None
        ]
    })

    progress.subsection("Reproduce")
    frames = []
    for _, settings in df.iterrows():
        settings = settings.to_dict()
        settings['metric'] = ['NLL', 'AUC']
        settings['rank'] = 200
        frames.append(
            execute(test, settings, folder=args.model_folder + args.dataset))

    save_dataframe_csv(pd.concat(frames), table_path, args.name)
Exemplo n.º 5
0
def plrec(matrix_train, embeded_matrix=np.empty((0)), iteration=4, lam=80, rank=200, seed=1, **unused):
    """
    Function used to achieve generalized projected lrec w/o item-attribute embedding
    :param matrix_train: user-item matrix with shape m*n
    :param embeded_matrix: item-attribute matrix with length n (each row represents one item)
    :param iteration: number of power iterations in randomized svd
    :param lam: parameter of penalty
    :param rank: latent dimension size
    :param seed: seed of the pseudo random number generator used by randomized_svd
    :return: prediction in sparse matrix
    """
    progress = WorkSplitter()
    matrix_input = matrix_train
    # Optionally stack item side-information rows under the rating matrix.
    if embeded_matrix.shape[0] > 0:
        matrix_input = vstack((matrix_input, embeded_matrix.T))

    progress.subsection("Randomized SVD")
    start_time = time.time()
    P, sigma, Qt = randomized_svd(matrix_input,
                                  n_components=rank,
                                  n_iter=iteration,
                                  random_state=seed)

    # Project ratings onto the singular-value-scaled right singular vectors.
    RQ = matrix_input.dot(sparse.csc_matrix(Qt.T*np.sqrt(sigma)))

    print("Elapsed: {0}".format(inhour(time.time() - start_time)))

    progress.subsection("Closed-Form Linear Optimization")
    start_time = time.time()
    # Ridge-regularized closed form: Y = (RQ^T RQ + lam*I)^-1 RQ^T R
    pre_inv = RQ.T.dot(RQ) + lam * sparse.identity(rank, dtype=np.float32)
    inverse = inv(pre_inv)
    Y = inverse.dot(RQ.T).dot(matrix_input)
    print("Elapsed: {0}".format(inhour(time.time() - start_time)))
    return np.array(RQ.todense()), np.array(Y.todense()), None
Exemplo n.º 6
0
def acf(matrix_train,
        embeded_matrix=np.empty((0)),
        epoch=300,
        iteration=100,
        lamb=80,
        rank=100,
        key_dim=3,
        batch_size=32,
        optimizer="Adam",
        learning_rate=0.001,
        seed=1,
        root=1,
        fb=False,
        **unused):
    """Train the ACF model using item embeddings from a randomized SVD of a PMI matrix.

    :param matrix_train: user-item matrix
    :param embeded_matrix: optional item-attribute matrix stacked (transposed) onto the input
    :param epoch: number of training epochs passed to ACF.train_model
    :param iteration: number of power iterations for the randomized SVD
    :param lamb: regularization weight passed to the ACF model
    :param rank: latent dimension size
    :param key_dim: attention key dimension passed to ACF (semantics defined by ACF)
    :param batch_size: minibatch size for training
    :param optimizer: name resolved through the project's Optimizer registry
    :param learning_rate: optimizer learning rate
    :param seed: random state for sklearn's randomized_svd (unused when fb=True)
    :param root: forwarded to get_pmi_matrix — presumably a smoothing
        exponent; TODO confirm against get_pmi_matrix's definition
    :param fb: if True use fbpca's `pca`, otherwise sklearn's randomized_svd
    :return: (RQ, Y, None) — user factors, transposed item factors, no bias
    """

    print(epoch, lamb, rank)
    progress = WorkSplitter()
    matrix_input = matrix_train
    # Optionally stack item side-information rows under the rating matrix.
    if embeded_matrix.shape[0] > 0:
        matrix_input = vstack((matrix_input, embeded_matrix.T))
    progress.subsection("Create PMI matrix")
    pmi_matrix = get_pmi_matrix(matrix_input, root)
    progress.subsection("Randomized SVD")
    start_time = time.time()
    if fb:
        P, sigma, Qt = pca(pmi_matrix, k=rank, n_iter=iteration, raw=True)
    else:
        P, sigma, Qt = randomized_svd(pmi_matrix,
                                      n_components=rank,
                                      n_iter=iteration,
                                      power_iteration_normalizer='QR',
                                      random_state=seed)
    # Scale right singular vectors by sqrt of singular values for the item init.
    Q = Qt.T * np.sqrt(sigma)
    m, n = matrix_input.shape
    model = ACF(m,
                n,
                rank,
                key_dim,
                lamb=lamb,
                batch_size=batch_size,
                learning_rate=learning_rate,
                optimizer=Optimizer[optimizer],
                item_embeddings=Q)
    model.train_model(matrix_input, epoch)
    print("Elapsed: {0}".format(inhour(time.time() - start_time)))

    RQ = model.get_RQ()
    Y = model.get_Y().T
    # Release the TF session and graph so repeated calls don't leak state.
    model.sess.close()
    tf.reset_default_graph()

    return RQ, Y, None
Exemplo n.º 7
0
def als(matrix_train,
        embeded_matrix=np.empty((0)),
        iteration=4,
        lam=80,
        rank=200,
        alpha=100,
        seed=1,
        **unused):
    """
    Weighted alternating least squares.

    :param matrix_train: rating matrix
    :param embeded_matrix: item or user embedding matrix(side info)
    :param iteration: number of alternative solving
    :param lam: regularization parameter
    :param rank: SVD top K eigenvalue ranks
    :param alpha: re-weighting parameter
    :param seed: Random initialization seed
    :return: (U, V^T, None) — user factors, transposed item factors, no bias
    """

    progress = WorkSplitter()
    progress.subsection("Alternative Item-wised Optimization")
    matrix_input = matrix_train
    # Optionally stack item side-information rows under the rating matrix.
    if embeded_matrix.shape[0] > 0:
        matrix_input = vstack((matrix_input, embeded_matrix.T))

    m, n = matrix_train.shape

    matrix_coo = matrix_train.tocoo()

    cold_rows, cold_cols = get_cold(matrix_coo, m, n)

    # Fix: honor the `seed` parameter (was hard-coded to 1), so callers can
    # actually control the random initialization.
    np.random.seed(seed)
    U = torch.tensor(
        np.random.normal(0, 0.01, size=(m, rank)).astype(np.float32))
    V = torch.tensor(
        np.random.normal(0, 0.01, size=(n, rank)).astype(np.float32))

    # Cold users/items get zero factors so they contribute nothing.
    U[cold_rows] = 0
    V[cold_cols] = 0

    # Fix: `range` instead of Python 2-only `xrange`.
    for i in range(iteration):
        progress.subsubsection("Iteration: {0}".format(i))
        # Alternate: solve item factors given users, then users given items.
        solve(matrix_input.T, U, V, lam=lam, rank=rank, alpha=alpha)
        solve(matrix_input, V, U, lam=lam, rank=rank, alpha=alpha)

    return U.numpy(), V.numpy().T, None
Exemplo n.º 8
0
def uncertainty(Rtrain, df_input, rank):
    """Collect per-user predictive-uncertainty statistics for each VAE model.

    For every row of `df_input` naming a model in the `vaes` registry, trains
    that model on `Rtrain` and records, batch by batch, each user's number of
    rated items and the mean predictive std returned by model.uncertainty().

    :param Rtrain: sparse user-item training matrix
    :param df_input: DataFrame of configurations; expects columns 'model',
        'lambda', 'corruption', 'iter' ('optimizer' defaults to 'RMSProp')
    :param rank: latent dimension passed to each model constructor
    :return: concatenated DataFrame with columns model / numRated / std
    """
    progress = WorkSplitter()
    m, n = Rtrain.shape

    valid_models = vaes.keys()

    results = []

    # Single run; the loop is kept so the number of runs is easy to raise.
    for run in range(1):

        for idx, row in df_input.iterrows():
            row = row.to_dict()

            # Skip configurations whose model has no VAE implementation.
            if row['model'] not in valid_models:
                continue

            progress.section(json.dumps(row))

            if 'optimizer' not in row.keys():
                row['optimizer'] = 'RMSProp'

            model = vaes[row['model']](n,
                                       rank,
                                       batch_size=100,
                                       lamb=row['lambda'],
                                       optimizer=Regularizer[row['optimizer']])

            model.train_model(Rtrain,
                              corruption=row['corruption'],
                              epoch=row['iter'])
            data_batches = model.get_batches(Rtrain, batch_size=100)
            progress.subsection("Predict")
            for batch in tqdm(data_batches):
                batch_size = batch.shape[0]
                # Second output of uncertainty() is the per-item std estimate.
                _, stds = model.uncertainty(batch.todense())
                # Number of rated items per user in this batch.
                num_rated = np.squeeze(np.asarray(np.sum(batch, axis=1)))
                # Mean std across items = one uncertainty scalar per user.
                std = np.mean(stds, axis=1)
                results.append(
                    pd.DataFrame({
                        'model': [row['model']] * batch_size,
                        'numRated': num_rated,
                        'std': std
                    }))

    return pd.concat(results)
Exemplo n.º 9
0
def lookup(train, validation, params, measure='Cosine', gpu_on=True):
    """Evaluate previously saved latent factors for each requested model.

    Loads U/V (and optional bias B) arrays from `latent/`, predicts top-K
    items, and evaluates against `validation`.

    :param train: training user-item matrix (used to mask already-seen items)
    :param validation: validation matrix to evaluate against
    :param params: dict with keys 'models', 'rank', 'topK', 'metric'
    :param measure: similarity measure forwarded to predict()
    :param gpu_on: forwarded to predict() for GPU scoring
    :return: DataFrame with one row of rounded metrics per model
    """
    progress = WorkSplitter()

    rows = []

    for algorithm in params['models']:

        RQ = np.load('latent/U_{0}_{1}.npy'.format(algorithm, params['rank']))
        Y = np.load('latent/V_{0}_{1}.npy'.format(algorithm, params['rank']))
        bias_path = 'latent/B_{0}_{1}.npy'.format(algorithm, params['rank'])
        # Bias is optional; models without one simply have no B file on disk.
        Bias = np.load(bias_path) if os.path.isfile(bias_path) else None

        progress.subsection("Prediction")

        prediction = predict(matrix_U=RQ,
                             matrix_V=Y,
                             measure=measure,
                             bias=Bias,
                             topK=params['topK'][-1],
                             matrix_Train=train,
                             gpu=gpu_on)

        progress.subsection("Evaluation")

        result = evaluate(prediction, validation, params['metric'],
                          params['topK'])

        result_dict = {'model': algorithm}

        for name in result.keys():
            result_dict[name] = [
                round(result[name][0], 4),
                round(result[name][1], 4)
            ]

        rows.append(result_dict)

    # DataFrame.append was removed in pandas 2.0; build the frame once instead.
    return pd.DataFrame(rows) if rows else pd.DataFrame(columns=['model'])
Exemplo n.º 10
0
def pmi_svd(matrix_train,
            embeded_matrix=np.empty((0)),
            iteration=4,
            rank=200,
            fb=False,
            seed=1,
            root=1.1,
            **unused):
    """
    PureSVD algorithm

    :param matrix_train: rating matrix
    :param embeded_matrix: item or user embedding matrix(side info)
    :param iteration: number of random SVD iterations
    :param rank: SVD top K eigenvalue ranks
    :param fb: facebook package or sklearn package. boolean
    :param seed: Random initialization seed
    :param root: forwarded to get_pmi_matrix_gpu — presumably a smoothing
        exponent for the PMI computation; TODO confirm against its definition
    :param unused: args that not applicable for this algorithm
    :return: (P, Qt, None) — left singular vectors, right singular vectors, no bias
    """
    progress = WorkSplitter()
    matrix_input = matrix_train
    # Optionally stack item side-information rows under the rating matrix.
    if embeded_matrix.shape[0] > 0:
        matrix_input = vstack((matrix_input, embeded_matrix.T))

    progress.subsection("Create PMI matrix")
    # NOTE(review): the GPU PMI routine is used unconditionally here — there is
    # no CPU fallback flag in this function; confirm that is intentional.
    pmi_matrix = get_pmi_matrix_gpu(matrix_input, root)

    progress.subsection("Randomized SVD")
    start_time = time.time()
    if fb:
        P, sigma, Qt = pca(pmi_matrix, k=rank, n_iter=iteration, raw=True)
    else:
        P, sigma, Qt = randomized_svd(pmi_matrix,
                                      n_components=rank,
                                      n_iter=iteration,
                                      power_iteration_normalizer='QR',
                                      random_state=seed)

    print("Elapsed: {0}".format(inhour(time.time() - start_time)))

    return P, Qt, None
Exemplo n.º 11
0
def chain_item_item(matrix_train,
                    embeded_matrix=np.empty((0)),
                    iteration=7,
                    rank=200,
                    fb=True,
                    seed=1,
                    chain=1,
                    **unused):
    """Chained item-item propagation built from a truncated SVD of the ratings."""
    progress = WorkSplitter()
    matrix_input = matrix_train
    # Optionally stack item side-information rows under the rating matrix.
    if embeded_matrix.shape[0] > 0:
        matrix_input = vstack((matrix_input, embeded_matrix.T))

    progress.subsection("Randomized SVD")
    start_time = time.time()
    # `fb` switches between the fbpca implementation and sklearn's.
    if fb:
        P, sigma, Qt = pca(matrix_input, k=rank, n_iter=iteration, raw=True)
    else:
        P, sigma, Qt = randomized_svd(matrix_input,
                                      n_components=rank,
                                      n_iter=iteration,
                                      power_iteration_normalizer='QR',
                                      random_state=seed)

    user_latent = matrix_input.dot(sparse.csc_matrix(Qt).T).toarray()
    scaled_left = P * sigma
    propagation = scaled_left.T.dot(scaled_left)

    chained = user_latent.dot(propagation)

    # The overlap term is only needed when propagating more than once.
    if chain > 1:
        overlap = Qt.dot(Qt.T)

    for _ in range(1, chain):
        chained = chained.dot(overlap).dot(propagation)

    print("Elapsed: {0}".format(inhour(time.time() - start_time)))

    return chained, Qt, None
Exemplo n.º 12
0
def hyper_parameter_tuning(train, validation, params, save_path):
    """Grid-search `k` for each model, appending results to a CSV as they come.

    :param train: training user-item matrix
    :param validation: validation matrix to evaluate against
    :param params: dict with keys 'models', 'k', 'topK', 'metric'
    :param save_path: CSV file name within the configured tables directory
    """
    progress = WorkSplitter()
    table_path = load_yaml('config/global.yml', key='path')['tables']

    # Resume from an existing results table when available; narrowed from a
    # bare `except:` so KeyboardInterrupt/SystemExit are no longer swallowed.
    try:
        df = load_dataframe_csv(table_path, save_path)
    except Exception:
        df = pd.DataFrame(columns=['model', 'k', 'topK'])

    for algorithm in params['models']:

        for k in params['k']:

            # Skip configurations already present in the table (resume support).
            if ((df['model'] == algorithm) & (df['k'] == k)).any():
                continue

            progress.section("model: {}, k: {}".format(algorithm, k))

            progress.subsection("Training")
            model = params['models'][algorithm]()
            model.train(train)

            progress.subsection("Prediction")
            prediction_score = model.predict(train, k=k)

            prediction = predict(prediction_score=prediction_score,
                                 topK=params['topK'][-1],
                                 matrix_Train=train)

            progress.subsection("Evaluation")
            result = evaluate(prediction, validation, params['metric'],
                              params['topK'])

            result_dict = {'model': algorithm, 'k': k}

            for name in result.keys():
                result_dict[name] = [
                    round(result[name][0], 4),
                    round(result[name][1], 4)
                ]

            # DataFrame.append was removed in pandas 2.0; use concat instead.
            df = pd.concat([df, pd.DataFrame([result_dict])],
                           ignore_index=True)

            # Persist after every configuration so progress survives a crash.
            save_dataframe_csv(df, table_path, save_path)
Exemplo n.º 13
0
def execute(train, test, params, model, gpu_on=True, analytical=False):
    """Train `model` under `params`, predict, and evaluate against `test`.

    :param train: training user-item matrix
    :param test: held-out matrix to evaluate against
    :param params: dict with keys 'model', 'rank', 'lambda', 'epoch',
        'corruption', 'topK', 'metric'
    :param model: training callable returning (RQ, Yt, Bias)
    :param gpu_on: forwarded to predict() for GPU scoring
    :param analytical: if True, return evaluate()'s raw result instead of a
        one-row DataFrame
    :return: evaluate() result when analytical, else a one-row DataFrame
    """
    progress = WorkSplitter()

    columns = ['model', 'rank', 'lambda', 'epoch', 'corruption', 'topK']

    progress.section("\n".join(
        [":".join((str(k), str(params[k]))) for k in columns]))

    df = pd.DataFrame(columns=columns)

    progress.subsection("Train")
    RQ, Yt, Bias = model(train,
                         epoch=params['epoch'],
                         lamb=params['lambda'],
                         rank=params['rank'],
                         corruption=params['corruption'])
    Y = Yt.T

    progress.subsection("Prediction")
    prediction = predict(matrix_U=RQ,
                         matrix_V=Y,
                         bias=Bias,
                         topK=params['topK'][-1],
                         matrix_Train=train,
                         gpu=gpu_on)

    progress.subsection("Evaluation")
    result = evaluate(prediction,
                      test,
                      params['metric'],
                      params['topK'],
                      analytical=analytical)

    if analytical:
        return result

    # Fix: copy instead of aliasing, so the caller's params dict is not
    # mutated by the metric entries added below.
    result_dict = dict(params)

    for name in result.keys():
        result_dict[name] = [
            round(result[name][0], 4),
            round(result[name][1], 4)
        ]
    # DataFrame.append was removed in pandas 2.0; use concat instead.
    return pd.concat([df, pd.DataFrame([result_dict])], ignore_index=True)
Exemplo n.º 14
0
def execute(train, test, params, model, analytical=False):
    """Train a `k`-parameterized model under `params` and evaluate on `test`.

    :param train: training user-item matrix
    :param test: held-out matrix to evaluate against
    :param params: dict with keys 'model', 'k', 'topK', 'metric'
    :param model: model class; instantiated, trained, then used to predict
    :param analytical: if True, return evaluate()'s raw result instead of a
        one-row DataFrame
    :return: evaluate() result when analytical, else a one-row DataFrame
    """
    progress = WorkSplitter()

    columns = ['model', 'k', 'topK']

    progress.section("\n".join(
        [":".join((str(k), str(params[k]))) for k in columns]))

    df = pd.DataFrame(columns=columns)

    progress.subsection("Train")
    model = model()
    model.train(train)

    progress.subsection("Prediction")
    prediction_score = model.predict(train, k=params['k'])

    prediction = predict(prediction_score=prediction_score,
                         topK=params['topK'][-1],
                         matrix_Train=train)

    progress.subsection("Evaluation")
    result = evaluate(prediction,
                      test,
                      params['metric'],
                      params['topK'],
                      analytical=analytical)

    if analytical:
        return result

    # Fix: copy instead of aliasing, so the caller's params dict is not
    # mutated by the metric entries added below.
    result_dict = dict(params)

    for name in result.keys():
        result_dict[name] = [
            round(result[name][0], 4),
            round(result[name][1], 4)
        ]
    # DataFrame.append was removed in pandas 2.0; use concat instead.
    return pd.concat([df, pd.DataFrame([result_dict])], ignore_index=True)
Exemplo n.º 15
0
def hyper_parameter_tuning(train, validation, params, unif_train, save_path, seed, way, dataset, gpu_on):
    """Grid-search hyper-parameters per MF algorithm family, saving CSVs as we go.

    Each algorithm family sweeps a different grid: batch_size x lambda,
    lambda only, lambda x lambda2, confidence, or step. Results are written
    to `save_path` after every configuration so progress survives a crash.

    :param train: training user-item matrix
    :param validation: validation matrix used both for training callbacks and evaluation
    :param params: dict with keys 'models', 'iter', 'rank', and the per-family
        grids ('batch_size', 'lambda', 'lambda2', 'confidence', 'step')
    :param unif_train: uniformly-collected training matrix passed to each model
    :param seed: random seed forwarded to each model
    :param way: variant selector forwarded to each model
    :param dataset: dataset name forwarded to each model
    :param gpu_on: GPU flag forwarded to model, predict and evaluate
    """
    progress = WorkSplitter()

    table_path = 'tables/'
    data_name = save_path.split('/')[0]
    save_dir = 'tables/' + data_name + '/'
    # Ensure the per-dataset output directory exists before any save.
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    for algorithm in params['models']:
        # Family 1: sweep batch_size x lambda.
        if algorithm in ['BiasedMF', 'PropensityMF']:
            df = pd.DataFrame(columns=['model', 'batch_size', 'lambda', 'iter'])
            for batch_size in params['batch_size']:
                for lam in params['lambda']:
                    format = "model: {0}, batch_size: {1}, lambda: {2}"
                    progress.section(format.format(algorithm, batch_size, lam))
                    RQ, Y, uBias, iBias = params['models'][algorithm](train, validation,
                                                                      matrix_unif_train=unif_train,
                                                                      iteration=params['iter'],
                                                                      rank=params['rank'], gpu_on=gpu_on,
                                                                      lam=lam, seed=seed,
                                                                      batch_size=batch_size,
                                                                      way=way,
                                                                      dataset=dataset)

                    progress.subsection("Prediction")
                    prediction = predict(matrix_U=RQ, matrix_V=Y, matrix_Valid=validation, ubias=uBias, ibias=iBias,
                                         gpu=gpu_on)

                    progress.subsection("Evaluation")
                    result = evaluate(prediction, validation, params['metric'], gpu=gpu_on)
                    result_dict = {'model': algorithm, 'batch_size': batch_size, 'lambda': lam, 'iter': params['iter']}
                    for name in result.keys():
                        result_dict[name] = round(result[name][0], 8)
                    df = df.append(result_dict, ignore_index=True)
                    save_dataframe_csv(df, table_path, save_path)
        # Family 2: sweep lambda only.
        elif algorithm in ['InitFeatureEmbedMF', 'AlterFeatureEmbedMF', 'WRSampleMF']:
            df = pd.DataFrame(columns=['model', 'lambda', 'iter'])
            for lam in params['lambda']:
                format = "model: {0}, lambda: {1}"
                progress.section(format.format(algorithm, lam))
                RQ, Y, uBias, iBias = params['models'][algorithm](train, validation,
                                                                  matrix_unif_train=unif_train,
                                                                  iteration=params['iter'],
                                                                  rank=params['rank'],
                                                                  gpu_on=gpu_on,
                                                                  lam=lam, seed=seed,
                                                                  batch_size=params['batch_size'],
                                                                  way=way,
                                                                  dataset=dataset)

                progress.subsection("Prediction")
                prediction = predict(matrix_U=RQ, matrix_V=Y, matrix_Valid=validation, ubias=uBias, ibias=iBias,
                                     gpu=gpu_on)

                progress.subsection("Evaluation")
                result = evaluate(prediction, validation, params['metric'], gpu=gpu_on)
                result_dict = {'model': algorithm, 'lambda': lam, 'iter': params['iter']}
                for name in result.keys():
                    result_dict[name] = round(result[name][0], 8)
                df = df.append(result_dict, ignore_index=True)
                save_dataframe_csv(df, table_path, save_path)
        # Family 3: sweep lambda x lambda2.
        elif algorithm in ['CausalSampleMF', 'BridgeLabelMF']:
            df = pd.DataFrame(columns=['model', 'lambda', 'lambda2', 'iter'])
            for lam in params['lambda']:
                for lam2 in params['lambda2']:
                    format = "model: {0}, lambda: {1}, lambda2: {2}"
                    progress.section(format.format(algorithm, lam, lam2))
                    RQ, Y, uBias, iBias = params['models'][algorithm](train, validation,
                                                                      matrix_unif_train=unif_train,
                                                                      iteration=params['iter'],
                                                                      rank=params['rank'],
                                                                      gpu_on=gpu_on,
                                                                      lam=lam, lam2=lam2,
                                                                      seed=seed,
                                                                      batch_size=params['batch_size'],
                                                                      way=way,
                                                                      dataset=dataset)

                    progress.subsection("Prediction")
                    prediction = predict(matrix_U=RQ, matrix_V=Y, matrix_Valid=validation, ubias=uBias, ibias=iBias,
                                         gpu=gpu_on)

                    progress.subsection("Evaluation")
                    result = evaluate(prediction, validation, params['metric'], gpu=gpu_on)
                    result_dict = {'model': algorithm, 'lambda': lam, 'lambda2': lam2, 'iter': params['iter']}
                    for name in result.keys():
                        result_dict[name] = round(result[name][0], 8)
                    df = df.append(result_dict, ignore_index=True)
                    save_dataframe_csv(df, table_path, save_path)
        # Family 4: sweep confidence (lambda fixed from params).
        elif algorithm in ['UnionSampleMF', 'RefineLabelMF']:
            df = pd.DataFrame(columns=['model', 'confidence', 'iter'])
            for conf in params['confidence']:
                format = "model: {0}, confidence: {1}"
                progress.section(format.format(algorithm, conf))
                RQ, Y, uBias, iBias = params['models'][algorithm](train, validation,
                                                                  matrix_unif_train=unif_train,
                                                                  iteration=params['iter'],
                                                                  rank=params['rank'],
                                                                  gpu_on=gpu_on,
                                                                  lam=params['lambda'], seed=seed,
                                                                  batch_size=params['batch_size'],
                                                                  way=way,
                                                                  confidence=conf,
                                                                  dataset=dataset)

                progress.subsection("Prediction")
                prediction = predict(matrix_U=RQ, matrix_V=Y, matrix_Valid=validation, ubias=uBias, ibias=iBias,
                                     gpu=gpu_on)

                progress.subsection("Evaluation")
                result = evaluate(prediction, validation, params['metric'], gpu=gpu_on)
                result_dict = {'model': algorithm, 'confidence': conf, 'iter': params['iter']}
                for name in result.keys():
                    result_dict[name] = round(result[name][0], 8)
                df = df.append(result_dict, ignore_index=True)
                save_dataframe_csv(df, table_path, save_path)
        # Family 5: sweep step (lambda fixed from params).
        elif algorithm in ['BatchSampleMF']:
            df = pd.DataFrame(columns=['model', 'step', 'iter'])
            for step in params['step']:
                format = "model: {0}, step: {1}"
                progress.section(format.format(algorithm, step))
                RQ, Y, uBias, iBias = params['models'][algorithm](train, validation,
                                                                  matrix_unif_train=unif_train,
                                                                  iteration=params['iter'],
                                                                  rank=params['rank'],
                                                                  gpu_on=gpu_on,
                                                                  lam=params['lambda'], seed=seed,
                                                                  batch_size=params['batch_size'],
                                                                  way=way,
                                                                  step=step,
                                                                  dataset=dataset)

                progress.subsection("Prediction")
                prediction = predict(matrix_U=RQ, matrix_V=Y, matrix_Valid=validation, ubias=uBias, ibias=iBias,
                                     gpu=gpu_on)

                progress.subsection("Evaluation")
                result = evaluate(prediction, validation, params['metric'], gpu=gpu_on)
                result_dict = {'model': algorithm, 'step': step, 'iter': params['iter']}
                for name in result.keys():
                    result_dict[name] = round(result[name][0], 8)
                df = df.append(result_dict, ignore_index=True)
                save_dataframe_csv(df, table_path, save_path)
Exemplo n.º 16
0
def hyper_parameter_tuning(train, validation, params, save_path, gpu_on=True):
    """Grid-search rank x lambda x corruption per model, appending to a CSV.

    :param train: training user-item matrix
    :param validation: validation matrix to evaluate against
    :param params: dict with keys 'models', 'rank', 'lambda', 'corruption',
        'epoch', 'topK', 'metric'
    :param save_path: CSV file name within the configured tables directory
    :param gpu_on: forwarded to predict() for GPU scoring
    """
    progress = WorkSplitter()
    table_path = load_yaml('config/global.yml', key='path')['tables']

    # Resume from an existing results table when available; narrowed from a
    # bare `except:` so KeyboardInterrupt/SystemExit are no longer swallowed.
    try:
        df = load_dataframe_csv(table_path, save_path)
    except Exception:
        df = pd.DataFrame(
            columns=['model', 'rank', 'lambda', 'epoch', 'corruption', 'topK'])

    for algorithm in params['models']:

        for rank in params['rank']:

            for lamb in params['lambda']:

                for corruption in params['corruption']:

                    # Skip configurations already recorded (resume support).
                    if ((df['model'] == algorithm) & (df['rank'] == rank) &
                        (df['lambda'] == lamb) &
                        (df['corruption'] == corruption)).any():
                        continue

                    progress.section(
                        "model: {}, rank: {}, lambda: {}, corruption: {}".format(
                            algorithm, rank, lamb, corruption))
                    RQ, Yt, Bias = params['models'][algorithm](
                        train,
                        epoch=params['epoch'],
                        lamb=lamb,
                        rank=rank,
                        corruption=corruption)
                    Y = Yt.T

                    progress.subsection("Prediction")

                    prediction = predict(matrix_U=RQ,
                                         matrix_V=Y,
                                         bias=Bias,
                                         topK=params['topK'][-1],
                                         matrix_Train=train,
                                         gpu=gpu_on)

                    progress.subsection("Evaluation")

                    result = evaluate(prediction, validation, params['metric'],
                                      params['topK'])

                    result_dict = {
                        'model': algorithm,
                        'rank': rank,
                        'lambda': lamb,
                        'epoch': params['epoch'],
                        'corruption': corruption
                    }

                    for name in result.keys():
                        result_dict[name] = [
                            round(result[name][0], 4),
                            round(result[name][1], 4)
                        ]

                    # DataFrame.append was removed in pandas 2.0; use concat.
                    df = pd.concat([df, pd.DataFrame([result_dict])],
                                   ignore_index=True)

                    # Persist after every configuration so progress survives
                    # a crash.
                    save_dataframe_csv(df, table_path, save_path)
Exemplo n.º 17
0
def critiquing(num_users, num_items, user_col, item_col, rating_col,
               keyphrase_vector_col, df_train, keyphrase_names, params,
               num_users_sampled, load_path, save_path):
    """Run critiquing (FMAP) evaluation for every critiquing-capable model.

    Reads the tuned-hyperparameter table at ``load_path`` (relative to the
    configured tables directory), and for each row whose model is registered
    in ``critiquing_models``: builds the model with that row's
    hyperparameters, loads a pretrained checkpoint when one exists
    (otherwise trains and saves one), runs ``critiquing_evaluation``, and
    finally concatenates all per-model FMAP frames into
    ``save_path + '_FMAP.csv'``.

    :param num_users: number of users in the dataset
    :param num_items: number of items in the dataset
    :param user_col: user-id column name in df_train
    :param item_col: item-id column name in df_train
    :param rating_col: rating column name in df_train
    :param keyphrase_vector_col: keyphrase-vector column name in df_train
    :param df_train: training interactions dataframe
    :param keyphrase_names: list of keyphrase names (len gives text_dim)
    :param params: dict holding at least 'model_saved_path'
    :param num_users_sampled: number of users sampled per evaluation
    :param load_path: CSV file name with tuned hyperparameter rows
    :param save_path: output-file name prefix for the FMAP CSV
    """
    progress = WorkSplitter()
    table_path = load_yaml('config/global.yml', key='path')['tables']
    df = pd.read_csv(table_path + load_path)

    dfs_fmap = []

    for _, row in df.iterrows():

        # Only models registered as critiquing-capable are evaluated.
        if row['model'] not in critiquing_models:
            continue

        algorithm = row['model']
        rank = row['rank']
        num_layers = row['num_layers']
        train_batch_size = row['train_batch_size']
        predict_batch_size = row['predict_batch_size']
        lamb = row['lambda']
        learning_rate = row['learning_rate']
        # Fixed training budget and sampling size for critiquing runs,
        # regardless of what tuning used.
        epoch = 200
        negative_sampling_size = 1

        # Renamed from `format` to avoid shadowing the builtin.
        log_fmt = "model: {0}, rank: {1}, num_layers: {2}, train_batch_size: {3}, " \
                  "predict_batch_size: {4}, lambda: {5}, learning_rate: {6}, epoch: {7}, negative_sampling_size: {8}"
        progress.section(
            log_fmt.format(algorithm, rank, num_layers, train_batch_size,
                           predict_batch_size, lamb, learning_rate, epoch,
                           negative_sampling_size))

        progress.subsection("Initializing Negative Sampler")

        negative_sampler = Negative_Sampler(
            df_train[[user_col, item_col, keyphrase_vector_col]],
            user_col,
            item_col,
            rating_col,
            keyphrase_vector_col,
            num_items=num_items,
            batch_size=train_batch_size,
            num_keyphrases=len(keyphrase_names),
            negative_sampling_size=negative_sampling_size)

        model = critiquing_models[algorithm](num_users=num_users,
                                             num_items=num_items,
                                             text_dim=len(keyphrase_names),
                                             embed_dim=rank,
                                             num_layers=num_layers,
                                             negative_sampler=negative_sampler,
                                             lamb=lamb,
                                             learning_rate=learning_rate)

        pretrained_path = load_yaml('config/global.yml',
                                    key='path')['pretrained']
        # Best-effort checkpoint reuse: if loading fails for any reason,
        # train from scratch and cache the result for the next run.
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still escape.
        try:
            model.load_model(pretrained_path + params['model_saved_path'],
                             row['model'])
        except Exception:
            model.train_model(df_train,
                              user_col,
                              item_col,
                              rating_col,
                              epoch=epoch)
            model.save_model(pretrained_path + params['model_saved_path'],
                             row['model'])

        df_fmap = critiquing_evaluation(model,
                                        algorithm,
                                        num_users,
                                        num_items,
                                        num_users_sampled,
                                        topk=[5, 10, 20])

        dfs_fmap.append(df_fmap)

        # Free the TF graph/session before the next model is built.
        model.sess.close()
        tf.reset_default_graph()

    # pd.concat([]) raises an opaque ValueError; fail with a clear message
    # (same exception type) when no critiquing-capable model was found.
    if not dfs_fmap:
        raise ValueError(
            "No critiquing-capable models found in {}".format(load_path))

    df_output_fmap = pd.concat(dfs_fmap)

    save_dataframe_csv(df_output_fmap,
                       table_path,
                       name=save_path + '_FMAP.csv')
Exemplo n.º 18
0
def general(num_users, num_items, user_col, item_col, rating_col,
            keyphrase_vector_col, df_train, df_test, keyphrase_names, params,
            save_path):
    """Train each model's best tuned configuration and evaluate it on test.

    Picks the best row per model (by validation NDCG) from the tuning
    results, retrains that configuration for a fixed 300 epochs, predicts
    element-wise, evaluates against the test matrix, and appends one result
    row per model to the CSV at ``save_path`` after every model (so a crash
    loses at most the current model).

    :param num_users: number of users in the dataset
    :param num_items: number of items in the dataset
    :param user_col: user-id column name
    :param item_col: item-id column name
    :param rating_col: rating column name
    :param keyphrase_vector_col: keyphrase-vector column name
    :param df_train: training interactions dataframe
    :param df_test: test interactions dataframe
    :param keyphrase_names: list of keyphrase names (len gives text_dim)
    :param params: dict holding at least 'tuning_result_path'
    :param save_path: output CSV name for accumulated results
    :return: the accumulated results dataframe
    """
    progress = WorkSplitter()
    table_path = load_yaml('config/global.yml', key='path')['tables']
    # One best row per model, selected by validation NDCG.
    df = find_best_hyperparameters(table_path + params['tuning_result_path'],
                                   'NDCG')

    # Resume from an existing results CSV; a missing/unreadable file simply
    # starts a fresh table. Narrowed from a bare `except:`.
    try:
        output_df = load_dataframe_csv(table_path, save_path)
    except Exception:
        output_df = pd.DataFrame(columns=[
            'model', 'rank', 'num_layers', 'train_batch_size',
            'predict_batch_size', 'lambda', 'topK', 'learning_rate', 'epoch',
            'negative_sampling_size'
        ])

    for _, row in df.iterrows():

        algorithm = row['model']
        rank = row['rank']
        num_layers = row['num_layers']
        train_batch_size = row['train_batch_size']
        predict_batch_size = row['predict_batch_size']
        lamb = row['lambda']
        learning_rate = row['learning_rate']
        # Fixed final-training budget, independent of the tuned epoch count.
        epoch = 300
        negative_sampling_size = row['negative_sampling_size']

        row['topK'] = [5, 10, 15, 20, 50]
        row['metric'] = [
            'R-Precision', 'NDCG', 'Clicks', 'Recall', 'Precision', 'MAP'
        ]

        # Renamed from `format` to avoid shadowing the builtin.
        log_fmt = "model: {0}, rank: {1}, num_layers: {2}, train_batch_size: {3}, " \
                  "predict_batch_size: {4}, lambda: {5}, learning_rate: {6}, epoch: {7}, negative_sampling_size: {8}"
        progress.section(
            log_fmt.format(algorithm, rank, num_layers, train_batch_size,
                           predict_batch_size, lamb, learning_rate, epoch,
                           negative_sampling_size))

        progress.subsection("Initializing Negative Sampler")

        negative_sampler = Negative_Sampler(
            df_train[[user_col, item_col, keyphrase_vector_col]],
            user_col,
            item_col,
            rating_col,
            keyphrase_vector_col,
            num_items=num_items,
            batch_size=train_batch_size,
            num_keyphrases=len(keyphrase_names),
            negative_sampling_size=negative_sampling_size)

        model = models[algorithm](num_users=num_users,
                                  num_items=num_items,
                                  text_dim=len(keyphrase_names),
                                  embed_dim=rank,
                                  num_layers=num_layers,
                                  negative_sampler=negative_sampler,
                                  lamb=lamb,
                                  learning_rate=learning_rate)

        progress.subsection("Training")

        # Warm-start checkpoint loading was disabled upstream (dead
        # commented-out code removed); models always train from scratch.
        model.train_model(df_train,
                          user_col,
                          item_col,
                          rating_col,
                          epoch=epoch)

        progress.subsection("Prediction")

        prediction, explanation = predict_elementwise(
            model,
            df_train,
            user_col,
            item_col,
            row['topK'][-1],
            batch_size=row['predict_batch_size'],
            enable_explanation=False,
            keyphrase_names=keyphrase_names)

        R_test = to_sparse_matrix(df_test, num_users, num_items, user_col,
                                  item_col, rating_col)

        result = evaluate(prediction, R_test, row['metric'], row['topK'])

        result_dict = {
            'model': row['model'],
            'rank': row['rank'],
            'num_layers': row['num_layers'],
            'train_batch_size': row['train_batch_size'],
            'predict_batch_size': row['predict_batch_size'],
            'lambda': row['lambda'],
            'topK': row['topK'][-1],
            'learning_rate': row['learning_rate'],
            'epoch': epoch,
            'negative_sampling_size': row['negative_sampling_size'],
        }

        # Only the metric value at the first topK is recorded here.
        for name in result.keys():
            result_dict[name] = round(result[name][0], 4)
        # DataFrame.append is deprecated (removed in pandas 2.x); concat
        # a one-row frame instead.
        output_df = pd.concat([output_df, pd.DataFrame([result_dict])],
                              ignore_index=True)

        # Free the TF graph/session before the next model is built.
        model.sess.close()
        tf.reset_default_graph()

        # Persist after every model so progress survives interruption.
        save_dataframe_csv(output_df, table_path, save_path)

    return output_df
Exemplo n.º 19
0
def hyper_parameter_tuning(train, validation, params, save_path, measure='Cosine', gpu_on=True):
    """Exhaustive grid search over 12 hyperparameters with CSV resume.

    Every combination from the grids in ``params`` is trained on ``train``
    and evaluated on ``validation``; a result row is appended to the CSV at
    ``save_path`` after each combination, and combinations already present
    in that CSV are skipped, so interrupted runs resume where they stopped.

    :param train: user-item training matrix
    :param validation: user-item validation matrix
    :param params: dict with 'models' plus one list per grid dimension
                   ('alpha', 'batch_size', 'corruption', 'epoch',
                   'iteration', 'key_dimension', 'lambda', 'learning_rate',
                   'mode_dimension', 'rank', 'root'), and 'metric'/'topK'/
                   'similarity' for evaluation and bookkeeping
    :param save_path: results CSV name (relative to the tables directory)
    :param measure: similarity measure passed to predict()
    :param gpu_on: whether to run train/predict on GPU
    """
    from itertools import product

    progress = WorkSplitter()
    table_path = load_yaml('config/global.yml', key='path')['tables']

    # Resume from an existing results CSV; a missing file starts a fresh
    # table. Narrowed from a bare `except:`.
    try:
        df = load_dataframe_csv(table_path, save_path)
    except Exception:
        df = pd.DataFrame(columns=['model', 'similarity', 'alpha', 'batch_size',
                                   'corruption', 'epoch', 'iteration', 'key_dimension',
                                   'lambda', 'learning_rate', 'mode_dimension',
                                   'normalize', 'rank', 'root', 'topK'])

    # Flatten the original 12-deep loop nest. itertools.product iterates
    # the rightmost factor fastest, which matches the nested-loop order.
    search_grid = product(params['models'], params['alpha'],
                          params['batch_size'], params['corruption'],
                          params['epoch'], params['iteration'],
                          params['key_dimension'], params['lambda'],
                          params['learning_rate'], params['mode_dimension'],
                          params['rank'], params['root'])

    for (algorithm, alpha, batch_size, corruption, epoch, iteration,
         key_dim, lamb, learning_rate, mode_dim, rank, root) in search_grid:

        # Skip combinations already recorded (resume support).
        if ((df['model'] == algorithm) &
            (df['alpha'] == alpha) &
            (df['batch_size'] == batch_size) &
            (df['corruption'] == corruption) &
            (df['epoch'] == epoch) &
            (df['iteration'] == iteration) &
            (df['key_dimension'] == key_dim) &
            (df['lambda'] == lamb) &
            (df['learning_rate'] == learning_rate) &
            (df['mode_dimension'] == mode_dim) &
            (df['rank'] == rank) &
            (df['root'] == root)).any():
            continue

        # Implicit string concatenation instead of a backslash inside the
        # literal, which used to embed the continuation line's leading
        # indentation into the logged message. Also renamed from `format`
        # to avoid shadowing the builtin.
        log_fmt = ("model: {}, alpha: {}, batch_size: {}, corruption: {}, "
                   "epoch: {}, iteration: {}, key_dimension: {}, lambda: {}, "
                   "learning_rate: {}, mode_dimension: {}, rank: {}, root: {}")
        progress.section(log_fmt.format(algorithm, alpha, batch_size,
                                        corruption, epoch, iteration,
                                        key_dim, lamb, learning_rate,
                                        mode_dim, rank, root))
        RQ, Yt, Bias = params['models'][algorithm](train,
                                                   embedded_matrix=np.empty((0)),
                                                   mode_dim=mode_dim,
                                                   key_dim=key_dim,
                                                   batch_size=batch_size,
                                                   learning_rate=learning_rate,
                                                   iteration=iteration,
                                                   epoch=epoch,
                                                   rank=rank,
                                                   corruption=corruption,
                                                   gpu_on=gpu_on,
                                                   lamb=lamb,
                                                   alpha=alpha,
                                                   root=root)
        Y = Yt.T

        progress.subsection("Prediction")

        prediction = predict(matrix_U=RQ,
                             matrix_V=Y,
                             bias=Bias,
                             topK=params['topK'][-1],
                             matrix_Train=train,
                             measure=measure,
                             gpu=gpu_on)

        progress.subsection("Evaluation")

        result = evaluate(prediction,
                          validation,
                          params['metric'],
                          params['topK'])

        result_dict = {'model': algorithm,
                       'alpha': alpha,
                       'batch_size': batch_size,
                       'corruption': corruption,
                       'epoch': epoch,
                       'iteration': iteration,
                       'key_dimension': key_dim,
                       'lambda': lamb,
                       'learning_rate': learning_rate,
                       'mode_dimension': mode_dim,
                       'rank': rank,
                       'similarity': params['similarity'],
                       'root': root}

        # Record each metric at the first two topK cutoffs.
        for name in result.keys():
            result_dict[name] = [round(result[name][0], 4),
                                 round(result[name][1], 4)]

        # DataFrame.append is deprecated (removed in pandas 2.x).
        df = pd.concat([df, pd.DataFrame([result_dict])], ignore_index=True)

        # Persist after every combination so progress survives interruption.
        save_dataframe_csv(df, table_path, save_path)
Exemplo n.º 20
0
def hyper_parameter_tuning(train, validation, keyphrase_train, keyphrase_validation, params, save_path, tune_explanation=False):
    """Grid search for joint rating/keyphrase models with CSV resume.

    Trains every hyperparameter combination from ``params`` (restricted to
    configurations where lambda_latent == lambda_keyphrase), evaluates
    either the keyphrase predictions (``tune_explanation=True``) or the
    rating predictions against the corresponding validation data, and
    appends one row per combination to the CSV at ``save_path``.
    Combinations already recorded there are skipped (resume support).

    :param train: user-item training matrix
    :param validation: user-item validation matrix
    :param keyphrase_train: user-keyphrase training matrix
    :param keyphrase_validation: user-keyphrase validation matrix
    :param params: dict with 'models' plus one list per grid dimension,
                   and 'metric'/'topK' for evaluation
    :param save_path: results CSV name (relative to the tables directory)
    :param tune_explanation: evaluate keyphrase (True) or rating (False)
                             predictions
    """
    from itertools import product

    progress = WorkSplitter()
    table_path = load_yaml('config/global.yml', key='path')['tables']

    # Resume from an existing results CSV; a missing file starts a fresh
    # table. Narrowed from a bare `except:`.
    try:
        df = load_dataframe_csv(table_path, save_path)
    except Exception:
        df = pd.DataFrame(columns=['model', 'rank', 'beta', 'lambda_l2', 'lambda_keyphrase', 'lambda_latent', 'lambda_rating', 'topK', 'learning_rate', 'epoch', 'corruption', 'optimizer'])

    # Flatten the original 11-deep loop nest. itertools.product iterates
    # the rightmost factor fastest, matching the nested-loop order.
    search_grid = product(params['models'], params['rank'], params['beta'],
                          params['lambda_l2'], params['lambda_keyphrase'],
                          params['lambda_latent'], params['lambda_rating'],
                          params['learning_rate'], params['epoch'],
                          params['corruption'], params['optimizer'])

    for (algorithm, rank, beta, lamb_l2, lamb_keyphrase, lamb_latent,
         lamb_rating, learning_rate, epoch, corruption,
         optimizer) in search_grid:

        # Search-space constraint: latent and keyphrase regularization
        # weights must be tied.
        if lamb_latent != lamb_keyphrase:
            continue

        # Skip combinations already recorded (resume support).
        if ((df['model'] == algorithm) &
            (df['rank'] == rank) &
            (df['beta'] == beta) &
            (df['lambda_l2'] == lamb_l2) &
            (df['lambda_keyphrase'] == lamb_keyphrase) &
            (df['lambda_latent'] == lamb_latent) &
            (df['lambda_rating'] == lamb_rating) &
            (df['learning_rate'] == learning_rate) &
            (df['epoch'] == epoch) &
            (df['corruption'] == corruption) &
            (df['optimizer'] == optimizer)).any():
            continue

        # Renamed from `format` to avoid shadowing the builtin.
        log_fmt = "model: {}, rank: {}, beta: {}, lambda_l2: {}, " \
                  "lambda_keyphrase: {}, lambda_latent: {}, lambda_rating: {}, " \
                  "learning_rate: {}, epoch: {}, corruption: {}, optimizer: {}"
        progress.section(log_fmt.format(algorithm,
                                        rank,
                                        beta,
                                        lamb_l2,
                                        lamb_keyphrase,
                                        lamb_latent,
                                        lamb_rating,
                                        learning_rate,
                                        epoch,
                                        corruption,
                                        optimizer))

        progress.subsection("Training")

        model = models[algorithm](matrix_train=train,
                                  epoch=epoch,
                                  lamb_l2=lamb_l2,
                                  lamb_keyphrase=lamb_keyphrase,
                                  lamb_latent=lamb_latent,
                                  lamb_rating=lamb_rating,
                                  beta=beta,
                                  learning_rate=learning_rate,
                                  rank=rank,
                                  corruption=corruption,
                                  optimizer=optimizer,
                                  matrix_train_keyphrase=keyphrase_train)

        progress.subsection("Prediction")

        rating_score, keyphrase_score = model.predict(train.todense())

        progress.subsection("Evaluation")

        # Evaluate whichever head of the model is being tuned.
        if tune_explanation:
            prediction = predict_keyphrase(keyphrase_score,
                                           topK=params['topK'][-1])

            result = evaluate(prediction,
                              keyphrase_validation,
                              params['metric'],
                              params['topK'])
        else:
            prediction = predict(rating_score,
                                 topK=params['topK'][-1],
                                 matrix_Train=train)

            result = evaluate(prediction,
                              validation,
                              params['metric'],
                              params['topK'])

        result_dict = {'model': algorithm,
                       'rank': rank,
                       'beta': beta,
                       'lambda_l2': lamb_l2,
                       'lambda_keyphrase': lamb_keyphrase,
                       'lambda_latent': lamb_latent,
                       'lambda_rating': lamb_rating,
                       'learning_rate': learning_rate,
                       'epoch': epoch,
                       'corruption': corruption,
                       'optimizer': optimizer}

        # Record each metric at the first two topK cutoffs.
        for name in result.keys():
            result_dict[name] = [round(result[name][0], 4),
                                 round(result[name][1], 4)]

        # DataFrame.append is deprecated (removed in pandas 2.x).
        df = pd.concat([df, pd.DataFrame([result_dict])], ignore_index=True)

        # Free the TF graph/session before the next model is built.
        model.sess.close()
        tf.reset_default_graph()

        # Persist after every combination so progress survives interruption.
        save_dataframe_csv(df, table_path, save_path)
Exemplo n.º 21
0
def explanation_parameter_tuning(num_users, num_items, user_col, item_col,
                                 rating_col, keyphrase_vector_col, df_train,
                                 df_valid, keyphrase_names, params, save_path):
    """Grid search for keyphrase-explanation models with CSV resume.

    For every hyperparameter combination from the grids in ``params``,
    builds a negative sampler and model, trains on ``df_train``, predicts
    explanations for ``df_valid``, evaluates them with
    ``evaluate_explanation``, and appends one row per combination to the
    CSV at ``save_path``. Combinations already recorded there are skipped,
    so interrupted runs resume where they stopped.

    :param num_users: number of users in the dataset
    :param num_items: number of items in the dataset
    :param user_col: user-id column name
    :param item_col: item-id column name
    :param rating_col: rating column name
    :param keyphrase_vector_col: keyphrase-vector column name
    :param df_train: training interactions dataframe
    :param df_valid: validation interactions dataframe
    :param keyphrase_names: list of keyphrase names (len gives text_dim)
    :param params: dict with 'models' plus one list per grid dimension,
                   and 'metric'/'topK' for evaluation
    :param save_path: results CSV name (relative to the tables directory)
    """
    from itertools import product

    progress = WorkSplitter()
    table_path = load_yaml('config/global.yml', key='path')['tables']

    # Resume from an existing results CSV; a missing file starts a fresh
    # table. Narrowed from a bare `except:`.
    try:
        df = load_dataframe_csv(table_path, save_path)
    except Exception:
        df = pd.DataFrame(columns=[
            'model', 'rank', 'num_layers', 'train_batch_size',
            'predict_batch_size', 'lambda', 'topK', 'learning_rate', 'epoch',
            'negative_sampling_size'
        ])

    # Flatten the original 9-deep loop nest. itertools.product iterates
    # the rightmost factor fastest, matching the nested-loop order.
    search_grid = product(params['models'], params['rank'],
                          params['num_layers'], params['train_batch_size'],
                          params['predict_batch_size'], params['lambda'],
                          params['learning_rate'], params['epoch'],
                          params['negative_sampling_size'])

    for (algorithm, rank, num_layers, train_batch_size, predict_batch_size,
         lamb, learning_rate, epoch, negative_sampling_size) in search_grid:

        # Skip combinations already recorded (resume support).
        if ((df['model'] == algorithm) &
            (df['rank'] == rank) &
            (df['num_layers'] == num_layers) &
            (df['train_batch_size'] == train_batch_size) &
            (df['predict_batch_size'] == predict_batch_size) &
            (df['lambda'] == lamb) &
            (df['learning_rate'] == learning_rate) &
            (df['epoch'] == epoch) &
            (df['negative_sampling_size'] == negative_sampling_size)).any():
            continue

        # Renamed from `format` to avoid shadowing the builtin.
        log_fmt = "model: {0}, rank: {1}, num_layers: {2}, " \
                  "train_batch_size: {3}, predict_batch_size: {4}, " \
                  "lambda: {5}, learning_rate: {6}, epoch: {7}, " \
                  "negative_sampling_size: {8}"
        progress.section(
            log_fmt.format(algorithm, rank, num_layers, train_batch_size,
                           predict_batch_size, lamb, learning_rate, epoch,
                           negative_sampling_size))

        progress.subsection("Initializing Negative Sampler")

        negative_sampler = Negative_Sampler(
            df_train[[user_col, item_col, keyphrase_vector_col]],
            user_col,
            item_col,
            rating_col,
            keyphrase_vector_col,
            num_items=num_items,
            batch_size=train_batch_size,
            num_keyphrases=len(keyphrase_names),
            negative_sampling_size=negative_sampling_size)

        model = params['models'][algorithm](
            num_users=num_users,
            num_items=num_items,
            text_dim=len(keyphrase_names),
            embed_dim=rank,
            num_layers=num_layers,
            negative_sampler=negative_sampler,
            lamb=lamb,
            learning_rate=learning_rate)

        progress.subsection("Training")

        model.train_model(df_train,
                          user_col,
                          item_col,
                          rating_col,
                          epoch=epoch)

        progress.subsection("Prediction")

        df_valid_explanation = predict_explanation(
            model,
            df_valid,
            user_col,
            item_col,
            topk_keyphrase=params['topK'][-1])

        progress.subsection("Evaluation")

        explanation_result = evaluate_explanation(
            df_valid_explanation, df_valid,
            params['metric'], params['topK'],
            user_col, item_col, rating_col,
            keyphrase_vector_col)

        result_dict = {
            'model': algorithm,
            'rank': rank,
            'num_layers': num_layers,
            'train_batch_size': train_batch_size,
            'predict_batch_size': predict_batch_size,
            'lambda': lamb,
            'learning_rate': learning_rate,
            'epoch': epoch,
            'negative_sampling_size': negative_sampling_size
        }

        # Record each metric at the first two topK cutoffs.
        for name in explanation_result.keys():
            result_dict[name] = [
                round(explanation_result[name][0], 4),
                round(explanation_result[name][1], 4)
            ]

        # DataFrame.append is deprecated (removed in pandas 2.x).
        df = pd.concat([df, pd.DataFrame([result_dict])], ignore_index=True)

        # Free the TF graph/session before the next model is built.
        model.sess.close()
        tf.reset_default_graph()

        # Persist after every combination so progress survives interruption.
        save_dataframe_csv(df, table_path, save_path)
Exemplo n.º 22
0
def general(train, test, keyphrase_train, keyphrase_test, params, save_path, final_explanation=False):
    """Re-train the best tuned configuration per model and evaluate on the test split.

    Reads the tuning table referenced by ``params['tuning_result_path']``,
    picks the best rows by NDCG, re-trains each model, and appends the test
    metrics to the CSV at ``save_path`` (resumed if it already exists).

    :param train: user-item training matrix (scipy sparse; ``.todense()`` is called on it)
    :param test: user-item test matrix used by ``evaluate``
    :param keyphrase_train: user-keyphrase training matrix fed to the model
    :param keyphrase_test: keyphrase ground truth used when ``final_explanation`` is True
    :param params: dict; must contain ``'tuning_result_path'``
    :param save_path: CSV name (under the configured tables directory) for results
    :param final_explanation: if True, evaluate keyphrase explanations instead of ratings
    :return: DataFrame with one result row per evaluated configuration
    """
    progress = WorkSplitter()
    table_path = load_yaml('config/global.yml', key='path')['tables']
    df = find_best_hyperparameters(table_path + params['tuning_result_path'], 'NDCG')

    try:
        output_df = load_dataframe_csv(table_path, save_path)
    except Exception:
        # No previous results to resume from; start a fresh table.
        output_df = pd.DataFrame(columns=['model', 'rank', 'beta', 'lambda_l2', 'lambda_keyphrase', 'lambda_latent', 'lambda_rating', 'topK', 'learning_rate', 'epoch', 'corruption', 'optimizer'])

    for index, row in df.iterrows():

        algorithm = row['model']
        rank = row['rank']
        beta = row['beta']
        lamb_l2 = row['lambda_l2']
        lamb_keyphrase = row['lambda_keyphrase']
        lamb_latent = row['lambda_latent']
        lamb_rating = row['lambda_rating']
        learning_rate = row['learning_rate']
        epoch = row['epoch']
        corruption = row['corruption']
        optimizer = row['optimizer']

        row['topK'] = [5, 10, 15, 20, 50]
        row['metric'] = ['R-Precision', 'NDCG', 'Clicks', 'Recall', 'Precision', 'MAP']

        # 'fmt' rather than 'format' to avoid shadowing the builtin.
        fmt = "model: {}, rank: {}, beta: {}, lambda_l2: {}, lambda_keyphrase: {}, " \
              "lambda_latent: {}, lambda_rating: {}, learning_rate: {}, " \
              "epoch: {}, corruption: {}, optimizer: {}"

        progress.section(fmt.format(algorithm, rank, beta, lamb_l2, lamb_keyphrase, lamb_latent, lamb_rating, learning_rate, epoch, corruption, optimizer))

        progress.subsection("Training")

        model = models[algorithm](matrix_train=train,
                                  epoch=epoch,
                                  lamb_l2=lamb_l2,
                                  lamb_keyphrase=lamb_keyphrase,
                                  lamb_latent=lamb_latent,
                                  lamb_rating=lamb_rating,
                                  beta=beta,
                                  learning_rate=learning_rate,
                                  rank=rank,
                                  corruption=corruption,
                                  optimizer=optimizer,
                                  matrix_train_keyphrase=keyphrase_train)

        progress.subsection("Prediction")

        rating_score, keyphrase_score = model.predict(train.todense())

        progress.subsection("Evaluation")

        if final_explanation:
            # Explanation mode: rank keyphrases (topK[-2] == 20) against keyphrase_test.
            prediction = predict_keyphrase(keyphrase_score,
                                           topK=row['topK'][-2])

            result = evaluate_explanation(prediction,
                                          keyphrase_test,
                                          row['metric'],
                                          row['topK'])
        else:
            # Rating mode: rank items (topK[-1] == 50), masking training interactions.
            prediction = predict(rating_score,
                                 topK=row['topK'][-1],
                                 matrix_Train=train)

            result = evaluate(prediction, test, row['metric'], row['topK'])

        result_dict = {'model': algorithm,
                       'rank': rank,
                       'beta': beta,
                       'lambda_l2': lamb_l2,
                       'lambda_keyphrase': lamb_keyphrase,
                       'lambda_latent': lamb_latent,
                       'lambda_rating': lamb_rating,
                       'learning_rate': learning_rate,
                       'epoch': epoch,
                       'corruption': corruption,
                       'optimizer': optimizer}

        # Each metric value is a [mean, confidence-interval] pair.
        for name in result.keys():
            result_dict[name] = [round(result[name][0], 4),
                                 round(result[name][1], 4)]

        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # backward-compatible equivalent.
        output_df = pd.concat([output_df, pd.DataFrame([result_dict])],
                              ignore_index=True)

        # TF1-style cleanup so the next model starts from a fresh graph.
        model.sess.close()
        tf.reset_default_graph()

        # Save after every model so partial progress survives a crash.
        save_dataframe_csv(output_df, table_path, save_path)

    return output_df
Exemplo n.º 23
0
def hyper_parameter_tuning(train,
                           validation,
                           params,
                           save_path,
                           measure='Cosine',
                           gpu_on=True):
    """Grid-search rank/alpha/lambda/corruption/root for each model in ``params``.

    Results are appended to the CSV at ``save_path`` after every configuration,
    and configurations already present in that CSV are skipped, so the search
    is resumable.

    :param train: user-item training matrix
    :param validation: user-item validation matrix used for evaluation
    :param params: dict with keys 'models', 'rank', 'alpha', 'lambda',
        'corruption', 'root', 'iter', 'similarity', 'metric', 'topK'
    :param save_path: CSV name (under the configured tables directory)
    :param measure: similarity measure forwarded to ``predict``
    :param gpu_on: forwarded to model training and prediction
    """
    progress = WorkSplitter()
    table_path = load_yaml('config/global.yml', key='path')['tables']

    try:
        df = load_dataframe_csv(table_path, save_path)
    except Exception:
        # No previous results to resume from; start a fresh table.
        df = pd.DataFrame(columns=[
            'model', 'rank', 'alpha', 'lambda', 'iter', 'similarity',
            'corruption', 'root', 'topK'
        ])

    for algorithm in params['models']:

        for rank in params['rank']:

            for alpha in params['alpha']:

                for lam in params['lambda']:

                    for corruption in params['corruption']:

                        for root in params['root']:

                            # Skip configurations already evaluated in a
                            # previous (possibly interrupted) run.
                            if ((df['model'] == algorithm) &
                                (df['rank'] == rank) & (df['alpha'] == alpha) &
                                (df['lambda'] == lam) &
                                (df['corruption'] == corruption) &
                                (df['root'] == root)).any():
                                continue

                            # 'fmt' rather than 'format' to avoid shadowing the builtin.
                            fmt = "model: {0}, rank: {1}, alpha: {2}, lambda: {3}, corruption: {4}, root: {5}"
                            progress.section(
                                fmt.format(algorithm, rank, alpha, lam,
                                           corruption, root))
                            RQ, Yt, Bias = params['models'][algorithm](
                                train,
                                embeded_matrix=np.empty((0)),
                                iteration=params['iter'],
                                rank=rank,
                                lam=lam,
                                alpha=alpha,
                                corruption=corruption,
                                root=root,
                                gpu_on=gpu_on)
                            Y = Yt.T

                            progress.subsection("Prediction")

                            prediction = predict(matrix_U=RQ,
                                                 matrix_V=Y,
                                                 measure=measure,
                                                 bias=Bias,
                                                 topK=params['topK'][-1],
                                                 matrix_Train=train,
                                                 gpu=gpu_on)

                            progress.subsection("Evaluation")

                            result = evaluate(prediction, validation,
                                              params['metric'], params['topK'])

                            result_dict = {
                                'model': algorithm,
                                'rank': rank,
                                'alpha': alpha,
                                'lambda': lam,
                                'iter': params['iter'],
                                'similarity': params['similarity'],
                                'corruption': corruption,
                                'root': root
                            }

                            # Each metric value is a [mean, confidence-interval] pair.
                            for name in result.keys():
                                result_dict[name] = [
                                    round(result[name][0], 4),
                                    round(result[name][1], 4)
                                ]

                            # DataFrame.append was removed in pandas 2.0;
                            # pd.concat is the backward-compatible equivalent.
                            df = pd.concat([df, pd.DataFrame([result_dict])],
                                           ignore_index=True)

                            # Save after every configuration so partial
                            # progress survives a crash.
                            save_dataframe_csv(df, table_path, save_path)
Exemplo n.º 24
0
def hyper_parameter_tuning(train,
                           validation,
                           params,
                           measure='Cosine',
                           gpu_on=True):
    """Grid-search rank/alpha/root for each model, introspecting which
    hyper-parameters each model function actually accepts.

    :param train: user-item training matrix
    :param validation: user-item validation matrix used for evaluation
    :param params: dict with keys 'models', 'rank', 'alpha', 'root', 'iter',
        'lam', 'metric', 'topK'
    :param measure: similarity measure forwarded to ``predict``
    :param gpu_on: forwarded to model training and prediction
    :return: DataFrame with one result row per evaluated configuration
    """
    progress = WorkSplitter()
    df = pd.DataFrame(columns=['model', 'rank', 'alpha', 'root', 'topK'])

    for algorithm in params['models']:

        for rank in params['rank']:
            # Only sweep 'alpha' if the model function takes it.
            # getfullargspec replaces getargspec, removed in Python 3.11.
            if 'alpha' in inspect.getfullargspec(
                    params['models'][algorithm]).args:
                alphas = params['alpha']
            else:
                alphas = [1]

            for alpha in alphas:

                # Only sweep 'root' if the model function takes it.
                if 'root' in inspect.getfullargspec(
                        params['models'][algorithm]).args:
                    roots = params['root']
                else:
                    roots = [1]

                for root in roots:

                    progress.section(
                        "model: {0}, rank: {1}, root: {2}, alpha: {3}".format(
                            algorithm, rank, root, alpha))
                    RQ, Yt, Bias = params['models'][algorithm](
                        train,
                        embeded_matrix=np.empty((0)),
                        iteration=params['iter'],
                        rank=rank,
                        lam=params['lam'],
                        root=root,
                        alpha=alpha,
                        # Bug fix: was hard-coded True, ignoring the gpu_on
                        # argument (prediction below already honored it).
                        gpu_on=gpu_on)
                    Y = Yt.T

                    progress.subsection("Prediction")

                    prediction = predict(matrix_U=RQ,
                                         matrix_V=Y,
                                         measure=measure,
                                         bias=Bias,
                                         topK=params['topK'][-1],
                                         matrix_Train=train,
                                         gpu=gpu_on)

                    progress.subsection("Evaluation")

                    result = evaluate(prediction, validation, params['metric'],
                                      params['topK'])

                    result_dict = {
                        'model': algorithm,
                        'rank': rank,
                        'root': root,
                        'alpha': alpha
                    }

                    # Each metric value is a [mean, confidence-interval] pair.
                    for name in result.keys():
                        result_dict[name] = [
                            round(result[name][0], 4),
                            round(result[name][1], 4)
                        ]

                    # DataFrame.append was removed in pandas 2.0; pd.concat is
                    # the backward-compatible equivalent.
                    df = pd.concat([df, pd.DataFrame([result_dict])],
                                   ignore_index=True)
    return df
def critiquing(train_set, keyphrase_train_set, item_keyphrase_train_set,
               params, num_users_sampled, load_path, save_path,
               critiquing_function):
    """Run critiquing (F-MAP) evaluation for every critiquing-capable model
    configuration listed in the CSV at ``load_path``.

    Re-trains each model with the hyper-parameters from its CSV row, runs
    ``critiquing_evaluation`` on a sample of users, and writes the combined
    F-MAP table to ``save_path + '_FMAP.csv'``.

    :param train_set: user-item training matrix
    :param keyphrase_train_set: user-keyphrase training matrix
    :param item_keyphrase_train_set: item-keyphrase matrix for critiquing
    :param params: unused here; kept for interface parity with siblings
    :param num_users_sampled: number of users sampled per evaluation
    :param load_path: CSV (under the tables directory) with model settings
    :param save_path: output CSV name prefix
    :param critiquing_function: critiquing strategy forwarded to evaluation
    """
    progress = WorkSplitter()
    table_path = load_yaml('config/global.yml', key='path')['tables']
    df = pd.read_csv(table_path + load_path)

    dfs_fmap = []

    for index, row in df.iterrows():

        # Only models registered as critiquing-capable are evaluated.
        if row['model'] not in critiquing_models:
            continue

        algorithm = row['model']
        rank = row['rank']
        beta = row['beta']
        lamb_l2 = row['lambda_l2']
        lamb_keyphrase = row['lambda_keyphrase']
        lamb_latent = row['lambda_latent']
        lamb_rating = row['lambda_rating']
        learning_rate = row['learning_rate']
        epoch = row['epoch']
        corruption = row['corruption']
        optimizer = row['optimizer']

        # 'fmt' rather than 'format' to avoid shadowing the builtin.
        fmt = "model: {}, rank: {}, beta: {}, lambda_l2: {}, lambda_keyphrase: {}, " \
              "lambda_latent: {}, lambda_rating: {}, learning_rate: {}, " \
              "epoch: {}, corruption: {}, optimizer: {}"
        progress.section(
            fmt.format(algorithm, rank, beta, lamb_l2, lamb_keyphrase,
                       lamb_latent, lamb_rating, learning_rate, epoch,
                       corruption, optimizer))

        progress.subsection("Training")

        model = critiquing_models[algorithm](
            matrix_train=train_set,
            epoch=epoch,
            lamb_l2=lamb_l2,
            lamb_keyphrase=lamb_keyphrase,
            lamb_latent=lamb_latent,
            lamb_rating=lamb_rating,
            beta=beta,
            learning_rate=learning_rate,
            rank=rank,
            corruption=corruption,
            optimizer=optimizer,
            matrix_train_keyphrase=keyphrase_train_set)

        num_users, num_items = train_set.shape
        df_fmap = critiquing_evaluation(train_set,
                                        keyphrase_train_set,
                                        item_keyphrase_train_set,
                                        model,
                                        algorithm,
                                        num_users,
                                        num_items,
                                        num_users_sampled,
                                        critiquing_function,
                                        topk=[5, 10, 20])

        # Tag the F-MAP frame with the configuration that produced it.
        df_fmap['model'] = algorithm
        df_fmap['rank'] = rank
        df_fmap['beta'] = beta
        df_fmap['lambda_l2'] = lamb_l2
        df_fmap['lambda_keyphrase'] = lamb_keyphrase
        df_fmap['lambda_latent'] = lamb_latent
        df_fmap['lambda_rating'] = lamb_rating
        df_fmap['learning_rate'] = learning_rate
        df_fmap['epoch'] = epoch
        df_fmap['corruption'] = corruption
        df_fmap['optimizer'] = optimizer

        dfs_fmap.append(df_fmap)

        # TF1-style cleanup so the next model starts from a fresh graph.
        model.sess.close()
        tf.reset_default_graph()

    # Robustness: pd.concat([]) raises ValueError; if no row matched a
    # critiquing model there is nothing to save.
    if not dfs_fmap:
        return

    df_output_fmap = pd.concat(dfs_fmap)

    save_dataframe_csv(df_output_fmap,
                       table_path,
                       name=save_path + '_FMAP.csv')
Exemplo n.º 26
0
def hyper_parameter_tuning(train, validation, params, unif_train, save_path,
                           seed, way, dataset, gpu_on):
    """Grid-search hyper-parameters for a family of autoencoder models.

    Each algorithm family sweeps a different hyper-parameter grid (see the
    per-branch comments below). After every configuration the current result
    table is written via ``save_dataframe_csv`` so partial progress survives a
    crash.

    NOTE(review): ``df`` is re-created per algorithm but every branch saves to
    the same ``save_path`` — a later algorithm family's table presumably
    replaces the earlier one on disk; confirm ``save_dataframe_csv`` semantics.

    :param train: user-item training matrix
    :param validation: user-item validation matrix used for evaluation
    :param params: dict of grids; keys used vary per branch ('models', 'rank',
        'batch_size', 'lambda', 'iter', 'confidence', 'step', 'lambda2',
        'tau', 'rank2', 'metric')
    :param unif_train: uniformly-collected training matrix passed to models
    :param save_path: CSV name of the form '<data_name>/<file>' for results
    :param seed: RNG seed forwarded to model training
    :param way: model-specific mode flag forwarded to training
    :param dataset: dataset identifier forwarded to training
    :param gpu_on: forwarded to training, prediction, and evaluation
    """
    progress = WorkSplitter()

    table_path = 'tables/'
    # Ensure the per-dataset output directory exists before any save.
    data_name = save_path.split('/')[0]
    save_dir = 'tables/' + data_name + '/'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    for algorithm in params['models']:
        if algorithm in ['AutoRec']:
            # AutoRec: sweep rank x batch_size x lambda.
            df = pd.DataFrame(
                columns=['model', 'rank', 'batch_size', 'lambda', 'iter'])
            for rank in params['rank']:
                for batch_size in params['batch_size']:
                    for lam in params['lambda']:
                        format = "model: {0}, rank: {1}, batch_size: {2}, lambda: {3}"
                        progress.section(
                            format.format(algorithm, rank, batch_size, lam))
                        RQ, X, xBias, Y, yBias = params['models'][algorithm](
                            train,
                            validation,
                            matrix_unif_train=unif_train,
                            iteration=params['iter'],
                            rank=rank,
                            gpu_on=gpu_on,
                            lam=lam,
                            seed=seed,
                            batch_size=batch_size,
                            way=way,
                            dataset=dataset)

                        progress.subsection("Prediction")
                        prediction = predict(matrix_U=RQ,
                                             matrix_V=Y.T,
                                             matrix_Valid=validation,
                                             bias=yBias,
                                             gpu=gpu_on)

                        progress.subsection("Evaluation")
                        result = evaluate(prediction,
                                          validation,
                                          params['metric'],
                                          gpu=gpu_on)
                        result_dict = {
                            'model': algorithm,
                            'rank': rank,
                            'batch_size': batch_size,
                            'lambda': lam,
                            'iter': params['iter']
                        }
                        # Metric values stored as scalars here (8 decimals),
                        # unlike the [mean, CI] pairs used elsewhere in this file.
                        for name in result.keys():
                            result_dict[name] = round(result[name][0], 8)
                        df = df.append(result_dict, ignore_index=True)
                        save_dataframe_csv(df, table_path, save_path)
        elif algorithm in ['InitFeatureEmbedAE', 'ConcatFeatureEmbedAE']:
            # Feature-embedding AEs: fixed rank, sweep batch_size x lambda.
            df = pd.DataFrame(
                columns=['model', 'batch_size', 'lambda', 'iter'])
            for batch_size in params['batch_size']:
                for lam in params['lambda']:
                    format = "model: {0}, batch_size: {1}, lambda: {2}"
                    progress.section(format.format(algorithm, batch_size, lam))
                    RQ, X, xBias, Y, yBias = params['models'][algorithm](
                        train,
                        validation,
                        matrix_unif_train=unif_train,
                        iteration=params['iter'],
                        rank=params['rank'],
                        gpu_on=gpu_on,
                        lam=lam,
                        seed=seed,
                        batch_size=batch_size,
                        way=way,
                        dataset=dataset)

                    progress.subsection("Prediction")
                    prediction = predict(matrix_U=RQ,
                                         matrix_V=Y.T,
                                         matrix_Valid=validation,
                                         bias=yBias,
                                         gpu=gpu_on)

                    progress.subsection("Evaluation")
                    result = evaluate(prediction,
                                      validation,
                                      params['metric'],
                                      gpu=gpu_on)
                    result_dict = {
                        'model': algorithm,
                        'batch_size': batch_size,
                        'lambda': lam,
                        'iter': params['iter']
                    }
                    for name in result.keys():
                        result_dict[name] = round(result[name][0], 8)
                    df = df.append(result_dict, ignore_index=True)
                    save_dataframe_csv(df, table_path, save_path)
        elif algorithm in ['UnionSampleAE', 'RefineLabelAE']:
            # Sampling/label-refinement AEs: sweep confidence only.
            df = pd.DataFrame(columns=['model', 'confidence', 'iter'])
            for conf in params['confidence']:
                format = "model: {0}, confidence: {1}"
                progress.section(format.format(algorithm, conf))
                RQ, X, xBias, Y, yBias = params['models'][algorithm](
                    train,
                    validation,
                    matrix_unif_train=unif_train,
                    iteration=params['iter'],
                    rank=params['rank'],
                    gpu_on=gpu_on,
                    lam=params['lambda'],
                    seed=seed,
                    batch_size=params['batch_size'],
                    way=way,
                    confidence=conf,
                    dataset=dataset)

                progress.subsection("Prediction")
                prediction = predict(matrix_U=RQ,
                                     matrix_V=Y.T,
                                     matrix_Valid=validation,
                                     bias=yBias,
                                     gpu=gpu_on)

                progress.subsection("Evaluation")
                result = evaluate(prediction,
                                  validation,
                                  params['metric'],
                                  gpu=gpu_on)
                result_dict = {
                    'model': algorithm,
                    'confidence': conf,
                    'iter': params['iter']
                }
                for name in result.keys():
                    result_dict[name] = round(result[name][0], 8)
                df = df.append(result_dict, ignore_index=True)
                save_dataframe_csv(df, table_path, save_path)
        elif algorithm in ['BatchSampleAE']:
            # Batch-sampling AE: sweep step only.
            df = pd.DataFrame(columns=['model', 'step', 'iter'])
            for step in params['step']:
                format = "model: {0}, step: {1}"
                progress.section(format.format(algorithm, step))
                RQ, X, xBias, Y, yBias = params['models'][algorithm](
                    train,
                    validation,
                    matrix_unif_train=unif_train,
                    iteration=params['iter'],
                    rank=params['rank'],
                    gpu_on=gpu_on,
                    lam=params['lambda'],
                    seed=seed,
                    batch_size=params['batch_size'],
                    way=way,
                    step=step,
                    dataset=dataset)

                progress.subsection("Prediction")
                prediction = predict(matrix_U=RQ,
                                     matrix_V=Y.T,
                                     matrix_Valid=validation,
                                     bias=yBias,
                                     gpu=gpu_on)

                progress.subsection("Evaluation")
                result = evaluate(prediction,
                                  validation,
                                  params['metric'],
                                  gpu=gpu_on)
                result_dict = {
                    'model': algorithm,
                    'step': step,
                    'iter': params['iter']
                }
                for name in result.keys():
                    result_dict[name] = round(result[name][0], 8)
                df = df.append(result_dict, ignore_index=True)
                save_dataframe_csv(df, table_path, save_path)
        elif algorithm in ['BridgeLabelAE']:
            # Bridge-label AE: sweep lambda x lambda2.
            df = pd.DataFrame(columns=['model', 'lambda', 'lambda2', 'iter'])
            for lam in params['lambda']:
                for lam2 in params['lambda2']:
                    format = "model: {0}, lambda: {1}, lambda2: {2}"
                    progress.section(format.format(algorithm, lam, lam2))
                    RQ, X, xBias, Y, yBias = params['models'][algorithm](
                        train,
                        validation,
                        matrix_unif_train=unif_train,
                        iteration=params['iter'],
                        rank=params['rank'],
                        gpu_on=gpu_on,
                        lam=lam,
                        lam2=lam2,
                        seed=seed,
                        batch_size=params['batch_size'],
                        way=way,
                        dataset=dataset)

                    progress.subsection("Prediction")
                    prediction = predict(matrix_U=RQ,
                                         matrix_V=Y.T,
                                         matrix_Valid=validation,
                                         bias=yBias,
                                         gpu=gpu_on)

                    progress.subsection("Evaluation")
                    result = evaluate(prediction,
                                      validation,
                                      params['metric'],
                                      gpu=gpu_on)
                    result_dict = {
                        'model': algorithm,
                        'lambda': lam,
                        'lambda2': lam2,
                        'iter': params['iter']
                    }
                    for name in result.keys():
                        result_dict[name] = round(result[name][0], 8)
                    df = df.append(result_dict, ignore_index=True)
                    save_dataframe_csv(df, table_path, save_path)
        elif algorithm in ['SoftLabelAE']:
            # Soft-label (teacher/student) AE: sweep confidence x tau; model
            # returns two encoder/decoder pairs (Y/yBias and K/kBias).
            df = pd.DataFrame(columns=['model', 'confidence', 'tau', 'iter'])
            for conf in params['confidence']:
                for tau in params['tau']:
                    format = "model: {0}, confidence: {1}, tau: {2}"
                    progress.section(format.format(algorithm, conf, tau))
                    RQ, X, xBias, Y, yBias, Z, zBias, K, kBias = params[
                        'models'][algorithm](train,
                                             validation,
                                             matrix_unif_train=unif_train,
                                             iteration=params['iter'],
                                             rank=params['rank'],
                                             rank2=params['rank2'],
                                             gpu_on=gpu_on,
                                             lam=params['lambda'],
                                             seed=seed,
                                             batch_size=params['batch_size'],
                                             confidence=conf,
                                             tau=tau,
                                             dataset=dataset)

                    progress.subsection("Prediction")
                    # NOTE(review): matrix_V=K.T is paired with bias=yBias;
                    # kBias looks like the matching bias for K — confirm
                    # whether yBias here is intentional.
                    prediction = predict(matrix_U=RQ,
                                         matrix_V=K.T,
                                         matrix_Valid=validation,
                                         bias=yBias,
                                         gpu=gpu_on)

                    progress.subsection("Evaluation")
                    result = evaluate(prediction,
                                      validation,
                                      params['metric'],
                                      gpu=gpu_on)
                    result_dict = {
                        'model': algorithm,
                        'confidence': conf,
                        'tau': tau,
                        'iter': params['iter']
                    }
                    for name in result.keys():
                        result_dict[name] = round(result[name][0], 8)
                    df = df.append(result_dict, ignore_index=True)
                    save_dataframe_csv(df, table_path, save_path)
        elif algorithm in ['HintAE']:
            # Hint AE: sweep confidence; same dual-decoder output as SoftLabelAE.
            df = pd.DataFrame(columns=['model', 'confidence', 'iter'])
            for conf in params['confidence']:
                format = "model: {0}, confidence: {1}"
                progress.section(format.format(algorithm, conf))
                RQ, X, xBias, Y, yBias, Z, zBias, K, kBias = params['models'][
                    algorithm](train,
                               validation,
                               matrix_unif_train=unif_train,
                               iteration=params['iter'],
                               rank=params['rank'],
                               rank2=params['rank2'],
                               gpu_on=gpu_on,
                               lam=params['lambda'],
                               seed=seed,
                               batch_size=params['batch_size'],
                               confidence=conf,
                               dataset=dataset)

                progress.subsection("Prediction")
                # NOTE(review): same K.T / yBias pairing as the SoftLabelAE
                # branch — confirm kBias is not intended here.
                prediction = predict(matrix_U=RQ,
                                     matrix_V=K.T,
                                     matrix_Valid=validation,
                                     bias=yBias,
                                     gpu=gpu_on)

                progress.subsection("Evaluation")
                result = evaluate(prediction,
                                  validation,
                                  params['metric'],
                                  gpu=gpu_on)
                result_dict = {
                    'model': algorithm,
                    'confidence': conf,
                    'iter': params['iter']
                }
                for name in result.keys():
                    result_dict[name] = round(result[name][0], 8)
                df = df.append(result_dict, ignore_index=True)
                save_dataframe_csv(df, table_path, save_path)
Exemplo n.º 27
0
def general(train,
            test,
            params,
            model,
            measure='Cosine',
            gpu_on=True,
            analytical=False,
            model_folder='latent'):
    """Train (or load cached) latent factors for one configuration and
    evaluate on the test split.

    If ``U_<model>_<rank>.npy`` / ``V_...`` exist in ``model_folder`` the
    factors are loaded from disk; otherwise the model is trained from scratch.

    :param train: user-item training matrix
    :param test: user-item test matrix used by ``evaluate``
    :param params: dict of hyper-parameters (see ``columns`` below)
    :param model: training function returning (RQ, Yt, Bias)
    :param measure: similarity measure forwarded to ``predict``
    :param gpu_on: forwarded to training and prediction
    :param analytical: if True, return the raw ``evaluate`` result instead of
        a one-row DataFrame
    :param model_folder: directory holding cached U/V/B factor files
    :return: ``evaluate`` result when ``analytical`` else a one-row DataFrame
    """
    progress = WorkSplitter()

    columns = [
        'model', 'similarity', 'alpha', 'batch_size', 'corruption', 'epoch',
        'iteration', 'key_dimension', 'lambda', 'learning_rate',
        'mode_dimension', 'normalize', 'rank', 'root', 'topK'
    ]

    progress.section("\n".join(
        [":".join((str(k), str(params[k]))) for k in columns]))

    df = pd.DataFrame(columns=columns)

    if os.path.isfile('{2}/U_{0}_{1}.npy'.format(params['model'],
                                                 params['rank'],
                                                 model_folder)):

        # Cached factors exist: load U/V (and B if present) instead of training.
        RQ = np.load('{2}/U_{0}_{1}.npy'.format(params['model'],
                                                params['rank'], model_folder))
        Y = np.load('{2}/V_{0}_{1}.npy'.format(params['model'], params['rank'],
                                               model_folder))

        if os.path.isfile('{2}/B_{0}_{1}.npy'.format(params['model'],
                                                     params['rank'],
                                                     model_folder)):
            Bias = np.load('{2}/B_{0}_{1}.npy'.format(params['model'],
                                                      params['rank'],
                                                      model_folder))
        else:
            Bias = None

    else:

        RQ, Yt, Bias = model(train,
                             embedded_matrix=np.empty((0)),
                             mode_dim=params['mode_dimension'],
                             key_dim=params['key_dimension'],
                             batch_size=params['batch_size'],
                             learning_rate=params['learning_rate'],
                             iteration=params['iteration'],
                             epoch=params['epoch'],
                             rank=params['rank'],
                             corruption=params['corruption'],
                             gpu_on=gpu_on,
                             lamb=params['lambda'],
                             alpha=params['alpha'],
                             root=params['root'])

        Y = Yt.T
        """
        np.save('{2}/U_{0}_{1}'.format(params['model'], params['rank'], model_folder), RQ)
        np.save('{2}/V_{0}_{1}'.format(params['model'], params['rank'], model_folder), Y)
        if Bias is not None:
            np.save('{2}/B_{0}_{1}'.format(params['model'], params['rank'], model_folder), Bias)
        """

    progress.subsection("Prediction")

    prediction = predict(matrix_U=RQ,
                         matrix_V=Y,
                         measure=measure,
                         bias=Bias,
                         topK=params['topK'][-1],
                         matrix_Train=train,
                         gpu=gpu_on)

    progress.subsection("Evaluation")

    result = evaluate(prediction,
                      test,
                      params['metric'],
                      params['topK'],
                      analytical=analytical)

    if analytical:
        return result
    else:
        # Bug fix: previously `result_dict = params` aliased the caller's
        # dict, so the metric lists below mutated the caller's `params`.
        result_dict = dict(params)

        # Each metric value is a [mean, confidence-interval] pair.
        for name in result.keys():
            result_dict[name] = [
                round(result[name][0], 4),
                round(result[name][1], 4)
            ]
        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # backward-compatible equivalent.
        df = pd.concat([df, pd.DataFrame([result_dict])], ignore_index=True)

        return df
Exemplo n.º 28
0
def weighted_lrec_items(matrix_train,
                        embeded_matrix=np.empty((0)),
                        iteration=4,
                        lam=80,
                        rank=200,
                        alpha=100,
                        gpu=True,
                        seed=1,
                        **unused):
    """
    Weighted projected LRec: randomized SVD of the (optionally attribute-
    augmented) rating matrix, followed by one weighted ridge regression per
    item column, solved on GPU (cupy) or CPU.

    :param matrix_train: user-item matrix with shape m*n
    :param embeded_matrix: item-attribute matrix with length n (each row represents one item)
    :param iteration: number of power iterations in randomized SVD
    :param lam: L2 penalty weight on the per-item regressions
    :param rank: latent dimension size
    :param alpha: confidence weight applied to the observed U-I ratings
    :param gpu: whether to solve the per-item systems on GPU (requires cupy)
    :param seed: seed of the pseudo random number generator for randomized SVD
    :return: tuple (RQ, Y.T, None) of user factors, item factors and no bias
    """
    progress = WorkSplitter()
    matrix_input = matrix_train
    if embeded_matrix.shape[0] > 0:
        # Stack item attributes below the ratings so they influence the SVD.
        matrix_input = vstack((matrix_input, embeded_matrix.T))

    progress.subsection("Randomized SVD")
    start_time = time.time()
    P, sigma, Qt = randomized_svd(matrix_input,
                                  n_components=rank,
                                  n_iter=iteration,
                                  random_state=seed)
    print("Elapsed: {0}".format(inhour(time.time() - start_time)))

    start_time = time.time()
    if gpu:
        import cupy as cp
        progress.subsection("Create Cacheable Matrices")
        # sqrt(sigma) injection balances the scale between the two factors.
        RQ = matrix_input.dot(sparse.csc_matrix(Qt.T *
                                                np.sqrt(sigma))).toarray()

        # Exact normal-equation matrix: A = B^T B + lam * I.
        matrix_B = cp.array(RQ)
        matrix_BT = matrix_B.T
        matrix_A = matrix_BT.dot(matrix_B) + cp.array(
            (lam * sparse.identity(rank, dtype=np.float32)).toarray())
        print("Elapsed: {0}".format(inhour(time.time() - start_time)))

        progress.subsection("Item-wised Optimization")
        start_time = time.time()

        # Solve one weighted ridge regression per item column.
        m, n = matrix_train.shape
        Y = []
        alpha = cp.array(alpha, dtype=cp.float32)
        for i in tqdm(range(n)):  # fix: xrange is Python 2 only
            vector_r = matrix_train[:, i]
            vector_y = per_item_gpu(vector_r, matrix_A, matrix_B, matrix_BT,
                                    alpha)
            # Copy off the GPU before the next iteration reuses the buffer.
            Y.append(np.copy(cp.asnumpy(vector_y)))
        # fix: scipy no longer re-exports numpy's vstack; use numpy directly.
        Y = np.vstack(Y)
        print("Elapsed: {0}".format(inhour(time.time() - start_time)))
    else:
        progress.subsection("Create Cacheable Matrices")
        # NOTE(review): unlike the GPU branch, no sqrt(sigma) injection here,
        # so the two branches are not numerically equivalent — confirm intent.
        RQ = matrix_input.dot(sparse.csc_matrix(Qt).T).toarray()

        # Exact normal-equation matrix: A = B^T B + lam * I.
        matrix_B = RQ
        matrix_BT = RQ.T
        matrix_A = matrix_BT.dot(matrix_B) + (
            lam * sparse.identity(rank, dtype=np.float32)).toarray()
        print("Elapsed: {0}".format(inhour(time.time() - start_time)))

        progress.subsection("Item-wised Optimization")
        start_time = time.time()

        # Solve one weighted ridge regression per item column.
        m, n = matrix_train.shape
        Y = []
        for i in tqdm(range(n)):  # fix: xrange is Python 2 only
            vector_r = matrix_train[:, i]
            vector_y = per_item_cpu(vector_r, matrix_A, matrix_B, matrix_BT,
                                    alpha)
            Y.append(vector_y)
        # fix: scipy no longer re-exports numpy's vstack; use numpy directly.
        Y = np.vstack(Y)
        print("Elapsed: {0}".format(inhour(time.time() - start_time)))
    return RQ, Y.T, None
# Exemplo n.º 29
# 0
def mmp(matrix_train, embedded_matrix=np.empty((0)), mode_dim=5, key_dim=3,
        batch_size=32, optimizer="Adam", learning_rate=0.001, normalize=True,
        iteration=4, epoch=20, lamb=100, rank=200, corruption=0.5, fb=False,
        seed=1, root=1, alpha=1, return_model=False, **unused):
    """
    Multi-Modes Preference estimation (MMP): build item embeddings from a
    randomized SVD of the PMI matrix, then train a
    MultiModesPreferenceEstimation network on the rating matrix.

    :param matrix_train: rating matrix
    :param embedded_matrix: item or user embedding matrix (side info)
    :param mode_dim: number of preference modes in the network
    :param key_dim: attention key dimension
    :param batch_size: minibatch size for training
    :param optimizer: name of the optimizer, looked up in the Optimizer table
    :param learning_rate: optimizer learning rate
    :param normalize: standardize the SVD item embeddings before training
    :param iteration: number of random SVD power iterations
    :param epoch: number of training epochs
    :param lamb: regularization weight passed to the network
    :param rank: SVD top-K eigenvalue ranks / embedding size
    :param corruption: input corruption (denoising) rate during training
    :param fb: use the facebook pca package instead of sklearn. boolean
    :param seed: random initialization seed
    :param root: PMI root parameter forwarded to get_pmi_matrix
    :param alpha: weighting parameter forwarded to the network
    :param return_model: if True, return the trained model instead of factors
    :param unused: args that are not applicable for this algorithm
    :return: (RQ, Y.T, None) factor matrices, or the model if return_model
    """
    progress = WorkSplitter()
    matrix_input = matrix_train
    if embedded_matrix.shape[0] > 0:
        # Stack side information below the ratings so it influences the SVD.
        matrix_input = vstack((matrix_input, embedded_matrix.T))
    progress.subsection("Create PMI matrix")
    pmi_matrix = get_pmi_matrix(matrix_input, root)
    progress.subsection("Randomized SVD")
    start_time = time.time()
    if fb:
        P, sigma, Qt = pca(pmi_matrix,
                           k=rank,
                           n_iter=iteration,
                           raw=True)
    else:
        P, sigma, Qt = randomized_svd(pmi_matrix,
                                      n_components=rank,
                                      n_iter=iteration,
                                      power_iteration_normalizer='QR',
                                      random_state=seed)
    # sqrt(sigma) injection balances the scale of the item embeddings.
    Q = Qt.T*np.sqrt(sigma)
    # TODO: Verify this. Seems better with this.
    if normalize:
        Q = (Q - np.mean(Q)) / np.std(Q)

    # Type has to match the Tensorflow graph, which uses float32.
    # fix: dtype check instead of Q[0][0], which crashes on an empty Q.
    if Q.dtype == np.float64:
        Q = Q.astype(np.float32)

    model = MultiModesPreferenceEstimation(input_dim=matrix_train.shape[1],
                                           embed_dim=rank,
                                           mode_dim=mode_dim,
                                           key_dim=key_dim,
                                           batch_size=batch_size,
                                           alpha=alpha,
                                           lamb=lamb,
                                           learning_rate=learning_rate,
                                           optimizer=Optimizer[optimizer],
                                           item_embeddings=Q)
    model.train_model(matrix_train, corruption, epoch)
    print("Elapsed: {0}".format(inhour(time.time() - start_time)))

    if return_model:
        # Caller takes ownership of the open TF session.
        return model

    RQ = model.get_RQ(matrix_input)
    Y = model.get_Y()
    # Release the TF session/graph before returning plain numpy factors.
    model.sess.close()
    tf.reset_default_graph()
    return RQ, Y.T, None