def evaluate(self, rDscr: RecommenderDescription):
        """Static hit-count evaluation of a single recommender.

        Trains on the first half of the event log, then for every visitor
        counts how many of their second-half items appear in a top-20
        recommendation, and prints the total hit count.
        """
        print("StaticEvaluation")

        recommender: ARecommender = rDscr.exportRecommender("test")
        recomArgs: dict = rDscr.getArguments()

        events: DataFrame = self.dataset.eventsDF
        half: int = int(len(events) / 2)

        # chronological split: first half trains, second half tests
        trainDF: DataFrame = events[0:half]
        testDF: DataFrame = events[half:]

        trainDataset = DatasetRetailRocket("rrTrain", trainDF, DataFrame(),
                                           DataFrame())

        # evaluate over every visitor seen anywhere in the log
        visitorIDs: List[int] = list(events[Events.COL_VISITOR_ID].unique())

        recommender.train(HistoryDF("test"), trainDataset)

        hitCount: int = 0
        for visitorI in visitorIDs:
            testItems: List[int] = list(
                testDF.loc[testDF[Events.COL_VISITOR_ID] ==
                           visitorI][Events.COL_ITEM_ID].unique())
            recommendedI = recommender.recommend(visitorI, 20,
                                                 recomArgs).keys()
            # hits = test-half items that were actually recommended
            hitCount += sum(1 for itemI in testItems if itemI in recommendedI)

        print("  counter: " + str(hitCount))
Esempio n. 2
0
    def run(self, batchID: str, jobID: str):
        """Run one RetailRocket simulation job: build a hierarchical
        bandit/D'Hondt portfolio over the RR recommender set and simulate it
        for the given batch and job identifiers."""
        # unpack the batch configuration for this dataset/batch combination
        divisionDatasetPercentualSize: int
        uBehaviour: str
        repetition: int
        divisionDatasetPercentualSize, uBehaviour, repetition = \
            InputABatchDefinition().getBatchParameters(self.datasetID)[batchID]

        recommenderTheMPopID: str = "TheMostPopular"
        pRTheMPopDescr: RecommenderDescription = RecommenderDescription(
            RecommenderTheMostPopular, {})

        recommenderRPID: str = "RepeatedPurchase"
        pRecRPDescr: RecommenderDescription = RecommenderDescription(
            RecommenderRepeatedPurchase, {})

        selector: ADHondtSelector = self.getParameters()[jobID]
        # NOTE(review): this first aggregation description is immediately
        # overwritten by the BanditTS variant below, so it is dead code —
        # presumably a manual A/B toggle; confirm which variant is intended.
        aDescDHont: AggregationDescription = InputAggrDefinition.exportADescDHondtDirectOptimizeThompsonSampling(
            selector)
        aDescDHont: AggregationDescription = InputAggrDefinition.exportADescBanditTS(
            selector)
        #aDescDHont:AggregationDescription = InputAggrDefinition.exportADescFAI()

        rIDs: List[str]
        rDescs: List[AggregationDescription]
        rIDs, rDescs = InputRecomRRDefinition.exportPairOfRecomIdsAndRecomDescrs(
        )
        #rIDs = [recommenderTheMPopID]
        #rDescs = [pRTheMPopDescr]

        p1AggrDescrID: str = "p1AggrDescrID"
        p1AggrDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
            p1AggrDescrID, rIDs, rDescs, aDescDHont)

        #pProbTool:APenalization = PenalizationToolDefinition.exportProbPenaltyToolOLin0802HLin1002(
        #    InputSimulatorDefinition().numberOfAggrItems)
        # penalisation tool applied at the hierarchical aggregation level
        pProbTool: APenalization = PenalizationToolDefinition.exportPenaltyToolOStat08HLin1002(
            InputSimulatorDefinition().numberOfAggrItems)

        aHierDescr: AggregationDescription = AggregationDescription(
            AggrD21, {AggrD21.ARG_RATING_THRESHOLD_FOR_NEG: 0.0})

        # hierarchical portfolio: repeated-purchase recommender layered on
        # top of the single-aggregation portfolio
        pHierDescr: PortfolioHierDescription = PortfolioHierDescription(
            "pHierDescr", recommenderRPID, pRecRPDescr, p1AggrDescrID,
            p1AggrDescr, aHierDescr, pProbTool)

        eTool: AEvalTool = EvalToolBanditTS({})
        #eTool:AEvalTool = EToolDoNothing({})
        #model:DataFrame = PModelDHont(p1AggrDescr.getRecommendersIDs())
        model: DataFrame = PModelBandit(p1AggrDescr.getRecommendersIDs())

        simulator: Simulator = InputSimulatorDefinition(
        ).exportSimulatorRetailRocket(batchID, divisionDatasetPercentualSize,
                                      uBehaviour, repetition)
        simulator.simulate([pHierDescr], [model], [eTool],
                           [HistoryHierDF(p1AggrDescr.getPortfolioID())])
def test01():
    """Smoke test: train a TheMostPopular single-method portfolio on the ST
    dataset, feed it one click event and print a top-20 recommendation."""
    print("Test 01")

    mostPopDescr: RecommenderDescription = RecommenderDescription(
        RecommenderTheMostPopular, {})

    mostPopID: str = "TheMostPopular"
    portfolioDescr: Portfolio1MethDescription = Portfolio1MethDescription(
        mostPopID.title(), mostPopID, mostPopDescr)

    datasetST: ADataset = DatasetST.readDatasets()

    historyDF: AHistory = HistoryDF("test")
    portfolio: APortfolio = portfolioDescr.exportPortfolio("jobID", historyDF)

    emptyModel: DataFrame = DataFrame()

    portfolio.train(historyDF, datasetST)

    # a single click event: user 1 clicked object 555
    clickDF: DataFrame = DataFrame(
        [[1, 555]], columns=[Events.COL_USER_ID, Events.COL_OBJECT_ID])

    portfolio.update(ARecommender.UPDT_CLICK, clickDF)

    userID: int = 1
    r, rp = portfolio.recommend(userID, emptyModel,
                                {APortfolio.ARG_NUMBER_OF_AGGR_ITEMS: 20})
    print(r)
    def getParameters(cls):
        """Build a dict of cosine content-based recommender descriptions,
        keyed by a compact string encoding of each hyper-parameter combo."""
        result: Dict[str, object] = {}
        for dataPathI in cls.cbDataPaths:
            for strategyI in cls.userProfileStrategies:
                for profileSizeI in cls.userProfileSizes:

                    # short tag identifying the CB data source in the key
                    if "TFIDF" in dataPathI:
                        dataTagI: str = "TFIDF"
                    elif "OHE" in dataPathI or "simMatrixRR.npz" in dataPathI:
                        dataTagI = "OHE"
                    else:
                        dataTagI = ""
                        print("error")

                    keyI: str = ("cbd" + str(dataTagI) + "ups" +
                                 str(strategyI) + "ups" + str(profileSizeI))

                    result[keyI] = RecommenderDescription(
                        RecommenderCosineCB, {
                            RecommenderCosineCB.ARG_CB_DATA_PATH:
                            dataPathI,
                            RecommenderCosineCB.ARG_USER_PROFILE_SIZE:
                            profileSizeI,
                            RecommenderCosineCB.ARG_USER_PROFILE_STRATEGY:
                            strategyI
                        })
        return result
Esempio n. 5
0
 def exportRDescCBwindow3(datasetID: str):
     """TFIDF cosine-CB recommender using the 'window3' profile strategy."""
     cbArgs: dict = {
         RecommenderCosineCB.ARG_CB_DATA_PATH:
         Configuration.cbDataFileWithPathTFIDF,
         RecommenderCosineCB.ARG_USER_PROFILE_STRATEGY: "window3"
     }
     return RecommenderDescription(RecommenderCosineCB, cbArgs)
    def run(self, batchID: str, jobID: str):
        """Simulate a dynamic FD'Hondt personalised-statistics portfolio on
        the RetailRocket dataset for the given batch and job."""
        divisionDatasetPercentualSize: int
        uBehaviour: str
        repetition: int
        divisionDatasetPercentualSize, uBehaviour, repetition = \
            InputABatchDefinition().getBatchParameters(self.datasetID)[batchID]

        # selector and evaluation tool are both job-specific
        selector, eTool = self.getParameters()[jobID]

        recomIDs, recomDescs = \
            InputRecomRRDefinition.exportPairOfRecomIdsAndRecomDescrs()

        aggrPortDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
            "FDHont" + jobID, recomIDs, recomDescs,
            InputAggrDefinition.exportADescDHondt(selector))

        mostPopID: str = "TheMostPopular"
        mostPopDescr: RecommenderDescription = RecommenderDescription(
            RecommenderTheMostPopular, {})

        pDescr: APortfolioDescription = PortfolioDynamicDescription(
            "Dynamic" + "FDHontPersStat" + jobID, mostPopID, mostPopDescr,
            "FDHondt", aggrPortDescr)

        model: DataFrame = PModelDHondtPersonalisedStat(
            aggrPortDescr.getRecommendersIDs())

        simulator: Simulator = InputSimulatorDefinition(
        ).exportSimulatorRetailRocket(batchID, divisionDatasetPercentualSize,
                                      uBehaviour, repetition)
        simulator.simulate([pDescr], [model], [eTool],
                           [HistoryHierDF(pDescr.getPortfolioID())])
Esempio n. 7
0
 def exportRDescW2vPosnegWindow3(datasetID: str):
     """W2V recommender: 'posneg' training variant, 'window3' profile."""
     w2vArgs: dict = {
         RecommenderW2V.ARG_TRAIN_VARIANT: "posneg",
         RecommenderW2V.ARG_USER_PROFILE_STRATEGY: "window3",
         RecommenderW2V.ARG_DATASET_ID: datasetID
     }
     return RecommenderDescription(RecommenderW2V, w2vArgs)
Esempio n. 8
0
 def exportRDescW2vPositiveMax(datasetID: str):
     """W2V recommender: 'positive' training variant, 'max' profile."""
     w2vArgs: dict = {
         RecommenderW2V.ARG_TRAIN_VARIANT: "positive",
         RecommenderW2V.ARG_USER_PROFILE_STRATEGY: "max",
         RecommenderW2V.ARG_DATASET_ID: datasetID
     }
     return RecommenderDescription(RecommenderW2V, w2vArgs)
 def exportRDescCosineCB():
     """Cosine-CB recommender over the RR similarity matrix, single-item
     'max' user profile."""
     cbArgs: dict = {
         RecommenderCosineCB.ARG_CB_DATA_PATH: "../data/simMatrixRR.npz",
         RecommenderCosineCB.ARG_USER_PROFILE_SIZE: 1,
         RecommenderCosineCB.ARG_USER_PROFILE_STRATEGY: "max"
     }
     return RecommenderDescription(RecommenderCosineCB, cbArgs)
Esempio n. 10
0
 def exportRDescCosineCBcbdOHEupsmaxups1():
     """Cosine-CB recommender on ML1M OHE data: 'max' profile of size 1."""
     cbArgs: dict = {
         RecommenderCosineCB.ARG_CB_DATA_PATH:
         Configuration.cbML1MDataFileWithPathOHE,
         RecommenderCosineCB.ARG_USER_PROFILE_SIZE: 1,
         RecommenderCosineCB.ARG_USER_PROFILE_STRATEGY: "max"
     }
     return RecommenderDescription(RecommenderCosineCB, cbArgs)
Esempio n. 11
0
 def exportRDescCosineCBcbdOHEupsweightedMeanups3():
     """Cosine-CB recommender on ML1M OHE data: 'weightedMean' profile of
     size 3."""
     cbArgs: dict = {
         RecommenderCosineCB.ARG_CB_DATA_PATH:
         Configuration.cbML1MDataFileWithPathOHE,
         RecommenderCosineCB.ARG_USER_PROFILE_SIZE: 3,
         RecommenderCosineCB.ARG_USER_PROFILE_STRATEGY: "weightedMean"
     }
     return RecommenderDescription(RecommenderCosineCB, cbArgs)
 def exportRDescBPRMFIMPL():
     """BPR-MF (implicit) recommender: 100 factors, 50 iterations."""
     mfArgs: dict = {
         RecommenderBPRMFImplicit.ARG_FACTORS: 100,
         RecommenderBPRMFImplicit.ARG_ITERATIONS: 50,
         RecommenderBPRMFImplicit.ARG_LEARNINGRATE: 0.1,
         RecommenderBPRMFImplicit.ARG_REGULARIZATION: 0.01
     }
     return RecommenderDescription(RecommenderBPRMFImplicit, mfArgs)
Esempio n. 13
0
 def exportRDescBPRMFIMPLf20i20lr0003r01():
     """BPR-MF (implicit) recommender: 20 factors, 20 iterations,
     learning rate 0.003, regularisation 0.1."""
     mfArgs: dict = {
         RecommenderBPRMFImplicit.ARG_FACTORS: 20,
         RecommenderBPRMFImplicit.ARG_ITERATIONS: 20,
         RecommenderBPRMFImplicit.ARG_LEARNINGRATE: 0.003,
         RecommenderBPRMFImplicit.ARG_REGULARIZATION: 0.1
     }
     return RecommenderDescription(RecommenderBPRMFImplicit, mfArgs)
 def exportRDescW2talli200000ws1vs64upsweightedMeanups5():
     """W2V: 'all' variant, 200k iterations, window 1, 64-dim vectors,
     'weightedMean' profile of size 5."""
     w2vArgs: dict = {
         RecommenderW2V.ARG_TRAIN_VARIANT: "all",
         RecommenderW2V.ARG_ITERATIONS: 200000,
         RecommenderW2V.ARG_LEARNING_RATE: 1.0,
         RecommenderW2V.ARG_USER_PROFILE_SIZE: 5,
         RecommenderW2V.ARG_USER_PROFILE_STRATEGY: "weightedMean",
         RecommenderW2V.ARG_VECTOR_SIZE: 64,
         RecommenderW2V.ARG_WINDOW_SIZE: 1
     }
     return RecommenderDescription(RecommenderW2V, w2vArgs)
 def exportRDescW2V():
     """W2V: 'all' variant, 100k iterations, learning rate 0.6, window 3,
     64-dim vectors, 'weightedMean' profile of size 3."""
     w2vArgs: dict = {
         RecommenderW2V.ARG_LEARNING_RATE: 0.6,
         RecommenderW2V.ARG_ITERATIONS: 100000,
         RecommenderW2V.ARG_TRAIN_VARIANT: "all",
         RecommenderW2V.ARG_USER_PROFILE_SIZE: 3,
         RecommenderW2V.ARG_USER_PROFILE_STRATEGY: "weightedMean",
         RecommenderW2V.ARG_VECTOR_SIZE: 64,
         RecommenderW2V.ARG_WINDOW_SIZE: 3
     }
     return RecommenderDescription(RecommenderW2V, w2vArgs)
Esempio n. 16
0
 def exportRDescW2Vtpositivei50000ws1vs64upsweightedMeanups7():
     """W2V: 'positive' variant, 50k iterations, window 1, 64-dim vectors,
     'weightedMean' profile of size 7."""
     w2vArgs: dict = {
         RecommenderW2V.ARG_TRAIN_VARIANT: "positive",
         RecommenderW2V.ARG_ITERATIONS: 50000,
         RecommenderW2V.ARG_LEARNING_RATE: 1.0,
         RecommenderW2V.ARG_USER_PROFILE_SIZE: 7,
         RecommenderW2V.ARG_USER_PROFILE_STRATEGY: "weightedMean",
         RecommenderW2V.ARG_VECTOR_SIZE: 64,
         RecommenderW2V.ARG_WINDOW_SIZE: 1
     }
     return RecommenderDescription(RecommenderW2V, w2vArgs)
 def exportRDescW2Vtalli100000ws1vs32upsmaxups1():
     """W2V: 'all' variant, 100k iterations, window 1, 32-dim vectors,
     'max' profile of size 1."""
     w2vArgs: dict = {
         RecommenderW2V.ARG_TRAIN_VARIANT: "all",
         RecommenderW2V.ARG_ITERATIONS: 100000,
         RecommenderW2V.ARG_LEARNING_RATE: 1.0,
         RecommenderW2V.ARG_USER_PROFILE_SIZE: 1,
         RecommenderW2V.ARG_USER_PROFILE_STRATEGY: "max",
         RecommenderW2V.ARG_VECTOR_SIZE: 32,
         RecommenderW2V.ARG_WINDOW_SIZE: 1
     }
     return RecommenderDescription(RecommenderW2V, w2vArgs)
Esempio n. 18
0
 def exportRDescBPRMF():
     """BPR-MF recommender: 2 epochs, 10 factors, learning rate 0.05 and
     per-term regularisation constants."""
     mfArgs: dict = {
         RecommenderBPRMF.ARG_EPOCHS: 2,
         RecommenderBPRMF.ARG_FACTORS: 10,
         RecommenderBPRMF.ARG_LEARNINGRATE: 0.05,
         RecommenderBPRMF.ARG_UREGULARIZATION: 0.0025,
         RecommenderBPRMF.ARG_BREGULARIZATION: 0,
         RecommenderBPRMF.ARG_PIREGULARIZATION: 0.0025,
         RecommenderBPRMF.ARG_NIREGULARIZATION: 0.00025
     }
     return RecommenderDescription(RecommenderBPRMF, mfArgs)
def test01():
    """Simulation smoke test: dynamic FD'Hondt personalised-statistics
    portfolio on the filtered RetailRocket dataset.

    NOTE(review): `argsSimulationDict`, used in the Simulator construction
    below, is not defined anywhere in this function — as written this test
    raises NameError before simulating. Confirm the intended simulation
    arguments (cf. the analogous ML test that builds argsSimulationDict).
    """

    print("Simulation: RR Dynamic")

    # learning rates for the evaluation tool (view rate derived from clicks)
    lrClick: float = 0.03
    #lrView:float = lrClick / 300
    lrViewDivisor: float = 250

    # job identifier encodes the learning-rate configuration
    jobID: str = "Fixed" + "Clk" + str(lrClick).replace(
        ".", "") + "ViewDivisor" + str(lrViewDivisor).replace(".", "")

    selector: ADHondtSelector = TheMostVotedItemSelector({})

    rIDs, rDescs = InputRecomRRDefinition.exportPairOfRecomIdsAndRecomDescrs()

    p1AggrDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
        "FDHont" + jobID, rIDs, rDescs,
        InputAggrDefinition.exportADescDHondt(selector))

    recommenderID: str = "TheMostPopular"
    rDescr: RecommenderDescription = RecommenderDescription(
        RecommenderTheMostPopular, {})

    pDescr: APortfolioDescription = PortfolioDynamicDescription(
        "Dynamic" + "FDHontPersStat" + jobID, recommenderID, rDescr, "FDHondt",
        p1AggrDescr)

    batchID: str = "rrDiv90Ulinear0109R1"
    # keep only entities with at least 50 events
    dataset: DatasetRetailRocket = DatasetRetailRocket.readDatasetsWithFilter(
        minEventCount=50)
    behaviourFile: str = BehavioursRR.getFile(BehavioursRR.BHVR_LINEAR0109)
    behavioursDF: DataFrame = BehavioursRR.readFromFileRR(behaviourFile)

    model: DataFrame = PModelDHondtPersonalisedStat(
        p1AggrDescr.getRecommendersIDs())

    eTool: AEvalTool = EvalToolDHondtPersonal({
        EvalToolDHondt.ARG_LEARNING_RATE_CLICKS:
        lrClick,
        EvalToolDHondt.ARG_LEARNING_RATE_VIEWS:
        lrClick / lrViewDivisor
    })

    # simulation of portfolio
    # FIXME(review): argsSimulationDict is undefined here (NameError).
    simulator: Simulator = Simulator(batchID, SimulationRR, argsSimulationDict,
                                     dataset, behavioursDF)
    simulator.simulate([pDescr], [model], [eTool],
                       [HistoryHierDF(pDescr.getPortfolioID())])
    def run(self, batchID: str, jobID: str):
        """Simulate a single VMContextKNN recommender (K taken from the job
        parameters) on the Slantour dataset."""
        divisionDatasetPercentualSize: int
        uBehaviour: str
        repetition: int
        divisionDatasetPercentualSize, uBehaviour, repetition = \
            InputABatchDefinition().getBatchParameters(self.datasetID)[batchID]

        # job-specific neighbourhood size
        kNeighbours: str = self.getParameters()[jobID]

        knnID: str = "RecommendervmContextKNN" + "K" + str(kNeighbours)

        knnDescr: RecommenderDescription = RecommenderDescription(
            RecommenderVMContextKNN,
            {RecommenderVMContextKNN.ARG_K: kNeighbours})

        pDescr: APortfolioDescription = Portfolio1MethDescription(
            knnID.title(), knnID, knnDescr)

        simulator: Simulator = InputSimulatorDefinition(
        ).exportSimulatorSlantour(batchID, divisionDatasetPercentualSize,
                                  uBehaviour, repetition)
        simulator.simulate([pDescr], [DataFrame()], [EToolDoNothing({})],
                           [HistoryHierDF(pDescr.getPortfolioID())])
    def getParameters(self):
        """Grid of BPR-MF (implicit) recommender descriptions keyed by a
        compact encoding of factors/iterations/learning-rate/regularisation."""
        grid: Dict[str, object] = {}
        for factorI in self.factors:
            for iterI in self.iterations:
                for lrI in self.learningRates:
                    for regI in self.regularizations:
                        # e.g. "f20i20lr0003r01" (dots stripped from floats)
                        keyI: str = (
                            "f" + str(factorI) + "i" + str(iterI) + "lr" +
                            str(lrI).replace('.', '') + "r" +
                            str(regI).replace('.', ''))

                        grid[keyI] = RecommenderDescription(
                            RecommenderBPRMFImplicit, {
                                RecommenderBPRMFImplicit.ARG_FACTORS:
                                factorI,
                                RecommenderBPRMFImplicit.ARG_ITERATIONS:
                                iterI,
                                RecommenderBPRMFImplicit.ARG_LEARNINGRATE:
                                lrI,
                                RecommenderBPRMFImplicit.ARG_REGULARIZATION:
                                regI
                            })
        return grid
Esempio n. 22
0
    def getParameters(cls):
        """Grid of W2V recommender descriptions over every combination of
        the class-level hyper-parameter lists, keyed by a compact encoding."""
        grid: Dict[str, object] = {}
        for variantI in cls.trainVariants:
            for lrI in cls.learningRates:
                for iterI in cls.iterations:
                    for windowI in cls.windowSizes:
                        for vectorI in cls.vectorSizes:
                            for strategyI in cls.userProfileStrategies:
                                for profSizeI in cls.userProfileSizes:
                                    # dots stripped from the learning rate
                                    keyI: str = (
                                        "t" + str(variantI) + "lr" +
                                        str(lrI).replace(".", "") + "i" +
                                        str(iterI) + "ws" + str(windowI) +
                                        "vs" + str(vectorI) + "ups" +
                                        strategyI + "ups" + str(profSizeI))

                                    grid[keyI] = RecommenderDescription(
                                        RecommenderW2V, {
                                            RecommenderW2V.ARG_LEARNING_RATE:
                                            lrI,
                                            RecommenderW2V.ARG_ITERATIONS:
                                            iterI,
                                            RecommenderW2V.ARG_TRAIN_VARIANT:
                                            variantI,
                                            RecommenderW2V.ARG_USER_PROFILE_SIZE:
                                            profSizeI,
                                            RecommenderW2V.ARG_USER_PROFILE_STRATEGY:
                                            strategyI,
                                            RecommenderW2V.ARG_VECTOR_SIZE:
                                            vectorI,
                                            RecommenderW2V.ARG_WINDOW_SIZE:
                                            windowI
                                        })
        return grid
Esempio n. 23
0
def test01():
    """End-to-end simulation test: a TheMostPopular single-method portfolio
    simulated over a small hand-built MovieLens-style ratings dataset."""
    print("Test 01")

    rDescr: RecommenderDescription = RecommenderDescription(
        RecommenderTheMostPopular, {})

    recommenderID: str = "TheMostPopular"
    pDescr: Portfolio1MethDescription = Portfolio1MethDescription(
        recommenderID.title(), recommenderID, rDescr)

    dataset: ADataset = DatasetML.readDatasets()

    history: AHistory = HistoryDF("test")
    p: APortfolio = pDescr.exportPortfolio("jobID", history)
    p.train(history, dataset)

    #    r, rwr = p.recommend(1, DataFrame(), {APortfolio.ARG_NUMBER_OF_AGGR_ITEMS:20})
    #    rItemID1 = r[0]
    #    rItemID2 = r[1]
    #    rItemID3 = r[2]
    #
    #    print(r)
    #    print("rItemID1: " + str(rItemID1))
    #    print("rItemID2: " + str(rItemID2))
    #    print("rItemID3: " + str(rItemID3))

    # hand-built ratings frame: (userID, movieID, rating, timestamp) rows
    testRatingsDF: DataFrame = DataFrame(columns=[
        Ratings.COL_USERID, Ratings.COL_MOVIEID, Ratings.COL_RATING,
        Ratings.COL_TIMESTAMP
    ])
    timeStampI: int = 1000

    userID1: int = 1
    userID2: int = 2
    userID3: int = 3
    rItemID1: int = 9001
    rItemID2: int = 9002
    rItemID3: int = 9003
    # training part of dataset
    # user 1 rates item 9000 forty times (5 * 8), with increasing timestamps
    for i in [i + 0 for i in range(5 * 8)]:
        timeStampI = timeStampI + 1
        testRatingsDF.loc[i] = [userID1] + list([9000, 5, timeStampI])
    timeStampI = timeStampI + 1
    testRatingsDF.loc[len(testRatingsDF)] = [userID2] + list(
        [rItemID1, 5, timeStampI])
    timeStampI = timeStampI + 1
    testRatingsDF.loc[len(testRatingsDF)] = [userID2] + list(
        [rItemID2, 5, timeStampI])
    timeStampI = timeStampI + 1
    testRatingsDF.loc[len(testRatingsDF)] = [userID3] + list(
        [rItemID3, 5, timeStampI])
    timeStampI = timeStampI + 1
    testRatingsDF.loc[len(testRatingsDF)] = [userID2] + list(
        [rItemID2, 5, timeStampI])
    timeStampI = timeStampI + 1
    testRatingsDF.loc[len(testRatingsDF)] = [userID2] + list(
        [rItemID2, 5, timeStampI])

    # testing part of dataset
    userID11: int = 11
    userID12: int = 12
    timeStampI = timeStampI + 1
    testRatingsDF.loc[len(testRatingsDF)] = [userID11] + list(
        [rItemID1, 5, timeStampI])
    timeStampI = timeStampI + 1
    testRatingsDF.loc[len(testRatingsDF)] = [userID11] + list(
        [rItemID2, 5, timeStampI])
    timeStampI = timeStampI + 1
    testRatingsDF.loc[len(testRatingsDF)] = [userID11] + list(
        [rItemID3, 5, timeStampI])
    timeStampI = timeStampI + 1
    testRatingsDF.loc[len(testRatingsDF)] = [userID12] + list(
        [rItemID2, 5, timeStampI])
    timeStampI = timeStampI + 1
    testRatingsDF.loc[len(testRatingsDF)] = [userID11] + list(
        [rItemID2, 5, timeStampI])

    print("len(testRatingsDF): " + str(len(testRatingsDF)))
    print(testRatingsDF.head(20))
    print(testRatingsDF.tail(20))

    # wrap the hand-built ratings with the users/items of the real dataset
    datasetMy: ADataset = DatasetML("", testRatingsDF, dataset.usersDF,
                                    dataset.itemsDF)

    # five repetitions per rating, all recommendations marked as shown
    behavioursDF: DataFrame = DataFrame(
        columns=[BehavioursML.COL_REPETITION, BehavioursML.COL_BEHAVIOUR])
    for ratingIndexI in range(len(testRatingsDF)):
        for repetitionI in range(5):
            behavioursDF.loc[ratingIndexI * 5 + repetitionI] = list(
                [repetitionI, [True] * 20])
    print(behavioursDF.head(20))

    argsSimulationDict: Dict[str, str] = {
        SimulationML.ARG_WINDOW_SIZE: 5,
        SimulationML.ARG_RECOM_REPETITION_COUNT: 1,
        SimulationML.ARG_NUMBER_OF_RECOMM_ITEMS: 100,
        SimulationML.ARG_NUMBER_OF_AGGR_ITEMS:
        InputSimulatorDefinition.numberOfAggrItems,
        SimulationML.ARG_DIV_DATASET_PERC_SIZE: 90,
        SimulationML.ARG_HISTORY_LENGTH: 10
    }

    # simulation of portfolio
    # NOTE(review): other simulate() calls in this file pass a list like
    # [HistoryHierDF(...)]; here the HistoryHierDF class itself is passed —
    # confirm which form Simulator.simulate expects.
    simulator: Simulator = Simulator("test", SimulationML, argsSimulationDict,
                                     datasetMy, behavioursDF)
    simulator.simulate([pDescr], [DataFrame()], [EToolDoNothing({})],
                       HistoryHierDF)
def test01():
    """Smoke test: hierarchical D'Hondt-bandits portfolio restricted to the
    TheMostPopular recommender, trained on filtered RetailRocket data."""
    print("Test 01")

    recommenderID: str = "TheMostPopular"
    pRDescr: RecommenderDescription = RecommenderDescription(
        RecommenderTheMostPopular, {})

    selectorFixed: ADHondtSelector = TheMostVotedItemSelector({})
    aDescDHont: AggregationDescription = InputAggrDefinition.exportADescDHondtDirectOptimizeThompsonSampling(
        selectorFixed)

    rIDs: List[str]
    rDescs: List[AggregationDescription]
    rIDs, rDescs = InputRecomRRDefinition.exportPairOfRecomIdsAndRecomDescrs()
    # the exported recommender lists are immediately replaced: this test runs
    # with the single most-popular recommender only
    rIDs = [recommenderID]
    rDescs = [pRDescr]

    p1AggrDescrID: str = "p1AggrDescrID"
    p1AggrDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
        p1AggrDescrID, rIDs, rDescs, aDescDHont)

    # NOTE(review): the first penalisation tool is immediately overwritten by
    # the second — dead code kept as a manual toggle; confirm which is meant.
    pProbTool: APenalization = PenalizationToolDefinition.exportProbPenaltyToolOLin0802HLin1002(
        InputSimulatorDefinition.numberOfAggrItems)
    pProbTool: APenalization = PenalizationToolDefinition.exportPenaltyToolOStat08HLin1002(
        InputSimulatorDefinition.numberOfAggrItems)

    aHierDescr: AggregationDescription = AggregationDescription(
        AggrD21, {AggrD21.ARG_RATING_THRESHOLD_FOR_NEG: 2.0})

    pHierDescr: PortfolioHierDescription = PortfolioHierDescription(
        "pHierDescr", recommenderID, pRDescr, p1AggrDescrID, p1AggrDescr,
        aHierDescr, pProbTool)

    userID: int = 1

    dataset: ADataset = DatasetRetailRocket.readDatasetsWithFilter(
        minEventCount=50)

    # seed the history with three prior recommendations for this user
    history: AHistory = HistoryDF("test")
    history.insertRecommendation(userID, 45, 1, False)
    history.insertRecommendation(userID, 45, 2, False)
    history.insertRecommendation(userID, 78, 3, False)

    p: APortfolio = pHierDescr.exportPortfolio("test", history)

    portFolioModel: DataFrame = PModelDHondtBanditsVotes(
        p1AggrDescr.getRecommendersIDs())

    p.train(history, dataset)

    #df:DataFrame = DataFrame([[1, 555]], columns=[Events.COL_USER_ID, Events.COL_OBJECT_ID])
    #p.update(ARecommender.UPDT_CLICK, df)

    # NOTE(review): this passes a 0.5 negative-rating threshold although the
    # aggregation description above was built with 2.0 — confirm intended.
    args = {
        APortfolio.ARG_NUMBER_OF_AGGR_ITEMS: 20,
        APortfolio.ARG_ITEM_ID: 1,
        APortfolio.ARG_NUMBER_OF_RECOMM_ITEMS: 100,
        AggrD21.ARG_RATING_THRESHOLD_FOR_NEG: 0.5
    }

    r, rp = p.recommend(userID, portFolioModel, args)
    print(r)
 def exportRDescTheMostPopular(datasetID: str):
     """Parameter-free most-popular recommender (datasetID is unused)."""
     emptyArgs: dict = {}
     return RecommenderDescription(RecommenderTheMostPopular, emptyArgs)
Esempio n. 26
0
 def exportRDescVMContextKNNk25():
     """VMContextKNN recommender with K fixed at 25."""
     knnArgs: dict = {RecommenderVMContextKNN.ARG_K: 25}
     return RecommenderDescription(RecommenderVMContextKNN, knnArgs)
Esempio n. 27
0
 def exportRDescKNN():
     """Item-based KNN recommender: K=25, update threshold 500."""
     knnArgs: dict = {
         RecommenderItemBasedKNN.ARG_K: 25,
         RecommenderItemBasedKNN.ARG_UPDATE_THRESHOLD: 500
     }
     return RecommenderDescription(RecommenderItemBasedKNN, knnArgs)
Esempio n. 28
0
 def exportRDescTheMostPopular():
     """Parameter-free most-popular recommender description."""
     emptyArgs: dict = {}
     return RecommenderDescription(RecommenderTheMostPopular, emptyArgs)
Esempio n. 29
0
 def exportRDescClusterBased(rNumericId: int):
     """Cluster-based recommender bound to the given numeric recommender id."""
     cbArgs: dict = {
         RecommenderClusterBased.ARG_RECOMMENDER_NUMERIC_ID: rNumericId
     }
     return RecommenderDescription(RecommenderClusterBased, cbArgs)