def run(self, batchID: str, jobID: str):
    # Resolve the batch parameters registered for this dataset: the
    # percentual size of the dataset division, the user behaviour,
    # and the repetition index.
    divisionDatasetPercentualSize: int
    uBehaviour: str
    repetition: int
    divisionDatasetPercentualSize, uBehaviour, repetition = \
        InputABatchDefinition().getBatchParameters(self.datasetID)[batchID]

    # The selector assigned to this job (e.g. a roulette-wheel selector).
    selector: ADHondtSelector = self.getParameters()[jobID]

    # Evaluation tool that feeds feedback into the DHondt bandit-votes model.
    eTool: AEvalTool = EvalToolDHondtBanditVotes({})

    rIDs, rDescs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs()

    aDescDHont: AggregationDescription = \
        InputAggrDefinition.exportADescDHondtDirectOptimizeThompsonSampling(selector)

    pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
        self.getBatchName() + jobID, rIDs, rDescs, aDescDHont)

    model: DataFrame = PModelDHondtBanditsVotes(pDescr.getRecommendersIDs())

    simulator: Simulator = InputSimulatorDefinition().exportSimulatorSlantour(
        batchID, divisionDatasetPercentualSize, uBehaviour, repetition)
    simulator.simulate([pDescr], [model], [eTool],
                       [HistoryHierDF(pDescr.getPortfolioID())])
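
# For context, a minimal driver sketch for the run() method above. Hedged:
# the owning class name is hypothetical; only getBatchParameters() and
# getParameters() are taken from the method body itself.
#
#   job = SomeBatchJob()  # hypothetical subclass providing datasetID
#   for batchID in InputABatchDefinition().getBatchParameters(job.datasetID):
#       for jobID in job.getParameters():
#           job.run(batchID, jobID)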
def test01():
    print("Simulation: ML FuzzyDHondtDirectOptimize")

    jobID: str = "Roulette1"

    selector = RouletteWheelSelector({RouletteWheelSelector.ARG_EXPONENT: 1})

    rIDs, rDescs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs()

    pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
        "FuzzyDHondtDirectOptimize" + jobID, rIDs, rDescs,
        InputAggrDefinition.exportADescDHondtDirectOptimizeThompsonSampling(
            selector, "DCG"))

    batchID: str = "ml1mDiv90Ulinear0109R1"
    dataset: DatasetML = DatasetML.readDatasets()
    behaviourFile: str = BehavioursML.getFile(BehavioursML.BHVR_LINEAR0109)
    behavioursDF: DataFrame = BehavioursML.readFromFileMl1m(behaviourFile)

    #model:DataFrame = PModelDHont(pDescr.getRecommendersIDs())
    model: DataFrame = PModelDHondtBanditsVotes(pDescr.getRecommendersIDs())

    #lrClick:float = 0.03
    #lrView:float = lrClick / 500
    #eTool:AEvalTool = EvalToolDHondt({EvalToolDHondt.ARG_LEARNING_RATE_CLICKS: lrClick,
    #                                  EvalToolDHondt.ARG_LEARNING_RATE_VIEWS: lrView})
    eTool: AEvalTool = EvalToolDHondtBanditVotes({})

    # argsSimulationDict was not defined in the original snippet; an empty
    # dict stands in here as a placeholder for the simulation arguments.
    argsSimulationDict: dict = {}

    # simulation of portfolio
    simulator: Simulator = Simulator(batchID, SimulationML, argsSimulationDict,
                                     dataset, behavioursDF)
    simulator.simulate([pDescr], [model], [eTool],
                       [HistoryHierDF(pDescr.getPortfolioID())])
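
# Minimal entry point so this example can be run directly (an addition;
# the original snippet leaves invocation to an external runner).
if __name__ == "__main__":
    test01()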
Example 3
    def run(self, batchID: str, jobID: str):
        divisionDatasetPercentualSize: int
        uBehaviour: str
        repetition: int
        divisionDatasetPercentualSize, uBehaviour, repetition = \
            InputABatchDefinition().getBatchParameters(self.datasetID)[batchID]

        recommenderTheMPopID: str = "TheMostPopular"
        pRTheMPopDescr: RecommenderDescription = RecommenderDescription(
            RecommenderTheMostPopular, {})

        recommenderRPID: str = "RepeatedPurchase"
        pRecRPDescr: RecommenderDescription = RecommenderDescription(
            RecommenderRepeatedPurchase, {})

        selector: ADHondtSelector = self.getParameters()[jobID]
        # The DHondt direct-optimize aggregation is overridden by the BanditTS
        # variant below; both alternatives are kept as comments.
        #aDescDHont: AggregationDescription = InputAggrDefinition.exportADescDHondtDirectOptimizeThompsonSampling(
        #    selector)
        aDescDHont: AggregationDescription = InputAggrDefinition.exportADescBanditTS(
            selector)
        #aDescDHont:AggregationDescription = InputAggrDefinition.exportADescFAI()

        rIDs: List[str]
        rDescs: List[RecommenderDescription]
        rIDs, rDescs = InputRecomRRDefinition.exportPairOfRecomIdsAndRecomDescrs()
        #rIDs = [recommenderTheMPopID]
        #rDescs = [pRTheMPopDescr]

        p1AggrDescrID: str = "p1AggrDescrID"
        p1AggrDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
            p1AggrDescrID, rIDs, rDescs, aDescDHont)

        #pProbTool:APenalization = PenalizationToolDefinition.exportProbPenaltyToolOLin0802HLin1002(
        #    InputSimulatorDefinition().numberOfAggrItems)
        pProbTool: APenalization = PenalizationToolDefinition.exportPenaltyToolOStat08HLin1002(
            InputSimulatorDefinition().numberOfAggrItems)

        aHierDescr: AggregationDescription = AggregationDescription(
            AggrD21, {AggrD21.ARG_RATING_THRESHOLD_FOR_NEG: 0.0})

        pHierDescr: PortfolioHierDescription = PortfolioHierDescription(
            "pHierDescr", recommenderRPID, pRecRPDescr, p1AggrDescrID,
            p1AggrDescr, aHierDescr, pProbTool)

        eTool: AEvalTool = EvalToolBanditTS({})
        #eTool:AEvalTool = EToolDoNothing({})
        #model:DataFrame = PModelDHont(p1AggrDescr.getRecommendersIDs())
        model: DataFrame = PModelBandit(p1AggrDescr.getRecommendersIDs())

        simulator: Simulator = InputSimulatorDefinition(
        ).exportSimulatorRetailRocket(batchID, divisionDatasetPercentualSize,
                                      uBehaviour, repetition)
        simulator.simulate([pHierDescr], [model], [eTool],
                           [HistoryHierDF(p1AggrDescr.getPortfolioID())])
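
# Structure recap of the hierarchy built above: PortfolioHierDescription places
# the plain RepeatedPurchase recommender beside the single-aggregation portfolio
# p1AggrDescr, merges their outputs through the AggrD21 aggregation, and applies
# the pProbTool penalization; EvalToolBanditTS then updates the PModelBandit
# model from the simulated feedback.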
def test01():
    print("Test 01")

    recommenderID: str = "TheMostPopular"
    pRDescr: RecommenderDescription = RecommenderDescription(
        RecommenderTheMostPopular, {})

    selectorFixed: ADHondtSelector = TheMostVotedItemSelector({})
    aDescDHont: AggregationDescription = \
        InputAggrDefinition.exportADescDHondtDirectOptimizeThompsonSampling(
            selectorFixed)

    rIDs: List[str]
    rDescs: List[RecommenderDescription]
    # The full RetailRocket export is overwritten below; the portfolio is
    # narrowed to the single TheMostPopular recommender for this test.
    #rIDs, rDescs = InputRecomRRDefinition.exportPairOfRecomIdsAndRecomDescrs()
    rIDs = [recommenderID]
    rDescs = [pRDescr]

    p1AggrDescrID: str = "p1AggrDescrID"
    p1AggrDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
        p1AggrDescrID, rIDs, rDescs, aDescDHont)

    # The first penalization tool is immediately overridden; it is kept
    # commented out as an alternative, as in the run() method above.
    #pProbTool: APenalization = PenalizationToolDefinition.exportProbPenaltyToolOLin0802HLin1002(
    #    InputSimulatorDefinition().numberOfAggrItems)
    pProbTool: APenalization = PenalizationToolDefinition.exportPenaltyToolOStat08HLin1002(
        InputSimulatorDefinition().numberOfAggrItems)

    aHierDescr: AggregationDescription = AggregationDescription(
        AggrD21, {AggrD21.ARG_RATING_THRESHOLD_FOR_NEG: 2.0})

    pHierDescr: PortfolioHierDescription = PortfolioHierDescription(
        "pHierDescr", recommenderID, pRDescr, p1AggrDescrID, p1AggrDescr,
        aHierDescr, pProbTool)

    userID: int = 1

    dataset: ADataset = DatasetRetailRocket.readDatasetsWithFilter(
        minEventCount=50)

    # Seed the history; the arguments appear to be (userID, itemID,
    # position, clicked).
    history: AHistory = HistoryDF("test")
    history.insertRecommendation(userID, 45, 1, False)
    history.insertRecommendation(userID, 45, 2, False)
    history.insertRecommendation(userID, 78, 3, False)

    p: APortfolio = pHierDescr.exportPortfolio("test", history)

    portFolioModel: DataFrame = PModelDHondtBanditsVotes(
        p1AggrDescr.getRecommendersIDs())

    p.train(history, dataset)

    #df:DataFrame = DataFrame([[1, 555]], columns=[Events.COL_USER_ID, Events.COL_OBJECT_ID])
    #p.update(ARecommender.UPDT_CLICK, df)

    args = {
        APortfolio.ARG_NUMBER_OF_AGGR_ITEMS: 20,
        APortfolio.ARG_ITEM_ID: 1,
        APortfolio.ARG_NUMBER_OF_RECOMM_ITEMS: 100,
        AggrD21.ARG_RATING_THRESHOLD_FOR_NEG: 0.5
    }

    r, rp = p.recommend(userID, portFolioModel, args)
    print(r)
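
    # Hedged sanity check (assumptions: r is the aggregated recommendation
    # list, capped at ARG_NUMBER_OF_AGGR_ITEMS items; rp is auxiliary
    # per-recommender output, a guess from the name).
    assert len(r) <= args[APortfolio.ARG_NUMBER_OF_AGGR_ITEMS]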