def run(self, batchID: str, jobID: str):
    """Execute one ML1M simulation for the given (batch, job) combination.

    Resolves the dataset-split parameters registered for *batchID*, builds a
    single-aggregation portfolio for *jobID*, and runs the simulator on it.
    """
    # Dataset-split configuration registered for this batch on our dataset.
    splitPercentage, userBehaviour, repetition = InputABatchDefinition(
        ).getBatchParameters(self.datasetID)[batchID]

    # Job-specific aggregation parameters (item selector + discount factor).
    selector, discFactor = self.getParameters()[jobID]

    recomIDs, recomDescrs = InputRecomMLDefinition.exportPairOfRecomIdsAndRecomDescrs()
    aggrDescr: AggregationDescription = \
        InputAggrDefinition.exportADescDHondtDirectOptimizeThompsonSamplingMMR(
            selector, discFactor)

    portfolioDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
        self.getBatchName() + jobID, recomIDs, recomDescrs, aggrDescr)

    portfolioModel: DataFrame = PModelDHondtBanditsVotes(
        portfolioDescr.getRecommendersIDs())
    evalTool: AEvalTool = EvalToolDHondtBanditVotes({})

    simulator: Simulator = InputSimulatorDefinition().exportSimulatorML1M(
        batchID, splitPercentage, userBehaviour, repetition)
    simulator.simulate([portfolioDescr], [portfolioModel], [evalTool],
                       [HistoryHierDF(portfolioDescr.getPortfolioID())])
def test01():
    """Run the DHondt Thompson-sampling portfolio simulation on ML1M."""
    print("Simulation: ML DHontThompsonSampling")

    jobID: str = "Roulette1"
    selector = RouletteWheelSelector({RouletteWheelSelector.ARG_EXPONENT: 1})

    recomIDs, recomDescrs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs()
    pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
        "DHontThompsonSampling" + jobID, recomIDs, recomDescrs,
        InputAggrDefinition.exportADescDHondtThompsonSampling(selector))

    batchID: str = "ml1mDiv90Ulinear0109R1"
    dataset: DatasetML = DatasetML.readDatasets()
    behavioursDF: DataFrame = BehavioursML.readFromFileMl1m(
        BehavioursML.getFile(BehavioursML.BHVR_LINEAR0109))

    model: DataFrame = PModelDHondtBanditsVotes(pDescr.getRecommendersIDs())
    eTool: AEvalTool = EvalToolDHondtBanditVotes({})

    # simulation of portfolio
    # NOTE(review): argsSimulationDict is expected at module level — confirm.
    simulator: Simulator = Simulator(batchID, SimulationML, argsSimulationDict,
                                     dataset, behavioursDF)
    simulator.simulate([pDescr], [model], [eTool],
                       [HistoryHierDF(pDescr.getPortfolioID())])
def getFuzzyDHontThompsonSamplingINF():
    """Build a trained fuzzy-DHondt Thompson-sampling (INF) web portfolio.

    Returns a 5-tuple of (taskID, portfolio, model, evalTool, history),
    where the portfolio has already been trained on the ST dataset.
    """
    taskID: str = "Web" + "FuzzyDHondtThompsonSamplingINF" + "Fixed" + "OLin0802HLin1002"

    selector: ADHondtSelector = TheMostVotedItemSelector({})
    penalization: APenalization = \
        PenalizationToolDefinition.exportProbPenaltyToolOLin0802HLin1002(20)
    aggrDescr: AggregationDescription = \
        InputAggrDefinition.exportADescDHondtThompsonSamplingINF(selector, penalization)

    recomIDs, recomDescrs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs()
    pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
        taskID, recomIDs, recomDescrs, aggrDescr)

    history: AHistory = HistoryHierDF(taskID)
    dataset: ADataset = DatasetST.readDatasets()

    # Export the portfolio and pre-train it on the full dataset.
    port: APortfolio = pDescr.exportPortfolio(taskID, history)
    port.train(history, dataset)

    model: DataFrame = PModelDHondtBanditsVotes(pDescr.getRecommendersIDs())
    evalTool: AEvalTool = EvalToolDHondtBanditVotes({})

    return (taskID, port, model, evalTool, history)
def test01():
    """Run the fuzzy-DHondt direct-optimize (DCG) portfolio simulation on ML1M."""
    print("Simulation: ML FuzzyDHondtDirectOptimize")

    jobID: str = "Roulette1"
    selector = RouletteWheelSelector({RouletteWheelSelector.ARG_EXPONENT: 1})

    recomIDs, recomDescrs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs()
    pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
        "FuzzyDHondtDirectOptimize" + jobID, recomIDs, recomDescrs,
        InputAggrDefinition.exportADescDHondtDirectOptimizeThompsonSampling(
            selector, "DCG"))

    batchID: str = "ml1mDiv90Ulinear0109R1"
    dataset: DatasetML = DatasetML.readDatasets()
    behavioursDF: DataFrame = BehavioursML.readFromFileMl1m(
        BehavioursML.getFile(BehavioursML.BHVR_LINEAR0109))

    model: DataFrame = PModelDHondtBanditsVotes(pDescr.getRecommendersIDs())
    eTool: AEvalTool = EvalToolDHondtBanditVotes({})

    # simulation of portfolio
    # NOTE(review): argsSimulationDict is expected at module level — confirm.
    simulator: Simulator = Simulator(batchID, SimulationML, argsSimulationDict,
                                     dataset, behavioursDF)
    simulator.simulate([pDescr], [model], [eTool],
                       [HistoryHierDF(pDescr.getPortfolioID())])
def test21():
    """Run the fuzzy-DHondt direct-optimize (INF, DCG) portfolio simulation on ST.

    Fix: the penalization local was previously named pProbToolOLin0802HLin1002
    although it is produced by exportProbPenaltyToolOStat08HLin1002 — the name
    now matches the factory actually called. Commented-out dead code removed.
    """
    print("Simulation: ST FuzzyDHondtDirectOptimize")

    jobID: str = "Roulette1"
    selector = RouletteWheelSelector({RouletteWheelSelector.ARG_EXPONENT: 3})

    # Penalization tool: OStat08 / HLin1002 variant (name matches the factory).
    pProbToolOStat08HLin1002: APenalization = \
        PenalizationToolDefinition.exportProbPenaltyToolOStat08HLin1002(
            InputSimulatorDefinition.numberOfAggrItems)

    rIDs, rDescs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs()
    pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
        "FuzzyDHondtDirectOptimize" + jobID, rIDs, rDescs,
        InputAggrDefinition.exportADescDHondtDirectOptimizeThompsonSamplingINF(
            selector, pProbToolOStat08HLin1002, "DCG"))

    batchID: str = "stDiv90Ulinear0109R1"
    dataset: DatasetST = DatasetST.readDatasets()
    behaviourFile: str = BehavioursST.getFile(BehavioursST.BHVR_LINEAR0109)
    behavioursDF: DataFrame = BehavioursST.readFromFileST(behaviourFile)

    model: DataFrame = PModelDHondtBanditsVotes(pDescr.getRecommendersIDs())
    eTool: AEvalTool = EvalToolDHondtBanditVotes({})

    # simulation of portfolio
    # NOTE(review): argsSimulationDict is expected at module level — confirm.
    simulator: Simulator = Simulator(batchID, SimulationST, argsSimulationDict,
                                     dataset, behavioursDF)
    simulator.simulate([pDescr], [model], [eTool],
                       [HistoryHierDF(pDescr.getPortfolioID())])
def test01():
    """Exercise a hierarchical DHondt portfolio on the RetailRocket dataset.

    Builds a single-recommender (TheMostPopular) inner portfolio wrapped in a
    hierarchical description, trains it on a tiny seeded history, and prints
    one recommendation batch.

    Fixes vs. the original:
      * removed the InputRecomRRDefinition.exportPairOfRecomIdsAndRecomDescrs()
        call whose results were immediately overwritten (dead code);
      * removed the first pProbTool assignment
        (exportProbPenaltyToolOLin0802HLin1002), which was immediately
        shadowed by the second and never used;
      * corrected the rDescs annotation — it holds RecommenderDescription
        objects, not AggregationDescription.
    """
    print("Test 01")

    recommenderID: str = "TheMostPopular"
    pRDescr: RecommenderDescription = RecommenderDescription(
        RecommenderTheMostPopular, {})

    selectorFixed: ADHondtSelector = TheMostVotedItemSelector({})
    aDescDHont: AggregationDescription = \
        InputAggrDefinition.exportADescDHondtDirectOptimizeThompsonSampling(
            selectorFixed)

    # Single-recommender portfolio contents.
    rIDs: List[str] = [recommenderID]
    rDescs: List[RecommenderDescription] = [pRDescr]

    p1AggrDescrID: str = "p1AggrDescrID"
    p1AggrDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
        p1AggrDescrID, rIDs, rDescs, aDescDHont)

    # Only the OStat08/HLin1002 penalty tool is actually used downstream.
    pProbTool: APenalization = \
        PenalizationToolDefinition.exportPenaltyToolOStat08HLin1002(
            InputSimulatorDefinition.numberOfAggrItems)

    aHierDescr: AggregationDescription = AggregationDescription(
        AggrD21, {AggrD21.ARG_RATING_THRESHOLD_FOR_NEG: 2.0})

    pHierDescr: PortfolioHierDescription = PortfolioHierDescription(
        "pHierDescr", recommenderID, pRDescr, p1AggrDescrID, p1AggrDescr,
        aHierDescr, pProbTool)

    userID: int = 1
    dataset: ADataset = DatasetRetailRocket.readDatasetsWithFilter(
        minEventCount=50)

    # Seed a minimal recommendation history for the test user.
    history: AHistory = HistoryDF("test")
    history.insertRecommendation(userID, 45, 1, False)
    history.insertRecommendation(userID, 45, 2, False)
    history.insertRecommendation(userID, 78, 3, False)

    p: APortfolio = pHierDescr.exportPortfolio("test", history)
    portFolioModel: DataFrame = PModelDHondtBanditsVotes(
        p1AggrDescr.getRecommendersIDs())
    p.train(history, dataset)

    args = {
        APortfolio.ARG_NUMBER_OF_AGGR_ITEMS: 20,
        APortfolio.ARG_ITEM_ID: 1,
        APortfolio.ARG_NUMBER_OF_RECOMM_ITEMS: 100,
        AggrD21.ARG_RATING_THRESHOLD_FOR_NEG: 0.5,
    }
    r, rp = p.recommend(userID, portFolioModel, args)
    print(r)