def getNegativeImplFeedbackParameters_():
    pToolOLin0802HLin1002: APenalization = PenalizationToolDefinition.exportPenaltyToolOLin0802HLin1002(
        InputSimulatorDefinition().numberOfAggrItems)
    pToolOStat08HLin1002: APenalization = PenalizationToolDefinition.exportPenaltyToolOStat08HLin1002(
        InputSimulatorDefinition().numberOfAggrItems)
    pProbToolOLin0802HLin1002: APenalization = PenalizationToolDefinition.exportProbPenaltyToolOLin0802HLin1002(
        InputSimulatorDefinition().numberOfAggrItems)
    pProbToolOStat08HLin1002: APenalization = PenalizationToolDefinition.exportProbPenaltyToolOStat08HLin1002(
        InputSimulatorDefinition().numberOfAggrItems)
    pToolFilterBord3Lengt100: APenalization = PenalizationToolDefinition.exportPenaltyToolFiltering()

    aDict: Dict[str, APenalization] = {}
    aDict["OLin0802HLin1002"] = pToolOLin0802HLin1002
    aDict["OStat08HLin1002"] = pToolOStat08HLin1002
    aDict["ProbOLin0802HLin1002"] = pProbToolOLin0802HLin1002
    aDict["ProbOStat08HLin1002"] = pProbToolOStat08HLin1002
    aDict["TFilterBord3Lengt100"] = pToolFilterBord3Lengt100
    return aDict
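# A minimal usage sketch (not part of the original module): the dict returned by
# getNegativeImplFeedbackParameters_() maps a penalization-variant key to its
# APenalization instance, so a caller can select a variant by name. The key
# "ProbOLin0802HLin1002" is one of the keys defined above; the helper name
# _examplePickPenalization is hypothetical.
def _examplePickPenalization() -> APenalization:
    penalizations: Dict[str, APenalization] = getNegativeImplFeedbackParameters_()
    pTool: APenalization = penalizations["ProbOLin0802HLin1002"]
    return pTool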
def getContextFuzzyDHondtDirectOptimizeINF():
    # taskID:str = "Web" + "ContextFuzzyDHondtDirectOptimizeINF" + "Roulette1"
    taskID:str = "Web" + "ContextFuzzyDHondtDirectOptimizeINF" + "Fixed"

    dataset:ADataset = DatasetST.readDatasets()

    # selector:ADHondtSelector = RouletteWheelSelector({RouletteWheelSelector.ARG_EXPONENT: 1})
    selector:ADHondtSelector = TheMostVotedItemSelector({})

    pProbToolOLin0802HLin1002:APenalization = PenalizationToolDefinition.exportProbPenaltyToolOLin0802HLin1002(
        InputSimulatorDefinition.numberOfAggrItems)

    rIDs, rDescs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs()

    history:AHistory = HistoryHierDF(taskID)

    # Init evalTool
    evalTool:AEvalTool = EvalToolContext({
        EvalToolContext.ARG_ITEMS: dataset.serialsDF,   # ITEMS
        EvalToolContext.ARG_EVENTS: dataset.eventsDF,   # EVENTS (FOR CALCULATING HISTORY OF USER)
        EvalToolContext.ARG_DATASET: "st",              # WHAT DATASET ARE WE IN
        EvalToolContext.ARG_HISTORY: history})          # empty instance of AHistory is OK for ST dataset

    aDescDHont:AggregationDescription = InputAggrDefinition.exportADescDContextHondtDirectOptimizeINF(
        selector, pProbToolOLin0802HLin1002, evalTool)

    pDescr:Portfolio1AggrDescription = Portfolio1AggrDescription(
        taskID, rIDs, rDescs, aDescDHont)

    port:APortfolio = pDescr.exportPortfolio(taskID, history)
    port.train(history, dataset)

    model:DataFrame = PModelDHondt(pDescr.getRecommendersIDs())

    return (taskID, port, model, evalTool, history)
def getFuzzyDHontINF():
    # taskID:str = "Web" + "FuzzyDHondtINF" + "Roulette1"
    taskID:str = "Web" + "FuzzyDHondtINF" + "Fixed"

    dataset:ADataset = DatasetST.readDatasets()

    # selector:ADHondtSelector = RouletteWheelSelector({RouletteWheelSelector.ARG_EXPONENT: 1})
    selector:ADHondtSelector = TheMostVotedItemSelector({})

    pToolOLin0802HLin1002:APenalization = PenalizationToolDefinition.exportPenaltyToolOLin0802HLin1002(
        InputSimulatorDefinition.numberOfAggrItems)

    aDescDHont:AggregationDescription = InputAggrDefinition.exportADescDHondtINF(selector, pToolOLin0802HLin1002)

    rIDs, rDescs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs()

    pDescr:Portfolio1AggrDescription = Portfolio1AggrDescription(
        taskID, rIDs, rDescs, aDescDHont)

    history:AHistory = HistoryHierDF(taskID)

    port:APortfolio = pDescr.exportPortfolio(taskID, history)
    port.train(history, dataset)

    model:DataFrame = PModelDHondt(pDescr.getRecommendersIDs())

    evalTool:AEvalTool = EvalToolDHondt({EvalToolDHondt.ARG_LEARNING_RATE_CLICKS: 0.03,
                                         EvalToolDHondt.ARG_LEARNING_RATE_VIEWS: 0.03 / 500})

    return (taskID, port, model, evalTool, history)
def getFuzzyDHontThompsonSamplingINF():
    taskID:str = "Web" + "FuzzyDHondtThompsonSamplingINF" + "Fixed" + "OLin0802HLin1002"

    selector:ADHondtSelector = TheMostVotedItemSelector({})

    penalization:APenalization = PenalizationToolDefinition.exportProbPenaltyToolOLin0802HLin1002(20)

    aDescDHont:AggregationDescription = InputAggrDefinition.exportADescDHondtThompsonSamplingINF(
        selector, penalization)

    rIDs, rDescs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs()

    pDescr:Portfolio1AggrDescription = Portfolio1AggrDescription(
        taskID, rIDs, rDescs, aDescDHont)

    history:AHistory = HistoryHierDF(taskID)

    dataset:ADataset = DatasetST.readDatasets()

    port:APortfolio = pDescr.exportPortfolio(taskID, history)
    port.train(history, dataset)

    model:DataFrame = PModelDHondtBanditsVotes(pDescr.getRecommendersIDs())

    evalTool:AEvalTool = EvalToolDHondtBanditVotes({})

    return (taskID, port, model, evalTool, history)
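# An illustrative driver (an assumption, not part of the original module): each
# of the three get*INF() factories above returns the same 5-tuple
# (taskID, port, model, evalTool, history), so they can be consumed uniformly,
# e.g. when registering several portfolios at once. The helper name
# _exampleCollectPortfolios is hypothetical.
def _exampleCollectPortfolios() -> Dict[str, tuple]:
    portfolios: Dict[str, tuple] = {}
    for factory in (getContextFuzzyDHondtDirectOptimizeINF,
                    getFuzzyDHontINF,
                    getFuzzyDHontThompsonSamplingINF):
        taskID, port, model, evalTool, history = factory()
        portfolios[taskID] = (port, model, evalTool, history)
    return portfolios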
def test21(): print("Simulation: ST FuzzyDHondtDirectOptimize") jobID: str = "Roulette1" selector = RouletteWheelSelector({RouletteWheelSelector.ARG_EXPONENT: 3}) pProbToolOLin0802HLin1002: APenalization = PenalizationToolDefinition.exportProbPenaltyToolOStat08HLin1002( InputSimulatorDefinition.numberOfAggrItems) rIDs, rDescs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs() pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription( "FuzzyDHondtDirectOptimize" + jobID, rIDs, rDescs, InputAggrDefinition.exportADescDHondtDirectOptimizeThompsonSamplingINF( selector, pProbToolOLin0802HLin1002, "DCG")) batchID: str = "stDiv90Ulinear0109R1" dataset: DatasetST = DatasetST.readDatasets() behaviourFile: str = BehavioursST.getFile(BehavioursST.BHVR_LINEAR0109) behavioursDF: DataFrame = BehavioursST.readFromFileST(behaviourFile) model: DataFrame = PModelDHondtBanditsVotes(pDescr.getRecommendersIDs()) #lrClick:float = 0.03 #lrView:float = lrClick / 500 #eTool:AEvalTool = EvalToolDHondt({EvalToolDHondt.ARG_LEARNING_RATE_CLICKS: lrClick, # EvalToolDHondt.ARG_LEARNING_RATE_VIEWS: lrView}) eTool: AEvalTool = EvalToolDHondtBanditVotes({}) # simulation of portfolio simulator: Simulator = Simulator(batchID, SimulationST, argsSimulationDict, dataset, behavioursDF) simulator.simulate([pDescr], [model], [eTool], [HistoryHierDF(pDescr.getPortfolioID())])
def run(self, batchID: str, jobID: str):
    divisionDatasetPercentualSize: int
    uBehaviour: str
    repetition: int
    divisionDatasetPercentualSize, uBehaviour, repetition = \
        InputABatchDefinition().getBatchParameters(self.datasetID)[batchID]

    recommenderTheMPopID: str = "TheMostPopular"
    pRTheMPopDescr: RecommenderDescription = RecommenderDescription(RecommenderTheMostPopular, {})

    recommenderRPID: str = "RepeatedPurchase"
    pRecRPDescr: RecommenderDescription = RecommenderDescription(RecommenderRepeatedPurchase, {})

    selector: ADHondtSelector = self.getParameters()[jobID]

    # aDescDHont:AggregationDescription = InputAggrDefinition.exportADescDHondtDirectOptimizeThompsonSampling(selector)
    aDescDHont: AggregationDescription = InputAggrDefinition.exportADescBanditTS(selector)
    # aDescDHont:AggregationDescription = InputAggrDefinition.exportADescFAI()

    rIDs: List[str]
    rDescs: List[RecommenderDescription]
    rIDs, rDescs = InputRecomRRDefinition.exportPairOfRecomIdsAndRecomDescrs()
    # rIDs = [recommenderTheMPopID]
    # rDescs = [pRTheMPopDescr]

    p1AggrDescrID: str = "p1AggrDescrID"
    p1AggrDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
        p1AggrDescrID, rIDs, rDescs, aDescDHont)

    # pProbTool:APenalization = PenalizationToolDefinition.exportProbPenaltyToolOLin0802HLin1002(
    #     InputSimulatorDefinition().numberOfAggrItems)
    pProbTool: APenalization = PenalizationToolDefinition.exportPenaltyToolOStat08HLin1002(
        InputSimulatorDefinition().numberOfAggrItems)

    aHierDescr: AggregationDescription = AggregationDescription(
        AggrD21, {AggrD21.ARG_RATING_THRESHOLD_FOR_NEG: 0.0})

    pHierDescr: PortfolioHierDescription = PortfolioHierDescription(
        "pHierDescr", recommenderRPID, pRecRPDescr,
        p1AggrDescrID, p1AggrDescr, aHierDescr, pProbTool)

    eTool: AEvalTool = EvalToolBanditTS({})
    # eTool:AEvalTool = EToolDoNothing({})

    # model:DataFrame = PModelDHont(p1AggrDescr.getRecommendersIDs())
    model: DataFrame = PModelBandit(p1AggrDescr.getRecommendersIDs())

    simulator: Simulator = InputSimulatorDefinition().exportSimulatorRetailRocket(
        batchID, divisionDatasetPercentualSize, uBehaviour, repetition)
    simulator.simulate([pHierDescr], [model], [eTool],
                       [HistoryHierDF(p1AggrDescr.getPortfolioID())])
def test21(): print("Simulation: ST ContextDHondtINF") jobID: str = "Roulette1" selector = RouletteWheelSelector({RouletteWheelSelector.ARG_EXPONENT: 1}) #pProbToolOLin0802HLin1002:APenalization = PenalizationToolDefinition.exportProbPenaltyToolOStat08HLin1002( # InputSimulatorDefinition.numberOfAggrItems) pToolOLin0802HLin1002: APenalization = PenalizationToolDefinition.exportPenaltyToolOLin0802HLin1002( InputSimulatorDefinition.numberOfAggrItems) rIDs, rDescs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs() dataset: ADataset = DatasetST.readDatasets() events = dataset.eventsDF serials = dataset.serialsDF historyDF: AHistory = HistoryDF("test01") # Init evalTool evalTool: AEvalTool = EvalToolContext({ EvalToolContext.ARG_ITEMS: serials, # ITEMS EvalToolContext.ARG_EVENTS: events, # EVENTS (FOR CALCULATING HISTORY OF USER) EvalToolContext.ARG_DATASET: "st", # WHAT DATASET ARE WE IN EvalToolContext.ARG_HISTORY: historyDF }) # empty instance of AHistory is OK for ST dataset pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription( "ContextDHondtNIF" + jobID, rIDs, rDescs, InputAggrDefinition.exportADescDContextHondtINF( selector, pToolOLin0802HLin1002, evalTool)) batchID: str = "stDiv90Ulinear0109R1" dataset: DatasetST = DatasetST.readDatasets() behaviourFile: str = BehavioursST.getFile(BehavioursST.BHVR_LINEAR0109) behavioursDF: DataFrame = BehavioursST.readFromFileST(behaviourFile) model: DataFrame = PModelDHondt(pDescr.getRecommendersIDs()) print(model) # simulation of portfolio simulator: Simulator = Simulator(batchID, SimulationST, argsSimulationDict, dataset, behavioursDF) simulator.simulate([pDescr], [model], [evalTool], [HistoryHierDF(pDescr.getPortfolioID())])
def test01(): print("Simulation: ML ContextDHondtINF") jobID: str = "Roulette1" selector = RouletteWheelSelector({RouletteWheelSelector.ARG_EXPONENT: 1}) #pProbToolOLin0802HLin1002:APenalization = PenalizationToolDefinition.exportProbPenaltyToolOStat08HLin1002( # InputSimulatorDefinition.numberOfAggrItems) pToolOLin0802HLin1002: APenalization = PenalizationToolDefinition.exportPenaltyToolOLin0802HLin1002( InputSimulatorDefinition.numberOfAggrItems) itemsDF: DataFrame = Items.readFromFileMl1m() usersDF: DataFrame = Users.readFromFileMl1m() historyDF: AHistory = HistoryDF("test01") eTool: AEvalTool = EvalToolContext({ EvalToolContext.ARG_USERS: usersDF, EvalToolContext.ARG_ITEMS: itemsDF, EvalToolContext.ARG_DATASET: "ml", EvalToolContext.ARG_HISTORY: historyDF }) rIDs, rDescs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs() pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription( "ContextDHondtINF" + jobID, rIDs, rDescs, InputAggrDefinition.exportADescDContextHondtINF( selector, pToolOLin0802HLin1002, eTool)) batchID: str = "ml1mDiv90Ulinear0109R1" dataset: DatasetML = DatasetML.readDatasets() behaviourFile: str = BehavioursML.getFile(BehavioursML.BHVR_LINEAR0109) behavioursDF: DataFrame = BehavioursML.readFromFileMl1m(behaviourFile) model: DataFrame = PModelDHondt(pDescr.getRecommendersIDs()) # simulation of portfolio simulator: Simulator = Simulator(batchID, SimulationML, argsSimulationDict, dataset, behavioursDF) simulator.simulate([pDescr], [model], [eTool], [HistoryHierDF(pDescr.getPortfolioID())])
def test01(): print("Simulation: ML FuzzyDHondtINF") jobID: str = "Roulette1" selector = RouletteWheelSelector({RouletteWheelSelector.ARG_EXPONENT: 1}) pProbToolOLin0802HLin1002: APenalization = PenalizationToolDefinition.exportProbPenaltyToolOStat08HLin1002( InputSimulatorDefinition.numberOfAggrItems) rIDs, rDescs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs() pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription( "FuzzyDHondtINF" + jobID, rIDs, rDescs, InputAggrDefinition.exportADescDHondtINF(selector, pProbToolOLin0802HLin1002)) batchID: str = "ml1mDiv90Ulinear0109R1" dataset: DatasetML = DatasetML.readDatasets() behaviourFile: str = BehavioursML.getFile(BehavioursML.BHVR_LINEAR0109) behavioursDF: DataFrame = BehavioursML.readFromFileMl1m(behaviourFile) model: DataFrame = PModelDHondt(pDescr.getRecommendersIDs()) lrClick: float = 0.03 lrView: float = lrClick / 500 eTool: AEvalTool = EvalToolDHondt({ EvalToolDHondt.ARG_LEARNING_RATE_CLICKS: lrClick, EvalToolDHondt.ARG_LEARNING_RATE_VIEWS: lrView }) # simulation of portfolio simulator: Simulator = Simulator(batchID, SimulationML, argsSimulationDict, dataset, behavioursDF) simulator.simulate([pDescr], [model], [eTool], [HistoryHierDF(pDescr.getPortfolioID())])
def test01(): print("Test 01") recommenderID: str = "TheMostPopular" pRDescr: RecommenderDescription = RecommenderDescription( RecommenderTheMostPopular, {}) selectorFixed: ADHondtSelector = TheMostVotedItemSelector({}) aDescDHont: AggregationDescription = InputAggrDefinition.exportADescDHondtDirectOptimizeThompsonSampling( selectorFixed) rIDs: List[str] rDescs: List[AggregationDescription] rIDs, rDescs = InputRecomRRDefinition.exportPairOfRecomIdsAndRecomDescrs() rIDs = [recommenderID] rDescs = [pRDescr] p1AggrDescrID: str = "p1AggrDescrID" p1AggrDescr: Portfolio1AggrDescription = Portfolio1AggrDescription( p1AggrDescrID, rIDs, rDescs, aDescDHont) pProbTool: APenalization = PenalizationToolDefinition.exportProbPenaltyToolOLin0802HLin1002( InputSimulatorDefinition.numberOfAggrItems) pProbTool: APenalization = PenalizationToolDefinition.exportPenaltyToolOStat08HLin1002( InputSimulatorDefinition.numberOfAggrItems) aHierDescr: AggregationDescription = AggregationDescription( AggrD21, {AggrD21.ARG_RATING_THRESHOLD_FOR_NEG: 2.0}) pHierDescr: PortfolioHierDescription = PortfolioHierDescription( "pHierDescr", recommenderID, pRDescr, p1AggrDescrID, p1AggrDescr, aHierDescr, pProbTool) userID: int = 1 dataset: ADataset = DatasetRetailRocket.readDatasetsWithFilter( minEventCount=50) history: AHistory = HistoryDF("test") history.insertRecommendation(userID, 45, 1, False) history.insertRecommendation(userID, 45, 2, False) history.insertRecommendation(userID, 78, 3, False) p: APortfolio = pHierDescr.exportPortfolio("test", history) portFolioModel: DataFrame = PModelDHondtBanditsVotes( p1AggrDescr.getRecommendersIDs()) p.train(history, dataset) #df:DataFrame = DataFrame([[1, 555]], columns=[Events.COL_USER_ID, Events.COL_OBJECT_ID]) #p.update(ARecommender.UPDT_CLICK, df) args = { APortfolio.ARG_NUMBER_OF_AGGR_ITEMS: 20, APortfolio.ARG_ITEM_ID: 1, APortfolio.ARG_NUMBER_OF_RECOMM_ITEMS: 100, AggrD21.ARG_RATING_THRESHOLD_FOR_NEG: 0.5 } r, rp = p.recommend(userID, portFolioModel, args) print(r)