def createDHontModel(recommendersIDs: List[str]):
    """Build a D'Hondt portfolio model indexed by methodID, one unit vote per recommender,
    normalized in place to a unit vote sum."""
    rows: List[List] = [[recommenderID, 1] for recommenderID in recommendersIDs]
    modelDF: DataFrame = pd.DataFrame(rows, columns=["methodID", "votes"])
    modelDF.set_index("methodID", inplace=True)
    # Normalize the "votes" column to sum to 1 (mutates modelDF in place).
    EvalToolDHondt.linearNormalizingPortfolioModelDHont(modelDF)
    return modelDF
def test01():
    """Smoke test: build a tiny three-method D'Hondt portfolio model and exercise
    the click/displayed update calls while printing intermediate state."""
    print("Test 01")
    #print("Running Two parallel History Databases:")

    # Per-method rating series; items=[1,2,4,5,6,7,8,12,32,64,77]
    methodsResultDict:dict = {
        "metoda1": pd.Series([0.2, 0.1, 0.3, 0.3, 0.1], [32, 2, 8, 1, 4], name="rating"),
        "metoda2": pd.Series([0.1, 0.1, 0.2, 0.3, 0.3], [1, 5, 32, 6, 7], name="rating"),
        "metoda3": pd.Series([0.3, 0.1, 0.2, 0.3, 0.1], [7, 2, 77, 64, 12], name="rating")
    }

    # Fixed (itemID, {methodID: responsibility}) pairs used as input for clicks/displays.
    rItemIDsWithResponsibility:List = [
        (7, {'metoda1': 0, 'metoda2': 24.0, 'metoda3': 18.0}),
        (1, {'metoda1': 30.0, 'metoda2': 8.0, 'metoda3': 0}),
        (32, {'metoda1': 20.0, 'metoda2': 16.0, 'metoda3': 0}),
        (8, {'metoda1': 30.0, 'metoda2': 0, 'metoda3': 0}),
        (6, {'metoda1': 0, 'metoda2': 24.0, 'metoda3': 0}),
        (64, {'metoda1': 0, 'metoda2': 0, 'metoda3': 18.0}),
        (2, {'metoda1': 10.0, 'metoda2': 0, 'metoda3': 6.0}),
        (77, {'metoda1': 0, 'metoda2': 0, 'metoda3': 12.0}),
        (4, {'metoda1': 10.0, 'metoda2': 0, 'metoda3': 0}),
        (5, {'metoda1': 0, 'metoda2': 8.0, 'metoda3': 0}),
        (12, {'metoda1': 0, 'metoda2': 0, 'metoda3': 6.0})]

    # method parameters (initial votes per method)
    portfolioModelData:List[tuple] = [['metoda1',100], ['metoda2',80], ['metoda3',60]]
    portfolioModelDF:DataFrame = pd.DataFrame(portfolioModelData, columns=["methodID","votes"])
    portfolioModelDF.set_index("methodID", inplace=True)
    print("Definition:")
    print(portfolioModelDF)
    print()

    # linearly normalizing to unit sum of votes (call currently disabled)
    #EvalToolDHont.linearNormalizingPortfolioModelDHont(portfolioModelDF)
    print("Linearly normalizing:")
    print(portfolioModelDF)
    print()

    evaluationDict:dict = {}

    print("Clicked:")
    EvalToolDHondt.click(rItemIDsWithResponsibility, 7, portfolioModelDF, evaluationDict)
    EvalToolDHondt.click(rItemIDsWithResponsibility, 1, portfolioModelDF, evaluationDict)
    EvalToolDHondt.click(rItemIDsWithResponsibility, 7, portfolioModelDF, evaluationDict)
    print()

    print("Displayed - start:")
    # Repeatedly register a display of item 7.
    for i in range(100):
        rItemIDsWithResponsibility1:List = [(7, {'metoda1': 0, 'metoda2': 24.0, 'metoda3': 18.0})]
        EvalToolDHondt.displayed(rItemIDsWithResponsibility1, portfolioModelDF, evaluationDict)
    # NOTE(review): the flattened source makes the loop extent ambiguous — the prints
    # below are read as running once, after the loop; confirm against original layout.
    print(portfolioModelDF)
    print("Displayed - end:")
    print()

    print("Clicked:")
    EvalToolDHondt.click(rItemIDsWithResponsibility, 4, portfolioModelDF, evaluationDict)
    print()
def getParameters(self):
    """Enumerate job configurations keyed by a descriptive ID string.

    Each value is a (selector, negative-implicit-feedback, eval-tool) triple built
    over the grid of selectors x feedback tools x click LRs x view divisors.
    """
    selectorIDs: List[str] = BatchFuzzyDHondt().getSelectorParameters().keys()
    negativeImplFeedback: List[str] = self.getNegativeImplFeedbackParameters().keys()

    # Single-value grids in use; wider grids kept commented for reference.
    #lrClicks:List[float] = [0.2, 0.1, 0.02, 0.005]
    lrClicks: List[float] = [0.1]
    #lrViewDivisors:List[float] = [200, 500, 1000]
    lrViewDivisors: List[float] = [200]

    configurations: dict = {}
    for selectorID in selectorIDs:
        for feedbackID in negativeImplFeedback:
            for clickLR in lrClicks:
                for viewDivisor in lrViewDivisors:
                    key: str = (str(selectorID) + "Clk" + str(clickLR).replace(".", "")
                                + "ViewDivisor" + str(viewDivisor).replace(".", "")
                                + feedbackID)
                    # View LR is derived from the click LR by a divisor.
                    viewLR: float = clickLR / viewDivisor
                    evalTool: AEvalTool = EvalToolDHondt({
                        EvalToolDHondt.ARG_LEARNING_RATE_CLICKS: clickLR,
                        EvalToolDHondt.ARG_LEARNING_RATE_VIEWS: viewLR})
                    feedback: APenalization = self.getNegativeImplFeedbackParameters()[feedbackID]
                    selector: ADHondtSelector = BatchFuzzyDHondt().getSelectorParameters()[selectorID]
                    configurations[key] = (selector, feedback, evalTool)
    return configurations
def test02():
    """Smoke test: one click plus many displays against a PModelDHondt model,
    printing the model before and after."""
    print("Test 02")

    rIDs, rDescr = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs()

    model: DataFrame = PModelDHondt(rIDs)
    print(model)

    userID: int = 1

    # Single item (ID 1); responsibility is spread over the recommenders, with
    # the last one (rIDs[7]) carrying most of it.
    rItemIDsWithResponsibility: List[(int, Dict)] = [(1, {
        rIDs[0]: 0.05,
        rIDs[1]: 0.05,
        rIDs[2]: 0.05,
        rIDs[3]: 0.05,
        rIDs[4]: 0.05,
        rIDs[5]: 0.05,
        rIDs[6]: 0.05,
        rIDs[7]: 0.65
    })]

    lrClick: float = 0.03
    lrView: float = lrClick / 500
    evalTool: AEvalTool = EvalToolDHondt({
        EvalToolDHondt.ARG_LEARNING_RATE_CLICKS: lrClick,
        EvalToolDHondt.ARG_LEARNING_RATE_VIEWS: lrView
    })

    evalTool.click(userID, rItemIDsWithResponsibility, 1, model, {})
    # Many repeated displays exercise the view learning rate.
    for i in range(555):
        evalTool.displayed(userID, rItemIDsWithResponsibility, model, {})
    print(model)
def getParameters(self):
    """Enumerate (selector, eval-tool) pairs over the click-LR x view-divisor grid,
    keyed by a descriptive ID string."""
    selectorIDs: List[str] = self.getSelectorParameters().keys()

    # Full grids in use; single-value variants kept commented for quick runs.
    lrClicks: List[float] = [0.2, 0.1, 0.02, 0.005]
    #lrClicks:List[float] = [0.1]
    lrViewDivisors: List[float] = [200, 500, 1000]
    #lrViewDivisors:List[float] = [500]

    grid: dict = {}
    for selectorID in selectorIDs:
        for clickLR in lrClicks:
            for viewDivisor in lrViewDivisors:
                key: str = (selectorID + "Clk" + str(clickLR).replace(".", "")
                            + "ViewDivisor" + str(viewDivisor).replace(".", ""))
                # View LR is the click LR scaled down by the divisor.
                tool: AEvalTool = EvalToolDHondt({
                    EvalToolDHondt.ARG_LEARNING_RATE_CLICKS: clickLR,
                    EvalToolDHondt.ARG_LEARNING_RATE_VIEWS: clickLR / viewDivisor})
                selector: ADHondtSelector = self.getSelectorParameters()[selectorID]
                grid[key] = (selector, tool)
    return grid
def getFuzzyDHontINF():
    """Assemble and return the (taskID, portfolio, model, evalTool, history) tuple
    for the web FuzzyDHondt task with a fixed most-voted-item selector."""
    #taskID:str = "Web" + "FuzzyDHondtINF" + "Roulette1"
    taskID:str = "Web" + "FuzzyDHondt" + "Fixed"

    dataset:ADataset = DatasetST.readDatasets()

    # Fixed selector in use; roulette-wheel alternative kept commented.
    #selector:ADHondtSelector = RouletteWheelSelector({RouletteWheelSelector.ARG_EXPONENT: 1})
    selector:ADHondtSelector = TheMostVotedItemSelector({})

    # Penalization tool (OLin0802HLin1002 variant; semantics defined in PenalizationToolDefinition).
    pToolOLin0802HLin1002:APenalization = PenalizationToolDefinition.exportPenaltyToolOLin0802HLin1002(
        InputSimulatorDefinition.numberOfAggrItems)

    aDescDHont:AggregationDescription = InputAggrDefinition.exportADescDHondtINF(selector, pToolOLin0802HLin1002)

    rIDs, rDescs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs()

    pDescr:Portfolio1AggrDescription = Portfolio1AggrDescription(
        taskID, rIDs, rDescs, aDescDHont)

    history:AHistory = HistoryHierDF(taskID)

    # Export the portfolio and train it on the dataset before use.
    port:APortfolio = pDescr.exportPortfolio(taskID, history)
    port.train(history, dataset)

    model:DataFrame = PModelDHondt(pDescr.getRecommendersIDs())

    evalTool:AEvalTool = EvalToolDHondt({EvalToolDHondt.ARG_LEARNING_RATE_CLICKS: 0.03,
                                         EvalToolDHondt.ARG_LEARNING_RATE_VIEWS: 0.03 / 500})

    return (taskID, port, model, evalTool, history)
def getParameters(cls):
    """Enumerate (selector, negative-implicit-feedback, eval-tool) triples keyed by a
    descriptive ID, over selectors x feedback tools x cls.lrClicks x cls.lrViewDivisors."""
    selectorIDs: List[str] = BatchDefMLFuzzyDHondt().getSelectorParameters().keys()
    negativeImplFeedback: List[str] = cls.getNegativeImplFeedbackParameters().keys()

    grid: Dict[str, object] = {}
    for selectorID in selectorIDs:
        for feedbackID in negativeImplFeedback:
            for clickLR in cls.lrClicks:
                for viewDivisor in cls.lrViewDivisors:
                    key: str = (str(selectorID) + "Clk" + str(clickLR).replace(".", "")
                                + "ViewDivisor" + str(viewDivisor).replace(".", "")
                                + feedbackID)
                    # View LR is derived from the click LR by the divisor.
                    tool: AEvalTool = EvalToolDHondt({
                        EvalToolDHondt.ARG_LEARNING_RATE_CLICKS: clickLR,
                        EvalToolDHondt.ARG_LEARNING_RATE_VIEWS: clickLR / viewDivisor})
                    feedback: APenalization = cls.getNegativeImplFeedbackParameters()[feedbackID]
                    selector: ADHondtSelector = BatchDefMLFuzzyDHondt().getSelectorParameters()[selectorID]
                    grid[key] = (selector, feedback, tool)
    return grid
def test01():
    """Simulation smoke test: ML FuzzyDHondt portfolio with a roulette-wheel selector
    on the ML1M linear-0109 behaviour file."""
    print("Simulation: ML FuzzyDHondt")

    jobID: str = "Roulette1"

    selector = RouletteWheelSelector({RouletteWheelSelector.ARG_EXPONENT: 1})

    rIDs, rDescs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs()

    pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
        "FuzzyDHondt" + jobID, rIDs, rDescs,
        InputAggrDefinition.exportADescDHondt(selector))

    batchID: str = "ml1mDiv90Ulinear0109R1"
    dataset: DatasetML = DatasetML.readDatasets()
    behaviourFile: str = BehavioursML.getFile(BehavioursML.BHVR_LINEAR0109)
    behavioursDF: DataFrame = BehavioursML.readFromFileMl1m(behaviourFile)

    model: DataFrame = PModelDHondt(pDescr.getRecommendersIDs())

    lrClick: float = 0.03
    lrView: float = lrClick / 500
    eTool: AEvalTool = EvalToolDHondt({
        EvalToolDHondt.ARG_LEARNING_RATE_CLICKS: lrClick,
        EvalToolDHondt.ARG_LEARNING_RATE_VIEWS: lrView
    })

    # simulation of portfolio
    # NOTE(review): argsSimulationDict is not defined in this function — presumably a
    # module-level constant; verify it exists in this module before running.
    simulator: Simulator = Simulator(batchID, SimulationML, argsSimulationDict,
                                     dataset, behavioursDF)
    simulator.simulate([pDescr], [model], [eTool],
                       [HistoryHierDF(pDescr.getPortfolioID())])
def __init__(self, argumentsDict: Dict[str, object]):
    """Wrap an EvalToolDHondt built from argumentsDict.

    :param argumentsDict: configuration mapping; may include
        ARG_NORMALIZATION_OF_RESPONSIBILITY (defaults to False).
    :raises ValueError: if argumentsDict is not a dict.
    """
    # isinstance instead of the original `type(...) is not dict`: accepts dict
    # subclasses (e.g. OrderedDict) and is the idiomatic type check.
    if not isinstance(argumentsDict, dict):
        raise ValueError("Argument argumentsDict isn't type dict.")

    # Delegate the learning-rate handling to a wrapped EvalToolDHondt.
    self.et: AEvalTool = EvalToolDHondt(argumentsDict)
    # Whether responsibilities are normalized before updates; off by default.
    self.normalizationOfResponsibility: bool = argumentsDict.get(
        self.ARG_NORMALIZATION_OF_RESPONSIBILITY, False)
def getParameters(self):
    """Build a dict of EvalToolDHondt instances over the click-LR x view-divisor grid,
    keyed by a descriptive ID string."""
    grid: Dict[str, object] = {}
    for clickLR in self.lrClicks:
        for viewDivisor in self.lrViewDivisors:
            key: str = ("Clk" + str(clickLR).replace(".", "")
                        + "ViewDivisor" + str(viewDivisor).replace(".", ""))
            # View LR is the click LR scaled down by the divisor.
            tool: AEvalTool = EvalToolDHondt({
                EvalToolDHondt.ARG_LEARNING_RATE_CLICKS: clickLR,
                EvalToolDHondt.ARG_LEARNING_RATE_VIEWS: clickLR / viewDivisor})
            grid[key] = tool
    return grid
def getParameters(self):
    """Enumerate (selector, eval-tool) pairs over selectors x click LRs x
    responsibility-normalization flags x view divisors, keyed by a descriptive ID."""
    grid: Dict[str, object] = {}
    # Loop nesting order preserved so the resulting dict insertion order is unchanged.
    for selectorID in self.selectorIDs:
        for clickLR in self.lrClicks:
            for normResp in self.normOfRespons:
                for viewDivisor in self.lrViewDivisors:
                    key: str = (selectorID + "Clk" + str(clickLR).replace(".", "")
                                + "ViewDivisor" + str(viewDivisor).replace(".", "")
                                + "NR" + str(normResp))
                    tool: AEvalTool = EvalToolDHondt({
                        EvalToolDHondt.ARG_LEARNING_RATE_CLICKS: clickLR,
                        EvalToolDHondt.ARG_LEARNING_RATE_VIEWS: clickLR / viewDivisor,
                        EvalToolDHondt.ARG_NORMALIZATION_OF_RESPONSIBILITY: normResp})
                    selector: ADHondtSelector = self.getSelectorParameters()[selectorID]
                    grid[key] = (selector, tool)
    return grid
def getParameters(self):
    """Enumerate hybrid (global + personal) D'Hondt eval-tool configurations.

    Keys combine the selector ID with "Clk…ViewDivisor…NR…" tags for the global and
    personal parameter sets; values are (selector, EToolHybrid) pairs.
    """
    grid: Dict[str, object] = {}
    for selectorID in self.selectorIDs:
        for gClick in self.mGlobalLrClicks:
            for gDivisor in self.mGlobalLrViewDivisors:
                for gNormResp in self.mGlobalNormOfRespons:
                    for pClick in self.mPersonLrClicks:
                        for pDivisor in self.mPersonLrViewDivisors:
                            # Renamed from pLrViewDivisorO: this loop iterates the
                            # personal normalization flags, not view divisors.
                            for pNormResp in self.mPersonNormOfRespons:
                                gTag: str = ("Clk" + str(gClick).replace(".", "")
                                             + "ViewDivisor" + str(gDivisor).replace(".", "")
                                             + "NR" + str(gNormResp))
                                pTag: str = ("Clk" + str(pClick).replace(".", "")
                                             + "ViewDivisor" + str(pDivisor).replace(".", "")
                                             + "NR" + str(pNormResp))
                                key: str = selectorID + gTag + pTag

                                globalTool: EvalToolDHondt = EvalToolDHondt({
                                    EvalToolDHondt.ARG_LEARNING_RATE_CLICKS: gClick,
                                    EvalToolDHondt.ARG_LEARNING_RATE_VIEWS: gClick / gDivisor,
                                    EvalToolDHondt.ARG_NORMALIZATION_OF_RESPONSIBILITY: gNormResp})
                                personTool: EvalToolDHondt = EvalToolDHondtPersonal({
                                    EvalToolDHondtPersonal.ARG_LEARNING_RATE_CLICKS: pClick,
                                    EvalToolDHondtPersonal.ARG_LEARNING_RATE_VIEWS: pClick / pDivisor,
                                    EvalToolDHondtPersonal.ARG_NORMALIZATION_OF_RESPONSIBILITY: pNormResp})
                                hybridTool: AEvalTool = EToolHybrid(globalTool, personTool, {})

                                selector: ADHondtSelector = BatchDefMLFuzzyDHondt().getSelectorParameters()[selectorID]
                                grid[key] = (selector, hybridTool)
    return grid
def test21():
    """Simulation smoke test: ST FuzzyDHondtINF portfolio with a roulette-wheel
    selector and the OLin0802HLin1002 penalization tool."""
    print("Simulation: ST FuzzyDHondtINF")

    jobID: str = "Roulette1"

    selector = RouletteWheelSelector({RouletteWheelSelector.ARG_EXPONENT: 1})

    # Alternative probability-based penalty tool kept commented for reference.
    #pProbToolOLin0802HLin1002:APenalization = PenalizationToolDefinition.exportProbPenaltyToolOStat08HLin1002(
    #    InputSimulatorDefinition.numberOfAggrItems)
    pToolOLin0802HLin1002: APenalization = PenalizationToolDefinition.exportPenaltyToolOLin0802HLin1002(
        InputSimulatorDefinition.numberOfAggrItems)

    rIDs, rDescs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs()

    pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
        "FuzzyDHondtINF" + jobID, rIDs, rDescs,
        InputAggrDefinition.exportADescDHondtINF(selector, pToolOLin0802HLin1002))

    batchID: str = "stDiv90Ulinear0109R1"
    dataset: DatasetST = DatasetST.readDatasets()
    behaviourFile: str = BehavioursST.getFile(BehavioursST.BHVR_LINEAR0109)
    behavioursDF: DataFrame = BehavioursST.readFromFileST(behaviourFile)

    model: DataFrame = PModelDHondt(pDescr.getRecommendersIDs())
    print(model)

    lrClick: float = 0.1
    lrView: float = lrClick / 300
    evalTool: AEvalTool = EvalToolDHondt({
        EvalToolDHondt.ARG_LEARNING_RATE_CLICKS: lrClick,
        EvalToolDHondt.ARG_LEARNING_RATE_VIEWS: lrView
    })

    # simulation of portfolio
    # NOTE(review): argsSimulationDict is not defined in this function — presumably
    # module-level; verify it exists before running.
    simulator: Simulator = Simulator(batchID, SimulationST, argsSimulationDict,
                                     dataset, behavioursDF)
    simulator.simulate([pDescr], [model], [evalTool],
                       [HistoryHierDF(pDescr.getPortfolioID())])
def test31():
    """Simulation smoke test: RetailRocket FuzzyDHondt portfolio with a fixed
    (most-voted-item) selector."""
    print("Simulation: RR FuzzyDHondt")

    lrClick: float = 0.03
    #lrView:float = lrClick / 300
    lrViewDivisor: float = 250

    # Encode the learning-rate configuration into the job ID.
    jobID: str = "Fixed" + "Clk" + str(lrClick).replace(
        ".", "") + "ViewDivisor" + str(lrViewDivisor).replace(".", "")

    selector: ADHondtSelector = TheMostVotedItemSelector({})

    rIDs, rDescs = InputRecomRRDefinition.exportPairOfRecomIdsAndRecomDescrs()

    pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
        "FDHondt" + jobID, rIDs, rDescs,
        InputAggrDefinition.exportADescDHondt(selector))

    batchID: str = "rrDiv90Ulinear0109R1"
    dataset: DatasetRetailRocket = DatasetRetailRocket.readDatasetsWithFilter(
        minEventCount=50)
    behaviourFile: str = BehavioursRR.getFile(BehavioursRR.BHVR_LINEAR0109)
    behavioursDF: DataFrame = BehavioursRR.readFromFileRR(behaviourFile)

    model: DataFrame = PModelDHondt(pDescr.getRecommendersIDs())
    print(model)

    evalTool: AEvalTool = EvalToolDHondt({
        EvalToolDHondt.ARG_LEARNING_RATE_CLICKS: lrClick,
        EvalToolDHondt.ARG_LEARNING_RATE_VIEWS: lrClick / lrViewDivisor
    })

    # simulation of portfolio
    # NOTE(review): argsSimulationDict is not defined locally — presumably a
    # module-level constant; verify before running.
    simulator: Simulator = Simulator(batchID, SimulationRR, argsSimulationDict,
                                     dataset, behavioursDF)
    simulator.simulate([pDescr], [model], [evalTool],
                       [HistoryHierDF(pDescr.getPortfolioID())])
def run(self, batchID:str, jobID:str):
    """Run one ML1M simulation job: resolve batch and job parameters by ID, assemble
    the portfolio description, and hand everything to the simulator."""
    divisionDatasetPercentualSize:int
    uBehaviour:str
    repetition:int
    divisionDatasetPercentualSize, uBehaviour, repetition = \
        InputABatchDefinition().getBatchParameters(self.datasetID)[batchID]

    selector, nImplFeedback = self.getParameters()[jobID]

    # NOTE(review): a views learning rate of 1000 is far outside the lrClick/divisor
    # pattern used elsewhere in this file (e.g. 0.02 / 1000) — confirm it is intentional.
    eTool:AEvalTool = EvalToolDHondt({
        EvalToolDHondt.ARG_LEARNING_RATE_CLICKS: 0.02,
        EvalToolDHondt.ARG_LEARNING_RATE_VIEWS: 1000})

    rIDs, rDescs = InputRecomMLDefinition.exportPairOfRecomIdsAndRecomDescrs()

    aggrDescription:AggregationDescription = \
        InputAggrDefinition.exportADescDFuzzyHondtDirectOptimizeINF(selector, nImplFeedback)

    pDescr:Portfolio1AggrDescription = Portfolio1AggrDescription(
        self.getBatchName() + jobID, rIDs, rDescs, aggrDescription)

    model:DataFrame = PModelDHondt(pDescr.getRecommendersIDs())

    simulator:Simulator = InputSimulatorDefinition().exportSimulatorML1M(
        batchID, divisionDatasetPercentualSize, uBehaviour, repetition)
    simulator.simulate([pDescr], [model], [eTool],
                       [HistoryHierDF(pDescr.getPortfolioID())])
def run(self, batchID: str, jobID: str): from execute.generateBatches import BatchParameters #class divisionDatasetPercentualSize: int uBehaviour: str repetition: int divisionDatasetPercentualSize, uBehaviour, repetition = BatchParameters.getBatchParameters( )[batchID] selector, nImplFeedback = self.getParameters()[jobID] eTool: AEvalTool = EvalToolDHondt({ EvalToolDHondt.ARG_LEARNING_RATE_CLICKS: 0.02, EvalToolDHondt.ARG_LEARNING_RATE_VIEWS: 1000 }) datasetID: str = "ml1m" + "Div" + str(divisionDatasetPercentualSize) rIDs, rDescs = InputRecomDefinition.exportPairOfRecomIdsAndRecomDescrs( datasetID) aDescFuzzyHontDirectOptimizeINF: AggregationDescription = InputAggrDefinition.exportADescDFuzzyHontDirectOptimizeINF( selector, nImplFeedback) pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription( "FuzzyDHondtDirectOptimizeINF" + jobID, rIDs, rDescs, aDescFuzzyHontDirectOptimizeINF) model: DataFrame = ModelDefinition.createDHontModel( pDescr.getRecommendersIDs()) simulator: Simulator = InputSimulatorDefinition.exportSimulatorML1M( batchID, divisionDatasetPercentualSize, uBehaviour, repetition) simulator.simulate([pDescr], [model], [eTool], HistoryHierDF)