def test01():
    """Exercise AggrFuzzyDHondtINF.runWithResponsibility on hand-made dummy data."""
    print("Test 01")

    top_k: int = 120  # number of recommended items

    behaviour_desc: UserBehaviourDescription = UserBehaviourDescription(
        observationalLinearProbabilityFnc, [0.1, 0.9])

    # Per-method result lists; item universe = [1,2,4,5,6,7,8,12,32,64,77].
    method_results: dict[str, pd.Series] = {
        "metoda1": pd.Series([0.2, 0.1, 0.3, 0.3, 0.1], [32, 2, 8, 1, 4], name="rating"),
        "metoda2": pd.Series([0.1, 0.1, 0.2, 0.3, 0.3], [1, 5, 32, 6, 7], name="rating"),
        "metoda3": pd.Series([0.3, 0.1, 0.2, 0.3, 0.1], [7, 2, 77, 64, 12], name="rating"),
    }

    # Votes per method, indexed by methodID.
    votes_df: DataFrame = pd.DataFrame(
        [["metoda1", 100], ["metoda2", 80], ["metoda3", 60]],
        columns=["methodID", "votes"])
    votes_df.set_index("methodID", inplace=True)

    user_id: int = 0
    item_id: int = 7

    history: AHistory = HistoryDF("test01")
    # arguments: userID, itemID, position, observation, clicked
    for _ in range(3):
        history.insertRecommendation(user_id, item_id, 1, True, None)
    history.print()

    penalization: APenalization = PenalUsingReduceRelevance(
        penaltyLinear, [0.8, 0.2, 3], penaltyLinear, [1.0, 0.2, 3], 3)

    aggr: AggrFuzzyDHondt = AggrFuzzyDHondtINF(
        history, {
            AggrFuzzyDHondtINF.ARG_SELECTOR: TheMostVotedItemSelector({}),
            AggrFuzzyDHondtINF.ARG_PENALTY_TOOL: penalization
        })

    item_ids: List[tuple] = aggr.runWithResponsibility(
        method_results, votes_df, user_id, top_k)
    print(item_ids)
def test01():
    """Run PenalUsingReduceRelevance.runOneMethodPenalization on a dummy history.

    NOTE(review): another function in this file is also named ``test01``;
    whichever definition comes later shadows the other — consider renaming.
    """
    print("Test 01")

    os.chdir("..")

    user_id: int = 1

    history: AHistory = HistoryHierDF("databse1")
    # arguments: userID, itemID, position, observation, clicked
    history.insertRecommendation(user_id, 100, 1, 0.5, False)
    history.insertRecommendation(user_id, 100, 1, 0.5, True)
    history.insertRecommendation(user_id, 100, 1, 0.5, True)
    history.insertRecommendation(user_id, 100, 1, 0.5, True)

    # itemID -> relevance, as a pandas Series.
    recommendations: Series = Series({
        100: 0.35, 125: 0.25, 95: 0.15, 45: 0.1, 78: 0.05,
        68: 0.05, 32: 0.02, 6: 0.01, 18: 0.01, 47: 0.01,
    })

    penalty: APenalization = PenalUsingReduceRelevance(
        penaltyLinear, [0.8, 0.2, 100], penaltyLinear, [1.0, 0.2, 100], 100)

    penalized: Series = penalty.runOneMethodPenalization(
        user_id, recommendations, history)
    print(penalized)
def exportAPenaltyToolOLin0802HLin1002(numberOfAggrItems: int):
    """Factory: PenalUsingReduceRelevance with a linear observational penalty
    parameterized by *numberOfAggrItems* and a linear history penalty.
    """
    observational_args = [0.8, 0.2, numberOfAggrItems]
    history_args = [1.0, 0.2, 100]
    return PenalUsingReduceRelevance(
        penaltyLinear, observational_args,
        penaltyLinear, history_args, 100)
def exportAPenaltyToolOStat08HLin1002(numberOfAggrItems: int):
    """Factory: PenalUsingReduceRelevance with a static observational penalty
    and a linear history penalty.

    NOTE(review): ``numberOfAggrItems`` is accepted for signature parity with
    the linear variant but is never used here — confirm this is intended.
    """
    observational_args = [1.0]
    history_args = [1.0, 0.2, 100]
    return PenalUsingReduceRelevance(
        penaltyStatic, observational_args,
        penaltyLinear, history_args, 100)
def test03():
    """End-to-end run of AggrContextFuzzyDHondtINF with an EvalToolContext
    on the ST dataset: two aggregate/display/click rounds on two page types.
    """
    # First get the dataset data.
    dataset: ADataset = DatasetST.readDatasets()
    events = dataset.eventsDF
    serials = dataset.serialsDF

    # Dummy data; each key/value pair stands in for a recommender's result list
    # (= what that recommender recommended).
    method_results: dict = {
        "metoda1": pd.Series([0.2, 0.1, 0.3, 0.3, 0.1], [32, 2, 8, 1, 4], name="rating"),
        "metoda2": pd.Series([0.1, 0.1, 0.2, 0.3, 0.3], [1, 5, 32, 6, 7], name="rating"),
    }

    # Initial votes for each recommender.
    portfolio_df: DataFrame = pd.DataFrame(
        [["metoda1", 0.6], ["metoda2", 0.4]], columns=["methodID", "votes"])
    portfolio_df.set_index("methodID", inplace=True)

    user_id = 1
    item_id = 20

    history: AHistory = HistoryDF("test01")

    # Everything the evaluator needs, in one dictionary.
    evaluation_dict: dict = {
        EvalToolContext.ARG_USER_ID: user_id,
        # itemID is not mandatory unless ARG_PAGE_TYPE == "zobrazit"
        EvalToolContext.ARG_ITEM_ID: item_id,
        EvalToolContext.ARG_SENIORITY: 5,           # seniority of the user
        EvalToolContext.ARG_PAGE_TYPE: "zobrazit",  # "zobrazit", "index" or "katalog"
        EvalToolContext.ARG_ITEMS_SHOWN: 10,        # how many items are shown to the user
    }

    # Init eTool; an empty AHistory instance is OK for the ST dataset.
    e_tool = EvalToolContext({
        EvalToolContext.ARG_ITEMS: serials,
        EvalToolContext.ARG_EVENTS: events,  # events drive the user-history computation
        EvalToolContext.ARG_DATASET: "st",   # which dataset we are in
        EvalToolContext.ARG_HISTORY: history,
    })

    penalization: APenalization = PenalUsingReduceRelevance(
        penaltyLinear, [0.8, 0.2, 3], penaltyLinear, [1.0, 0.2, 3], 3)

    aggr: AggrContextFuzzyDHondtINF = AggrContextFuzzyDHondtINF(
        history, {  # empty AHistory instance is OK for the ST dataset
            AggrContextFuzzyDHondtINF.ARG_EVAL_TOOL: e_tool,
            # ? FuzzyDHondt needs this, not contextAggr
            AggrContextFuzzyDHondtINF.ARG_SELECTOR: TheMostVotedItemSelector({}),
            AggrContextFuzzyDHondtINF.ARG_PENALTY_TOOL: penalization,
        })

    def _run_round():
        # Get data from the aggregator, then call the displayed & click methods.
        recommended = aggr.runWithResponsibility(
            method_results, portfolio_df, user_id,
            numberOfItems=5, argumentsDict=evaluation_dict)
        e_tool.displayed(recommended, portfolio_df, evaluation_dict)
        # recommended[0][0] is the clicked item
        e_tool.click(recommended, recommended[0][0], portfolio_df, evaluation_dict)

    _run_round()

    # The user is now on an "index" page, so the page type in
    # evaluation_dict has to change (!), then run the same round again.
    evaluation_dict[EvalToolContext.ARG_PAGE_TYPE] = "index"
    _run_round()