Example #1
    def run(self, batchID: str, jobID: str):

        divisionDatasetPercentualSize: int
        uBehaviour: str
        repetition: int
        divisionDatasetPercentualSize, uBehaviour, repetition = (
            InputABatchDefinition().getBatchParameters(self.datasetID)[batchID])

        selector, discFactor = self.getParameters()[jobID]

        eTool: AEvalTool = EvalToolDHondtBanditVotes({})

        rIDs, rDescs = InputRecomMLDefinition.exportPairOfRecomIdsAndRecomDescrs()

        aDescDHont: AggregationDescription = InputAggrDefinition.exportADescDHondtDirectOptimizeThompsonSamplingMMR(
            selector, discFactor)

        pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
            self.getBatchName() + jobID, rIDs, rDescs, aDescDHont)

        model: DataFrame = PModelDHondtBanditsVotes(
            pDescr.getRecommendersIDs())

        simulator: Simulator = InputSimulatorDefinition().exportSimulatorML1M(
            batchID, divisionDatasetPercentualSize, uBehaviour, repetition)
        simulator.simulate([pDescr], [model], [eTool],
                           [HistoryHierDF(pDescr.getPortfolioID())])
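A minimal invocation sketch for this run() method, assuming it belongs to a batch-definition class; the class name and the jobID key below are hypothetical, while the batchID follows the ml1mDiv90Ulinear0109R1 pattern used in the later examples:

# hypothetical driver code, not part of the original example
batchDef = BatchDefMLDHondtDirectOptimizeThompsonSamplingMMR()  # assumed class name
batchDef.run("ml1mDiv90Ulinear0109R1", "someSelectorDiscFactorKey")  # hypothetical jobID key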
Example #2
def getFuzzyDHontThompsonSamplingINF():

    taskID: str = "Web" + "FuzzyDHondtThompsonSamplingINF" + "Fixed" + "OLin0802HLin1002"

    selector: ADHondtSelector = TheMostVotedItemSelector({})

    penalization: APenalization = PenalizationToolDefinition.exportProbPenaltyToolOLin0802HLin1002(20)

    aDescDHont: AggregationDescription = InputAggrDefinition.exportADescDHondtThompsonSamplingINF(
        selector, penalization)

    rIDs, rDescs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs()

    pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
        taskID, rIDs, rDescs, aDescDHont)

    history: AHistory = HistoryHierDF(taskID)

    dataset: ADataset = DatasetST.readDatasets()

    port: APortfolio = pDescr.exportPortfolio(taskID, history)
    port.train(history, dataset)

    model: DataFrame = PModelDHondtBanditsVotes(pDescr.getRecommendersIDs())

    evalTool: AEvalTool = EvalToolDHondtBanditVotes({})

    return (taskID, port, model, evalTool, history)
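A brief usage sketch: the tuple returned above could be unpacked by a caller as follows. Only the tuple layout is taken from the function; the caller itself is hypothetical.

# hypothetical caller, not part of the original example
taskID, port, model, evalTool, history = getFuzzyDHontThompsonSamplingINF()
# 'port' is already trained on the ST dataset at this point; 'model' holds the
# D'Hondt bandits-votes state for the portfolio's recommenders and 'evalTool'
# is the evaluation tool paired with that model.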
def test01():

    print("Simulation: ML DHontThompsonSampling")

    jobID: str = "Roulette1"

    selector = RouletteWheelSelector({RouletteWheelSelector.ARG_EXPONENT: 1})

    rIDs, rDescs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs()

    pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
        "DHontThompsonSampling" + jobID, rIDs, rDescs,
        InputAggrDefinition.exportADescDHondtThompsonSampling(selector))

    batchID: str = "ml1mDiv90Ulinear0109R1"
    dataset: DatasetML = DatasetML.readDatasets()
    behaviourFile: str = BehavioursML.getFile(BehavioursML.BHVR_LINEAR0109)
    behavioursDF: DataFrame = BehavioursML.readFromFileMl1m(behaviourFile)

    model: DataFrame = PModelDHondtBanditsVotes(pDescr.getRecommendersIDs())

    eTool: AEvalTool = EvalToolDHondtBanditVotes({})

    # simulation of the portfolio; argsSimulationDict is assumed to be defined
    # elsewhere in the example's module
    simulator: Simulator = Simulator(batchID, SimulationML, argsSimulationDict,
                                     dataset, behavioursDF)
    simulator.simulate([pDescr], [model], [eTool],
                       [HistoryHierDF(pDescr.getPortfolioID())])
def test01():

    print("Simulation: ML FuzzyDHondtDirectOptimize")

    jobID: str = "Roulette1"

    selector = RouletteWheelSelector({RouletteWheelSelector.ARG_EXPONENT: 1})

    rIDs, rDescs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs()

    pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
        "FuzzyDHondtDirectOptimize" + jobID, rIDs, rDescs,
        InputAggrDefinition.exportADescDHondtDirectOptimizeThompsonSampling(
            selector, "DCG"))

    batchID: str = "ml1mDiv90Ulinear0109R1"
    dataset: DatasetML = DatasetML.readDatasets()
    behaviourFile: str = BehavioursML.getFile(BehavioursML.BHVR_LINEAR0109)
    behavioursDF: DataFrame = BehavioursML.readFromFileMl1m(behaviourFile)

    #model:DataFrame = PModelDHont(pDescr.getRecommendersIDs())
    model: DataFrame = PModelDHondtBanditsVotes(pDescr.getRecommendersIDs())

    #lrClick:float = 0.03
    #lrView:float = lrClick / 500
    #eTool:AEvalTool = EvalToolDHondt({EvalToolDHondt.ARG_LEARNING_RATE_CLICKS: lrClick,
    #                                  EvalToolDHondt.ARG_LEARNING_RATE_VIEWS: lrView})
    eTool: AEvalTool = EvalToolDHondtBanditVotes({})
    # simulation of portfolio
    simulator: Simulator = Simulator(batchID, SimulationML, argsSimulationDict,
                                     dataset, behavioursDF)
    simulator.simulate([pDescr], [model], [eTool],
                       [HistoryHierDF(pDescr.getPortfolioID())])
def test21():

    print("Simulation: ST FuzzyDHondtDirectOptimize")

    jobID: str = "Roulette1"

    selector = RouletteWheelSelector({RouletteWheelSelector.ARG_EXPONENT: 3})

    pProbToolOLin0802HLin1002: APenalization = PenalizationToolDefinition.exportProbPenaltyToolOStat08HLin1002(
        InputSimulatorDefinition.numberOfAggrItems)

    rIDs, rDescs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs()

    pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
        "FuzzyDHondtDirectOptimize" + jobID, rIDs, rDescs,
        InputAggrDefinition.exportADescDHondtDirectOptimizeThompsonSamplingINF(
            selector, pProbToolOLin0802HLin1002, "DCG"))

    batchID: str = "stDiv90Ulinear0109R1"
    dataset: DatasetST = DatasetST.readDatasets()
    behaviourFile: str = BehavioursST.getFile(BehavioursST.BHVR_LINEAR0109)
    behavioursDF: DataFrame = BehavioursST.readFromFileST(behaviourFile)

    model: DataFrame = PModelDHondtBanditsVotes(pDescr.getRecommendersIDs())

    #lrClick:float = 0.03
    #lrView:float = lrClick / 500
    #eTool:AEvalTool = EvalToolDHondt({EvalToolDHondt.ARG_LEARNING_RATE_CLICKS: lrClick,
    #                                  EvalToolDHondt.ARG_LEARNING_RATE_VIEWS: lrView})
    eTool: AEvalTool = EvalToolDHondtBanditVotes({})
    # simulation of portfolio
    simulator: Simulator = Simulator(batchID, SimulationST, argsSimulationDict,
                                     dataset, behavioursDF)
    simulator.simulate([pDescr], [model], [eTool],
                       [HistoryHierDF(pDescr.getPortfolioID())])
    def run(self, batchID: str, jobID: str):

        from execute.generateBatches import BatchParameters  # class
        divisionDatasetPercentualSize: int
        uBehaviour: str
        repetition: int
        divisionDatasetPercentualSize, uBehaviour, repetition = (
            BatchParameters.getBatchParameters()[batchID])

        selector, nImplFeedback = self.getParameters()[jobID]

        eTool: AEvalTool = EvalToolDHondtBanditVotes({})

        datasetID: str = "ml1m" + "Div" + str(divisionDatasetPercentualSize)

        rIDs, rDescs = InputRecomDefinition.exportPairOfRecomIdsAndRecomDescrs(
            datasetID)

        aDescNegDHontThompsonSamplingI: AggregationDescription = InputAggrDefinition.exportADescDHontThompsonSamplingINF(
            selector, nImplFeedback)

        pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
            "DHondtThompsonSamplingINF" + jobID, rIDs, rDescs,
            aDescNegDHontThompsonSamplingI)

        model: DataFrame = ModelDefinition.createDHondtBanditsVotesModel(
            pDescr.getRecommendersIDs())

        simulator: Simulator = InputSimulatorDefinition.exportSimulatorML1M(
            batchID, divisionDatasetPercentualSize, uBehaviour, repetition)
        simulator.simulate([pDescr], [model], [eTool], HistoryHierDF)
    def getParameters(self):

        aDict: dict = {}
        for selectorIDI in self.selectorIDs:
            keyIJ: str = str(selectorIDI)
            eTool: AEvalTool = EvalToolDHondtBanditVotes({})
            selectorIJK: ADHondtSelector = (
                BatchDefMLFuzzyDHondt().getSelectorParameters()[selectorIDI])
            aDict[keyIJ] = (selectorIJK, eTool)
        return aDict
    def getParameters():
        rouletteExps: List[int] = [1, 3]
        lrClicks: List[float] = [0.2]
        lrViews: List[float] = [0.1 / 500]

        aDict: dict = {}
        for rouletteExpI in rouletteExps:
            for lrClickJ in lrClicks:
                for lrViewK in lrViews:
                    keyIJ: str = (str(rouletteExpI) + "Clk" + str(lrClickJ).replace(".", "")
                                  + "View" + str(lrViewK).replace(".", ""))
                    eTool: AEvalTool = EvalToolDHondtBanditVotes({})
                    aDict[keyIJ] = (rouletteExpI, eTool)
        return aDict
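With the parameter lists above, the loops produce exactly two entries; a worked example of the key construction (str(0.2) becomes "02", str(0.1 / 500) == "0.0002" becomes "00002"):

# resulting dictionary contents, assuming the lists shown above
#   "1Clk02View00002" -> (1, EvalToolDHondtBanditVotes({}))
#   "3Clk02View00002" -> (3, EvalToolDHondtBanditVotes({}))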