Example no. 1
    def run(self, batchID: str, jobID: str):
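        """Run one simulation job of the batch: build the D'Hondt INF portfolio and simulate it."""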

        # batch parameters: dataset-split percentage, simulated user behaviour and repetition index
        divisionDatasetPercentualSize: int
        uBehaviour: str
        repetition: int
        divisionDatasetPercentualSize, uBehaviour, repetition = (
            InputABatchDefinition().getBatchParameters(self.datasetID)[batchID])

        # job parameters: item selector, implicit-feedback handler and evaluation tool
        eTool: AEvalTool
        selector, nImplFeedback, eTool = self.getParameters()[jobID]

        # identifiers and descriptions of the base recommenders
        rIDs, rDescs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs()

        # aggregation description for the D'Hondt INF variant
        aDescDHontINF: AggregationDescription = InputAggrDefinition.exportADescDHondtINF(
            selector, nImplFeedback)

        # portfolio description combining the base recommenders with the aggregation
        pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
            self.getBatchName() + jobID, rIDs, rDescs, aDescDHontINF)

        # D'Hondt voting model initialised over the portfolio's recommender IDs
        model: DataFrame = PModelDHondt(pDescr.getRecommendersIDs())

        # build the Slantour simulator for this batch and run the portfolio simulation
        simulator: Simulator = InputSimulatorDefinition().exportSimulatorSlantour(
            batchID, divisionDatasetPercentualSize, uBehaviour, repetition)
        simulator.simulate([pDescr], [model], [eTool],
                           [HistoryHierDF(pDescr.getPortfolioID())])
Example no. 2
def getFuzzyDHontINF():
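  """Build the Fuzzy D'Hondt INF portfolio for the ST dataset; returns (taskID, port, model, evalTool, history)."""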

  #taskID:str = "Web" + "FuzzyDHondtINF" + "Roulette1"
  taskID:str = "Web" + "FuzzyDHondt" + "Fixed"
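  # read the ST dataset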
  dataset:ADataset = DatasetST.readDatasets()

  #selector:ADHondtSelector = RouletteWheelSelector({RouletteWheelSelector.ARG_EXPONENT: 1})
  selector:ADHondtSelector = TheMostVotedItemSelector({})

  # penalization tool sized to the number of items produced by the aggregator
  pToolOLin0802HLin1002:APenalization = PenalizationToolDefinition.exportPenaltyToolOLin0802HLin1002(
    InputSimulatorDefinition.numberOfAggrItems)

  # aggregation description: D'Hondt INF built from the selector and the penalization tool
  aDescDHont:AggregationDescription = InputAggrDefinition.exportADescDHondtINF(selector, pToolOLin0802HLin1002)

  # identifiers and descriptions of the base recommenders
  rIDs, rDescs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs()

  # portfolio description combining the base recommenders with the aggregation
  pDescr:Portfolio1AggrDescription = Portfolio1AggrDescription(
    taskID, rIDs, rDescs, aDescDHont)

  # interaction history backed by a hierarchical DataFrame (HistoryHierDF)
  history:AHistory = HistoryHierDF(taskID)

  # instantiate the portfolio and pre-train it on the dataset
  port:APortfolio = pDescr.exportPortfolio(taskID, history)
  port.train(history, dataset)

  # D'Hondt voting model initialised over the portfolio's recommender IDs
  model:DataFrame = PModelDHondt(pDescr.getRecommendersIDs())

  # evaluation tool updating D'Hondt votes, with separate learning rates for clicks and views
  evalTool:AEvalTool = EvalToolDHondt({EvalToolDHondt.ARG_LEARNING_RATE_CLICKS: 0.03,
                                       EvalToolDHondt.ARG_LEARNING_RATE_VIEWS: 0.03 / 500})

  return (taskID, port, model, evalTool, history)

def test21():
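    """Simulates the Fuzzy D'Hondt INF portfolio with a roulette-wheel selector on the ST dataset."""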

    print("Simulation: ST FuzzyDHondtINF")

    jobID: str = "Roulette1"

    # roulette-wheel selector with exponent 1 (probabilistic item selection)
    selector = RouletteWheelSelector({RouletteWheelSelector.ARG_EXPONENT: 1})

    #pProbToolOLin0802HLin1002:APenalization = PenalizationToolDefinition.exportProbPenaltyToolOStat08HLin1002(
    #    InputSimulatorDefinition.numberOfAggrItems)
    pToolOLin0802HLin1002: APenalization = PenalizationToolDefinition.exportPenaltyToolOLin0802HLin1002(
        InputSimulatorDefinition.numberOfAggrItems)

    rIDs, rDescs = InputRecomSTDefinition.exportPairOfRecomIdsAndRecomDescrs()

    pDescr: Portfolio1AggrDescription = Portfolio1AggrDescription(
        "FuzzyDHondtINF" + jobID, rIDs, rDescs,
        InputAggrDefinition.exportADescDHondtINF(selector,
                                                 pToolOLin0802HLin1002))

    batchID: str = "stDiv90Ulinear0109R1"
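    # ST dataset and the simulated user-behaviour matrix used by the simulator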
    dataset: DatasetST = DatasetST.readDatasets()
    behaviourFile: str = BehavioursST.getFile(BehavioursST.BHVR_LINEAR0109)
    behavioursDF: DataFrame = BehavioursST.readFromFileST(behaviourFile)

    model: DataFrame = PModelDHondt(pDescr.getRecommendersIDs())
    print(model)

    lrClick: float = 0.1
    lrView: float = lrClick / 300
    # evaluation tool updating D'Hondt votes, with separate learning rates for clicks and views
    evalTool: AEvalTool = EvalToolDHondt({
        EvalToolDHondt.ARG_LEARNING_RATE_CLICKS: lrClick,
        EvalToolDHondt.ARG_LEARNING_RATE_VIEWS: lrView
    })

    # simulation of the portfolio; argsSimulationDict (the SimulationST arguments)
    # is assumed to be defined elsewhere in the original source
    simulator: Simulator = Simulator(batchID, SimulationST, argsSimulationDict,
                                     dataset, behavioursDF)
    simulator.simulate([pDescr], [model], [evalTool],
                       [HistoryHierDF(pDescr.getPortfolioID())])