Example #1
    def getFitness(self, matrix):
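        # Game 1: the candidate weight matrix plays as white (player 1) against a fixed reference evaluator as black (player 2).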
        ev1_1 = ev.Evaluator(1, matrix)
        ev2_1 = ev.Evaluator(2)
        ag1_1 = ag.AlphaBetaAgent(1, 2, ev1_1)
        ag2_1 = ag.AlphaBetaAgent(2, 2, ev2_1)
        result1, game1 = self.simulateGame(ag1_1, ag2_1)

        white, black = game1.countColors()

        if black == 0:
            fitness1 = 64
        elif white == 0:
            fitness1 = -64
        else:
            fitness1 = white - black

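        # Game 2: colors are swapped so the candidate matrix plays black; the score is taken from black's point of view.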
        ev1_2 = ev.Evaluator(1, matrix)
        ev2_2 = ev.Evaluator(2)
        ag2_2 = ag.AlphaBetaAgent(1, 2, ev2_2)
        ag1_2 = ag.AlphaBetaAgent(2, 2, ev1_2)
        result2, game2 = self.simulateGame(ag2_2, ag1_2)

        white, black = game2.countColors()

        if black == 0:
            fitness2 = -64
        elif white == 0:
            fitness2 = 64
        else:
            fitness2 = black - white

        fitness = (fitness1 + fitness2)
        return fitness
Example #2
def simulateAlpha():
    ev1 = ev.Evaluator(1)
    ev2 = ev.Evaluator(1)
    """#Alfabeta vs Union
    ag1 = ag.AlphaBetaAgent(1,4,ev1)
    ag2 = ag.UnionRulesAgent(2)
    sim = Simulation(ag1, ag2, 250, 1)

    # Union vs Alpha-beta
    ag1 = ag.UnionRulesAgent(1)
    ag2 = ag.AlphaBetaAgent(2,4,ev1)
    sim = Simulation(ag1, ag2, 250, None, 1)"""

    # Deterministic
    # Alpha-beta vs Minimax
    ag1 = ag.AlphaBetaAgent(1, 4, ev1)
    ag2 = ag.MinimaxAgent(2, 4, ev2)
    sim = Simulation(ag1, ag2, 1, 1, 1)

    # Minimax vs Alpha-beta
    ag1 = ag.MinimaxAgent(1, 4, ev2)
    ag2 = ag.AlphaBetaAgent(2, 4, ev1)
    sim = Simulation(ag1, ag2, 1, 1, 1)

    # Alpha-beta vs MCTS
    ag1 = ag.AlphaBetaAgent(1, 4, ev1)
    ag2 = ag.MonteCarloAgent(2, None)
    sim = Simulation(ag1, ag2, 250, 1)

    # MCTS vs Alpha-beta
    ag1 = ag.MonteCarloAgent(1, None)
    ag2 = ag.AlphaBetaAgent(2, 4, ev1)
    sim = Simulation(ag1, ag2, 250, None, 1)
Example #3
def simulateEvaluator():
    # Alpha-beta vs alpha-beta, varying the evaluator on each side
    for i in range(1, 7):
        for j in range(1, 7):
            if i != j:
                sim = Simulation(ag.AlphaBetaAgent(1, 4, ev.Evaluator(i)),
                                 ag.AlphaBetaAgent(2, 4, ev.Evaluator(j)), 1,
                                 i, j)
Example #4
def simulateBestAlphaModifyDepth(evaluator):
    ev1 = ev.Evaluator(evaluator)
    ev2 = ev.Evaluator(evaluator)
    # Deterministic
    # Alpha-beta vs minimax
    for i in range(1, 3):
        ag1 = ag.MinimaxAgent(1, i, ev1)
        ag2 = ag.MinimaxAgent(2, 4, ev2)
        sim = Simulation(ag1, ag2, 1, evaluator, evaluator)
Example #5
def simulateDepthM():
    ev1 = ev.Evaluator(1)
    ev2 = ev.Evaluator(1)
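    # Vary the minimax depth from 1 to 8 against a fixed depth-4 alpha-beta opponent, in both seat orders.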

    for i in range(1, 9):
        sim = Simulation(ag.MinimaxAgent(1, i, ev1),
                         ag.AlphaBetaAgent(2, 4, ev2), 1, "", 1)

        sim = Simulation(ag.AlphaBetaAgent(1, 4, ev2),
                         ag.MinimaxAgent(2, i, ev1), 1, 1, "")
Example #6
    def calc_map(self, gt, iou_thresh: float=0.25, ignore_grade: bool=False):

        boundingBoxes = self._extract_bounding_boxes(gt, ignore_grade)

        evaluator = Evaluator()
        metricsPerClass = evaluator.GetPascalVOCMetrics(boundingBoxes, iou_thresh)
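        # mAP: the mean of the per-class AP values, with NaN APs mapped to 0 by np.nan_to_num.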
        return np.mean([np.nan_to_num(mc['AP']) for mc in metricsPerClass])
Example #7
    def tuneConfidenceBaseBid(self, testDF):
        print("Setting up XGBoost for Test set")
        y_pred = self.__estimateClick(testDF)

        y_pred = [1 if i >= 0.7 else 0 for i in y_pred]

        # print("number of 1 here: ", sum(y_pred))
        # avgCTR = np.count_nonzero(testDF.click) / testDF.shape[0]
        myEvaluator = Evaluator.Evaluator()

        bestCTR = -1
        bestBidPrice = -1
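        # Sweep candidate base bid prices (the range currently covers only the single value 300) and keep the one with the highest CTR.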
        for i in range(300, 301):
            bidprice = BidEstimator().confidenceBidPrice(y_pred, -1, i)

            # print("total bid price: ", sum(bidprice))
            # print("total bid submitted: ", np.count_nonzero(bidprice))
            # print("Number of $0 bid", bidprice.count(0))

            bids = np.stack([testDF['bidid'], bidprice], axis=1)

            bids = pd.DataFrame(bids, columns=['bidid', 'bidprice'])

            # print("Estimated bid price: ", bids.bidprice.ix[0])

            resultDict = myEvaluator.computePerformanceMetricsDF(6250 * 1000, bids, testDF)
            myEvaluator.printResult()
            ctr = resultDict['click'] / resultDict['won']

            if ctr > bestCTR:
                bestCTR = ctr
                bestBidPrice = i

        print("Best CTR: %.5f \nPrice: %d" % (bestCTR, bestBidPrice))
Example #8
def run():
    with open('./config.yml') as f:
        config = yaml.safe_load(f)
    roi_shape = config['roi_shape']
    dead_zone = config['dead_zone']
    cube_resolution = config['cube_resolution']
    laser_num = config['laser_num']
    lidar_num = config['lidar_num']
    beam_angle = config['beam_angle']
    max_itrs = config['max_itrs']
    numb_bees = config['numb_bees']
    lower_bound = config['lower_bound'] * lidar_num
    upper_bound = config['upper_bound'] * lidar_num
    estimate_solve_time = config['estimate_solve_time']
    save_result_to_json = config['save_result_to_json']

    assert (len(beam_angle) == laser_num
            ), "Please check the number of beams' angles of pitch"
    eval.logConfig(config=config)

    VSR_solver = eval.Evaluator(roi_shape=roi_shape,
                                dead_zone=dead_zone,
                                cube_resolution=cube_resolution,
                                laser_num=laser_num,
                                lidar_num=lidar_num,
                                pitch_angle=beam_angle)
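    # Time max_itrs direct calls to the solver to estimate the average cost of a single evaluation.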
    now = time.time()
    for i in range(max_itrs):

        VSR_solver.solve(lower_bound)

    total_time = time.time() - now
    print("Average time consume: ", total_time / max_itrs)
Example #9
    def __init__(self,
                 sparql=None,
                 compiler=None,
                 evaluator=None,
                 multiline_parser=None,
                 options=['time'],
                 debug=False,
                 load_translations=True):
        self.sparql = sparql
        if self.sparql:
            self.n = sparql.n
        else:
            self.n = Namespaces.Namespaces()
        # self.translator = translator
        if compiler:
            self.compiler = compiler
        else:
            self.compiler = Compiler.Compiler(self.n)
        if evaluator is None:
            evaluator = Evaluator.Evaluator(self.n)
        self.evaluator = evaluator
        self.parser = Parser.Parser(self.n)
        self.urigen = UniqueURIGenerator()
        if multiline_parser is None:
            multiline_parser = MultilineParser.MultilineParser(self.n, self)
        self.multiline_parser = multiline_parser
        self.options = options
        self.cum_comp_time = 0
        self.cum_eval_time = 0
        if not debug:
            self.compiler.debug_off()

        if load_translations:
            from loadTranslations import loadTranslations
            loadTranslations(self)
Example #10
def exeLogisticRegressionBidModel_v2(validationReader=None, trainReader=None, writeResult2CSV=False):
    print("============ LogisticRegressionBidModel_v2")
    trainOneHotData, trainY = trainReader.getOneHotData()
    validationOneHotData, valY = validationReader.getOneHotData(
        train_cols=trainOneHotData.columns.get_values().tolist())

    X_train = trainOneHotData
    Y_train = trainY['click']
    X_val = validationOneHotData
    Y_val = valY['click']

    lbm = LinearBidModel_v2(cBudget=110, avgCTR=0.2)
    lbm.trainModel(X_train, Y_train)
    # lbm.gridSearchandCrossValidate(X_train, Y_train)
    # print (validationReader.getDataFrame().info())
    v_df = validationReader.getDataFrame()

    y_pred, bids = lbm.getBidPrice(X_val, v_df)
    if writeResult2CSV:
        ipinyouWriter.ResultWriter().writeResult("resultLogisticRegressionBidModel.csv", bids)

    myEvaluator = Evaluator()
    myEvaluator.computePerformanceMetricsDF(6250 * 1000, bids, v_df)
    myEvaluator.printResult()

    return y_pred
Example #11
    def __init__(self):

        self.nmoves = 1
        # Menu for choosing the players
        self.menu = Interface.Menu()
        self.playerBlack, self.playerWhite = self.menu.returnPlayers()
        self.evaluator1 = ev.Evaluator(1)
        self.evaluator2 = ev.Evaluator(1)

        self.selectPlayers()

        self.board = Interface.Board()

        # Create the game
        self.game = gr.Othello()
        self.startGame()
Example #12
def exeEnsemble_v1(trainDF, targetDF, trainPath, validationPath, targetPath, writeResult2CSV=False):
    xg_y_pred = exeXGBoostBidModel(validationData=targetDF, trainData=trainDF, writeResult2CSV=False)
    cnn_y_pred = exeCNNBidModel(validationDataPath=validationPath, trainDataPath=trainPath, testDataPath=targetPath, writeResult2CSV=False)
    # fm_y_pred = exeFM_SGDBidModel(validationDataOneHot=validateDFonehot, trainDataOneHot=trainDFonehot, validationData=validateDF, writeResult2CSV=True)

    # Use XG's 0 when its threshold is below 0.75.
    y_pred = [0 if xg < 0.75 else cnn for xg, cnn in zip(xg_y_pred, cnn_y_pred)]

    # Use CNN's 1 when its threshold is above 0.2?
    prune_thresh = 0.2

    be = BidEstimator()
    bidprice = be.linearBidPrice_mConfi(y_pred, 230, 100, prune_thresh)
    # bidprice = be.linearBidPrice_variation(y_pred, 80, 0.2, slotprices=slotprices, prune_thresh=prune_thresh)
    bids = np.stack([targetDF['bidid'], bidprice], axis=1)
    bids = pd.DataFrame(bids, columns=['bidid', 'bidprice'])

    if writeResult2CSV:
        ipinyouWriter.ResultWriter().writeResult("resultEnsemble_v1.csv", bids)

    myEvaluator = Evaluator.Evaluator()
    myEvaluator.computePerformanceMetricsDF(6250*1000, bids, targetDF)

    # Force CNN result to 1 and 0 for F1 score
    y_pred = [1 if i >= prune_thresh else 0 for i in y_pred]
    ce = Evaluator.ClickEvaluator()
    ce.printClickPredictionScore(y_pred, targetDF)
Example #13
def main(args):
    print(args)

    settings = Settings.Settings(args)

    # We already did these
    # ResNet50 and indices: 5, 2, 7, 3 (doing ? r.n.)
    settings.TestDataset_Fold_Index = int(args.FOLD_I)  # can be 0 to 9 (K-1)
    settings.TestDataset_K_Folds = int(args.KFOLDS)
    assert settings.TestDataset_Fold_Index < settings.TestDataset_K_Folds
    kfold_txt = "KFold_" + str(settings.TestDataset_Fold_Index) + "z" + str(
        settings.TestDataset_K_Folds)
    print(kfold_txt)

    settings.model_backend = args.model_backend
    settings.train_epochs = int(args.train_epochs)
    settings.train_batch = int(args.train_batch)

    # resnet 101 approx 5-6 hours (per fold - might be a bit less ...)
    # resnet 50  approx 3-4 hours
    model_txt = "cleanManual_" + str(
        settings.train_epochs) + "ep_ImagenetWgenetW_" + str(
            settings.model_backend) + "-" + str(
                settings.train_batch
            ) + "batch_Augmentation1to1_ClassWeights1to3_TestVal"
    print(model_txt)

    dataset = Dataset.Dataset(settings)
    evaluator = Evaluator.Evaluator(settings)

    #settings.run_name = settings.run_name + "AYRAN"
    show = False
    save = True

    #dataset.dataset
    model = ModelHandler.ModelHandler(settings, dataset)

    if not os.path.exists("plots/"):
        os.makedirs("plots/")

    model.model.train(show=show, save=save)

    # K-Fold_Crossval:
    model.model.save(
        "/scratch/ruzicka/python_projects_large/ChangeDetectionProject_files/weightsModel2_"
        + model_txt + "_[" + kfold_txt + "].h5")

    SAVE_ALL_FOLDER = model_txt + "PLOTS/"
    SAVE_ALL_PLOTS = SAVE_ALL_FOLDER + "plot"
    # DEBUG_SAVE_ALL_THR_PLOTS = None
    if not os.path.exists(SAVE_ALL_FOLDER):
        os.makedirs(SAVE_ALL_FOLDER)

    evaluator.unified_test_report([model.model.model],
                                  dataset.test,
                                  validation_set=dataset.val,
                                  postprocessor=model.model.dataPreprocesser,
                                  name=SAVE_ALL_PLOTS,
                                  optionally_save_missclassified=True)
Example #14
def evaluate():
    st = StanfordNERTagger(model, jar, encoding='utf-8')
    ev = Evaluator.Evaluator()
    for i in range(1, 301):
        print("File " + str(i))
        e = evaluateMeasure(
            dataDir + "\\seminars_training\\training\\{}.txt".format(i), ev, i)
    e.evalResults()
Example #15
class Simpson13:
    evaluator = Evaluator.Evaluator()

    def simpson13(self, a, b):
        # Integrand assumed to match the other quadrature rules in this module
        x = sympy.symbols('x')
        f = sympy.exp(x) - 2 * x
        h = float(b - a) / 2
        x1 = a + h
        fa = self.evaluator.evaluate(f, a)
        fb = self.evaluator.evaluate(f, b)
        fx1 = self.evaluator.evaluate(f, x1)
        w = (float(h) / 3) * (fa + (4 * fx1) + fb)
        print("The result without subtracting the error is:", w)
Example #16
class ReglaSimpleTrapecio:
    evaluator = Evaluator.Evaluator()

    def simpleTrapecio(self, a, b):
        x = sympy.symbols('x')
        f = sympy.exp(x) - 2 * x
        h = b - a
        fa = self.evaluator.evaluate(f, a)
        fb = self.evaluator.evaluate(f, b)
        print((float(h) / 2) * (fa + fb))
Example #17
def detections(cfg, gtFolder, detFolder, savePath, show_process=True):

    # getGTBoxes / getDetBoxes return the ground-truth boxes, their classes, and the detection boxes
    gt_boxes, classes, num_pos = getGTBoxes(cfg, gtFolder)
    det_boxes = getDetBoxes(cfg, detFolder)

    # Create the evaluator object
    evaluator = Evaluator()

    # Return the Pascal VOC metrics per class
    return evaluator.GetPascalVOCMetrics(cfg, classes, gt_boxes, num_pos,
                                         det_boxes)
Example #18
    def resultSimulate(self, matrix1, matrix2):
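        # Play two games with the colors swapped and return the matrix that wins more, breaking ties at random.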
        ev1_1 = ev.Evaluator(1, matrix1)
        ev2_1 = ev.Evaluator(1, matrix2)
        ag1_1 = ag.AlphaBetaAgent(1, 4, ev1_1)  # ind1 -> White
        ag2_1 = ag.AlphaBetaAgent(2, 4, ev2_1)  # ind2 -> Black
        simulate1 = self.simulateGame(ag1_1, ag2_1)
        a1w, a2b, d1 = 0, 0, 0
        if (simulate1 == 2):
            a2b += 1
        elif (simulate1 == 1):
            a1w += 1
        else:
            d1 += 1

        ev1_2 = ev.Evaluator(1, matrix1)
        ev2_2 = ev.Evaluator(1, matrix2)
        ag1_2 = ag.AlphaBetaAgent(2, 4, ev1_2)  # ind1 -> Black
        ag2_2 = ag.AlphaBetaAgent(1, 4, ev2_2)  # ind2 -> White
        simulate2 = self.simulateGame(ag2_2, ag1_2)
        a2w, a1b, d2 = 0, 0, 0
        if (simulate2 == 2):
            a1b += 1
        elif (simulate2 == 1):
            a2w += 1
        else:
            d2 += 1

        a1 = a1w + a1b
        a2 = a2w + a2b
        d = d1 + d2
        if a1 > a2:
            return matrix1, 1
        elif a2 > a1:
            return matrix2, 2
        else:
            random = randint(0, 1)
            if random == 0:
                return matrix1, -1
            else:
                return matrix2, -2
Example #19
def detections(cfg,
               gtFolder,
               detFolder,
               savePath,
               show_process=True):

    gt_boxes, classes, num_pos = getGTBoxes(cfg, gtFolder)
    det_boxes = getDetBoxes(cfg, detFolder)

    evaluator = Evaluator()
    # Pass in the config and the class list

    return evaluator.GetPascalVOCMetrics(cfg, classes, gt_boxes, num_pos, det_boxes)
Example #20
    def __init__(self, maxlen, num_tags, word_index, embeddings, model_type,
                 texts_to_eval_dir, dumpPath):
        self.num_tags = num_tags
        self.word_index = word_index
        self.texts_to_eval_dir = texts_to_eval_dir
        self.dumpPath = dumpPath
        self.model_maker = nm.NeuralModel(maxlen, num_tags, word_index,
                                          embeddings)
        num_measures = 1 + 3 * (num_tags - 2)
        self.evaluator = ev.Evaluator(num_tags, num_measures,
                                      self.model_maker.tags)
        self.postprocessing = pp.PostProcessing(num_tags,
                                                self.model_maker.tags)
Example #21
def main(argv=None):
    t = Tokenizer.Tokenizer()
    e = Evaluator.Evaluator()
    if len(argv) == 1:
        try:
            argv = input("Enter an expression: ")
        except (IOError, ValueError):
            result = "You've entered an invalid expression!"
    else:
        argv = sys.argv[1]
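    # Convert the infix expression to reverse Polish notation, then evaluate it.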
    rpn = t.shunting(argv)
    result = e.evaluate(rpn)
    print(result)
Example #22
def run():
    with open('./config.yml') as f:
        config = yaml.safe_load(f)
    roi_shape = config['roi_shape']
    dead_zone = config['dead_zone']
    cube_resolution = config['cube_resolution']
    laser_num = config['laser_num']
    lidar_num = config['lidar_num']
    beam_angle = config['beam_angle']
    max_itrs = config['max_itrs']
    numb_bees = config['numb_bees']
    lower_bound = config['lower_bound'] * lidar_num
    upper_bound = config['upper_bound'] * lidar_num
    estimate_solve_time = config['estimate_solve_time']
    save_result_to_json = config['save_result_to_json']

    assert (len(beam_angle) == laser_num
            ), "Please check the number of beams' angles of pitch"
    eval.logConfig(config=config)

    VSR_solver = eval.Evaluator(roi_shape=roi_shape,
                                dead_zone=dead_zone,
                                cube_resolution=cube_resolution,
                                laser_num=laser_num,
                                lidar_num=lidar_num,
                                pitch_angle=beam_angle)
    # creates model
    if estimate_solve_time:
        eval.estimateTime(config=config, VSR_solver=VSR_solver, Hive=Hive)

    now = time.time()
    model = Hive.BeeHive(lower=lower_bound,
                         upper=upper_bound,
                         fun=VSR_solver.solve,
                         numb_bees=numb_bees,
                         max_itrs=max_itrs,
                         verbose=True)
    cost = model.run()
    total_time = time.time() - now
    print("Total time consume: ", total_time)
    print("Solution: ", model.solution)
    # prints out best solution
    print("Fitness Value ABC: {0}".format(model.best))

    if save_result_to_json:
        eval.saveResults(config=config,
                         model=model,
                         total_time=total_time,
                         cost=cost)
    # plots convergence
    Utilities.ConvergencePlot(cost)
Example #23
def exeGaussianRandomBidModel(validationData, trainData=None, writeResult2CSV=False):
    # gaussian random Bidding Model
    randomBidModel = BidModels.GaussianRandomBidModel()

    bids = randomBidModel.getBidPrice(validationData.bidid)
    # bids = np.apply_along_axis(randomBidModel.getBidPrice, axis=1, arr=validationData.getTestData())

    if writeResult2CSV:
        ipinyouWriter.ResultWriter().writeResult("resultGaussianRandomBidModel.csv", bids)
    # myEvaluator = Evaluator.Evaluator(25000*1000, bids, validationData.getTrainData())
    # myEvaluator.computePerformanceMetrics()
    myEvaluator = Evaluator()
    myEvaluator.computePerformanceMetricsDF(6250 * 1000, bids, validationData)
    myEvaluator.printResult()
Example #24
class ReglaCompuestaTrapecio:
    evaluator = Evaluator.Evaluator()

    def compuestaTrapecio(self, a, b, n):
        x = sympy.symbols('x')
        f = sympy.exp(x) - 2 * x  # Function to integrate
        h = float(b - a) / n
        s = 0
        xi = a + h
        for i in range(1, n):
            s += self.evaluator.evaluate(f, xi)
            xi += h
        fa = self.evaluator.evaluate(f, a)
        fb = self.evaluator.evaluate(f, b)
        print((float(h) / 2) * (fa + (2 * s) + fb))
Example #25
def exeUniformRandomBidModel(validationData, trainData=None, writeResult2CSV=False):
    # uniform random Bidding Model
    randomBidModel = BidModels.UniformRandomBidModel(300) #upper bound for random bidding range
    # TODO: could train this too in a range.

    bids = randomBidModel.getBidPrice(validationData.bidid)
    # bids = np.apply_along_axis(randomBidModel.getBidPrice, axis=1, arr=validationData.getTestData())

    if writeResult2CSV:
        ipinyouWriter.ResultWriter().writeResult("resultUniformRandomBidModel.csv", bids)
    # myEvaluator = Evaluator.Evaluator(25000*1000, bids, validationData.getTrainData())
    # myEvaluator.computePerformanceMetrics()
    myEvaluator = Evaluator()
    myEvaluator.computePerformanceMetricsDF(6250 * 1000, bids, validationData)
    myEvaluator.printResult()
Example #26
def runOnto():
    print("Loading model, please wait. (This may take a while)")
    model = KeyedVectors.load_word2vec_format(
        dataDir + "\\GoogleNews-vectors-negative300.bin", binary=True)
    print("Model loaded")
    for i in range(0, 485):
        id = i
        print(id)
        filename = dataDir + "\\seminars_training\\training\\{}.txt".format(id)
        ev = Evaluator.Evaluator()
        file = ev.getFileToTag(filename)
        email = loadInFile(file, id)
        tree = OntologyTree.tree
        ont = OntologyTagger.OntologyTagger(email, model, tree, dataDir)
        tree = ont.findOntTreeMatch(ont.keyWordsInTopic())
    ont.printTree(tree)
Example #27
    def __init__(self, anchors, size, metric_names: list, detect_thresh: float=0.3, nms_thresh: float=0.3
                 , images_per_batch: int=-1):
        self.ap = 'AP'
        self.anchors = anchors
        self.size = size
        self.detect_thresh = detect_thresh
        self.nms_thresh = nms_thresh

        self.images_per_batch = images_per_batch
        self.metric_names_original = metric_names
        self.metric_names = ["{}-{}".format(self.ap, i) for i in metric_names]

        self.evaluator = Evaluator()
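        # Anchors with four values describe bounding boxes; anything else is treated as bounding circles.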
        if (self.anchors.shape[-1]==4):
            self.boundingObjects = BoundingBoxes()
        else:
            self.boundingObjects = BoundingCircles()
Example #28
    def drawAllBoundingBoxes(self, gt, image_folder: Path, iou_thresh: float=0.25):

        font = cv2.FONT_HERSHEY_SIMPLEX
        fontScale = 0.5
        fontThickness = 1

        evaluator = Evaluator()
        boundingBoxes = self._extract_bounding_boxes(gt)

        images = {}

        for image_id in gt.images:

            image = np.zeros((376, 256,3), np.uint8)
            image[256:376, 0:256, :] = 255
            image[0:256, 0:256, :] = cv2.imread(str(image_folder/image_id))

            bbxesImage = BoundingBoxes()
            bbxes = boundingBoxes.getBoundingBoxesByImageName(image_id)

            for bb in bbxes:
                bbxesImage.addBoundingBox(bb)

                x1, y1, x2, y2 = bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2)

                color = self.colors[bb.getClassId()]
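                # Ground-truth boxes are drawn as an X of crossed lines, detections as rectangles.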
                if bb.getBBType() == BBType.GroundTruth:
                    cv2.line(image, (x1, y1), (x2, y2), color, 2)
                    cv2.line(image, (x2, y1), (x1, y2), color, 2)
                else:
                    cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)

            metrics_per_class = evaluator.GetPascalVOCMetrics(bbxesImage, iou_thresh)

            for mc in metrics_per_class:
                cv2.putText(image, "Grade: {} mAP: {:01.2f}".format(mc['class'], mc['AP']),
                            (10, 270 + int(20 * mc['class'])),
                            font, fontScale, self.colors[mc['class']], fontThickness, cv2.LINE_AA)

            cv2.putText(image, "mAP: {:01.2f}".format( np.mean([np.nan_to_num(mc['AP']) for mc in metrics_per_class])),
                        (10, 365),
                        font, fontScale, (0,0,0), fontThickness, cv2.LINE_AA)

            images[image_id] = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return images
Example #29
File: gui-main.py  Project: LalaXXX/SE_work
    def __init__(self):
        super(MyWindow, self).__init__()
        self.setupUi(self)
        self.tableWidget.setHorizontalHeaderLabels(['Question', 'Answer', 'Correct?'])
        self.timer = QTimer()
        self.label_3.setText('')
        self.label_4.setText('20s')

        self.g = Qg.QuestGenerator()
        self.ev = Evaluator.Evaluator()
        self.submitted = False
        self.correct = False
        self.remaining = 20

        self.pushButton.clicked.connect(self.pushButton_clicked)
        self.pushButton_2.clicked.connect(self.pushButton_2_clicked)
        self.pushButton_3.clicked.connect(self.pushButton_3_clicked)
        self.timer.timeout.connect(self.timer_timeout)
Example #30
def exeConstantBidModel(validationData, trainData=None, train=False, writeResult2CSV=False):
    # Constant Bidding Model
    constantBidModel = BidModels.ConstantBidModel(defaultbid=77)

    if train:
        constantBidModel.trainModel(trainData, searchRange=[1, 300], budget=int(6250*1000*8.88))

    bids = constantBidModel.getBidPrice(validationData.bidid)
    # bids = np.apply_along_axis(constantBidModel.getBidPrice, axis=1, arr=validationData.getTestData())

    if writeResult2CSV:
        ipinyouWriter.ResultWriter().writeResult("resultConstantBidModel.csv", bids)
    # myEvaluator = Evaluator.Evaluator(25000*1000, bids, validationData.getTrainData())
    # myEvaluator.computePerformanceMetrics()

    myEvaluator = Evaluator()
    myEvaluator.computePerformanceMetricsDF(6250 * 1000, bids, validationData)
    myEvaluator.printResult()