Example #1
def partII(classifier):
    setup()
    print("PART II Predict classify by numeric rating 1-5 ")    
    numericTagTraining = [(e["text"], e["overAllRating"]) for e in trainingParagraphs] 
    numericTagTesting = [(e["text"], e["overAllRating"]) for e in testParagraphs]
    featureExtractors = []
    featureExtractors.append(HappySad.featureNumericScore)
    featureExtractors.append(HappySad.featureHitCountBucketed)

    #BASELINE RUN
    print("Running Baseline")
    trainedBaseline = ClassifierRunner.runNfoldCrossValidation(ClassifierRunner.mostCommonTag, numericTagTraining, featureExtractors, 4)
    predictionsBaseline = [c[2] for c in trainedBaseline]
    truthsBaseline = [c[3] for c in trainedBaseline]
    #bRMS = Evaluator.reportAvgBinaryRMS(predictionsBaseline, truthsBaseline)
    predictionsTesting,bAcc = ClassifierRunner.predictTagged(trainedBaseline[0][0], featureExtractors, numericTagTesting)
    truthsTesting = [c[1] for c in numericTagTesting]
    bRMS = Evaluator.rmsBinaryDifference(predictionsTesting, truthsTesting)
    print("BaseLine RMS Error:", bRMS)

    #OUR CLASSIFIER RUN
    trainedClassifiers = ClassifierRunner.runNfoldCrossValidation(classifier, numericTagTraining, featureExtractors, 4)
        
    predictions = [c[2] for c in trainedClassifiers]
    truths = [c[3] for c in trainedClassifiers]
    print("Running most accurate trained classifier on test set")
    predictionsTesting, cAcc = ClassifierRunner.predictTagged(trainedClassifiers[0][0], featureExtractors, numericTagTesting)
    truthsTesting = [c[1] for c in numericTagTesting]
    cRMS = Evaluator.rmsBinaryDifference(predictionsTesting, truthsTesting)
    print("Our RMS Error:", cRMS)
    print("Accuracy improvement over baseline:", cAcc - bAcc)
    print("RMS Error reduction from baseline:", bRMS - cRMS)
    

    return (trainedClassifiers, featureExtractors) # for use in Exercise 2
Example #2
def main():
    """Main function"""


    p = POSTagger(LaPlaceBigramTransModel(0.5), LaPlaceEmissionModel(0.5))
    train_corpus = Corpus("../data/ftb_1.tagged")
    dev_corpus = Corpus("../data/ftb_2.tagged")

    p.train(train_corpus)
    dev_tagged = p.tag(dev_corpus)

    e = Evaluator()
    e.evaluate(dev_corpus, dev_tagged)
Example #3
    def default(self, line):       
        """Called on an input line when the command prefix is not recognized.
           In that case we parse and evaluate the line as mini-lisp code.
        """
        result = yacc.parse(line)
        print "mini-lisp.py: AST is: ", result

        dirtyToken = Evaluator.tokenCleaner(result)
        f = lambda x, y, z : x[z][z:-z]
        cleanedToken = f(result, Evaluator.tokenCleaner(), 1)
        output = Evaluator.evaluate(cleanedToken)

        if output is not None:
           print output
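The default() hook above matches Python's cmd.Cmd interface ("Called on an input line when the command prefix is not recognized" is the cmd.Cmd wording). A minimal, self-contained sketch, independent of the mini-lisp parser and Evaluator above, of when cmd.Cmd routes a line to default():

# Lines that do not match a do_* command are routed to default(); that is
# where the REPL above hands the raw line to yacc.parse and the Evaluator.
import cmd

class EchoShell(cmd.Cmd):
    prompt = '> '

    def do_quit(self, line):
        """A recognized command: typing 'quit' ends the loop."""
        return True

    def default(self, line):
        # Anything else, e.g. "(+ 1 2)", lands here.
        print("unrecognized command, raw line:", line)

# EchoShell().cmdloop()  # uncomment to run the interactive loop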
Example #4
 def configure_options(self):
   super(MR, self).configure_options()
   self.add_file_option('--database', default='test.db')
   self.resultset = {}
   self.audiences = {}
   self.rules = {}
   self.platform = {}
   self.evaluator = Evaluator()
Example #5
def partI(classifier):
    global testParagraphs
    global trainingParagraphs
    global happySadScoredWords
    setup()
    print("PART I Just classify by positive or negative where \"+\" = {4,5} and \"-\" = {1,2,3}")
   # print("Classify by straight HappySad score mapping:")
    #ourtagsAdded = HappySad.happySadClassifier(happySadScoredWords, trainingParagraphs)
   # print("Classify by using HappySad score as features for:")
    
    binaryTagTraining = [(e["text"], "+") if e["overAllRating"] in [4,5] else (e["text"], "-") for e in trainingParagraphs] 
    binaryTagTesting = [(e["text"], "+") if e["overAllRating"] in [4,5] else (e["text"], "-") for e in testParagraphs]
    featureExtractors = []
    featureExtractors.append(HappySad.featureBinaryScore)
    featureExtractors.append(HappySad.featureHitCountBucketed)

    #BASELINE RUN
    print("Running Baseline")
    trainedBaseline = ClassifierRunner.runNfoldCrossValidation(ClassifierRunner.mostCommonTag, binaryTagTraining, featureExtractors, 4)
    predictionsBaseline = [c[2] for c in trainedBaseline]
    truthsBaseline = [c[3] for c in trainedBaseline]
    #bRMS = Evaluator.reportAvgBinaryRMS(predictionsBaseline, truthsBaseline)
    predictionsTesting, bAcc = ClassifierRunner.predictTagged(trainedBaseline[0][0], featureExtractors, binaryTagTesting)
    truthsTesting = [c[1] for c in binaryTagTesting]
    #input(predictionsTesting)
    #input(truthsTesting)
    bRMS = Evaluator.rmsBinaryDifference(predictionsTesting, truthsTesting)
    print("BaseLine RMS Error:", bRMS)

    #OUR CLASSIFIER RUN
    print("Running Our Classifier")
    trainedClassifiers = ClassifierRunner.runNfoldCrossValidation(classifier, binaryTagTraining, featureExtractors, 4)
    predictions = [c[2] for c in trainedClassifiers]
    truths = [c[3] for c in trainedClassifiers]
    print("Running most accurate trained classifier on test set")
    predictionsTesting, cAcc = ClassifierRunner.predictTagged(trainedClassifiers[0][0], featureExtractors, binaryTagTesting)
    truthsTesting = [c[1] for c in binaryTagTesting]
    cRMS = Evaluator.rmsBinaryDifference(predictionsTesting, truthsTesting)
    print("Our RMS Error:", cRMS)
    print("Accuracy improvement over baseline:", cAcc - bAcc)
    print("RMS Error reduction from baseline:", bRMS - cRMS)

    return (trainedClassifiers, featureExtractors) # for use in Exercise 2
Example #6
def partI(classifier):
    setup()
    paragraphClassifier = Iclassifiers[0][0]

    overallTagTraining = [([e["p1"], e["p2"], e["p3"], e["p4"]], "+" if e["overAllRating"] in [4,5] else "-") for e in trainingReviews] 
    overallTagTesting = [([e["p1"], e["p2"], e["p3"], e["p4"]], "+" if e["overAllRating"] in [4,5] else "-") for e in testReviews]    

    #Use ratings of each paragraph from Ex
    def featureParagraphNumericRatings(sample):
        #input(sample)
        Ex1FeatureSet = Extractor.extractAll(sample, IEx1features)
        #input(Ex1FeatureSet)
        rating = [number for data, number in paragraphClassifier(Ex1FeatureSet)]
        #input(rating)
        return {"Food": rating[0], "Service": rating[1], "Venue" : rating[2], "OverallP" : rating[3]}
    featureExtractors = []
    featureExtractors.append(featureParagraphNumericRatings)

    #BASELINE RUN
    print("Running Baseline")
    trainedBaseline = ClassifierRunner.runNfoldCrossValidation(ClassifierRunner.mostCommonTag, overallTagTraining, featureExtractors, 4)
    predictionsBaseline = [c[2] for c in trainedBaseline]
    truthsBaseline = [c[3] for c in trainedBaseline]
    predictionsTesting, bAcc = ClassifierRunner.predictTagged(trainedBaseline[0][0], featureExtractors, overallTagTesting)
    truthsTesting = [c[1] for c in overallTagTesting]
    bRMS = Evaluator.rmsBinaryDifference(predictionsTesting, truthsTesting)
    print("BaseLine RMS Error:", bRMS)
    #bRMS = Evaluator.reportAvgBinaryRMS(predictionsBaseline, truthsBaseline)

    #OUR CLASSIFIER RUN
    trainedClassifiers = ClassifierRunner.runNfoldCrossValidation(classifier, overallTagTraining, featureExtractors, 4)
    predictions = [c[2] for c in trainedClassifiers]
    truths = [c[3] for c in trainedClassifiers]
    print("Running most accurate trained classifier on test set")
    predictionsTesting, cAcc = ClassifierRunner.predictTagged(trainedClassifiers[0][0], featureExtractors, overallTagTesting)
    truthsTesting = [c[1] for c in overallTagTesting]
    #input(predictionsTesting)
    #input(truthsTesting)
    cRMS = Evaluator.rmsBinaryDifference(predictionsTesting, truthsTesting)
    print("Our RMS Error:", cRMS)
    print("Accuracy improvement over baseline:", cAcc - bAcc)
    print("RMS Error reduction from baseline:", bRMS - cRMS)
Example #7
    def evaluateResults(self, query, results):
        """
        A method to get evaluation metrics from the results to the query.

        Metrics and useful data are returned inside a dict:
            key "recallPoints": has a list of interpolated recall points.
            key "MAP": has the MAP metric on the recall points.
            key "P@10": has the precision on point 10 metric.

        param query: util.Query object representing the query. The relevants
        field must not be empty.
        param results: a list with the results to the query; the list is formed
        by tuples of the kind (similarity, util.Document).
        return: a dict containing useful data about the evaluation of the
        result. More detail above.
        """
        relevants = query.relevants
        assert relevants
        assert results

        # get a list with the ids of the documents of the result
        resultIds = [doc.id for sim, doc in results]

        # calculate non interpolated recall point and precision at point 10
        pair = Evaluator.getRecallPointsAndPrecisionAt(relevants, resultIds, point=10)
        recallPoints, pAtTen = pair

        # interpolate recall points to get exactly 11 points
        iRecallPoints = Evaluator.interpolateRecallPoints(recallPoints)

        # we ignore the recall point with recall of zero (slides Baeza-Yates)
        # to calculate interpolated MAP
        MAP = Evaluator.calculateMAP(recallPoints[1:])

        # place useful data in a dict
        evalResults = {}
        evalResults["recallPoints"] = iRecallPoints
        evalResults["P@10"] = pAtTen
        evalResults["MAP"] = MAP
        return evalResults
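For reference, a small self-contained Python 3 sketch (not the project's Evaluator) of how non-interpolated recall points and P@10 can be computed from a ranked result list and the query's relevant ids, i.e. the quantities described in the docstring above:

def precision_recall_points(relevants, result_ids, point=10):
    """Return ((precision, recall) at each relevant hit, precision at `point`)."""
    relevant_set = set(relevants)
    points = []
    hits = 0
    p_at_point = 0.0
    for rank, doc_id in enumerate(result_ids, start=1):
        if doc_id in relevant_set:
            hits += 1
            points.append((hits / rank, hits / len(relevant_set)))
        if rank == point:
            p_at_point = hits / rank
    return points, p_at_point

# Example: relevant docs {1, 4, 9}, ten returned results.
pts, p10 = precision_recall_points([1, 4, 9], [1, 2, 4, 5, 6, 7, 8, 9, 10, 11])
print(pts)   # [(1.0, 0.333...), (0.666..., 0.666...), (0.375, 1.0)]
print(p10)   # 0.3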
Example #8
def partI(classifier):
    print("PART I Classify by author")
    print("Loading Corpus...")
    testReviews, trainingReviews = PreProcess.getByAuthor()
    authorTagTraining = [(e["text"], e["author"]) for e in trainingReviews] 
    authorTagTesting = [(e["text"], e["author"]) for e in testReviews]
    featureExtractors = []
    if classifier == ClassifierRunner.naiveBayes:
        featureExtractors.append(HappySad.featureNumericScore)
        featureExtractors.append(HappySad.featureHitCountBucketed)
        featureExtractors.append(AuthorshipFeatures.typeTokenRatioBucketed)
        featureExtractors.append(AuthorshipFeatures.vocabSizeBucketed)
    else:
        featureExtractors.append(HappySad.featureNumericScore)
        featureExtractors.append(HappySad.featureHitCount)
        featureExtractors.append(AuthorshipFeatures.typeTokenRatio)
        featureExtractors.append(AuthorshipFeatures.vocabSize)

    #BASELINE RUN
    print("Running Baseline")
    trainedBaseline = ClassifierRunner.runNfoldCrossValidation(ClassifierRunner.mostCommonTag, authorTagTraining, featureExtractors, 4)
    predictionsBaseline = [c[2] for c in trainedBaseline]
    truthsBaseline = [c[3] for c in trainedBaseline]
    predictionsTesting,bAcc = ClassifierRunner.predictTagged(trainedBaseline[0][0], featureExtractors, authorTagTesting)
    truthsTesting = [c[1] for c in authorTagTesting]
    bRMS = Evaluator.rmsBinaryDifference(predictionsTesting, truthsTesting)
    print("BaseLine RMS Error:", bRMS)

    #OUR CLASSIFIER RUN
    trainedClassifiers = ClassifierRunner.runNfoldCrossValidation(classifier, authorTagTraining, featureExtractors, 4)
    predictions = [c[2] for c in trainedClassifiers]
    truths = [c[3] for c in trainedClassifiers]
    print("Running most accurate trained classifier on test set")
    predictionsTesting, cAcc = ClassifierRunner.predictTagged(trainedClassifiers[0][0], featureExtractors, authorTagTesting)
    truthsTesting = [c[1] for c in authorTagTesting]
    cRMS = Evaluator.rmsBinaryDifference(predictionsTesting, truthsTesting)
    Evaluator.createConfusionMatrix([t for d,t in authorTagTraining], predictionsTesting, truthsTesting)
    print("Our RMS Error:", cRMS)
    print("Accuracy improvement over baseline:", cAcc - bAcc)
    print("RMS Error reduction from baseline:", bRMS - cRMS)
Example #9
def menuQueryFile(eng, queryFile, rankingSize):
    eng = loadIndexWrapper(eng)

    if not queryFile:
        print("Please enter the path to the cfc query file using the -in argument")
        sys.exit(-1)

    print("ranking size: {}".format(rankingSize))

    MAPs = []
    recallPointsLst = []
    pAtTens = []
    times = []
    try:
        print("query id ; P@10 ; interpolated MAP ; time (s)")
        for query in eng.parser.parseQueryFile(queryFile):
            start = getTime()
            results, evalResults = eng.processQuery(query, rankingSize,
                    evaluate=True)
            end = getTime() - start

            MAPs.append(evalResults["MAP"])
            recallPointsLst.append(evalResults["recallPoints"])
            pAtTens.append(evalResults["P@10"])
            times.append(end)
            print("{:03d} ; {:.5f} ; {:.5f} ; {:.5f} "
                    .format(query.id, pAtTens[-1], MAPs[-1], times[-1]))
    except IOError as e:
        print("Could not open the cfc query file at: {}.".format(queryFile))
        print(e)
        sys.exit(-1)

    avgRecallPoints = Evaluator.getAverageRecallPoints(recallPointsLst)
    avgMAP = sum(MAPs) / len(MAPs)
    avgPAtTen = sum(pAtTens) / len(pAtTens)
    avgTime = sum(times) / len(times)
    print("\nAverages:")

    print("\tP@10: {:.5f}".format(avgPAtTen))
    print("\tinterpolated MAP: {:.5f}".format(avgMAP))
    print("\ttime: {:.5f} s".format(avgTime))

    print("\tinterpolated recall points (precision, recall):")
    for pair in avgRecallPoints:
        p, r = pair
        print("\t({:.5f}, {:.5f}),".format (p, r))
Example #10
class MR(MRJob):
  
  def connect_to(self, db_file):
    self.options.database = db_file

  def configure_options(self):
    super(MR, self).configure_options()
    self.add_file_option('--database', default='test.db')
    self.resultset = {}
    self.audiences = {}
    self.rules = {}
    self.platform = {}
    self.evaluator = Evaluator()



  def mapper_init(self):
    try:
      print "Connecting to the database '{0}' ..".format(self.options.database)
      self.sqlite_conn = lite.connect(self.options.database) 
      self.sqlite_conn.row_factory = lite.Row 
      self.cur = self.sqlite_conn.cursor()
      self.cur.execute("""
                    SELECT A.name, R.condition, R.platform 
                    FROM audience A JOIN audience_rule R ON (A.id = R.audience_id)
                    -- WHERE R.id = 1
                    """)
      self.resultset = self.cur.fetchall()
      for idx, item in enumerate(self.resultset):
        self.rules[idx] = item['condition']
        self.audiences[idx] = item['name'] 
        self.platform[idx] = item['platform']
      print "Connected!"  
    except:
      print "Connetion failed!"
      print "Exiting.."
      sys.exit()
    
  def mapper(self, _, line):

    try:
      event = json.loads(line)
      if event["TMEvent"] and int(event["TMEvent"]) == 24:
        for idx in range(len(self.rules)):
          # if condition is satisfied  
          if ( self.evaluator.evaluate(self.rules[idx], event, self.platform[idx]) ):
            
            yield (event["installID"], self.audiences[idx])  
            # yield (self.audiences[idx], event["installID"])
          else:
            yield (event["installID"],_)
            # yield ("nothing",event["installID"])

    except:
      # yield ("error", 1)
      pass

  def combiner(self, key, value):
    combinedList = []
    for val in value:
      if val:
        if val not in combinedList and val != 'None':
          combinedList.append(val)
    yield (key, combinedList)


  def reducer(self, key, values):
    
    audList = []
    for value in values:
      for val in value:
        if val not in audList:
          audList.append(val)
    yield (key, audList)
Example #11
    def begintest(self, iteration=0):
        # model_R_p2p = svm_load_model(
        #     os.path.join(
        #         self.config.get('REID', 'REWARD_MODEL_SAVED_PATH'),
        #         'model_r_p2p.model'))
        # model_R_p2G = svm_load_model(
        #     os.path.join(
        #         self.config.get('REID', 'REWARD_MODEL_SAVED_PATH'),
        #         'model_r_p2g.model'))
        # model_R_G2G = svm_load_model(
        #     os.path.join(
        #         self.config.get('REID', 'REWARD_MODEL_SAVED_PATH'),
        #         'model_r_g2g.model'))
        with open(
                os.path.join(
                    self.config.get('REID', 'REWARD_MODEL_SAVED_PATH'),
                    'model_r_p2p.model')) as f:
            model_R_p2p = pickle.load(f)
        with open(
                os.path.join(
                    self.config.get('REID', 'REWARD_MODEL_SAVED_PATH'),
                    'model_r_p2g.model')) as f:
            model_R_p2G = pickle.load(f)
        with open(
                os.path.join(
                    self.config.get('REID', 'REWARD_MODEL_SAVED_PATH'),
                    'model_r_g2g.model')) as f:
            model_R_G2G = pickle.load(f)

        is_first_iteration = False

        model_dir = self.config.get('REID', 'REWARD_MODEL_SAVED_PATH')
        if os.path.exists(os.path.join(model_dir, 'model_q_p2p.model')):
            model_Q_p2p = xgb.Booster(
                model_file=os.path.join(model_dir, 'model_q_p2p.model'))
            model_Q_p2G = xgb.Booster(
                model_file=os.path.join(model_dir, 'model_q_p2g.model'))
            model_Q_G2G = xgb.Booster(
                model_file=os.path.join(model_dir, 'model_q_g2g.model'))
        else:
            is_first_iteration = True

        data = list(list())
        data.append([0, 0, 0])
        data_Q = list(list())
        data_Q.append([0, 0, 0])
        index = 0
        reward = 0
        decision = Dicision.Dicision()
        t01 = time.time()

        while self.frame.checkState(check_batch=True):
            package = self.frame.getObservation()
            index += 1
            if type(package) == int:
                print 'Done!'
                break
            data[0] = package
            question_type = len(package)
            model = None
            if question_type == 3:  #point-----point
                if not is_first_iteration:
                    model = model_Q_p2p.copy()
                tp = 'P2P'
                #Reward Function
                # action_R, _, confidence = svm_predict([0], data, model_R_p2p,
                #                                       '-b 1 -q')
                # confidence = model_R_p2p.predict_proba(data)
                w = model_R_p2p.coef_[0]
                b = model_R_p2p.intercept_[0]
                #Reward Value Function: action = 0
                temp = package[:]
                temp.insert(0, 0)
                data_Q[0] = temp
                DM_data = xgb.DMatrix(np.array(data_Q))
                if not is_first_iteration:
                    value_0 = model_Q_p2p.predict(DM_data)
                else:
                    value_0 = [random.random()]
                del temp[0]
                #Reward Value Function: action = 1
                temp.insert(0, 1)
                data_Q[0] = temp
                DM_data = xgb.DMatrix(np.array(data_Q))
                if not is_first_iteration:
                    value_1 = model_Q_p2p.predict(DM_data)
                else:
                    value_1 = [random.random()]
                #choose the most awarded action
                if value_1[0] >= value_0[0]:
                    action = [1]
                else:
                    action = [0]

            elif question_type == 3 + self.frame.k_size:  #point-----Group or group---point
                if not is_first_iteration:
                    model = model_Q_p2G.copy()
                tp = 'P2G'
                #Reward Function
                # action_R, _, confidence = svm_predict([0], data, model_R_p2G,
                #                                       '-b 1 -q')
                # confidence = model_R_p2G.predict_proba(data)
                w = model_R_p2G.coef_[0]
                b = model_R_p2G.intercept_[0]
                #Reward Value Function: action = 0
                temp = package[:]
                temp.insert(0, 0)
                data_Q[0] = temp
                DM_data = xgb.DMatrix(np.array(data_Q))
                if not is_first_iteration:
                    value_0 = model_Q_p2G.predict(DM_data)
                else:
                    value_0 = [random.random()]
                del temp[0]
                #Reward Value Function: action = 1
                temp.insert(0, 1)
                data_Q[0] = temp
                DM_data = xgb.DMatrix(np.array(data_Q))
                if not is_first_iteration:
                    value_1 = model_Q_p2G.predict(DM_data)
                else:
                    value_1 = [random.random()]
                #choose the most awarded action
                if value_1[0] >= value_0[0]:
                    action = [1]
                else:
                    action = [0]
            else:
                if not is_first_iteration:
                    model = model_Q_G2G.copy()
                tp = 'G2G'
                #Reward Function
                # action_R, _, confidence = svm_predict([0], data, model_R_G2G,
                #                                       '-b 1 -q')
                # confidence = model_R_G2G.predict_proba(data)
                w = model_R_G2G.coef_[0]
                b = model_R_G2G.intercept_[0]
                #Reward Value Function: action = 0
                temp = package[:]
                temp.insert(0, 0)
                data_Q[0] = temp
                DM_data = xgb.DMatrix(np.array(data_Q))
                if not is_first_iteration:
                    value_0 = model_Q_G2G.predict(DM_data)
                else:
                    value_0 = [random.random()]
                del temp[0]
                #Reward Value Function: action = 1
                temp.insert(0, 1)
                data_Q[0] = temp
                DM_data = xgb.DMatrix(np.array(data_Q))
                if not is_first_iteration:
                    value_1 = model_Q_G2G.predict(DM_data)
                else:
                    value_1 = [random.random()]
                #choose the most awarded action
                if value_1[0] > value_0[0]:
                    action = [1]
                else:
                    action = [0]
            # get the original operation count
            # t-lambda processing  (iteration in [1,400])
            if random.random() >= (0.025 * iteration):
                action = [random.randint(0, 1)]

            # get reward of the action
            # reward_action = 10 * abs(2 * confidence[0] - 1)
            reward_action = abs(np.sum(np.multiply(w, package)) + b)
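            # abs(w . x + b) is the sample's (unnormalized) distance from the
            # SVM decision boundary, used here as a confidence-like reward magnitude.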

            #get the variance of operate number
            self.frame.Normalize_label()
            operatenum_pre = Evaluator.evaluate(self.dataset.imgID,
                                                self.frame.label, [0])

            #check the action is True or False
            action_result = self.frame.setPerception(action, save=False)
            if action_result == False:
                reward_action = -reward_action
            #save history
            self.puthistory(package, action, reward_action, operatenum_pre,
                            model)

        if not self.inference:
            #calculate Metric
            self.frame.Normalize_label()
            self.Recall = Evaluate.Recall(self.dataset.imgID, self.frame.label)
            self.Precision = Evaluate.Precision(self.dataset.imgID,
                                                self.frame.label)
            self.operatenum = Evaluator.evaluate(self.dataset.imgID,
                                                 self.frame.label, [0])
            self.Recall_edge = Evaluate.Recall_edge(self.dataset.imgID,
                                                    self.frame.label, 0)
            self.Precision_edge = Evaluate.Precision_edge(
                self.dataset.imgID, self.frame.label)
            print self.dataset.size, self.Recall_edge, self.Precision_edge, self.operatenum
            with open(
                    os.path.join(
                        self.config.get('REID', 'REWARD_MODEL_SAVED_PATH'),
                        'xgboost_output_nstepsarsa_origin.log'), 'a') as f:
                f.write('{}, {}, {}, {}\n'.format(self.dataset.size,
                                                  self.Recall_edge,
                                                  self.Precision_edge,
                                                  self.operatenum))
Example #12
 #iris
 distributions_dictionary = NBParser.naive_bayes_load_distributions(
     NBParser.naive_bayes_iris_distributions_file_name)
 validation_set = NBParser.naive_bayes_load_validation_instances(
     NBParser.naive_bayes_iris_validation_instances_file_name)
 classes_labels = ["Iris Setosa", "Iris Versicolour", "Iris Virginica"]
 classes_distributions = {
     "Iris Setosa": 1 / 3,
     "Iris Versicolour": 1 / 3,
     "Iris Virginica": 1 / 3
 }
 classified_data = naive_bayes_classify_dataset(classes_distributions,
                                                distributions_dictionary,
                                                classes_labels,
                                                validation_set)
 Evaluator.evaluate_classifier(classified_data, classes_labels,
                               'naive_bayes_exp/iris.data')
 #covtype
 distributions_dictionary = NBParser.naive_bayes_load_distributions(
     NBParser.naive_bayes_covtype_distributions_file_name)
 validation_set = NBParser.naive_bayes_load_validation_instances(
     NBParser.naive_bayes_covtype_validation_instances_file_name)
 classes_labels = [
     "Spruce/Fir", "Lodgepole Pine", "Ponderosa Pine", "Cottonwood/Willow",
     "Aspen", "Douglas-fir", "Krummholz"
 ]
 classes_distributions = {
     "Spruce/Fir": 211840 / 581012,
     "Lodgepole Pine": 283301 / 581012,
     "Ponderosa Pine": 35754 / 581012,
     "Cottonwood/Willow": 2747 / 581012,
     "Aspen": 9493 / 581012,
Example #13
def main(args):
    print(args)

    settings = Settings.Settings(args)

    # We already did these
    # ResNet50 and indices: 5, 2, 7, 3 (doing ? r.n.)
    settings.TestDataset_Fold_Index = int(args.FOLD_I)  # can be 0 to 9 (K-1)
    settings.TestDataset_K_Folds = int(args.KFOLDS)
    assert settings.TestDataset_Fold_Index < settings.TestDataset_K_Folds
    kfold_txt = "KFold_" + str(settings.TestDataset_Fold_Index) + "z" + str(
        settings.TestDataset_K_Folds)
    print(kfold_txt)

    # resnet 101 approx 5-6 hours (per fold - might be a bit less ...)
    # resnet 50  approx 3-4 hours
    model_txt = "cleanManual_" + args.train_epochs + "ep_ImagenetWgenetW_" + args.model_backend + "-" + args.train_batch + "batch_Augmentation1to1_ClassWeights1to3_TestVal"
    print(model_txt)

    dataset = Dataset.Dataset(settings)
    evaluator = Evaluator.Evaluator(settings)

    # settings.run_name = settings.run_name + "AYRAN"
    show = False
    save = True

    # dataset.dataset
    settings.model_backend = args.model_backend
    settings.train_epochs = int(args.train_epochs)
    settings.train_batch = int(args.train_batch)
    model = ModelHandler.ModelHandler(settings, dataset)

    model.model.train(show=show, save=save)

    # Model 2 ...

    # TODO Note:
    # - change settings.run_name to have saved plots
    # write down:
    # - model bottom (resnet34 ?)
    # - initial weights (imagenet ?)
    # - used augmentation ?
    # - epoch number
    # - class weights changed ?
    # - ... any other special cool thing ...

    # K-Fold_Crossval:
    # model.model.save("/scratch/ruzicka/python_projects_large/ChangeDetectionProject_files/weightsModel2_"+model_txt+"_["+kfold_txt+"].h5")
    model.model.save(
        "/scratch/ruzicka/python_projects_large/ChangeDetectionProject_files/weightsModel2_"
        + model_txt + "_[" + kfold_txt + "].h5")

    # Next = train Resnet50 on the same dataset without the whole STRIP2 (to have some large Test images)

    # model.model.load("/scratch/ruzicka/python_projects_large/ChangeDetectionProject_files/weightsModel2_cleanManual_100ep_ImagenetWgenetW_seresnext50-8batch_Augmentation1to1_ClassWeights1to3.h5")

    # model.model.load("/scratch/ruzicka/python_projects_large/ChangeDetectionProject_files/weightsModel2_cleanManual-noStrip2_100ep_ImagenetWgenetW_resnet50-16batch_Augmentation1to1_ClassWeights1to3.h5")
    # model.model.load("/scratch/ruzicka/python_projects_large/ChangeDetectionProject_files/weightsModel2_cleanManual_100ep_ImagenetWgenetW_resnet101-8batch_Augmentation1to1_ClassWeights1to3.h5")

    # Senet154 crashed, 10hrs train + Imagenet weights + Data Aug 1:1 + Class weight 1:3
    # model.model.load("/scratch/ruzicka/python_projects_large/ChangeDetectionProject_files/weightsModel2_cleanManual_XYZep_ImagenetW_senet154-4batch_Augmentation1to1_ClassWeights1to3_early_stop_save_26mar-7am(cca10hrs).h5")

    # Seresnet34 + Imagenet weights + Data Aug 1:1 + Class weight 1:3
    # model.model.load("/scratch/ruzicka/python_projects_large/ChangeDetectionProject_files/weightsModel2_cleanManual_100ep_ImagenetWgenetW_seresnet34_Augmentation1to1_ClassWeights1to3.h5")

    # Resnet50 (batch 16) + Imagenet weights + Data Aug 1:1 + Class weight 1:3
    # model.model.load("/scratch/ruzicka/python_projects_large/ChangeDetectionProject_files/weightsModel2_cleanManual_100ep_ImagenetWgenetW_resnet50-16batch_Augmentation1to1_ClassWeights1to3.h5")

    # Resnet34 + Imagenet weights + Data Aug 1:1 + Class weight 1:3
    # model.model.load("/scratch/ruzicka/python_projects_large/ChangeDetectionProject_files/weightsModel2_cleanManual_100ep_ImagenetW_Resnet34_Augmentation1to1_ClassWeights1to3.h5")

    # Resnet34 + Imagenet weights + No Data Aug + Class weight 1:3
    # model.model.load("/scratch/ruzicka/python_projects_large/ChangeDetectionProject_files/weightsModel2_cleanManual_100ep_ImagenetBase.h5")
    # model.model.load("/scratch/ruzicka/python_projects_large/ChangeDetectionProject_files/weightsModel2_cleanManual_54ep_ImagenetBase_best_so_far_for_eastly_stops.h5") # early stop at 54 ep

    # Resnet34 + Custom DSM weights + No Data Aug + Class weight 1:3
    # model.model.load("/scratch/ruzicka/python_projects_large/ChangeDetectionProject_files/weightsModel2_cleanManual_100ep_CustomDSMBase.h5")
    # model.model.load("/scratch/ruzicka/python_projects_large/ChangeDetectionProject_files/weightsModel2_cleanManual_49ep_CustomDSMBase_best_so_far_for_eastly_stops.h5")

    # model.model.load("/scratch/ruzicka/python_projects_large/ChangeDetectionProject_files/weightsModel2_cleanManual_25ep_ImagenetFrozenEnc.h5") # 26,428,523 > 5,139,429 trainable params - faster?
    # model.model.load("/scratch/ruzicka/python_projects_large/ChangeDetectionProject_files/weightsModel2_cleanManual_.h5")
    # ...

    SAVE_ALL_FOLDER = model_txt + "PLOTS/"
    SAVE_ALL_PLOTS = SAVE_ALL_FOLDER + "plot"
    # DEBUG_SAVE_ALL_THR_PLOTS = None
    if not os.path.exists(SAVE_ALL_FOLDER):
        os.makedirs(SAVE_ALL_FOLDER)

    evaluator.unified_test_report([model.model.model],
                                  dataset.test,
                                  validation_set=dataset.val,
                                  postprocessor=model.model.dataPreprocesser,
                                  name=SAVE_ALL_PLOTS,
                                  optionally_save_missclassified=True)
Example #14
# print('detFormat = %s' % detFormat)
# print('gtFolder = %s' % gtFolder)
# print('detFolder = %s' % detFolder)
# print('gtCoordType = %s' % gtCoordType)
# print('detCoordType = %s' % detCoordType)
# print('showPlot %s' % showPlot)

# Get groundtruth boxes
allBoundingBoxes, allClasses = getBoundingBoxes(
    gtFolder, True, gtFormat, gtCoordType, imgSize=imgSize)
# Get detected boxes
allBoundingBoxes, allClasses = getBoundingBoxes(
    detFolder, False, detFormat, detCoordType, allBoundingBoxes, allClasses, imgSize=imgSize)
allClasses.sort()

evaluator = Evaluator()
acc_AP = 0
validClasses = 0

# Plot Precision x Recall curve
# detections = evaluator.PlotPrecisionRecallCurve(
#     allBoundingBoxes,  # Object containing all bounding boxes (ground truths and detections)
#     IOUThreshold=iouThreshold,  # IOU threshold
#     method=MethodAveragePrecision.EveryPointInterpolation,
#     showAP=True,  # Show Average Precision in the title of the plot
#     showInterpolatedPrecision=False,  # Don't plot the interpolated precision curve
#     savePath=savePath,
#     showGraphic=showPlot)

# Plot only Average IOU for video, i.e., IOU per bounding box, Average IOU per image.
# average_iou = evaluator.GetIOU(allBoundingBoxes)
Example #15
def Pascal():
    # Validate formats
    def ValidateFormats(argFormat, argName, errors):
        if argFormat == 'xywh':
            return BBFormat.XYWH
        elif argFormat == 'xyrb':
            return BBFormat.XYX2Y2
        elif argFormat is None:
            return BBFormat.XYWH  # default when nothing is passed
        else:
            errors.append(
                'argument %s: invalid value. It must be either \'xywh\' or \'xyrb\''
                % argName)

    # Validate mandatory args
    def ValidateMandatoryArgs(arg, argName, errors):
        if arg is None:
            errors.append('argument %s: required argument' % argName)
        else:
            return True

    def ValidateImageSize(arg, argName, argInformed, errors):
        errorMsg = 'argument %s: required argument if %s is relative' % (
            argName, argInformed)
        ret = None
        if arg is None:
            errors.append(errorMsg)
        else:
            arg = arg.replace('(', '').replace(')', '')
            args = arg.split(',')
            if len(args) != 2:
                errors.append(
                    '%s. It must be in the format \'width,height\' (e.g. \'600,400\')'
                    % errorMsg)
            else:
                if not args[0].isdigit() or not args[1].isdigit():
                    errors.append(
                        '%s. It must be integers in the format \'width,height\' (e.g. \'600,400\')'
                        % errorMsg)
                else:
                    ret = (int(args[0]), int(args[1]))
        return ret

    # Validate coordinate types
    def ValidateCoordinatesTypes(arg, argName, errors):
        if arg == 'abs':
            return CoordinatesType.Absolute
        elif arg == 'rel':
            return CoordinatesType.Relative
        elif arg is None:
            return CoordinatesType.Absolute  # default when nothing is passed
        errors.append(
            'argument %s: invalid value. It must be either \'rel\' or \'abs\''
            % argName)

    def ValidatePaths(arg, nameArg, errors):
        if arg is None:
            errors.append('argument %s: invalid directory' % nameArg)
        elif os.path.isdir(arg) is False and os.path.isdir(
                os.path.join(currentPath, arg)) is False:
            errors.append('argument %s: directory does not exist \'%s\'' %
                          (nameArg, arg))
        # elif os.path.isdir(os.path.join(currentPath, arg)) is True:
        #     arg = os.path.join(currentPath, arg)
        else:
            arg = os.path.join(currentPath, arg)
        return arg

    def getBoundingBoxes(directory,
                         isGT,
                         bbFormat,
                         coordType,
                         allBoundingBoxes=None,
                         allClasses=None,
                         imgSize=(0, 0)):
        """Read txt files containing bounding boxes (ground truth and ss)."""
        if allBoundingBoxes is None:
            allBoundingBoxes = BoundingBoxes()
        if allClasses is None:
            allClasses = []
        # Read ground truths
        os.chdir(directory)
        files = glob.glob("*.txt")
        files.sort()
        # Read GT detections from txt file
        # Each line of the files in the groundtruths folder represents a ground truth bounding box
        # (bounding boxes that a detector should detect)
        # Each value of each line is  "class_id, x, y, width, height" respectively
        # Class_id represents the class of the bounding box
        # x, y represents the most top-left coordinates of the bounding box
        # x2, y2 represents the most bottom-right coordinates of the bounding box
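        # Hypothetical example lines (space separated, matching the parsing below):
        #   ground truth line: "person 25 16 38 56"
        #   detection line:    "person 0.88 25 16 38 56"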
        for f in files:
            nameOfImage = f.replace(".txt", "")
            fh1 = open(f, "r")
            for line in fh1:
                line = line.replace("\n", "")
                if line.replace(' ', '') == '':
                    continue
                splitLine = line.split(" ")
                if isGT:
                    # idClass = int(splitLine[0]) #class
                    idClass = (splitLine[0])  # class
                    x = float(splitLine[1])
                    y = float(splitLine[2])
                    w = float(splitLine[3])
                    h = float(splitLine[4])
                    bb = BoundingBox(nameOfImage,
                                     idClass,
                                     x,
                                     y,
                                     w,
                                     h,
                                     coordType,
                                     imgSize,
                                     BBType.GroundTruth,
                                     format=bbFormat)
                else:
                    # idClass = int(splitLine[0]) #class
                    idClass = (splitLine[0])  # class
                    confidence = float(splitLine[1])
                    x = float(splitLine[2])
                    y = float(splitLine[3])
                    w = float(splitLine[4])
                    h = float(splitLine[5])
                    bb = BoundingBox(nameOfImage,
                                     idClass,
                                     x,
                                     y,
                                     w,
                                     h,
                                     coordType,
                                     imgSize,
                                     BBType.Detected,
                                     confidence,
                                     format=bbFormat)
                allBoundingBoxes.addBoundingBox(bb)
                if idClass not in allClasses:
                    allClasses.append(idClass)
            fh1.close()
        return allBoundingBoxes, allClasses

    # Get current path to set default folders
    currentPath = os.path.dirname(os.path.abspath(__file__))

    VERSION = '0.1 (beta)'

    parser = argparse.ArgumentParser(
        prog='Object Detection Metrics - Pascal VOC',
        description=
        'This project applies the most popular metrics used to evaluate object detection '
        'algorithms.\nThe current implementation runs the Pascal VOC metrics.\nFor further references, '
        'please check:\nhttps://github.com/rafaelpadilla/Object-Detection-Metrics',
        epilog="Developed by: Rafael Padilla ([email protected])")
    # formatter_class=RawTextHelpFormatter)
    parser.add_argument('-v',
                        '--version',
                        action='version',
                        version='%(prog)s ' + VERSION)
    # Positional arguments
    # Mandatory
    parser.add_argument(
        '-gt',
        '--gtfolder',
        dest='gtFolder',
        default=os.path.join(currentPath, 'groundtruths'),
        metavar='',
        help='folder containing your ground truth bounding boxes')
    parser.add_argument('-det',
                        '--detfolder',
                        dest='detFolder',
                        default=os.path.join(currentPath, 'detections'),
                        metavar='',
                        help='folder containing your detected bounding boxes')
    # Optional
    parser.add_argument('-t',
                        '--threshold',
                        dest='iouThreshold',
                        type=float,
                        default=0.5,
                        metavar='',
                        help='IOU threshold. Default 0.5')
    parser.add_argument(
        '-gtformat',
        dest='gtFormat',
        metavar='',
        default='xywh',
        help='format of the coordinates of the ground truth bounding boxes: '
        '(\'xywh\': <left> <top> <width> <height>)'
        ' or (\'xyrb\': <left> <top> <right> <bottom>)')
    parser.add_argument(
        '-detformat',
        dest='detFormat',
        metavar='',
        default='xywh',
        help='format of the coordinates of the detected bounding boxes '
        '(\'xywh\': <left> <top> <width> <height>) '
        'or (\'xyrb\': <left> <top> <right> <bottom>)')
    parser.add_argument(
        '-gtcoords',
        dest='gtCoordinates',
        default='abs',
        metavar='',
        help='reference of the ground truth bounding box coordinates: absolute '
        'values (\'abs\') or relative to its image size (\'rel\')')
    parser.add_argument(
        '-detcoords',
        default='abs',
        dest='detCoordinates',
        metavar='',
        help='reference of the detected bounding box coordinates: '
        'absolute values (\'abs\') or relative to its image size (\'rel\')')
    parser.add_argument(
        '-imgsize',
        dest='imgSize',
        metavar='',
        help='image size. Required if -gtcoords or -detcoords are \'rel\'')
    parser.add_argument('-sp',
                        '--savepath',
                        dest='savePath',
                        metavar='',
                        help='folder where the plots are saved')
    parser.add_argument('-np',
                        '--noplot',
                        dest='showPlot',
                        action='store_false',
                        help='no plot is shown during execution')
    args = parser.parse_args()

    iouThreshold = args.iouThreshold

    # Arguments validation
    errors = []
    # Validate formats
    gtFormat = ValidateFormats(args.gtFormat, '-gtformat', errors)
    detFormat = ValidateFormats(args.detFormat, '-detformat', errors)
    # Groundtruth folder
    if ValidateMandatoryArgs(args.gtFolder, '-gt/--gtfolder', errors):
        gtFolder = ValidatePaths(args.gtFolder, '-gt/--gtfolder', errors)
    else:
        # errors.pop()
        gtFolder = os.path.join(currentPath, 'groundtruths')
        if os.path.isdir(gtFolder) is False:
            errors.append('folder %s not found' % gtFolder)
    # Coordinates types
    gtCoordType = ValidateCoordinatesTypes(args.gtCoordinates,
                                           '-gtCoordinates', errors)
    detCoordType = ValidateCoordinatesTypes(args.detCoordinates,
                                            '-detCoordinates', errors)
    imgSize = (0, 0)
    if gtCoordType == CoordinatesType.Relative:  # Image size is required
        imgSize = ValidateImageSize(args.imgSize, '-imgsize', '-gtCoordinates',
                                    errors)
    if detCoordType == CoordinatesType.Relative:  # Image size is required
        imgSize = ValidateImageSize(args.imgSize, '-imgsize',
                                    '-detCoordinates', errors)
    # Detection folder
    if ValidateMandatoryArgs(args.detFolder, '-det/--detfolder', errors):
        detFolder = ValidatePaths(args.detFolder, '-det/--detfolder', errors)
    else:
        # errors.pop()
        detFolder = os.path.join(currentPath, 'detections')
        if os.path.isdir(detFolder) is False:
            errors.append('folder %s not found' % detFolder)
    if args.savePath is not None:
        savePath = ValidatePaths(args.savePath, '-sp/--savepath', errors)
    else:
        savePath = os.path.join(currentPath, 'results')
    # Validate savePath
    # If error, show error messages
    if len(errors) != 0:
        print(
            """usage: Object Detection Metrics [-h] [-v] [-gt] [-det] [-t] [-gtformat]
                                    [-detformat] [-save]""")
        print('Object Detection Metrics: error(s): ')
        [print(e) for e in errors]
        sys.exit()

    # Create directory to save results
    shutil.rmtree(savePath, ignore_errors=True)  # Clear folder
    os.makedirs(savePath)
    # Show plot during execution
    showPlot = args.showPlot

    # print('iouThreshold= %f' % iouThreshold)
    # print('savePath = %s' % savePath)
    # print('gtFormat = %s' % gtFormat)
    # print('detFormat = %s' % detFormat)
    # print('gtFolder = %s' % gtFolder)
    # print('detFolder = %s' % detFolder)
    # print('gtCoordType = %s' % gtCoordType)
    # print('detCoordType = %s' % detCoordType)
    # print('showPlot %s' % showPlot)

    # Get groundtruth boxes
    allBoundingBoxes, allClasses = getBoundingBoxes(gtFolder,
                                                    True,
                                                    gtFormat,
                                                    gtCoordType,
                                                    imgSize=imgSize)
    # Get detected boxes
    allBoundingBoxes, allClasses = getBoundingBoxes(detFolder,
                                                    False,
                                                    detFormat,
                                                    detCoordType,
                                                    allBoundingBoxes,
                                                    allClasses,
                                                    imgSize=imgSize)
    allClasses.sort()

    evaluator = Evaluator()
    acc_AP = 0
    validClasses = 0

    # Plot Precision x Recall curve
    detections = evaluator.PlotPrecisionRecallCurve(
        allBoundingBoxes,  # Object containing all bounding boxes (ground truths and detections)
        IOUThreshold=iouThreshold,  # IOU threshold
        method=MethodAveragePrecision.EveryPointInterpolation,
        showAP=True,  # Show Average Precision in the title of the plot
        showInterpolatedPrecision=False,  # Don't plot the interpolated precision curve
        savePath=savePath,
        showGraphic=showPlot)

    f = open(os.path.join(savePath, 'results.txt'), 'w')
    f.write('Object Detection Metrics\n')
    f.write('https://github.com/rafaelpadilla/Object-Detection-Metrics\n\n\n')
    f.write('Average Precision (AP), Precision and Recall per class:')

    # each detection is a class
    for metricsPerClass in detections:

        # Get metric values per each class
        cl = metricsPerClass['class']
        ap = metricsPerClass['AP']
        precision = metricsPerClass['precision']
        recall = metricsPerClass['recall']
        totalPositives = metricsPerClass['total positives']
        total_TP = metricsPerClass['total TP']
        total_FP = metricsPerClass['total FP']

        if totalPositives > 0:
            validClasses = validClasses + 1
            acc_AP = acc_AP + ap
            prec = ['%.2f' % p for p in precision]
            rec = ['%.2f' % r for r in recall]
            ap_str = "{0:.2f}%".format(ap * 100)
            # ap_str = "{0:.4f}%".format(ap * 100)
            print('AP: %s (%s)' % (ap_str, cl))
            f.write('\n\nClass: %s' % cl)
            f.write('\nAP: %s' % ap_str)
            f.write('\nPrecision: %s' % prec)
            f.write('\nRecall: %s' % rec)

    mAP = acc_AP / validClasses
    mAP_str = "{0:.2f}%".format(mAP * 100)
    print('mAP: %s' % mAP_str)
    f.write('\n\n\nmAP: %s' % mAP_str)
    f.close()
    # Copy results to result_path
    return (mAP_str)
Example #16
def main(args):
    import keras.backend as K

    print(args)

    settings = Settings.Settings(args)
    settings.TestDataset_Fold_Index = 0
    settings.TestDataset_K_Folds = 5
    settings.model_backend = args.model_backend
    settings.train_batch = args.train_batch
    settings.train_epochs = args.train_epochs

    dataset = Dataset.Dataset(settings)
    evaluator = Evaluator.Evaluator(settings)

    show = False
    save = True

    model_h = ModelHandler.ModelHandler(settings, dataset)
    model_h.model.load(args.one_model_path)
    model = model_h.model.model

    """ # One could also reload all the weights manually ...
    # care about a model inside a model!
    weights_list = []
    for i, layer in enumerate(model.layers[3:]):
        weights_list.append(layer.get_weights())
    for i, layer in enumerate(model.layers[3:]):
        weights = weights_list[i]
        name = layer.name
        print(name, len(weights), len(layer.weights))
        # restore by:
        if "_bn" in name:
            layer.set_weights(weights) # Batch normalization weights are: [gamma, beta, mean, std]
    """

    # data prep:
    test_set_processed = dataset.dataPreprocesser.apply_on_a_set_nondestructively(dataset.test)
    train_set_processed = dataset.dataPreprocesser.apply_on_a_set_nondestructively(dataset.train)
    test_L, test_R, test_V = test_set_processed
    train_L, train_R, train_V = train_set_processed

    if test_L.shape[3] > 3:
        # 3 channels only - rgb
        test_L = test_L[:, :, :, 1:4]
        test_R = test_R[:, :, :, 1:4]
        train_L = train_L[:, :, :, 1:4]
        train_R = train_R[:, :, :, 1:4]


    train_V = train_V.reshape(train_V.shape + (1,))
    from keras.utils import to_categorical
    train_V = to_categorical(train_V)

    import random
    import matplotlib.pyplot as plt
    import numpy as np

    T = 5
    batch_size = 16 # as it was when training

    train_data_indices = list(range(0,len(train_L)))

    f = K.function([model.layers[0].input, model.layers[1].input, K.learning_phase()],
                   [model.layers[-1].output])
    print("f", f)

    # For each sample?

    samples_N = 32
    predictions_for_sample = np.zeros((T,samples_N) + (256,256,)) # < T, SamplesN, 256x256 >
    sample = [test_L[0:samples_N], test_R[0:samples_N]]  # (16, 2,256,256,3)
    sample = np.asarray(sample)

    for MC_iteration in range(T):
        selected_indices = random.sample(train_data_indices, batch_size*4)

        print("train_L[selected_indices] :: ", train_L[selected_indices].shape)  # 16, 256,256,3
        print("sample :: ", sample.shape)  # 16, 2,256,256,3 ?

        train_sample = [train_L[selected_indices], train_R[selected_indices]]
        train_sample = np.asarray(train_sample)
        train_sample_labels = np.asarray(train_V[selected_indices])

        print("MonteCarloBatchNormalization")
        print("T", T)
        print("batch_size", batch_size)
        print("sample.shape", sample.shape)
        print("train_sample.shape", train_sample.shape)

        """
        # complete revert? Arguably not necessary
        model_h = ModelHandler.ModelHandler(settings, dataset) # < this will be slow
        model_h.model.load(args.one_model_path)
        model = model_h.model.model
        #model.load_weights(args.one_model_path) # revert at each MC_iteration start
        """

        # freeze everything besides BN layers
        for i, layer in enumerate(model.layers[2].layers):
            name = layer.name
            if "bn" not in name:
                # freeze layer which is not BN:
                layer.trainable = False
            #print(name, layer.trainable)
        for i, layer in enumerate(model.layers):
            name = layer.name
            if "bn" not in name:
                # freeze layer which is not BN:
                layer.trainable = False
                # else layer.stateful = True ?
            #print(name, layer.trainable)

        """ Without it shouts a warning, but seems alright
        # Re-Compile! (after changing the trainable param.)
        from keras.optimizers import Adam
        from loss_weighted_crossentropy import weighted_categorical_crossentropy
        loss = "categorical_crossentropy"
        weights = [1, 3]
        loss = weighted_categorical_crossentropy(weights)
        metric = "categorical_accuracy"
        model.compile(optimizer=Adam(lr=0.00001), loss=loss, metrics=[metric, 'mse'])
        #
        """

        model.fit(x=[train_sample[0], train_sample[1]], y=train_sample_labels, batch_size=16, epochs=25, verbose=2)

        """ # revert weights? (another way instead of loading from the .h5 file)
        weights_list = []
        for i, layer in enumerate(model.layers[3:]):
            weights_list.append(layer.get_weights())

        model.load_weights(args.one_model_path) # revert

        for i, layer in enumerate(model.layers[3:]):
            weights = weights_list[i]

            name = layer.name
            print(name, len(weights), len(layer.weights))

            if "_bn" in name:
                layer.set_weights(weights) # Batch normalization weights are: [gamma, beta, mean, std]
        """


        # model.predict would be nice to be able to batch easily
        # .... however ... predictions = model.predict(x=[sample[0], sample[1]], batch_size=16, verbose=2) # q: can i replace the f(...) with this?
        # it's not behaving

        ## don't want to make a new function every time though...
        #X#f = K.function([model.layers[0].input, model.layers[1].input, K.learning_phase()],
        #X#               [model.layers[-1].output])

        predictions = \
        f((np.asarray(sample[0], dtype=np.float32), np.asarray(sample[1], dtype=np.float32), 1))[0]

        # here BNs use exponentially weighted (/running) avg of the params for each layer from values it has seen during training
        # (sort of like the latest average value)
        # Ps: second prediction here is the same

        print("predictions.shape", predictions.shape)  # 16, 256,256,2

        sample_predicted = predictions[:, :, :, 1]
        print("sample_predicted.shape", sample_predicted.shape)  # 256,256

        predictions_for_sample[MC_iteration, :, :, :] = sample_predicted

    #print("are they equal? 0-1", np.array_equal(predictions_for_sample[0], predictions_for_sample[1]))
    #print("are they equal? 1-2", np.array_equal(predictions_for_sample[1], predictions_for_sample[2]))
    #print("are they equal? 2-3", np.array_equal(predictions_for_sample[2], predictions_for_sample[3]))

    predictions_for_sample = np.asarray(predictions_for_sample)  # [5, 100, 256, 256]

    print("predictions_for_sample ::", predictions_for_sample.shape)
    predictions_for_sample_By_Images = np.swapaxes(predictions_for_sample, 0, 1)  # [100, 5, 256, 256]

    print("predictions_for_sample_By_Images ::", predictions_for_sample_By_Images.shape)

    resolution = len(predictions_for_sample[0][0])  # 256
    predictions_N = len(predictions_for_sample[0])

    print("predictions_N:", predictions_N)

    for prediction_i in range(predictions_N):
        predictions = predictions_for_sample_By_Images[prediction_i]  # 5 x 256x256

        variance_image = np.var(predictions, axis=0)
        sum_var = np.sum(variance_image.flatten())

        do_viz = True
        if do_viz:
            fig = plt.figure(figsize=(10, 8))
            for i in range(T):
                img = predictions[i]
                ax = fig.add_subplot(1, T + 1, i + 1)
                plt.imshow(img, cmap='gray')
                ax.title.set_text('Model ' + str(i))

            ax = fig.add_subplot(1, T + 1, T + 1)
            plt.imshow(variance_image, cmap='gray')
            ax.title.set_text('Variance Viz (' + str(sum_var) + ')')

            plt.show()


    # MCBN (sample, T, train_data, batch_size)
    # predictions_for_sample = []
    # for i in T:
    #   batch of train data <- random from train_data of size batch_size
    #   update_layer_statistics (= eval with training mode on)
    #   prediction = model.predict(sample)
    #   predictions.append(prediction)
    # return predictions

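A compact, self-contained sketch of the MCBN recipe outlined in the comments above. The predict_in_train_mode callable is a stand-in assumption for whatever runs the forward pass with batch-norm statistics taken from the sampled training batch (the K.function with learning_phase=1 above plays that role):

import random
import numpy as np

def mcbn_predict(model, sample, train_data, predict_in_train_mode, T=5, batch_size=16):
    """Monte Carlo over batch-norm statistics: predict `sample` T times,
    each time with statistics from a fresh random training mini-batch."""
    predictions = []
    for _ in range(T):
        batch = random.sample(list(train_data), batch_size)
        predictions.append(predict_in_train_mode(model, batch, sample))
    predictions = np.stack(predictions)                        # shape (T, ...)
    return predictions.mean(axis=0), predictions.var(axis=0)   # mean + uncertainty

# Toy usage with a dummy "model": the prediction is just sample + batch mean.
dummy = lambda model, batch, sample: np.asarray(sample) + float(np.mean(batch))
mean, var = mcbn_predict(None, [1.0, 2.0], np.random.randn(100).tolist(),
                         predict_in_train_mode=dummy)
print(mean, var)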
Example #17
        image = np.zeros((height, width, 3), np.uint8)
        gt_boundingboxes = dictGroundTruth[key]
        image = gt_boundingboxes.drawAllBoundingBoxes(image)
        detection_boundingboxes = dictDetected[key]
        image = detection_boundingboxes.drawAllBoundingBoxes(image)
        # Show detection and its GT
        cv2.imshow(key, image)
        cv2.waitKey()


# Read txt files containing bounding boxes (ground truth and detections)
boundingboxes = getBoundingBoxes()
# Uncomment the line below to generate images based on the bounding boxes
# createImages(dictGroundTruth, dictDetected)
# Create an evaluator object in order to obtain the metrics
evaluator = Evaluator()
##############################################################
# VOC PASCAL Metrics
##############################################################
# Plot Precision x Recall curve
evaluator.PlotPrecisionRecallCurve(
    boundingboxes,  # Object containing all bounding boxes (ground truths and detections)
    IOUThreshold=0.3,  # IOU threshold
    method=MethodAveragePrecision.EveryPointInterpolation,  # As the official matlab code
    showAP=True,  # Show Average Precision in the title of the plot
    showInterpolatedPrecision=True,
)  # Plot the interpolated precision curve
# Get metrics with PASCAL VOC metrics
metricsPerClass = evaluator.GetPascalVOCMetrics(
    boundingboxes,  # Object containing all bounding boxes (ground truths and detections)
Example #18
            elif op == '-n':
                num = int(value)
            elif op == '-l':
                operators = int(value)
            elif op == '-e':
                exp = True
            elif op == '-p':
                BiTree.set_power_operator(True)
            elif op == '-v':
                verbose = True
            elif op == '-i':
                interact = True

    g = Qg.QuestGenerator()
    g.generate(quantity=num, operators=operators, enable_power=exp)
    ev = Evaluator.Evaluator()

    # Display the results
    if verbose:
        for out in g.output_list:
            print(out.to_string())
        for out in g.output_list:
            print(str(ev.evaluate(out)))
    print('\n  Generate complete\n  Writing to file...')

    # Write to files
    with open('quests.txt', 'w', encoding='utf-8') as f:
        for out in g.output_list:
            f.write(out.to_string() + '\n')
    with open('solutions.txt', 'w', encoding='utf-8') as f:
        for out in g.output_list:
Example #19
 def evaluatorException(self, msg: str):
     if self.__line is not None:
         msg += " on line: " + str(self.__line)
         if self.__column is not None:
             msg += " column: " + str(self.__column)
     raise Evaluator.EvaluatorException(msg)
Example #20
# print('detFormat = %s' % detFormat)
# print('gtFolder = %s' % gtFolder)
# print('detFolder = %s' % detFolder)
# print('gtCoordType = %s' % gtCoordType)
# print('detCoordType = %s' % detCoordType)
# print('showPlot %s' % showPlot)

# Get groundtruth boxes
allBoundingBoxes, allClasses = getBoundingBoxes(
    gtFolder, True, gtFormat, gtCoordType, imgSize=imgSize)
# Get detected boxes
allBoundingBoxes, allClasses = getBoundingBoxes(
    detFolder, False, detFormat, detCoordType, allBoundingBoxes, allClasses, imgSize=imgSize)
allClasses.sort()

evaluator = Evaluator()

acc_AP = 0
validClasses = 0

# Plot Precision x Recall curve
detections = evaluator.PlotPrecisionRecallCurve(
    allBoundingBoxes,  # Object containing all bounding boxes (ground truths and detections)
    IOUThreshold=iouThreshold,  # IOU threshold
    method=MethodAveragePrecision.EveryPointInterpolation,
    showAP=True,  # Show Average Precision in the title of the plot
    showInterpolatedPrecision=False,  # Don't plot the interpolated precision curve
    savePath=savePath,
    showGraphic=showPlot)

f = open(os.path.join(savePath, 'results.txt'), 'w')
Example #21
 
 # Find best c-parameter from parameter estimation data
 print >> sys.stderr, "Finding optimal c-parameters from", options.parameters    
 rows = TableUtils.readCSV(options.parameters, fieldnames)
 folds = sorted(list(TableUtils.getValueSet(rows, "fold")))
 cParameterByFold = {}
 for fold in folds:
     print >> sys.stderr, "  Processing fold", fold
     foldRows = TableUtils.selectRowsCSV(rows, {"fold":fold})
     cParameters = sorted(list(TableUtils.getValueSet(foldRows, "c")))
     evaluators = []
     cParameterByEvaluator = {}
     for cParameter in cParameters:
         print >> sys.stderr, "    Processing c-parameter", cParameter, 
         paramRows = TableUtils.selectRowsCSV(foldRows, {"c":cParameter})
         evaluator = Evaluator.calculateFromCSV(paramRows, EvaluatorClass)
         #print evaluator.toStringConcise()
         cParameterByEvaluator[evaluator] = cParameter
         evaluators.append(evaluator)
         if evaluator.type == "multiclass":
             print " F-score:", evaluator.microFScore
         else:
             print " F-score:", evaluator.fScore
     evaluators.sort(Evaluator.compare)
     print >> sys.stderr, "  Optimal C-parameter:", cParameterByEvaluator[evaluators[-1]]
     cParameterByFold[fold] = cParameterByEvaluator[evaluators[-1]]
 
 print >> sys.stderr, "Evaluating test data from", options.parameters
 rows = TableUtils.readCSV(options.input, fieldnames)
 selectedRows = []
 for fold in folds:
 def prepass(self, prepass: Evaluator.PrepassState = None):
     if prepass is None:
         self.prepass(Evaluator.PrepassState())
         return
     self.__expr.prepass(prepass)
     self.__identifier.prepass(prepass)
 def evaluate(self, state: Evaluator.EvaluationState = None):
     if state is None:
         return self.evaluate(Evaluator.EvaluationState())
# print('gtFolder = %s' % gtFolder)
# print('detFolder = %s' % detFolder)
# print('gtCoordType = %s' % gtCoordType)
# print('detCoordType = %s' % detCoordType)
# print('showPlot %s' % showPlot)

allBoundingBoxes, allClasses = getBoundingBoxes(gtFolder, True, gtFormat)
allBoundingBoxes, allClasses = getBoundingBoxes(detFolder, False, detFormat, allBoundingBoxes, allClasses)
allClasses.sort()

f = open(os.path.join(savePath,'results.txt'),'w') 
f.write('Object Detection Metrics\n')
f.write('https://github.com/rafaelpadilla/Object-Detection-Metrics\n\n\n')
f.write('Average Precision (AP), Precision and Recall per class:')

evaluator = Evaluator()
acc_AP = 0
validClasses = 0
# for each class
for c in allClasses:
    # Plot Precision x Recall curve
    metricsPerClass = evaluator.PlotPrecisionRecallCurve(c, # Class to show
                                    allBoundingBoxes, # Object containing all bounding boxes (ground truths and detections)
                                    IOUThreshold=iouThreshold, # IOU threshold
                                    showAP=True, # Show Average Precision in the title of the plot
                                    showInterpolatedPrecision=False, # Don't plot the interpolated precision curve
                                    savePath = os.path.join(savePath,c+'.png'),
                                    showGraphic=showPlot)
    # Get metric values per each class
    cl = metricsPerClass['class']
    ap = metricsPerClass['AP']