def generate_original_unvisited(self):
    """Build the SRUS unvisited-node structure for the configured database.

    Selects the Scorer subclass matching ``self.dbname`` and wraps it,
    together with ``self.super_nodes``, in an SRUS instance.

    Returns:
        SRUS: the scored unvisited-node set.

    Raises:
        TypeError: if ``self.dbname`` is not a supported database name.
    """
    # Dispatch table replaces the repeated if/elif chain (which also
    # contained a dead, misspelled `univisted = None` assignment).
    scorer_classes = {
        "DBLP4": Scorer.DBLPScorer,
        "wordnet": Scorer.WordnetScorer,
        "IMDBSmall": Scorer.IMDBScorer,
        "LUBM": Scorer.LUBMScorer,
        "SP2B": Scorer.SP2BScorer,
    }
    try:
        scorer_cls = scorer_classes[self.dbname]
    except KeyError:
        raise TypeError("Invalid Database")
    scorer = scorer_cls(self.merge_logger.node_data,
                        self.merge_logger.uri_to_oid)
    return SRUS(scorer, self.super_nodes)
Exemplo n.º 2
0
def remoteDegenerate(image,
                     alternationfn=_noise,
                     decay=0.01,
                     iterations=10,
                     maxloops=2000,
                     verbose=True,
                     history=True):
    """Iteratively alter *image* while the remote classifier's confidence
    stays within *decay* of the previous accepted score.

    Parameters:
    * image: the PPM image to degrade (must support .copy()).
    * alternationfn: function applied to a copy of the last image each loop.
    * decay: maximum allowed score drop per accepted step.
    * iterations: number of accepted alterations to reach.
    * maxloops: hard cap on total attempts.
    * verbose: print per-loop progress.
    * history: collect (score, depth, loop) tuples and plot them at the end.

    Returns (lastScore, lastImage, history) when history was collected,
    (lastScore, lastImage) otherwise, or None if the initial upload fails.
    Note: the result can be poor if maxloops is hit before *iterations*.
    """
    # First: check that the credentials work and the image is detected.
    initialResp = Scorer.send_ppm_image(image)
    if initialResp.status_code != 200:
        return None
    totalLoops = 0  # counts all attempts (failed uploads are un-counted)
    depth = 0       # counts accepted alterations
    lastImage = image
    lastScore = Scorer.get_best_score(initialResp.text)
    # Sanity output so we see immediately if we fed in garbage.
    print("StartConfidence:", lastScore)

    # BUGFIX: h must exist even when history=False — the original only
    # created it inside `if history:` and then read it unconditionally
    # at the end, raising NameError for history=False.
    h = []

    # Stop when we reach the target depth or exceed the attempt budget.
    while depth < iterations and totalLoops < maxloops:
        totalLoops += 1
        # Alter a copy of the last accepted image and score it remotely.
        degenerated = alternationfn(lastImage.copy())
        degeneratedResp = Scorer.send_ppm_image(degenerated)
        if degeneratedResp.status_code == 200:
            degeneratedScore = Scorer.get_best_score(degeneratedResp.text)
            # Console output makes a too-strong alternationfn obvious.
            if verbose:
                print("Score:", degeneratedScore, "Depth:", depth, "Loop:",
                      totalLoops)
            if history:
                h.append((degeneratedScore, depth, totalLoops))
            # Accept the new image if the score decayed no more than allowed.
            if degeneratedScore >= lastScore - decay:
                lastImage = degenerated
                lastScore = degeneratedScore
                depth += 1
        else:
            print("Error, status code was: ", degeneratedResp.status_code)
            # Failed uploads do not count against the budget.
            totalLoops -= 1

        # We are working against a remote service — rate-limit ourselves.
        time.sleep(1.1)
    if h:
        plotHistory(h)
        return lastScore, lastImage, h
    return lastScore, lastImage
Exemplo n.º 3
0
def total_score(owner_index):
    """Sum the scoring categories for one owner.

    owner_index 0 selects the visiting ('VIS') side; any other value
    selects 'HOME'. Adds the hitter, starting-pitcher, and closer
    category values from Scorer's raw score dictionary.
    """
    side = 'VIS' if owner_index == 0 else 'HOME'
    scores = Scorer.get_raw_scores()[side]
    hitters = scores['H']
    starters = scores['SP']
    closers = scores['CL']
    return (hitters['18OPS'] + hitters['PA']
            + starters['18OPS'] + starters['IP'] + starters['K/9']
            + closers['18OPS'] + closers['G'] + closers['SV%'] + closers['K/9'])
Exemplo n.º 4
0
  def __init__(self):
    """Build the search engine: crawl the seed pages, index them, and
    precompute PageRank over the resulting link graph."""
    indexer      = Indexer()
    self.graph   = Graph()
    # Crawl from a fixed seed set; the crawler populates both the link
    # graph and the indexer as it walks the pages.
    self.crawler = Crawler({"http://mysql12.f4.htw-berlin.de/crawl/d01.html",
                            "http://mysql12.f4.htw-berlin.de/crawl/d06.html",
                            "http://mysql12.f4.htw-berlin.de/crawl/d08.html"},
                            self.graph, indexer)
    self.crawler.crawl()
    # Scorer works off the inverted index and document list built above.
    self.scorer = Scorer(indexer.index, indexer.documents)

    self.pageRank = PageRank(self.graph)
    self.pageRank.calc()
Exemplo n.º 5
0
 def _testClassifier(self, start, stop):
     """Evaluate the classifier on test examples built from [start, stop).

     Returns [accuracy, f1, precision, recall], or None when no test set
     could be built.
     """
     testSet = self.trainer.buildTrainingExamples(start, stop, False)
     if testSet is None:
         return None
     # BUGFIX: use list comprehensions instead of map(). Under Python 3,
     # map() returns a one-shot iterator, and actualLabels is consumed by
     # all three compute* calls below — the 2nd and 3rd would silently
     # see an exhausted (empty) sequence.
     featureList = [example[0] for example in testSet]
     actualLabels = [example[1] for example in testSet]
     classLabels = self.classifier.batch_classify(featureList)
     a = Scorer.Scorer()
     a.computeAccuracy(actualLabels, classLabels)
     a.computeFScores(actualLabels, classLabels)
     a.computeMathewsCoef(actualLabels, classLabels)
     # prMeasures holds (precision, recall).
     return [a.accuracy, a.f1score, a.prMeasures[0], a.prMeasures[1]]
Exemplo n.º 6
0
class SearchEngine:
  """Small demo search engine: crawls a fixed seed set, builds an inverted
  index, and ranks query results by scorer output weighted with PageRank."""

  # All crawled demo pages share this URL prefix (used by printPageRanks).
  BASE_URL = "http://mysql12.f4.htw-berlin.de/crawl/"

  def __init__(self):
    """Crawl the seed pages, build the index/graph, and precompute PageRank."""
    indexer      = Indexer()
    self.graph   = Graph()
    # The crawler fills both the link graph and the indexer as it walks.
    self.crawler = Crawler({"http://mysql12.f4.htw-berlin.de/crawl/d01.html",
                            "http://mysql12.f4.htw-berlin.de/crawl/d06.html",
                            "http://mysql12.f4.htw-berlin.de/crawl/d08.html"},
                            self.graph, indexer)
    self.crawler.crawl()
    self.scorer = Scorer(indexer.index, indexer.documents)

    self.pageRank = PageRank(self.graph)
    self.pageRank.calc()

  def search(self, string, scoreOnly=False):
    """Run a whitespace-tokenised query and print results, best first.

    With scoreOnly=True the raw scorer output is ranked directly;
    otherwise each score is multiplied by the document's PageRank.
    """
    query  = string.split()
    scores = self.scorer.scoreQuery(query)

    if scoreOnly:
      results = scores
    else:
      results = {}
      for url, score in scores.items():
        results[url] = score * self.graph.get_document(url).rank

    sortedResults = sorted(results.items(), key=operator.itemgetter(1), reverse=True)
    for res in sortedResults:
      print(res)

  def printPageRanks(self):
    """Print the rounded PageRank of pages d01..d08 on a single line."""
    print('Page ranks:')
    print('  d01  -   d02  -   d03  -   d04  -   d05  -   d06  -   d07  -   d08')
    # One loop instead of eight copy-pasted print calls; entries are
    # joined with ' - ' and the last one ends with a blank line.
    for i in range(1, 9):
      url = "%sd%02d.html" % (self.BASE_URL, i)
      rank = round(self.graph.get_document(url).rank, 4)
      print(rank, end=' - ' if i < 8 else '\n\n')
Exemplo n.º 7
0
def readFinancialData(nav, basicSheet, readStandalone):
    """Scrape balance-sheet, P&L and ratio data, merge them, and score.

    Parameters:
    * nav: mapping with 'BalanceSheet', 'ProfitLoss' and 'RatioSheet' targets
    * basicSheet: company metadata; 'name' labels the rows and is passed to Scorer
    * readStandalone: whether to read standalone (vs. consolidated) statements

    Returns the combined float DataFrame, indexed by (Company Name, Year),
    with scores appended by Scorer.
    """
    balancedata = BalanceSheetScrapper.BalanceSheetScrapper(
        nav['BalanceSheet'], readStandalone).readBalanceSheet()
    print("Balance Sheet processed")

    pldata = profitLossScrapper.ProfitLossScrapper(
        nav['ProfitLoss'], readStandalone).readPL()
    print("Profit & Loss Sheet processed")

    # NOTE(review): the scraped ratio sheet (readSheet) was abandoned in the
    # original; ratios are recomputed from the balance sheet and P&L instead.
    ratioScrapper = RatioScrapper.RatoiSheetScrapper(nav['RatioSheet'])
    ratiodata = ratioScrapper.calculateRatio(balancedata, pldata)

    # Merge the three frames column-wise and coerce everything to float.
    finData = pd.concat([balancedata, pldata, ratiodata],
                        sort=False, axis=1).astype(float)
    finData.insert(loc=0, column='Company Name', value=basicSheet['name'])
    finData.reset_index(inplace=True)
    finData.set_index(["Company Name", "Year"], inplace=True)
    return Scorer.Scorer(basicSheet, finData).calculate()
Exemplo n.º 8
0
def wrapper(coefficients):
    """Objective function for an optimiser: lower is better.

    Prepends the fixed leading weights [1, 1, 1, 1] and appends a trailing
    zero to the candidate coefficients, runs the 2018 qualifier, and
    subtracts the achieved score from a large constant so that a
    minimiser effectively maximises the score.
    """
    full_coefficients = [1, 1, 1, 1] + coefficients + [0]
    rides, bonus, out = Qual2018.run(full_coefficients)
    ret = 100000000 - Scorer.run(rides, bonus, out)
    print(ret)
    return ret
Exemplo n.º 9
0
Arquivo: test.py Projeto: rdub/pysent
import Splitter
import Tagger
import Scorer
import Importer

import yaml
from pprint import pprint

# Entry point: pull Google News RSS summaries about Apple and compute a
# dictionary-based sentiment score for each one. (Python 2 print syntax.)
if __name__ == "__main__":
    # Fetch and parse the RSS feed into a list of summary strings.
    rss = Importer.RSSImporter(
        'https://news.google.com/news/feeds?q=apple&output=rss')
    input_text = rss.parse()

    s = Splitter.Splitter()
    # The tagger marks words using the positive / negative / incrementer /
    # decrementer / inverter dictionaries.
    tagger = Tagger.DictionaryTagger([
        'dicts/positive.yml', 'dicts/negative.yml', 'dicts/inc.yml',
        'dicts/dec.yml', 'dicts/inv.yml'
    ])
    scorer = Scorer.Scorer()
    total = 0
    for summary in input_text:

        # Split the summary into tagged-ready tokens, tag, then score.
        split = s.split(summary)

        tagged = tagger.tag(split)

        score = scorer.score(tagged)
        print "%s -> %d" % (summary, score)
        total += score

    # Running total across all summaries in the feed.
    print "Total: %d" % total
    pt_age = p['features']['_age']
    pt_data['Age_gte75'] = pt_age >= 75
    pt_data['Age_65-74'] = (pt_age >= 65 and pt_age < 75)
    pt_data['Age_gt65'] = pt_age > 65
    df_data.append(pt_data)
    
# Collect the per-patient feature rows built above into one DataFrame;
# each row is a patient, each column a feature/concept flag.
df = pd.DataFrame(df_data)

# in this example we're working at the level of UMLS CUI's, but any identifiers
# can be used as long as the patient data and score definition are using the
# same system. 


# CHA2DS2-VASc score, computed per patient. (Python 2 print syntax.)
print "score chads2vasc"
chadsvasc = scorer.Scorer(chads_definition)
chadsvasc_result = chadsvasc.score(df, ['patient_id'])
chadsvasc_scores = chadsvasc_result['scores']
#concepts in score definition that were never seen in the input patient data
chadsvasc_not_found = chadsvasc_result['not_found'] 
#save scores to csv
chadsvasc_scores.to_csv('../example_data/chadsvasc_scores.csv', index=False)


# HAS-BLED score, same pipeline.
print "score hasbled"
hasbled = scorer.Scorer(hasbled_definition)
hasbled_scores = hasbled.score(df, ['patient_id'])
#export directly from Scorer result
hasbled_scores['scores'].to_csv('../example_data/hasbled_scores.csv', index=False)
Exemplo n.º 11
0
                    break

            if user.endStation in simMap.stations:
                if simMap.stations[user.endStation].isDocAvail():
                    simMap.stations[user.endStation].increaseBikeAvail()
                    nonErrors += 1
                else:
                    stationsDocUnavail.append(user.endStation)
                    DocUnavailErrors += 1
            else:
                missingStations.append(user.endStation)
                stationMissingErrors += 1


# Print the simulation statistics accumulated by the loop above:
# counts of missing stations, dock/bike unavailability errors, and
# successful trips, plus the distinct stations involved in each.
print('Station Missing: ', stationMissingErrors)
print('Missing Stations: ', set(missingStations))
print('Doc Unavail Errors: ', DocUnavailErrors)
print('Doc  Unavail Stations: ', set(stationsDocUnavail))
print('Bike Unavail Errors: ', BikeUnavailErrors)
print('Bike  Unavail Stations: ', set(stationsBikeUnavail))
print('Nonerrors: ', nonErrors)
print('prp: ', Station.prop)
# Persist the final simulated station state, then score the written JSON.
with open('StationJson/StationDataOut.json', 'w') as outfile:
    simMap.generateStationJson(outfile)

#print(list(simMap.stations.keys()))

Scorer.scorer('StationJson/StationDataOut.json')

Exemplo n.º 12
0
Created on Thu Jul  2 16:46:47 2020

@author: Andrew
"""

import sys
import Scorer
from importlib import reload

TYPE = None
QTR = None
QUESTION = None
if __name__ == "__main__":
    while True:
        if not TYPE:
            Scorer.standings, Scorer.four_q, Scorer.quarter_bonus, Scorer.final_qs, Scorer.teams, Scorer.no_teams, Scorer.repo, Scorer.html_temp = Scorer.setup(
            )
            Scorer.files = [
                r'/home/andrew/Documents/aefreeman.github.io/Trivia/Live_Tracker.html',
                r'/home/andrew/Documents/aefreeman.github.io/Trivia/img/fig1.png'
            ]
            Scorer.scores = [[0] for team in Scorer.teams]
            Scorer.counter = 2
            questions = [['Quarter', i // 4 + 1, i % 4 + 1] for i in range(16)]
            bonuses = [4, 9, 14]
            for q in bonuses:
                questions.insert(q, ['Bonus', q // 5 + 1, 1])
            questions.extend([['Final', 1, 1], ['Final', 1, 2]])
            Scorer.questions = questions
            quest_order = iter(questions)
            print('Setup')
        else:
Exemplo n.º 13
0
 def __init__(self,
              fnames,
              numFiles=100,
              percentage=0.05,
              pswd="neo4j",
              bolt=None,
              secure=False,
              host="localhost",
              portNum=7474,
              portType="http",
              user="******"):
     """
     Divides the papers by field of study and determines the experts in each field.

     Parameters:
     * fnames: the name of the TXT file that contains the JSONs (one per line)
     * numFiles (Default = 100): the number of JSONs to use
     * percentage: the decimal fraction of authors to consider experts
       (default: the top 5% of authors).
     * pswd (Default = \"neo4j\"): the password for the neo4j database.
     * bolt (Default = None): whether to use the bolt protocol for neo4j
       (None means autodetect; py2neo v4.0+ uses bolt by default).
     * secure (Default = False): whether to use a secure neo4j connection.
     * host (Default = \"localhost\"): the database server host name.
     * portNum (Default = 7474): the database server port
       (py2neo v4.0 defaults to bolt, so 7687 is usually appropriate).
     * portType (Default = \"http\"): \"bolt\", \"https\", or \"http\";
       any other value is treated as \"http\".
     * user (Default = \"neo4j\"): the neo4j authentication user.
     """
     # Read at most numFiles JSON lines. BUGFIX: use a context manager —
     # the original called open() inside the comprehension and never
     # closed the file handle.
     with open(fnames, "r") as fh:
         self.fnames = [
             line.rstrip("\n") for idx, line in enumerate(fh)
             if idx < numFiles
         ]
     # Dictionaries to store the data by field (key)
     self.papersByField = {}
     self.authScoresByField = {}
     self.graphsByField = {}
     # Splits the papers by field: a paper may list several "fos" entries
     # and is appended to every one of them.
     for f in self.fnames:
         jf = json.loads(f)
         for fos in jf["fos"]:
             self.papersByField.setdefault(fos, []).append(f)
     # Scores are calculated based on the papers in each field
     for key, paperList in self.papersByField.items():
         # Creates a Scorer object from the papers for the current field.
         scorer = Scorer(paperList, pswd, bolt, secure, host, portNum,
                         portType, user)
         # Calculates the scores for the field
         scorer.calculateScores()
         # Stores the publication graph used for scoring
         self.graphsByField[key] = scorer.getGraph()
         # Keep only author entries (indices >= authStart) as (index, score)
         # tuples. BUGFIX: the original tested
         # list(scorer.scores).index(s) >= authStart, which is O(n^2) and,
         # because index() returns the FIRST occurrence of a duplicate
         # score, mis-classifies entries whose score repeats; each entry's
         # own enumerate index is the intended test.
         scoreList = [(i, s) for i, s in enumerate(scorer.scores)
                      if i >= scorer.authStart]
         # Sort by score (descending) and keep the top `percentage` slice.
         scoreList.sort(key=lambda x: x[1], reverse=True)
         numExperts = ceil(len(scoreList) * percentage)
         scoreList = scoreList[:numExperts]
         # Save the experts' index -> score mapping for this field.
         self.authScoresByField[key] = {s[0]: s[1] for s in scoreList}