Example #1
0
def maybeRespond(name, msg):
  global prevWords, responses, avgQuality
  # responses is person talking to previous person talking to words said previously to word to say to frequency
  print "maybe responding"
  name = utils.getCanonicalName(name)
  prevWords[name] = utils.tokenizeLine(msg)
  if name and beingWho:
    score = 0
    modifier = {}
    for person in prevWords:
      for word in prevWords[person]:
        word = word.strip()
        if (beingWho in responses) and (person in responses[beingWho]) and (word in responses[beingWho][person]):
          print word
          for cword in responses[beingWho][person][word]:
            modifier[cword] = responses[beingWho][person][word][cword]**2 # TODO: +=, don't override
            score += modifier[cword]
    print modifier
    print('response score: '+ str(score))
    if score > 0:
      # TODO: factor response score into decision for whether to fire off the response?
      best = 0
      bestc = None
      for i in range(20):
        candidate = babble(beingWho, frequencies, modifier)
        cscore = 0
        cwords = utils.tokenizeLine(candidate)
        for cw in cwords:
          if cw in modifier:
            cscore += modifier[cw]
        length = len(cwords) - 2 # start and end token
        if length < 4 and re.search(r'[a-zA-z]+', candidate):
          print "length modifier"
          cscore += (4-length)**2
        print candidate, cwords, cscore
        if cscore > best:
          best = cscore
          bestc = candidate
      # chance to discard:
      threshold = randrange(avgQuality*2)
      if bestc:
        print bestc+': '+str(best)+' > '+str(threshold)+'?'
        if best > threshold:
          avgQuality = math.ceil(float(avgQuality+best)/2)
          print 'New average:', avgQuality
          sendMsg(bestc)
          prevWords = {}
        else:
          avgQuality = math.ceil(float(avgQuality*2+best)/3) # TODO: evaluate
          print 'updated average:', avgQuality
Example #2
0
def parseFile(filename, d, responses):
    # stores word occurrences for file in the giant word map
    badNames = set()
    prevWords = {}  # people to word set
    f = open(filename, "r")
    for line in f:
        (who, what) = parseLine(line)
        if who != "":
            name = utils.getCanonicalName(who)
            if name is None:
                if who not in ["Topic", "Mode"]:
                    badNames.add(who)
            else:
                tokens = utils.tokenizeLine(what)
                storeTokens(name, tokens, d)
                storeResponses(name, set(tokens), prevWords, responses)
                prevWords[name] = set(
                    tokens
                )  # TODO: do more filtering here instead of in storeResponses; move to helper util function
                for word in set(tokens):
                    if not word in totalWords:
                        totalWords[word] = 0
                    totalWords[word] += 1
    print badNames