Example #1
import re
from queue import Queue

from hyphen import Hyphenator
from pymarkovchain import MarkovChain


class TextGenerator:
	def __init__(self, generatorName, trainString, prefixLength):
		self.generatorName = generatorName
		self.chain = MarkovChain()
		self.chain.generateDatabase(trainString, n=prefixLength)
		self.currState = []
		self.hyphenator = Hyphenator('en_US')
		self.syllableQ = Queue()
		self.stripPattern = re.compile('[\W_]+')
		while (len(self.currState) < prefixLength):
			self.currState = self.chain.generateString().split()[-(prefixLength+1):-1]
	
	def load_next_word(self):
		nextword = ""
		try:
			while nextword == "":
				nextword = self.stripPattern.sub('', self.chain._nextWord(self.currState))
				self.currState = self.currState[1:]
				self.currState.append(nextword)
			if len(nextword) < 4: # because hyphenator doesn't work for words of fewer than 4 letters
				self.syllableQ.put(nextword)
			else: 
				for syllable in self.hyphenator.syllables(nextword):
					self.syllableQ.put(syllable)
		except UnicodeEncodeError:
			print("unicode error")
		
	def get_next_syllable(self):
		if (self.syllableQ.empty()):
			self.load_next_word()
		return self.syllableQ.get()
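# Usage sketch (added for illustration; not part of the original example):
# assumes the pymarkovchain and PyHyphen packages are installed and that
# corpus.txt is any reasonably long training text.
if __name__ == "__main__":
    with open("corpus.txt") as f:
        gen = TextGenerator("demo", f.read(), prefixLength=2)
    for _ in range(12):
        print(gen.get_next_syllable())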
Example #2
import os
import json

from pymarkovchain import MarkovChain


class EuroMarkov:
    def __init__(self):
        self.mc = MarkovChain("./markovdata")

    def generateCountryList(self):
        countryList = []
        for filename in os.listdir("json_lyrics/2015"):
            countryList.append(os.path.splitext(filename)[0])
        return countryList

    def loadFiles(self,startYear,endYear,countryList):
        model = ""
        for year in range(startYear,endYear+1):
            for country in countryList:
                fname = "json_lyrics/"+str(year)+"/"+country+".json"
                if os.path.isfile((fname)):
                    with open (fname,"r") as myfile:
                        data = json.load(myfile)
                        model += (data['lyrics']) + '\n';
        return model

    def runMarkov(self,model):
        self.mc.generateDatabase(model)

    def generateString(self):
        return self.mc.generateString()
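# Usage sketch (added for illustration): builds a model from the scraped
# lyric files and prints one generated line. Assumes the json_lyrics/<year>/
# tree referenced above exists and ./markovdata is writable.
if __name__ == "__main__":
    em = EuroMarkov()
    countries = em.generateCountryList()
    em.runMarkov(em.loadFiles(2010, 2015, countries))
    print(em.generateString())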
Example #3
def lyrics():
    artist = request.form['artist']
    lines = int(request.form['lines'])

    if not artist:
        return redirect(url_for('index'))

    # Get a response of sample lyrics from the provided artist
    uri = "http://api.lyricsnmusic.com/songs"
    params = {
        'api_key': API_KEY,
        'artist': artist,
    }
    response = requests.get(uri, params=params)
    lyric_list = response.json()
    # Parse results into a long string of lyrics
    lyrics = ''
    for lyric_dict in lyric_list:
        lyrics += lyric_dict['snippet'].replace('...', '') + ' '

    # Generate a Markov model
    mc = MarkovChain()
    mc.generateDatabase(lyrics)

    result = []
    for line in range(0, lines):
        result.append(mc.generateString())

    return render_template('lyrics.html', result=result, artist=artist)
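# Note (added): response.json() will raise a ValueError if the lyricsnmusic
# API returns a non-JSON error page, so a production version of this view
# would check response.ok before parsing.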
Example #4
import os
import sys
import json

import twitter
from pymarkovchain import MarkovChain


class Michiov(object):
  def __init__(self, autogen=True, markovdb=os.path.expanduser("~/markov"),
               twcreds=os.path.expanduser("~/.michiov_twitter_credentials"),
               twappcreds=os.path.expanduser("~/.michiov_twitter_appdata")):
    self.mc = MarkovChain(markovdb)
    self.reload()
    if not os.path.exists(twappcreds):
      print("Lack of app creds")
      sys.exit(1)
    twcons = json.loads(open(twappcreds).read())
    conskey = twcons['key']
    conssec = twcons['secret']
    while not os.path.exists(twcreds):
      twitter.oauth_dance("MPRZ Tech Labs", conskey, conssec, twcreds)
    oauth_token, oauth_secret = twitter.read_token_file(twcreds)
    self.t = twitter.Twitter(auth=twitter.OAuth(oauth_token, oauth_secret, conskey, conssec))
  def should(self):
    ret = input("Should I send it? (y/N) ")
    return ("y" in ret or "Y" in ret)
  def qas(self):
    idea = self.mc.generateString()
    print("Generated: %s" % idea)
    if self.should():
      self.t.statuses.update(status=idea)
  def loop(self):
    try:
      while True:
        self.qas()
        #self.reload()
    except KeyboardInterrupt:
      pass
  def reload(self):
    with open("markovpredb.txt") as file:
      self.mc.generateDatabase(file.read())
Example #5
def fetch_lyrics(artist, lines):
    API_KEY = os.environ.get('API_KEY')

    uri = "http://api.lyricsnmusic.com/songs"
    params = {
        'api_key': API_KEY,
        'artist': artist,
    }
    response = requests.get(uri, params=params)
    lyric_list = response.json()

    lyrics = ''
    for lyric_dict in lyric_list:
        lyrics += lyric_dict['snippet'].replace('...', '') + ' '

    # Generate a Markov model
    mc = MarkovChain('./markov')
    mc.generateDatabase(lyrics)

    # Add lines of lyrics
    result = []
    for line in range(0, lines):
        line_string = mc.generateString()
        result.append(line_string)
    return result
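# Usage sketch (added for illustration): requires the API_KEY environment
# variable to be set; the artist name here is arbitrary.
if __name__ == "__main__":
    for line in fetch_lyrics("Daft Punk", 4):
        print(line)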
Example #6
def main():
    import config
    from pymarkovchain import MarkovChain

    optp = ArgumentParser()

    optp.add_argument("-d", "--database", dest="database", help="Where to store the database")
    optp.add_argument("-f", "--file", dest="file", help="File source to use for generating the database")
    opts = optp.parse_args()

    # Set up logging.
    logformat = "%(levelname)-8s %(name)s %(message)s"
    logging.basicConfig(level=logging.INFO, format=logformat)
    log = logging.getLogger(__name__)

    if opts.database is None:
        try:
            opts.database = config.markovbrainfile
        except NameError:
            log.critical("I require a brainfile to write into!")
            exit(1)
    if opts.file is None:
        log.critical("I require an input file to learn from!")
        exit(1)

    mc = MarkovChain(opts.database)

    with open(opts.file) as f:
        mc.generateDatabase(f.read())
Example #7
def markov(msg, botName, channel, db):
  if msg.rawMatchRe('!markov (?P<source>#?[a-zA-Z]\S*)\s*$') or msg.rawMatchRe('what (would|does) (the )?(?P<source>#?[a-zA-Z]\S+) say\??'):
    m = msg.getRegExpResult()
    source = m.group('source')

    if source[0] == '#':
      logsList = db.getLogs(chan=source, lines=2000)
    else:
      logsList = db.getLogs(nick=source, lines=2000)
    
    if len(logsList) < 100:
      hexchat.command("msg %s Not enough data for %s" % (channel, source))
      
    else:
      mc = MarkovChain("./markov_db")
      ircText = ''
      
      for line in logsList:
        # disqualify lines that are too short or are certain bot functions that start with '!'
        if len(line.split(' ')) >= 5 and line[0] != '!':
          ircText += line.replace('.','') + '. '
          
      mc.generateDatabase(ircText)
      markovOutput = mc.generateString().capitalize()
      hexchat.command('msg %s "%s"  --%s' % (channel, markovOutput, source))
      
    return True
  return False
Example #8
def mkdb():
  mc = MarkovChain('./markov')
  with open('yaks.txt') as f:
    mc.generateDatabase(f.read())
  with open('yaks.txt') as f:
    yaks = [line.strip() for line in f]
  return mc, yaks
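# Usage sketch (added for illustration): assumes yaks.txt exists in the
# working directory.
mc, yaks = mkdb()
print(mc.generateString())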
Example #9
def markov():
    """A simple markov function"""
    mc = MarkovChain("./tempchain")

    with open(CORPUS, 'r') as f:
        data = f.read()

    mc.generateDatabase(data)

    return mc.generateString()
Example #10
    def prepare_dict(self):
        if self.dictfile is None:
            print("error: no dictfile")
            return
        # Build the Markov database, just using pymarkovchain's default
        # settings for now. This will fail if it doesn't have write access
        # to $PWD.
        chain = MarkovChain("./markov")

        source = self.build_source()
        chain.generateDatabase(source)

        # A fresh MarkovChain instance is needed to reload the database
        # after generating it.
        self.chain = MarkovChain("./markov")
Example #11
def main(args):
	markov_filename = "./" + args.subreddit + ".mcd"
	new_chain = not os.path.isfile(markov_filename)  # this must come before the creation of the Markov Chain
	mc = MarkovChain(markov_filename)

	if args.new or new_chain:
		titles = getTitles(getSubmissions(100, args.subreddit))
		training_data = str.join('.', titles)
		mc.generateDatabase(training_data)

	N = args.num_submissions
	while N > 0:
		print(mc.generateString())
		N -= 1
Example #12
    def refresh_database(self):
        with open(self._reviews_file, 'r') as review_data:
            reviews = json.load(review_data)

        reviews_string = [r['desc'] for r in reviews]
        names_string = [r['name'] for r in reviews]

        new_markov = MarkovChain(self._markov_dir + '/beer_desc')
        new_markov.generateDatabase(' '.join(reviews_string))

        new_name_markov = MarkovChain(self._markov_dir + '/beer_name')
        new_name_markov.generateDatabase('.'.join(names_string))

        self._markov = new_markov
        self._name_markov = new_name_markov
Example #13
class markovbuild(object):
    '''Builds a markov chain DB and outputs data'''
    def __init__(self, target, data, lines=5):
        self.database = '/tmp/markov_%s.db' % target
        self.lines = lines
        self.data = '\n'.join(data)
        self.mchain = MarkovChain(self.database)

    def build(self):
        '''Builds a markov chain'''
        self.mchain.generateDatabase(self.data)

    def output(self):
        '''Outputs markov chain data'''
        self.build()
        return [self.mchain.generateString() for x in range(self.lines)]
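# Usage sketch (added for illustration; not part of the original example):
# build a chain from a few lines of text and print the generated strings.
mb = markovbuild("example", ["some lines of text", "more lines of text"], lines=5)
for line in mb.output():
    print(line)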
Example #14
    def analyze(self):
        """ Generate a Markov chain based on retrieved strings. """

        mc = MarkovChain()
        mc.generateDatabase(self.text)
        result = ''

        print("Generating:")

        # Create 10 sentences
        for i in range(0, 10):
            print("Sentence %d" % i)
            sentence = mc.generateString()
            result += sentence.capitalize() + '. '

        return result
Example #15
def poem():
    story = request.form['story'].encode('ascii', 'ignore').decode('ascii')
    lines = int(request.form['lines'])

    if not story:
        return redirect(url_for('index'))

    mc = MarkovChain()
    mc.generateDatabase(story)

    result = []
    for line in range(0, lines):
        new_line = mc.generateString()
        if new_line not in result:
            result.append(new_line)

    return render_template('poem.html', result=result, story=story)
Example #16
class SaulBotFactory(protocol.ClientFactory):
    protocol = SaulBot

    def __init__(self, reactor, channel='', nickname=''):
        self.channel = channel
        self.nickname = nickname
        self.markov = MarkovChain("./tempchain")
        self.reactor = reactor

        with open('corpus.txt', 'r') as f:
            self.markov.generateDatabase(f.read())

    def clientConnectionLost(self, connector, reason):
        print("Lost connection (%s), reconnecting." % (reason,))
        connector.connect()

    def clientConnectionFailed(self, connector, reason):
        print("Could not connect: %s" % (reason,))
Example #17
class MarkovBot(BotPlugin):

    def __init__(self):
        super(MarkovBot, self).__init__()
        self.markov = MarkovChain()

    @botcmd
    def talk(self, mess, args):
        """ Generate a sentence based on database """
        return self.markov.generateString()

    @botcmd
    def complete(self, mess, args):
        """ Try to complete a sentence """
        return self.markov.generateStringWithSeed(args)

    @botcmd
    def gendbfromfile(self, mess, args):
        """ Generate markov chain word database """
        try:
            with open(args) as txtFile:
                txt = txtFile.read()
        except IOError as e:
            return 'Error: could not open text file'
        # At this point, we've got the file contents
        if self.markov.generateDatabase(txt):
            return 'Done.'
        else:
            return 'Error: Could not generate database'

    @botcmd
    def gendbfromstring(self, mess, args):
        if self.markov.generateDatabase(args):
            return 'Done.'
        else:
            return 'Error: Could not generate database from String'

    @botcmd
    def gendbfromurl(self, mess, args):
        req = requests.get(args)
        if req.ok and self.markov.generateDatabase(req.content):
            return 'Done.'
        else:
            return 'Error: Could not generate database from URL'
Example #18
def main():
    with open("test.txt", "r") as myfile:
        data = myfile.read().replace('\n', '')
    mc = MarkovChain("./markovdb")

    # Start a session so we can have persistant cookies
    session = requests.Session()

    # This is the form data that the page sends when logging in
    login_data = {
        'user_email': EMAIL,
        'user_password': PASSWORD,
        'login': '******',
    }

    # Authenticate
    r = session.post(URL, data=login_data)

    mc.generateDatabase(data)

    for x in range(0, 5):
        r = os.urandom(16).hex()
        title = "Report#" + str(x) + " " + str(r)
        description = mc.generateString()

        # random coordinates, roughly within Europe
        y, x = uniform(-17, 43), uniform(28, 55)

        print (title)

        # Create new report based on random content
        report_data = {
            'title': title,
            'category': "2",
            'description': description,
            'latitude': x,
            'longitude': y,
            'newreport': "1",
        }

        r = session.post(newRep, data=report_data)
Example #19
def main():
    args = parser.parse_args()
    dirname=os.path.split(__file__)[0]
    filename=os.path.join(dirname,"phil.txt")
    title_filename=os.path.join(dirname,"phil_titles.txt")
    dbname1 = "database.pkl"
    dbname2 = "database_title.pkl"
    new_db = not os.path.exists(dbname1)
    body_maker = MarkovChain(dbname1)
    title_maker = MarkovChain(dbname2)
    if new_db:
        title_maker.generateDatabase(open(title_filename).read())
        title_maker.dumpdb()
        body_maker.generateDatabase(open(filename).read())
        body_maker.dumpdb()

    name = title_maker.generateString()
    body = '  '.join([body_maker.generateString() + '.' for i in range(3)])

    if args.repo:
        if args.token:
            token = args.token
        else:
            token_filename = os.path.join(dirname, "token.txt")
            if not os.path.exists(token_filename):
                sys.stderr.write("Please either specify --token=XXX on the command line or put a github API token in token.txt\n")
                sys.stderr.write("You can generate a token here: https://github.com/settings/tokens\n")
                sys.exit(1)
            token = open(token_filename).read().strip()

        import github
        gh=github.Github(token)
        user=gh.get_user()
        repo=user.get_repo(args.repo)
        issue = repo.create_issue(title=name, body=body)
        print(issue.html_url)
    else:
        print()
        print(name)
        print("-" * len(name))
        print(body)
Example #20
    if args.upper_case:
        inputText = inputText.upper()

    if args.lower_case:
        inputText = inputText.lower()

    if args.title_case:
        inputText = inputText.title()
    
    # generate a markov chain based text from the input
    if args.generate and args.generate > 0:
        # disable error message about on-the-fly database
        logging.disable(logging.WARNING)
        mc = MarkovChain("./markov-chain-database")
        mc.generateDatabase(inputText)

        # reinstate logging
        logging.disable(logging.NOTSET)

        generatedText = ""
        while len(generatedText) < args.generate:
            if generatedText != "":
                generatedText = generatedText + " "
            generatedText = generatedText + mc.generateString()
        inputText = generatedText

    if args.filter_punctuation:
        inputText = text.removePunctuation(inputText)

    if args.filter_numbers:
Example #21
# https://github.com/TehMillhouse/PyMarkovChain
# pip install PyMarkovChain
from pymarkovchain import MarkovChain

mc = MarkovChain("./am_m")
f = open('cap_short.txt', 'r')
mc.generateDatabase(f.read())
for x in range(0, 20):
    mc.generateString()
Example #22
class Trollette:
    def __init__(self):
        self.presenter = ""
        self.title = ""

        self.GIPHY_API_KEY = 'FILL API KEY HERE'

        self.slide_count = 0
        self.slide_min = 15
        self.slide_max = 25

        self.console = None
        self.output_dir = ""

        with open("terms.json", "r") as f:
            self.terms = json.load(f)

        try:
            with open(os.path.join("GIFs", "hashes.json"), "r") as f:
                self.gifs = json.load(f)
        except (IOError, ValueError):
            self.gifs = {}
            with open(os.path.join("GIFs", "hashes.json"), "w") as f:
                json.dump(self.gifs, f, indent=2)

        with open(os.path.join("Images", "hashes.json"), "r") as f:
            self.images = json.load(f)

        # Load up the proverb data
        with open(os.path.join("Proverbs", "facts"), "r") as f:
            self.proverb_lines = f.readlines()
        self.proverbs = [line.strip() for line in self.proverb_lines]
        self.proverb_markov = MarkovChain("markov.db")
        self.proverb_markov.generateDatabase("".join(self.proverb_lines), n=1)

        # Make the text data
        # self.my_face = comptroller.face(self.title)
        # self.slide_titles = self.my_face.get_titles(50)
        # self.slide_bullets = self.my_face.get_bullets(100)

        self.my_face = Face("")

        self.slide_titles = ["shit", "balls", "butts"]
        self.slide_bullets = ["butts", "do", "stuff", "f***s", "more f***s"]

        self.ppt = Presentation()
        self.slide_weights = SlideWeights()

    def generate_slide_deck(self):
        # Create a place to put data and resources
        self.output_dir = os.path.join("Output", "%s_%s_%s" % (self.title,
                                                               self.presenter,
                                                               datetime.datetime.strftime(datetime.datetime.now(), '%Y_%m_%d_%H_%M_%S')))

        self.resources_dir = os.path.join(self.output_dir, "Resources")

        # Start with a fresh PowerPoint
        self.ppt = Presentation()

        # Make sure the directories exist
        try:
            os.makedirs(self.output_dir)
            os.makedirs(self.resources_dir)
        except OSError:
            self.log("Directory %s already exists, overwriting..." % self.output_dir)

        self.slide_count = random.randint(self.slide_min, self.slide_max)
        self.log("Generating a slide deck of %d slides about '%s'" % (self.slide_count, self.title))

        try:
            self.log("Getting slide content...")
            self.my_face.set_topic(self.title)

            self.log("Generating slide titles...")
            self.slide_titles = self.my_face.get_titles(self.slide_count)

            self.log("Generating slide bullets...")
            self.slide_bullets = self.my_face.get_bullets(self.slide_count*3)
        except Exception as e:
            print(str(e))
            self.log("Problem generating content for a talk on '%s', exiting" % self.title)
            return

        #self.farm_gif_term(self.title)
        #sp = self.title.split(" ")
        #if len(sp) > 1:
        #    for i in range(len(sp)):
        #        if len(sp[i]) > 5:
        #            self.farm_gif_term(sp[i])
        #self.farm_image_term(self.title)

        self.log_slide_weights()

        self.create_title_slide()
        self.create_slides()

        slide_path = os.path.join(self.output_dir, "%s.pptx" % self.title)
        self.ppt.save(slide_path)

        self.log("Successfully generated PPT on '%s' to:\n%s" % (self.title, slide_path))
Example #23
class BuzzFeeder(object):

    def __init__(self, **kwargs):
        self.chain = MarkovChain("%s/static/markov" % dirname(__file__))
        self.proceed = True
        for k, v in kwargs.items():
            if k not in defaults:
                raise ValueError
            setattr(self, k, kwargs.get(v, defaults[k]))
        with open(self.filename) as f:
            self.data = load(f)
        if not getattr(self, 'seed'):
            self.seed = False

    @property
    def titles(self):
        return [x['title'] for x in self.data if x]

    @property
    def text(self):
        return rc(r'[%s]' % escape(punctuation)) \
            .sub(" b", "\n".join(self.titles).lower())

    def generate_database(self):
        self.chain.generateDatabase(self.text)

    def ask(self, prompt, opts=[]):
        prompt = ">>>  " + prompt
        if opts:
            prompt += " [%s]" % "|".join(opts)
        response = input(prompt).lower()
        if 'x' in response:
            self.proceed = False
            return self.proceed
        if opts and response not in opts:
            raise ValueError
        return response

    def prompt(self, candidate):
        print ">>> '%s'" % candidate
        if not self.proceed:
            return False
        q = self.ask("Tweet this text?", opts=['y', 'n'])
        if not q:
            return False
        if 'y' in q:
            return candidate
        if 'n' in q:
            if 'y' in self.ask("Edit this text?", opts=['y', 'n']):
                return self.ask("Enter edited text: ")
            else:
                return True

    def generate(self):
        if not self.seed:
            yielder = self.chain.generateString
        else:
            yielder = self.chain.generateStringWithSeed
        yargs = [] if not self.seed else [self.seed]
        while self.proceed:
            yield yielder(*yargs) \
                .split(".py")[-1] \
                .strip() \
                .title()

    def run(self):
        print("[ press X to stop at any time ]")
        with open(self.output, "a") as tweets:
            for candidate in self.generate():
                response = self.prompt(candidate)
                if not response:
                    break
                if not isinstance(response, bool):
                    tweets.write(response.encode('ascii', 'ignore').decode('ascii'))
                    tweets.write('\n')
                print('')
Example #24
    db_name_hashed = hashlib.md5(
        artist_name.lower().encode('utf-8')).hexdigest()
    mc = MarkovChain(db_name_hashed)

    # Checking if the database already exists, if so uses the cache instead another API call
    if not os.path.isfile(db_name_hashed):
        print(
            "No data cached. Please be patient while we search the lyrics of %s."
            % artist_name)

        # Adding lyrics to a single gigant string
        lyrics = ''

        # Parsing each lyric from this artist.
        # [http://api.wikia.com/wiki/LyricWiki_API]
        artist = requests.get(API_URI, params=params).json()
        for album in artist['albums']:
            for song in album['songs']:
                params = {'artist': artist_name, 'song': song}
                print("Parsing \"{}\" from Wikia.".format(song))
                response = requests.get(API_URI,
                                        params=params).json()["lyrics"]
                lyrics += response.replace('[...]', '') + ' '

        # Generating the database
        mc.generateDatabase(lyrics)
        mc.dumpdb()

    # Printing a string
    for i in range(0, int(number_of_phrases)):
        print(mc.generateString())
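# Note (added): because the database file name is derived from the artist name
# and dumpdb() persists the chain to disk, re-running the script for the same
# artist loads the cached database instead of hitting the Wikia API again.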
Example #25
class MarkovBot(BotPlugin):
    def __init__(self):
        super(MarkovBot, self).__init__()
        self.sentenceSep = None
        self.markov = MarkovChain(dbFilePath="./markovdb")

    @botcmd
    def talk(self, mess, args):
        """ Generate a sentence based on database """
        return self.markov.generateString()

    @botcmd
    def complete(self, mess, args):
        """ Try to complete a sentence """
        return self.markov.generateStringWithSeed(args)

    @botcmd
    def gendbfromfile(self, mess, args):
        """ Generate markov chain word database based on local file """
        try:
            with open(args) as txtFile:
                txt = txtFile.read()
        except IOError as e:
            return "Error: could not open text file"
        # At this point, we've got the file contents
        if self.sentenceSep:
            result = self.markov.generateDatabase(txt, self.sentenceSep)
        else:
            result = self.markov.generateDatabase(txt)
        if result:
            return "Done."
        else:
            return "Error: Could not generate database"

    @botcmd
    def setsentencesep(self, mess, args):
        """ Specify how to detect sentence borders """
        self.sentenceSep = args

    @botcmd
    def gendbfromstring(self, mess, args):
        """ Generate markov chain word database based on given string """
        if self.sentenceSep:
            result = self.markov.generateDatabase(args, self.sentenceSep)
        else:
            result = self.markov.generateDatabase(args)
        if result:
            return "Done."
        else:
            return "Error: Could not generate database from String"

    @botcmd
    def gendbfromurl(self, mess, args):
        """ Generate markov chain word database based on contents of url """
        response, content = httplib2.Http().request(args, "GET")
        if response["status"] == "200":
            if self.sentenceSep:
                result = self.markov.generateDatabase(content.decode("utf-8"), self.sentenceSep)
            else:
                result = self.markov.generateDatabase(content.decode("utf-8"))
            if result:
                return "Done."
            else:
                return "Error: Could not generate database from URL"
Example #27
# generates text using Markov Chain
# uses the PyMarkovChain implementation: https://pypi.python.org/pypi/PyMarkovChain/
# to install on linux, run "pip install PyMarkovChain" in the shell

# import function
from pymarkovchain import MarkovChain
import re, string

# first, read in a file with training text data (I made mine by getting all my
# blog text as a .xml, grabbing only the nodes with the actual blog text in
# them and then scrubbing all html tags)
with open("extractedText.txt", "r") as f:
    textToScrub = f.read()

# tidy up our text input a bit
text = re.sub(r'^https?:\/\/.*[\r\n]*', '', textToScrub,
              flags=re.MULTILINE)  #get rid of urls/links
text = text.replace(u'\xa0', u' ')  #throw out those pesky non-breaking spaces

# then create the markov chain generator
mc = MarkovChain("./markov")
mc.generateDatabase(text)

# finally, generate some text -- each pass through the loop prints a new string
for num in range(1, 10):
    print(mc.generateString())
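# Note (added): pymarkovchain can also persist the database it just built, so
# later runs skip retraining. dumpdb() writes to the path passed to the
# constructor, and a fresh MarkovChain pointed at the same path loads it:
mc.dumpdb()
mc2 = MarkovChain("./markov")
print(mc2.generateString())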
Example #28
class Trollette:
    def __init__(self):
        self.presenter = ""
        self.title = ""

        self.slide_count = 0
        self.slide_min = 15
        self.slide_max = 25

        self.console = None
        self.output_dir = ""

        with open("terms.json", "r") as f:
            self.terms = json.load(f)

        with open(os.path.join("GIFs", "hashes.json"), "r") as f:
            self.gifs = json.load(f)

        with open(os.path.join("Images", "hashes.json"), "r") as f:
            self.images = json.load(f)

        # Load up the proverb data
        with open(os.path.join("Proverbs", "facts"), "r") as f:
            self.proverb_lines = f.readlines()
        self.proverbs = [line.strip() for line in self.proverb_lines]
        self.proverb_markov = MarkovChain("markov.db")
        self.proverb_markov.generateDatabase("".join(self.proverb_lines), n=1)

        # Make the text data
        # self.my_face = comptroller.face(self.title)
        # self.slide_titles = self.my_face.get_titles(50)
        # self.slide_bullets = self.my_face.get_bullets(100)

        self.my_face = Face("")

        self.slide_titles = ["shit", "balls", "butts"]
        self.slide_bullets = ["butts", "do", "stuff", "f***s", "more f***s"]

        self.ppt = Presentation()
        self.slide_weights = SlideWeights()

    def generate_slide_deck(self):
        # Create a place to put data and resources
        self.output_dir = os.path.join(
            "Output",
            "%s_%s_%s" % (self.title, self.presenter,
                          datetime.datetime.strftime(datetime.datetime.now(),
                                                     '%Y_%m_%d_%H_%M_%S')))

        self.resources_dir = os.path.join(self.output_dir, "Resources")

        # Start with a fresh PowerPoint
        self.ppt = Presentation()

        # Make sure the directories exist
        try:
            os.makedirs(self.output_dir)
            os.makedirs(self.resources_dir)
        except OSError:
            self.log("Directory %s already exists, overwriting..." %
                     self.output_dir)

        self.slide_count = random.randint(self.slide_min, self.slide_max)
        self.log("Generating a slide deck of %d slides about %s" %
                 (self.slide_count, self.title))

        try:
            self.log("Getting slide content...")
            self.my_face.set_topic(self.title)

            self.log("Generating slide titles...")
            self.slide_titles = self.my_face.get_titles(self.slide_count)

            self.log("Generating slide bullets...")
            self.slide_bullets = self.my_face.get_bullets(self.slide_count * 3)
        except Exception:
            self.log(
                "Problem generating content for a talk on %s, exiting..." %
                self.title)
            return

        #self.farm_gif_term(self.title)
        #sp = self.title.split(" ")
        #if len(sp) > 1:
        #    for i in range(len(sp)):
        #        if len(sp[i]) > 5:
        #            self.farm_gif_term(sp[i])
        #self.farm_image_term(self.title)

        self.log_slide_weights()

        self.create_title_slide()
        self.create_slides()

        slide_path = os.path.join(self.output_dir, "%s.pptx" % self.title)
        self.ppt.save(slide_path)

        self.log("Successfully generated PPT on %s to %s" %
                 (self.title, slide_path))

    def create_title_slide(self):
        title_slide_layout = self.ppt.slide_layouts[0]
        slide = self.ppt.slides.add_slide(title_slide_layout)
        title = slide.shapes.title
        subtitle = slide.placeholders[1]

        title.text = self.title
        subtitle.text = self.presenter

    def create_slides(self):
        for i in range(self.slide_count):
            choice = self.slide_weights.choose_weighted()

            self.log("  Generating slide #%d: %s" % (i + 1, choice))

            new_slide_layout = None
            if choice == "Single GIF":
                ns = self.create_gif_slide(random.choice(self.slide_titles),
                                           self.get_giphy_search_term(), i)
            elif choice == "Full Slide GIF":
                ns = self.create_full_gif_slide(self.get_giphy_search_term(),
                                                i)
            elif choice == "Single Image":
                ns = self.create_image_slide(random.choice(self.slide_titles),
                                             self.get_image_search_term(), i)
            elif choice == "Full Slide Image":
                ns = self.create_full_image_slide(self.get_image_search_term(),
                                                  i)
            elif choice == "Information":
                ns = self.create_info_slide(i)
            elif choice == "Quotation":
                ns = self.create_quote_slide()

    def create_single_full_image_slide(self, image_path):
        blank_slide_layout = self.ppt.slide_layouts[6]
        new_slide = self.ppt.slides.add_slide(blank_slide_layout)

        left = Inches(0)
        top = Inches(0)
        height = Inches(8)
        width = Inches(10)
        pic = new_slide.shapes.add_picture(image_path,
                                           left,
                                           top,
                                           height=height,
                                           width=width)
        return new_slide

    def create_single_image_slide(self, slide_title, image_path):

        blank_slide_layout = self.ppt.slide_layouts[1]
        new_slide = self.ppt.slides.add_slide(blank_slide_layout)

        for shape in new_slide.shapes:
            if shape.is_placeholder:
                phf = shape.placeholder_format

                if phf.type == 1:
                    shape.text = slide_title

        left = Inches(1)
        top = Inches(1)
        height = Inches(6)
        width = Inches(8)
        pic = new_slide.shapes.add_picture(image_path,
                                           left,
                                           top,
                                           height=height,
                                           width=width)

        return new_slide

    def download_gif(self, term, slide_num):
        # If we have at least 3 local gifs, use one of those
        if (term in self.gifs) and (len(self.gifs[term]) > 3):
            return os.path.join("GIFs",
                                "%s.gif" % random.choice(self.gifs[term]))

        try:
            # Download the gif
            img = translate(term)
            image_path = os.path.join(self.resources_dir, "%d.gif" % slide_num)
            wget.download(img.fixed_height.url, image_path)

            file_hasher = hashlib.md5()
            with open(image_path, "rb") as f:
                file_hasher.update(f.read())
            file_md5 = file_hasher.hexdigest()

            if not (term in self.gifs):
                self.gifs[term] = []

            if not (file_md5 in self.gifs[term]):
                self.gifs[term].append(file_md5)
                shutil.copy(image_path,
                            os.path.join("GIFs", "%s.gif" % file_md5))
                with open(os.path.join("GIFs", "hashes.json"), "w") as f:
                    json.dump(self.gifs, f, indent=2)

            return image_path
        except:
            return None

    def download_image(self, term, slide_num):
        # If we have at least 3 local images, use one of those
        if (term in self.images) and (len(self.images[term]) > 3):
            return os.path.join("Images",
                                "%s.img" % random.choice(self.images[term]))

        try:
            search_term = term
            if (random.randint(0, 100) % 2) == 0:
                search_term = self.title

            download_attempts = 0
            image_bytes = ""
            image_path = ""
            while download_attempts < 10:

                fetcher = urllib2.build_opener()
                start_index = random.randint(0, 50)
                search_url = "http://ajax.googleapis.com/ajax/services/search/images?v=1.0&q=%s&start=%s" % (
                    search_term, str(start_index))
                f = fetcher.open(search_url)
                deserialized_output = simplejson.load(f)

                image_url = deserialized_output['responseData']['results'][
                    random.randint(
                        0,
                        len(deserialized_output['responseData']['results']) -
                        1)]['unescapedUrl']
                image_path = os.path.join(self.resources_dir,
                                          "%d.img" % slide_num)
                wget.download(image_url, image_path)

                with open(image_path, "rb") as f:
                    image_bytes = f.read()

                if (not image_bytes.startswith("<!DOCTYPE html>")) and (
                        not image_bytes.startswith("<html>")):
                    break

                download_attempts += 1
                self.log(
                    "    Attempting to download image about %s failed try #%d"
                    % (search_term, download_attempts))

            if image_bytes.startswith(
                    "<!DOCTYPE html") or image_bytes.startswith("<html>"):
                return None

            file_hasher = hashlib.md5()
            file_hasher.update(image_bytes)
            file_md5 = file_hasher.hexdigest()

            if not (term in self.images):
                self.images[term] = []

            if not (file_md5 in self.images[term]):
                self.images[term].append(file_md5)
                shutil.copy(image_path,
                            os.path.join("Images", "%s.img" % file_md5))
                with open(os.path.join("Images", "hashes.json"), "w") as f:
                    json.dump(self.images, f, indent=2)

            return image_path
        except:
            return None

    def create_gif_slide(self, slide_title, term, slide_num):
        image_path = self.download_gif(term, slide_num)
        if image_path:
            return self.create_single_image_slide(slide_title, image_path)

    def create_full_gif_slide(self, term, slide_num):
        image_path = self.download_gif(term, slide_num)
        if image_path:
            return self.create_single_full_image_slide(image_path)

    def create_image_slide(self, slide_title, term, slide_num):
        while True:
            try:
                image_path = self.download_image(term, slide_num)
                if image_path:
                    return self.create_single_image_slide(
                        slide_title, image_path)
            except:
                pass

    def create_full_image_slide(self, term, slide_num):
        image_path = self.download_image(term, slide_num)
        if image_path:
            return self.create_single_full_image_slide(image_path)

    def create_info_slide(self, slide_num):
        slide_title_info = random.choice(self.slide_titles)
        slide_title = slide_title_info
        if (random.randint(0, 100) % 3) == 0:
            slide_title = self.get_markov_proverb()

        sb = random.sample(self.slide_bullets, random.randint(1, 4))
        if (random.randint(0, 100) % 4) == 0:
            sb.append(self.get_markov_proverb())

        bullet_slide_layout = self.ppt.slide_layouts[1]
        new_slide = self.ppt.slides.add_slide(bullet_slide_layout)
        shapes = new_slide.shapes

        title_shape = shapes.title
        body_shape = shapes.placeholders[1]
        body_shape.width = Inches(4)
        body_shape.left = Inches(1)
        body_shape.top = Inches(2)

        title_shape.text = slide_title

        tf = body_shape.text_frame
        for b in sb:
            p = tf.add_paragraph()
            #p.text = b

            p.alignment = PP_PARAGRAPH_ALIGNMENT.LEFT
            run1 = p.add_run()
            run1.text = b
            font1 = run1.font
            font1.name = 'Sans Serif'
            font1.size = Pt(20)
            font1.italic = True
            font1.bold = True

        image_path = None
        attempts = 0
        while attempts < 10:
            try:
                tries = 0
                while (not image_path) and (tries < 10):
                    if (random.randint(0, 100) % 2) == 0:
                        search_term = self.get_giphy_search_term()
                        image_path = self.download_gif(search_term, slide_num)
                    else:
                        search_term = self.get_image_search_term()
                        image_path = self.download_image(
                            search_term, slide_num)

                    tries += 1

                if tries < 10:
                    left = Inches(5.5)
                    top = Inches(3)
                    #height = Inches(3)
                    width = Inches(3)
                    pic = new_slide.shapes.add_picture(image_path,
                                                       left,
                                                       top,
                                                       width=width)
                    break
                attempts += 1

            except:
                attempts += 1

        return new_slide

    def create_quote_slide(self):
        # Pick a random quote category and quote
        cat = random.choice(self.terms["quote_categories"])
        with open(os.path.join("Quotes", "quotes_%s.json" % cat)) as f:
            q1 = random.choice(json.load(f))

        cat = random.choice(self.terms["quote_categories"])
        with open(os.path.join("Quotes", "quotes_%s.json" % cat)) as f:
            q2 = random.choice(json.load(f))

        quote_text = "\"%s\"" % q1["quote"]
        if (random.randint(0, 100) % 5) == 0:
            quote_text = random.choice(self.proverbs)

        quote_author = "- %s" % q2["name"]

        blank_slide_layout = self.ppt.slide_layouts[2]
        new_slide = self.ppt.slides.add_slide(blank_slide_layout)

        for shape in new_slide.shapes:
            if shape.is_placeholder:
                phf = shape.placeholder_format
                if phf.type == 1:
                    # Put in the quote title
                    shape.text = random.choice(self.terms["quote_titles"])

                elif phf.type == 2:
                    text_frame = shape.text_frame

                    # Create the quote text paragraph
                    p1 = text_frame.paragraphs[0]
                    p1.alignment = PP_PARAGRAPH_ALIGNMENT.LEFT
                    run1 = p1.add_run()
                    run1.text = quote_text
                    font1 = run1.font
                    font1.name = 'Sans Serif'
                    font1.size = Pt(30)
                    font1.italic = True
                    font1.bold = True

                    # Create the Author text paragraph
                    p2 = text_frame.add_paragraph()
                    p2.alignment = PP_PARAGRAPH_ALIGNMENT.RIGHT
                    run2 = p2.add_run()
                    run2.text = quote_author
                    font2 = run2.font
                    font2.name = 'Calibri'
                    font2.size = Pt(24)

        return new_slide

    def get_giphy_search_term(self):
        st = random.choice(self.terms["giphy_searches"])
        if (random.randint(0, 100) % 5) == 0:
            st = self.title
        return st

    def get_image_search_term(self):
        st = random.choice(self.terms["image_searches"])
        if (random.randint(0, 100) % 2) == 0:
            st = self.title
        return st

    def get_proverb(self):
        return random.choice(self.proverb_lines)

    def get_markov_proverb(self, min=5, max=10):
        b = ""

        while True:
            b = self.proverb_markov.generateString()
            s = b.split(" ")
            if min <= len(s) <= max:
                break

        return b

    def add_term(self, term_type, term):
        if term in self.terms[term_type]:
            return "Term \"%s\" is already in %s!" % (term, term_type)
        else:
            self.terms[term_type].append(term)
            with open("terms.json", "w") as f:
                json.dump(self.terms, f, indent=4)
            return "Term \"%s\" added to %s." % (term, term_type)

    def delete_term(self, term_type, term):
        if not (term in self.terms[term_type]):
            return "Term \"%s\" isn't in %s, can't delete!" % (term, term_type)
        else:
            self.terms[term_type].remove(term)
            with open("terms.json", "w") as f:
                json.dump(self.terms, f, indent=4)
            return "Term \"%s\" removed from %s." % (term, term_type)

    def show_term_counts(self, term_type, term_json):
        log_str = "%s Terms:\n" % term_type
        for term in self.terms[term_type]:
            if term in term_json:
                log_str += "  %s: %d\n" % (term, len(term_json[term]))
            else:
                log_str += "  %s: 0\n" % term
        self.log(log_str)

    def get_file_md5(self, file_path):
        with open(file_path, "rb") as f:
            image_bytes = f.read()

        file_hasher = hashlib.md5()
        file_hasher.update(image_bytes)
        return file_hasher.hexdigest()

    def farm_image_term(self, term, amount=25, threshold=10):
        self.log("Farming images for %s..." % term)

        if not (term in self.images):
            self.images[term] = []

        attempt_count = 0
        while (attempt_count < threshold) and (len(self.images[term]) <
                                               amount):
            myopener = MyOpener()
            page = myopener.open(
                'https://www.google.pt/search?q=%s&source=lnms&tbm=isch&sa=X&tbs=isz:l&tbm=isch'
                % term.replace(" ", "+"))
            html = page.read()

            for match in re.finditer(
                    r'<a href="/imgres\?imgurl=(.*?)&amp;imgrefurl', html,
                    re.IGNORECASE | re.DOTALL | re.MULTILINE):
                if len(self.images[term]) >= amount:
                    break

                try:
                    os.remove("test.img")
                except:
                    pass

                try:
                    path = urlparse.urlsplit(match.group(1)).path
                    self.log("  Downloading %s" % match.group(1))
                    myopener.retrieve(match.group(1), "test.img")

                    image_md5 = self.get_file_md5("test.img")

                    if not (image_md5 in self.images[term]):
                        self.images[term].append(image_md5)
                        shutil.copy(
                            "test.img",
                            os.path.join("Images", "%s.img" % image_md5))
                        os.remove("test.img")
                        self.log("    Image saved to archive. %d/%d images." %
                                 (len(self.images[term]), amount))
                        attempt_count = 0
                    else:
                        self.log("    Already had image!")
                        attempt_count += 1
                except:
                    self.log("    Downloading failed")
                    attempt_count += 1

        self.log("Farming of %s images complete, now holding %d images" %
                 (term, len(self.images[term])))

        with open(os.path.join("Images", "hashes.json"), "w") as f:
            json.dump(self.images, f, indent=2)

    def farm_images(self, amount=25, threshold=10):
        self.show_term_counts("image_searches", self.images)

        all_farm = self.terms["image_searches"]
        all_farm.extend(self.terms["talk_titles"])

        for term in all_farm:
            self.farm_image_term(term, amount, threshold)

    def farm_gif_term(self, term, amount=25, threshold=10):
        self.log("Farming GIFs for %s..." % term)

        if not (term in self.gifs):
            self.gifs[term] = []

        attempt_count = 0
        while (attempt_count < threshold) and (len(self.gifs[term]) < amount):

            image_path = "test.gif"
            try:
                os.remove(image_path)
            except:
                pass

            try:
                img = translate(term)
                wget.download(img.fixed_height.url, image_path)

                image_md5 = self.get_file_md5("test.gif")

                if not (image_md5 in self.gifs[term]):
                    self.gifs[term].append(image_md5)
                    shutil.copy(image_path,
                                os.path.join("GIFs", "%s.gif" % image_md5))
                    self.log("    GIF saved to archive. %d/%d GIFs." %
                             (len(self.gifs[term]), amount))
                    attempt_count = 0
                else:
                    self.log("    Already had GIF!")
                    attempt_count += 1
            except:
                self.log("    Downloading failed")
                attempt_count += 1

        self.log("Farming of %s GIFs complete, now holding %d GIFs" %
                 (term, len(self.gifs[term])))

        with open(os.path.join("GIFs", "hashes.json"), "w") as f:
            json.dump(self.gifs, f, indent=2)

    def farm_gifs(self, amount=25, threshold=10):
        self.show_term_counts("giphy_searches", self.gifs)

        all_farm = self.terms["giphy_searches"]
        all_farm.extend(self.terms["talk_titles"])

        for term in all_farm:

            self.log("Farming GIFs for %s..." % term)

            if not (term in self.gifs):
                self.gifs[term] = []

            self.farm_gif_term(term, amount, threshold)

    def farm_content(self, all_content):
        for talk_title in self.terms["talk_titles"]:
            talk_path = os.path.join("Content", "%s.txt" % talk_title)
            # Either we're replacing all content or we're only replacing files that don't exist
            if all_content or (not os.path.exists(talk_path)):
                self.log("Farming data on %s..." % talk_title)
                with open(talk_path, "w") as f:
                    content = self.my_face.fully_research_topic(
                        talk_title, self.log)
                    if type(content) is str:
                        clean_content = content
                    else:
                        clean_content = unicodedata.normalize(
                            'NFKD', content).encode('ascii', 'ignore')
                    f.write(clean_content)

    def log_slide_weights(self):
        self.log(self.slide_weights.get_weights_string())

    def log(self, message):
        if self.console:
            self.console.config(state=tk.NORMAL)
            self.console.insert(tk.END, "%s\n" % message)
            self.console.see(tk.END)
            self.console.config(state=tk.DISABLED)
            self.console.update()
        else:
            print(message)
Example #29
class ResponseGenerator:
    def __init__(self):
        self.eightball = EightBall()
        self.excuses = Excuses()
        self.commands = Commands()
        self.straws = Straws("/", "=", "/")
        self.chain = MarkovChain("./markovdb")
        self.chain.db = _db_factory()
        with open("markovsource", "r") as markov_file:
            self.chain.generateDatabase(markov_file.readline())

    def generate_response(self, body):
        # Tokenize body
        body_tokens = body.lower().split(" ")
        # Important commands can only be run if line is started with the word
        command = body_tokens[0]

        if command == '!create':
            new_command = body_tokens[1]
            response_index = body.find(new_command) + len(new_command) + 1
            response = body[response_index:]
            self.commands.set(new_command, response)

            return "Command !{0} created.".format(new_command)

        elif command == "!list":
            string = "!create !delete !reload !excuse !8ball !straws !image "
            for command_ in self.commands.list():
                string += "!{0} ".format(command_)

            return string

        elif command == "!delete":
            cleaned_command = body_tokens[1].lower()
            success = self.commands.delete(cleaned_command)

            if success:
                return "Command !{0} deleted.".format(cleaned_command)
            else:
                return "Command !{0} does not exist.".format(cleaned_command)

        elif command == "!reload":
            with open("markovsource", "r") as markov_file:
                self.chain.generateDatabase(markov_file.readline())

            return "Successfully reloaded my word database"

        # Not a system command, continue attempting to parse
        else:
            for token in body_tokens:
                if token == "!fortune":
                    # TODO
                    pass
                elif token == "!excuse":
                    return self.excuses.get()

                elif token == "!8ball":
                    return self.eightball.get()

                elif token == "!straws":
                    return self.straws.get()

                elif token == "!image":
                    return "/get " + self.chain.generateString()

                elif token == "tase":
                    return self.chain.generateString()

                elif len(token) > 0 and token[0] == "!":
                    return self.commands.get(token[1:])

                # we have a sentence to listen to, arbitrary length requirement
                elif len(body) > 10:
                    string_to_write = body + "."
                    if body.endswith("."):
                        string_to_write = body

                    with open("markovsource", "a") as markov_file:
                        markov_file.write(string_to_write)
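# Usage sketch (added for illustration, shown commented out because the
# supporting classes -- EightBall, Excuses, Commands, Straws, _db_factory --
# are defined elsewhere in the original project):
#
#   rg = ResponseGenerator()
#   print(rg.generate_response("!8ball will this build pass"))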
Example #30
# import json

# tag_cloud_file = open('tag_cloud.json', 'r+')
# tag_cloud_csv_file = open('tag_cloud.csv', 'w+')
# tag_cloud = json.load(tag_cloud_file)
# for tag in tag_cloud:
#     tag_cloud_csv_file.write('%(text)s, %(size)s\n' % tag)

# tag_cloud_csv_file.close()

import random
characters = [
    "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N"
]

for index in range(20):
    print([random.choice(characters) for x in range(3)])

from pymarkovchain import MarkovChain
mc = MarkovChain("./markov")
mc.generateDatabase("a b c")
Example #31
from pymarkovchain import MarkovChain

mc = MarkovChain("../markov_db")
with open('./fixtures/wikipedia_india_content.txt') as seed_file:
    seed_text = seed_file.read()
mc.generateDatabase(seed_text)
print(mc.generateString())
Example #32
    if tree.label() == "PERSON":
        person = ' '.join(c[0] for c in tree.leaves())
        persons.append(person)
    if tree.label() == "LOCATION":
        location = ' '.join(c[0] for c in tree.leaves())
        locations.append(location)
    if tree.label() == "ORGANIZATION":
        organization = ' '.join(c[0] for c in tree.leaves())
        organizations.append(organization)
    if tree.label() == "DATE":
        date = ' '.join(c[0] for c in tree.leaves())
        dates.append(date)
    if tree.label() == "TIME":
        time = ' '.join(c[0] for c in tree.leaves())
        timess.append(time)
    if tree.label() == "PERCENT":
        percent = ' '.join(c[0] for c in tree.leaves())
        percents.append(percent)
    if tree.label() == "FACILITY":
        facility = ' '.join(c[0] for c in tree.leaves())
        facilities.append(facility)
    if tree.label() == "GPE":
        gpe = ' '.join(c[0] for c in tree.leaves())
        gpes.append(gpe)


mc = MarkovChain()
for i in range(1, 20):
    mc.generateDatabase(r)
    g = mc.generateString()
    print(g)
Example #33
from pymarkovchain import MarkovChain

mc = MarkovChain("./markov")

texts = [
    "text/confessions.txt",
    "text/discourses-and-social-contract.txt",
    "text/emile.txt"
]

entire_string = ""

for text_url in texts:
    f = open(text_url, 'r')
    entire_string += f.read()
    entire_string += "\n"
    f.close()

test = open("test.txt", 'w')
test.write(entire_string)
test.close()

mc.generateDatabase(entire_string, '\n')

print(mc.generateString())

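# write 10,000 independently sampled strings, one per output file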
for i in range(10000):
    f = open("output/{0}.txt".format(i), 'w')
    f.write(mc.generateString().strip())
    f.close()
Beispiel #34
0
def main(username):
    r = praw.Reddit(user_agent='trollolol v0.1')
    r.config.decode_html_entities = True

    m = MarkovChain('markov-data/%s.chain' % username)

    last_comment = None
    try:
        last_comment = Node.objects(
            username=username).order_by('-created').first()
        if last_comment:
            print("Checking for new messages.")
            comments = r.get_redditor(username).get_comments(
                limit=500, params={'after': last_comment.node_id})
        else:
            raise
    except:
        print("No messages fetched yet, doing inital import")
        comments = r.get_redditor(username).get_comments(limit=500)

    for comment in comments:
        try:
            node = Node.objects.get(node_id=comment.name)
        except:
            node = Node(node_id=comment.name,
                        parent_id=comment.parent_id,
                        body=comment.body,
                        created=comment.created,
                        username=username)
            node.save()

    first_comment = Node.objects(
        username=username).order_by('+created').first()
    if first_comment:
        print("Checking for messages before %s." % first_comment.node_id)
        comments = r.get_redditor(username).get_comments(
            limit=500, params={'before': first_comment.node_id})

        for comment in comments:
            try:
                node = Node.objects.get(node_id=comment.name)
            except:
                node = Node(node_id=comment.name,
                            parent_id=comment.parent_id,
                            body=comment.body,
                            created=comment.created,
                            username=username)
                node.save()

    comments = Node.objects(username=username).all()

    corpus = []
    for comment in comments:
        corpus.append(comment.body)

    shuffle(corpus)
    if len(corpus) > 0:
        print(
            "We have %i messages to work with. Building new markov corpus now."
            % len(corpus))
        m.generateDatabase(" ".join(corpus))

        print("Looking for acceptable output for first round of transforms.")
        output = []
        tries = 0
        while len(output) < 10:
            tries = tries + 1
            result = m.generateString()
            if tries < 100:
                if len(result.split(" ")) >= 10:
                    sys.stdout.write("x")
                    output.append(result)
                else:
                    sys.stdout.write(".")

        print("")

        response = ""
        for result in output:
            response = response + " " + result

        print(response)
    else:
        print("No comments found.")
Beispiel #35
0
			w1, w2 = w2, random.choice(self.cache[(w1, w2)])
			gen_words.append(w2)
		return ' '.join(gen_words)
	
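# (note) the fragment above belongs to a hand-rolled, cache-based Markov class,
# separate from pymarkovchain; it is the Markov class instantiated below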

# Create an instance of the markov chain. By default, it uses MarkovChain.py's location to
# store and load its database files to. You probably want to give it another location, like so:
mc = MarkovChain(os.environ['LAV_DIR'] + "/out/twitterUD.db")
text_file = os.environ['LAV_DIR'] + '/out/twitterUD.csv'
cred = []
with open(text_file, 'rb') as f:
    rows = csv.reader(f, delimiter=',', quotechar='|')
    for row in rows:
        cred = cred + row

mc.generateDatabase('.'.join(cred))
# To let the markov chain generate some text, execute
print mc.generateString()
print mc.generateString()
print mc.generateString()
print mc.generateString()

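# dumpdb() persists the chain's database to the path given to the constructor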
mc.dumpdb()


markov = Markov(open(text_file,'rb'))
print markov.generate_markov_text()
print markov.generate_markov_text()
print markov.generate_markov_text()
print markov.generate_markov_text()
Beispiel #36
0
lyrics_directory = "data1/"
files = glob.glob(lyrics_directory + '*.txt')
# iterate over the list getting each file
all_lyrics = ""
for file in files:
    # open the file and then call .read() to get the text
    print(file)
    with open(file) as f:
        text = f.read()
        verse_lyrics = parse_file(text)
        verse_lyrics = re.sub("[\[\]\(\)\"]", " ", verse_lyrics)
        verse_lyrics = re.sub(" +", " ", verse_lyrics)
        all_lyrics += verse_lyrics

mc = MarkovChain("test")
mc.generateDatabase(all_lyrics)

output_directory = "generated_lyrics/"
if not os.path.exists(output_directory):
    os.makedirs(output_directory)

number_of_phrases = 8
num_files = 1000
for i in range(num_files):
    # Printing a string
    with open(output_directory + "{}.txt".format(i), "w") as f:
        for i in range(0, int(number_of_phrases)):

            while True:
                line = mc.generateString()
                if len(line) > 1:
                    # assumed completion of the truncated source: keep the line
                    f.write(line + "\n")
                    break
Beispiel #37
0
import tweepy
from tweepy import OAuthHandler
from pymarkovchain import MarkovChain
import requests, time
import re

r = requests.get('http://www.kanyerest.xyz/api/lyrics')
mc = MarkovChain("")
mc.generateDatabase(r.content)


def writeLine():
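    # note: r'\n' is the literal two-character sequence backslash-n, which
    # matches escaped newlines in the raw API payload (assumed intent)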
    tweet = mc.generateString()
    tweet = tweet[:140].rsplit(r'\n', 1)[0]
    return tweet


def createRhymePair():
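    # crude rhyme heuristic: resample line2 until it shares its last two
    # characters with line1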
    song = []
    line1 = re.sub(r'\)|\(', '', writeLine())
    song.append(line1)
    line2 = re.sub(r'\)|\(', '', writeLine())
    while line2[-2:] != line1[-2:]:
        line2 = re.sub(r'\)|\(', '', writeLine())
    song.append(line2)
    return song


def createSong(n):
    song = []
    for i in range(0, n - 1):
    for root, dirs, files in os.walk(_dir, topdown=False):
        random.shuffle(files)
        for name in files:
            num_files -= 1
            if num_files > 0:
                _path = "{}/{}".format(_dir,name)
                train_text += file_get_contents(_path)


#scrub train_text
train_text = re.sub('<[^<]+?>', '', train_text) #remove html
train_text = re.sub(r'[^a-zA-Z. ]+', ' ', train_text).replace('\n', '') #remove non-alphanumeric chars
for word in remove_words:
    train_text = train_text.replace(word,'') #remove words

#create markov db
mc.generateDatabase(train_text)

#tweet
for x in range(0,num_tweets):
    random.shuffle(seed_words)
    status = (ucfirst(mc.generateStringWithSeed(seed_words[0])) + ".  ")
    if not validate_tweet(status):
        continue
    try:
        status = api.PostUpdate(status)
    except:
        pass
    time.sleep(wait_time_between_tweets_in_secs)

        cur_ps = cur_note.ps
        next_val = cur_ps - first_ps
        # Limit the size of jumps?
        if next_val < 8.0 and next_val > -8.0:
          to_add = str(next_val)

    # Add the duration
    to_add = to_add + "!@#" + dur
    db = db + (' ' + to_add)
  db = db + ('\n')

keepGoing = 1
sen = ""

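# resample until the generated phrase spans exactly one 4/4 measure
# (4.0 quarter notes) and contains more than ten events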
while keepGoing == 1:
  mc.generateDatabase(db, '\n')
  sen = mc.generateString()
  sen = sen.split(' ')
  length = 0.0
  counter = 0
  for word in sen:
    counter = counter + 1
    val_dur = word.split('!@#')
    dur = val_dur[1]
    cur_dur = assoc[dur]
    length += cur_dur.quarterLength
  if length == 4.0 and counter > 10:
    keepGoing = 0


s1 = stream.Stream()
			f.write(commentBody + '\n')
		except UnicodeEncodeError:
			continue

comments_path = "c:/users/jeff/dropbox/weakpots_legion_bot/comments.txt"
user_agent = "Comment-Gatherer for Markov Chain by /u/pbnjeff"

comment_limit = 20

r = praw.Reddit(user_agent)

weakpots = r.get_subreddit('weakpots')
comments = weakpots.get_comments(limit=comment_limit)

if os.path.exists(comments_path):
	with open(comments_path,'a') as f:
		writeComments(f, comments)
else:
	with open(comments_path,'w') as f:
		writeComments(f, comments)
		
# update database
comments = ''
with open(comments_path,'r') as f:
	for line in f:
		comments = comments + str(line)
		
# testing
mc = MarkovChain('c:/users/jeff/dropbox/weakpots_legion_bot')
mc.generateDatabase(comments)
Beispiel #41
0
    for cur_note in part.notes:

        name = cur_note.fullName
        '''if "Chord" in name:
      n = note.Note(cur_note.pitches[0])
      n.duration = cur_note.duration
      print cur_note
      print n
      cur_note = n
      name = n.fullName'''
        name = name.replace(' ', '#')
        assoc[name] = cur_note
        db = db + (' ' + name)
    db = db + ('\n')

mc.generateDatabase(db, '\n')
sen = mc.generateString()
s1 = stream.Stream()

keepGoing = 1
sen = ""

while keepGoing == 1:
    mc.generateDatabase(db, '\n')
    sen = mc.generateString()
    sen = sen.split(' ')
    length = 0.0
    counter = 0
    for word in sen:
        counter = counter + 1
        val_dur = word.split('!@#')
Beispiel #42
0
import re
from datetime import datetime
from pymarkovchain import MarkovChain
from glob import glob
from random import randint

bot = discord.Client()
bot.login('USERNAME', 'PASSWORD')

# comment out the next 5 lines unless you have a markov database
importlib.import_module("plugins")
mc = MarkovChain()
with open(r'C:\\Python35\\discordbot\\logpruned.txt', 'r',
          encoding="utf8") as log:
    thelog = log.read()
mc.generateDatabase(thelog)

#for plugin in glob("C:/Python35/discordbot/plugins/[!_]*.py"):
#	module = 'plugins.' + plugin[31:-3]
#	print(module)
#	print(plugin)
#	try:
#		importlib.import_module(module)
#	except Exception as e:
#		print('Failed to import {0}: {1}'.format(plugin, e))


@bot.event
def on_message(message):
    currentTime = '[' + str(datetime.now().strftime("%H:%M:%S")) + '] '
    print(currentTime + str(message.author) + ': ' + message.content)
Beispiel #43
0
# Vowels used to determine whether words rhyme (list head reconstructed from
# the identical listing in Beispiel #51 below)
vowels = [
  'AA', 'AE', 'AH', 'AO', 'AW', 'AY', 'EH', 'ER', 'EY',
  'IH', 'IY', 'OW', 'OY', 'UH', 'UW'
]

# Create an instance of the markov chain. By default, it uses MarkovChain.py's
# location to store and load its database files to. You probably want to give it
# another location, like so:
mc = MarkovChain("./markov")
# To generate the markov chain's language model, in case it's not present
with open(LYRIC_FILE, "r") as myfile:
  data = myfile.read()
mc.generateDatabase(data, '\n')
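# passing '\n' as the sentence separator above trains on each lyric line as a
# separate sentence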

# Reads in the rhyming dictionary and stores it in program memory
rhyming_dictionary = {}
with open(RHYMING_DICTIONARY, 'rb') as csvfile:
  spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
  for row in spamreader:
    row_zero = row[0].split('\t')
    rhyming_dictionary[row_zero[0]] = [row_zero[1]] + row[1:]

# Finds the last vowel in a group of words
def lastVowel(syllables):
  counter = len(syllables) - 1
  while counter > 0:
    if syllables[counter] in vowels:
      return syllables[counter:]
Beispiel #44
0
def getLines():
    mc = MarkovChain("")
    mc.generateDatabase(r.content)
    tweet = mc.generateString()
    tweet = tweet[:140].rsplit(r'\n', 1)[0]
    return tweet
Beispiel #45
0
        except UnicodeEncodeError:
            continue


comments_path = "c:/users/jeff/dropbox/weakpots_legion_bot/comments.txt"
user_agent = "Comment-Gatherer for Markov Chain by /u/pbnjeff"

comment_limit = 20

r = praw.Reddit(user_agent)

weakpots = r.get_subreddit('weakpots')
comments = weakpots.get_comments(limit=comment_limit)

if os.path.exists(comments_path):
    with open(comments_path, 'a') as f:
        writeComments(f, comments)
else:
    with open(comments_path, 'w') as f:
        writeComments(f, comments)

# update database
comments = ''
with open(comments_path, 'r') as f:
    for line in f:
        comments = comments + str(line)

# testing
mc = MarkovChain('c:/users/jeff/dropbox/weakpots_legion_bot')
mc.generateDatabase(comments)
Beispiel #46
0
    corpus = ''

    print("Gathering tweets...")
    for page in tweepy.Cursor(api.user_timeline, screen_name=secret.SOURCE_USER).pages(secret.PAGE_COUNT):
      for item in page:
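        # escape periods so pymarkovchain's default sentence splitting does not
        # break tweets apart; they are decoded back after generation below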
        text = item.text.replace(".", "%2E")
        if len(corpus) == 0:
          corpus = text
        else:
          corpus = corpus + ' ' + text

    print("Setting up Markov chain database...")
    chain = MarkovChain("./markov")
    print("Generating Markov chain database...")
    chain.generateDatabase(corpus)

    # 10 * 15 minutes = API update every 150 minutes
    #  that will change based on new timing between tweets, *shrugs*
    print("Beginning tweet loop.")
    for x in xrange(9):
      print("Tweet " + str(x) + " of tweet loop. (max=9)")
      status = chain.generateString()
      print('Tweet created: "' + status + '"')

      status = status.replace("%2E", ".")
      status = status.replace("&amp;", "&")
      status = status.replace("&lt;", "<")
      status = status.replace("&gt;", ">")
      status = status.replace("@", "")                    # cutting out all @'s entirely
      status = status.replace("twitter.com", "abc.xyz")   # why did I do this?
Beispiel #47
0
def get_markov_chain_horoscopes(tokenize: bool = False):
    unified_horoscopes = read_horoscopes(tokenize)
    chain = MarkovChain()
    chain.generateDatabase(unified_horoscopes, n=2, sentenceSep='[\n]')
    return chain
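# hypothetical usage (read_horoscopes is defined elsewhere in this project):
#   chain = get_markov_chain_horoscopes()
#   print(chain.generateString())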
Beispiel #48
0
        if "//" in t:
            continue
        if "cw: " in t:
            continue

        # Prune short tags
        if ARGS.prune and len(t) <= 3:
            continue

        # Tags which are just numbers should not be in the corpus
        try:
            int(t.strip())
            continue
        except ValueError:
            pass

        if ARGS.nohash:
            CORPUS += t + " "
        else:
            CORPUS += '#' + t + " "
    CORPUS += "\n"

if ARGS.debug:
    print(CORPUS)
    exit(1)
print("Generating database...")
BOT = MarkovChain(TARGET_FILE)
BOT.generateDatabase(CORPUS)
print("Dumping database to {}".format(TARGET_FILE))
BOT.dumpdb()
import numpy as np
# import matplotlib as pyplot

from pymarkovchain import MarkovChain
# Create an instance of the markov chain, tell it where to load / save its database
mc = MarkovChain("./markov")
# generate the markov chain's language model
mc.generateDatabase(
    "This is a string of Text. It won't generate an interesting database though."
)
mc.generateString()
Beispiel #50
0
#!/usr/bin/env python

from pymarkovchain import MarkovChain
# Create an instance of the markov chain. By default, it uses MarkovChain.py's location to
# store and load its database files to. You probably want to give it another location, like so:
mc = MarkovChain("C:/Users/Andrew/OneDrive/Documents/Northwestern/Courses/495-Data-Science/Final Project")
# To generate the markov chain's language model, in case it's not present
mc.generateDatabase("It is nice to meet you.  I would like to meet your friend.")
# To let the markov chain generate some text, execute
for i in range(10):
        print(mc.generateString())
Beispiel #51
0
###############################################################################

# Vowels used to determine whether words rhyme
vowels = [
    'AA', 'AE', 'AH', 'AO', 'AW', 'AY', 'EH', 'ER', 'EY', 'IH', 'IY', 'OW',
    'OY', 'UH', 'UW'
]

# Create an instance of the markov chain. By default, it uses MarkovChain.py's
# location to store and load its database files to. You probably want to give it
# another location, like so:
mc = MarkovChain("./markov")
# To generate the markov chain's language model, in case it's not present
with open(LYRIC_FILE, "r") as myfile:
    data = myfile.read()
mc.generateDatabase(data, '\n')

# Reads in the rhyming dictionary and stores it in program memory
rhyming_dictionary = {}
with open(RHYMING_DICTIONARY, 'rb') as csvfile:
    spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
    for row in spamreader:
        row_zero = row[0].split('\t')
        rhyming_dictionary[row_zero[0]] = [row_zero[1]] + row[1:]


# Finds the last vowel in a group of words
def lastVowel(syllables):
    counter = len(syllables) - 1
    while counter > 0:
        if syllables[counter] in vowels: