# Reply to each fetched tweet with a generated Markov tweet, seeding the
# generator with the longest word from the original that the chain knows.
for t in tweets:
    replies += 1
    print(f'___tweet #{replies}___\n')
    print(f'___original tweet:___\n{t["full_text"]}\n')

    # Candidate seed words (letters and '#' only), longest first.
    # `key=len` replaces the redundant `lambda w: len(w)`.
    words = sorted(re.split(r'[^a-zA-Z#]', t['full_text']),
                   key=len, reverse=True)
    # First (i.e. longest) word already present in the chain, or None.
    # Filtering empty splits is equivalent to the original early `break`,
    # since empty strings sort to the end of a longest-first ordering.
    begin = next((w.lower() for w in words if w and w.lower() in chain.tree),
                 None)

    reply = chain.generate_tweet(start_with=begin,
                                 append_tag='\n#YourWordFrequencies')
    print(f'___reply:___\n{reply}\n')

    try:
        twit.statuses.update(status=reply,
                             in_reply_to_status_id=t['id'],
                             auto_populate_reply_metadata='true')
    except Exception as e:
        # Don't count failed replies; log the error and keep going.
        replies -= 1
        print(f'{"!"*32}error{"!"*32}\n{e}\n')

    print(f'{"—"*64}\n')
    sleep(randint(2, 8))  # small random pause between replies
Example #2
0
	# Fragment: body of a loop over `user` values (loop header not visible here).
	# Pulls a user's recent tweets and trains the chain on cleaned text.
	print(f'search_term: {user}')
	# Up to 200 original tweets (no retweets), full text, minimal user data.
	tweets = twit.statuses.user_timeline(screen_name=user, count=200, tweet_mode='extended', include_rts=False, trim_user=True)
	for t in tweets:
		# Train only on tweets free of excluded words.
		if EXCLUDE_WORDS.search(t['full_text']) is None:
			# Normalization pipeline: strip non-text, mentions, and links,
			# then repair common typos around '#', '.', '?', '!'.
			tweet = TEXT_ONLY.sub(' ', t['full_text'])
			tweet = USER_NAME.sub(' ', tweet)
			tweet = LINKS.sub(' ', tweet)
			tweet = TYPO_HASHTAGS.sub(fix_hashtag, tweet)
			tweet = TYPO_PERIOD.sub(fix_period, tweet)
			tweet = TYPO_QUESTION.sub(fix_question, tweet)
			tweet = TYPO_EXCLAMATION.sub(fix_exclamation, tweet)
			# Drop stranded punctuation, then expand HTML-ish entities.
			tweet = LONE_PUNCTUATION.sub(' ', tweet)
			tweet = AMPERSAND.sub('and', tweet)
			tweet = GT.sub('>', tweet)
			tweet = LT.sub('<', tweet)
			chain.train(tweet)
		# chain.train(t['full_text'])
	print(f'len(chain.tree): {len(chain.tree)}')

# Re-weight the chain: favor complexity and punctuation, discourage
# back-to-back hashtags.
weighting = [
    aw_mult(aw_favor_complexity, .001),
    aw_mult(aw_favor_punctuation, .00015),
    aw_mult(dg_disfavor_consecutive_hashtags, .001),
]
chain.bulk_adjust_weights(fitness_functions=weighting,
                          iterations=len(chain.tree))

print('Sample tweet:', chain.generate_tweet())

# Alternate corpora that can be persisted instead:
# chain.save_training('bin/twitter/apologists.bin')
# chain.save_training('bin/twitter/atheists.bin')
chain.save_training('bin/twitter/news.bin')
# chain.save_training('bin/twitter/newagers.bin')
# chain.save_training('bin/twitter/churches.bin')
# chain.save_training('bin/twitter/trumpsterfire.bin')
# chain.save_training('bin/twitter/meta.bin')
Example #3
0
    # Fragment: body of a paging loop over `i` (loop header not visible here).
    # Extract the max_id cursor for the next results page: the first number
    # embedded in the 'next_results' query string.
    next_id = re.split(r'\D+', tweets['search_metadata']['next_results'])[1]
    try:
        tweets = twit.search.tweets(q=query,
                                    count=100,
                                    lang='en',
                                    result_type='recent',
                                    tweet_mode='extended',
                                    include_entities=False,
                                    max_id=next_id)
    except Exception as e:
        # Any API failure (rate limit, network, ...) ends the search early.
        print('____an error occurred____')
        print(f'____search ended at i = {i+1}____')
        break
    for t in tweets['statuses']:
        # Train only on tweets free of excluded words.
        if EXCLUDE_WORDS.search(t['full_text']) is None:
            tweet = clean_tweet(t['full_text'])
            chain.train(tweet)
print(f'____len(chain.tree) = {len(chain.tree)}____')

print('____adjusting weights, this may take a moment____')
# Bias generation toward complex phrasing and away from hashtag runs.
fitness = [aw_mult(aw_favor_complexity, .001),
           aw_mult(dg_disfavor_consecutive_hashtags, .001)]
chain.bulk_adjust_weights(fitness_functions=fitness,
                          iterations=len(chain.tree))
print('____done____')

chain.save_training('bin/twitter/beliefs.bin')

print(f'____sample tweet____:\n', chain.generate_tweet())
from math import sqrt
from random import random
from markov_chain import MarkovChain

# Smoke-test a saved chain: load one corpus and print a batch of samples.
test_chain = MarkovChain()
# file_name = './bin/new_testament.bin'
# file_name = './bin/quran.bin'
# file_name = './bin/quran_testament.bin'
# file_name = './bin/atheists.bin'
# file_name = './bin/twitter/news.bin'
file_name = './bin/star_trek/PICARD.bin'
# file_name = './bin/star_trek/DATA.bin'
test_chain.load_training(file_name)

print(f'{"_"*16} file_name: "{file_name}" {"_"*16}')
print(f'{"_"*16} len(test_chain.tree): {len(test_chain.tree)} {"_"*16}\n')

# Eleven tagged samples, each followed by a short divider.
for _ in range(11):
    # print(test_chain.generate_tweet(start_with='amen', append_verse=True), '\n_\n')
    sample = test_chain.generate_tweet(start_with=None,
                                       append_tag='#MarkovProcess')
    print(sample, '\n_\n')

# that’s
Example #5
0
from markov_chain import MarkovChain
from markov_algorithms import *

# Build a combined Quran + New Testament chain, persist it, print samples.
chain = MarkovChain()

for source in ('training_txt/quran.txt', 'training_txt/new_testament.csv'):
    chain.train_on_file(filename=source, verbose=True)

print(f'len(chain.tree): {len(chain.tree)}\n')

# Optional (slow) weight adjustment:
# print('Adjusting weights. This may take a while.\n_\n')
# chain.bulk_adjust_weights(fitness_functions=[aw_mult(aw_favor_complexity, .001), aw_mult(aw_favor_punctuation, .00015), aw_mult(aw_favor_alternating_complexity, .1)], iterations=len(chain.tree))

chain.save_training('bin/quran_testament.bin')

for _ in range(8):
    print(chain.generate_tweet(append_tag=None, follow=False), '\n_\n')
        # chain.load_training('bin/twitter/allgods.bin')
    # NOTE(review): fragment — the opening `if`/earlier `elif` branches of this
    # chain, and the enclosing loop over `i` (with bound `num`), are not
    # visible here. Each branch picks a category label and loads its corpus.
    elif i % 7 == 4:
        category = 'deepakchopra'
        chain.load_training('bin/chopra.bin')
    elif i % 7 == 5:
        category = 'shakespeare'
        chain.load_training('bin/shakespeare.bin')
    else:
        # Default corpus for all remaining slots.
        category = 'news'
        chain.load_training('bin/twitter/news.bin')
        # category = 'programming'
        # chain.load_training('bin/programming.bin')

    # Verse references are appended only for the bible corpus
    # (presumably a 'bible' branch exists above this view — TODO confirm).
    verse = True if category == 'bible' else False
    tweet = chain.generate_tweet(
        append_verse=verse,
        append_tag=f'#MarkovChain.\n\n[Category: #{category}]')
    print(f'-t: {tweet}')
    twit.statuses.update(status=tweet)
    # speak(tweet)

    # Sleep 1024–2048 s (~17–34 min) before the next tweet, except after
    # the last one; the announced timestamp uses a fixed UTC-7 offset.
    if i < num - 1:
        delay = random.randint(1024, 2048)
        delta = dt.timedelta(seconds=delay)
        when = dt.datetime.now(tz=dt.timezone(dt.timedelta(hours=-7))) + delta
        delay_text = f'delay: {delay} seconds (next tweet at {when.strftime("%H:%M:%S")})'
        print(delay_text, '\n')
        # speak(delay_text)
        # time.sleep(delay - 12)
        time.sleep(delay)