# ---- Example #1 ----
    no_links = re.sub(r'\S+.com', '', no_links)
    return no_links


def create_words(clean_string):
    """Split *clean_string* on spaces and keep only words longer than 3 chars."""
    tokens = clean_string.split(" ")
    # Short tokens ("a", "to", "at", ...) carry no word-cloud value — drop them.
    return [token for token in tokens if len(token) > 3]


# Collect the data from the user timeline.
# NOTE(review): assumes tweets_username.json holds a list of tweet objects,
# each with a 'text' field — confirm against whatever exported the file.
with open("tweets_username.json", "r", encoding="utf-8") as read_file:
    user_timeline = json.load(read_file)

# Extract the raw text of each tweet.
raw_tweets = [tweet['text'] for tweet in user_timeline]

# Generate the cloud: clean the tweets, keep the meaningful words, and render
# them inside the shape given by the Twitter-logo mask image.
clean_text = clean_tweets(raw_tweets)
words = create_words(clean_text)
clean_string = ','.join(words)
mask = np.array(Image.open('twitter-logo.jpg'))
wc = PersianWordCloud(background_color="white", max_words=2000, mask=mask)
wc.generate(clean_string)

plt.imshow(wc, interpolation='bilinear')
plt.axis("off")
plt.show(block=True)
# ---- Example #2 ----
print("finished")

# Load the mask image that shapes the word cloud.
twitter_mask = np.array(Image.open(path.join(d, "twitter_mask.png")))

# Generate the word cloud from the prepared text, restricted to Persian
# words and the given stopword list.
wc = PersianWordCloud(only_persian=True,
                      regexp=r".*\w+.*",
                      font_step=3,
                      font_path=path.join(d, "IRANSans.ttf"),
                      background_color="white",
                      max_words=800,
                      mask=twitter_mask,
                      stopwords=stopwords)
wc.generate(text)

# Timestamped output name, e.g. "31-12-2024_23_59.png".
# Use an aware UTC datetime: datetime.utcnow() is naive and deprecated in
# favor of datetime.now(timezone.utc); the rendered string is identical.
curr_time = datetime.datetime.now(datetime.timezone.utc)
output_name = curr_time.strftime("%d-%m-%Y_%H_%M.png")

# Store the rendered image to a file next to the other assets.
wc.to_file(path.join(d, output_name))

# Third-party clients used to publish the result.
# NOTE(review): mid-file imports — consider moving to the top of the file.
import tweepy
import telegram
# Authenticate to the Twitter API with OAuth 1.0a credentials.
# NOTE(review): consumer_key / consumer_secret / access_token /
# access_token_secret and telegram_bot_token are assumed to be defined
# earlier in the file — confirm, and keep them out of source control.
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# Telegram bot client — presumably used to post the generated image; verify downstream.
telegram_bot = telegram.Bot(token=telegram_bot_token)