Example #1
0
    def confuser(self, submission=None, comments=None, id=None, size=10):
        """Overwrite the user's reddit content with random filler text.

        Args:
            submission: truthy to target submissions.
            comments: truthy to target comments (ignored if `submission` set).
            id: optional specific item id; edits just that one item and exits
                via quit() (raises SystemExit, printing the message).
            size: size argument forwarded to get_text().
        """
        # TODO: How long to confuse. Example 3 weeks or 1 hour
        # NOTE: `id` shadows the builtin, but the name is part of the public
        # interface and is kept for backward compatibility.
        if id:
            if submission:
                sub = self.get_submission(id=id)
                sub.edit(get_text(size))
                quit(f'[i] Submission with id {id} confused')

            elif comments:
                com = self.get_comment(id=id)
                com.edit(get_text(size))
                quit(f'[i] Comment with id: {id} confused')

        # Guard: previously `data` was undefined (NameError at the final
        # print) when neither `submission` nor `comments` was truthy.
        data = []
        if submission:
            print('[i] This may take some time.\nLoading data...')
            data = self.user_activity(submission=True)
            print('[i] Confusing submission text but NOT title...')
            for s in data:
                self.reddit.submission(s).edit(get_text(size))

        elif comments:
            print('[i] This may take some time.\nLoading data...')
            data = self.user_activity(comments=True)
            print('[i] Confusing comments...')
            for c in data:
                self.reddit.comment(c).edit(get_text(size))
        print('[i] Confused {0} items.'.format(len(data)))
Example #2
0
	async def on_reaction_add(self, reaction, user):
		"""Advance to the next video search result when a 👎 reaction is added.

		Reactions on messages not tracked in self.video_messages are ignored
		(the KeyError from the dict lookup is swallowed below).
		"""
		#TODO: video_id_to_url
		#TODO: when searched random, get a new random result instead of next???
		try:
			if '👎' in str(reaction):
				# Raises KeyError when the message isn't one of ours.
				associated_search_result = self.video_messages[reaction.message.id]
				# TODO: if random video was searched, random random one instead of next

				await reaction.message.channel.send(get_text(reaction.message.guild.id, "next_video"))
				next_video = associated_search_result.next_item()
				if not next_video:
					# Current result page exhausted — fetch the next page.
					associated_search_result = associated_search_result.get_next_page(self.search)
					next_video = associated_search_result.first_item()
				await self.send_video(reaction.message.channel, next_video, associated_search_result)

		except KeyError:
			# Deliberate best-effort: ignore reactions on untracked messages.
			# (Unused `as e` binding removed.)
			pass
Example #3
0
def magic_eight_ball(guild):
    """Pick a random magic-eight-ball answer localized for *guild*."""
    answers = get_text(guild.id, "magic_eight_ball")
    return choice(answers)
Example #4
0
def get_random_quote(guild):
    """Pick a random quote localized for *guild*."""
    quotes = get_text(guild.id, "quotes")
    return choice(quotes)
Example #5
0
from gensim.models import Phrases
from helpers import get_file_names, get_text
from itertools import groupby
from pathlib import Path
from typing import Dict, List, Optional, Tuple

import logging
import matplotlib.pyplot as plt
import nltk
import os
import re
import rus_preprocessing_udpipe

# Fetch the NLTK stopword corpus data (no-op if already downloaded).
nltk.download("stopwords")
# Additional Russian stopwords, one word per line in the bundled file.
stopwordsiso = get_text("stopwords-ru.txt").split("\n")
# NOTE(review): import placed after the download, presumably so the corpus
# data exists before stopwords.words() is called below — confirm intent.
from nltk.corpus import stopwords
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                    level=logging.INFO)


class ScholarlyPreprocessor(object):
    """Prepares a list of raw Russian text from scholarly parpers into a list of normalized 
    tokens.
    """

    russian_stopwords = stopwords.words("russian") + stopwords.words("english") + \
        stopwordsiso + ["что-то", "который", "это", "также", "диалог", "что-ловек", "чем-ловек", "как-то",
                       "поскольку", "никак", "текст", "явление", "являться", "автор", "вообще-то", "получать",
                       "сравнивать", "корпус", "исследование", "словарь", "конструкция", "таблица", "предложение",
                       "эксперимент", "причина", "отношение", "данные", "объект", "анализ", "рисяча", "во-вторых",
                       "во-первых", "в-третьих", "заключение", "выражение", "высказывание", "материал",
Example #6
0
import helpers
import numpy as np
import random
import sys

# Training-data preparation for a character-level model.
# All heavy lifting is delegated to the project-local `helpers` module.

# How many characters we look back (input sequence length).
SEQUENCE_LENGTH = 80
# Step between the starts of consecutive sequences.
SEQUENCE_STEP = 1
# The file that contains the training text.
CORPUS = "corpus.txt"
# How many epochs to train for.
EPOCHS = 10

# Read the raw corpus text.
text = helpers.get_text(CORPUS)
# Unique characters form the model's vocabulary.
chars = helpers.get_unique_characters(text)
# Vocabulary size.
chars_length = len(chars)

# Sequences are the input values; the character following each sequence is its label.
values, labels = helpers.create_sequences(text, SEQUENCE_LENGTH, SEQUENCE_STEP)

# Lookup tables mapping characters to vocabulary indices and back.
char_to_index, index_to_char = helpers.create_dictionaries(chars)

# One-hot encode inputs (x) and labels (y) for training.
x, y = helpers.convert_to_one_hot(values, SEQUENCE_LENGTH, chars_length,
                                  char_to_index, labels)

# Create model.