Example No. 1
import time

from gibberish import Gibberish
# asizeof is assumed to come from pympler (pympler.asizeof provides flatsize()).
from pympler import asizeof


def words_generator(n):
    start = time.time()
    gib = Gibberish()
    words = gib.generate_words(n)
    end = time.time()
    print('Time generating: {}'.format(end - start))
    print('Real number of words: {}'.format(len(words)))
    len_unique = len(set(words))
    print('Number of unique words: {}'.format(len_unique))
    size = asizeof.flatsize(words)
    print('Size of words: {} MB, {} KB'.format(size / 1024 / 1024, size / 1024))
    print('________________________________________________________________\n')
    return words, len_unique
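A minimal call sketch (assuming the gibberish and pympler packages are installed; the word count is arbitrary):

if __name__ == '__main__':
    # Generate 100,000 words and report timing, uniqueness and flat list size.
    words, n_unique = words_generator(100000)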
Example No. 2
import random

import pytest

from gibberish import Gibberish
# SearchBar and SortFilter are page-object classes defined elsewhere in this test project.


class TestSearch:

    gib = Gibberish()

    def test_random_clothes_result_count(self, browser):
        clothes = ['dress', 'blouse', 't-shirt']
        search_bar = SearchBar(browser)
        search_bar.search(random.choice(clothes))

        # Get only direct child elements from results using '*' in xpath
        results = browser.find_element_by_xpath("//ul[@class='product_list grid row']").find_elements_by_xpath('*')
        
        assert len(results)
    
    @pytest.mark.zero
    def test_zero_result(self, browser):
        search_bar = SearchBar(browser)
        search_bar.search(self.gib.generate_word())
        
        assert browser.find_element_by_css_selector('#center_column > p')
        
    @pytest.mark.sortprice
    def test_sort_price(self, browser):
        search_bar = SearchBar(browser)
        search_bar.search('dress')
        
        sort = SortFilter(browser)
        sort.by_price_asc()

        price_el = browser.find_elements_by_xpath("//div[@class='right-block']//span[@class='price product-price']")
        # Parse to float so prices compare numerically; sorting the raw text
        # would order "$9.00" after "$16.40".
        prices = [float(el.text.replace('$', '').strip()) for el in price_el]

        assert prices == sorted(prices)
        
        
    @pytest.mark.sortname
    def test_sort_name(self, browser):
        search_bar = SearchBar(browser)
        search_bar.search('dress')
        
        sort = SortFilter(browser)
        sort.by_name_asc()

        name_el = browser.find_elements_by_css_selector("#center_column > ul  div > div.right-block > h5 > a")
        names = [a.text for a in name_el]

        sorted_names = sorted(names)

        assert names == sorted_names
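These tests assume a pytest fixture named browser that yields a Selenium WebDriver. The project's actual conftest.py is not shown; a minimal sketch could look like this (Chrome is an arbitrary choice):

# conftest.py (hypothetical sketch)
import pytest
from selenium import webdriver


@pytest.fixture
def browser():
    driver = webdriver.Chrome()
    yield driver
    driver.quit()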
Example No. 3
import random

from faker import Faker
from gibberish import Gibberish


def generate_synthetic_names(n_synth):
    # Cap the Faker share to reduce repetition: Faker draws from a limited dataset.
    n_faker = min(round(n_synth / 2), 700)
    n_gibberish = n_synth - n_faker
    synth_list = []

    fake = Faker()
    for _ in range(n_faker):
        # Faker company names combine several last names; keep only the first (it may contain hyphens).
        name = fake.company().replace(",", "").split()[0]
        if random.random() < 0.5:
            name = name.lower()
        if ("-" in name) and (random.random() < 0.5):
            name = name.replace("-", " ")
        synth_list.append(name)

    gib = Gibberish()
    for _ in range(n_gibberish):
        # Gibberish words are lowercase and, by default, begin and end with consonants.
        type_of_gibberish = random.random()
        if type_of_gibberish < 0.25:
            name = gib.generate_word(start_vowel=True)
        elif type_of_gibberish < 0.5:
            name = gib.generate_word(end_vowel=True)
        elif type_of_gibberish < 0.75:
            name = gib.generate_word()
        else:
            name = gib.generate_word(2, start_vowel=True, end_vowel=True)
        add_gibberish = random.random()
        if add_gibberish < 0.1:
            name += " " + gib.generate_word()
        if random.random() < 0.5:
            name = name.title()
        synth_list.append(name)

    random.shuffle(synth_list)
    return synth_list
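A hedged usage sketch (the sample size is arbitrary):

if __name__ == '__main__':
    # Print a handful of synthetic brand-like names for inspection.
    for name in generate_synthetic_names(10):
        print(name)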
Example No. 4
from gibberish import Gibberish

gib = Gibberish()


def test_generate_word(rep=50):
    for _ in range(rep):
        print(gib.generate_word())


def test_generate_word_start_vowel(rep=50):
    for _ in range(rep):
        print(gib.generate_word(start_vowel=True))


def test_generate_word_end_vowel(rep=50):
    for _ in range(rep):
        print(gib.generate_word(end_vowel=True))


def test_generate_words(rep=50):
    for _ in range(rep):
        print(gib.generate_words(3))


if __name__ == '__main__':
    test_generate_word()
    test_generate_words()
    test_generate_word_start_vowel()
    test_generate_word_end_vowel()
Example No. 5
# Prints a 140-character string of gibberish.
from gibberish import Gibberish
print(Gibberish.random().tweet())
Example No. 6
def main():
    # `text` is the source corpus for this Markov-style Gibberish model and is
    # assumed to be defined elsewhere in the original script.
    g = Gibberish(text, 4)
    print(g.generate_paragraph())
Example No. 7
from gibberish import Gibberish


def getGibberishWordsUsingPyGibberish(n):
    gib = Gibberish()
    return gib.generate_words(n)
Example No. 8
# -*- coding: utf-8 -*-
# (c) 2019 Andreas Motl <*****@*****.**>
# License: GNU Affero General Public License, Version 3
from gibberish import Gibberish

size_map = {'small': 1, 'medium': 2, 'large': 3}
generator = Gibberish()


def generate_gibberish(size=None):
    vowel_consonant_repeats = get_wordlength(size)
    return generator.generate_word(
        vowel_consonant_repeats=vowel_consonant_repeats)


def get_wordlength(selector):
    selector = selector or 'small'
    return size_map[selector]
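For reference, a short sketch of how the size selector maps onto word length (output is random; the calls only illustrate relative length):

if __name__ == '__main__':
    for size in ('small', 'medium', 'large'):
        # size_map gives 1, 2 or 3 vowel/consonant repeats respectively
        print(size, generate_gibberish(size))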
Example No. 9
import logging

from numpy import delete, minimum

from gibberish import Gibberish
# Perception, Category, AssociativeMatrix and the error constants (ERROR, NO_WORD_FOR_CATEGORY,
# NO_SUCH_WORD, NO_ASSOCIATED_CATEGORIES) are defined elsewhere in the original project.


class Language(Perception):
    gibberish = Gibberish()

    def __init__(self, params):
        Perception.__init__(self)
        self.lexicon = []
        self.lxc = AssociativeMatrix()
        self.stm = params['stimulus']
        self.delta_inc = params['delta_inc']
        self.delta_dec = params['delta_dec']
        self.delta_inh = params['delta_inh']
        self.discriminative_threshold = params['discriminative_threshold']
        self.alpha = params['alpha']  # forgetting
        self.beta = params['beta']  # learning rate
        self.super_alpha = params['super_alpha']

    def add_new_word(self):
        new_word = Language.gibberish.generate_word()
        self.add_word(new_word)
        return new_word

    def add_word(self, word):
        self.lexicon.append(word)
        self.lxc.add_row()

    def add_category(self, stimulus, weight=0.5):
        # print("adding discriminative category centered on %5.2f" % (stimulus.a/stimulus.b))
        c = Category(id=self.get_cat_id())
        c.add_reactive_unit(stimulus, weight)
        self.categories.append(c)
        # TODO this should work
        self.lxc.add_col()
        return self.lxc.col_count() - 1  # this is the index of the added category

    def update_category(self, i, stimulus):
        logging.debug(
            "updating category by adding reactive unit centered on %s" %
            stimulus)
        self.categories[i].add_reactive_unit(stimulus)

    def get_most_connected_word(self, category):
        if category is None:
            raise ERROR

        if not self.lexicon or all(v == 0.0
                                   for v in self.lxc.get_row_by_col(category)):
            raise NO_WORD_FOR_CATEGORY
            # print("not words or all weights are zero")

        return self.get_words_sorted_by_val(category)[0]

    def get_words_sorted_by_val(self, category, threshold=-1):
        # https://stackoverflow.com/questions/1286167/is-the-order-of-results-coming-from-a-list-comprehension-guaranteed/1286180
        return [
            self.lexicon[index] for index, weight in
            self.lxc.get_index2row_sorted_by_value(category)
            if weight > threshold
        ]

    def get_categories_sorted_by_val(self, word):
        word_index = self.lexicon.index(word)
        return self.lxc.get_index2col_sorted_by_value(word_index)

    def get_categories_by_word(self, word):
        word_index = self.lexicon.index(word)
        return self.lxc.get_col_by_row(word_index)

    def get_words_by_category(self, category):
        return self.lxc.get_row_by_col(category)

    def get_most_connected_category(self, word):
        if word is None:
            raise ERROR

        if word not in self.lexicon:
            raise NO_SUCH_WORD

        category_index, max_propensity = self.get_categories_sorted_by_val(
            word)[0]

        # TODO still happens
        if max_propensity == 0:
            logging.debug("\"%s\" has no associated categories" % word)
            raise NO_ASSOCIATED_CATEGORIES

        return category_index

    def initialize_word2category_connection(self, word, category_index):
        word_index = self.lexicon.index(word)
        self.lxc.set_value(word_index, category_index, .5)

    def increment_word2category_connection(self, word, category_index):
        word_index = self.lexicon.index(word)
        value = self.lxc.get_value(word_index, category_index)
        self.lxc.set_value(word_index, category_index,
                           value + self.delta_inc * value)

    def inhibit_word2category_connection(self, word, category_index):
        word_index = self.lexicon.index(word)
        value = self.lxc.get_value(word_index, category_index)
        self.lxc.set_value(word_index, category_index,
                           value - self.delta_inh * value)

    def inhibit_word2categories_connections(self, word, category_index):
        for k_index, _ in self.get_categories_sorted_by_val(word):
            if k_index != category_index:
                self.inhibit_word2category_connection(word, k_index)

    def inhibit_category2words_connections(self, word, category_index):
        for v in self.get_words_sorted_by_val(category_index):
            if v != word:
                self.inhibit_word2category_connection(
                    word=v, category_index=category_index)

    def decrement_word2category_connection(self, word, category_index):
        word_index = self.lexicon.index(word)
        value = self.lxc.get_value(word_index, category_index)
        self.lxc.set_value(word_index, category_index,
                           value - self.delta_dec * value)

    def forget_categories(self, category_in_use):
        category_index = self.categories.index(category_in_use)
        for c in self.categories:
            c.decrement_weights(self.alpha)
        to_forget = [
            j for j in range(len(self.categories))
            if self.categories[j].max_weigth() < self.super_alpha
            and j != category_index
        ]

        if len(to_forget):
            self.lxc.__matrix__ = delete(self.lxc.__matrix__,
                                         to_forget,
                                         axis=1)
            self.categories = list(delete(self.categories, to_forget))

    def forget_words(self):
        to_forget = self.lxc.forget(0.01)
        self.lexicon = list(delete(self.lexicon, to_forget))

    def discrimination_game(self, context, topic):
        self.store_ds_result(False)
        winning_category = self.discriminate(context, topic)
        winning_category.reinforce(context[topic], self.beta)
        self.forget_categories(winning_category)
        self.switch_ds_result()
        return self.categories.index(winning_category)

    def increment_word2category_connections_by_csimilarity(
            self, word, csimilarities):
        row = self.lexicon.index(word)
        increments = [
            sim * self.delta_inc * (sim > 0.25) for sim in csimilarities
        ]
        #logging.debug("Increments: %s" % str(increments))

        old_weights = self.lxc.get_col_by_row(self.lexicon.index(word))
        #logging.debug("Old weights: %s" % str(old_weights))

        incremented_weights = [
            weight + inc for weight, inc in zip(old_weights, increments)
        ]
        #logging.debug("Incremented weights: %s" % str(incremented_weights))
        self.lxc.set_values(axis=0, index=row, values=incremented_weights)

    # based on how much the word meaning covers the category
    def csimilarity(self, word, category):
        area = category.union()
        # omit multiplication by x_delta because all we need is ratio: coverage/area:
        word_meaning = self.word_meaning(word)
        coverage = minimum(word_meaning, area)

        return sum(coverage) / sum(area)

    def word_meaning(self, word):
        word_index = self.lexicon.index(word)
        return sum([
            category.union() * word2category_weight
            for category, word2category_weight in zip(
                self.categories, self.lxc.__matrix__[word_index])
        ])

    def semantic_meaning(self, word, stimuli):
        word_index = self.lexicon.index(word)
        activations = [
            sum([
                float(c.response(s) > 0.0) * float(
                    self.lxc.get_value(word_index, self.categories.index(c)) >
                    0.0) for c in self.categories
            ]) for s in stimuli
        ]
        # map() returns an iterator in Python 3, so materialize it before calling len() or slicing.
        flat_bool_activations = [int(x > 0.0) for x in activations]
        mean_bool_activations = []
        for i in range(len(flat_bool_activations)):
            window = flat_bool_activations[
                max(0, i - 5):min(len(flat_bool_activations), i + 5)]
            mean_bool_activations.append(int(sum(window) / len(window) > 0.5))
        #logging.critical("Word %s ba: %s" % (word, bool_activations))
        #logging.critical("Word %s mba: %s" % (word, mean_bool_activations))
        return mean_bool_activations if self.stm == 'quotient' else flat_bool_activations
        #return flat_bool_activations

    def is_monotone(self, word, stimuli):
        bool_activations = self.semantic_meaning(word, stimuli)
        alt = len([
            a for a, aa in zip(bool_activations, bool_activations[1:])
            if a != aa
        ])
        return alt == 1
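Constructing a Language instance requires a params dict with the keys read in __init__. The values below are placeholders, not settings from the original project, and the sketch is not runnable without the project's Perception, Category and AssociativeMatrix classes:

params = {
    'stimulus': 'quotient',  # semantic-meaning mode checked in semantic_meaning()
    'delta_inc': 0.1,  # connection increment rate
    'delta_dec': 0.1,  # connection decrement rate
    'delta_inh': 0.1,  # lateral inhibition rate
    'discriminative_threshold': 0.95,
    'alpha': 0.1,  # forgetting
    'beta': 0.1,  # learning rate
    'super_alpha': 0.01,
}
language = Language(params)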
Example No. 10
    # Part of a discord.py cog: discord, psycopg2, asyncio, random, Gibberish and
    # DATABASE_URL are imported/defined at module level in the original file.
    async def shittytyperacer(self, ctx, num_words: int = 5):
        '''
        A game to test your typing speed?
        w.typeracer [number of words]

        Number of words defaults to 5 if not specified.
        Type "w.stop" to stop the game. Only the game starter and server admins can stop the game.
        '''
        def get_scoreboard_embed(sorted_lst):
            embed = discord.Embed(color=0x48d1cc)
            temp = None
            offset = 0
            for i in range(len(sorted_lst)):
                player_score = sorted_lst[i]
                player = player_score[0]
                score = player_score[1]
                # Players with equal scores share a rank; the offset keeps the displayed placing correct
                if score == temp:
                    offset += 1
                else:
                    offset = 0
                temp = score
                xp_increase, balance_increase = update_db_and_return_increase(
                    player, score)
                embed.add_field(
                    name=f"{i+1-offset}. {player.name}",
                    value=
                    f"**{score} words** *(+{xp_increase} XP, +{balance_increase} Coins)*",
                    inline=False)
                embed.set_author(name="Final Scoreboard",
                                 icon_url=self.bot.user.avatar_url)
            return embed

        def update_db_and_return_increase(player, score):
            conn = psycopg2.connect(DATABASE_URL, sslmode='require')
            c = conn.cursor()
            c.execute(
                """ SELECT xp, balance FROM users
                        WHERE ID = %s; """, (str(player.id), ))
            fetch = c.fetchone()
            xp = int(fetch[0])
            balance = int(fetch[1])
            xp_increase = 0
            balance_increase = 0
            for i in range(score):
                xp_increase += random.randint(12, 20)
                balance_increase += random.randint(50, 80)
            xp += xp_increase
            balance += balance_increase
            c.execute(
                """ UPDATE users SET xp = %s, balance = %s WHERE ID = %s; """,
                (xp, balance, str(player.id)))
            conn.commit()
            conn.close()
            return xp_increase, balance_increase

        # remove 'typeracer' status from channel
        def remove_status():
            conn = psycopg2.connect(DATABASE_URL, sslmode='require')
            c = conn.cursor()
            c.execute(
                """ UPDATE channels
                        SET status = array_remove(status, %s)
                        WHERE channel_id = %s; """,
                ('typeracer', str(ctx.channel.id)))
            conn.commit()
            conn.close()

        # Check that the game isn't already running in the channel
        conn = psycopg2.connect(DATABASE_URL, sslmode='require')
        c = conn.cursor()
        c.execute(""" SELECT status FROM channels WHERE channel_id = %s;""",
                  (str(ctx.channel.id), ))
        status_lst = c.fetchone()[0]
        if status_lst is not None:
            if "typeracer" in status_lst:
                await ctx.send(
                    "This channel is already in a game of typeracer!")
                conn.commit()
                conn.close()
                return
        c.execute(
            """ UPDATE channels
                    SET status = array_append(status, %s)
                    WHERE channel_id = %s; """,
            ('typeracer', str(ctx.channel.id)))
        conn.commit()
        conn.close()

        if num_words > 25 or num_words < 1:
            await ctx.send("Number of words must be between 1 and 25!")
            remove_status()
            return

        await ctx.send("*The race has started!\nThe word to type is...*")
        # getting list of words
        gib = Gibberish()
        words_lst = gib.generate_words(num_words)

        scoreboard_dict = {}
        i = 0
        word_sent = False
        while i < len(words_lst):
            if not word_sent:
                word_sent = True
                word = words_lst[i]
                # Insert a zero-width space so a copy-pasted prompt can be distinguished from a typed answer
                word_display = word[0] + "\u200B" + word[1:]
                embed = discord.Embed(title="The word is:",
                                      description="`" + word_display + "`",
                                      color=0xF5DE50)
                embed.set_author(
                    name="Type the word!",
                    icon_url=
                    "http://www.law.uj.edu.pl/kpk/strona/wp-content/uploads/2016/03/52646-200.png"
                )
                embed.set_footer(text=f"{i+1}/{len(words_lst)}")
                msg = await ctx.send(embed=embed)

            def check(m):
                return not m.author.bot and (
                    m.content == word or m.content == word_display or
                    (m.content == 'w.stop' and
                     (m.author == ctx.author
                      or m.author.permissions_in(ctx.channel).administrator))
                ) and m.channel == ctx.channel

            try:
                answer = await self.bot.wait_for('message',
                                                 check=check,
                                                 timeout=25)
            except asyncio.TimeoutError:
                embed = discord.Embed(title="The word was:",
                                      description=word_display,
                                      color=0xED1C24)
                embed.set_author(
                    name="The type race has timed out!",
                    icon_url="http://cdn.onlinewebfonts.com/svg/img_96745.png")
                embed.set_footer(text=f"{i+1}/{len(words_lst)}")
                await msg.edit(embed=embed)
                break
            else:
                if answer.content == word:
                    embed = discord.Embed(title="The word was:",
                                          description=word_display,
                                          color=0x4CC417)
                    embed.set_author(
                        name=f"{answer.author.name} got it right!",
                        icon_url=answer.author.avatar_url)
                    embed.set_footer(text=f"{i+1}/{len(words_lst)}")
                    await msg.edit(embed=embed)
                    # update scoreboard
                    if answer.author in scoreboard_dict:
                        scoreboard_dict[answer.author] += 1
                    else:
                        scoreboard_dict[answer.author] = 1
                    i += 1
                    word_sent = False
                elif answer.content == word_display:
                    await ctx.send(answer.author.mention +
                                   " Don't even try to ctrl+C ctrl+V!")
                elif answer.content == 'w.stop':
                    embed = discord.Embed(title="The word was:",
                                          description=word_display,
                                          color=0xED1C24)
                    embed.set_author(
                        name="The type race has been stopped",
                        icon_url=
                        "https://upload.wikimedia.org/wikipedia/commons/thumb/c/ce/Black_close_x.svg/2000px-Black_close_x.svg.png"
                    )
                    embed.set_footer(text=f"{i+1}/{len(words_lst)}")
                    await msg.edit(embed=embed)
                    break

        if len(scoreboard_dict) > 0:
            # gives sorted list in order of decreasing score
            sorted_lst = sorted(scoreboard_dict.items(), key=lambda x: x[1])
            sorted_lst.reverse()
            await ctx.send(embed=get_scoreboard_embed(sorted_lst))

        remove_status()