Example #1
        # Construct your decoder here, making sure
        # to pass in all the keyword arguments coming
        # from `ColorizedInputDescriber`:

        ##### YOUR CODE HERE

        # Return a `ColorizedEncoderDecoder` that uses
        # your encoder and decoder:

        ##### YOUR CODE HERE
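
# For orientation, those two steps might look roughly like the sketch below. This is only an illustration, not the official solution: the `ColorizedDecoder` name and its keyword arguments are assumptions about your own implementation, and `encoder` refers to whatever you constructed earlier in `build_graph`.
#
#         # Assumed decoder class and keyword arguments:
#         decoder = ColorizedDecoder(
#             vocab_size=self.vocab_size,
#             embed_dim=self.embed_dim,
#             embedding=self.embedding,
#             hidden_dim=self.hidden_dim,
#             color_dim=self.color_dim)
#
#         # Wrap your encoder and decoder:
#         return ColorizedEncoderDecoder(encoder, decoder)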


# That's it! Since these modifications are pretty intricate, you might want to use [a toy dataset](colors_overview.ipynb#Toy-problems-for-development-work) to debug your implementation:

# In[ ]:

toy_color_seqs, toy_word_seqs, toy_vocab = create_example_dataset(
    group_size=50, vec_dim=2)

# In[ ]:

toy_color_seqs_train, toy_color_seqs_test, toy_word_seqs_train, toy_word_seqs_test = train_test_split(
    toy_color_seqs, toy_word_seqs)

# In[ ]:

toy_mod = ColorizedInputDescriber(toy_vocab,
                                  embed_dim=10,
                                  hidden_dim=10,
                                  max_iter=100,
                                  batch_size=128)

# In[ ]:
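
# Assuming `ColorizedInputDescriber` keeps the `fit` and
# `listener_accuracy` interface of `ContextualColorDescriber`
# shown in Example #4 below, the toy debugging loop might
# continue like this:

_ = toy_mod.fit(toy_color_seqs_train, toy_word_seqs_train)

toy_mod.listener_accuracy(toy_color_seqs_test, toy_word_seqs_test)
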
Example #2
from torch_color_describer import create_example_dataset
import torch_color_describer

def dataset():
    # Small synthetic color/word dataset for fast development and testing.
    color_seqs, word_seqs, vocab = create_example_dataset(group_size=50,
                                                          vec_dim=2)
    return color_seqs, word_seqs, vocab

def color_describer_dataset():
    # Same toy dataset, built via the module reference instead.
    color_seqs, word_seqs, vocab = torch_color_describer.create_example_dataset(
        group_size=50, vec_dim=2)
    return color_seqs, word_seqs, vocab
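
# A minimal, hypothetical usage of the helpers above: split the toy data
# for train/test experiments (`train_test_split` is from
# sklearn.model_selection, as in Examples #1 and #4).
from sklearn.model_selection import train_test_split

color_seqs, word_seqs, vocab = dataset()

color_seqs_train, color_seqs_test, word_seqs_train, word_seqs_test = \
    train_test_split(color_seqs, word_seqs)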
Example #4
from colors import ColorsCorpusReader
import os
import pandas as pd
from sklearn.model_selection import train_test_split
import torch
from torch_color_describer import (ContextualColorDescriber,
                                   create_example_dataset)
import utils
from utils import START_SYMBOL, END_SYMBOL, UNK_SYMBOL

tiny_contexts, tiny_words, tiny_vocab = create_example_dataset(group_size=3,
                                                               vec_dim=2)

toy_mod = ContextualColorDescriber(
    tiny_vocab,
    embedding=None,  # Option to supply a pretrained matrix as an `np.array`.
    embed_dim=10,
    hidden_dim=20,
    max_iter=100,
    eta=0.01,
    optimizer=torch.optim.Adam,
    batch_size=128,
    l2_strength=0.0,
    warm_start=False,
    device=None)

_ = toy_mod.fit(tiny_contexts, tiny_words)

# Listener accuracy: how often the model, used as a listener, picks out
# the target color given the produced description.
metric = toy_mod.listener_accuracy(tiny_contexts, tiny_words)
print("listener_accuracy:", metric)