Example #1
def __init__(self, headPort='/dev/elmo0', leftPort='/dev/elmo1', rightPort='/dev/elmo2', baudrate=19200):
    # One serial port per Elmo motor drive: head, left wheel, right wheel.
    self._port = [headPort, leftPort, rightPort]
    self._baudrate = baudrate
    self._elmoList = [Elmo(port, self._baudrate) for port in self._port]
    self._speedList = [0] * 3
Example #2
def __init__(self,
             leftFrontPort='/dev/elmo0',
             leftRearPort='/dev/elmo1',
             rightFrontPort='/dev/elmo2',
             rightRearPort='/dev/elmo3',
             baudrate=19200):
    # Four-wheel variant: one serial port per Elmo motor drive.
    self._port = [
        leftFrontPort, leftRearPort, rightFrontPort, rightRearPort
    ]
    self._baudrate = baudrate
    self._elmoList = [Elmo(port, self._baudrate) for port in self._port]
    self._speedList = [0] * 4
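
Only the constructors survive in these snippets. As a purely hypothetical sketch of how a speed command might fan out to the drives (the `setWheelSpeeds` name and the Elmo driver's `setSpeed` method are assumptions, not part of the source):

def setWheelSpeeds(self, speeds):
    # Hypothetical helper: push one speed command to each of the four drives.
    # ``setSpeed`` on the Elmo driver is an assumption, not shown in the source.
    for elmo, speed in zip(self._elmoList, speeds):
        elmo.setSpeed(speed)
    self._speedList = list(speeds)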
Example #3
    def __init__(self, hidden_dim, device):
        super(BiLSTM, self).__init__()
        self.embedding_dim = 1024
        self.hidden_dim = hidden_dim
        self.num_layers = 1
        # self.tag_to_ix = tag_to_ix
        # self.tagset_size = len(tag_to_ix)

        self.elmo = Elmo(device)  # custom wrapper expected to yield 1024-dim ELMo embeddings
        # Note: despite the attribute name (and the class name BiLSTM),
        # this is a bidirectional GRU.
        self.lstm = nn.GRU(self.embedding_dim,
                           hidden_dim // 2,
                           num_layers=self.num_layers,
                           bidirectional=True,
                           batch_first=True)
        self.linear = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.Dropout(0.2),
            nn.Linear(hidden_dim // 2, 2),
        )

        # # Matrix of transition parameters.  Entry i,j is the score of
        # # transitioning *to* i *from* j.
        # self.transitions = nn.Parameter(
        #     torch.randn(self.tagset_size, self.tagset_size))
        #
        # # These two statements enforce the constraint that we never transfer
        # # to the start tag and we never transfer from the stop tag
        # self.transitions.data[tag_to_ix[START_TAG], :] = -10000
        # self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000

        self._dev = device
        self.hidden = self.init_hidden()
        self.to(self._dev)
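
The constructor calls `init_hidden`, which is not included in the snippet. A minimal sketch of what it plausibly looks like for this single-layer bidirectional GRU (the default batch size of 1 is an assumption):

    def init_hidden(self, batch_size=1):
        # GRU hidden state shape: (num_layers * num_directions, batch, hidden_size).
        return torch.zeros(self.num_layers * 2,
                           batch_size,
                           self.hidden_dim // 2,
                           device=self._dev)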
Example #4
class ElmoTokenEmbedder(torch.nn.Module):
    """
    Compute a single layer of ELMo representations.
    This class serves as a convenience when you only want to use one layer of ELMo representations at the input of
    your network.  It's essentially a wrapper around Elmo(num_output_representations=1, ...)

    Parameters
    ----------
    options_file : ``str``, required.
        An ELMo JSON options file.
    weight_file : ``str``, required.
        An ELMo hdf5 weight file.
    do_layer_norm : ``bool``, optional.
        Should we apply layer normalization (passed to ``ScalarMix``)?
    dropout : ``float``, optional.
        The dropout value to be applied to the ELMo representations.
    requires_grad : ``bool``, optional
        If True, compute gradient of ELMo parameters for fine tuning.
    projection_dim : ``int``, optional
        If given, we will project the ELMo embedding down to this dimension.  We recommend that you
        try using ELMo with a lot of dropout and no projection first, but we have found a few cases
        where projection helps (particularly where there is very limited training data).
    """
    def __init__(self, options_file, weight_file, do_layer_norm=False, dropout=0.0,
                 requires_grad=False, projection_dim=None):
        super(ElmoTokenEmbedder, self).__init__()
        self._elmo = Elmo(options_file, weight_file, 1, do_layer_norm=do_layer_norm, dropout=dropout,
                          requires_grad=requires_grad)
        # ``projection_dim`` is documented above; wire it up so ``forward`` can use it.
        if projection_dim:
            self._projection = torch.nn.Linear(self._elmo.get_output_dim(), projection_dim)
        else:
            self._projection = None

    def get_output_dim(self):
        if self._projection is not None:
            return self._projection.out_features
        return self._elmo.get_output_dim()

    def forward(self, inputs):
        """
        Parameters
        ----------
        inputs: ``torch.autograd.Variable``
            Shape ``(batch_size, timesteps, 50)`` of character ids representing the current batch.
        Returns
        -------
        The ELMo representations for the input sequence, shape
        ``(batch_size, timesteps, embedding_dim)``
        """
        elmo_output = self._elmo(inputs)
        elmo_representations = elmo_output['elmo_representations'][0]
        if self._projection:
            projection = self._projection
            for _ in range(elmo_representations.dim() - 2):
                projection = TimeDistributed(projection)
            elmo_representations = projection(elmo_representations)
        return elmo_representations
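
A minimal usage sketch for this class; the file names are placeholders, and `batch_to_ids` is the character-id converter from `allennlp.modules.elmo`:

from allennlp.modules.elmo import batch_to_ids

embedder = ElmoTokenEmbedder('elmo_options.json', 'elmo_weights.hdf5')
char_ids = batch_to_ids([['A', 'short', 'sentence']])  # shape (1, 3, 50)
embeddings = embedder(char_ids)                        # shape (1, 3, embedder.get_output_dim())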
Example #5
import logging
import sys

import numpy as np

# The snippet assumes ``enteval`` is importable and that ``options_file``,
# ``weight_file``, ``batcher``, ``prepare`` and ``params_enteval`` are defined
# elsewhere in the script (EntEval setup).

# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)

if __name__ == "__main__":
    layer = sys.argv[1]
    task = int(sys.argv[2])
    mix_params = np.ones(3)
    if layer in ['0', '1', '2']:
        layer = int(layer)
        # The scalar mix softmaxes these parameters, so +1e4 for one layer and
        # -1e4 for the rest effectively selects that single ELMo layer.
        mix_params *= -1e4
        mix_params[layer] = 1e4

    elmo = Elmo(options_file,
                weight_file,
                1,
                dropout=0,
                scalar_mix_parameters=mix_params).cuda()

    print("Using {} layer of ElMo".format(layer))

    se = enteval.engine.SE(params_enteval, batcher, prepare)
    task_groups = [
        ["CAPsame", "CAPnext", "CERP", "EFP", "KORE", "WikiSRS", "ERT"],
        ["Rare"],
        ["ET"],
        ["ConllYago"],
    ]

    results = se.eval(task_groups[task])
    for k, v in results.items():
        print(k, v)  # loop body truncated in the source; printing the scores is a plausible completion
Example #7
from elmo import Elmo, batch_to_ids

# Load the default pretrained model with 3 output representations.
elmo = Elmo.get_default(3)
elmo.cuda()

batch = [["hi", "there"], ["what", "is", "up"]]

# Convert tokens to character ids and embed them on the GPU.
char_ids = batch_to_ids(batch).cuda()
embeddings = elmo(char_ids)
print(embeddings["elmo_representations"])
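
Assuming this fork mirrors the AllenNLP output format, `elmo_representations` is a list with one tensor per requested representation, each padded to the longest sentence in the batch:

print(embeddings["elmo_representations"][0].shape)  # e.g. torch.Size([2, 3, 1024]) if the model uses 1024-dim ELMo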
Example #8
def __init__(self, leftPort='COM3', rightPort='COM6', baudrate=19200):
    # Two-wheel variant on Windows COM ports.
    self._port = [leftPort, rightPort]
    self._baudrate = baudrate
    self._elmoList = [Elmo(port, self._baudrate) for port in self._port]
    self._v = 0
Example #9
    # Assumes ``from time import time`` and that ``query``, ``inverted_index``
    # and ``fasttext_searcher`` were set up earlier in the script.
    start_fasttext_time = time()
    fasttext_search_result = fasttext_searcher.search(query)
    fasttext_elapsed_time = time() - start_fasttext_time

    print(
        f'Here is what fasttext found for the query "{query}" in {fasttext_elapsed_time} sec:\n'
    )
    for index, result in enumerate(fasttext_search_result):
        print(f'{index + 1}) {result}')

    # ------------------ Fasttext part end --------------------------- #

    input("\nPress Enter to also search with Elmo...\n")

    # ------------------ Elmo part start --------------------------- #

    elmo_searcher = Elmo(inverted_index)

    start_elmo_time = time()
    elmo_search_result = elmo_searcher.search(query)
    elmo_elapsed_time = time() - start_elmo_time

    print(
        f'Here is what Elmo found for the query "{query}" in {elmo_elapsed_time} sec:'
    )
    for index, result in enumerate(elmo_search_result):
        print(f'{index + 1}) {result}')

    # ------------------ Elmo part end --------------------------- #