# Example no. 1
# 0
def main(params):
    """Train a one-symbol Gumbel-Softmax MNIST game and print per-epoch stats.

    Parameters are parsed by ``core.init`` from *params* (batch/vocab size,
    number of epochs, etc. — see egg/core/util.py for the full list).
    """
    # initialize the egg lib and parse the common command-line arguments
    opts = core.init(params=params)

    # extra DataLoader options only help when training on GPU
    loader_kwargs = {"num_workers": 1, "pin_memory": True} if opts.cuda else {}
    to_tensor = transforms.ToTensor()

    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST("./data", train=True, download=True, transform=to_tensor),
        batch_size=opts.batch_size,
        shuffle=True,
        **loader_kwargs,
    )
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST("./data", train=False, transform=to_tensor),
        batch_size=opts.batch_size,
        shuffle=True,
        **loader_kwargs,
    )

    # build the agents: the sender emits one Gumbel-Softmax-relaxed symbol,
    # which the receiver consumes through an embedding of size 400
    sender = core.GumbelSoftmaxWrapper(Sender(opts.vocab_size), temperature=1.0)
    receiver = core.SymbolReceiverWrapper(
        Receiver(), vocab_size=opts.vocab_size, agent_input_size=400
    )

    # standard single-symbol Sender/Receiver game
    game = core.SymbolGameGS(sender, receiver, loss)

    # anneals the GS sampling temperature at the end of every epoch
    temperature_updater = core.TemperatureUpdater(
        agent=sender, decay=0.75, minimum=0.01
    )

    # optimizer configured from the common command-line flags (defaults to Adam)
    optimizer = core.build_optimizer(game.parameters())

    trainer = core.Trainer(
        game=game,
        optimizer=optimizer,
        train_data=train_loader,
        validation_data=test_loader,
        callbacks=[
            temperature_updater,
            core.ConsoleLogger(as_json=True, print_train_loss=True),
        ],
    )
    trainer.train(n_epochs=opts.n_epochs)

    core.close()
def test_symbol_wrapper():
    """SymbolReceiverWrapper must produce identical outputs whether a message
    arrives as long symbol ids (REINFORCE) or one-hot vectors (Gumbel-Softmax).
    """
    core.init()

    receiver = core.SymbolReceiverWrapper(Receiver(), vocab_size=15, agent_input_size=5)

    # REINFORCE-style encoding: a batch of 16 long symbol ids
    ids = torch.randint(high=15, size=(16,)).long()
    out_ids = receiver(ids)
    assert out_ids.size() == torch.Size((16, 5))

    # Gumbel-Softmax-style encoding: the same 16 messages, one-hot encoded
    one_hot = torch.zeros((16, 15))
    one_hot.scatter_(1, ids.unsqueeze(1), 1.0)
    out_one_hot = receiver(one_hot)

    # both encodings must map to the same receiver output
    assert out_ids.eq(out_one_hot).all().item() == 1
# Example no. 3
# 0
def main(params):
    """Entry point for the bit-vector reconstruction game.

    Builds sender/receiver agents according to ``opts.mode`` ('gs', 'rf', or
    'non_diff') and ``opts.variable_length``, wraps them into the matching EGG
    game, and trains with per-agent Adam learning rates, early stopping, and a
    per-epoch intervention evaluator.

    Raises:
        ValueError: if ``opts.mode`` is not one of the supported modes in the
            fixed-length setting (previously this fell through and crashed
            later with an UnboundLocalError on ``game``).
    """
    opts = get_params(params)
    print(opts)

    device = opts.device

    train_loader = OneHotLoader(n_bits=opts.n_bits,
                                bits_s=opts.bits_s,
                                bits_r=opts.bits_r,
                                batch_size=opts.batch_size,
                                # NOTE(review): true division yields a float —
                                # presumably OneHotLoader expects an integer
                                # batch count; confirm whether this should
                                # be // instead.
                                batches_per_epoch=opts.n_examples_per_epoch /
                                opts.batch_size)

    test_loader = UniformLoader(n_bits=opts.n_bits,
                                bits_s=opts.bits_s,
                                bits_r=opts.bits_r)
    # the uniform evaluation batch is static, so move it to the device once
    test_loader.batch = [x.to(device) for x in test_loader.batch]

    if not opts.variable_length:
        # single-symbol communication
        sender = Sender(n_bits=opts.n_bits,
                        n_hidden=opts.sender_hidden,
                        vocab_size=opts.vocab_size)
        if opts.mode == 'gs':
            sender = core.GumbelSoftmaxWrapper(agent=sender,
                                               temperature=opts.temperature)
            receiver = Receiver(n_bits=opts.n_bits,
                                n_hidden=opts.receiver_hidden)
            receiver = core.SymbolReceiverWrapper(
                receiver,
                vocab_size=opts.vocab_size,
                agent_input_size=opts.receiver_hidden)
            game = core.SymbolGameGS(sender, receiver, diff_loss)
        elif opts.mode == 'rf':
            sender = core.ReinforceWrapper(agent=sender)
            receiver = Receiver(n_bits=opts.n_bits,
                                n_hidden=opts.receiver_hidden)
            receiver = core.SymbolReceiverWrapper(
                receiver,
                vocab_size=opts.vocab_size,
                agent_input_size=opts.receiver_hidden)
            # receiver is deterministic; only the sender needs REINFORCE credit
            receiver = core.ReinforceDeterministicWrapper(agent=receiver)
            game = core.SymbolGameReinforce(
                sender,
                receiver,
                diff_loss,
                sender_entropy_coeff=opts.sender_entropy_coeff)
        elif opts.mode == 'non_diff':
            sender = core.ReinforceWrapper(agent=sender)
            receiver = ReinforcedReceiver(n_bits=opts.n_bits,
                                          n_hidden=opts.receiver_hidden)
            receiver = core.SymbolReceiverWrapper(
                receiver,
                vocab_size=opts.vocab_size,
                agent_input_size=opts.receiver_hidden)

            game = core.SymbolGameReinforce(
                sender,
                receiver,
                non_diff_loss,
                sender_entropy_coeff=opts.sender_entropy_coeff,
                receiver_entropy_coeff=opts.receiver_entropy_coeff)
        else:
            raise ValueError(f'Unknown mode: {opts.mode}')
    else:
        # variable-length communication — only REINFORCE training is supported
        if opts.mode != 'rf':
            print('Only mode=rf is supported atm')
            opts.mode = 'rf'

        if opts.sender_cell == 'transformer':
            sender = Sender(
                n_bits=opts.n_bits,
                n_hidden=opts.sender_hidden,
                vocab_size=opts.sender_hidden)  # TODO: not really vocab
            sender = core.TransformerSenderReinforce(
                agent=sender,
                vocab_size=opts.vocab_size,
                embed_dim=opts.sender_emb,
                max_len=opts.max_len,
                num_layers=1,
                num_heads=1,
                hidden_size=opts.sender_hidden)
        else:
            sender = Sender(
                n_bits=opts.n_bits,
                n_hidden=opts.sender_hidden,
                vocab_size=opts.sender_hidden)  # TODO: not really vocab
            sender = core.RnnSenderReinforce(agent=sender,
                                             vocab_size=opts.vocab_size,
                                             embed_dim=opts.sender_emb,
                                             hidden_size=opts.sender_hidden,
                                             max_len=opts.max_len,
                                             cell=opts.sender_cell)

        # (dead Receiver constructions that were previously created in the
        # sender branches above and immediately overwritten here have been
        # removed)
        if opts.receiver_cell == 'transformer':
            receiver = Receiver(n_bits=opts.n_bits, n_hidden=opts.receiver_emb)
            receiver = core.TransformerReceiverDeterministic(
                receiver,
                opts.vocab_size,
                opts.max_len,
                opts.receiver_emb,
                num_heads=1,
                hidden_size=opts.receiver_hidden,
                num_layers=1)
        else:
            receiver = Receiver(n_bits=opts.n_bits,
                                n_hidden=opts.receiver_hidden)
            receiver = core.RnnReceiverDeterministic(receiver,
                                                     opts.vocab_size,
                                                     opts.receiver_emb,
                                                     opts.receiver_hidden,
                                                     cell=opts.receiver_cell)
            # removed: a dead `game = core.SenderReceiverRnnGS(...)` that was
            # unconditionally overwritten below (and wrong for the rf mode
            # enforced in this branch)

        game = core.SenderReceiverRnnReinforce(
            sender,
            receiver,
            diff_loss,
            sender_entropy_coeff=opts.sender_entropy_coeff,
            receiver_entropy_coeff=opts.receiver_entropy_coeff)

    # separate learning rates for the two agents
    optimizer = torch.optim.Adam([
        dict(params=sender.parameters(), lr=opts.sender_lr),
        dict(params=receiver.parameters(), lr=opts.receiver_lr)
    ])

    loss = game.loss

    intervention = CallbackEvaluator(test_loader,
                                     device=device,
                                     is_gs=opts.mode == 'gs',
                                     loss=loss,
                                     var_length=opts.variable_length,
                                     input_intervention=True)

    trainer = core.Trainer(game=game,
                           optimizer=optimizer,
                           train_data=train_loader,
                           validation_data=test_loader,
                           callbacks=[
                               core.ConsoleLogger(as_json=True),
                               EarlyStopperAccuracy(opts.early_stopping_thr),
                               intervention
                           ])

    trainer.train(n_epochs=opts.n_epochs)

    core.close()