Example #1
    def add_defaults(cls, defaults):
        """ Adds parser defaults to the config object from environment variables.
        """
        defaults.axon = bittensor.Config()
        defaults.axon.priority = bittensor.Config()
        defaults.axon.priority.max_workers = os.getenv('BT_AXON_PRIORITY_MAX_WORKERS', 10)
        defaults.axon.priority.maxsize = os.getenv('BT_AXON_PRIORITY_MAXSIZE', -1)
Example #2
def construct_config():
    defaults = bittensor.Config()
    bittensor.subtensor.add_defaults(defaults)
    bittensor.dendrite.add_defaults(defaults)
    bittensor.axon.add_defaults(defaults)
    bittensor.wallet.add_defaults(defaults)
    bittensor.dataset.add_defaults(defaults)

    return defaults
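As a usage note: construct_config simply chains the per-component add_defaults hooks shown in the other examples. A minimal sketch of consuming the result, assuming bittensor is installed and the hooks behave as above:

import bittensor

defaults = construct_config()
print(defaults.wallet.name)        # 'default' unless BT_WALLET_NAME is set
print(defaults.subtensor.network)  # 'nakamoto' unless BT_SUBTENSOR_NETWORK is set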
Example #3
    def add_defaults(cls, defaults):
        """ Adds parser defaults to the config object from environment variables.
        """
        defaults.wallet = bittensor.Config()
        defaults.wallet.name = os.getenv('BT_WALLET_NAME', 'default')
        defaults.wallet.hotkey = os.getenv('BT_WALLET_HOTKEY', 'default')
        defaults.wallet.path = os.getenv('BT_WALLET_PATH', '~/.bittensor/wallets/')
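The environment-override pattern these hooks rely on can be exercised directly. A minimal sketch, assuming the method above is exposed as bittensor.wallet.add_defaults (the wallet name below is purely illustrative):

import os
import bittensor

os.environ['BT_WALLET_NAME'] = 'my_wallet'  # illustrative value, not a real wallet

defaults = bittensor.Config()
bittensor.wallet.add_defaults(defaults)
assert defaults.wallet.name == 'my_wallet'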
Example #4
    def add_defaults(cls, defaults):
        """ Adds parser defaults to the config object from environment variables.
        """
        defaults.subtensor = bittensor.Config()
        defaults.subtensor.network = os.getenv('BT_SUBTENSOR_NETWORK', 'nakamoto')
        defaults.subtensor.chain_endpoint = os.getenv('BT_SUBTENSOR_CHAIN_ENDPOINT')
Example #5
    def add_defaults(cls, defaults):
        """ Adds parser defaults to the config object from environment variables.
        """
        defaults.dataset = bittensor.Config()
        defaults.dataset.batch_size = os.getenv('BT_DATASET_BATCH_SIZE', 10)
        defaults.dataset.block_size = os.getenv('BT_DATASET_BLOCK_SIZE', 20)
        defaults.dataset.max_corpus_size = os.getenv('BT_DATASET_MAX_CORPUS_SIZE', 1e+4)
        defaults.dataset.num_workers = os.getenv('BT_DATASET_NUM_WORKERS', 0)
        defaults.dataset.dataset_name = os.getenv('BT_DATASET_DATASET_NAME', 'default')
        defaults.dataset.data_dir = os.getenv('BT_DATASET_DATADIR', '~/.bittensor/data/')
        defaults.dataset.save_dataset = os.getenv('BT_DATASET_SAVE_DATASET', False)
        defaults.dataset.max_datasets = os.getenv('BT_DATASET_MAX_DATASETS', 3)
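One caveat: os.getenv returns a string whenever the variable is set, so numeric defaults such as 10 keep their type only when the variable is absent. A small sketch of the explicit cast (an addition for illustration, not part of the example above):

import os

# Cast so BT_DATASET_BATCH_SIZE=32 yields the int 32 rather than the string '32'.
batch_size = int(os.getenv('BT_DATASET_BATCH_SIZE', 10))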
Example #6
    def add_defaults(cls, defaults):
        """ Adds parser defaults to the config object from environment variables.
        """
        defaults.axon = bittensor.Config()
        defaults.axon.port = os.getenv('BT_AXON_PORT', 8091)
        defaults.axon.ip = os.getenv('BT_AXON_IP', '[::]')
        defaults.axon.max_workers = os.getenv('BT_AXON_MAX_WORKERS', 10)
        defaults.axon.maximum_concurrent_rpcs = os.getenv('BT_AXON_MAXIMUM_CONCURRENT_RPCS', 400)

        defaults.axon.priority = bittensor.Config()
        defaults.axon.priority.max_workers = os.getenv('BT_AXON_PRIORITY_MAX_WORKERS', 10)
        defaults.axon.priority.maxsize = os.getenv('BT_AXON_PRIORITY_MAXSIZE', -1)
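The priority maxsize default of -1 matches the standard-library queue convention, where a non-positive maxsize means unbounded; presumably the default feeds such a queue. A self-contained illustration of that convention:

import queue

q = queue.Queue(maxsize=-1)  # maxsize <= 0 means the queue is unbounded
q.put('request')
assert q.get() == 'request'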
Example #7
    def add_defaults(cls, defaults):
        """ Adds parser defaults to the config object from environment variables.
        """
        defaults.logging = bittensor.Config()
        defaults.logging.debug = os.getenv('BT_LOGGING_DEBUG', False)
        defaults.logging.trace = os.getenv('BT_LOGGING_TRACE', False)
        defaults.logging.record_log = os.getenv('BT_LOGGING_RECORD_LOG', False)
        defaults.logging.logging_dir = os.getenv('BT_LOGGING_LOGGING_DIR', '~/.bittensor/miners')
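Because environment values are strings, any non-empty value is truthy: BT_LOGGING_DEBUG=false would still read as enabled if the raw value were used as a bool. A stricter parse (an assumption, not part of the example above) might look like:

import os

# Treat only a small set of strings as true; anything else is false.
debug = os.getenv('BT_LOGGING_DEBUG', 'false').lower() in ('1', 'true', 'yes')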
Example #8
    def add_defaults(cls, defaults):
        """ Adds parser defaults to the config object from environment variables.
        """
        defaults.dendrite = bittensor.Config()
        defaults.dendrite.max_worker_threads = os.getenv('BT_DENDRITE_MAX_WORKER_THREADS', 150)
        defaults.dendrite.max_active_receptors = os.getenv('BT_DENDRITE_MAX_ACTIVE_RECEPTORS', 500)
        defaults.dendrite.timeout = os.getenv('BT_DENDRITE_TIMEOUT', bittensor.__blocktime__)
        defaults.dendrite.requires_grad = os.getenv('BT_DENDRITE_REQUIRES_GRAD', True)
Example #9
def main(hparams):

    # Args
    batch_size = 20
    eval_batch_size = 10
    bptt = 35

    dataset = Dataset()
    train_data = dataset.batchify(dataset.train_txt, batch_size)
    val_data = dataset.batchify(dataset.val_txt, eval_batch_size)
    test_data = dataset.batchify(dataset.test_txt, eval_batch_size)

    # Transformer model architecture
    ntokens = len(dataset.TEXT.vocab.stoi)  # the size of vocabulary
    emsize = 200  # embedding dimension
    nhid = 200  # the dimension of the feedforward network model in nn.TransformerEncoder
    nlayers = 2  # the number of nn.TransformerEncoderLayer in nn.TransformerEncoder
    nhead = 2  # the number of heads in the multiheadattention models
    dropout = 0.2  # the dropout value
    transformer = TransformerModel(ntokens, emsize, nhead, nhid, nlayers,
                                   dropout)

    # Optimizer.
    criterion = nn.CrossEntropyLoss()  # loss function
    lr = 5.0  # learning rate
    optimizer = torch.optim.SGD(transformer.parameters(), lr=lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)

    # bittensor:
    # Load bittensor config from hparams.
    config = bittensor.Config(hparams)

    # Build the neuron from configs.
    neuron = bittensor.Neuron(config)

    # Init a trainable request router.
    router = bittensor.Router(x_dim=784, key_dim=100, topk=10)

    # Subscribe the transformer model to the network.
    neuron.subscribe(transformer)

    # Start the neuron backend.
    neuron.start()

    def train(dataset, transformer):
        transformer.train()  # Turn on the train mode.
        total_loss = 0.
        start_time = time.time()
        ntokens = len(dataset.TEXT.vocab.stoi)
        for batch, i in enumerate(
                range(0,
                      train_data.size(0) - 1, dataset.bptt)):
            data, targets = dataset.get_batch(train_data, i)
            optimizer.zero_grad()

            # Flatten encoder inputs.
            inputs = data.view(-1, bptt, emsize)
            inputs = torch.flatten(inputs, start_dim=1)

            # Query the remote network.
            synapses = neuron.synapses()  # Returns a list of synapses on the network.
            requests, scores = router.route(inputs, synapses)  # Routes inputs to the network.
            responses = neuron(requests, synapses)  # Makes network calls.
            remote = router.join(responses)  # Joins responses based on scores.

            # Encode sequence inputs.
            encodings = transformer.encode(data)  # (seq_len, batch_size, embedding_size)

            # Get nodes from the metagraph and map them to torch keys.
            # (keymap is assumed to be a node-to-key mapping helper defined elsewhere.)
            axons = neuron.axons()  # List[bittensor_pb2.Node]
            keys = keymap.toKeys(axons)  # (-1, key_dim)

            # Learn a map from the gate_inputs to keys:
            # gates[i, j] = score for the jth key for input i.
            # (gate, x_dim and topk are assumed to be gating helpers/constants defined elsewhere.)
            gate_inputs = encodings.view(batch_size, x_dim)  # (batch_size, seq_len * embedding_size)
            gates = gate(gate_inputs, keys, topk=min(len(keys), topk))

            # Dispatch data to inputs for each key;
            # when gates[i, j] == 0, key j does not receive input i.
            dispatch_inputs = data.view(batch_size, -1)  # (batch_size, sequence_length)
            dispatch = dispatcher.dispatch(dispatch_inputs, gates)  # List[(-1, seq_len)]

            # Query the network by mapping from keys to node endpoints.
            # results = List[torch.Tensor], len(results) = len(keys)
            axons = keymap.toAxons(keys)  # List[bittensor_pb2.Node]
            query = neuron(dispatch, axons)  # List[(-1, embedding_size)]

            # Join results using gates to combine inputs.
            results = dispatcher.combine(query, gates)  # (batch_size, seq_len * embedding_size)

            # Decode responses.
            results = results.view(-1, batch_size, emsize)  # (seq_len, batch_size, embedding_size)
            to_decode = results + encodings
            output = transformer.decode(to_decode)  # (target_len, batch_size, embedding_size)

            # Loss and optimizer step
            loss = criterion(output.view(-1, ntokens), targets)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(transformer.parameters(), 0.5)
            optimizer.step()

            # Update bittensor weights
            weights = neuron.getweights(axons)
            weights = (0.95) * weights + (0.05) * torch.mean(gates, dim=0)
            neuron.setweights(axons, weights)

            total_loss += loss.item()
            log_interval = 1
            if batch % log_interval == 0 and batch > 0:
                cur_loss = total_loss / log_interval
                elapsed = time.time() - start_time
                print('| epoch {:3d} | {:5d}/{:5d} batches | '
                      'lr {:02.2f} | ms/batch {:5.2f} | '
                      'loss {:5.2f} | ppl {:8.2f}'.format(
                          epoch, batch,
                          len(train_data) // dataset.bptt,
                          scheduler.get_lr()[0], elapsed * 1000 / log_interval,
                          cur_loss, math.exp(cur_loss)))
                total_loss = 0
                start_time = time.time()

    epochs = 10  # Assumed number of training epochs for this sketch.
    for epoch in range(1, epochs + 1):
        epoch_start_time = time.time()
        train(dataset, transformer)
        scheduler.step()
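The weight update inside the training loop above is an exponential moving average with decay 0.95, i.e. new = 0.95 * old + 0.05 * observation. A standalone sketch of the same update:

import torch

old = torch.tensor([0.5, 0.5])
obs = torch.tensor([1.0, 0.0])
new = 0.95 * old + 0.05 * obs  # tensor([0.5250, 0.4750])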
Example #10
def test_loaded_config():
    with pytest.raises(NotImplementedError):
        bittensor.Config(loaded_config=True)
Example #11
def main(hparams):

    # Training params.
    batch_size_train = 50
    learning_rate = 0.01
    momentum = 0.5
    log_interval = 10

    # Dataset.
    train_loader = torch.utils.data.DataLoader(
        torchvision.datasets.MNIST(
            root="~/tmp/",
            train=True,
            download=True,
            transform=torchvision.transforms.Compose(
                [torchvision.transforms.ToTensor()])),
        batch_size=batch_size_train,
        shuffle=True)

    # bittensor:
    # Load bittensor config from hparams.
    config = bittensor.Config(hparams)

    # Build the neuron from configs.
    neuron = bittensor.Neuron(config)

    # Init a trainable request router.
    router = bittensor.Router(x_dim=784, key_dim=100, topk=10)

    # Build local network.
    net = Net()

    # Subscribe the local network to the remote network.
    neuron.subscribe(net)

    # Start the neuron backend.
    neuron.start()

    # Build summary writer for tensorboard.
    writer = SummaryWriter(log_dir='./runs/' + config.neuron_key)

    # Build the optimizer.
    optimizer = optim.SGD(net.parameters(),
                          lr=learning_rate,
                          momentum=momentum)

    def train(epoch, global_step):
        net.train()
        for batch_idx, (data, target) in enumerate(train_loader):
            optimizer.zero_grad()

            # Flatten mnist inputs
            inputs = torch.flatten(data, start_dim=1)

            # Query the remote network.
            synapses = neuron.synapses()  # Returns a list of synapses on the network.
            requests, scores = router.route(inputs, synapses)  # Routes inputs to the network.
            responses = neuron(requests, synapses)  # Makes network calls.
            remote = router.join(responses)  # Joins responses based on scores.

            # Query the local network.
            local = net(inputs)

            # Train.
            output = local + remote

            loss = F.nll_loss(output, target)
            loss.backward()
            optimizer.step()
            global_step += 1

            # Set network weights.
            weights = neuron.getweights(synapses)
            weights = (0.99) * weights + 0.01 * torch.mean(scores, dim=0)
            neuron.setweights(synapses, weights)

            if batch_idx % log_interval == 0:
                writer.add_scalar('n_peers', len(neuron.metagraph.peers),
                                  global_step)
                writer.add_scalar('n_synapses', len(neuron.metagraph.synapses),
                                  global_step)
                writer.add_scalar('Loss/train', float(loss.item()),
                                  global_step)
                logger.info(
                    'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f} \tnP|nS: {}|{}'
                    .format(epoch, batch_idx * len(data),
                            len(train_loader.dataset),
                            100. * batch_idx / len(train_loader), loss.item(),
                            len(neuron.metagraph.peers),
                            len(neuron.metagraph.synapses)))
        return global_step  # Carry the step count back to the caller.

    epoch = 0
    global_step = 0
    try:
        while True:
            global_step = train(epoch, global_step)
            epoch += 1
    except Exception as e:
        logger.error(e)
        neuron.stop()