Example #1
def new_neuron():
    # 1. Initialize the config dictionary.
    config = {
        'session': {
            'datapath': 'data/',
            'learning_rate': 0.01,
            'momentum': 0.9,
            'batch_size_train': 64,
            'batch_size_test': 64,
            'log_interval': 10,
            'sync_interval': 100,
            'priority_interval': 100,
            'name': 'mnist',
            'trial_id': '1608070667'
        },
        'synapse': {
            'target_dim': 10
        },
        'dendrite': {
            'key_dim': 100,
            'topk': 10,
            'stale_emit_filter': 10000,
            'pass_gradients': True,
            'timeout': 0.5,
            'do_backoff': True,
            'max_backoff': 100
        },
        'axon': {
            'local_port': 8091,
            'external_ip': '191.97.53.53',
            'max_workers': 5,
            'max_gradients': 1000
        },
        'nucleus': {
            'max_workers': 5,
            'queue_timeout': 5,
            'queue_maxsize': 1000
        },
        'metagraph': {
            'chain_endpoint': '206.189.254.5:12345',
            'stale_emit_filter': 10000
        },
        'meta_logger': {
            'log_dir': 'data/'
        },
        'neuron': {
            'keyfile': None,
            'keypair': None
        }
    }

    # 2. Convert to a Munch so nested keys can be read with attribute access.
    config = Munch.fromDict(config)
    logger.info(Config.toString(config))

    # 3. Generate a keypair from a fresh mnemonic and attach it to the neuron.
    mnemonic = Keypair.generate_mnemonic()
    keypair = Keypair.create_from_mnemonic(mnemonic)
    neuron = Neuron(config)
    neuron.keypair = keypair
    return neuron
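
In this snippet, Munch.fromDict turns the nested config dict into an object whose keys can also be read as attributes (config.session.learning_rate instead of config['session']['learning_rate']). A minimal, self-contained sketch of that behaviour using only the munch package (the values below are illustrative):

from munch import Munch

config = Munch.fromDict({'session': {'learning_rate': 0.01, 'batch_size_train': 64}})
print(config.session.learning_rate)            # 0.01 -- attribute access into nested keys
print(config['session']['batch_size_train'])   # 64   -- dict-style access still works
print(config.toDict())                         # convert back to a plain nested dict
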
Example #2
            self.tensorboard.add_scalar('Neuron/Rloss',
                                        output.remote_target_loss.item(),
                                        self.global_step)
            self.tensorboard.add_scalar('Neuron/Lloss',
                                        output.local_target_loss.item(),
                                        self.global_step)
            self.tensorboard.add_scalar('Neuron/Dloss',
                                        output.distillation_loss.item(),
                                        self.global_step)

            # ---- Step increments ----
            self.global_step += 1
            self.training_loss += output.local_target_loss.item()

            # ---- Memory cleanup ----
            torch.cuda.empty_cache()
            del output


if __name__ == "__main__":
    # ---- Config ----
    parser = argparse.ArgumentParser()
    Session.add_args(parser)
    config = Config.to_config(parser)
    Session.check_config(config)
    logger.info(Config.toString(config))

    # ---- Build + Run ----
    session = Session(config)
    session.run()
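
The Rloss/Lloss/Dloss scalars above are written through a TensorBoard writer that the session is assumed to expose as self.tensorboard. The same logging pattern in a self-contained form with torch.utils.tensorboard (the log directory and loss values here are placeholders, not part of the snippet):

import torch
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter(log_dir='data/demo')    # hypothetical log directory
global_step = 0
for _ in range(3):
    # Random tensors standing in for the remote, local and distillation losses.
    remote_loss, local_loss, distillation_loss = torch.rand(3)
    writer.add_scalar('Neuron/Rloss', remote_loss.item(), global_step)
    writer.add_scalar('Neuron/Lloss', local_loss.item(), global_step)
    writer.add_scalar('Neuron/Dloss', distillation_loss.item(), global_step)
    global_step += 1
writer.close()
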
Example #3
def default_config() -> Munch:
    parser = argparse.ArgumentParser()
    Session.add_args(parser)
    config = Config.to_config(parser)
    Session.check_config(config)
    return config
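
Session.add_args, Config.to_config, and Session.check_config are bittensor helpers not shown in this snippet; judging by their use, add_args registers command-line flags on the parser, to_config parses them into a nested Munch config, and check_config validates the result. A rough stand-in for the same pattern, with hypothetical flag names borrowed from Example #1, could look like this:

import argparse
from munch import Munch

def add_args(parser: argparse.ArgumentParser):
    # Hypothetical flags mirroring two keys from the Example #1 config.
    parser.add_argument('--session.learning_rate', type=float, default=0.01)
    parser.add_argument('--session.batch_size_train', type=int, default=64)

def check_config(config: Munch):
    assert config.session.learning_rate > 0, 'learning_rate must be positive'

def default_config() -> Munch:
    parser = argparse.ArgumentParser()
    add_args(parser)
    args = parser.parse_args([])              # empty argv: keep the defaults
    config = Munch()
    for key, value in vars(args).items():     # 'session.learning_rate' -> config.session.learning_rate
        section, name = key.split('.', 1)
        config.setdefault(section, Munch())[name] = value
    check_config(config)
    return config

print(default_config().session.learning_rate)  # 0.01
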
Example #4
File: mnist.py  Project: il-dar/bittensor
            self.tensorboard.add_scalar('Dloss',
                                        output.distillation_loss.item(),
                                        self.global_step)

    # ---- Test epoch ----
    def test(self):
        with torch.no_grad():  # Turn off gradient computation to speed up inference.
            self.model.eval()  # Turn off dropout layers and put batch norm in eval mode.
            loss = 0.0
            accuracy = 0.0
            for _, (images, labels) in enumerate(self.testloader):

                # ---- Local Forward pass ----
                outputs = self.model.local_forward(
                    images=images.to(self.device),
                    targets=torch.LongTensor(labels).to(self.device),
                )
                loss += outputs.local_target_loss.item()
                accuracy += outputs.local_accuracy.item()

            return loss / len(self.testloader), accuracy / len(self.testloader)


if __name__ == "__main__":
    # ---- Build and Run ----
    config = Miner.build_config()
    logger.info(Config.toString(config))
    miner = Miner(config)
    miner.run()
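
The test() method above averages per-batch loss and accuracy over the test loader with gradients disabled. The same evaluation pattern in plain PyTorch, with a toy model and dataset standing in for the miner's MNIST components (everything below is illustrative):

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

model = nn.Linear(784, 10)                                 # toy stand-in for the MNIST model
dataset = TensorDataset(torch.randn(256, 784), torch.randint(0, 10, (256,)))
testloader = DataLoader(dataset, batch_size=64)

def test(model, testloader):
    with torch.no_grad():              # no gradient bookkeeping during evaluation
        model.eval()                   # dropout off, batch norm in eval mode
        loss = 0.0
        accuracy = 0.0
        for images, labels in testloader:
            logits = model(images)
            loss += nn.functional.cross_entropy(logits, labels).item()
            accuracy += (logits.argmax(dim=1) == labels).float().mean().item()
        return loss / len(testloader), accuracy / len(testloader)

print(test(model, testloader))         # (average loss, average accuracy)
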
Example #5
                    self.neuron.dendrite)
            self.tensorboard.add_scalar('Rloss', output.remote_target_loss.item(), self.global_step)
            self.tensorboard.add_scalar('Lloss', output.local_target_loss.item(), self.global_step)
            self.tensorboard.add_scalar('Dloss', output.distillation_loss.item(), self.global_step)


    # ---- Test epoch ----
    def test(self):
        with torch.no_grad():  # Turn off gradient computation to speed up inference.
            self.model.eval()  # Turn off dropout layers and put batch norm in eval mode.
            loss = 0.0
            accuracy = 0.0
            for _, (images, labels) in enumerate(self.testloader):

                # ---- Local Forward pass ----
                outputs = self.model.local_forward(
                    images=images.to(self.device),
                    targets=torch.LongTensor(labels).to(self.device),
                )
                loss += outputs.local_target_loss.item()
                accuracy += outputs.local_accuracy.item()

            return loss / len(self.testloader), accuracy / len(self.testloader)

        
if __name__ == "__main__":
    # ---- Build and Run ----
    config = Session.config()
    logger.info(Config.toString(config))
    session = Session(config)
    session.run()