def new_neuron():
    """Build an MNIST neuron from the default configuration.

    Returns a ``Neuron`` constructed from a hard-coded config and fitted
    with a freshly generated keypair.
    """
    # 1. Init Config item.
    cfg = {
        'session': {
            'datapath': 'data/',
            'learning_rate': 0.01,
            'momentum': 0.9,
            'batch_size_train': 64,
            'batch_size_test': 64,
            'log_interval': 10,
            'sync_interval': 100,
            'priority_interval': 100,
            'name': 'mnist',
            'trial_id': '1608070667',
        },
        'synapse': {
            'target_dim': 10,
        },
        'dendrite': {
            'key_dim': 100,
            'topk': 10,
            'stale_emit_filter': 10000,
            'pass_gradients': True,
            'timeout': 0.5,
            'do_backoff': True,
            'max_backoff': 100,
        },
        'axon': {
            'local_port': 8091,
            'external_ip': '191.97.53.53',
            'max_workers': 5,
            'max_gradients': 1000,
        },
        'nucleus': {
            'max_workers': 5,
            'queue_timeout': 5,
            'queue_maxsize': 1000,
        },
        'metagraph': {
            'chain_endpoint': '206.189.254.5:12345',
            'stale_emit_filter': 10000,
        },
        'meta_logger': {
            'log_dir': 'data/',
        },
        'neuron': {
            'keyfile': None,
            'keypair': None,
        },
    }
    cfg = Munch.fromDict(cfg)
    logger.info(Config.toString(cfg))

    # Generate a brand-new hotkey for this neuron.
    phrase = Keypair.generate_mnemonic()
    fresh_keypair = Keypair.create_from_mnemonic(phrase)

    node = Neuron(cfg)
    node.keypair = fresh_keypair
    return node
def add_args(parser: argparse.ArgumentParser):
    """Register session-level CLI arguments for the CIFAR/DPN session.

    Args:
        parser: argparse parser extended in place; synapse- and
            neuron-level arguments are appended afterwards.
    """
    parser.add_argument('--session.learning_rate',
                        default=0.01,
                        type=float,
                        help='Training initial learning rate.')
    parser.add_argument('--session.momentum',
                        default=0.9,
                        type=float,
                        help='Training initial momentum for SGD.')
    parser.add_argument('--session.batch_size_train',
                        default=64,
                        type=int,
                        help='Training batch size.')
    parser.add_argument('--session.batch_size_test',
                        default=64,
                        type=int,
                        help='Testing batch size.')
    parser.add_argument(
        '--session.log_interval',
        default=150,
        type=int,
        help='Batches until session prints log statements.')
    # Fixed duplicated word in the help text ("we we" -> "we").
    parser.add_argument(
        '--session.sync_interval',
        default=150,
        type=int,
        help='Batches before we sync with chain and emit new weights.')
    parser.add_argument(
        '--session.root_dir',
        default='~/.bittensor/sessions/',
        type=str,
        help='Root path to load and save data associated with each session')
    parser.add_argument(
        '--session.name',
        default='cifar',
        type=str,
        help='Trials for this session go in session.root / session.name')
    parser.add_argument(
        '--session.trial_uid',
        default=str(time.time()).split('.')[0],
        type=str,
        help=
        'Saved models go in session.root_dir / session.name / session.trial_uid'
    )
    parser.add_argument('--session.record_log',
                        default=True,
                        help='Record all logs when running this session')
    parser.add_argument(
        '--session.config_file',
        type=str,
        help=
        'config file to run this neuron, if not using cmd line arguments.')
    Neuron.add_args(parser)
    DPNSynapse.add_args(parser)
def check_config(config: Munch):
    """Validate session hyperparameters and prepare the session directory.

    Expands and creates ``config.session.full_path`` as a side effect, then
    delegates to synapse- and neuron-level checks.

    Raises:
        AssertionError: if momentum, batch size, or learning rate are invalid.
    """
    assert config.session.momentum > 0 and config.session.momentum < 1, "momentum must be a value between 0 and 1"
    # Fixed assertion-message typo ("must a" -> "must be a").
    assert config.session.batch_size_train > 0, "batch_size_train must be a positive value"
    assert config.session.learning_rate > 0, "learning_rate must be a positive value."
    full_path = '{}/{}/{}'.format(config.session.root_dir,
                                  config.session.name,
                                  config.session.trial_uid)
    # Expand '~' so the default root_dir resolves to the user's home.
    config.session.full_path = os.path.expanduser(full_path)
    if not os.path.exists(config.session.full_path):
        os.makedirs(config.session.full_path)
    BertNSPSynapse.check_config(config)
    Neuron.check_config(config)
def check_config(config: Munch):
    """Validate session hyperparameters and prepare the session directory.

    Expands and creates ``config.session.full_path`` as a side effect, then
    delegates to synapse- and neuron-level checks.

    Raises:
        AssertionError: if any interval, batch size, momentum, or learning
            rate is out of range.
    """
    # Message fixes: dropped stray "dimension" and doubled "be be".
    assert config.session.log_interval > 0, "log_interval must be positive"
    assert config.session.momentum > 0 and config.session.momentum < 1, "momentum must be a value between 0 and 1"
    assert config.session.batch_size_train > 0, "batch_size_train must be a positive value"
    assert config.session.batch_size_test > 0, "batch_size_test must be a positive value"
    assert config.session.learning_rate > 0, "learning rate must be a positive value."
    full_path = '{}/{}/{}/'.format(config.session.root_dir,
                                   config.session.name, config.session.uid)
    # Expand '~' for consistency with the sibling check_config implementations.
    config.session.full_path = os.path.expanduser(full_path)
    if not os.path.exists(config.session.full_path):
        os.makedirs(config.session.full_path)
    FFNNSynapse.check_config(config)
    Neuron.check_config(config)
def add_args(parser: argparse.ArgumentParser):
    """Register session-level CLI arguments for the MNIST/FFNN session.

    Args:
        parser: argparse parser extended in place; neuron- and
            synapse-level arguments are appended afterwards.
    """
    parser.add_argument('--session.learning_rate', default=0.01, type=float,
                        help='Training initial learning rate.')
    parser.add_argument('--session.momentum', default=0.9, type=float,
                        help='Training initial momentum for SGD.')
    parser.add_argument('--session.batch_size_train', default=64, type=int,
                        help='Training batch size.')
    parser.add_argument('--session.batch_size_test', default=64, type=int,
                        help='Testing batch size.')
    parser.add_argument('--session.log_interval', default=150, type=int,
                        help='Batches until session prints log statements.')
    # Fixed duplicated word in the help text ("we we" -> "we").
    parser.add_argument('--session.sync_interval', default=150, type=int,
                        help='Batches before we sync with chain and emit new weights.')
    parser.add_argument('--session.root_dir', default='data/', type=str,
                        help='Root path to load and save data associated with each session')
    parser.add_argument('--session.name', default='mnist', type=str,
                        help='Trials for this session go in session.root / session.name')
    parser.add_argument('--session.uid', default=str(time.time()).split('.')[0], type=str,
                        help='Saved models go in session.root_dir / session.name / session.uid')
    Neuron.add_args(parser)
    FFNNSynapse.add_args(parser)
def __init__(self, config: Munch):
    """Set up the BERT next-sentence-prediction training session."""
    self.config = config

    # Peer object handling network participation.
    self.neuron = Neuron(self.config)

    # Synapse under training.
    self.model = BertNSPSynapse(self.config)

    # SGD driven by a warmup-cosine hard-restart schedule.
    self.optimizer = torch.optim.SGD(
        self.model.parameters(),
        lr=self.config.session.learning_rate,
        momentum=self.config.session.momentum)
    self.scheduler = WarmupCosineWithHardRestartsSchedule(self.optimizer, 50, 300)

    # Dataset: 74 million sentences pulled from books.
    self.dataset = load_dataset('bookcorpus')

    # Tensorboard plus optional persistent file logging.
    self.tensorboard = SummaryWriter(log_dir=self.config.session.full_path)
    if self.config.session.record_log:
        log_filename = self.config.session.full_path + "/{}_{}.log".format(
            self.config.session.name, self.config.session.trial_uid)
        logger.add(
            log_filename,
            format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}")
def __init__(self, config: Munch):
    """Set up the BERT masked-language-model training session."""
    self.config = config

    # Peer object handling network participation.
    self.neuron = Neuron(self.config)

    # Synapse under training.
    self.model = BertMLMSynapse(self.config)

    # SGD driven by a warmup-cosine hard-restart schedule.
    self.optimizer = torch.optim.SGD(
        self.model.parameters(),
        lr=self.config.session.learning_rate,
        momentum=self.config.session.momentum)
    self.scheduler = WarmupCosineWithHardRestartsSchedule(self.optimizer, 50, 300)

    # Dataset: 74 million sentences pulled from books.
    self.dataset = load_dataset('bookcorpus')['train']

    # The collator accepts a list [ dict{'input_ids, ...; } ] where the internal dict
    # is produced by the tokenizer.
    self.data_collator = DataCollatorForLanguageModeling(
        tokenizer=bittensor.__tokenizer__(), mlm=True, mlm_probability=0.15)

    # Tensorboard plus optional persistent file logging.
    self.tensorboard = SummaryWriter(log_dir=self.config.session.full_path)
    if self.config.session.record_log:
        log_filename = self.config.session.full_path + "/{}_{}.log".format(
            self.config.session.name, self.config.session.trial_uid)
        logger.add(
            log_filename,
            format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}")
def __init__(self, config: Munch):
    """Set up the CIFAR-10 / DPN training session.

    Builds the neuron, model, optimizer/scheduler, train/test dataloaders,
    and logging. Fix: the original constructed ``self.test_data`` twice,
    triggering a redundant second CIFAR10 dataset load after the test
    dataloader was already built; the duplicate has been removed.
    """
    self.config = config

    # ---- Neuron ----
    self.neuron = Neuron(self.config)

    # ---- Model ----
    self.model = DPNSynapse(
        config)  # Feedforward neural network with PKMDendrite.
    self.device = torch.device(
        "cuda" if torch.cuda.is_available() else "cpu")
    self.model.to(self.device)  # Set model to device

    # ---- Optimizer ----
    self.optimizer = optim.SGD(self.model.parameters(),
                               lr=self.config.session.learning_rate,
                               momentum=self.config.session.momentum)
    self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer,
                                                     step_size=10.0,
                                                     gamma=0.1)

    # ---- Dataset ----
    self.train_data = torchvision.datasets.CIFAR10(
        root=self.config.session.root_dir + "datasets/",
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
        ]))
    self.trainloader = torch.utils.data.DataLoader(
        self.train_data,
        batch_size=self.config.session.batch_size_train,
        shuffle=True,
        num_workers=2)
    self.test_data = torchvision.datasets.CIFAR10(
        root=self.config.session.root_dir + "datasets/",
        train=False,
        download=True,
        transform=transforms.ToTensor())
    self.testloader = torch.utils.data.DataLoader(
        self.test_data,
        batch_size=self.config.session.batch_size_test,
        shuffle=False,
        num_workers=2)

    # ---- Tensorboard ----
    self.global_step = 0
    self.tensorboard = SummaryWriter(log_dir=self.config.session.full_path)
    if self.config.session.record_log:
        logger.add(
            self.config.session.full_path + "/{}_{}.log".format(
                self.config.session.name, self.config.session.trial_uid),
            format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}")
def __init__(self, config: Munch):
    """Set up the MNIST / FFNN training session.

    Builds the neuron, model, optimizer/scheduler, train/test dataloaders,
    and tensorboard writer. Fix: compare against None with ``is`` rather
    than ``==`` (identity check is the correct idiom and avoids invoking
    a custom ``__eq__``).
    """
    if config is None:
        config = Session.build_config()
    self.config = config

    # ---- Neuron ----
    self.neuron = Neuron(self.config)

    # ---- Model ----
    self.model = FFNNSynapse(config)  # Feedforward neural network with PKMRouter.
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    self.model.to(self.device)  # Set model to device

    # ---- Optimizer ----
    self.optimizer = optim.SGD(self.model.parameters(),
                               lr=self.config.session.learning_rate,
                               momentum=self.config.session.momentum)
    self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer,
                                                     step_size=10.0,
                                                     gamma=0.1)

    # ---- Dataset ----
    self.train_data = torchvision.datasets.MNIST(
        root=self.config.session.root_dir + "datasets/",
        train=True,
        download=True,
        transform=transforms.ToTensor())
    self.trainloader = torch.utils.data.DataLoader(
        self.train_data,
        batch_size=self.config.session.batch_size_train,
        shuffle=True,
        num_workers=2)
    self.test_data = torchvision.datasets.MNIST(
        root=self.config.session.root_dir + "datasets/",
        train=False,
        download=True,
        transform=transforms.ToTensor())
    self.testloader = torch.utils.data.DataLoader(
        self.test_data,
        batch_size=self.config.session.batch_size_test,
        shuffle=False,
        num_workers=2)

    # ---- Tensorboard ----
    self.global_step = 0
    self.tensorboard = SummaryWriter(log_dir=self.config.session.full_path)
def __init__(self, config: Munch):
    """Set up a serving session: FFNN model attached to the neuron's axon."""
    self.config = config

    # Network peer.
    self.neuron = Neuron(config)

    # FFNN synapse, moved to GPU when available, then exposed via the axon.
    self.model = FFNNSynapse(self.config)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    self.model.to(device)
    self.neuron.axon.serve(self.model)

    # Plain SGD over the model parameters.
    self.optimizer = torch.optim.SGD(
        self.model.parameters(),
        lr=self.config.session.learning_rate,
        momentum=self.config.session.momentum)

    # Tensorboard plus optional persistent file logging.
    self.tensorboard = SummaryWriter(log_dir=self.config.session.full_path)
    if self.config.session.record_log:
        log_filename = self.config.session.full_path + "/{}_{}.log".format(
            self.config.session.name, self.config.session.trial_uid)
        logger.add(
            log_filename,
            format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}")
def add_args(parser: argparse.ArgumentParser):
    """Register session-level CLI arguments for the bert-nsp session.

    Args:
        parser: argparse parser extended in place; synapse- and
            neuron-level arguments are appended afterwards.
    """
    parser.add_argument('--session.learning_rate',
                        default=0.01,
                        type=float,
                        help='Training initial learning rate.')
    parser.add_argument('--session.momentum',
                        default=0.98,
                        type=float,
                        help='Training initial momentum for SGD.')
    parser.add_argument('--session.epoch_length',
                        default=10,
                        type=int,
                        help='Iterations of training per epoch')
    parser.add_argument('--session.batch_size_train',
                        default=1,
                        type=int,
                        help='Training batch size.')
    parser.add_argument(
        '--session.sync_interval',
        default=100,
        type=int,
        help='Batches before we sync with chain and emit new weights.')
    parser.add_argument('--session.log_interval',
                        default=10,
                        type=int,
                        help='Batches before we log session info.')
    parser.add_argument(
        '--session.accumulation_interval',
        default=1,
        type=int,
        help='Batches before we apply acummulated gradients.')
    parser.add_argument(
        '--session.apply_remote_gradients',
        default=False,
        type=bool,
        help=
        'If true, neuron applies gradients which accumulate from remotes calls.'
    )
    parser.add_argument(
        '--session.root_dir',
        default='~/.bittensor/sessions/',
        type=str,
        help='Root path to load and save data associated with each session')
    parser.add_argument(
        '--session.name',
        default='bert-nsp',
        type=str,
        help='Trials for this session go in session.root / session.name')
    # Help text fixed: the argument is trial_uid, not uid.
    parser.add_argument(
        '--session.trial_uid',
        default=str(time.time()).split('.')[0],
        type=str,
        help=
        'Saved models go in session.root_dir / session.name / session.trial_uid')
    parser.add_argument('--session.record_log',
                        default=True,
                        help='Record all logs when running this session')
    parser.add_argument(
        '--session.config_file',
        type=str,
        help=
        'config file to run this neuron, if not using cmd line arguments.')
    BertNSPSynapse.add_args(parser)
    Neuron.add_args(parser)