class Miner(): """ Initializes, trains, and tests models created inside of 'bittensor/synapses'. During instantiation, this class takes a config as a [Munch](https://github.com/Infinidat/munch) object. """ def __init__(self, config: Munch = None, **kwargs): if config == None: config = Miner.default_config() bittensor.config.Config.update_with_kwargs(config.miner, kwargs) Miner.check_config(config) self.config = config # ---- Neuron ---- self.neuron = bittensor.neuron.Neuron(self.config) # ---- Model ---- self.model = XLMSynapse(self.config) # ---- Optimizer ---- self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.config.miner.learning_rate, momentum=self.config.miner.momentum) self.scheduler = WarmupCosineWithHardRestartsSchedule( self.optimizer, 50, 300) # ---- Model Load/Save tools ---- self.model_toolbox = ModelToolbox(XLMSynapse, torch.optim.SGD) # ---- Dataset ---- # Dataset: 74 million sentences pulled from books. self.dataset = load_dataset('amazon_reviews_multi', 'en')['train'] self.device = torch.device( "cuda" if torch.cuda.is_available() else "cpu") if self.config.synapse.device: self.device = torch.device(self.config.synapse.device) # ---- Logging ---- self.tensorboard = SummaryWriter(log_dir=self.config.miner.full_path) if self.config.miner.record_log == True: filepath = f"{self.config.miner.full_path}/{self.config.miner.name}_ {self.config.miner.trial_uid}.log" logger.add( filepath, format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}", rotation="250 MB", retention="10 days") @staticmethod def default_config() -> Munch: parser = argparse.ArgumentParser() Miner.add_args(parser) config = bittensor.config.Config.to_config(parser) return config @staticmethod def add_args(parser: argparse.ArgumentParser): parser.add_argument('--miner.learning_rate', default=0.01, type=float, help='Training initial learning rate.') parser.add_argument('--miner.momentum', default=0.98, type=float, help='Training initial momentum for SGD.') parser.add_argument('--miner.n_epochs', default=int(sys.maxsize), type=int, help='Number of training epochs.') parser.add_argument('--miner.epoch_length', default=500, type=int, help='Iterations of training per epoch') parser.add_argument('--miner.batch_size_train', default=1, type=int, help='Training batch size.') parser.add_argument( '--miner.sync_interval', default=100, type=int, help='Batches before we sync with chain and emit new weights.') parser.add_argument('--miner.log_interval', default=10, type=int, help='Batches before we log miner info.') parser.add_argument( '--miner.accumulation_interval', default=1, type=int, help='Batches before we apply acummulated gradients.') parser.add_argument( '--miner.apply_remote_gradients', default=False, type=bool, help= 'If true, neuron applies gradients which accumulate from remotes calls.' 
        parser.add_argument('--miner.root_dir', default='~/.bittensor/miners/', type=str, help='Root path to load and save data associated with each miner.')
        parser.add_argument('--miner.name', default='xlm_wiki', type=str, help='Trials for this miner go in miner.root_dir / miner.name.')
        parser.add_argument('--miner.trial_uid', default=str(time.time()).split('.')[0], type=str, help='Saved models go in miner.root_dir / miner.name / miner.trial_uid.')
        parser.add_argument('--miner.record_log', default=False, help='Record all logs when running this miner.')
        parser.add_argument('--miner.config_file', type=str, help='Config file to run this neuron, if not using command line arguments.')
        parser.add_argument('--debug', dest='debug', action='store_true', help='Turn on bittensor debugging information.')
        parser.set_defaults(debug=False)
        XLMSynapse.add_args(parser)
        bittensor.neuron.Neuron.add_args(parser)

    @staticmethod
    def check_config(config: Munch):
        if config.debug:
            bittensor.__log_level__ = 'TRACE'
            logger.debug('DEBUG is ON')
        else:
            logger.info('DEBUG is OFF')
        assert config.miner.momentum > 0 and config.miner.momentum < 1, "momentum must be a value between 0 and 1"
        assert config.miner.batch_size_train > 0, "batch_size_train must be a positive value"
        assert config.miner.learning_rate > 0, "learning_rate must be a positive value."
        full_path = '{}/{}/{}'.format(config.miner.root_dir, config.miner.name, config.miner.trial_uid)
        config.miner.full_path = os.path.expanduser(full_path)
        if not os.path.exists(config.miner.full_path):
            os.makedirs(config.miner.full_path)

    # --- Main loop ----
    def run(self):

        # ---- Subscribe ----
        with self.neuron:

            # ---- Weights ----
            self.row = self.neuron.metagraph.row.to(self.model.device)

            # --- Run state ---
            self.global_step = 0
            self.best_train_loss = math.inf

            # --- Loop for epochs ---
            for self.epoch in range(self.config.miner.n_epochs):
                try:
                    # ---- Serve ----
                    self.neuron.axon.serve(self.model)

                    # ---- Train Model ----
                    self.train()
                    self.scheduler.step()

                    # If the model has borked for some reason, make sure it doesn't emit weights.
                    # Instead, reload the previous version of the model.
                    if torch.any(torch.isnan(torch.cat([param.view(-1) for param in self.model.parameters()]))):
                        self.model, self.optimizer = self.model_toolbox.load_model(self.config)
                        continue

                    # ---- Emit weights ----
                    self.neuron.metagraph.set_weights(self.row, wait_for_inclusion=True)  # Sets my row-weights on the chain.

                    # ---- Sync metagraph ----
                    self.neuron.metagraph.sync()  # Pulls the latest metagraph state (with my update).
                    self.row = self.neuron.metagraph.row.to(self.model.device)

                    # --- Epoch logs ----
                    print(self.neuron.axon.__full_str__())
                    print(self.neuron.dendrite.__full_str__())
                    print(self.neuron.metagraph)

                    # ---- Update Tensorboard ----
                    self.neuron.dendrite.__to_tensorboard__(self.tensorboard, self.global_step)
                    self.neuron.metagraph.__to_tensorboard__(self.tensorboard, self.global_step)
                    self.neuron.axon.__to_tensorboard__(self.tensorboard, self.global_step)

                    # ---- Save best loss and model ----
                    if self.training_loss and self.epoch % 10 == 0 and self.training_loss < self.best_train_loss:
                        self.best_train_loss = self.training_loss / 10  # update best train loss
                        self.model_toolbox.save_model(
                            self.config.miner.full_path, {
                                'epoch': self.epoch,
                                'model_state_dict': self.model.state_dict(),
                                'loss': self.best_train_loss,
                                'optimizer_state_dict': self.optimizer.state_dict(),
                            })
                        self.tensorboard.add_scalar('Neuron/Train_loss', self.training_loss, self.global_step)

                # --- Catch Errors ----
                except Exception as e:
                    logger.error('Exception in training script with error: {}, {}', e, traceback.format_exc())
                    logger.info('Continuing to train.')

    # ---- Train Epoch ----
    def train(self):
        self.training_loss = 0.0
        for local_step in range(self.config.miner.epoch_length):
            # ---- Forward pass ----
            inputs = nextbatch(self.dataset, self.config.miner.batch_size_train, bittensor.__tokenizer__())
            output = self.model.remote_forward(
                self.neuron,
                inputs.to(self.model.device),
                training=True,
            )

            # ---- Backward pass ----
            loss = output.local_target_loss + output.distillation_loss + output.remote_target_loss
            loss.backward()  # Accumulates gradients on the model.
            self.optimizer.step()  # Applies accumulated gradients.
            self.optimizer.zero_grad()  # Zeros out gradients for the next accumulation.

            # ---- Train row weights ----
            batch_weights = torch.mean(output.router.weights, axis=0).to(self.model.device)  # Average over batch.
            self.row = (1 - 0.03) * self.row + 0.03 * batch_weights  # Moving average update.
            self.row = F.normalize(self.row, p=1, dim=0)  # Ensure normalization.

            # ---- Step logs ----
            logger.info(
                'GS: {} LS: {} Epoch: {}\tLocal Target Loss: {}\tRemote Target Loss: {}\tDistillation Loss: {}\tAxon: {}\tDendrite: {}',
                colored('{}'.format(self.global_step), 'red'),
                colored('{}'.format(local_step), 'blue'),
                colored('{}'.format(self.epoch), 'green'),
                colored('{:.4f}'.format(output.local_target_loss.item()), 'green'),
                colored('{:.4f}'.format(output.remote_target_loss.item()), 'blue'),
                colored('{:.4f}'.format(output.distillation_loss.item()), 'red'),
                self.neuron.axon, self.neuron.dendrite)
            logger.info('Codes: {}', output.router.return_codes.tolist())

            self.tensorboard.add_scalar('Neuron/Rloss', output.remote_target_loss.item(), self.global_step)
            self.tensorboard.add_scalar('Neuron/Lloss', output.local_target_loss.item(), self.global_step)
            self.tensorboard.add_scalar('Neuron/Dloss', output.distillation_loss.item(), self.global_step)

            # ---- Step increments ----
            self.global_step += 1
            self.training_loss += output.local_target_loss.item()

            # --- Memory clean up ----
            torch.cuda.empty_cache()
            del output
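The excerpt of this miner ends here. A minimal sketch of how it might be launched when the module is run as a script; the entry point itself is not part of the excerpt and is an assumption:

if __name__ == "__main__":
    # Build the default config (overridable from the command line) and start mining.
    miner = Miner()
    miner.run()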
class Miner(bittensor.miner.Miner):

    def __init__(self, config: Munch = None, **kwargs):
        if config is None:
            config = Miner.default_config()
        bittensor.config.Config.update_with_kwargs(config.miner, kwargs)
        Miner.check_config(config)
        self.config = config

        # ---- Model ----
        self.model = BertMLMSynapse(self.config)

        # ---- Optimizer ----
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.config.miner.learning_rate, momentum=self.config.miner.momentum)
        self.scheduler = WarmupCosineWithHardRestartsSchedule(self.optimizer, 50, 300)

        # ---- Model Load/Save tools ----
        self.model_toolbox = ModelToolbox(BertMLMSynapse, torch.optim.SGD)

        # ---- Dataset ----
        # Dataset: AG News, used here as raw text for masked language modelling.
        self.dataset = load_dataset('ag_news')['train']

        # The collator accepts a list [ dict{'input_ids', ...} ] where the internal dict
        # is produced by the tokenizer.
        self.data_collator = DataCollatorForLanguageModeling(
            tokenizer=bittensor.__tokenizer__(), mlm=True, mlm_probability=0.15)

        super(Miner, self).__init__(self.config, **kwargs)

    @staticmethod
    def default_config() -> Munch:
        parser = argparse.ArgumentParser()
        Miner.add_args(parser)
        config = bittensor.config.Config.to_config(parser)
        return config

    @staticmethod
    def check_config(config: Munch):
        assert config.miner.momentum > 0 and config.miner.momentum < 1, "momentum must be a value between 0 and 1"
        assert config.miner.batch_size_train > 0, "batch_size_train must be a positive value"
        assert config.miner.learning_rate > 0, "learning_rate must be a positive value."
        BertMLMSynapse.check_config(config)
        bittensor.miner.Miner.check_config(config)

    @staticmethod
    def add_args(parser: argparse.ArgumentParser):
        parser.add_argument('--miner.learning_rate', default=0.01, type=float, help='Training initial learning rate.')
        parser.add_argument('--miner.momentum', default=0.98, type=float, help='Training initial momentum for SGD.')
        parser.add_argument('--miner.clip_gradients', default=0.8, type=float, help='Gradient clipping norm, to avoid exploding loss on smaller architectures.')
        parser.add_argument('--miner.n_epochs', default=int(sys.maxsize), type=int, help='Number of training epochs.')
        parser.add_argument('--miner.epoch_length', default=500, type=int, help='Iterations of training per epoch.')
        parser.add_argument('--miner.batch_size_train', default=1, type=int, help='Training batch size.')
        parser.add_argument('--miner.name', default='bert_mlm', type=str, help='Trials for this miner go in miner.root / (wallet_cold - wallet_hot) / miner.name.')
        BertMLMSynapse.add_args(parser)
        bittensor.miner.Miner.add_args(parser)

    # --- Main loop ----
    def run(self):

        # ---- Subscribe ----
        with self:

            # ---- Weights ----
            self.row = self.metagraph.row

            # --- Run state ---
            self.global_step = 0
            self.best_train_loss = math.inf

            # --- Loop for epochs ---
            for self.epoch in range(self.config.miner.n_epochs):
                try:
                    # ---- Serve ----
                    self.axon.serve(self.model)

                    # ---- Train Model ----
                    self.train()
                    self.scheduler.step()

                    # If the model has borked for some reason, make sure it doesn't emit weights.
                    # Instead, reload the previous version of the model.
                    if torch.any(torch.isnan(torch.cat([param.view(-1) for param in self.model.parameters()]))):
                        self.model, self.optimizer = self.model_toolbox.load_model(self.config)
                        continue

                    # ---- Emit weights ----
                    self.metagraph.set_weights(self.row, wait_for_inclusion=True)  # Sets my row-weights on the chain.

                    # ---- Sync metagraph ----
                    self.metagraph.sync()  # Pulls the latest metagraph state (with my update).
                    self.row = self.metagraph.row
                    logger.info(self.metagraph)

                    # ---- Update Tensorboard ----
                    self.dendrite.__to_tensorboard__(self.tensorboard, self.global_step)
                    self.metagraph.__to_tensorboard__(self.tensorboard, self.global_step)
                    self.axon.__to_tensorboard__(self.tensorboard, self.global_step)

                    # ---- Save best loss and model ----
                    if self.training_loss and self.epoch % 10 == 0:
                        if self.training_loss < self.best_train_loss:
                            self.best_train_loss = self.training_loss  # update best train loss
                            self.model_toolbox.save_model(
                                self.config.miner.full_path, {
                                    'epoch': self.epoch,
                                    'model_state_dict': self.model.state_dict(),
                                    'loss': self.best_train_loss,
                                    'optimizer_state_dict': self.optimizer.state_dict(),
                                })
                            self.tensorboard.add_scalar('Neuron/Train_loss', self.training_loss, self.global_step)

                # --- Catch Errors ----
                except Exception as e:
                    logger.error('Exception in training script with error: {}', e)
                    logger.info(traceback.format_exc())
                    logger.info('Continuing to train.')
                    time.sleep(1)

    # ---- Train Epoch ----
    def train(self):
        self.training_loss = 0.0
        for local_step in range(self.config.miner.epoch_length):
            # ---- Forward pass ----
            inputs, targets = mlm_batch(self.dataset, self.config.miner.batch_size_train, bittensor.__tokenizer__(), self.data_collator)
            output = self.model.remote_forward(
                self,
                inputs=inputs.to(self.model.device),
                targets=targets.to(self.model.device))

            # ---- Backward pass ----
            loss = output.local_target_loss + output.distillation_loss + output.remote_target_loss
            loss.backward()  # Accumulates gradients on the model.
            clip_grad_norm_(self.model.parameters(), self.config.miner.clip_gradients)  # Clip model gradients.
            self.optimizer.step()  # Applies accumulated gradients.
            self.optimizer.zero_grad()  # Zeros out gradients for the next accumulation.

            # ---- Train row weights ----
            batch_weights = torch.mean(output.router.weights, axis=0)  # Average over batch.
            self.row = (1 - 0.03) * self.row + 0.03 * batch_weights  # Moving average update.
            self.row = F.normalize(self.row, p=1, dim=0)  # Ensure normalization.

            # ---- Step logs ----
            logger.info(
                'GS: {} LS: {} Epoch: {}\tLocal Target Loss: {}\tRemote Target Loss: {}\tDistillation Loss: {}\tAxon: {}\tDendrite: {}',
                colored('{}'.format(self.global_step), 'red'),
                colored('{}'.format(local_step), 'blue'),
                colored('{}'.format(self.epoch), 'green'),
                colored('{:.4f}'.format(output.local_target_loss.item()), 'green'),
                colored('{:.4f}'.format(output.remote_target_loss.item()), 'blue'),
                colored('{:.4f}'.format(output.distillation_loss.item()), 'red'),
                self.axon, self.dendrite)
            logger.info('Codes: {}', output.router.return_codes.tolist())

            self.tensorboard.add_scalar('Neuron/Rloss', output.remote_target_loss.item(), self.global_step)
            self.tensorboard.add_scalar('Neuron/Lloss', output.local_target_loss.item(), self.global_step)
            self.tensorboard.add_scalar('Neuron/Dloss', output.distillation_loss.item(), self.global_step)

            # ---- Step increments ----
            self.global_step += 1
            self.training_loss += output.local_target_loss.item()

            # --- Memory clean up ----
            torch.cuda.empty_cache()
            del output
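mlm_batch is a helper from the surrounding codebase and is not shown in this excerpt. A rough sketch of what it is assumed to do with the collator configured above; the 'text' field name, random sampling, and max length are illustrative assumptions, not the actual implementation:

import random

def mlm_batch_sketch(dataset, batch_size, tokenizer, collator):
    # Sample raw sentences, tokenize them, then let DataCollatorForLanguageModeling
    # mask ~15% of the tokens and build the matching MLM labels.
    rows = [dataset[random.randint(0, len(dataset) - 1)]['text'] for _ in range(batch_size)]
    encoded = [tokenizer(row, truncation=True, max_length=128) for row in rows]
    batch = collator(encoded)
    return batch['input_ids'], batch['labels']  # (masked token ids, MLM targets)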
class Session():

    def __init__(self, config: Munch):
        self.config = config

        # ---- Neuron ----
        self.neuron = Neuron(self.config)

        # ---- Model ----
        self.model = BertNSPSynapse(self.config)

        # ---- Optimizer ----
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.config.session.learning_rate, momentum=self.config.session.momentum)
        self.scheduler = WarmupCosineWithHardRestartsSchedule(self.optimizer, 50, 300)

        # ---- Dataset ----
        # Dataset: 74 million sentences pulled from books.
        self.dataset = load_dataset('bookcorpus')

        # ---- Logging ----
        self.tensorboard = SummaryWriter(log_dir=self.config.session.full_path)
        if self.config.session.record_log:
            logger.add(self.config.session.full_path + "/{}_{}.log".format(self.config.session.name, self.config.session.trial_uid),
                       format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}")

    @staticmethod
    def add_args(parser: argparse.ArgumentParser):
        parser.add_argument('--session.learning_rate', default=0.01, type=float, help='Training initial learning rate.')
        parser.add_argument('--session.momentum', default=0.98, type=float, help='Training initial momentum for SGD.')
        parser.add_argument('--session.epoch_length', default=10, type=int, help='Iterations of training per epoch.')
        parser.add_argument('--session.batch_size_train', default=1, type=int, help='Training batch size.')
        parser.add_argument('--session.sync_interval', default=100, type=int, help='Batches before we sync with chain and emit new weights.')
        parser.add_argument('--session.log_interval', default=10, type=int, help='Batches before we log session info.')
        parser.add_argument('--session.accumulation_interval', default=1, type=int, help='Batches before we apply accumulated gradients.')
        parser.add_argument('--session.apply_remote_gradients', default=False, type=bool, help='If true, neuron applies gradients which accumulate from remote calls.')
        parser.add_argument('--session.root_dir', default='~/.bittensor/sessions/', type=str, help='Root path to load and save data associated with each session.')
        parser.add_argument('--session.name', default='bert-nsp', type=str, help='Trials for this session go in session.root_dir / session.name.')
        parser.add_argument('--session.trial_uid', default=str(time.time()).split('.')[0], type=str, help='Saved models go in session.root_dir / session.name / session.trial_uid.')
        parser.add_argument('--session.record_log', default=True, help='Record all logs when running this session.')
        parser.add_argument('--session.config_file', type=str, help='Config file to run this neuron, if not using command line arguments.')
        BertNSPSynapse.add_args(parser)
        Neuron.add_args(parser)

    @staticmethod
    def check_config(config: Munch):
        assert config.session.momentum > 0 and config.session.momentum < 1, "momentum must be a value between 0 and 1"
        assert config.session.batch_size_train > 0, "batch_size_train must be a positive value"
        assert config.session.learning_rate > 0, "learning_rate must be a positive value."
        full_path = '{}/{}/{}'.format(config.session.root_dir, config.session.name, config.session.trial_uid)
        config.session.full_path = os.path.expanduser(full_path)
        if not os.path.exists(config.session.full_path):
            os.makedirs(config.session.full_path)
        BertNSPSynapse.check_config(config)
        Neuron.check_config(config)

    # --- Main loop ----
    def run(self):

        # ---- Subscribe ----
        with self.neuron:

            # ---- Weights ----
            self.row = self.neuron.metagraph.row

            # --- Run state ---
            self.epoch = -1
            self.global_step = 0
            self.best_train_loss = math.inf

            # --- Loop forever ---
            while True:
                try:
                    self.epoch += 1

                    # ---- Serve ----
                    self.neuron.axon.serve(self.model)

                    # ---- Train Model ----
                    self.train()
                    self.scheduler.step()

                    # ---- Emit row-weights ----
                    self.neuron.metagraph.emit(self.row, wait_for_inclusion=True)  # Sets my row-weights on the chain.

                    # ---- Sync metagraph ----
                    self.neuron.metagraph.sync()  # Pulls the latest metagraph state (with my update).
                    self.row = self.neuron.metagraph.row

                    # --- Epoch logs ----
                    print(self.neuron.axon.__full_str__())
                    print(self.neuron.dendrite.__full_str__())
                    print(self.neuron.metagraph)

                    # ---- Update Tensorboard ----
                    self.neuron.dendrite.__to_tensorboard__(self.tensorboard, self.global_step)
                    self.neuron.metagraph.__to_tensorboard__(self.tensorboard, self.global_step)
                    self.neuron.axon.__to_tensorboard__(self.tensorboard, self.global_step)

                    # ---- Save best loss and model ----
                    if self.training_loss and self.epoch % 10 == 0:
                        if self.training_loss < self.best_train_loss:
                            self.best_train_loss = self.training_loss  # update best train loss
                            logger.info('Saving/Serving model: epoch: {}, loss: {}, path: {}/model.torch'.format(
                                self.epoch, self.best_train_loss, self.config.session.full_path))
                            torch.save(
                                {
                                    'epoch': self.epoch,
                                    'model': self.model.state_dict(),
                                    'loss': self.best_train_loss
                                }, "{}/model.torch".format(self.config.session.full_path))
                            self.tensorboard.add_scalar('Neuron/Train_loss', self.training_loss, self.global_step)

                # --- Catch Errors ----
                except Exception as e:
                    logger.error('Exception in training script with error: {}', e)
                    logger.info(traceback.format_exc())
                    logger.info('Continuing to train.')
                    time.sleep(1)

    # ---- Train Epoch ----
    def train(self):
        self.training_loss = 0.0
        for local_step in range(self.config.session.epoch_length):
            # ---- Forward pass ----
            inputs, targets = nsp_batch(self.dataset['train'], self.config.session.batch_size_train, bittensor.__tokenizer__())
            output = self.model.remote_forward(
                self.neuron,
                inputs=inputs['input_ids'].to(self.model.device),
                attention_mask=inputs['attention_mask'].to(self.model.device),
                targets=targets.to(self.model.device))

            # ---- Backward pass ----
            loss = output.local_target_loss + output.distillation_loss + output.remote_target_loss
            loss.backward()  # Accumulates gradients on the model.
            self.optimizer.step()  # Applies accumulated gradients.
            self.optimizer.zero_grad()  # Zeros out gradients for the next accumulation.

            # ---- Train row weights ----
            batch_weights = torch.mean(output.dendrite.weights, axis=0)  # Average over batch.
            self.row = (1 - 0.03) * self.row + 0.03 * batch_weights  # Moving average update.
            self.row = F.normalize(self.row, p=1, dim=0)  # Ensure normalization.
            # ---- Step logs ----
            logger.info(
                'GS: {} LS: {} Epoch: {}\tLocal Target Loss: {}\tRemote Target Loss: {}\tDistillation Loss: {}\tAxon: {}\tDendrite: {}',
                colored('{}'.format(self.global_step), 'red'),
                colored('{}'.format(local_step), 'blue'),
                colored('{}'.format(self.epoch), 'green'),
                colored('{:.4f}'.format(output.local_target_loss.item()), 'green'),
                colored('{:.4f}'.format(output.remote_target_loss.item()), 'blue'),
                colored('{:.4f}'.format(output.distillation_loss.item()), 'red'),
                self.neuron.axon, self.neuron.dendrite)
            logger.info('Codes: {}', output.dendrite.return_codes.tolist())

            self.tensorboard.add_scalar('Neuron/Rloss', output.remote_target_loss.item(), self.global_step)
            self.tensorboard.add_scalar('Neuron/Lloss', output.local_target_loss.item(), self.global_step)
            self.tensorboard.add_scalar('Neuron/Dloss', output.distillation_loss.item(), self.global_step)

            # ---- Step increments ----
            self.global_step += 1
            self.training_loss += output.local_target_loss.item()

            # --- Memory clean up ----
            torch.cuda.empty_cache()
            del output
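Unlike the miners above, this Session does not define a default_config helper, so a caller is expected to assemble the config itself. A hypothetical launch sketch, assuming the same bittensor.config.Config utilities used elsewhere in this excerpt:

if __name__ == "__main__":
    # Hypothetical entry point: build a config from the command line, validate it, then run.
    parser = argparse.ArgumentParser()
    Session.add_args(parser)
    config = bittensor.config.Config.to_config(parser)
    Session.check_config(config)
    session = Session(config)
    session.run()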
class Miner():

    def __init__(self, config: Munch = None):
        if config is None:
            config = Miner.build_config()
            logger.info(bittensor.config.Config.toString(config))
        self.config = config

        # ---- Neuron ----
        self.neuron = bittensor.neuron.Neuron(self.config)

        # ---- Model ----
        self.model = BertMLMSynapse(self.config)

        # ---- Optimizer ----
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.config.miner.learning_rate, momentum=self.config.miner.momentum)
        self.scheduler = WarmupCosineWithHardRestartsSchedule(self.optimizer, 50, 300)

        # ---- Model Load/Save tools ----
        self.model_toolbox = ModelToolbox(BertMLMSynapse, torch.optim.SGD)

        # ---- Dataset ----
        # Dataset: AG News, used here as raw text for masked language modelling.
        self.dataset = load_dataset('ag_news')['train']

        # The collator accepts a list [ dict{'input_ids', ...} ] where the internal dict
        # is produced by the tokenizer.
        self.data_collator = DataCollatorForLanguageModeling(
            tokenizer=bittensor.__tokenizer__(), mlm=True, mlm_probability=0.15)

        # ---- Logging ----
        self.tensorboard = SummaryWriter(log_dir=self.config.miner.full_path)
        if self.config.miner.record_log:
            logger.add(self.config.miner.full_path + "/{}_{}.log".format(self.config.miner.name, self.config.miner.trial_uid),
                       format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}")

    @staticmethod
    def build_config() -> Munch:
        parser = argparse.ArgumentParser()
        Miner.add_args(parser)
        config = bittensor.config.Config.to_config(parser)
        Miner.check_config(config)
        return config

    @staticmethod
    def check_config(config: Munch):
        assert config.miner.momentum > 0 and config.miner.momentum < 1, "momentum must be a value between 0 and 1"
        assert config.miner.batch_size_train > 0, "batch_size_train must be a positive value"
        assert config.miner.learning_rate > 0, "learning_rate must be a positive value."
        full_path = '{}/{}/{}'.format(config.miner.root_dir, config.miner.name, config.miner.trial_uid)
        config.miner.full_path = os.path.expanduser(full_path)
        if not os.path.exists(config.miner.full_path):
            os.makedirs(config.miner.full_path)
        BertMLMSynapse.check_config(config)
        bittensor.neuron.Neuron.check_config(config)

    @staticmethod
    def add_args(parser: argparse.ArgumentParser):
        parser.add_argument('--miner.learning_rate', default=0.01, type=float, help='Training initial learning rate.')
        parser.add_argument('--miner.momentum', default=0.98, type=float, help='Training initial momentum for SGD.')
        parser.add_argument('--miner.n_epochs', default=int(sys.maxsize), type=int, help='Number of training epochs.')
        parser.add_argument('--miner.epoch_length', default=500, type=int, help='Iterations of training per epoch.')
        parser.add_argument('--miner.batch_size_train', default=1, type=int, help='Training batch size.')
        parser.add_argument('--miner.sync_interval', default=100, type=int, help='Batches before we sync with chain and emit new weights.')
        parser.add_argument('--miner.log_interval', default=10, type=int, help='Batches before we log miner info.')
        parser.add_argument('--miner.accumulation_interval', default=1, type=int, help='Batches before we apply accumulated gradients.')
        parser.add_argument('--miner.apply_remote_gradients', default=False, type=bool, help='If true, neuron applies gradients which accumulate from remote calls.')
        parser.add_argument('--miner.root_dir', default='~/.bittensor/miners/', type=str, help='Root path to load and save data associated with each miner.')
        parser.add_argument('--miner.name', default='bert-nsp', type=str, help='Trials for this miner go in miner.root_dir / miner.name.')
        parser.add_argument('--miner.trial_uid', default=str(time.time()).split('.')[0], type=str, help='Saved models go in miner.root_dir / miner.name / miner.trial_uid.')
        parser.add_argument('--miner.record_log', default=True, help='Record all logs when running this miner.')
        parser.add_argument('--miner.config_file', type=str, help='Config file to run this neuron, if not using command line arguments.')
        BertMLMSynapse.add_args(parser)
        bittensor.neuron.Neuron.add_args(parser)

    # --- Main loop ----
    def run(self):

        # ---- Subscribe ----
        with self.neuron:

            # ---- Weights ----
            self.row = self.neuron.metagraph.row

            # --- Run state ---
            self.global_step = 0
            self.best_train_loss = math.inf

            # --- Loop for epochs ---
            for self.epoch in range(self.config.miner.n_epochs):
                try:
                    # ---- Serve ----
                    self.neuron.axon.serve(self.model)

                    # ---- Train Model ----
                    self.train()
                    self.scheduler.step()

                    # If the model has borked for some reason, make sure it doesn't emit weights.
                    # Instead, reload the previous version of the model.
                    if torch.any(torch.isnan(torch.cat([param.view(-1) for param in self.model.parameters()]))):
                        self.model, self.optimizer = self.model_toolbox.load_model(self.config)
                        continue

                    # ---- Emit weights ----
                    self.neuron.metagraph.set_weights(self.row, wait_for_inclusion=True)  # Sets my row-weights on the chain.

                    # ---- Sync metagraph ----
                    self.neuron.metagraph.sync()  # Pulls the latest metagraph state (with my update).
                    self.row = self.neuron.metagraph.row

                    # --- Epoch logs ----
                    print(self.neuron.axon.__full_str__())
                    print(self.neuron.dendrite.__full_str__())
                    print(self.neuron.metagraph)

                    # ---- Update Tensorboard ----
                    self.neuron.dendrite.__to_tensorboard__(self.tensorboard, self.global_step)
                    self.neuron.metagraph.__to_tensorboard__(self.tensorboard, self.global_step)
                    self.neuron.axon.__to_tensorboard__(self.tensorboard, self.global_step)

                    # ---- Save best loss and model ----
                    if self.training_loss and self.epoch % 10 == 0:
                        if self.training_loss < self.best_train_loss:
                            self.best_train_loss = self.training_loss  # update best train loss
                            self.model_toolbox.save_model(
                                self.config.miner.full_path, {
                                    'epoch': self.epoch,
                                    'model_state_dict': self.model.state_dict(),
                                    'loss': self.best_train_loss,
                                    'optimizer_state_dict': self.optimizer.state_dict(),
                                })
                            self.tensorboard.add_scalar('Neuron/Train_loss', self.training_loss, self.global_step)

                # --- Catch Errors ----
                except Exception as e:
                    logger.error('Exception in training script with error: {}', e)
                    logger.info(traceback.format_exc())
                    logger.info('Continuing to train.')
                    time.sleep(1)

    # ---- Train Epoch ----
    def train(self):
        self.training_loss = 0.0
        for local_step in range(self.config.miner.epoch_length):
            # ---- Forward pass ----
            inputs, targets = mlm_batch(self.dataset, self.config.miner.batch_size_train, bittensor.__tokenizer__(), self.data_collator)
            output = self.model.remote_forward(
                self.neuron,
                inputs=inputs.to(self.model.device),
                targets=targets.to(self.model.device))

            # ---- Backward pass ----
            loss = output.local_target_loss + output.distillation_loss + output.remote_target_loss
            loss.backward()  # Accumulates gradients on the model.
            self.optimizer.step()  # Applies accumulated gradients.
            self.optimizer.zero_grad()  # Zeros out gradients for the next accumulation.

            # ---- Train row weights ----
            batch_weights = torch.mean(output.router.weights, axis=0)  # Average over batch.
            self.row = (1 - 0.03) * self.row + 0.03 * batch_weights  # Moving average update.
            self.row = F.normalize(self.row, p=1, dim=0)  # Ensure normalization.

            # ---- Step logs ----
            logger.info(
                'GS: {} LS: {} Epoch: {}\tLocal Target Loss: {}\tRemote Target Loss: {}\tDistillation Loss: {}\tAxon: {}\tDendrite: {}',
                colored('{}'.format(self.global_step), 'red'),
                colored('{}'.format(local_step), 'blue'),
                colored('{}'.format(self.epoch), 'green'),
                colored('{:.4f}'.format(output.local_target_loss.item()), 'green'),
                colored('{:.4f}'.format(output.remote_target_loss.item()), 'blue'),
                colored('{:.4f}'.format(output.distillation_loss.item()), 'red'),
                self.neuron.axon, self.neuron.dendrite)
            logger.info('Codes: {}', output.router.return_codes.tolist())

            self.tensorboard.add_scalar('Neuron/Rloss', output.remote_target_loss.item(), self.global_step)
            self.tensorboard.add_scalar('Neuron/Lloss', output.local_target_loss.item(), self.global_step)
            self.tensorboard.add_scalar('Neuron/Dloss', output.distillation_loss.item(), self.global_step)

            # ---- Step increments ----
            self.global_step += 1
            self.training_loss += output.local_target_loss.item()

            # --- Memory clean up ----
            torch.cuda.empty_cache()
            del output
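The row-weight update is the same in every trainer above: an exponential moving average over the batch-averaged router (or dendrite) weights, re-normalized to sum to one. A tiny self-contained sketch of just that update; the concrete values are made up for illustration:

import torch
import torch.nn.functional as F

row = torch.tensor([0.5, 0.3, 0.2])            # current row-weights (sum to 1)
batch_weights = torch.tensor([0.1, 0.1, 0.8])  # batch-averaged router weights

row = (1 - 0.03) * row + 0.03 * batch_weights  # moving average update
row = F.normalize(row, p=1, dim=0)             # re-normalize so the row sums to 1
print(row)                                     # tensor([0.4880, 0.2940, 0.2180])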