def __init__(self, config: Munch):
    """Assemble the NSP training session.

    Wires together the neuron, the BERT next-sentence-prediction model,
    its SGD optimizer + warmup scheduler, the bookcorpus dataset, and
    tensorboard / file logging.

    Args:
        config: fully-built Munch configuration for this session.
    """
    self.config = config

    # ---- Neuron ----
    self.neuron = Neuron(self.config)

    # ---- Model ----
    self.model = BertNSPSynapse(self.config)

    # ---- Optimizer ----
    session_config = self.config.session
    self.optimizer = torch.optim.SGD(
        self.model.parameters(),
        lr=session_config.learning_rate,
        momentum=session_config.momentum,
    )
    self.scheduler = WarmupCosineWithHardRestartsSchedule(self.optimizer, 50, 300)

    # ---- Dataset ----
    # Dataset: 74 million sentences pulled from books.
    self.dataset = load_dataset('bookcorpus')

    # ---- Logging ----
    self.tensorboard = SummaryWriter(log_dir=session_config.full_path)
    if session_config.record_log:
        log_file = session_config.full_path + "/{}_{}.log".format(
            session_config.name, session_config.trial_uid)
        logger.add(
            log_file,
            format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}")
def __init__(self, config: Munch):
    """Assemble the MLM training session.

    Wires together the neuron, the BERT masked-language-model synapse,
    its SGD optimizer + warmup scheduler, the bookcorpus training split
    with an MLM collator, and tensorboard / file logging.

    Args:
        config: fully-built Munch configuration for this session.
    """
    self.config = config

    # ---- Neuron ----
    self.neuron = Neuron(self.config)

    # ---- Model ----
    self.model = BertMLMSynapse(self.config)

    # ---- Optimizer ----
    session_config = self.config.session
    self.optimizer = torch.optim.SGD(
        self.model.parameters(),
        lr=session_config.learning_rate,
        momentum=session_config.momentum,
    )
    self.scheduler = WarmupCosineWithHardRestartsSchedule(self.optimizer, 50, 300)

    # ---- Dataset ----
    # Dataset: 74 million sentences pulled from books.
    self.dataset = load_dataset('bookcorpus')['train']
    # The collator accepts a list [ dict{'input_ids, ...; } ] where the internal dict
    # is produced by the tokenizer.
    self.data_collator = DataCollatorForLanguageModeling(
        tokenizer=bittensor.__tokenizer__(),
        mlm=True,
        mlm_probability=0.15,
    )

    # ---- Logging ----
    self.tensorboard = SummaryWriter(log_dir=session_config.full_path)
    if session_config.record_log:
        log_file = session_config.full_path + "/{}_{}.log".format(
            session_config.name, session_config.trial_uid)
        logger.add(
            log_file,
            format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}")
def __init__(self, config: Munch):
    """Assemble the CIFAR-10 training session.

    Wires together the neuron, the DPN model (moved to the best available
    device), its SGD optimizer + step scheduler, the CIFAR-10 train/test
    loaders, and tensorboard / file logging.

    Args:
        config: fully-built Munch configuration for this session.
    """
    self.config = config

    # ---- Neuron ----
    self.neuron = Neuron(self.config)

    # ---- Model ----
    self.model = DPNSynapse(config)  # Feedforward neural network with PKMDendrite.
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    self.model.to(self.device)  # Set model to device

    # ---- Optimizer ----
    self.optimizer = optim.SGD(self.model.parameters(),
                               lr=self.config.session.learning_rate,
                               momentum=self.config.session.momentum)
    # FIX: StepLR's step_size is documented as an int; 10.0 only worked incidentally.
    self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer,
                                                     step_size=10,
                                                     gamma=0.1)

    # ---- Dataset ----
    self.train_data = torchvision.datasets.CIFAR10(
        root=self.config.session.root_dir + "datasets/",
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
        ]))
    self.trainloader = torch.utils.data.DataLoader(
        self.train_data,
        batch_size=self.config.session.batch_size_train,
        shuffle=True,
        num_workers=2)
    self.test_data = torchvision.datasets.CIFAR10(
        root=self.config.session.root_dir + "datasets/",
        train=False,
        download=True,
        transform=transforms.ToTensor())
    self.testloader = torch.utils.data.DataLoader(
        self.test_data,
        batch_size=self.config.session.batch_size_test,
        shuffle=False,
        num_workers=2)
    # FIX: removed a duplicated CIFAR10 test-set construction that re-triggered
    # the dataset download and rebound self.test_data after the testloader
    # had already been built from the first instance.

    # ---- Tensorboard ----
    self.global_step = 0
    self.tensorboard = SummaryWriter(log_dir=self.config.session.full_path)
    if self.config.session.record_log:
        logger.add(
            self.config.session.full_path + "/{}_{}.log".format(
                self.config.session.name, self.config.session.trial_uid),
            format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}")
def __init__(self, config: Munch):
    """Assemble the MNIST training session.

    Builds a default config when none is given, then wires together the
    neuron, the FFNN model (moved to the best available device), its SGD
    optimizer + step scheduler, the MNIST train/test loaders, and
    tensorboard logging.

    Args:
        config: Munch configuration for this session, or None to use
            Session.build_config() defaults.
    """
    # FIX: compare against None with `is`, not `==` — `==` dispatches to
    # Munch.__eq__ and is non-idiomatic for a None sentinel (PEP 8).
    if config is None:
        config = Session.build_config()
    self.config = config

    # ---- Neuron ----
    self.neuron = Neuron(self.config)

    # ---- Model ----
    self.model = FFNNSynapse(config)  # Feedforward neural network with PKMRouter.
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    self.model.to(self.device)  # Set model to device

    # ---- Optimizer ----
    self.optimizer = optim.SGD(self.model.parameters(),
                               lr=self.config.session.learning_rate,
                               momentum=self.config.session.momentum)
    self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer,
                                                     step_size=10.0,
                                                     gamma=0.1)

    # ---- Dataset ----
    self.train_data = torchvision.datasets.MNIST(
        root=self.config.session.root_dir + "datasets/",
        train=True,
        download=True,
        transform=transforms.ToTensor())
    self.trainloader = torch.utils.data.DataLoader(
        self.train_data,
        batch_size=self.config.session.batch_size_train,
        shuffle=True,
        num_workers=2)
    self.test_data = torchvision.datasets.MNIST(
        root=self.config.session.root_dir + "datasets/",
        train=False,
        download=True,
        transform=transforms.ToTensor())
    self.testloader = torch.utils.data.DataLoader(
        self.test_data,
        batch_size=self.config.session.batch_size_test,
        shuffle=False,
        num_workers=2)

    # ---- Tensorboard ----
    self.global_step = 0
    self.tensorboard = SummaryWriter(log_dir=self.config.session.full_path)
def new_neuron():
    """Build a Neuron from a default MNIST test configuration.

    Creates the hard-coded config, logs it, generates a fresh sr25519
    keypair from a new mnemonic, and attaches it to the neuron.

    Returns:
        A Neuron instance carrying a freshly generated keypair.
    """
    # 1. Init Config item.
    defaults = {
        'session': {
            'datapath': 'data/',
            'learning_rate': 0.01,
            'momentum': 0.9,
            'batch_size_train': 64,
            'batch_size_test': 64,
            'log_interval': 10,
            'sync_interval': 100,
            'priority_interval': 100,
            'name': 'mnist',
            'trial_id': '1608070667',
        },
        'synapse': {
            'target_dim': 10,
        },
        'dendrite': {
            'key_dim': 100,
            'topk': 10,
            'stale_emit_filter': 10000,
            'pass_gradients': True,
            'timeout': 0.5,
            'do_backoff': True,
            'max_backoff': 100,
        },
        'axon': {
            'local_port': 8091,
            'external_ip': '191.97.53.53',
            'max_workers': 5,
            'max_gradients': 1000,
        },
        'nucleus': {
            'max_workers': 5,
            'queue_timeout': 5,
            'queue_maxsize': 1000,
        },
        'metagraph': {
            'chain_endpoint': '206.189.254.5:12345',
            'stale_emit_filter': 10000,
        },
        'meta_logger': {
            'log_dir': 'data/',
        },
        'neuron': {
            'keyfile': None,
            'keypair': None,
        },
    }
    neuron_config = Munch.fromDict(defaults)
    logger.info(Config.toString(neuron_config))

    # Fresh key material for this neuron.
    seed_phrase = Keypair.generate_mnemonic()
    signing_key = Keypair.create_from_mnemonic(seed_phrase)

    neuron = Neuron(neuron_config)
    neuron.keypair = signing_key
    return neuron
def __init__(self, config: Munch):
    """Assemble a serving FFNN session.

    Wires together the neuron, the FFNN model (moved to the best
    available device and attached to the axon for serving), its SGD
    optimizer, and tensorboard / file logging.

    Args:
        config: fully-built Munch configuration for this session.
    """
    self.config = config

    # ---- Build Neuron ----
    self.neuron = Neuron(config)

    # ---- Build FFNN Model ----
    self.model = FFNNSynapse(self.config)
    compute_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    self.model.to(compute_device)
    # Expose the model over the axon so peers can query it.
    self.neuron.axon.serve(self.model)

    # ---- Optimizer ----
    session_config = self.config.session
    self.optimizer = torch.optim.SGD(
        self.model.parameters(),
        lr=session_config.learning_rate,
        momentum=session_config.momentum,
    )

    # ---- Logging ----
    self.tensorboard = SummaryWriter(log_dir=session_config.full_path)
    if session_config.record_log:
        log_file = session_config.full_path + "/{}_{}.log".format(
            session_config.name, session_config.trial_uid)
        logger.add(
            log_file,
            format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}")