def test_warmup_cosine_hard_restart_scheduler(self):
        """LR trajectory and save/reload round-trip for the hard-restarts cosine schedule."""
        scheduler = WarmupCosineWithHardRestartsSchedule(self.optimizer, warmup_steps=2, cycles=2, t_total=10)
        lrs = unwrap_schedule(scheduler, self.num_steps)
        expected_learning_rates = [5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46, 0.0]
        # One param group per step; values follow warmup then two cosine cycles.
        self.assertEqual(len(lrs[0]), 1)
        self.assertListAlmostEqual([step[0] for step in lrs], expected_learning_rates, tol=1e-2)

        # A fresh scheduler saved and reloaded mid-run must reproduce the
        # exact same learning-rate trajectory.
        scheduler = WarmupCosineWithHardRestartsSchedule(self.optimizer, warmup_steps=2, cycles=2, t_total=10)
        lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
        self.assertListEqual([step[0] for step in lrs], [step[0] for step in lrs_2])
Пример #2
0
    def __init__(self, config: Munch):
        """Assemble the NSP training session: neuron, model, optimizer, data and logging."""
        self.config = config

        # Network peer abstraction.
        self.neuron = Neuron(self.config)

        # Next-sentence-prediction synapse model.
        self.model = BertNSPSynapse(self.config)

        # SGD with momentum; LR driven by warmup + hard-restarts cosine schedule.
        self.optimizer = torch.optim.SGD(
            self.model.parameters(),
            lr=self.config.session.learning_rate,
            momentum=self.config.session.momentum)
        self.scheduler = WarmupCosineWithHardRestartsSchedule(self.optimizer, 50, 300)

        # Training corpus: 74 million sentences pulled from books.
        self.dataset = load_dataset('bookcorpus')

        # Tensorboard always; on-disk log file only when requested.
        self.tensorboard = SummaryWriter(log_dir=self.config.session.full_path)
        if self.config.session.record_log:
            log_file = self.config.session.full_path + "/{}_{}.log".format(
                self.config.session.name, self.config.session.trial_uid)
            logger.add(log_file, format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}")
Пример #3
0
    def __init__(self, config: Munch = None, **kwargs):
        """Build the BERT-MLM miner: config, model, optimizer/scheduler, dataset, collator.

        Args:
            config: Miner configuration; defaults to `Miner.default_config()` when None.
            **kwargs: Overrides merged into `config.miner` before validation.
        """
        # Fix: compare to None with `is`, not `==`; drop stray semicolon.
        if config is None:
            config = Miner.default_config()
        bittensor.config.Config.update_with_kwargs(config.miner, kwargs)
        Miner.check_config(config)
        self.config = config

        # ---- Model ----
        self.model = BertMLMSynapse(self.config)

        # ---- Optimizer ----
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.config.miner.learning_rate, momentum=self.config.miner.momentum)
        self.scheduler = WarmupCosineWithHardRestartsSchedule(self.optimizer, 50, 300)

        # ---- Model Load/Save tools ----
        self.model_toolbox = ModelToolbox(BertMLMSynapse, torch.optim.SGD)

        # ---- Dataset ----
        # News-headline corpus used as masked-language-model training text.
        self.dataset = load_dataset('ag_news')['train']
        # The collator accepts a list [ dict{'input_ids': ...} ] where the internal dict
        # is produced by the tokenizer.
        self.data_collator = DataCollatorForLanguageModeling(
            tokenizer=bittensor.__tokenizer__(), mlm=True, mlm_probability=0.15
        )
        super(Miner, self).__init__(self.config, **kwargs)
Пример #4
0
    def __init__(self, config: Munch = None):
        """Build the GPT2 language-model miner: neuron, model, optimizer, dataset, logging.

        Args:
            config: Miner configuration; built via `Miner.build_config()` (and
                logged) when None.
        """
        # Fix: compare to None with `is`; split the semicolon-joined statements.
        if config is None:
            config = Miner.build_config()
            logger.info(bittensor.config.Config.toString(config))
        self.config = config

        # ---- Neuron ----
        self.neuron = bittensor.neuron.Neuron(self.config)

        # ---- Model ----
        self.model = GPT2LMSynapse(self.config)

        # ---- Optimizer ----
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.config.miner.learning_rate, momentum=self.config.miner.momentum)
        self.scheduler = WarmupCosineWithHardRestartsSchedule(self.optimizer, 50, 300)

        # ---- Model Load/Save tools ----
        self.model_toolbox = ModelToolbox(GPT2LMSynapse, torch.optim.SGD)

        # ---- Dataset ----
        # Custom corpus chosen via config (replaces the earlier 'ag_news' dataset).
        self.dataset = AdamCorpus(self.config.miner.custom_datasets)

        # ---- Logging ----
        self.tensorboard = SummaryWriter(log_dir=self.config.miner.full_path)
        if self.config.miner.record_log:
            logger.add(self.config.miner.full_path + "/{}_{}.log".format(self.config.miner.name, self.config.miner.trial_uid), format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}")
Пример #5
0
    def __init__(self, config: Munch = None, **kwargs):
        """Build the BERT-NSP miner: neuron, model, optimizer/scheduler, dataset, logging.

        Args:
            config: Miner configuration; defaults to `Miner.default_config()` when None.
            **kwargs: Overrides merged into `config.miner` before validation.
        """
        # Fix: compare to None with `is`, not `==`; drop stray semicolon.
        if config is None:
            config = Miner.default_config()
        bittensor.config.Config.update_with_kwargs(config.miner, kwargs)
        Miner.check_config(config)
        self.config = config

        # ---- Neuron ----
        self.neuron = bittensor.neuron.Neuron(self.config)

        # ---- Model ----
        self.model = BertNSPSynapse(self.config)

        # ---- Optimizer ----
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.config.miner.learning_rate, momentum=self.config.miner.momentum)
        self.scheduler = WarmupCosineWithHardRestartsSchedule(self.optimizer, 50, 300)

        # ---- Model Load/Save tools ----
        self.model_toolbox = ModelToolbox(BertNSPSynapse, torch.optim.SGD)

        # ---- Dataset ----
        # Dataset: News headlines
        self.dataset = load_dataset('ag_news')['train']

        # ---- Logging ----
        self.tensorboard = SummaryWriter(log_dir=self.config.miner.full_path)
        if self.config.miner.record_log:
            logger.add(self.config.miner.full_path + "/{}_{}.log".format(self.config.miner.name, self.config.miner.trial_uid), format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}")
Пример #6
0
    def __init__(self, config: Munch):
        """Assemble the MLM training session: neuron, model, optimizer, data and logging."""
        self.config = config

        # Network peer abstraction.
        self.neuron = Neuron(self.config)

        # Masked-language-model synapse.
        self.model = BertMLMSynapse(self.config)

        # SGD with momentum; LR driven by warmup + hard-restarts cosine schedule.
        self.optimizer = torch.optim.SGD(
            self.model.parameters(),
            lr=self.config.session.learning_rate,
            momentum=self.config.session.momentum)
        self.scheduler = WarmupCosineWithHardRestartsSchedule(self.optimizer, 50, 300)

        # Training corpus: 74 million sentences pulled from books.
        self.dataset = load_dataset('bookcorpus')['train']
        # The collator takes a list of tokenizer-produced dicts ({'input_ids': ...})
        # and applies random masking for the MLM objective.
        self.data_collator = DataCollatorForLanguageModeling(
            tokenizer=bittensor.__tokenizer__(),
            mlm=True,
            mlm_probability=0.15)

        # Tensorboard always; on-disk log file only when requested.
        self.tensorboard = SummaryWriter(log_dir=self.config.session.full_path)
        if self.config.session.record_log:
            log_file = self.config.session.full_path + "/{}_{}.log".format(
                self.config.session.name, self.config.session.trial_uid)
            logger.add(log_file, format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}")
Пример #7
0
    def __init__(self, config: Munch = None, **kwargs):
        """Build the XLM miner: config, row weights, synapse, optimizer/scheduler, data.

        Args:
            config: Miner configuration; defaults to `Miner.default_config()` when None.
                Deep-copied so kwarg overrides never mutate the caller's object.
            **kwargs: Overrides merged into the copied config before validation.
        """
        # ---- Load Config ----
        # Fix: compare to None with `is`, not `==`.
        if config is None:
            config = Miner.default_config()
        config = copy.deepcopy(config)
        bittensor.config.Config.update_with_kwargs(config, kwargs)
        Miner.check_config(config)
        logger.info(bittensor.config.Config.toString(config))
        self.config = config

        # ---- Row Weights ----
        self.row_weights = torch.ones([1])

        # ---- Nucleus ----
        self.synapse = XLMSynapse(self.config)

        # ---- Optimizer ----
        self.optimizer = torch.optim.SGD(self.synapse.parameters(),
                                         lr=self.config.miner.learning_rate,
                                         momentum=self.config.miner.momentum)
        self.scheduler = WarmupCosineWithHardRestartsSchedule(
            self.optimizer, 50, 300)

        # ---- Dataset ----
        self.dataset = GenesisTextDataloader(
            self.config.miner.batch_size_train, 20)
        super(Miner, self).__init__(self.config, **kwargs)
Пример #8
0
    def __init__(self, config: Munch = None, **kwargs):
        """Build the GPT2 LM miner on the Genesis corpus: neuron, model, optimizer, logging.

        Args:
            config: Miner configuration; defaults to `Miner.default_config()` when None.
            **kwargs: Overrides merged into `config.miner` before validation.
        """
        # Fix: compare to None with `is`, not `==`.
        if config is None:
            config = Miner.default_config()
        bittensor.config.Config.update_with_kwargs(config.miner, kwargs)
        Miner.check_config(config)
        self.config = config

        # ---- Neuron ----
        self.neuron = bittensor.neuron.Neuron(self.config)

        # ---- Model ----
        self.model = GPT2LMSynapse(self.config)

        # ---- Optimizer ----
        self.optimizer = torch.optim.SGD(self.model.parameters(),
                                         lr=self.config.miner.learning_rate,
                                         momentum=self.config.miner.momentum)
        self.scheduler = WarmupCosineWithHardRestartsSchedule(
            self.optimizer, 50, 300)

        # ---- Model Load/Save tools ----
        self.model_toolbox = ModelToolbox(GPT2LMSynapse, torch.optim.SGD)

        # ---- Dataset ----
        # The Genesis Dataset:
        # The dataset used to train Adam and his first 100 children.
        self.dataset = AdamCorpus(self.config.miner.custom_dataset)

        # ---- Logging ----
        self.tensorboard = SummaryWriter(log_dir=self.config.miner.full_path)
        if self.config.miner.record_log:
            logger.add(
                self.config.miner.full_path + "/{}_{}.log".format(
                    self.config.miner.name, self.config.miner.trial_uid),
                format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}")
Пример #9
0
    def __init__(self, config: Munch = None, **kwargs):
        """Build the XLM miner: neuron, model, optimizer/scheduler, dataset, device, logging.

        Args:
            config: Miner configuration; defaults to `Miner.default_config()` when None.
            **kwargs: Overrides merged into `config.miner` before validation.
        """
        # Fix: compare to None with `is`, not `==`; drop stray semicolon.
        if config is None:
            config = Miner.default_config()
        bittensor.config.Config.update_with_kwargs(config.miner, kwargs)
        Miner.check_config(config)
        self.config = config

        # ---- Neuron ----
        self.neuron = bittensor.neuron.Neuron(self.config)

        # ---- Model ----
        self.model = XLMSynapse(self.config)

        # ---- Optimizer ----
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.config.miner.learning_rate, momentum=self.config.miner.momentum)
        self.scheduler = WarmupCosineWithHardRestartsSchedule(self.optimizer, 50, 300)

        # ---- Model Load/Save tools ----
        self.model_toolbox = ModelToolbox(XLMSynapse, torch.optim.SGD)

        # ---- Dataset ----
        # English Amazon product reviews used as training text.
        self.dataset = load_dataset('amazon_reviews_multi', 'en')['train']

        # Default to CUDA when available; an explicit config value overrides it.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if self.config.synapse.device:
            self.device = torch.device(self.config.synapse.device)

        # ---- Logging ----
        self.tensorboard = SummaryWriter(log_dir=self.config.miner.full_path)
        if self.config.miner.record_log:
            logger.add(self.config.miner.full_path + "/{}_{}.log".format(self.config.miner.name, self.config.miner.trial_uid), format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}")
Пример #10
0
    def configure_optimizers(self):
        """Return the AdamW optimizer and its warmup cosine (hard restarts) schedule."""
        optimizer = torch.optim.AdamW(self.model.parameters(), lr=self.hparams.lr)
        lr_schedule = WarmupCosineWithHardRestartsSchedule(
            optimizer=optimizer, warmup_steps=1, t_total=5)
        return [optimizer], [lr_schedule]
Пример #11
0
    def __init__(self, config: Munch = None, **kwargs):
        """Build the BERT-MLM miner: neuron, model, optimizer, dataset, collator, logging.

        Args:
            config: Miner configuration; defaults to `Miner.default_config()` when None.
            **kwargs: Overrides merged into `config.miner` before validation.
        """
        # Fix: compare to None with `is`, not `==`.
        if config is None:
            config = Miner.default_config()
        bittensor.config.Config.update_with_kwargs(config.miner, kwargs)
        Miner.check_config(config)
        self.config = config

        # ---- Neuron ----
        self.neuron = bittensor.neuron.Neuron(self.config)

        # ---- Model ----
        self.model = BertMLMSynapse(self.config)

        # ---- Optimizer ----
        self.optimizer = torch.optim.SGD(self.model.parameters(),
                                         lr=self.config.miner.learning_rate,
                                         momentum=self.config.miner.momentum)
        self.scheduler = WarmupCosineWithHardRestartsSchedule(
            self.optimizer, 50, 300)

        # ---- Model Load/Save tools ----
        self.model_toolbox = ModelToolbox(BertMLMSynapse, torch.optim.SGD)

        # ---- Dataset ----
        # News-headline corpus used as masked-language-model training text.
        self.dataset = load_dataset('ag_news')['train']
        # The collator accepts a list [ dict{'input_ids': ...} ] where the internal dict
        # is produced by the tokenizer.
        self.data_collator = DataCollatorForLanguageModeling(
            tokenizer=bittensor.__tokenizer__(),
            mlm=True,
            mlm_probability=0.15)

        # ---- Logging ----
        self.tensorboard = SummaryWriter(log_dir=self.config.miner.full_path)
        if self.config.miner.record_log:
            # BUG FIX: the original assignment ended with a trailing comma,
            # which made `filepath` a 1-tuple instead of a string and broke
            # the logger sink path.
            filepath = self.config.miner.full_path + "/{}_{}.log".format(
                self.config.miner.name, self.config.miner.trial_uid)
            logger.add(
                filepath,
                format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}",
                rotation="250 MB",
                retention="10 days")
Пример #12
0
def train(args, train_dataset, model, tokenizer):
    """Train `model` jointly on text batches and paired graph examples.

    A separate AdamW optimizer (and matching warmup schedule) is kept for the
    BERT text encoder, the graph encoder, and each of the three classifier
    heads. Per step, the losses of the three prediction modes ('medsts',
    'medsts_c', 'medsts_type') are summed and backpropagated once, then every
    optimizer/scheduler pair is stepped.

    Args:
        args: Hyperparameter/runtime namespace. Mutated in place:
            `train_batch_size`, `logging_steps`, `save_steps`,
            `num_total_epochs` are written here.
        train_dataset: Text dataset; must align 1:1 with the graph examples.
        model: Module exposing `text_encoder`, `graph_encoder`, `classifier`,
            `classifier_c`, `classifier_type` submodules.
        tokenizer: Forwarded to `evaluate` during periodic evaluation.

    Returns:
        Tuple (global_step, tr_loss / global_step).
    """
    tb_writer = SummaryWriter()
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    ## DATALOADER
    # Sequential (not random) sampling keeps text batches aligned with the
    # two graph dataloaders zipped in below.
    train_sampler = SequentialSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset,
                                  sampler=train_sampler,
                                  batch_size=args.train_batch_size)
    graph_train_dataloader_a, graph_train_dataloader_b = load_graph_examples(
        args)
    # Default: log and save once per epoch (logging_steps is overwritten to
    # a quarter-epoch inside the training loop below).
    args.logging_steps = len(train_dataloader)
    args.save_steps = len(train_dataloader)
    if args.max_steps > 0:
        # Explicit step budget: derive the epoch count from it.
        t_total = args.max_steps
        args.num_total_epochs = args.max_steps // (
            len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        # Otherwise total steps = optimizer steps per epoch * epochs.
        t_total = len(
            train_dataloader
        ) // args.gradient_accumulation_steps * args.num_train_epochs
    # Text and graph examples must be paired one-to-one.
    assert len(train_dataset) == len(graph_train_dataloader_a.dataset)
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info("  Total train batch size = %d",
                args.train_batch_size * args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    # One optimizer per submodule; the graph encoder gets its own LR.
    graph_optimizer = AdamW(model.graph_encoder.parameters(),
                            lr=args.graph_lr,
                            weight_decay=args.weight_decay)
    linear_optimizer = AdamW(model.classifier.parameters(),
                             lr=args.learning_rate,
                             weight_decay=args.weight_decay)
    linear_c_optimizer = AdamW(model.classifier_c.parameters(),
                               lr=args.learning_rate,
                               weight_decay=args.weight_decay)
    linear_type_optimizer = AdamW(model.classifier_type.parameters(),
                                  lr=args.learning_rate,
                                  weight_decay=args.weight_decay)

    # bert_optimizer_grouped_parameters = get_bert_param_groups(model, args)
    # Parameter groups (e.g. weight-decay exclusions) for the text encoder only.
    bert_optimizer_grouped_parameters = get_bert_param_groups(
        model.text_encoder, args)
    bert_optimizer = AdamW(bert_optimizer_grouped_parameters,
                           lr=args.learning_rate,
                           eps=args.adam_epsilon,
                           weight_decay=args.weight_decay)
    # NOTE(review): if args.scheduler is neither 'linear' nor 'cosine', none of
    # the scheduler variables are bound and the loop below raises NameError —
    # presumably the CLI restricts the choices; confirm upstream validation.
    if args.scheduler == 'linear':
        scheduler = WarmupLinearSchedule(bert_optimizer,
                                         warmup_steps=args.warmup_steps,
                                         t_total=t_total)
        graph_scheduler = WarmupLinearSchedule(graph_optimizer,
                                               warmup_steps=args.warmup_steps,
                                               t_total=t_total)
        linear_scheduler = WarmupLinearSchedule(linear_optimizer,
                                                warmup_steps=args.warmup_steps,
                                                t_total=t_total)
        linear_c_scheduler = WarmupLinearSchedule(
            linear_c_optimizer,
            warmup_steps=args.warmup_steps,
            t_total=t_total)
        linear_type_scheduler = WarmupLinearSchedule(
            linear_type_optimizer,
            warmup_steps=args.warmup_steps,
            t_total=t_total)
    elif args.scheduler == 'cosine':
        scheduler = WarmupCosineWithHardRestartsSchedule(
            bert_optimizer,
            warmup_steps=args.warmup_steps,
            t_total=t_total,
            cycles=2.)
        graph_scheduler = WarmupCosineWithHardRestartsSchedule(
            graph_optimizer,
            warmup_steps=args.warmup_steps,
            t_total=t_total,
            cycles=2.)
        linear_scheduler = WarmupCosineWithHardRestartsSchedule(
            linear_optimizer,
            warmup_steps=args.warmup_steps,
            t_total=t_total,
            cycles=2.)
        linear_c_scheduler = WarmupCosineWithHardRestartsSchedule(
            linear_c_optimizer,
            warmup_steps=args.warmup_steps,
            t_total=t_total,
            cycles=2.)
        linear_type_scheduler = WarmupCosineWithHardRestartsSchedule(
            linear_type_optimizer,
            warmup_steps=args.warmup_steps,
            t_total=t_total,
            cycles=2.)

    ## TRAIN
    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    set_seed(args)
    for _ in trange(int(args.num_train_epochs), desc='Epoch'):
        # Text batch and the two graph views are consumed in lockstep.
        for batch, data_a, data_b in tqdm(zip(train_dataloader,
                                              graph_train_dataloader_a,
                                              graph_train_dataloader_b),
                                          desc='Iteration',
                                          total=len(train_dataloader)):
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            data_a, data_b = data_a.to(args.device), data_b.to(args.device)

            # Regression loss for the similarity score is configurable;
            # the auxiliary heads always use cross-entropy below.
            loss_fcts = {
                'mse': F.mse_loss,
                'smooth_l1': F.smooth_l1_loss,
                'l1': F.l1_loss
            }
            loss_fct = loss_fcts[args.loss_fct]
            loss_combined = 0.0
            # One forward pass per prediction mode; losses are summed so a
            # single backward covers all three heads.
            for mode in ['medsts', 'medsts_c', 'medsts_type']:
                torch.cuda.empty_cache()
                logits = model(batch[0],
                               batch[1],
                               batch[2],
                               data_a,
                               data_b,
                               mode=mode)
                if mode == 'medsts':
                    loss = loss_fct(logits, data_a.label)
                elif mode == 'medsts_c':
                    loss = F.cross_entropy(logits, data_a.label_c)
                elif mode == 'medsts_type':
                    loss = F.cross_entropy(logits, data_a.label_type)
                loss_combined += loss

            loss_combined.backward()
            # Clip either the whole model or just the text encoder.
            if args.clip_all:
                torch.nn.utils.clip_grad_norm_(model.parameters(),
                                               args.max_grad_norm)
            else:
                torch.nn.utils.clip_grad_norm_(model.text_encoder.parameters(),
                                               args.max_grad_norm)

            # NOTE(review): this accumulates only the LAST mode's loss
            # ('medsts_type'), not loss_combined — confirm whether the
            # reported training loss is intended to track just that head.
            tr_loss += loss.item()
            # NOTE(review): each scheduler is stepped BEFORE its optimizer;
            # torch >= 1.1 recommends optimizer.step() first — verify the
            # warmup schedule is not off by one step.
            scheduler.step()
            bert_optimizer.step()

            graph_scheduler.step()
            linear_scheduler.step()
            graph_optimizer.step()

            # print('learning rate: {} \t graph optimizer lr: {}'.format(linear_optimizer.param_groups[0]['lr'], graph_optimizer.param_groups[0]['lr']))
            linear_optimizer.step()

            linear_c_scheduler.step()
            linear_type_scheduler.step()
            linear_c_optimizer.step()
            linear_type_optimizer.step()

            model.zero_grad()
            global_step += 1
            # Re-derive logging cadence: four evaluations per epoch.
            args.logging_steps = len(train_dataloader) // 4

            if args.logging_steps > 0 and global_step % args.logging_steps == 0:
                result = evaluate(args, model, tokenizer)
                tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                tb_writer.add_scalar('loss', (tr_loss - logging_loss) /
                                     args.logging_steps, global_step)
                logging_loss = tr_loss

            if args.save_steps > 0 and global_step % args.save_steps == 0:
                # Only the training args are checkpointed here; model saving
                # is commented out below.
                output_dir = os.path.join(args.output_dir,
                                          'checkpoint-{}'.format(global_step))
                if not os.path.exists(output_dir):
                    os.makedirs(output_dir)
                # model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
                # model_to_save.save_pretrained(output_dir)
                torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                # logger.info("Saving model checkpoint to %s", output_dir)

        # result = evaluate(args, model, tokenizer)
    tb_writer.close()
    return global_step, tr_loss / global_step