Example #1
def run(config, folds_dir, balanced):
    model = get_model(config[MODEL_NAME], config[MODEL_PARAMS]).cuda()
    criterion = get_loss(config[LOSS_NAME], config[LOSS_PARAMS])
    optimizer = get_optimizer(config[OPTIM_NAME],
                              model.parameters(),
                              optimizer_params=config[OPTIM_PARAMS])

    last_epoch = -1
    scheduler = get_scheduler(config[SCHEDULER_NAME], optimizer, last_epoch,
                              config[SCHEDULER_PARAMS])

    datasets = {
        stage: CustomDataset(folds_dir, stage, config[FOLD_ID],
                             config[DATA_PREFIX], config[INPUT_SIZE])
        for stage in ['train', 'test']
    }

    print('Loading sampler')
    if balanced:
        train_sampler = BalancedBatchSampler(datasets['train'])
    else:
        train_sampler = None
    print('Sampler loaded')
    dataloaders = {
        stage: get_dataloader(datasets[stage], config[BATCH_SIZE],
                              train_sampler)
        for stage in ['train', 'test']
    }

    writer = SummaryWriter(config[TRAIN_DIR])
    train(config, model, dataloaders, criterion, optimizer, scheduler, writer,
          last_epoch + 1)
Example #2
    def _init_train_components_ensemble(self, reinitialise=False):
        self.val_metrics = {
            'loss': metrics.Loss(BCEAndJaccardLoss(eval_ensemble=True, gpu_node=self.config.gpu_node)),
            'segment_metrics': SegmentationMetrics(num_classes=self.data_loaders.num_classes,
                                                   threshold=self.config.binarize_threshold,
                                                   eval_ensemble=True)
        }

        self.model_cfg.network_params.input_channels = self.data_loaders.input_channels
        self.model_cfg.network_params.num_classes = self.data_loaders.num_classes
        self.model_cfg.network_params.image_size = self.data_loaders.image_size

        self.criterion = self._init_criterion(not reinitialise)
        self.ens_models = list()
        self.ens_optimizers = list()
        self.ens_lr_schedulers = list()

        optimizer_cls = get_optimizer(self.optim_cfg)
        init_param_names = retrieve_class_init_parameters(optimizer_cls)
        optimizer_params = {k: v for k, v in self.optim_cfg.items() if k in init_param_names}

        for _ in range(self.len_models):
            self.ens_models.append(get_model(self.model_cfg).to(device=self.device))
            self.ens_optimizers.append(optimizer_cls(self.ens_models[-1].parameters(), **optimizer_params))

            lr_scheduler = self._init_lr_scheduler(self.ens_optimizers[-1])
            self.ens_lr_schedulers.append(lr_scheduler)

        if not reinitialise:
            self.main_logger.info(f'Using ensemble of {self.len_models} {self.ens_models[0]}')
            self.main_logger.info(f'Using optimizers {self.ens_optimizers[0].__class__.__name__}')

        self.trainer, self.evaluator = self._init_engines()

        self._init_handlers()
Example #3
def get_test_max_landmark_of_one_model(config, gi, best_model_idx, key_group):
    test_img_list = gen_test_csv()
    print('test_img_list ', len(test_img_list))

    # result_set_whole = {}
    # for img_id in test_img_list:
    #     # initialize an entry for each image id
    #     result_set_whole[img_id] = {}
    test_data_set = get_test_loader(config, test_img_list,
                                    get_transform(config, 'val'))
    model = get_model(config, gi)
    if torch.cuda.is_available():
        model = model.cuda()
    optimizer = get_optimizer(config, model.parameters())
    checkpoint = utils.checkpoint.get_model_saved(config, gi, best_model_idx)
    best_epoch, step = utils.checkpoint.load_checkpoint(
        model, optimizer, checkpoint)
    result_set = test_one_model(test_data_set, model, key_group)

    #
    result_list_whole = []
    for img_ps in result_set.keys():
        ps = result_set[img_ps]
        max_p_key = max(ps, key=ps.get)
        # result_set_whole[img_ps][max_p_key] = ps[max_p_key]
        result_list_whole.append((img_ps, max_p_key, ps[max_p_key]))

    test_pd = pd.DataFrame.from_records(
        result_list_whole, columns=['img_id', 'landmark_id', 'pers'])
    output_filename = os.path.join('./results/test/',
                                   'test_img_land_' + str(gi) + '.csv')
    test_pd.to_csv(output_filename, index=False)

    return
Example #4
    def build_model(self):
        """Creates the model object, which is used to calculate topics, similar words, similar sentences, topic occurrences, and topic similarities

        Returns:
            model: Keras model object

        """
        optimizer = get_optimizer(self.args)
        self.logger.info('Building model')

        self.logger.info('   Number of training examples: %d',
                         len(self.train_x))
        self.logger.info('   Length of vocab: %d', len(self.vocab))

        def max_margin_loss(y_true, y_pred):
            return K.mean(y_pred)

        model = create_model(self.args, self.overall_maxlen, self.vocab)

        # Freeze the word embedding layer
        model.get_layer('word_emb').trainable = False

        # Check option to fix clusters instead of training them
        if self.args.fix_clusters == "yes":
            model.get_layer('topic_emb').trainable = False

        model.compile(optimizer=optimizer,
                      loss=max_margin_loss,
                      metrics=[max_margin_loss])

        return model
Example #5
    def __init__(self, args, logger):
        self.args = args
        self.logger = logger
        self.writer = SummaryWriter(args.log_dir)
        cudnn.enabled = True

        # set up model
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = get_aux_net(args.network.arch)(aux_classes=args.aux_classes + 1, classes=args.n_classes)
        self.model = self.model.to(self.device)
        wandb.watch(self.model)

        if args.mode == 'train':
            # set up optimizer, lr scheduler and loss functions
            optimizer = get_optimizer(self.args.training.optimizer)
            optimizer_params = {k: v for k, v in self.args.training.optimizer.items() if k != "name"}
            self.optimizer = optimizer(self.model.parameters(), **optimizer_params)
            self.scheduler = get_scheduler(self.optimizer, self.args.training.lr_scheduler)

            self.class_loss_func = nn.CrossEntropyLoss()

            self.start_iter = 0

            # resume
            if args.training.resume:
                self.load(args.model_dir + '/' + args.training.resume)

            cudnn.benchmark = True

        elif args.mode == 'val':
            self.load(os.path.join(args.model_dir, args.validation.model))
        else:
            self.load(os.path.join(args.model_dir, args.testing.model))
Example #6
def run(config):
    train_dir = config.train.dir
    model = get_model(config, model_type).to(device)
    print('The number of parameters : %d' % count_parameters(model))
    criterion = get_loss(config)
    optimizer = get_optimizer(config, model)

    checkpoint = utils.checkpoint.get_initial_checkpoint(config,
                                                         model_type=model_type)
    if checkpoint is not None:
        last_epoch, step = utils.checkpoint.load_checkpoint(
            model, optimizer, checkpoint, model_type=model_type)
    else:
        last_epoch, step = -1, -1

    print('from checkpoint: {} last epoch:{}'.format(checkpoint, last_epoch))
    scheduler = get_scheduler(config, optimizer, last_epoch)

    print(config.data)
    dataloaders = {
        'train': get_train_dataloader(config),
        'val': get_valid_dataloader(config),
        'test': get_test_dataloader(config)
    }

    writer = SummaryWriter(config.train[model_type + '_dir'])
    visualizer = get_visualizer(config)
    train(config, model, dataloaders, criterion, optimizer, scheduler, writer,
          visualizer, last_epoch + 1)
Example #7
def run(config):
    teacher_model = get_model(config, 'teacher').to(device)
    criterion = get_loss(config)

    # for teacher
    trainable_params = filter(lambda p: p.requires_grad,
                              teacher_model.parameters())
    optimizer_t = get_optimizer(config, trainable_params)
    checkpoint_t = utils.checkpoint.get_initial_checkpoint(
        config, model_type='teacher')
    if checkpoint_t is not None:
        last_epoch_t, step_t = utils.checkpoint.load_checkpoint(
            teacher_model, optimizer_t, checkpoint_t, model_type='teacher')
    else:
        last_epoch_t, step_t = -1, -1
    print('teacher model from checkpoint: {} last epoch:{}'.format(
        checkpoint_t, last_epoch_t))

    scheduler_t = get_scheduler(config, optimizer_t, last_epoch_t)

    print(config.data)
    dataloaders = {
        'train': get_train_dataloader(config),
        'val': get_valid_dataloader(config),
        'test': get_test_dataloader(config)
    }
    writer = SummaryWriter(config.train['teacher' + '_dir'])
    visualizer = get_visualizer(config)
    train(config, teacher_model, dataloaders, criterion, optimizer_t,
          scheduler_t, writer, visualizer, last_epoch_t + 1)
Example #8
def run(config):
    train_dir = config.train.dir

    model = get_model(config).cuda()
    criterion = get_loss(config)
    optimizer = get_optimizer(config, model.parameters())

    checkpoint = utils.checkpoint.get_initial_checkpoint(config)
    if checkpoint is not None:
        last_epoch, step = utils.checkpoint.load_checkpoint(
            model, optimizer, checkpoint)
    else:
        last_epoch, step = -1, -1

    print('from checkpoint: {} last epoch:{}'.format(checkpoint, last_epoch))
    scheduler = get_scheduler(config, optimizer, last_epoch)

    #     dataloaders = {split:get_dataloader(config, split, get_transform(config, split))
    #                    for split in ['train', 'val']}

    print(config.data)
    dataloaders = {
        'train': get_train_dataloader(config, get_transform(config)),
        'val': get_valid_dataloaders(config)[0]
    }
    writer = SummaryWriter(train_dir)
    train(config, model, dataloaders, criterion, optimizer, scheduler, writer,
          last_epoch + 1)
Example #9
def run(config):
    model = get_model(config[MODEL_NAME], config[MODEL_PARAMS]).cuda()
    criterion = get_loss(config[LOSS_NAME], config[LOSS_PARAMS])
    optimizer = get_optimizer(config[OPTIM_NAME],
                              model.parameters(),
                              optimizer_params=config[OPTIM_PARAMS])

    last_epoch = -1
    scheduler = get_scheduler(config[SCHEDULER_NAME], optimizer, last_epoch,
                              config[SCHEDULER_PARAMS])

    datasets = {
        stage: CustomDataset(DATA_DIR, stage, config[FOLD_ID],
                             config[DATA_PREFIX], config[INPUT_SIZE])
        for stage in ['train', 'test']
    }

    dataloaders = {
        stage: get_dataloader(datasets[stage], config[BATCH_SIZE])
        for stage in ['train', 'test']
    }

    writer = SummaryWriter(config[TRAIN_DIR])
    clip_grad_value_(model.parameters(), 2.0)
    train(config, model, dataloaders, criterion, optimizer, scheduler, writer,
          last_epoch + 1)
Example #10
    def initialization(self):
        WORK_PATH = self.config.WORK_PATH
        os.chdir(WORK_PATH)
        os.environ["CUDA_VISIBLE_DEVICES"] = self.config.CUDA_VISIBLE_DEVICES

        print("GPU is :", os.environ["CUDA_VISIBLE_DEVICES"])

        self.model = get_model(self.config)

        self.optimizer = get_optimizer(self.config, self.model.parameters())

        checkpoint = get_checkpoint(self.config)

        if torch.cuda.device_count() > 1:
            self.model = torch.nn.DataParallel(self.model)
        self.model = self.model.cuda()

        self.last_epoch, self.step = load_checkpoint(self.model,
                                                     self.optimizer,
                                                     self.center_model,
                                                     self.optimizer_center,
                                                     checkpoint)
        print("from checkpoint {} last epoch: {}".format(
            checkpoint, self.last_epoch))

        self.collate_fn = get_collate_fn(self.config,
                                         self.config.data.frame_num,
                                         self.sample_type)  #
Example #11
def run(config):
    train_dir = config.train.dir

    task = get_task(config)
    optimizer = get_optimizer(config, task.get_model().parameters())

    checkpoint = utils.checkpoint.get_initial_checkpoint(config)
    if checkpoint is not None:
        last_epoch, step = utils.checkpoint.load_checkpoint(
            task.get_model(), optimizer, checkpoint)
    else:
        last_epoch, step = -1, -1

    print('from checkpoint: {} last epoch:{}'.format(checkpoint, last_epoch))
    scheduler = get_scheduler(config, optimizer, last_epoch)

    preprocess_opt = task.get_preprocess_opt()
    dataloaders = {
        split: get_dataloader(config, split,
                              get_transform(config, split, **preprocess_opt))
        for split in ['train', 'dev']
    }

    writer = SummaryWriter(config.train.dir)
    train(config, task, dataloaders, optimizer, scheduler, writer,
          last_epoch + 1)
Example #12
def find_lr(optimizer='Adam', verbose=0):
    fname = 'plots/mnist_lr_finder_for_{}.png'.format(optimizer)
    model = MLP()
    criterion = keras.losses.SparseCategoricalCrossentropy()
    if optimizer == 'Adam':
        optimizer = keras.optimizers.Adam(learning_rate=LEARNING_RATE)
    else:
        optimizer = get_optimizer(optimizer, learning_rate=LEARNING_RATE)
    mnist = keras.datasets.mnist
    (x_train, y_train), (x_valid, y_valid) = mnist.load_data()
    x_train = x_train.reshape(60000, 784).astype('float32') / 255.0
    train_dataset = tf.data.Dataset.from_tensor_slices(
        (x_train, y_train)).batch(BATCH_SIZE)

    @tf.function
    def train_step(x_batch, y_batch):
        with tf.GradientTape() as tape:
            out = model(x_batch, training=True)
            loss = criterion(y_batch, out)
        grad = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grad, model.trainable_variables))
        return loss

    lr_finder = LRFinder(start_lr=1e-7, max_lr=1e-1)
    for idx, (x_batch, y_batch) in enumerate(train_dataset):
        loss = train_step(x_batch, y_batch)
        new_lr = lr_finder.step(loss.numpy())
        optimizer.lr.assign(new_lr)
        if lr_finder.done:
            break
    lr_finder.plot_lr(fname)
    if verbose:
        print(lr_finder.history)
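The `LRFinder` helper used in Example #12 is not shown on this page. A minimal, hypothetical sketch of such a class (the exact ramp length and divergence rule are assumptions, not taken from the example's repository):

import numpy as np

class LRFinder:
    """Hypothetical sketch: sweep the learning rate exponentially and record the loss."""

    def __init__(self, start_lr=1e-7, max_lr=1e-1, num_steps=100):
        self.lrs = np.geomspace(start_lr, max_lr, num_steps)  # exponential LR ramp
        self.history = []      # (lr, loss) pairs, one per batch
        self.best_loss = None
        self.done = False
        self._i = 0

    def step(self, loss):
        """Record the loss seen at the current LR and return the next LR to try."""
        lr = self.lrs[min(self._i, len(self.lrs) - 1)]
        self.history.append((lr, loss))
        if self.best_loss is None or loss < self.best_loss:
            self.best_loss = loss
        # Stop once the loss diverges or the ramp is exhausted.
        if loss > 4 * self.best_loss or self._i >= len(self.lrs) - 1:
            self.done = True
        self._i += 1
        return self.lrs[min(self._i, len(self.lrs) - 1)]

    def plot_lr(self, fname):
        import matplotlib.pyplot as plt
        lrs, losses = zip(*self.history)
        plt.figure()
        plt.plot(lrs, losses)
        plt.xscale('log')
        plt.xlabel('learning rate')
        plt.ylabel('loss')
        plt.savefig(fname)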
Example #13
def run(config_file):
    config = load_config(config_file)

    os.makedirs(config.work_dir, exist_ok=True)
    save_config(config, config.work_dir + '/config.yml')

    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

    all_transforms = {}
    all_transforms['train'] = get_transforms(config.transforms.train)
    all_transforms['valid'] = get_transforms(config.transforms.test)

    dataloaders = {
        phase: make_loader(
            data_folder=config.data.train_dir,
            df_path=config.data.train_df_path,
            phase=phase,
            batch_size=config.train.batch_size,
            num_workers=config.num_workers,
            idx_fold=config.data.params.idx_fold,
            transforms=all_transforms[phase],
            num_classes=config.data.num_classes,
            pseudo_label_path=config.train.pseudo_label_path,
            task='cls'
        )
        for phase in ['train', 'valid']
    }

    # create model
    model = CustomNet(config.model.encoder, config.data.num_classes)

    # train setting
    criterion = get_loss(config)
    params = [
        {'params': model.base_params(), 'lr': config.optimizer.params.encoder_lr},
        {'params': model.fresh_params(), 'lr': config.optimizer.params.decoder_lr}
    ]
    optimizer = get_optimizer(params, config)
    scheduler = get_scheduler(optimizer, config)

    # model runner
    runner = SupervisedRunner(model=model)

    callbacks = [MultiClassAccuracyCallback(threshold=0.5), F1ScoreCallback()]
    if os.path.exists(config.work_dir + '/checkpoints/best.pth'):
        callbacks.append(CheckpointCallback(resume=config.work_dir + '/checkpoints/best_full.pth'))

    # model training
    runner.train(
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        loaders=dataloaders,
        logdir=config.work_dir,
        num_epochs=config.train.num_epochs,
        callbacks=callbacks,
        verbose=True,
        fp16=True,
    )
Example #14
    def test_wrong_optimizer_name(self, model_name):
        """Calling `get_optimizer` with a non-supported optimizer_name will
        raise a ValueError.
        """
        model = get_model(model_name)
        with pytest.raises(ValueError):
            optimizer = get_optimizer("blah", model.parameters(), lr=0.001)
Example #15
def run(config):
    train_dir = config.train.dir

    student_model = get_model(config, model_type).to(device)
    criterion = get_loss(config)
    trainable_params = filter(lambda p: p.requires_grad,
                              student_model.parameters())
    optimizer = get_optimizer(config, trainable_params)
    checkpoint = utils.checkpoint.get_initial_checkpoint(config,
                                                         model_type=model_type)
    if checkpoint is not None:
        last_epoch, step = utils.checkpoint.load_checkpoint(
            student_model, optimizer, checkpoint, model_type=model_type)
    else:
        last_epoch, step = -1, -1
    print('student model from checkpoint: {} last epoch:{}'.format(
        checkpoint, last_epoch))

    scheduler = get_scheduler(config, optimizer, last_epoch)

    print(config.data)
    dataloaders = {
        'train': get_train_dataloader(config),
        'val': get_valid_dataloader(config),
        'test': get_test_dataloader(config)
    }
    writer = SummaryWriter(config.train['student' + '_dir'])
    visualizer = get_visualizer(config)
    train(config, student_model, dataloaders, criterion, optimizer, scheduler,
          writer, visualizer, last_epoch + 1)
Example #16
def main():
    # read configuration file
    with open(sys.argv[1]) as configs:
        config_file = json.load(configs)

    # Load all the paths
    PATH_TO_IMAGES = config_file["path_to_images"]
    TRAIN_DATASET_CSV = config_file["path_to_train_csv"]
    TEST_DATASET_CSV = config_file["path_to_test_csv"]
    PATH_TO_WEIGHTS = config_file["path_to_weights"]
    PATH_TO_RESULTS = config_file["path_to_results"]
    WEIGHTS_FILE = PATH_TO_IMAGES + "weights.pt"

    # Creates the results folder
    # This folder will contain the train and test results, config file and weights of the model
    results_directory = PATH_TO_RESULTS + get_time() + "/"
    os.mkdir(results_directory)
    copy2(sys.argv[1], results_directory)

    # Transform of the images
    transform = transforms.Compose([
        transforms.Resize((config_file["image_size"],
                           config_file["image_size"])),  # Image size
        transforms.ToTensor()
    ])

    # Datasets
    train_dataset = BoneDataset(TRAIN_DATASET_CSV, PATH_TO_IMAGES, transform,
                                config_file["region"])
    test_dataset = BoneDataset(TEST_DATASET_CSV, PATH_TO_IMAGES, transform,
                               config_file["region"])

    # Train loader
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=config_file["train_batch_size"],
        shuffle=True)

    # Test loader
    test_loader = torch.utils.data.DataLoader(
        dataset=test_dataset,
        batch_size=config_file["test_batch_size"],
        shuffle=True)

    # device, model, optimizer, criterion , MAE and results
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = get_model(config_file["model"], PATH_TO_WEIGHTS).to(device)
    optimizer = get_optimizer(model, config_file["optimizer"],
                              config_file["optimizer_hyperparameters"])
    criterion = get_criterion(config_file["criterion"])
    results = Results(results_directory)

    for epoch in range(1, config_file["epochs"] + 1):
        train(model, device, train_loader, optimizer, criterion, epoch,
              config_file["log_interval"], config_file["decay_lr_interval"],
              results)
        test(model, device, test_loader, criterion, results)
        torch.save(model.state_dict(), WEIGHTS_FILE)
        results.write_results()
Example #17
    def _get_optimizer(self, model_params):
        optimizer = get_optimizer(
            model_params,
            self.config['runner']['total_steps'],
            self.config['optimizer']
        )
        self._load_weight(optimizer, 'Optimizer')
        return optimizer
Example #18
    def __init__(self, config):
        """Initialize Trainer

        Args:
            config (dict): Configuration dictionary
        """
        super(Trainer, self).__init__()

        # Define multi-task setting
        dataset = config['dataset']
        dataset_name = dataset['dataset_name']
        self.tasks_weighting = dataset['tasks_weighting']
        self.tasks = [k for k, v in self.tasks_weighting.items()]

        # Setup network
        model_config = config['model']
        self.model = get_module(model_config, dataset_name, self.tasks)
        print('Model constructed for {}'.format(' '.join(self.tasks)))

        if 'grouping' in model_config:
            print('groups = {}'.format(model_config['grouping']['groups']))
            print('grouping method = {}'.format(model_config['grouping']['method']))
            self.model = update_module(config, self.model, self.tasks)

        # Setup for a task-conditional setting
        model_params = config['model']['parameters']
        if 'common_mt_params' in model_params:
            self.task_conditional = not model_params['common_mt_params']
        else:
            self.task_conditional = False

        # Setup optimizers
        optimizer_config = config['optimizer']
        optimizer_cls = get_optimizer(optimizer_config['algorithm'])
        model_params = get_params(self.model, optimizer_config['parameters']['lr'], len(self.tasks),
                                  self.task_conditional, self.tasks)
        self.optimizer = optimizer_cls(model_params, **optimizer_config['parameters'])

        # Setup schedulers
        scheduler_config = config['scheduler']
        scheduler_cls = get_scheduler(scheduler_config['lr_policy'])
        self.scheduler = scheduler_cls(self.optimizer, **scheduler_config['parameters'])

        # Setup loss function
        losses_config = config['loss']
        self.criterions = get_loss_functions(self.tasks, losses_config)

        # Initialise performance meters
        self.best_val_loss = 1e9
        self.train_loss = {}
        self.val_loss = {}
        for task in self.tasks:
            self.train_loss[task] = get_running_meter()
            self.val_loss[task] = get_running_meter()

        # Initialize img logging for visualization
        self.img_logging = get_img_logging(dataset_name, self.tasks)
        self.pred_decoder = get_pred_decoder(dataset_name, self.tasks)
Example #19
    def start_experiment(self, request):
        params = request.get('params')
        optimizer_title = request.get('optimizer', 'OnePlusOne')
        params_space = self.parse_params_space(params)
        self.logger.info(f'Params space defined: {str(params_space)}')
        optimizer = get_optimizer(optimizer_title)(parametrization=params_space)
        experiment_id = str(uuid.uuid4())
        self.experiments[experiment_id] = {
            'optimizer_title': optimizer_title,
            'optimizer': optimizer
        }
        return experiment_id
Example #20
    def _get_optimizer(self, model_params):
        optimizer = get_optimizer(model_params,
                                  self.config['runner']['total_steps'],
                                  self.config['optimizer'])

        init_optimizer = self.init_ckpt.get('Optimizer')
        if init_optimizer:
            print(
                '[Runner] - Loading optimizer weights from the previous experiment'
            )
            optimizer.load_state_dict(init_optimizer)
        return optimizer
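Example #20 expects the resumed checkpoint dictionary to carry the optimizer state under the key 'Optimizer'. A sketch of a matching save side, assuming that convention (the real runner may store additional fields):

import torch

def save_checkpoint(path, model, optimizer, step):
    """Hypothetical sketch of the save side matching the 'Optimizer' key above."""
    ckpt = {
        'Step': step,
        'Model': model.state_dict(),
        'Optimizer': optimizer.state_dict(),
    }
    torch.save(ckpt, path)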
Example #21
    def test_optimizer_type(self, optimizer_name, model_name):
        """Test that the specified optimizer is the one actually being loaded."""
        model = get_model(model_name)
        optimizer = get_optimizer(optimizer_name, model.parameters(), lr=0.001)
        if optimizer_name.lower() == "sgd":
            assert isinstance(optimizer, torch.optim.SGD)
        elif optimizer_name.lower() == "adam":
            assert isinstance(optimizer, torch.optim.Adam)
        else:
            raise ValueError("My test is flaky. Got unsupported optimizer "
                             "{} from conftest.".format(optimizer_name))
Example #22
def search_once(config, policy):
    model = get_model(config).cuda()
    criterion = get_loss(config)
    optimizer = get_optimizer(config, model.parameters())
    scheduler = get_scheduler(config, optimizer, -1)

    transforms = {'train': get_transform(config, 'train', params={'policies': policy}),
                  'val': get_transform(config, 'val')}
    dataloaders = {split:get_dataloader(config, split, transforms[split])
                   for split in ['train', 'val']}

    score_dict = train(config, model, dataloaders, criterion, optimizer, scheduler, None, 0)
    return score_dict['f1_mavg']
Example #23
    def set_model_and_optimizer(self, model, cfg):
        self._model = model.to(self.cfg.local_rank)
        if self.cfg.n_gpu > 1:
            self._model = torch.nn.parallel.DistributedDataParallel(
                self._model, device_ids=[self.cfg.local_rank],
                output_device=self.cfg.local_rank,
                find_unused_parameters=True)

        # Build Optimizer
        self._optimizer = get_optimizer(optimizer_type="adam",
                                        learning_rate=cfg.learning_rate,
                                        warmup_proportion=cfg.warmup_proportion,
                                        max_grad_norm=cfg.max_grad_norm,
                                        named_parameters=list(self.model_module.named_parameters()),
                                        gradient_accumulation_steps=cfg.gradient_accumulation_steps,
                                        num_steps_per_epoch=len(self._train_loader),
                                        epoch_num=cfg.epoch_num)
Example #24
    def _init_optimizer(self, init=True):
        optimizer_cls = get_optimizer(self.optim_cfg)

        init_param_names = retrieve_class_init_parameters(optimizer_cls)
        optimizer_params = {k: v for k, v in self.optim_cfg.items() if k in init_param_names}

        optimizer = optimizer_cls(self.model.parameters(), **optimizer_params)
        if init:
            self.main_logger.info(f'Using optimizer {optimizer.__class__.__name__}')

        if self.resume_cfg.resume_from is not None and self.resume_cfg.saved_optimizer is not None and init:
            optimizer_path = get_resume_optimizer_path(self.resume_cfg.resume_from, self.resume_cfg.saved_optimizer)
            if init:
                self.main_logger.info(f'Loading optimizer from {optimizer_path}')
            optimizer.load_state_dict(torch.load(optimizer_path))

        return optimizer
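Examples #2 and #24 filter the optimizer config through `retrieve_class_init_parameters`, whose implementation is not shown here. A plausible sketch based on `inspect.signature` (the helper's exact behaviour in the original repository may differ):

import inspect

def retrieve_class_init_parameters(cls):
    """Hypothetical sketch: return the parameter names accepted by cls.__init__."""
    signature = inspect.signature(cls.__init__)
    # Drop 'self' so the result can be compared directly against config keys.
    return [name for name in signature.parameters if name != 'self']

With such a helper, `{k: v for k, v in self.optim_cfg.items() if k in init_param_names}` keeps only keys like `lr` or `weight_decay` that the optimizer constructor actually accepts.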
Example #25
def run(config):
    teacher_model = get_model(config, 'teacher').to(device)
    student_model = get_model(config, 'student').to(device)
    print('The number of parameters : %d' % count_parameters(student_model))
    criterion = get_loss(config)


    # for teacher
    optimizer_t = None
    checkpoint_t = utils.checkpoint.get_initial_checkpoint(config,
                                                         model_type='teacher')
    if checkpoint_t is not None:
        last_epoch_t, step_t = utils.checkpoint.load_checkpoint(teacher_model,
                                 optimizer_t, checkpoint_t, model_type='teacher')
    else:
        last_epoch_t, step_t = -1, -1
    print('teacher model from checkpoint: {} last epoch:{}'.format(
        checkpoint_t, last_epoch_t))

    # for student
    optimizer_s = get_optimizer(config, student_model)
    checkpoint_s = utils.checkpoint.get_initial_checkpoint(config,
                                                         model_type='student')
    if checkpoint_s is not None:
        last_epoch_s, step_s = utils.checkpoint.load_checkpoint(student_model,
                                 optimizer_s, checkpoint_s, model_type='student')
    else:
        last_epoch_s, step_s = -1, -1
    print('student model from checkpoint: {} last epoch:{}'.format(
        checkpoint_s, last_epoch_s))

    scheduler_s = get_scheduler(config, optimizer_s, last_epoch_s)

    print(config.data)
    dataloaders = {'train':get_train_dataloader(config, get_transform(config)),
                   'val':get_valid_dataloader(config)}
                   #'test':get_test_dataloader(config)}
    writer = SummaryWriter(config.train['student' + '_dir'])
    visualizer = get_visualizer(config)
    result = train(config, student_model, teacher_model, dataloaders,
          criterion, optimizer_s, scheduler_s, writer,
          visualizer, last_epoch_s+1)
    
    print('best psnr : %.3f, best epoch: %d'%(result['best_psnr'], result['best_epoch']))
Example #26
def run_experiment(queries: List[Dict[str, Any]], best_index: int,
                   n_trials: int, dfs: Dict[str, pd.DataFrame],
                   dataset_params: Dict[str, Any],
                   optimizer_params: Dict[str, Any], output_file: RichPath):

    times: List[float] = []
    total_time: float = 0.0
    best_count: float = 0.0
    avg_times: List[float] = []
    best_frac: List[float] = []
    selected_index: List[int] = []

    optimizer = get_optimizer(optimizer_params['name'], queries,
                              **optimizer_params['params'])
    for t in range(1, n_trials + 1):
        start = time.time()
        query_index, q = optimizer.get_query(time=t)
        df = merge_frames(q['from'], q['on'], options=[['home', 'away']])
        elapsed = time.time() - start
        optimizer.update(query_index, reward=-elapsed)

        df.copy()

        # Avoid time measurements on the first iteration due to caching
        selected_index.append(query_index)
        if t > 1:
            times.append(elapsed)
            total_time += elapsed
            avg_times.append(total_time / (t - 1))
            best_count += float(query_index == best_index)
            best_frac.append(best_count / (t - 1))

        if t % 100 == 0:
            print(f'Completed {t} trials')

    # Collect and write the metrics
    metrics = dict(times=times,
                   avg_times=avg_times,
                   best_frac=best_frac,
                   selected_index=selected_index)
    output_file.save_as_compressed_file(metrics)
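The query `optimizer` in Example #26 exposes `get_query(time=...)` and `update(index, reward=...)`; its implementation is not part of this page. A purely illustrative epsilon-greedy sketch with that interface:

import random

class EpsilonGreedyOptimizer:
    """Hypothetical sketch of a bandit over candidate queries."""

    def __init__(self, queries, epsilon=0.1):
        self.queries = queries
        self.epsilon = epsilon
        self.counts = [0] * len(queries)
        self.rewards = [0.0] * len(queries)

    def get_query(self, time):
        if random.random() < self.epsilon:
            index = random.randrange(len(self.queries))  # explore
        else:
            # Unvisited arms get +inf so they are tried at least once.
            averages = [r / c if c else float('inf')
                        for r, c in zip(self.rewards, self.counts)]
            index = max(range(len(self.queries)), key=averages.__getitem__)  # exploit
        return index, self.queries[index]

    def update(self, index, reward):
        self.counts[index] += 1
        self.rewards[index] += reward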
Example #27
def run(args, log):

    df = pd.read_csv(args.df_path)
    df_train = df[df['Fold']!=args.fold]
    df_valid = df[df['Fold']==args.fold]
    dfs = {}
    dfs['train'] = df_train
    dfs['val'] = df_valid
    
    model = get_model(args).cuda()
    
    if args.mode == 'segmentation':
        # Train the encoder/decoder, freeze the classification head
        for param in model.model.encoder.parameters():
            param.requires_grad = True
        for param in model.model.decoder.parameters():
            param.requires_grad = True
        for param in model.model.classification_head.parameters():
            param.requires_grad = False

    elif args.mode == 'classification':
        # Freeze the encoder/decoder, train only the classification head
        for param in model.model.encoder.parameters():
            param.requires_grad = False
        for param in model.model.decoder.parameters():
            param.requires_grad = False
        for param in model.model.classification_head.parameters():
            param.requires_grad = True

    criterion = get_loss(args)
    optimizer = get_optimizer(args, model)
    
    if args.initial_ckpt is not None:
        last_epoch, step = checkpoint.load_checkpoint(args, model, checkpoint=args.initial_ckpt)
        log.write(f'Resume training from {args.initial_ckpt} @ {last_epoch}\n')
    else:
        last_epoch, step = -1, -1
    
    dataloaders = {mode:get_dataloader(args.data_dir, dfs[mode], mode, args.pretrain, args.batch_size) for mode in ['train', 'val']}   
    seed_everything(seed=123)
    clr = CLR(optimizer, len(dataloaders['train']))

    train(args, model, dataloaders['train'], criterion, optimizer, clr)
Example #28
def run(config):
    train_group_csv_dir = './data/group_csv/'
    writer = SummaryWriter(config.train.dir)
    train_filenames = list(glob.glob(os.path.join(train_group_csv_dir, 'data_train_group_*')))[1:]
    
    for ti, train_file in tqdm.tqdm(enumerate(train_filenames)):
        gi_tr = train_file.replace('data_train_group_', '')
        gi_tr = gi_tr.split('/')[-1]
        gi_tr = gi_tr.replace('.csv', '')
        group_idx = int(gi_tr)
        
        utils.prepare_train_directories(config, group_idx)
        
        model = get_model(config, group_idx)
        if torch.cuda.is_available():
            model = model.cuda()
        criterion = get_loss(config)
        optimizer = get_optimizer(config, model.parameters())

        checkpoint = utils.checkpoint.get_initial_checkpoint(config, group_idx)
        if checkpoint is not None:
            last_epoch, step = utils.checkpoint.load_checkpoint(model, optimizer, checkpoint)
        else:
            last_epoch, step = -1, -1

        if last_epoch > config.train.num_epochs:
            print('group -- ', str(group_idx), '-- index-', ti, '  ---- already trained, skipping')
            continue
        print('from checkpoint: {} last epoch:{}'.format(checkpoint, last_epoch))
        print('group -- ', str(group_idx), '-- index-', ti)
        scheduler = get_scheduler(config, optimizer, last_epoch)
    
        dataloaders = {split: get_dataloader(config, group_idx, split, get_transform(config, split))
                       for split in ['train', 'val']}

        train(config, group_idx, model, dataloaders, criterion, optimizer, scheduler,
              writer, last_epoch + 1)
Example #29
    def __init__(self,
                 layers=[],
                 n_batches=10,
                 loss='categorical_crossentropy',
                 metric='accuracy_score',
                 optimizer='adam',
                 optimizer_params={},
                 save_weights=True,
                 shuffle=True,
                 random_seed=None):
        self.layers = layers
        self.n_batches = n_batches  # mini-batches will be generated in the stratified manner
        self.loss = loss
        self.metric = metric
        self.optimizer = optimizer
        self.optimizer_params = optimizer_params
        self.save_weights = save_weights
        self.template = 'tmp/nn_{0}_{1:.4f}.json'
        self.shuffle = shuffle
        self.random_seed = random_seed

        self.best_layers_ = None
        self.best_epoch_ = None
        self.best_val_score_ = 0.
        self.n_layers_ = len(self.layers)

        self._loss = get_metric(self.loss)
        if self.loss == 'categorical_crossentropy':
            self._loss_grad = lambda actual, predicted: -(actual - predicted)
        self._metric = get_metric(self.metric)
        self._optimizer = get_optimizer(self.optimizer,
                                        **self.optimizer_params)
        self._tts = TrainTestSplitter(shuffle=self.shuffle,
                                      random_seed=self.random_seed)

        self._initialized = False
        self._training = False
        super(NNClassifier, self).__init__(
            _y_required=True)  # TODO: split into multiple NNs later
Example #30
import numpy as np

def negative_batch_generator(data, batch_size, neg_size):  # signature assumed; the original def line is missing from this excerpt
    data_len = data.shape[0]
    dim = data.shape[1]

    while True:
        indices = np.random.choice(data_len, batch_size * neg_size)
        samples = data[indices].reshape(batch_size, neg_size, dim)
        yield samples


###############################################################################################################################
## Optimizer algorithm
#

from optimizers import get_optimizer

optimizer = get_optimizer(args)

###############################################################################################################################
## Building model

from model import create_model
import keras.backend as K

logger.info('  Building model')
model = create_model(args, overall_maxlen, vocab)
# freeze the word embedding layer
model.get_layer('word_emb').trainable = False
model.compile(optimizer=optimizer,
              loss=U.max_margin_loss,
              metrics=[U.max_margin_loss])
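The Keras-side `get_optimizer(args)` used in Example #4 and in the final snippet is likewise not shown. A rough sketch of what such an argument-driven factory could look like (the argument names `algorithm` and `learning_rate` are assumptions):

from tensorflow import keras

def get_optimizer(args):
    """Hypothetical sketch: build a Keras optimizer from parsed command-line args."""
    name = getattr(args, 'algorithm', 'adam').lower()
    lr = getattr(args, 'learning_rate', 0.001)
    if name == 'adam':
        return keras.optimizers.Adam(learning_rate=lr)
    if name == 'rmsprop':
        return keras.optimizers.RMSprop(learning_rate=lr)
    if name == 'sgd':
        return keras.optimizers.SGD(learning_rate=lr, momentum=0.9)
    raise ValueError(f'Unknown optimizer: {name}')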