Example #1
    def _init_model(self):

        # Read the configuration
        init_channel = self.cfg['searching']['init_channels']
        depth = self.cfg['searching']['depth']
        meta_node_num = self.cfg['searching']['meta_node_num']

        # Setup loss function
        self.criterion = SegmentationLosses(name=self.cfg['searching']['loss']['name']).to(self.device)
        self.logger.info("Using loss {}".format(self.cfg['searching']['loss']['name']))

        # Setup Model
        model = NasUnetSearch(self.in_channels, init_channel, self.n_classes, depth,
                              meta_node_num=meta_node_num, use_sharing=self.cfg['searching']['sharing_normal'],
                              double_down_channel=self.cfg['searching']['double_down_channel'],
                              multi_gpus=self.cfg['searching']['multi_gpus'],
                              device=self.device)

        if self.device.type == 'cuda':
            if torch.cuda.device_count() > 1 and self.cfg['searching']['multi_gpus']:
                self.logger.info('use: %d gpus', torch.cuda.device_count())
                # Rebind `model` so the assignment below does not silently
                # discard the DataParallel wrapper.
                model = nn.DataParallel(model)
            else:
                self.logger.info('gpu device = %d', self.device_id)
                torch.cuda.set_device(self.device)

        self.model = model.to(self.device)
        self.logger.info('param size = %fMB', calc_parameters_count(model))

        # Setup optimizer and lr_scheduler for the model weights
        optimizer_cls1 = get_optimizer(self.cfg, phase='searching', optimizer_type='model_optimizer')
        optimizer_params1 = {k: v for k, v in self.cfg['searching']['model_optimizer'].items()
                             if k != 'name'}

        self.model_optimizer = optimizer_cls1(self.model.parameters(), **optimizer_params1)
        self.logger.info("Using model optimizer {}".format(self.model_optimizer))

        self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            self.model_optimizer, self.cfg['searching']['epoch'], eta_min=1.0e-3)

        # Setup optimizer for the architecture parameters (alphas)
        optimizer_cls2 = get_optimizer(self.cfg, phase='searching', optimizer_type='arch_optimizer')
        optimizer_params2 = {k: v for k, v in self.cfg['searching']['arch_optimizer'].items()
                             if k != 'name'}

        self.arch_optimizer = optimizer_cls2(self.model.alphas(), **optimizer_params2)

        self.architect = Architecture(self.model, arch_optimizer=self.arch_optimizer,
                                      criterion=self.criterion)
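
For reference, here is a minimal sketch of the nested configuration dict this method reads. The key names are taken from the code above; the values are illustrative placeholders, not the repository's defaults.

# Illustrative config for _init_model above; every value is a placeholder.
cfg = {
    'searching': {
        'init_channels': 16,                  # initial channel count
        'depth': 5,                           # network depth
        'meta_node_num': 4,                   # intermediate nodes per cell
        'epoch': 50,                          # T_max for CosineAnnealingLR
        'sharing_normal': True,               # use_sharing flag
        'double_down_channel': True,
        'multi_gpus': False,
        'loss': {'name': 'cross_entropy'},
        'model_optimizer': {'name': 'sgd', 'lr': 0.025, 'momentum': 0.9},
        'arch_optimizer': {'name': 'adam', 'lr': 3e-4, 'weight_decay': 1e-3},
    },
}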
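
The two optimizers implement a DARTS-style bilevel search: model_optimizer updates the network weights, while arch_optimizer, driven through the Architecture helper, updates the architecture parameters returned by model.alphas(), typically on validation data. Below is a hypothetical sketch of one alternating step; Architecture.step and the batch names are assumptions, not the repository's exact API.

def search_step(architect, model, criterion, model_optimizer,
                train_batch, valid_batch):
    # Assumed interface: architect.step() performs one arch-parameter update.
    x_train, y_train = train_batch
    x_valid, y_valid = valid_batch

    # First update the architecture parameters (alphas) on validation data.
    architect.step(x_valid, y_valid)

    # Then update the network weights on training data.
    model_optimizer.zero_grad()
    loss = criterion(model(x_train), y_train)
    loss.backward()
    model_optimizer.step()
    return loss.item()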
Example #2
    def _init_model(self):

        # Setup loss function
        criterion = SegmentationLosses(name=self.cfg['training']['loss']['name'],
                                       aux_weight=self.cfg['training']['loss']['aux_weight'],
                                       weight=self.nweight,
                                       ignore_index=-1  # ignore the background class
                                       )
        self.criterion = criterion.to(self.device)

        self.show_dice_coeff = False
        if self.cfg['data']['dataset'] in ['bladder', 'chaos', 'ultrasound_nerve', 'promise12']:
            self.show_dice_coeff = True

        self.logger.info("Using loss {}".format(self.criterion))

        # Setup Model
        try:
            genotype = getattr(geno_types, self.cfg['training']['geno_type'])
            init_channels = self.cfg['training']['init_channels']
            depth = self.cfg['training']['depth']
        except (AttributeError, KeyError):
            # Fall back to defaults when the config carries no NAS genotype.
            genotype = None
            init_channels = 0
            depth = 0
        # If aux_weight > 0 and the loss is cross_entropy, an FCN head is used as the
        # auxiliary layer and aux is set to True; if aux_weight > 0 and the loss is
        # cross_entropy_with_dice, the cross-entropy loss is combined with a dice loss instead.
        self.aux = (self.cfg['training']['loss']['aux_weight'] > 0
                    and self.cfg['training']['loss']['name'] != 'cross_entropy_with_dice')
        model = get_segmentation_model(self.model_name,
                                       dataset=self.cfg['data']['dataset'],
                                       backbone=self.cfg['training']['backbone'],
                                       aux=self.aux,
                                       c=init_channels,
                                       depth=depth,
                                       # the two arguments below are specific to nasunet
                                       genotype=genotype,
                                       double_down_channel=self.cfg['training']['double_down_channel']
                                       )

        # Initialize weights using the Kaiming method
        model.apply(weights_init)
        self.logger.info('Initialize the model weights: kaiming_uniform')

        if torch.cuda.device_count() > 1 and self.cfg['training']['multi_gpus']:
            self.logger.info('use: %d gpus', torch.cuda.device_count())
            model = nn.DataParallel(model)
        elif torch.cuda.is_available():
            # Guard with is_available() so set_device is not called on CPU-only hosts.
            self.logger.info('gpu device = %d', self.device_id)
            torch.cuda.set_device(self.device_id)
        self.model = model.to(self.device)
        self.logger.info('param size = %fMB', calc_parameters_count(model))

        # Setup optimizer, lr_scheduler for model
        optimizer_cls = get_optimizer(self.cfg, phase='training', optimizer_type='model_optimizer')
        optimizer_params = {k: v for k, v in self.cfg['training']['model_optimizer'].items()
                            if k != 'name'}

        self.model_optimizer = optimizer_cls(self.model.parameters(), **optimizer_params)
        self.logger.info("Using model optimizer {}".format(self.model_optimizer))