Code example #1
    def on_test_epoch_start(self):
        """
        Initialize ensemble members from saved checkpoints
        """
        print('\nInitializing ensemble members from checkpoints')

        # Remove the single model used during training; the list is refilled with snapshots below
        self.models.clear()

        for i in range(self.ensemble_size):

            # Initialize ensemble members from different epochs in the training stage of the original model
            self.models.append(
                ECGResNet(self.hparams.in_channels, self.hparams.n_grps,
                          self.hparams.N, self.hparams.num_classes,
                          self.hparams.dropout, self.hparams.first_width,
                          self.hparams.stride, self.hparams.dilation))

            model_path = 'weights/ssensemble_model{}.pt'.format(i + 1)
            checkpoint = torch.load(model_path)
            self.models[i].load_state_dict(checkpoint['model_state_dict'])
            self.models[i].eval()

            print('Model {}/{} initialized\n'.format(i + 1,
                                                     self.ensemble_size))
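Code example #1 assumes that snapshot checkpoints named weights/ssensemble_model{i}.pt already exist. The sketch below shows one way such snapshots could be written during training, at the end of each learning-rate cycle; the hook and the cycle bookkeeping are illustrative assumptions, not the repository's exact saving code.

    def on_train_epoch_end(self):
        # Illustrative snapshot saving: write a checkpoint at the end of every
        # learning-rate cycle so that on_test_epoch_start() can reload it later.
        # The cycle length (max_epochs / ensemble_size) is an assumption.
        epochs_per_cycle = max(1, self.max_epochs // self.ensemble_size)
        if (self.current_epoch + 1) % epochs_per_cycle == 0:
            snapshot_index = (self.current_epoch + 1) // epochs_per_cycle
            torch.save({'model_state_dict': self.models[0].state_dict()},
                       'weights/ssensemble_model{}.pt'.format(snapshot_index))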
Code example #2
    def __init__(self, in_channels, n_grps, N, 
                 num_classes, dropout, first_width, stride, 
                 dilation, learning_rate, loss_weights=None, 
                 **kwargs):
        """
        Initializes the ECGResNetUncertaintySystem

        Args:
          in_channels: number of channels of input
          n_grps: number of ResNet groups
          N: number of blocks per group
          num_classes: number of classes of the classification problem
          dropout: probability of an element being zeroed in the dropout layer
          first_width: width of the first input
          stride: tuple with stride value per block per group
          dilation: spacing between the kernel points of the convolutional layers
          learning_rate: the learning rate of the model
          loss_weights: array of weights for the loss term
        """
 
        super().__init__()
        self.save_hyperparameters()
        self.learning_rate = learning_rate

        self.model = ECGResNet(in_channels, 
                               n_grps, N, num_classes, 
                               dropout, first_width, 
                               stride, dilation)
        if loss_weights is not None:
            weights = torch.tensor(loss_weights, dtype=torch.float)
        else:
            weights = loss_weights

        self.loss = FocalLoss(gamma=1, weights=weights)
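The excerpt above only covers the constructor. In a LightningModule of this kind, the wrapped network and the stored learning rate are typically used in forward() and configure_optimizers(); the following is a minimal sketch under that assumption (the choice of Adam is not taken from the original code).

    def forward(self, x):
        # Delegate to the wrapped ECGResNet
        return self.model(x)

    def configure_optimizers(self):
        # Assumed optimizer; the original system may configure this differently
        return torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)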
Code example #3
    def __init__(self, in_channels, n_grps, N,
                 num_classes, dropout, first_width, stride,
                 dilation, learning_rate, ensemble_size, max_epochs,
                 initial_lr, cyclical_learning_rate_type,
                 loss_weights=None, **kwargs):
        """
        Initializes the ECGResNetSnapshotEnsembleSystem

        Args:
          in_channels: number of channels of input
          n_grps: number of ResNet groups
          N: number of blocks per group
          num_classes: number of classes of the classification problem
          dropout: probability of an element being zeroed in the dropout layer
          first_width: width of the first input
          stride: tuple with stride value per block per group
          dilation: spacing between the kernel points of the convolutional layers
          learning_rate: the learning rate of the model
          ensemble_size: the number of models that make up the ensemble
          max_epochs: total number of epochs to train for
          initial_lr: the initial learning rate at the start of a learning cycle
          cyclical_learning_rate_type: the type of learning rate cycling to apply
          loss_weights: array of weights for the loss term
        """
        super().__init__()
        self.save_hyperparameters()
        self.learning_rate = learning_rate
        self.num_classes = num_classes
        self.ensemble_size = ensemble_size
        self.max_epochs = max_epochs
        self.initial_lr = initial_lr
        self.cyclical_learning_rate_type = cyclical_learning_rate_type

        self.register_buffer('IDs', torch.empty(0).type(torch.LongTensor))
        self.register_buffer('predicted_labels', torch.empty(0).type(torch.LongTensor))
        self.register_buffer('correct_predictions', torch.empty(0).type(torch.BoolTensor))
        self.register_buffer('epistemic_uncertainty', torch.empty(0).type(torch.FloatTensor))

        self.models = []
        self.optimizers = []

        # The device has to be selected manually because PyTorch Lightning does
        # not move models that are kept in a plain Python list
        manual_device = torch.device('cuda' if torch.cuda.is_available() and kwargs.get('gpus', 0) != 0 else 'cpu')
        self.manual_device = manual_device

        # Initialize a single model during training
        self.models.append(ECGResNet(in_channels, 
                           n_grps, N, num_classes, 
                           dropout, first_width, 
                           stride, dilation).to(manual_device))

        if loss_weights is not None:
            weights = torch.tensor(loss_weights, dtype=torch.float)
        else:
            weights = loss_weights

        self.loss = FocalLoss(gamma=1, weights=weights)
        create_weights_directory()
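Code example #3 stores initial_lr, max_epochs, ensemble_size, and cyclical_learning_rate_type, which together define the snapshot-ensemble schedule: the learning rate is annealed within each cycle and then reset, so every cycle ends in a local minimum that becomes one ensemble member. Below is a standalone sketch of the common shifted-cosine variant; it is a hypothetical helper for illustration, not a function taken from the original code.

import math

def cosine_cyclical_lr(epoch, initial_lr, max_epochs, ensemble_size):
    # Shifted-cosine snapshot-ensemble schedule: anneal from initial_lr towards 0
    # within each cycle of length max_epochs / ensemble_size, then restart.
    epochs_per_cycle = math.ceil(max_epochs / ensemble_size)
    position_in_cycle = epoch % epochs_per_cycle
    return (initial_lr / 2) * (math.cos(math.pi * position_in_cycle / epochs_per_cycle) + 1)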
Code example #4
    def __init__(self,
                 in_channels,
                 n_grps,
                 N,
                 num_classes,
                 dropout,
                 first_width,
                 stride,
                 dilation,
                 learning_rate,
                 ensemble_size,
                 loss_weights=None,
                 **kwargs):
        """
        Initializes the ECGResNetEnsembleSystem

        Args:
          in_channels: number of channels of input
          n_grps: number of ResNet groups
          N: number of blocks per group
          num_classes: number of classes of the classification problem
          dropout: probability of an element being zeroed in the dropout layer
          first_width: width of the first input
          stride: tuple with stride value per block per group
          dilation: spacing between the kernel points of the convolutional layers
          learning_rate: the learning rate of the model
          ensemble_size: the number of models that make up the ensemble
          loss_weights: array of weights for the loss term
        """

        super().__init__()
        self.save_hyperparameters()
        self.learning_rate = learning_rate
        self.num_classes = num_classes
        self.ensemble_size = ensemble_size

        self.IDs = torch.empty(0).type(torch.LongTensor)
        self.predicted_labels = torch.empty(0).type(torch.LongTensor)
        self.correct_predictions = torch.empty(0).type(torch.BoolTensor)
        self.epistemic_uncertainty = torch.empty(0).type(torch.FloatTensor)

        self.models = []
        self.optimizers = []

        # Initialize multiple ensemble members
        for i in range(self.ensemble_size):
            self.models.append(
                ECGResNet(in_channels, n_grps, N, num_classes, dropout,
                          first_width, stride, dilation))

        if loss_weights is not None:
            weights = torch.tensor(loss_weights, dtype=torch.float)
        else:
            weights = loss_weights

        self.loss = FocalLoss(gamma=1, weights=weights)
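The buffers IDs, predicted_labels, correct_predictions, and epistemic_uncertainty in code example #4 suggest that at test time the member outputs are combined into a single prediction plus an uncertainty estimate. The sketch below shows one common way to do this (mean of the members' softmax outputs, with the variance across members as the epistemic-uncertainty measure); it assumes each member returns class logits and is not necessarily the exact metric used by the original system.

    def ensemble_predict(self, x):
        # Illustrative combination of ensemble members; adapt if ECGResNet
        # returns auxiliary outputs in addition to the final logits.
        import torch.nn.functional as F
        with torch.no_grad():
            probs = torch.stack([F.softmax(model(x), dim=1) for model in self.models])
        mean_probs = probs.mean(dim=0)                # (batch, num_classes)
        predicted_labels = mean_probs.argmax(dim=1)
        # Variance of the predicted class's probability across members
        epistemic = probs.var(dim=0).gather(1, predicted_labels.unsqueeze(1)).squeeze(1)
        return predicted_labels, epistemic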