コード例 #1
0
ファイル: evaluate.py プロジェクト: rayadastidar/deepCR
def roc_lacosmic(image, mask, sigclip, ignore=None, sky=None, n_mask=1, seed=1, objlim=2, gain=1,
        dilate=False, rad=1):
    """ Evaluate LACosmic on a test set and return ROC curve points.

    :param image: np.ndarray((N, W, H)) image array, or list of file paths
      (simulation mode).
    :param mask: np.ndarray((N, W, H)) CR mask array, or list of file paths.
    :param sigclip: LACosmic sigma-clipping detection threshold(s).
    :param ignore: np.ndarray((N, W, H)) bad pixel array incl. saturation, etc.
    :param sky: (optional) sky background, forwarded to DatasetSim in
      simulation mode.
    :param n_mask: number of masks per image for DatasetSim.
    :param seed: random seed for DatasetSim.
    :param objlim: LACosmic contrast limit between CR and underlying object.
    :param gain: detector gain passed to LACosmic.
    :param dilate: if True, also evaluate with the predicted mask dilated by a
      disk structuring element and return both curves.
    :param rad: radius of the dilation disk (used only when dilate is True).
    :return: (tpr, fpr) if dilate is False;
      ((tpr, fpr), (tpr_dilate, fpr_dilate)) otherwise.
    """
    kernel = None
    if dilate:
        kernel = disk(rad)
    # Accept either in-memory arrays or lists of file paths (simulated data).
    if isinstance(image, np.ndarray) and image.ndim == 3:
        data = dataset(image, mask, ignore)
    elif isinstance(image[0], str):
        data = DatasetSim(image, mask, sky=sky, n_mask=n_mask, seed=seed)
    else:
        raise TypeError('Input must be numpy data arrays or list of file paths!')
    (tpr, fpr), (tpr_dilate, fpr_dilate) = _roc_lacosmic(data, sigclip=sigclip, objlim=objlim, dilate=kernel, gain=gain)
    if dilate:
        return (tpr, fpr), (tpr_dilate, fpr_dilate)
    else:
        return tpr, fpr
コード例 #2
0
ファイル: test_dataset.py プロジェクト: bbw7561135/deepCR
def test_dataset():
    """Smoke-test the dataset class: train/val split size and sky addition."""
    images = np.random.rand(10, 32, 32)
    sky_levels = np.random.rand(10)

    train_set = dataset(image=images,
                        mask=images,
                        ignore=images,
                        sky=sky_levels,
                        part='train',
                        aug_sky=[1, 1],
                        f_val=0.1)
    # With f_val=0.1 the training partition keeps 9 of the 10 images.
    assert len(train_set) == 9
    first_img, first_mask, first_ignore = train_set[0]
    # aug_sky=[1, 1] adds exactly one sky level to the image.
    assert (first_img == (images[0] + sky_levels[0])).all()
    assert (first_mask == images[0]).all()
    assert (first_ignore == images[0]).all()

    # Without a partition, the full set of 10 images is exposed.
    full_set = dataset(image=images, mask=images)
    assert len(full_set) == 10
コード例 #3
0
def roc(model,
        image,
        mask,
        ignore=None,
        thresholds=np.linspace(0.001, 0.999, 500)):
    """ Compute the ROC curve of a deepCR model on a test set.

    :param model: deepCR object
    :param image: np.ndarray((N, W, H)) image array
    :param mask: np.ndarray((N, W, H)) CR mask array
    :param ignore: np.ndarray((N, W, H)) bad pixel array incl. saturation, etc.
    :param thresholds: np.ndarray(N) FPR grid on which to evaluate ROC curves
    :return: np.ndarray(N), np.ndarray(N): TPR and FPR
    """
    eval_set = dataset(image=image, mask=mask, ignore=ignore)
    return _roc(model, eval_set, thresholds=thresholds)
コード例 #4
0
ファイル: training.py プロジェクト: bbw7561135/deepCR
    def __init__(self,
                 image,
                 mask,
                 ignore=None,
                 sky=None,
                 aug_sky=None,
                 name='model',
                 hidden=32,
                 gpu=False,
                 epoch=50,
                 batch_size=16,
                 lr=0.005,
                 auto_lr_decay=True,
                 lr_decay_patience=4,
                 lr_decay_factor=0.1,
                 save_after=1e5,
                 plot_every=10,
                 verbose=True,
                 use_tqdm=False,
                 use_tqdm_notebook=False,
                 directory='./'):
        """ This is the class for training deepCR-mask.
        :param image: np.ndarray (N*W*W) training data: image array with CR.
        :param mask: np.ndarray (N*W*W) training data: CR mask array
        :param ignore: training data: Mask for taking loss. e.g., bad pixel, saturation, etc.
        :param sky: np.ndarray (N,) (optional) sky background
        :param aug_sky: [float, float]. If sky is provided, use random sky background in the range
          [aug_sky[0] * sky, aug_sky[1] * sky]. This serves as a regularizers to allow the trained model to adapt to a
          wider range of sky background or equivalently exposure time. Remedy the fact that exposure time in the
          training set is discrete and limited. Default None means [0, 0], i.e. no sky augmentation.
        :param name: model name. model saved to name_epoch.pth
        :param hidden: number of channels for the first convolution layer. default: 32
        :param gpu: True if use GPU for training
        :param epoch: Number of epochs to train. default: 50
        :param batch_size: training batch size. default: 16
        :param lr: learning rate. default: 0.005
        :param auto_lr_decay: reduce learning rate by "lr_decay_factor" after validation loss do not decrease for
          "lr_decay_patience" + 1 epochs.
        :param lr_decay_patience: reduce learning rate by lr_decay_factor after validation loss do not decrease for
          "lr_decay_patience" + 1 epochs.
        :param lr_decay_factor: multiplicative factor by which to reduce learning rate.
        :param save_after: epoch after which trainer automatically saves model state with lowest validation loss
        :param plot_every: for every "plot_every" epoch, plot mask prediction and ground truth for 1st image in
          validation set.
        :param verbose: print validation loss and detection rates for every epoch.
        :param use_tqdm: whether to show tqdm progress bar.
        :param use_tqdm_notebook: whether to use jupyter notebook version of tqdm. Overwrites tqdm_default.
        :param directory: directory relative to current path to save trained model.
        """
        # None sentinel avoids a shared mutable default list; [0, 0] disables
        # sky augmentation, matching the previous default behavior.
        if aug_sky is None:
            aug_sky = [0, 0]
        if sky is None and aug_sky != [0, 0]:
            raise AttributeError(
                'Var (sky) is required for sky background augmentation!')
        if ignore is None:
            ignore = np.zeros_like(image)
        assert image.shape == mask.shape == ignore.shape
        assert image.shape[1] == image.shape[2]
        # Train/validation split is handled internally by dataset via `part`.
        data_train = dataset(image,
                             mask,
                             ignore,
                             sky,
                             part='train',
                             aug_sky=aug_sky)
        data_val = dataset(image,
                           mask,
                           ignore,
                           sky,
                           part='val',
                           aug_sky=aug_sky)
        self.TrainLoader = DataLoader(data_train,
                                      batch_size=batch_size,
                                      shuffle=True,
                                      num_workers=1)
        self.ValLoader = DataLoader(data_val,
                                    batch_size=batch_size,
                                    shuffle=False,
                                    num_workers=1)
        self.shape = image.shape[1]
        self.name = name

        if gpu:
            self.dtype = torch.cuda.FloatTensor
            self.dint = torch.cuda.ByteTensor
            self.network = nn.DataParallel(UNet2Sigmoid(1, 1, hidden))
            self.network.type(self.dtype)
        else:
            self.dtype = torch.FloatTensor
            self.dint = torch.ByteTensor
            # WrappedModel mirrors DataParallel's attribute layout on CPU so
            # saved state dicts stay compatible between the two paths.
            self.network = WrappedModel(UNet2Sigmoid(1, 1, hidden))
            self.network.type(self.dtype)

        self.optimizer = optim.Adam(self.network.parameters(), lr=lr)
        if auto_lr_decay:
            self.lr_scheduler = ReduceLROnPlateau(self.optimizer,
                                                  factor=lr_decay_factor,
                                                  patience=lr_decay_patience,
                                                  cooldown=2,
                                                  verbose=True,
                                                  threshold=0.005)
        else:
            # No-op scheduler keeps the training loop's scheduler calls uniform.
            self.lr_scheduler = self._void_lr_scheduler
        self.BCELoss = nn.BCELoss()
        self.validation_loss = []
        self.epoch_mask = 0
        self.save_after = save_after
        self.n_epochs = epoch
        self.every = plot_every
        self.directory = directory
        self.verbose = verbose
        self.mode0_complete = False

        if use_tqdm_notebook:
            self.tqdm = tqdm_notebook
        else:
            self.tqdm = tqdm
        self.disable_tqdm = not (use_tqdm_notebook or use_tqdm)