Example #1
 def __init__(self, data, target, dropout, kw):
     super(MDGRUClassification, self).__init__(data, target, dropout, kw)
     self.ignore_label = argget(kw, "ignore_label", None)
     self.fc_channels = argget(kw, "fc_channels", [25, 45, self.nclasses])
     self.mdgru_channels = argget(kw, "mdgru_channels", [16, 32, 64])
     self.strides = argget(kw, "strides",
                           [None for _ in self.mdgru_channels])
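
Note: argget is a helper from the surrounding codebase and its definition is not shown in these examples. A minimal sketch consistent with how it is used here (pop the key from the keyword dict so leftover arguments can be detected later, fall back to a default when absent; the real helper takes additional options) might be:

    def argget(kw, key, default=None):
        # Pop `key` from the keyword dict so unconsumed arguments can be
        # detected afterwards; return `default` when the key is absent.
        return kw.pop(key, default)

    lr = argget({"learning_rate": 0.01}, "learning_rate", 0.001)  # -> 0.01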
Example #2
 def __init__(self, data, dropout, kw):
     super(GANModel, self).__init__(data, dropout, None, kw)
     self.dropout = dropout
     self.learning_rate = argget(kw, "learning_rate", 0.001)
     self.momentum = argget(kw, "momentum", 0.9)
     self.nclasses = argget(kw, "nclasses", 2)
     self.fakedata = argget(kw, "fakedata", None)
Example #3
 def __init__(self, data, target, dropout, kw):
     super(RegressionModel, self).__init__(data, target, dropout, kw)
     self.target = target
     self.dropout = dropout
     self.learning_rate = argget(kw, "learning_rate", 0.001)
     self.nclasses = argget(kw, "nclasses", 1)
     self.momentum = argget(kw, "momentum", 0.9)
Example #4
 def __init__(self, data_shape, dropout, kw):
     super(MDGRUClassification, self).__init__(data_shape, dropout, kw)
     my_kw, kw = compile_arguments(MDGRUClassification, kw, transitive=False)
     for k, v in my_kw.items():
         setattr(self, k, v)
     self.fc_channels = argget(kw, "fc_channels", [25, 45, self.nclasses])
     self.mdgru_channels = argget(kw, "mdgru_channels", [16, 32, 64])
     self.strides = argget(kw, "strides", [None for _ in self.mdgru_channels])
     self.data_shape = data_shape
     # create logits:
     logits = []
     num_spatial_dims = len(data_shape[2:])
     last_output_channel_size = data_shape[1]
     for it, (mdgru, fcc, s) in enumerate(zip(self.mdgru_channels, self.fc_channels, self.strides)):
         mdgru_kw = {}
         mdgru_kw.update(kw)
         if it == len(self.mdgru_channels) - 1:
             mdgru_kw["noactivation"] = True
         if s is not None:
             mdgru_kw["strides"] = [s for _ in range(num_spatial_dims)] if np.isscalar(s) else s
         logits += [MDGRUBlock(num_spatial_dims, self.dropout, last_output_channel_size, mdgru, fcc, mdgru_kw)]
         last_output_channel_size = fcc if fcc is not None else mdgru
     self.model = th.nn.Sequential(*logits)
     self.losses = th.nn.modules.CrossEntropyLoss()
     print(self.model)
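
The loop above threads channel widths through the stack: each block's input width is the previous block's fully connected output width if one exists, otherwise its MDGRU width. A standalone sketch of that chaining rule (the widths here are illustrative):

    mdgru_channels, fc_channels = [16, 32, 64], [25, 45, None]
    last_output_channel_size = 1  # e.g. a single input channel
    for mdgru, fcc in zip(mdgru_channels, fc_channels):
        print("block: in={} mdgru={} fc={}".format(last_output_channel_size, mdgru, fcc))
        last_output_channel_size = fcc if fcc is not None else mdgru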
Example #5
 def __init__(self, num_input, num_units, kw):
     super(CRNNCell, self).__init__()
     crnn_kw, kw = compile_arguments(CRNNCell, kw, transitive=False)
     for k, v in crnn_kw.items():
         setattr(self, k, v)
     self._num_units = num_units
     self._num_inputs = num_input
     self.filter_size_x = argget(kw, "filter_size_x", [7, 7])
     self.filter_size_h = argget(kw, "filter_size_h", [7, 7])
     self.strides = argget(kw, "strides", None)
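
compile_arguments is used throughout to split the keyword dict into the entries a class knows about and the remainder that is handed on. Its implementation is not part of these examples; a minimal sketch, assuming each class lists its known settings in a _defaults dict (the transitive and keep_entries handling of the real helper is omitted), could be:

    def compile_arguments(cls, kw, transitive=False):
        # Split `kw` into (known, rest): entries named in cls._defaults are
        # consumed, with defaults filled in; everything else is passed on.
        known = {k: kw.pop(k, v) for k, v in getattr(cls, "_defaults", {}).items()}
        return known, kw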
Example #6
    def __init__(self, data, target, dropout, kw):
        super(MDGRUClassificationWithGeneralizedDiceLoss,
              self).__init__(data, target, dropout, kw)
        self.dice_loss_label = argget(kw, "dice_loss_label", [])
        self.dice_loss_weight = argget(kw, "dice_loss_weight", [])
        self.dice_autoweighted = argget(kw, "dice_autoweighted", False)

        if len(self.dice_loss_label) != len(
                self.dice_loss_weight) and not self.dice_autoweighted:
            raise Exception(
                "dice_loss_label and dice_loss_weight need to be of the same length"
            )
Example #7
    def __init__(self, myshape, num_units, kw):
        """Base convolutional RNN method, implements common functions and serves as abstract class.

        Property defaults contains default values for all properties of a CGRUCell that are the same for one MDGRU
        and is used to filter valid arguments.

        Parameters
        ----------
        myshape : list
            Contains shape information on the input tensor.
        num_units : int
            Defines number of output channels.
        activation : tensorflow activation function
            Can be used to override tanh as activation function.
        periodic_convolution_x : bool
            Enables circular convolution for the input.
        periodic_convolution_h : bool
            Enables circular convolution for the last output / state.
        dropconnectx : tensorflow placeholder or None
            Keep rate of dropconnect regularization on weights connecting to the input.
        dropconnecth : tensorflow placeholder or None
            Keep rate of dropconnect regularization on weights connecting to the previous state / output.
        use_bernoulli : bool
            Decides whether Bernoulli or Gaussian distributions should be used for the weight distributions with dropconnect.
        """
        super(CRNNCell, self).__init__()
        crnn_kw, kw = compile_arguments(CRNNCell, kw, transitive=False)
        for k, v in crnn_kw.items():
            setattr(self, k, v)
        self._num_units = num_units
        self.filter_size_x = argget(kw, "filter_size_x", [7, 7])
        self.filter_size_h = argget(kw, "filter_size_h", [7, 7])
        self.strides = argget(kw, "strides", None)
        if myshape is None:
            raise Exception("myshape cant be None!")
        myshapein = deepcopy(myshape)
        myshapein.pop(-2)
        myshapeout = deepcopy(myshape)
        myshapeout.pop(-2)
        myshapeout[-1] = self._num_units
        if self.strides is not None:
            if len(myshapeout[1:-1]) != len(self.strides):
                raise Exception(
                    "stride shape should match myshapeout[1:-1]! strides: {}, myshape: {}"
                    .format(self.strides, myshapeout))
            myshapeout[1:-1] = [
                int(np.round((myshapeout[1 + si]) / self.strides[si]))
                for si in range(len(myshapeout) - 2)
            ]
        self.myshapes = (myshapein, myshapeout)
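
The stride handling above divides each spatial extent by its stride and rounds to the nearest integer. A quick numeric check of that formula:

    import numpy as np

    myshapeout = [1, 64, 64, 48, 32]        # (batch, x, y, z, channels)
    strides = [2, 2, 2]
    myshapeout[1:-1] = [int(np.round(myshapeout[1 + si] / strides[si]))
                        for si in range(len(myshapeout) - 2)]
    print(myshapeout)                       # [1, 32, 32, 24, 32]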
Example #8
    def __init__(self, data, target, dropout, kw):
        super(MDGRUClassificationWithGeneralizedDiceLoss, self).__init__(data, target, dropout, kw)
        self.dice_loss_label = argget(kw, "dice_loss_label", [])
        self.dice_loss_weight = argget(kw, "dice_loss_weight", [])
        self.dice_autoweighted = argget(kw, "dice_autoweighted", False)

        self.logger = logging.getLogger('runner.model')
        self.logger.debug('initialization MDGRUClassificationWithGeneralizedDiceLoss:')
        self.logger.debug(f' self.dice_loss_label: {self.dice_loss_label}')
        self.logger.debug(f' self.dice_loss_weight: {self.dice_loss_weight}')
        self.logger.debug(f' self.dice_autoweighted: {self.dice_autoweighted}')
        
        if len(self.dice_loss_label) != len(self.dice_loss_weight) and not self.dice_autoweighted:
            raise Exception("dice_loss_label and dice_loss_weight need to be of the same length")
Example #9
    def __init__(self, modelcls, datacls, kw):
        """
        Handler for the evaluation of the model defined in modelcls, using data coming from datacls.

        Parameters
        ----------
        modelcls : cls
            Python class defining the model to evaluate
        datacls : cls
            Python class implementing the data loading and storing

        """
        self.origargs = copy.copy(kw)
        eval_kw, kw = compile_arguments(SupervisedEvaluation, kw, transitive=False)
        for k, v in eval_kw.items():
            setattr(self, k, v)
        self.w = self.windowsize
        self.p = self.padding
        self.use_tensorboard = False
        # self.dropout_rate = argget(kw, "dropout_rate", 0.5)
        self.current_epoch = 0
        self.current_iteration = 0
        # create datasets for training, validation and testing:
        locs = [[None, l] if l is None or len(l) > 1 else [os.path.join(self.datapath, l[0]), None] for l in
                [self.locationtraining, self.locationvalidation, self.locationtesting]]
        paramstraining = [self.w, self.p] + locs[0]
        paramsvalidation = [self.windowsizevalidation if self.windowsizevalidation is not None else self.w,
                            self.paddingvalidation if self.paddingvalidation is not None else self.p] + locs[1]
        paramstesting = [self.windowsizetesting if self.windowsizetesting is not None else self.w,
                         self.paddingtesting if self.paddingtesting is not None else self.p] + locs[2]
        kwdata, kw = compile_arguments(datacls, kw, True, keep_entries=True)
        kwcopy = copy.copy(kwdata)
        kwcopy['nclasses'] = self.output_dims
        kwcopy['batch_size'] = self.batch_size
        self.trdc = datacls(*paramstraining, kw=copy.copy(kwcopy))
        testkw = copy.copy(kwcopy)
        testkw['batch_size'] = testkw['batch_size'] if not self.testbatchsize else self.testbatchsize
        valkw = copy.copy(testkw)
        testkw['ignore_missing_mask'] = True
        self.tedc = datacls(*paramstesting, kw=testkw)
        self.valdc = datacls(*paramsvalidation, kw=valkw)
        self.currit = 0

        self.show_dice = argget(kw, "show_dice", not self.show_f1)
        self.binary_evaluation = self.show_dice or self.show_f1 or self.show_f05 or self.show_f2
        self.estimatefilename = argget(kw, "estimatefilename", "estimate")
        self.gpu = argget(kw, "gpus", [0])
        self.get_train_session = lambda: self
        self.get_test_session = lambda: self
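
The locs expression above encodes a small rule: a single-entry location list is joined onto datapath, while None or a multi-entry list is passed through as explicit timepoints. Spelled out on its own (paths hypothetical):

    import os

    datapath = "/data"
    for l in [["train"], ["/a/tp1", "/a/tp2"], None]:
        print([None, l] if l is None or len(l) > 1
              else [os.path.join(datapath, l[0]), None])
    # ['/data/train', None], [None, ['/a/tp1', '/a/tp2']], [None, None]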
Example #10
    def __init__(self, data, dropout, kw):
        super(Model, self).__init__()
        model_kw, kw = compile_arguments(Model, kw, transitive=False)
        for k, v in model_kw.items():
            setattr(self, k, v)
        th.cuda.manual_seed_all(self.model_seed)
        th.manual_seed(self.model_seed)
        self.origargs = copy.copy(kw)

        if argget(kw, "whiten", False):
            print('parameter whiten not supported with pytorch version')
            # self.data = batch_norm(data, "bn", self.training, m=32)
        else:
            self.data = data
        self.dimensions = argget(kw, "dimensions", None)
Example #11
    def __init__(self, data, target, dropout, kw):
        self.origargs = copy.copy(kw)
        model_kw, kw = compile_arguments(Model, kw, transitive=False)
        for k, v in model_kw.items():
            setattr(self, k, v)

        # self.model_seed = argget(kw, 'model_seed', 12345678)
        tf.set_random_seed(self.model_seed)
        super(Model, self).__init__(data, target, dropout, kw)
        self.training = argget(kw, "training", tf.constant(True))
        self.global_step = tf.Variable(0, name="global_step", trainable=False)
        self.use_tensorboard = argget(kw, "use_tensorboard", True)
        #
        # if argget(kw, "whiten", False):
        #     self.data = batch_norm(data, "bn", self.training, m=32)
        # else:
        self.data = data
        # pass
        self.dimensions = argget(kw, "dimensions", None)
Example #12
    def __init__(self, num_spatial_dims, dropout, num_input, num_hidden,
                 num_output, kw):
        super(MDGRUBlock, self).__init__()
        mdrnn_net_kw, kw = compile_arguments(MDGRUBlock, kw, transitive=False)
        for k, v in mdrnn_net_kw.items():
            setattr(self, k, v)
        self.mdrnn_kw, kw = compile_arguments(MDRNN, kw, transitive=True)
        self.crnn_kw, kw = compile_arguments(self.mdrnn_kw['crnn_class'],
                                             kw,
                                             transitive=True)

        spatial_dimensions = argget(kw, "dimensions", None)
        if spatial_dimensions is None:
            spatial_dimensions = [i for i in range(num_spatial_dims)]
        mdrnn_kw = {}
        mdrnn_kw.update(self.mdrnn_kw)
        mdrnn_kw.update(self.crnn_kw)
        mdrnn_kw.update(kw)

        mdrnn_kw["num_hidden"] = num_hidden
        mdrnn_kw["num_input"] = num_input
        mdrnn_kw["name"] = "mdgru"
        model = [MDRNN(dropout, spatial_dimensions, mdrnn_kw)]
        if num_spatial_dims == 2:
            convop = th.nn.Conv2d
            kernel = [1, 1]

        elif num_spatial_dims == 3:
            convop = th.nn.Conv3d
            kernel = [1, 1, 1]
        else:
            raise Exception(
                'pytorch cannot handle more than 3 dimensions for convolution')
        if num_output is not None:
            model += [convop(num_hidden, num_output, kernel)]
            if not self.noactivation:
                model += [self.vwfc_activation()]
        self.model = th.nn.Sequential(*model)
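
The final convolution above acts as a voxel-wise fully connected layer: with kernel size 1 it mixes channels independently at every spatial position. A self-contained illustration:

    import torch as th

    x = th.rand(2, 16, 8, 8, 8)                  # (batch, channels, D, H, W)
    vwfc = th.nn.Conv3d(16, 4, kernel_size=1)    # channel mixing only
    print(vwfc(x).shape)                         # torch.Size([2, 4, 8, 8, 8])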
Example #13
 def __init__(self, data, dropout, kw):
     super(ReconstructionModel, self).__init__(data, dropout, None, kw)
     self.dropout = dropout
     self.learning_rate = argget(kw, "learning_rate", 0.001)
     self.nclasses = argget(kw, "nclasses", 2)
Example #14
 def __init__(self, data, dropout, kw):
     super(ClassificationModel, self).__init__(data, dropout, kw)
     self.dropout = dropout
     self.learning_rate = argget(kw, "learning_rate", 1)
     self.momentum = argget(kw, "momentum", 0.9)
     self.nclasses = argget(kw, "nclasses", 2)
Example #15
def remove_example_nifti_data(**kw):
    testdatadir = argget(kw, "testdatadir", ".")
    datafolder = argget(kw, "datafolder", "nifti")
    shutil.rmtree(os.path.join(testdatadir, datafolder))
Example #16
    def mdgru_bb(self, inp, dropout, num_hidden, num_output, noactivation=False,
                 name=None, **kw):
        """Convenience function to combine a MDRNN layer with a voxel-wise fully connected layer.

        :param inp: input data
        :param dropout: dropout rate
        :param num_hidden: number of hidden units, output units of the MDRNN
        :param num_output: number of output units of the voxel-wise fully connected layer
                           (Can be None -> no voxel-wise fully connected layer)
        :param noactivation: Flag to disable activation of voxel-wise fully connected layer
        :param name: Name for this particular MDRNN + vw fully connected layer
        :param kw: Arguments for MDRNN and the vw fully connected layer (can override this class' attributes)
        :return: Output of the MDRNN, optionally passed through the voxel-wise fully connected layer
        """
        dimensions = argget(kw, "dimensions", None)
        if dimensions is None:
            dimensions = [i + 1 for i, v in enumerate(inp.get_shape()[1:-1]) if v > 1]
        mdrnn_kw = {}
        mdrnn_kw.update(self.mdrnn_kw)
        mdrnn_kw.update(self.crnn_kw)
        mdrnn_kw.update(kw)

        add_e_bn = argget(kw, "add_e_bn", self.add_e_bn)
        resmdgru = argget(kw, "resmdgru", self.resmdgru)
        mdrnn_kw["num_hidden"] = num_hidden
        mdrnn_kw["name"] = "mdgru"
        with tf.variable_scope(name):
            mdgruclass = MDRNN(inp, dropout, dimensions, mdrnn_kw)
            mdgru = mdgruclass()
            if num_output is not None:
                mdgruinnershape = mdgru.get_shape()[1:-1].as_list()
                doreshape = False
                if len(mdgruinnershape) >= 3:
                    newshape = [-1, np.prod(mdgruinnershape), mdgru.get_shape().as_list()[-1]]
                    mdgru = tf.reshape(mdgru, newshape)
                    doreshape = True
                num_input = mdgru.get_shape().as_list()[-1]
                filtershape = [1 for _ in mdgru.get_shape()[1:-1]] + [num_input, num_output]

                numelem = (num_output + num_input) / 2
                uniform = False
                if self.vwfc_activation in [tf.nn.elu, tf.nn.relu]:
                    numelem = (num_input) / 2
                    uniform = False
                W = tf.get_variable(
                    "W", filtershape, dtype=tf.float32, initializer=get_modified_xavier_method(numelem, uniform))
                b = tf.get_variable("b", [num_output], initializer=tf.constant_initializer(0))

                mdgru = tf.nn.convolution(mdgru, W, padding="SAME")

                if resmdgru:
                    if doreshape:
                        inp = tf.reshape(inp,
                                         [-1, np.prod(inp.get_shape()[1:-1].as_list()), inp.get_shape().as_list()[-1]])
                    resW = tf.get_variable("resW",
                                           [1 for _ in inp.get_shape().as_list()[1:-1]] + [
                                               inp.get_shape().as_list()[-1], num_output],
                                           dtype=tf.float32, initializer=get_modified_xavier_method(num_output, False))
                    mdgru = tf.nn.convolution(inp, resW, padding="SAME") + mdgru
                if add_e_bn:
                    mdgru = batch_norm(mdgru, "bne", mdgruclass.istraining, bias=False, m=mdgruclass.min_mini_batch)
                mdgru = mdgru + b
                if doreshape:
                    mdgru = tf.reshape(mdgru, [-1] + mdgruinnershape + [mdgru.get_shape().as_list()[-1]])
            if noactivation:
                return mdgru
            else:
                return self.vwfc_activation(mdgru)
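
The reshape-and-convolve logic above relies on the fact that a convolution with a filter of shape (1, ..., 1, num_input, num_output) is just the same linear map applied at every voxel. A small numpy sketch of that equivalence:

    import numpy as np

    x = np.random.rand(4, 4, 3)   # (H, W, num_input)
    W = np.random.rand(3, 5)      # (num_input, num_output)
    out = (x.reshape(-1, 3) @ W).reshape(4, 4, 5)
    print(out.shape)              # every voxel mapped by the same 3x5 matrix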
Example #17
    def __init__(self, evaluationinstance, **kw):
        """

        Parameters
        ----------
        evaluationinstance : instance of an evaluation class
            The instance on which train and test routines will be called.
        """
        self.origargs = copy.deepcopy(kw)
        runner_kw, kw = compile_arguments(Runner, kw, transitive=False)
        for k, v in runner_kw.items():
            setattr(self, k, v)

        if self.save_each is None:
            self.save_each = self.test_each

        if self.notifyme:
            try:
                # import json
                data = json.load(open('../config.json'))
                nm = dict(chat_id=data['chat_id'], token=data['token'])
                try:
                    nm['chat_id'] = int(self.notifyme)
                except Exception:
                    pass
                finally:
                    self.notifyme = nm
            except Exception:
                # we give up
                print('notifyme id not understood')

        # prelogging:
        # experiments = argget(kw, 'experimentloc', os.path.expanduser('~/experiments'))
        self.runfile = [
            f[1] for f in inspect.stack() if re.search(r"RUN.*\.py", f[1])
        ][0]

        if self.optionname is None:
            self.optionname = [
                hashlib.sha256(json.dumps(
                    self.fullparameters).encode('utf8')).hexdigest()
            ]
        elif not isinstance(self.optionname, list):
            self.optionname = [self.optionname]

        self.estimatefilenames = self.optionname
        if isinstance(self.optionname, list):
            pf = "-".join(self.optionname)
            if len(pf) > 40:
                pf = pf[:39] + "..."
        else:
            pf = self.optionname
        self.experiments_postfix = '_' + pf
        experiments_nots = os.path.join(
            self.experimentloc,
            '{}'.format(self.runfile[self.runfile.index("RUN_") + 4:-3] +
                        self.experiments_postfix))
        self.experiments = os.path.join(experiments_nots,
                                        str(int(time.time())))
        self.cachefolder = os.path.join(self.experiments, 'cache')
        os.makedirs(self.cachefolder)

        # Add Logging.FileHandler (StreamHandler was added already in RUN_mdgru.py)
        loggers = [
            logging.getLogger(n)
            for n in ['model', 'eval', 'runner', 'helper', 'data']
        ]
        formatter = logging.Formatter(
            '%(asctime)s %(name)s\t%(levelname)s:\t%(message)s')
        logfile = argget(kw, 'logfile',
                         os.path.join(self.cachefolder, 'log.txt'))
        fh = logging.FileHandler(logfile)
        fh.setLevel(argget(kw, 'logfileloglvl', logging.DEBUG))
        fh.setFormatter(formatter)
        # ch = logging.StreamHandler()
        # ch.setFormatter(formatter)
        # ch.setLevel(argget(kw, 'loglvl', logging.DEBUG))
        for logger in loggers:
            logger.setLevel(logging.DEBUG)
            # logger.addHandler(ch)
            logger.addHandler(fh)

        for k in self.origargs:
            logging.getLogger('runner').info('args runner {}:{}'.format(
                k, self.origargs[k]))
        self.ev = evaluationinstance
        for k in self.ev.origargs:
            logging.getLogger('runner').info(
                'args eval/data/model {}:{}'.format(k, self.ev.origargs[k]))
        # for k in self.ev.trdc.origargs:
        #     logging.getLogger('data').info(' trdc arg {}:{}'.format(k, self.ev.trdc.origargs[k]))
        # for k in self.ev.tedc.origargs:
        #     logging.getLogger('data').info(' tedc arg {}:{}'.format(k, self.ev.tedc.origargs[k]))
        # for k in self.ev.valdc.origargs:
        #     logging.getLogger('data').info('valdc arg {}:{}'.format(k, self.ev.valdc.origargs[k]))
        # for k in self.ev.model.origargs:
        #     logging.getLogger('model').info('arg {}:{}'.format(k, self.ev.model.origargs[k]))
        if self.only_train or (self.ev.trdc == self.ev.tedc
                               and self.ev.valdc != self.ev.trdc):
            self.episodes = ['train']
        elif self.only_test or (self.ev.trdc == self.ev.tedc
                                and self.ev.valdc == self.ev.tedc):
            self.episodes = ['evaluate']
        else:
            self.episodes = ['train', 'evaluate']
        # self.episodes = argget(kw, 'episodes', ['train', 'evaluate'])
        # self.epochs = argget(kw, 'epochs', 1)
        if self.iterations is None:
            self.its_per_epoch = self.ev.trdc.get_data_dims(
            )[0] // self.ev.batch_size
        else:
            self.epochs = 0
            self.its_per_epoch = self.iterations
        # self.its_per_epoch = argget(kw, 'its_per_epoch', self.ev.trdc.get_data_dims()[0] // self.ev.batch_size)
        # self.checkpointfiles = argget(kw, 'checkpointfiles', None)
        self.estimatefilenames = self.optionname  #argget(kw, 'estimatefilenames', None)
        if isinstance(self.checkpointfiles, list):
            if 'train' in self.episodes and len(self.checkpointfiles) > 1:
                logging.getLogger('runner').error(
                    'Multiple checkpoints are only allowed if only testing is performed.'
                )
                exit(1)
        else:
            self.checkpointfiles = [self.checkpointfiles]
        # if not isinstance(self.estimatefilenames, list):
        #     self.estimatefilenames = [self.estimatefilenames]
        if len(self.checkpointfiles) != len(self.estimatefilenames):
            if len(self.estimatefilenames) != 1:
                logging.getLogger('runner').error(
                    'Optionnames must match number of checkpoint files or have length 1!'
                )
                exit(1)
            else:
                self.estimatefilenames = [
                    self.estimatefilenames[0] +
                    "-{}-{}".format(i, os.path.basename(c))
                    for i, c in enumerate(self.checkpointfiles)
                ]

        self.plotfolder = os.path.join(self.experiments, 'plot')
        self.plot_scaling = argget(kw, 'plot_scaling', 1e-8)

        # self.display_each = argget(kw, 'display_each', 100)
        # self.test_each = argget(kw, 'test_each', 100)
        # self.save_each = argget(kw, 'save_each', self.test_each)
        # self.plot_each = argget(kw, 'plot_each', self.test_each)
        # self.test_size = argget(kw, 'test_size', 1)  # batch_size for tests
        # self.test_iters = argget(kw, 'test_iters', 1)
        self._test_pick_iteration = self.test_each - 1 if not self.test_first else 0
        # self._test_pick_iteration = argget(kw, 'test_first', ifset=0, default=self.test_each - 1)
        # self.perform_full_image_validation = argget(kw, 'perform_full_image_validation', 1)
        # self.save_validation_results = argget(kw, 'show_testing_results', False)
        force_symlink(self.experiments, os.path.join(experiments_nots,
                                                     "latest"))
        os.makedirs(self.plotfolder)
        # self.notifyme = argget(kw, 'notifyme', False)
        # self.results_to_csv = argget(kw, 'results_to_csv', False)

        self.train_losses = []
        self.test_losses = []
        self.val_losses = []

        # remove full parameters since it would not be an ignored parameter and confuse us
        # kw.pop('fullparameters')
        if kw:
            logging.getLogger('runner').warning(
                'the following args were ignored: {}'.format(kw))
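
The notifyme branch above reads ../config.json; judging from the keys it accesses, a minimal file would have roughly this shape (Telegram-style bot credentials assumed, values hypothetical):

    {"chat_id": 123456789, "token": "<bot-token>"}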
Example #18
    def __init__(self, w, p, location=None, tps=None, kw=None):
        """

        Parameters
        ----------
        w : list
            subvolume/patchsize
        p : list
            amount of padding per dimension.
        location : str, optional
            Root folder where samples defined by featurefiles and maskfiles lie. Needs to be provided if tps is not.
        tps : list, optional
            List of locations or samples defined by featurefiles and maskfiles. Needs to be provided if location is not.

        """
        kw = {} if kw is None else kw  # avoid a shared mutable default argument
        super(GridDataCollection, self).__init__(kw)
        self.origargs.update({"location": location, "tps": tps})
        self.w = np.ndarray.tolist(w) if not isinstance(w, list) else w
        self.p = p

        data_kw, kw = compile_arguments(GridDataCollection, kw, transitive=False)
        for k, v in data_kw.items():
            setattr(self, k, v)
        if len(self.w) != 3:
            self.correct_orientation = False
        if tps is not None:
            self.tps = []
            for t in tps:
                self.tps.extend(DataCollection.get_all_tps(
                    t, self.featurefiles,
                    self.maskfiles if not self.ignore_missing_mask else []))
        elif location is not None:
            if callable(location):
                self.tps = [location]
            else:
                self.tps = DataCollection.get_all_tps(location, self.featurefiles,
                                                      self.maskfiles if not self.ignore_missing_mask else [])
            if len(self.tps) == 0:
                raise Exception(
                    'no timepoints at location {} containing both {} and {}'.format(location, self.maskfiles,
                                                                                    self.featurefiles))
        else:
            raise Exception('either tps or location has to be set')

        if len(self.tps) == 0:
            raise Exception('there were no timepoints provided and location was not set')

        def oneorn(paramname):
            t = getattr(self, paramname)
            if len(t) == 1:
                setattr(self, paramname, t * len(self.w))
            elif len(t) == len(self.w) or len(t) == 0:
                return
            else:
                raise Exception('Parameter {} needs to have the same number of entries as windowsize'.format(paramname))

        oneorn('p')
        oneorn('subtractGaussSigma')
        oneorn('deform')
        oneorn('deformSigma')
        oneorn('mirror')
        oneorn('scaling')
        oneorn('shift')
        oneorn('presize_for_normalization')

        self.deformrandomstate = np.random.RandomState(self.deformseed)
        if self.choose_mask_at_random:
            self.random_mask_state = np.random.RandomState(argget(kw, 'randommaskseed', 1337))
        self.imagedict = {}
        self.numoffeatures = argget(kw, 'numoffeatures', len(self._get_features_and_masks(self.tps[0])[0]))
        self.sample_counter = 0

        if not self.lazy and self.preloadall:
            self.preload_all()
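
The oneorn helper above broadcasts length-1 settings to one entry per spatial dimension and rejects anything else. The same rule as a standalone function (name hypothetical):

    def broadcast_like_windowsize(t, ndim):
        # Length-1 lists are repeated per dimension; empty or already
        # matching lists pass through; everything else is an error.
        if len(t) == 1:
            return t * ndim
        if len(t) in (0, ndim):
            return t
        raise ValueError("expected 0, 1 or {} entries, got {}".format(ndim, len(t)))

    print(broadcast_like_windowsize([2], 3))  # [2, 2, 2]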
Example #19
 def __init__(self,
              inputarr,
              dropout,
              dimensions=None,
              layers=1,
              num_hidden=100,
              name="mdgru",
              **kw):
     self.inputarr = inputarr
     self.generator = argget(kw, "generator", False)
     self.add_x_bn = argget(kw, "bnx", False)
     self.add_h_bn = argget(kw, "bnh", False)
     self.add_a_bn = argget(kw, "bna", False)
     self.form = argget(kw, "form", "NDHWC")
     self.istraining = argget(kw, "istraining", tf.constant(True))
     if dimensions is None:
         self.dimensions = [
             x + 1 for x in range(len(inputarr.get_shape()[1:-1]))
         ]
     else:
         self.dimensions = dimensions
     self.layers = layers
     self.num_hidden = num_hidden
     self.name = name
     self.dropout = dropout
     self.use_dropconnectx = argget(kw, "use_dropconnectx", True)
     self.use_dropconnecth = argget(kw, "use_dropconnecth", False)
     self.use_bernoulli = argget(kw, "use_bernoulli_dropconnect", False)
     self.mask_padding = argget(kw, "maskpadding", None)
     self.m = argget(kw, "min_mini_batch", None)
     self.favor_speed_over_memory = argget(kw, "favor_speed_over_memory",
                                           False)
     self.filter_sizes = argget(kw, "filter_sizes", [7, 7, 7])
     if any([self.add_x_bn, self.add_h_bn, self.add_a_bn]):
         raise Exception("bn not allowed for caffemdgru")
Example #20
def create_example_nifti_data(**kw):
    shape = argget(kw, "shape", (100, 100, 40))
    border_edges = argget(kw, "border_edges", [[30, 70], [30, 70], [10, 30]])
    edge_variation = argget(kw, "edge_variation", (15, 15, 8))
    rater_variation = argget(kw, "rater_variation", (2, 2, 2))
    patients = argget(kw, "patients", [
        "ruedi", "hans", "eva", "micha", "joerg", "maya", "frieda", "anna",
        "chelsea", "flynn"
    ])
    patient_belongs_to = argget(kw, "patient_belongs_to", [
        "train", "train", "train", "train", "train", "val", "val", "test",
        "test", "test"
    ])
    testdatadir = argget(kw, "testdatadir", ".")
    affine = np.zeros((4, 4))
    affine[0, 2] = 1
    affine[1, 0] = 1
    affine[2, 1] = 1
    affine[3, 3] = 1
    datafolder = argget(kw, "datafolder", "nifti")
    testdatadirnifti = os.path.join(testdatadir, datafolder)
    if os.path.exists(testdatadirnifti):
        print(
            'Files have already been generated. If something is amiss, delete the nifti folder and start again!'
        )
        return
    for f, pat in zip(patient_belongs_to, patients):
        patdir = os.path.join(os.path.join(testdatadirnifti, f), pat)
        if not os.path.exists(patdir):
            os.makedirs(patdir)
        gt_mask = np.zeros(shape)
        gt_borders = [[
            x[0] + (np.random.random() * 2 - 1) * e,
            x[1] + (np.random.random() * 2 - 1) * e
        ] for x, e in zip(border_edges, edge_variation)]
        gt_borders = np.uint32(gt_borders)
        gt_mask[gt_borders[0][0]:gt_borders[0][1],
                gt_borders[1][0]:gt_borders[1][1],
                gt_borders[2][0]:gt_borders[2][1]] = 1

        for file in ["flair", "mprage", "t2", "pd"]:
            myfile = os.path.join(patdir, file + ".nii.gz")
            if not os.path.exists(myfile):
                dat = np.float32(np.random.random(shape)) * np.random.randint(
                    200, 2400) + np.random.randint(200, 800)
                dat += gt_mask * np.random.random(shape) * np.random.randint(
                    200, 400)
                nib.save(nib.Nifti1Image(dat, affine), myfile)
        for file in ["mask1", "mask2"]:
            myfile = os.path.join(patdir, file + ".nii.gz")
            if not os.path.exists(myfile):
                dat = np.zeros(shape, dtype=np.uint8)
                rater_borders = [[
                    x[0] + (np.random.random() * 2 - 1) * e,
                    x[1] + (np.random.random() * 2 - 1) * e
                ] for x, e in zip(gt_borders, rater_variation)]
                rater_borders = np.uint32(rater_borders)
                dat[rater_borders[0][0]:rater_borders[0][1],
                    rater_borders[1][0]:rater_borders[1][1],
                    rater_borders[2][0]:rater_borders[2][1]] = 1
                nib.save(nib.Nifti1Image(dat, affine), myfile)
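
Examples #15 and #20 pair up: one generates the synthetic nifti dataset, the other deletes it again. A hypothetical round trip (directory name assumed):

    create_example_nifti_data(testdatadir="/tmp/mdgru_test")
    remove_example_nifti_data(testdatadir="/tmp/mdgru_test")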