Example no. 1
 def __init__(self, model, collectioninst, **kw):
     self.resultsfile = argget(kw, 'resultsfile', 'temp.csv')
     self.evaluate_uncertainty_times = argget(kw, "evaluate_uncertainty_times", 1)
     self.evaluate_uncertainty_dropout = argget(kw, "evaluate_uncertainty_dropout",
                                                1.0)  # these defaults ensure that uncertainty is not evaluated unless explicitly requested
     super(LargeVolumeLocationCoordinateClassificationEvaluation, self).__init__(model, collectioninst,
                                                                                 **kw)
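
Note: every example in this collection pulls its configuration through the argget helper. The following is a minimal sketch of its presumed semantics, inferred from the call sites in these examples (keep, ifset and default appear as keywords further below); the actual implementation may differ.

def argget(kw, key, default=None, keep=False, ifset=None):
    """Fetch `key` from the kwargs dict `kw`, falling back to `default`.

    keep: leave the entry in `kw` instead of popping it.
    ifset: returned instead of the stored value whenever `key` is present.
    """
    if key in kw:
        value = kw[key] if keep else kw.pop(key)
        return value if ifset is None else ifset
    return default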
Example no. 2
 def __init__(self, collectioninst, **kw):
     self.origargs = copy.deepcopy(kw)
     self.use_tensorboard = argget(kw, "use_tensorboard", True, keep=True)
     if self.use_tensorboard:
         self.image_summaries_each = argget(kw, 'image_summaries_each', 100)
     self.dropout_rate = argget(kw, "dropout_rate", 0.5)
     self.current_epoch = 0
     self.current_iteration = 0
     self.restore_optimistically = argget(kw, 'restore_optimistically',
                                          False)
Example no. 3
    def __init__(self, *w, **kw):
        super(CGRUCell, self).__init__(*w, **kw)
        self.bnx = argget(kw, "add_x_bn", False)
        self.bnh = argget(kw, "add_h_bn", False)
        self.bna = argget(kw, "add_a_bn", False)
        self.m = argget(kw, "m", None)
        self.istraining = argget(kw, 'istraining', tf.constant(True))
        self.resgrux = argget(kw, "resgrux", False)
        self.resgruh = argget(kw, "resgruh", False)
        self.put_r_back = argget(kw, "put_r_back", False)
        self.regularize_state = argget(kw, 'use_dropconnect_on_state', False)

        filter_shape_h_candidate = self.filter_size_h + [self._num_units] * 2
        filter_shape_h_gates = deepcopy(filter_shape_h_candidate)
        filter_shape_h_gates[-1] *= 2
        filter_shape_x_candidate = self.filter_size_x + [
            self.myshapes[0][-1], self._num_units
        ]
        filter_shape_x_gates = deepcopy(filter_shape_x_candidate)
        filter_shape_x_gates[-1] *= 2

        self.dc_h_factor_gates = self._get_dropconnect(filter_shape_h_gates,
                                                       self.dropconnecth,
                                                       "mydropconnecthgates")
        self.dc_h_factor_candidate = self._get_dropconnect(
            filter_shape_h_candidate, self.dropconnecth,
            "mydropconnecthcandidate")
        self.dc_x_factor_gates = self._get_dropconnect(filter_shape_x_gates,
                                                       self.dropconnectx,
                                                       "mydropconnectxgates")
        self.dc_x_factor_candidate = self._get_dropconnect(
            filter_shape_x_candidate, self.dropconnectx,
            "mydropconnectxcandidate")
Example no. 4
 def __init__(self, myshape, num_units, activation=tf.nn.tanh, **kw):
     super(CRNNCell, self).__init__()
     self._activation = activation
     self._num_units = num_units
     self.gate = argget(kw, 'gate', sigmoid)
     self.periodicconvolution_x = argget(kw, 'periodicconvolution_x', False)
     self.periodicconvolution_h = argget(kw, 'periodicconvolution_h', False)
     self.filter_size_x = argget(kw, 'filter_size_x', [7, 7])
     self.filter_size_h = argget(kw, 'filter_size_h', [7, 7])
     self.use_bernoulli = argget(kw, 'use_bernoulli', False)
     self.dropconnectx = argget(kw, "dropconnectx", None)
     self.dropconnecth = argget(kw, "dropconnecth", None)
     self.strides = argget(kw, "strides", None)
     if myshape is None:
         raise Exception('myshape cannot be None!')
     myshapein = deepcopy(myshape)
     myshapein.pop(-2)
     myshapeout = deepcopy(myshape)
     myshapeout.pop(-2)
     myshapeout[-1] = self._num_units
     if self.strides is not None:
         if len(myshapeout[1:-1]) != len(self.strides):
             raise Exception(
                 'stride shape should match myshapeout[1:-1]! strides: {}, myshape: {}'.format(self.strides,
                                                                                               myshapeout))
         myshapeout[1:-1] = [int(np.round((myshapeout[1 + si]) / self.strides[si])) for si in
                             range(len(myshapeout) - 2)]
     self.myshapes = (myshapein, myshapeout)
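
The stride handling above rounds each spatial output dimension to the nearest integer. A small self-contained check with hypothetical values:

import numpy as np

myshapeout = [1, 64, 64, 16]  # hypothetical [batch, dim1, dim2, num_units]
strides = [2, 2]
myshapeout[1:-1] = [int(np.round(myshapeout[1 + si] / strides[si]))
                    for si in range(len(myshapeout) - 2)]
assert myshapeout == [1, 32, 32, 16]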
Example no. 5
 def __init__(self, model, collectioninst, **kw):
     self.evaluate_uncertainty_times = argget(kw,
                                              "evaluate_uncertainty_times",
                                              1)
     self.evaluate_uncertainty_dropout = argget(
         kw, "evaluate_uncertainty_dropout", 1.0
     )  # these defaults ensure that uncertainty is not evaluated unless explicitly requested
     self.evaluate_uncertainty_saveall = argget(
         kw, "evaluate_uncertainty_saveall", False)
     super(LargeVolumeEvaluation, self).__init__(model, collectioninst,
                                                 **kw)
Example no. 6
    def __init__(self,
                 featurefiles,
                 maskfiles=[],
                 location=None,
                 tps=None,
                 **kw):
        super(ThreadedGridDataCollection,
              self).__init__(featurefiles, maskfiles, location, tps, **kw)

        self._batchsize = argget(kw, 'batchsize', 1)
        self.num_threads = argget(kw, 'num_threads', 1)
        self.curr_thread = 0
        self._batch = [None for _ in range(self.num_threads)]
        self._batchlabs = [None for _ in range(self.num_threads)]
        self._preloadthreads = [
            Thread(target=self._preload_random_sample,
                   args=(
                       self._batchsize,
                       it,
                   )) for it in range(self.num_threads)
        ]
        for t in self._preloadthreads:
            t.start()
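
A hypothetical instantiation, assuming the constructor shown above and the directory layout produced by the test-data generator of Example no. 7; file names and paths are illustrative only:

collection = ThreadedGridDataCollection(
    featurefiles=["flair.nii.gz", "t2.nii.gz"],
    maskfiles=["mask1.nii.gz"],
    location="nifti/train",
    batchsize=4,    # consumed via argget in __init__
    num_threads=2,  # two background preloading threads
)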
Example no. 7
def create_example_nifti_data_2d(**kw):
    shape = argget(kw, "shape", (256, 256))
    border_edges = argget(kw, "border_edges", [[64, 192], [64, 192]])
    edge_variation = argget(kw, "edge_variation", (25, 25))
    rater_variation = argget(kw, "rater_variation", (5, 5))
    patients = argget(kw, "patients",
                      ["ruedi", "hans", "eva", "micha", "joerg", "maya", "frieda", "anna", "chelsea", "flynn"])
    patient_belongs_to = argget(kw, "patient_belongs_to",
                                ["train", "train", "train", "train", "train", "val", "val", "test", "test", "test"])
    testdatadir = argget(kw, "testdatadir", ".")
    affine = np.eye(4)
    print("creating new testdata at ", testdatadir)
    datafolder = argget(kw, "datafolder", "nifti")
    testdatadir = os.path.join(testdatadir, datafolder)
    if os.path.exists(testdatadir):
        print("Files have already been generated. If something is amiss, delete the nifti folder and start again!")
        return
    for f, pat in zip(patient_belongs_to, patients):
        patdir = os.path.join(os.path.join(testdatadir, f), pat)
        if not os.path.exists(patdir):
            os.makedirs(patdir)
        gt_mask = np.zeros(shape)
        gt_borders = [[x[0] + (np.random.random() * 2 - 1) * e,
                       x[1] + (np.random.random() * 2 - 1) * e] for x, e in zip(border_edges, edge_variation)]
        gt_borders = np.uint32(gt_borders)
        gt_mask[gt_borders[0][0]:gt_borders[0][1],
                gt_borders[1][0]:gt_borders[1][1]] = 1

        for file in ["flair", "mprage", "t2", "pd"]:
            myfile = os.path.join(patdir, file + ".nii.gz")
            if not os.path.exists(myfile):
                dat = np.float32(np.random.random(shape)) * np.random.randint(200, 2400) + np.random.randint(200, 800)
                dat += gt_mask * np.random.random(shape) * np.random.randint(200, 400)
                nib.save(nib.Nifti1Image(dat, affine), myfile)
        for file in ["mask1", "mask2"]:
            myfile = os.path.join(patdir, file + ".nii.gz")
            if not os.path.exists(myfile):
                dat = np.zeros(shape, dtype=np.uint8)
                rater_borders = [[x[0] + (np.random.random() * 2 - 1) * e,
                                  x[1] + (np.random.random() * 2 - 1) * e] for x, e in zip(gt_borders, rater_variation)]
                rater_borders = np.uint32(rater_borders)
                dat[rater_borders[0][0]:rater_borders[0][1],
                    rater_borders[1][0]:rater_borders[1][1]] = 1
                nib.save(nib.Nifti1Image(dat, affine), myfile)
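
A hypothetical call using the keyword arguments defined above (all values are illustrative):

create_example_nifti_data_2d(shape=(128, 128),
                             testdatadir="/tmp",
                             patients=["alice", "bob"],
                             patient_belongs_to=["train", "test"])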
Example no. 8
    def __init__(self,
                 featurefiles,
                 maskfiles=[],
                 location=None,
                 tps=None,
                 **kw):
        super(GridDataCollection, self).__init__(**kw)
        self.origargs.update({
            "featurefiles": featurefiles,
            "maskfiles": maskfiles,
            "location": location,
            "tps": tps
        })

        if not isinstance(featurefiles, list):
            featurefiles = [featurefiles]
        if not isinstance(maskfiles, list):
            maskfiles = [maskfiles]
        if tps is not None:
            self.tps = tps
        elif location is not None:
            if callable(location):
                self.tps = [location]
            else:
                self.tps = DataCollection.get_all_tps(location, featurefiles,
                                                      maskfiles)
            if len(self.tps) == 0:
                raise Exception(
                    'no timepoints at location {} containing both {} and {}'.
                    format(location, maskfiles, featurefiles))
        else:
            raise Exception('either tps or location has to be set')

        if len(self.tps) == 0:
            raise Exception(
                'there were no timepoints provided and location was not set')

        self.featurefiles = featurefiles
        self.maskfiles = maskfiles
        w = argget(kw, 'w', self.w)
        if not isinstance(w, list):
            w = np.ndarray.tolist(w)
        self.w = w
        self.p = argget(kw, 'padding', np.zeros(np.shape(w)))

        self.deform = argget(kw, 'deformation', np.zeros(np.shape(w)))
        self.interpolate_always = argget(kw, 'interpolate_always', False)
        self.deformrandomstate = np.random.RandomState(
            argget(kw, 'deformseed', 1234))
        self.deformSigma = argget(kw, 'deformSigma', 5)
        if not np.isscalar(
                self.deformSigma) and len(self.deformSigma) != len(w):
            raise Exception(
                'deformSigma must be a scalar or an array with the same length as w'
            )
        self.deformpadding = 2
        self.datainterpolation = argget(kw, 'datainterpolation', 3)
        self.dataextrapolation = argget(kw, 'dataextrapolation', 'constant')

        self.scaling = np.float32(argget(kw, 'scaling', np.zeros(np.shape(w))))
        self.rotation = np.float32(argget(kw, 'rotation', 0))
        self.shift = np.float32(argget(kw, 'shift', np.zeros(np.shape(w))))
        self.mirror = np.float32(argget(kw, 'mirror', np.zeros(np.shape(w))))
        self.gaussiannoise = np.float32(argget(kw, 'gaussiannoise', 0.0))
        self.vary_mean = np.float32(argget(kw, 'vary_mean', 0))
        self.vary_stddev = np.float32(argget(kw, 'vary_stddev', 0))
        self.regression = argget(kw, 'regression', False)
        self.softlabels = argget(kw, 'softlabels', True)
        self.whiten = argget(kw, "whiten", True)
        self.each_with_labels = argget(kw, "each_with_labels", 0)
        if self.each_with_labels > 0 and len(self.maskfiles) == 0:
            raise Exception(
                'need to provide at least one mask file, otherwise we cannot ensure that labels are present'
            )
        self.whiten_subvolumes = argget(kw, "whiten_subvolumes", False)
        self.presize_for_normalization = argget(kw,
                                                'presize_for_normalization',
                                                [None for w in self.w])
        self.half_gaussian_clip = argget(kw, 'half_gaussian_clip', False)
        self.pyramid_sampling = argget(kw, 'pyramid_sampling', False)
        self.subtractGauss = argget(kw, "subtractGauss", False)
        self.nooriginal = argget(kw, 'nooriginal', False)
        self.subtractGaussSigma = np.float32(argget(kw, "sigma", 5))
        self.choose_mask_at_random = argget(kw, "choose_mask_at_random", False)
        if self.choose_mask_at_random:
            self.random_mask_state = np.random.RandomState(
                argget(kw, 'randommaskseed', 1337))
        self.zero_out_label = argget(kw, 'zero_out_label', None)
        self.running_mean = 0
        self.running_num = 0
        self.running_var = 0
        self.lazy = argget(kw, 'lazy', True)
        self.imagedict = {}
        self.perform_one_hot_encoding = argget(kw, 'perform_one_hot_encoding',
                                               True)
        self.correct_nifti_orientation = argget(kw,
                                                'correct_nifti_orientation',
                                                len(self.w) == 3)
        if self.correct_nifti_orientation and len(self.w) != 3:
            self.correct_nifti_orientation = False
            logging.getLogger('data').warning(
                'Can only correct for orientation for 3d data so far!')
        self.numoffeatures = argget(
            kw, 'numoffeatures',
            len(self._get_features_and_masks(self.tps[0])[0]))
        self.sample_counter = 0
        self.minlabel = argget(kw, 'minlabel', 1)

        if not self.lazy and argget(kw, 'preloadall', False):
            self.preload_all()
Example no. 9
 def __init__(self, model, collectioninst, **kw):
     self.nclasses = argget(kw, 'ncoords', [64, 64, 64], keep=True)
     super(LocationClassificationEvaluation, self).__init__(model, collectioninst,
                                                            **kw)
Example no. 10
def remove_example_nifti_data_2d(**kw):
    testdatadir = argget(kw, "testdatadir", ".")
    datafolder = argget(kw, "datafolder", "nifti")
    shutil.rmtree(os.path.join(testdatadir, datafolder))
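
Together with Example no. 7 this forms a create/teardown pair, for instance around a test run (a sketch; the path is illustrative):

create_example_nifti_data_2d(testdatadir="/tmp")
# ... run tests against /tmp/nifti ...
remove_example_nifti_data_2d(testdatadir="/tmp")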
Example no. 11
 def __init__(self, model, collectioninst, **kw):
     self.nclasses = argget(kw, 'nclasses', 2, keep=True)
     super(ClassificationEvaluation, self).__init__(model, collectioninst,
                                                    **kw)
Example no. 12
 def __init__(self, **kw):
     self.origargs = copy.deepcopy(kw)
     self.randomstate = np.random.RandomState(argget(kw, "seed", 12345678))
     self.nclasses = argget(kw, 'nclasses', 2)
Example no. 13
    def mdgru_bb(self,
                 inp,
                 dropout,
                 num_hidden,
                 num_output,
                 noactivation=False,
                 name=None,
                 **kw):

        dimensions = argget(kw, "dimensions", None)
        if dimensions is None:
            dimensions = [
                i + 1 for i, v in enumerate(inp.get_shape()[1:-1]) if v > 1
            ]
        bnx = argget(kw, "bnx", self.bnx)
        bnh = argget(kw, "bnh", self.bnh)
        bna = argget(kw, "bna", self.bna)
        bne = argget(kw, "bne", self.bne)
        resmdgru = argget(kw, 'resmdgru', self.resmdgru)
        use_dropconnectx = argget(kw, "use_dropconnectx",
                                  self.use_dropconnectx)
        use_dropconnecth = argget(kw, "use_dropconnecth",
                                  self.use_dropconnecth)
        cgru_activation = argget(kw, 'cgru_activation', self.cgru_activation)
        myMDGRU = MDGRU
        with tf.variable_scope(name):

            mdgruclass = myMDGRU(
                inp,
                dropout,
                dimensions,
                num_hidden=num_hidden,
                name="mdgru",
                bnx=bnx,
                bnh=bnh,
                bna=bna,
                use_dropconnectx=use_dropconnectx,
                use_dropconnecth=use_dropconnecth,
                resgrux=self.resgrux,
                resgruh=self.resgruh,
                m=self.m,
                return_cgru_results=self.return_cgru_results,
                swap_memory=self.swap_memory,
                put_r_back=self.put_r_back,
                cgru_activation=cgru_activation,
                use_static_rnn=self.use_static_rnn,
                no_avgpool=self.no_avgpool,
                filter_size_x=self.filter_size_x,
                filter_size_h=self.filter_size_h,
                use_dropconnect_on_state=self.use_dropconnect_on_state,
                legacy_cgru_addition=self.legacy_cgru_addition,
                **kw)
            mdgru = mdgruclass()
            if num_output is not None:
                mdgruinnershape = mdgru.get_shape()[1:-1].as_list()
                doreshape = False
                if len(mdgruinnershape) >= 3:
                    newshape = [
                        -1,
                        np.prod(mdgruinnershape),
                        mdgru.get_shape().as_list()[-1]
                    ]
                    mdgru = tf.reshape(mdgru, newshape)
                    doreshape = True
                num_input = mdgru.get_shape().as_list()[-1]
                filtershape = [1 for _ in mdgru.get_shape()[1:-1]
                               ] + [num_input, num_output]

                # fan used by the modified Xavier init: average fan by default,
                # half the input fan for relu/elu-style activations
                numelem = (num_output + num_input) / 2
                uniform = False
                if self.activation in [tf.nn.elu, tf.nn.relu]:
                    numelem = num_input / 2
                    uniform = False
                W = tf.get_variable("W",
                                    filtershape,
                                    dtype=tf.float32,
                                    initializer=get_modified_xavier_method(
                                        numelem, uniform))
                b = tf.get_variable('b', [num_output],
                                    initializer=tf.constant_initializer(0))

                mdgru = tf.nn.convolution(mdgru, W, padding="SAME")

                if resmdgru:
                    if doreshape:
                        inp = tf.reshape(inp, [
                            -1,
                            np.prod(inp.get_shape()[1:-1].as_list()),
                            inp.get_shape().as_list()[-1]
                        ])
                    resW = tf.get_variable(
                        'resW', [1 for _ in inp.get_shape().as_list()[1:-1]] +
                        [inp.get_shape().as_list()[-1], num_output],
                        dtype=tf.float32,
                        initializer=get_modified_xavier_method(
                            num_output, False))
                    mdgru = tf.nn.convolution(inp, resW,
                                              padding="SAME") + mdgru
                if bne:
                    mdgru = batch_norm(mdgru,
                                       "bne",
                                       mdgruclass.istraining,
                                       bias=False,
                                       m=mdgruclass.m)
                mdgru = mdgru + b
                if doreshape:
                    mdgru = tf.reshape(mdgru, [-1] + mdgruinnershape +
                                       [mdgru.get_shape().as_list()[-1]])
            if noactivation:
                return mdgru
            else:
                return self.activation(mdgru)
Example no. 14
 def __init__(self, data, target, dropout, **kw):
     super(MDGRUNet, self).__init__()
     self.bnx = argget(kw, "bnx", False)
     self.bnh = argget(kw, "bnh", False)
     self.bna = argget(kw, "bna", False)
     self.bne = argget(kw, "bne", False)
     self.use_dropconnectx = argget(kw, "use_dropconnectx", True)
     self.use_dropconnecth = argget(kw, "use_dropconnecth", False)
     self.resmdgru = argget(kw, "resmdgru", False)
     self.resgrux = argget(kw, 'resgrux', False)
     self.resgruh = argget(kw, 'resgruh', False)
     self.m = argget(kw, "m", None)
     self.swap_memory = argget(kw, "swap_memory", False)
     self.return_cgru_results = argget(kw, "return_cgru_results", False)
     self.put_r_back = argget(kw, "put_r_back", False)
     self.use_static_rnn = argget(kw, 'use_static_rnn', False)
     self.no_avgpool = argget(kw, 'no_avgpool', True)
     self.filter_size_x = argget(kw, 'filter_size_x', [7, 7, 7])
     self.filter_size_h = argget(kw, 'filter_size_h', [7, 7, 7])
     self.cgru_activation = argget(kw, 'rnn_activation', tf.nn.tanh)
     self.activation = argget(kw, 'activation', tf.nn.tanh)
     self.use_caffe_impl = argget(kw, "use_caffe_impl", False)
     self.favor_speed_over_memory = argget(kw, "favor_speed_over_memory",
                                           True)
     self.use_dropconnect_on_state = argget(kw, 'use_dropconnect_on_state',
                                            False)
     self.legacy_cgru_addition = argget(kw, 'legacy_cgru_addition', False)
Example no. 15
    def __init__(self, evaluationinstance, **kw):
        self.origargs = copy.deepcopy(kw)
        # prelogging:
        experiments = argget(kw, 'experimentloc',
                             os.path.expanduser('~/experiments'))
        self.runfile = [
            f[1] for f in inspect.stack() if re.search(r"RUN.*\.py", f[1])
        ][0]

        self.experiments_postfix = argget(kw, 'experiments_postfix', "")
        experiments_nots = os.path.join(
            experiments,
            '{}'.format(self.runfile[self.runfile.index("RUN_") + 4:-3] +
                        self.experiments_postfix))
        self.experiments = os.path.join(experiments_nots,
                                        str(int(time.time())))
        self.cachefolder = os.path.join(self.experiments, 'cache')
        os.makedirs(self.cachefolder)
        # logging:
        loggers = [
            logging.getLogger(n)
            for n in ['model', 'eval', 'runner', 'helper', 'data']
        ]
        formatter = logging.Formatter(
            '%(asctime)s %(name)s\t%(levelname)s:\t%(message)s')
        logfile = argget(kw, 'logfile',
                         os.path.join(self.cachefolder, 'log.txt'))
        fh = logging.FileHandler(logfile)
        fh.setLevel(argget(kw, 'logfileloglvl', logging.DEBUG))
        fh.setFormatter(formatter)

        self.only_cpu = argget(kw, 'only_cpu', False)

        ch = logging.StreamHandler()
        ch.setFormatter(formatter)
        ch.setLevel(argget(kw, 'loglvl', logging.WARNING))
        for logger in loggers:
            logger.addHandler(ch)
            logger.setLevel(logging.DEBUG)
            logger.addHandler(fh)

        for k in self.origargs:
            logging.getLogger('runner').info('arg {}:{}'.format(
                k, self.origargs[k]))
        self.ev = evaluationinstance
        for k in self.ev.origargs:
            logging.getLogger('eval').info('arg {}:{}'.format(
                k, self.ev.origargs[k]))
        for k in self.ev.trdc.origargs:
            logging.getLogger('data').info(' trdc arg {}:{}'.format(
                k, self.ev.trdc.origargs[k]))
        for k in self.ev.tedc.origargs:
            logging.getLogger('data').info(' tedc arg {}:{}'.format(
                k, self.ev.tedc.origargs[k]))
        for k in self.ev.valdc.origargs:
            logging.getLogger('data').info('valdc arg {}:{}'.format(
                k, self.ev.valdc.origargs[k]))
        for k in self.ev.model.origargs:
            logging.getLogger('model').info('arg {}:{}'.format(
                k, self.ev.model.origargs[k]))

        self.episodes = argget(kw, 'episodes', ['train', 'evaluate'])
        self.epochs = argget(kw, 'epochs', 1)
        self.batch_size = argget(kw, 'batch_size', 1)
        self.its_per_epoch = argget(
            kw, 'its_per_epoch',
            self.ev.trdc.get_data_dims()[0] // self.batch_size)
        self.checkpointfiles = argget(kw, 'checkpointfiles', None)
        self.estimatefilenames = argget(kw, 'estimatefilenames', None)
        if isinstance(self.checkpointfiles, list):
            if 'train' in self.episodes:
                logging.getLogger('runner').error(
                    'Multiple checkpoints are only allowed if only testing is performed.'
                )
                exit(1)
        else:
            self.checkpointfiles = [self.checkpointfiles]
        if not isinstance(self.estimatefilenames, list):
            self.estimatefilenames = [self.estimatefilenames]
        if len(self.checkpointfiles) != len(self.estimatefilenames):
            if len(self.estimatefilenames) != 1:
                logging.getLogger('runner').error(
                    'Number of estimate file names must match the number of checkpoint files or be 1!'
                )
                exit(1)
            else:
                self.estimatefilenames = [
                    self.estimatefilenames[0] +
                    "-{}-{}".format(i, os.path.basename(c))
                    for i, c in enumerate(self.checkpointfiles)
                ]

        self.plotfolder = os.path.join(self.experiments, 'plot')
        self.plot_scaling = argget(kw, 'plot_scaling', 1e-8)
        self.display_each = argget(kw, 'display_each', 100)
        self.test_each = argget(kw, 'test_each', self.display_each)
        self.save_each = argget(kw, 'save_each', self.display_each)
        self.plot_each = argget(kw, 'plot_each', self.display_each)
        self.test_size = argget(kw, 'test_size', 1)  # batch_size for tests
        self.test_iters = argget(kw, 'test_iters', 1)
        self._test_pick_iteration = argget(kw,
                                           'test_first',
                                           ifset=0,
                                           default=self.test_each - 1)
        self.perform_n_times_full_validation = argget(
            kw, 'perform_n_times_full_validation', 0)
        self.perform_n_times_full_validation_dropout = argget(
            kw, 'perform_n_times_full_validation_dropout', 0.5)
        self.show_testing_results = argget(kw, 'show_testing_results', False)
        force_symlink(self.experiments, os.path.join(experiments_nots,
                                                     "latest"))
        os.makedirs(self.plotfolder)
        self.printIt = argget(kw, "print_testing_results", True)
        self.gpubound = argget(kw, 'gpubound', 1)
        self.notifyme = argget(kw, 'notifyme', None)
        self.results_to_csv = argget(kw, 'results_to_csv', False)

        self.train_losses = []
        self.test_losses = []
        self.val_losses = []

        # pop 'fullparameters' so it is not reported as an ignored argument below
        kw.pop('fullparameters', None)
        if kw:
            logging.getLogger('runner').warning(
                'the following args were ignored: {}'.format(kw))
Example no. 16
 def __init__(self,
              inputarr,
              dropout,
              dimensions=None,
              layers=1,
              num_hidden=100,
              name="mdgru",
              **kw):
     '''
     @param inputarr: needs to be of shape [batch, spatialdim1, ..., spatialdimn, channel]
     '''
     self.inputarr = inputarr
     self.add_x_bn = argget(kw, "bnx", False)
     self.add_h_bn = argget(kw, "bnh", False)
     self.add_a_bn = argget(kw, "bna", False)
     self.istraining = argget(kw, 'istraining', tf.constant(True))
     if dimensions is None:
         self.dimensions = [
             x + 1 for x in range(len(inputarr.get_shape()[1:-1]))
         ]
     else:
         self.dimensions = dimensions
     self.layers = layers
     self.num_hidden = num_hidden
     self.name = name
     self.dropout = dropout
     self.use_dropconnectx = argget(kw, "use_dropconnectx", True)
     self.use_dropconnecth = argget(kw, "use_dropconnecth", False)
     self.use_bernoulli = argget(kw, 'use_bernoulli_dropconnect', False)
     self.m = argget(kw, "min_mini_batch", None)
     self.resgruh = argget(kw, "resgruh", False)
     self.resgrux = argget(kw, "resgrux", False)
     self.filter_size_x = argget(kw, 'filter_size_x', [7, 7, 7])
     self.filter_size_h = argget(kw, 'filter_size_h', [7, 7, 7])
     self.return_cgru_results = argget(kw, 'return_cgru_results', False)
     self.use_dropconnect_on_state = argget(kw, 'use_dropconnect_on_state',
                                            False)
     self.strides = argget(kw, "strides", None)
     self.swap_memory = argget(kw, "swap_memory", True)
     self.put_r_back = argget(kw, "put_r_back", False)
     self.cgru_activation = argget(kw, 'cgru_activation', tf.nn.tanh)
     self.use_static_rnn = argget(kw, 'use_static_rnn', False)
     self.no_avgpool = argget(kw, 'no_avgpool', True)
     self.legacy_cgru_addition = argget(kw, 'legacy_cgru_addition', False)
Example no. 17
 def __init__(self,
              inputarr,
              dropout,
              dimensions=None,
              layers=1,
              num_hidden=100,
              name="mdgru",
              **kw):
     self.inputarr = inputarr
     self.generator = argget(kw, "generator", False)
     self.add_x_bn = argget(kw, "bnx", False)
     self.add_h_bn = argget(kw, "bnh", False)
     self.add_a_bn = argget(kw, "bna", False)
     self.form = argget(kw, "form", "NDHWC")
     self.istraining = argget(kw, 'istraining', tf.constant(True))
     if dimensions is None:
         self.dimensions = [
             x + 1 for x in range(len(inputarr.get_shape()[1:-1]))
         ]
     else:
         self.dimensions = dimensions
     self.layers = layers
     self.num_hidden = num_hidden
     self.name = name
     self.dropout = dropout
     self.use_dropconnectx = argget(kw, "use_dropconnectx", True)
     self.use_dropconnecth = argget(kw, "use_dropconnecth", False)
     self.use_bernoulli = argget(kw, 'use_bernoulli_dropconnect', False)
     self.mask_padding = argget(kw, 'maskpadding', None)
     self.m = argget(kw, "min_mini_batch", None)
     self.favor_speed_over_memory = argget(kw, "favor_speed_over_memory",
                                           False)
     self.filter_sizes = argget(kw, 'filter_sizes', [7, 7, 7])
     if any([self.add_x_bn, self.add_h_bn, self.add_a_bn]):
         raise Exception("bn not allowed for caffemdgru")
Example no. 18
    def __init__(self, model, collectioninst, **kw):
        super(SupervisedEvaluation, self).__init__(collectioninst, **kw)
        self.setupCollections(collectioninst)
        self.currit = 0
        self.namespace = argget(kw, "namespace", "default")
        self.only_save_labels = argget(kw, "only_save_labels", False)
        with tf.variable_scope(self.namespace):
            self.training = tf.placeholder(dtype=tf.bool)
            self.dropout = tf.placeholder(dtype=tf.float32)
            self.data = tf.placeholder(dtype=tf.float32,
                                       shape=self.trdc.get_shape())
            if isinstance(self.nclasses, list):  # for location classification
                self.target = tf.placeholder(
                    dtype=tf.float32,
                    shape=self.trdc.get_target_shape()[:-1] +
                    [np.sum(self.nclasses)])
            else:
                self.target = tf.placeholder(
                    dtype=tf.float32,
                    shape=self.trdc.get_target_shape()[:-1] + [self.nclasses])
            kw_copy = copy.deepcopy(kw)
            self.model = model(self.data,
                               self.target,
                               self.dropout,
                               training=self.training,
                               **kw)
            self.model.optimize  # referenced to trigger construction of the training ops

        # if the testing set has a different input shape, we construct a second
        # graph for testing alongside the training graph
        if self.tedc.get_shape() != self.trdc.get_shape():
            self.test_graph = tf.Graph()
            with self.test_graph.as_default():
                with tf.variable_scope(self.namespace):
                    self.test_training = tf.placeholder(dtype=tf.bool)
                    self.test_dropout = tf.placeholder(dtype=tf.float32)
                    self.test_data = tf.placeholder(
                        dtype=tf.float32, shape=self.tedc.get_shape())
                    if isinstance(self.nclasses, list):  # for location classification
                        self.test_target = tf.placeholder(
                            dtype=tf.float32,
                            shape=self.tedc.get_target_shape()[:-1] +
                            [np.sum(self.nclasses)])
                    else:
                        self.test_target = tf.placeholder(
                            dtype=tf.float32,
                            shape=self.tedc.get_target_shape()[:-1] +
                            [self.nclasses])
                    self.test_model = model(self.test_data,
                                            self.test_target,
                                            self.test_dropout,
                                            training=self.test_training,
                                            **kw_copy)
                    self.test_model.prediction  # referenced to trigger construction of the test ops
                    self.test_model.cost
        else:
            self.test_graph = tf.get_default_graph()
            self.test_model = self.model
            self.test_training = self.training
            self.test_dropout = self.dropout
            self.test_data = self.data
            self.test_target = self.target

        self.batch_size = argget(kw, 'batch_size', 1)
        self.validate_same = argget(kw, 'validate_same', False)