Example 1
        trdc = self.trdc.get_states()
        tedc = self.tedc.get_states()
        valdc = self.valdc.get_states()
        states = {}
        if trdc:
            states['trdc'] = trdc
        if tedc:
            states['tedc'] = tedc
        if valdc:
            states['valdc'] = valdc
        states['epoch'] = self.current_epoch
        states['iteration'] = self.current_iteration
        with open(f + ".pickle", "wb") as statefile:
            pickle.dump(states, statefile)
        return ckpt

    def add_summary_simple_value(self, text, value):
        raise NotImplementedError(
            "This needs to be implemented and only works with the TensorFlow backend."
        )

    def set_session(self, sess, cachefolder, train=False):
        return None

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass


generate_defaults_info(SupervisedEvaluation)
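
For symmetry, here is a minimal sketch of how such a pickle file could be read back. The function name and the handling of the optional keys are assumptions, not code from the repository:

    import pickle

    def load_states(f):
        # Hypothetical counterpart to the save logic above: restore the
        # counters that are always present and the optional collection states.
        with open(f + ".pickle", "rb") as statefile:
            states = pickle.load(statefile)
        # 'trdc', 'tedc' and 'valdc' were only stored when truthy.
        collections = {k: states[k] for k in ('trdc', 'tedc', 'valdc') if k in states}
        return states['epoch'], states['iteration'], collections
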
Example 2
                if self.put_r_back:
                    state = state * r
                # Dropconnect on the candidate weights is applied only when
                # use_dropconnect_on_state is set.
                usedx = self.dropconnectx if self.use_dropconnect_on_state else None
                usedh = self.dropconnecth if self.use_dropconnect_on_state else None
                htx, hth, htb = self._convlinear(
                    [inputs, state],
                    self._num_units,
                    True,
                    dropconnectxmatrix=self.dc_x_factor_candidate,
                    dropconnecthmatrix=self.dc_h_factor_candidate,
                    dropconnectx=usedx,
                    dropconnecth=usedh,
                    strides=self.strides)
                if self.put_r_back:
                    # Reset gate r was already applied to the state above.
                    htwb = htx + hth
                else:
                    # Standard GRU: apply the reset gate to the state term.
                    htwb = htx + r * hth
            # Perform batch norm on candidate if needed.
            if self.add_a_bn:
                htwb = batch_norm(htwb,
                                  "bna",
                                  self.istraining,
                                  bias=None,
                                  m=self.min_mini_batch)
            # Update state/output.
            new_h = z * state + (1 - z) * self.crnn_activation(htwb + htb)
        return new_h, new_h


generate_defaults_info(CGRUCell)
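
The last line of the snippet is the standard GRU convex-combination update: the gate z decides how much of the previous state survives. A tiny NumPy illustration of just that update rule (all names here are illustrative):

    import numpy as np

    def gru_update(state, z, candidate):
        # z close to 1 keeps the old state; z close to 0 adopts the candidate.
        return z * state + (1 - z) * np.tanh(candidate)

    state = np.zeros(4)
    z = np.full(4, 0.8)                      # mostly keep the old state
    print(gru_update(state, z, np.ones(4)))  # ~0.152 everywhere
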
Example 3
    """Abstract model class for regression tasks."""
    def __init__(self, data, dropout, kw):
        super(RegressionModel, self).__init__(data, dropout, kw)
        self.dropout = dropout
        self.learning_rate = argget(kw, "learning_rate", 0.001)
        self.nclasses = argget(kw, "nclasses", 1)
        self.momentum = argget(kw, "momentum", 0.9)


class ReconstructionModel(Model):
    """Abstract model class for reconstruction tasks."""
    def __init__(self, data, dropout, kw):
        super(ReconstructionModel, self).__init__(data, dropout, None, kw)
        self.dropout = dropout
        self.learning_rate = argget(kw, "learning_rate", 0.001)
        self.nclasses = argget(kw, "nclasses", 2)


class GANModel(Model):
    """Abstract model class for GANs."""
    def __init__(self, data, dropout, kw):
        super(GANModel, self).__init__(data, dropout, None, kw)
        self.dropout = dropout
        self.learning_rate = argget(kw, "learning_rate", 0.001)
        self.momentum = argget(kw, "momentum", 0.9)
        self.nclasses = argget(kw, "nclasses", 2)
        self.fakedata = argget(kw, "fakedata", None)


generate_defaults_info(Model)
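
All three subclasses read their hyperparameters through argget, whose definition is not part of this excerpt. A plausible minimal implementation, assuming it consumes the keyword from the dict and falls back to a default:

    def argget(kw, key, default=None):
        # Assumed behaviour: pop `key` from the kwargs dict if present,
        # otherwise return the default value.
        return kw.pop(key, default)

    kw = {"learning_rate": 0.01}
    print(argget(kw, "learning_rate", 0.001))  # 0.01, consumed from kw
    print(argget(kw, "momentum", 0.9))         # 0.9, the default
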
Example 4
                if self.vwfc_activation in [tf.nn.elu, tf.nn.relu]:
                    # ReLU-family activations zero about half their inputs on
                    # average, so halve the effective fan-in (He-style).
                    numelem = num_input / 2
                    uniform = False
                W = tf.get_variable(
                    "W", filtershape, dtype=tf.float32, initializer=get_modified_xavier_method(numelem, uniform))
                b = tf.get_variable("b", [num_output], initializer=tf.constant_initializer(0))

                mdgru = tf.nn.convolution(mdgru, W, padding="SAME")

                if resmdgru:
                    if doreshape:
                        inp = tf.reshape(inp,
                                         [-1, np.prod(inp.get_shape()[1:-1].as_list()), inp.get_shape().as_list()[-1]])
                    resW = tf.get_variable("resW",
                                           [1 for _ in inp.get_shape().as_list()[1:-1]] + [
                                               inp.get_shape().as_list()[-1], num_output],
                                           dtype=tf.float32, initializer=get_modified_xavier_method(num_output, False))
                    mdgru = tf.nn.convolution(inp, resW, padding="SAME") + mdgru
                if add_e_bn:
                    mdgru = batch_norm(mdgru, "bne", mdgruclass.istraining, bias=False, m=mdgruclass.min_mini_batch)
                mdgru = mdgru + b
                if doreshape:
                    mdgru = tf.reshape(mdgru, [-1] + mdgruinnershape + [mdgru.get_shape().as_list()[-1]])
            if noactivation:
                return mdgru
            else:
                return self.vwfc_activation(mdgru)


generate_defaults_info(MDGRUNet)
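
get_modified_xavier_method is also not shown in this excerpt. Given that the snippet halves the fan-in for ReLU-family activations, a hedged NumPy sketch of what such an initializer plausibly computes (the body below is an assumption, not the repository's code):

    import numpy as np

    def modified_xavier(numelem, uniform=True):
        # Assumed Xavier-style initializer with effective fan `numelem`:
        # both branches yield weights with variance 1 / numelem.
        def init(shape):
            if uniform:
                limit = np.sqrt(3.0 / numelem)
                return np.random.uniform(-limit, limit, size=shape).astype(np.float32)
            return np.random.normal(0.0, np.sqrt(1.0 / numelem), size=shape).astype(np.float32)
        return init
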
Example 5
            location at which timepoints are searched
        featurefiles: list of str
            necessary featurefiles to be contained in a timepoint
        maskfiles: list of str
            necessary maskfiles to be contained in a timepoint

        Returns
        -------
        sorted list
            valid timepoints in string format
        '''
        comm = "find '" + os.path.join(
            folder, '') + "' -type d -exec test -e {}/" + featurefiles[0]
        for i in featurefiles[1:]:
            comm += " -a -e {}/" + i
        for i in maskfiles:
            comm += " -a -e {}/" + i
        comm += " \\; -print\n"
        res, err = subprocess.Popen(comm, stdout=subprocess.PIPE,
                                    shell=True).communicate()
        if sys.version_info > (3, 0):
            # Python 3: decode the byte strings returned by the subprocess.
            return sorted([str(r, 'utf-8') for r in res.split() if r])
        else:
            # Python 2: the results are already plain strings.
            return sorted([str(r) for r in res.split() if r])


generate_defaults_info(DataCollection)
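
The find invocation above depends on a POSIX shell and careful quoting. A pure-Python sketch with the same intent, treating every directory that contains all feature and mask files as a valid timepoint:

    import os

    def find_timepoints(folder, featurefiles, maskfiles):
        # Walk the tree and keep directories containing every required file.
        required = list(featurefiles) + list(maskfiles)
        valid = []
        for root, dirs, files in os.walk(folder):
            if all(r in files for r in required):
                valid.append(root)
        return sorted(valid)
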
Example 6
File: tf.py Project: gtancev/MD-GRU
    def _load(self, f):
        if self.restore_optimistically:
            self._optimistic_restore(self.sess, f)
        else:
            try:
                self.saver.restore(self.sess, f)
            except Exception as e:
                import traceback
                traceback.print_exc()
                try:
                    # On failure, list the first few tensor names in the
                    # checkpoint to help diagnose mismatches, then re-raise.
                    reader = pywrap_tensorflow.NewCheckpointReader(f)
                    var_to_shape_map = reader.get_variable_to_shape_map()
                    tensor_names = []
                    for i, key in enumerate(sorted(var_to_shape_map)):
                        tensor_names.append(key)
                        if i == 10:
                            break
                    logging.getLogger('eval').warning(
                        'The following are the first tensor names in checkpoint file {}: {}'
                        .format(f, ", ".join(tensor_names)))
                finally:
                    raise e

    def add_summary_simple_value(self, text, value):
        summary = tf.Summary()
        summary.value.add(tag=text, simple_value=value)
        self.train_writer.add_summary(summary)


generate_defaults_info(SupervisedEvaluationTensorflow)
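
_optimistic_restore itself is not shown in this excerpt. A hedged TF1-style sketch of what such a routine typically does, restoring only the variables whose name and shape match the checkpoint (an assumption, not the repository's implementation):

    import tensorflow as tf

    def optimistic_restore(sess, ckpt_path):
        # Restore only the intersection of graph variables and checkpoint entries.
        reader = tf.train.NewCheckpointReader(ckpt_path)
        ckpt_shapes = reader.get_variable_to_shape_map()
        restorable = [v for v in tf.global_variables()
                      if v.name.split(':')[0] in ckpt_shapes
                      and v.get_shape().as_list() == ckpt_shapes[v.name.split(':')[0]]]
        tf.train.Saver(restorable).restore(sess, ckpt_path)
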
Example 7
        if dtype is not None or tp is not None:
            logging.getLogger('data').warning(
                'Cannot handle special arguments for this preloaded random sample; they will only take effect for the next one. Returning preloaded data. Arguments were: {}, {}, {}, {}({})'.format(
                    batch_size, dtype, tp, ",".join(kw), ",".join(map(str, kw.values()))))
        if self._preloadthreads[self.curr_thread] is not None:
            self._preloadthreads[self.curr_thread].join()
        if batch_size != self.batch_size:
            logging.getLogger('data').warning(
                'Fetched the wrong number of samples; fetching again now to return the correct number. Updated batch_size accordingly.')
            logging.getLogger('data').warning(
                'Did you forget to provide the threaded class with the correct batch size at initialization?')
            self.batch_size = batch_size
            self._preloadthreads[self.curr_thread] = Thread(target=self._preload_random_sample,
                                                            args=(self.batch_size, self.curr_thread,))
            self._preloadthreads[self.curr_thread].start()
            self._preloadthreads[self.curr_thread].join()
        batch = np.copy(self._batch[self.curr_thread])
        batchlabs = np.copy(self._batchlabs[self.curr_thread])
        self._preloadthreads[self.curr_thread] = Thread(target=self._preload_random_sample,
                                                        args=(self.batch_size, self.curr_thread,))
        self._preloadthreads[self.curr_thread].start()
        self.curr_thread = (self.curr_thread + 1) % self.num_threads
        return batch, batchlabs

    def _preload_random_sample(self, batchsize, container_id):
        self._batch[container_id], self._batchlabs[container_id] = super(ThreadedGridDataCollection,
                                                                         self).random_sample(batch_size=batchsize)

generate_defaults_info(GridDataCollection)
generate_defaults_info(ThreadedGridDataCollection)
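
The snippet implements double-buffered loading: while the caller consumes one batch, a background thread is already preparing the next. The same pattern in isolation (a simplified single-buffer sketch, not the repository's class):

    from threading import Thread

    class Prefetcher:
        def __init__(self, load_fn):
            self.load_fn = load_fn
            self._result = None
            self._thread = Thread(target=self._preload)
            self._thread.start()

        def _preload(self):
            self._result = self.load_fn()

        def next_batch(self):
            self._thread.join()           # wait for the pending load
            batch = self._result
            self._thread = Thread(target=self._preload)
            self._thread.start()          # immediately start loading the next
            return batch
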
Example 8
            output_shape[1:-2] = [
                int(np.ceil(myshape[1 + i] / strides[i]))
                for i in range(len(strides))
            ]
        output_shape[-1] = mycell.output_size

        # Build an all-zero initial state matching the flattened batch size.
        zeros_dims = tf.stack([
            tf.shape(minput)[0],
            np.prod(output_shape[1:-2]), mycell.output_size
        ])
        initial_state = tf.reshape(tf.fill(zeros_dims, 0.0),
                                   [-1, mycell.output_size])
        if self.use_static_rnn:
            trans_res_flattened, _ = tf.contrib.rnn.static_rnn(
                mycell,
                tf.unstack(trans_input_flattened, axis=-2),
                dtype=tf.float32,
                initial_state=initial_state)
        else:
            trans_res_flattened, _ = tf.nn.dynamic_rnn(
                mycell,
                trans_input_flattened,
                dtype=tf.float32,
                swap_memory=self.swap_memory,
                initial_state=initial_state)

        return tf.reshape(trans_res_flattened, shape=output_shape)


generate_defaults_info(MDRNN)
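
The reshaping logic folds every spatial axis except the scanned one into the batch dimension, runs a one-dimensional RNN, and unfolds the result afterwards. A NumPy shape walk-through of that trick (all dimensions made up for illustration):

    import numpy as np

    batch, h, w, c, units = 2, 4, 5, 3, 7
    x = np.zeros([batch, h, w, c])
    flat = x.reshape(batch * h, w, c)    # fold h into the batch, scan along w
    # ... an RNN over axis 1 would map c channels to `units` outputs ...
    res = np.zeros([flat.shape[0], w, units])
    print(res.reshape(batch, h, w, units).shape)   # (2, 4, 5, 7)
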
Example 9
        # Save a copy of the current run file to the cache folder:

        shutil.copyfile(self.runfile,
                        os.path.join(self.cachefolder, 'runfile.py'))

        if "train" in self.episodes:
            with self.ev.get_train_session() as sess:
                self.ev.set_session(sess, self.cachefolder, train=True)
                if self.checkpointfiles[0]:
                    self.ev.load(self.checkpointfiles[0])
                self.train()

        if "test" in self.episodes or "evaluate" in self.episodes:
            self.use_tensorboard = False  # not needed, since we evaluate everything anyway.
            with self.ev.get_test_session() as sess:
                self.ev.set_session(sess, self.cachefolder)
                for est, ckpt in zip(self.estimatefilenames,
                                     self.checkpointfiles):
                    if ckpt:
                        self.ev.load(ckpt)
                    self.ev.estimatefilename = est
                    self.test()
        if self.notifyme:
            notify_user(self.notifyme['chat_id'],
                        self.notifyme['token'],
                        message='{} has/have finished'.format(" and ".join(
                            self.episodes)))


generate_defaults_info(Runner)
Example 10
        self._num_units = num_units
        self._num_inputs = num_input
        self.filter_size_x = argget(kw, "filter_size_x", [7, 7])
        self.filter_size_h = argget(kw, "filter_size_h", [7, 7])
        self.strides = argget(kw, "strides", None)

    @property
    def output_size(self):
        return self._num_units

    @property
    def state_size(self):
        return self._num_units

    def _get_dropconnect(self, t, keep_rate_training, keep_rate_testing=1):
        """Creates factors to be applied to filters to achieve either Bernoulli or Gaussian dropconnect."""
        if self.training:
            keep_rate = keep_rate_training
        else:
            keep_rate = keep_rate_testing
        if keep_rate is None:
            raise Exception('keep_rate cannot be None if dropconnect factors are requested')
        if self.use_bernoulli:
            # Bernoulli: keep each weight with probability keep_rate and
            # rescale by 1/keep_rate so the expected factor stays 1.
            dc = t.uniform_() < keep_rate
            t.fill_(1).mul_(dc).mul_(1 / keep_rate)
        else:
            # Gaussian: factors with mean 1 and the matching variance.
            t.normal_(1, np.sqrt((1 - keep_rate) / keep_rate))


generate_defaults_info(CRNNCell)
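
Both branches produce multiplicative weight factors with the same first two moments: expectation 1 and variance (1 - keep_rate) / keep_rate. A NumPy sketch of the two variants (an illustrative, out-of-place version of the in-place code above):

    import numpy as np

    def dropconnect_factor(shape, keep_rate, bernoulli=True):
        # Bernoulli: keep with probability keep_rate, rescale by 1/keep_rate.
        if bernoulli:
            return (np.random.uniform(size=shape) < keep_rate) / keep_rate
        # Gaussian: same mean and variance as the Bernoulli factors.
        return np.random.normal(1.0, np.sqrt((1 - keep_rate) / keep_rate), size=shape)
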
Example 11
        mdrnn_kw = {}
        mdrnn_kw.update(self.mdrnn_kw)
        mdrnn_kw.update(self.crnn_kw)
        mdrnn_kw.update(kw)

        mdrnn_kw["num_hidden"] = num_hidden
        mdrnn_kw["num_input"] = num_input
        mdrnn_kw["name"] = "mdgru"
        model = [MDRNN(dropout, spatial_dimensions, mdrnn_kw)]
        if num_spatial_dims == 2:
            convop = th.nn.Conv2d
            kernel = [1, 1]
        elif num_spatial_dims == 3:
            convop = th.nn.Conv3d
            kernel = [1, 1, 1]
        else:
            raise Exception(
                'PyTorch convolutions support at most 3 spatial dimensions')
        if num_output is not None:
            model += [convop(num_hidden, num_output, kernel)]
            if not self.noactivation:
                model += [self.vwfc_activation()]
        self.model = th.nn.Sequential(*model)

    def forward(self, input):
        return self.model(input)


generate_defaults_info(MDGRUBlock)
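
The constructor assembles the network as a plain th.nn.Sequential: the MDRNN core, an optional 1x1 (or 1x1x1) convolution mapping num_hidden to num_output channels, and an optional activation. The same composition pattern with stand-in modules:

    import torch as th

    # Stand-ins for the block above: a core module, a 1x1 output convolution
    # and an activation, composed with th.nn.Sequential.
    model = th.nn.Sequential(
        th.nn.Conv2d(8, 16, [1, 1]),   # placeholder for the MDRNN core
        th.nn.Conv2d(16, 2, [1, 1]),   # num_hidden -> num_output
        th.nn.Tanh(),
    )
    print(model(th.zeros(1, 8, 32, 32)).shape)   # torch.Size([1, 2, 32, 32])
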