Example #1
    def next(self, timeout=None):
        """ next.

        Get the next feed dict.

        Returns:
            A TensorFlow feed dict, or 'False' if it has no more data.

        """
        self.data_status.update()
        fd = self.feed_dict_queue.get(timeout=timeout)
        # Build the input patches from the queued indices before constructing
        # the final feed dict. Note: dict.values() is not indexable in
        # Python 3, so materialize it as a list first.
        if fd:
            fd_as_list = list(fd.values())
            patches_idxs = fd_as_list[0]
            x = np.empty([len(patches_idxs), 33, 33, 4])
            y = fd_as_list[1]
            #print(x.shape[0], y)

            for ix, idxs in enumerate(patches_idxs):
                im_idx = idxs[0]
                i, j, k = idxs[1:]
                x[ix] = self.all_imgs[im_idx][i, j - 16:j + 17, k - 16:k + 17]
            inputs = tf.get_collection(tf.GraphKeys.INPUTS)
            targets = tf.get_collection(tf.GraphKeys.TARGETS)
            return utils.feed_dict_builder(x, y, inputs, targets)

        return fd
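
As a rough illustration of how a feeder like this might be consumed, the loop below keeps requesting feed dicts until the feeder signals exhaustion. The `feeder`, `session`, and `train_op` names are placeholders for this sketch and are not defined in the example above.

# Hypothetical consumer loop: pull feed dicts until the feeder returns a
# falsy value, then stop.
while True:
    fd = feeder.next(timeout=30)  # wait up to 30 seconds for the next batch
    if not fd:
        break
    session.run(train_op, feed_dict=fd)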
Example #2
    def evaluate(self, X, Y, batch_size=128):
        """ Evaluate.

        Evaluate model on given samples.

        Arguments:
            X: array, `list` of arrays (if multiple inputs) or `dict`
                (with input layer names as keys). Data to feed to the
                model for evaluation.
            Y: array, `list` of arrays (if multiple targets) or `dict`
                (with estimator layer names as keys). Targets (labels) to
                evaluate against. Usually the next element of a sequence,
                i.e. for x[0] => y[0] = x[1].
            batch_size: `int`. The batch size. Default: 128.

        Returns:
            The metric score.

        """
        feed_dict = feed_dict_builder(X, Y, self.inputs, self.targets)
        return eval(self.trainer.session, self.net, feed_dict, batch_size)
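
For reference, calling this method in typical TFLearn usage might look like the snippet below; `model`, `testX`, and `testY` are assumed names, not part of the example above.

# Assuming `model` is a trained tflearn.DNN and testX/testY are NumPy arrays
# shaped to match the network's input and target layers:
score = model.evaluate(testX, testY, batch_size=256)
print("Evaluation score:", score)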
Example #3
    def on_batch_end(self, training_state, snapshot=False):

        if training_state.step // self.validation_step > self.last_run_step:
            self.last_run_step += 1
            tflearn.is_training(False, session=self.tf_session)

            # Score the held-out data in mini-batches and collect the
            # positive-class probabilities.
            out_proba = np.array([])
            n_rep = self.total_n // self.batch_size + 1
            for rep in range(n_rep):
                batch = self.data[rep * self.batch_size:(rep + 1) * self.batch_size]
                fdict = feed_dict_builder(batch, None, self.t_inputs, None)
                pproba = self.predictor.predict(fdict)
                out_proba = np.concatenate([out_proba, 1 - pproba[:, 0]])

            sc = roc_auc_score(self.val_y, out_proba)
            print("Validation AUC: " + str(sc))

Example #4
    def next(self, timeout=None):
        """ next.

        Get the next feed dict.

        Returns:
            A TensorFlow feed dict, or 'False' if it has no more data.

        """
        self.data_status.update()
        fd = self.feed_dict_queue.get(timeout=timeout)
        # Extract 2D/3D patches for the queued indices before building the feed dict.
        if fd:
            import datax.data
            fd_as_list = list(fd.values())
            patches_idxs = fd_as_list[0]
            x_shape_3d = [len(patches_idxs), p_sz, p_sz, p_sz, 2]
            x_shape_2d = [len(patches_idxs), p_sz, p_sz, 2]

            x = np.empty(x_shape_3d if config.patch_num_dim == 3 else x_shape_2d)
            y = fd_as_list[1]

            for ix, idxs in enumerate(patches_idxs):
                im_id = idxs[0]
                vxl_idx = idxs[1:4]
                rot_arr = idxs[4:7]
                do_flip = idxs[-1]
                image_arr = config.data_bunch.get_image(im_id).get_image_arr()
                x[ix] = rotate_combine_normalize(image_arr, vxl_idx, rot_arr,
                                                 do_flip)

            inputs = tf.get_collection(tf.GraphKeys.INPUTS)
            targets = tf.get_collection(tf.GraphKeys.TARGETS)
            return utils.feed_dict_builder(x, y, inputs, targets)

        return fd
Example #5
    def fit(self,
            X_inputs,
            Y_targets,
            n_epoch=10,
            validation_set=None,
            show_metric=False,
            batch_size=None,
            shuffle=None,
            snapshot_epoch=True,
            snapshot_step=None,
            excl_trainops=None,
            run_id=None):
        if batch_size:
            for train_op in self.train_ops:
                train_op.batch_size = batch_size

        valX, valY = None, None
        if validation_set:
            if isinstance(validation_set, float):
                valX = validation_set
                valY = validation_set
            else:
                valX = validation_set[0]
                valY = validation_set[1]

        # For simplicity we build the feed dict synchronously here, but the
        # Trainer also supports asynchronous feed dict allocation.
        feed_dict = feed_dict_builder(X_inputs, Y_targets, self.inputs,
                                      self.targets)
        feed_dicts = [feed_dict for i in self.train_ops]

        val_feed_dicts = None
        if not (is_none(valX) or is_none(valY)):
            if isinstance(valX, float):
                val_feed_dicts = valX
            else:
                val_feed_dict = feed_dict_builder(valX, valY, self.inputs,
                                                  self.targets)
                val_feed_dicts = [val_feed_dict for i in self.train_ops]

        # Retrieve data preprocessing and augmentation
        dprep_dict, daug_dict = {}, {}
        dprep_collection = tf.get_collection(tf.GraphKeys.DATA_PREP)
        daug_collection = tf.get_collection(tf.GraphKeys.DATA_AUG)
        for i in range(len(self.inputs)):
            if dprep_collection[i] is not None:
                dprep_dict[self.inputs[i]] = dprep_collection[i]
            if daug_collection[i] is not None:
                daug_dict[self.inputs[i]] = daug_collection[i]

        self.trainer.fit(feed_dicts,
                         val_feed_dicts=val_feed_dicts,
                         n_epoch=n_epoch,
                         show_metric=show_metric,
                         snapshot_step=snapshot_step,
                         snapshot_epoch=snapshot_epoch,
                         shuffle_all=shuffle,
                         dprep_dict=dprep_dict,
                         daug_dict=daug_dict,
                         excl_trainops=excl_trainops,
                         run_id=run_id)
        self.predictor = Evaluator([self.net], session=self.trainer.session)
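
The positional matching that `feed_dict_builder` performs in the code above can be seen in isolation in the small sketch below. It assumes TensorFlow 1.x and the `feed_dict_builder` helper from `tflearn.utils`; the placeholder names are arbitrary.

import numpy as np
import tensorflow as tf
from tflearn.utils import feed_dict_builder

# Placeholders standing in for the network's input and target layers.
x_in = tf.placeholder(tf.float32, [None, 4], name='input_x')
y_in = tf.placeholder(tf.float32, [None, 2], name='target_y')

X = np.random.rand(8, 4).astype(np.float32)
Y = np.random.rand(8, 2).astype(np.float32)

# Arrays are matched to placeholders by position (creation order), as the
# NOTE in the fit() docstring of Example #8 describes.
fd = feed_dict_builder(X, Y, [x_in], [y_in])
assert x_in in fd and y_in in fd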
Example #6
    def evaluate(self, X, Y, batch_size=128):
        feed_dict = feed_dict_builder(X, Y, self.inputs, self.targets)
        return eval(self.trainer.session, self.net, feed_dict, batch_size)
Example #7
    def _predict(self, X):
        feed_dict = feed_dict_builder(X, None, self.inputs, None)
        return self.predictor.predict(feed_dict)
Example #8
    def fit(self, X_inputs, Y_targets, n_epoch=10, validation_set=None,
            show_metric=False, batch_size=None, shuffle=None,
            snapshot_epoch=True, snapshot_step=None, excl_trainops=None,
            run_id=None):
        """ Fit.

        Train model, feeding X_inputs and Y_targets to the network.

        NOTE: When not feeding dicts, data assignment is made by
            input/estimator layer creation order (for example, the second
            input layer created will be fed the second value of the
            X_inputs list).

        Examples:
            ```python
            model.fit(X, Y) # Single input and output
            model.fit({'input1': X}, {'output1': Y}) # Single input and output
            model.fit([X1, X2], Y) # Multiple inputs, single output

            # validate with X_val and [Y1_val, Y2_val]
            model.fit(X, [Y1, Y2], validation_set=(X_val, [Y1_val, Y2_val]))
            # 10% of training data used for validation
            model.fit(X, Y, validation_set=0.1)
            ```

        Arguments:
            X_inputs: array, `list` of arrays (if multiple inputs) or `dict`
                (with input layer names as keys). Data to feed to train
                the model.
            Y_targets: array, `list` of arrays (if multiple targets) or `dict`
                (with estimator layer names as keys). Targets (labels) to
                feed to train the model. Usually the next element of a
                sequence, i.e. for x[0] => y[0] = x[1].
            n_epoch: `int`. Number of epochs to run. Default: 10.
            validation_set: `tuple`. Represents data used for validation.
                `tuple` holds data and targets (provided in the same form as
                X_inputs and Y_targets). Additionally, it also accepts a
                `float` (<1) to perform a data split over the training data.
            show_metric: `bool`. Whether to display the accuracy at every
                step.
            batch_size: `int` or None. If `int`, overrides all network
                estimators' 'batch_size' with this value.
            shuffle: `bool` or None. If `bool`, overrides all network
                estimators' 'shuffle' with this value.
            snapshot_epoch: `bool`. If True, the model is snapshotted at the
                end of every epoch. (Snapshotting a model evaluates it on the
                validation set and creates a checkpoint if 'checkpoint_path'
                is specified.)
            snapshot_step: `int` or None. If `int`, the model is snapshotted
                every 'snapshot_step' steps.
            excl_trainops: `list` of `TrainOp`. A list of train ops to
                exclude from the training process (TrainOps can be retrieved
                through `tf.get_collection_ref(tf.GraphKeys.TRAIN_OPS)`).
            run_id: `str`. A name for this run. (Useful for TensorBoard.)

        """
        if batch_size:
            for train_op in self.train_ops:
                train_op.batch_size = batch_size

        valX, valY = None, None
        if validation_set:
            if isinstance(validation_set, float):
                valX = validation_set
                valY = validation_set
            else:
                valX = validation_set[0]
                valY = validation_set[1]

        # For simplicity we build the feed dict synchronously here, but the
        # Trainer also supports asynchronous feed dict allocation.
        feed_dict = feed_dict_builder(X_inputs, Y_targets, self.inputs,
                                      self.targets)
        feed_dicts = [feed_dict for i in self.train_ops]

        val_feed_dicts = None
        if not (is_none(valX) or is_none(valY)):
            if isinstance(valX, float):
                val_feed_dicts = valX
            else:
                val_feed_dict = feed_dict_builder(valX, valY, self.inputs,
                                                  self.targets)
                val_feed_dicts = [val_feed_dict for i in self.train_ops]

        # Retrieve data preprocessing and augmentation
        dprep_dict, daug_dict = {}, {}
        dprep_collection = tf.get_collection(tf.GraphKeys.DATA_PREP)
        daug_collection = tf.get_collection(tf.GraphKeys.DATA_AUG)
        for i in range(len(self.inputs)):
            if dprep_collection[i] is not None:
                dprep_dict[self.inputs[i]] = dprep_collection[i]
            if daug_collection[i] is not None:
                daug_dict[self.inputs[i]] = daug_collection[i]

        self.trainer.fit(feed_dicts, val_feed_dicts=val_feed_dicts,
                         n_epoch=n_epoch,
                         show_metric=show_metric,
                         snapshot_step=snapshot_step,
                         snapshot_epoch=snapshot_epoch,
                         shuffle_all=shuffle,
                         dprep_dict=dprep_dict,
                         daug_dict=daug_dict,
                         excl_trainops=excl_trainops,
                         run_id=run_id)
        self.predictor = Evaluator([self.net],
                                   session=self.trainer.session)
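
The `tf.GraphKeys.DATA_PREP` and `tf.GraphKeys.DATA_AUG` collections read above are populated when preprocessing/augmentation objects are attached to an input layer. A minimal sketch of that wiring with standard TFLearn building blocks is shown below; the concrete preprocessing steps are just examples.

import tflearn

# Preprocessing/augmentation objects are registered in the DATA_PREP and
# DATA_AUG collections when passed to the input layer, which is how fit()
# can later look them up per input via tf.get_collection.
img_prep = tflearn.ImagePreprocessing()
img_prep.add_featurewise_zero_center()

img_aug = tflearn.ImageAugmentation()
img_aug.add_random_flip_leftright()

net = tflearn.input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)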