Example #1
File: monitor.py Project: adbrebs/spynet
 def compute_value(self):
     value = 0
     for i in xrange(self.n_batches):
         pred = self.compute_batch_classes(i)
         id1 = i * self.batch_size
         id2 = (i + 1) * self.batch_size
         value += error_rate(pred, np.argmax(self.ds.outputs[id1:id2], axis=1)) * self.batch_size
     if self.last_batch_size > 0:
         pred = self.compute_last_batch_classes()
         tg = np.argmax(self.ds.outputs[self.ds.n_data-self.last_batch_size:], axis=1)
         value += error_rate(pred, tg) * self.last_batch_size
     return value / self.ds.n_data
Example #2
 def compute_value(self):
     value = 0
     for i in xrange(self.n_batches):
         pred = self.compute_batch_classes(i)
         id1 = i * self.batch_size
         id2 = (i + 1) * self.batch_size
         value += error_rate(pred,
                             np.argmax(self.ds.outputs[id1:id2],
                                       axis=1)) * self.batch_size
     if self.last_batch_size > 0:
         pred = self.compute_last_batch_classes()
         tg = np.argmax(self.ds.outputs[self.ds.n_data -
                                        self.last_batch_size:],
                        axis=1)
         value += error_rate(pred, tg) * self.last_batch_size
     return value / self.ds.n_data
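
Both examples above average the batch-wise error rates returned by error_rate, weighting each batch by its size. The spynet implementation of error_rate is not shown on this page; the following is only a minimal sketch, assuming it returns the fraction of labels on which the predictions and the targets disagree:

import numpy as np

def error_rate(pred, target):
    # Hypothetical sketch: pred and target are 1-D integer label arrays of
    # equal length; the result is the fraction of mismatched positions.
    return np.mean(pred != target)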
Example #3
    def predict_from_generator(self, batches_generator, scaler, pred_functions=None, raw=False):
        """
        Returns the predictions of the batches of voxels, features and targets yielded by the batches_generator
        """
        if pred_functions is None:
            pred_functions = {}
        ls_vx = []
        ls_pred = []
        id_batch = 0
        for vx_batch, patch_batch, tg_batch in batches_generator:
            id_batch += 1

            batch_size_current = len(vx_batch)
            if batch_size_current not in pred_functions:
                pred_functions[batch_size_current] = self.generate_testing_function(batch_size_current)

            if scaler is not None:
                scaler.scale(patch_batch)

            pred_raw = pred_functions[batch_size_current](patch_batch)

            pred = np.argmax(pred_raw, axis=1)
            err = error_rate(pred, np.argmax(tg_batch, axis=1))
            print("     {"+str(err)+"}")

            ls_vx.append(vx_batch)
            if raw:
                ls_pred.append(pred_raw)
            else:
                ls_pred.append(pred)

        # Count the number of voxels
        n_vx = 0
        for vx in ls_vx:
            n_vx += vx.shape[0]

        # Aggregate the data
        vx_all = np.zeros((n_vx, 3), dtype=int)
        if raw:
            pred_all = np.zeros((n_vx, 135), dtype=float)
        else:
            pred_all = np.zeros((n_vx,), dtype=int)
        idx = 0
        for vx, pred in zip(ls_vx, ls_pred):
            next_idx = idx+vx.shape[0]
            vx_all[idx:next_idx] = vx
            pred_all[idx:next_idx] = pred
            idx = next_idx

        return vx_all, pred_all
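
The two loops at the end of Example #3 first count the voxels and then copy each batch into preallocated arrays. Purely to clarify what they compute, here is a hedged equivalent using np.concatenate (assuming ls_vx and ls_pred are the lists built in the main loop; the only difference is that the output dtypes are inherited from the batches instead of being forced to int or float):

import numpy as np

# Equivalent aggregation of the per-batch results collected above.
vx_all = np.concatenate(ls_vx, axis=0)
pred_all = np.concatenate(ls_pred, axis=0)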
Example #4
File: network.py Project: adbrebs/spynet
    def predict_from_generator(self, batches_generator, scaler, pred_functions=None):
        """
        Returns the predictions of the batches of voxels, features and targets yielded by the batches_generator
        """
        if pred_functions is None:
            pred_functions = {}
        ls_vx = []
        ls_pred = []
        id_batch = 0
        for vx_batch, patch_batch, tg_batch in batches_generator:
            id_batch += 1

            batch_size_current = len(vx_batch)
            if batch_size_current not in pred_functions:
                pred_functions[batch_size_current] = self.generate_testing_function(batch_size_current)

            if scaler is not None:
                scaler.scale(patch_batch)

            pred_raw = pred_functions[batch_size_current](patch_batch)

            pred = np.argmax(pred_raw, axis=1)
            err = error_rate(pred, np.argmax(tg_batch, axis=1))
            print "     {}".format(err)

            ls_vx.append(vx_batch)
            ls_pred.append(pred)

        # Count the number of voxels
        n_vx = 0
        for vx in ls_vx:
            n_vx += vx.shape[0]

        # Aggregate the data
        vx_all = np.zeros((n_vx, 3), dtype=int)
        pred_all = np.zeros((n_vx,), dtype=int)
        idx = 0
        for vx, pred in zip(ls_vx, ls_pred):
            next_idx = idx+vx.shape[0]
            vx_all[idx:next_idx] = vx
            pred_all[idx:next_idx] = pred
            idx = next_idx

        return vx_all, pred_all
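
Both versions of predict_from_generator compile one prediction function per batch size and cache it in pred_functions, so the (typically smaller) last batch triggers at most one extra compilation. A standalone sketch of that memoisation pattern; build_predictor stands in for self.generate_testing_function and is an assumption, not part of the spynet API:

pred_functions = {}

def get_pred_function(batch_size, build_predictor):
    # Compile the prediction function for this batch size only once,
    # then reuse the cached version for every later batch of the same size.
    if batch_size not in pred_functions:
        pred_functions[batch_size] = build_predictor(batch_size)
    return pred_functions[batch_size]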
Example #5
    def stat_of_all_models(self, img_true, n_out):
        '''
            Return two lists: the dice coefficients and the errors of all models in the ensemble.
        '''
        dices = []
        errs = []
        count = 1
        for pred_raw in self.pred_raws:
            # Compute img_pred
            pred_all = np.argmax(pred_raw, axis=1)
            img_pred = create_img_from_pred(self.vx_all, pred_all,
                                            img_true.shape)

            # Compute the dice coefficient and the error
            non_zo = img_pred.nonzero() or img_true.nonzero()
            pred = img_pred[non_zo]
            true = img_true[non_zo]
            dice_regions = compute_dice(pred, true, n_out)
            dices.append(dice_regions.mean())
            err_global = error_rate(pred, true)
            errs.append(err_global)
            count = count + 1

        return dices, errs
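
stat_of_all_models relies on compute_dice, whose source is not part of this listing. A minimal sketch under the assumption that it returns one Dice coefficient per label (2 * |pred AND true| / (|pred| + |true|) for each of the n_out classes; whether the background label is included is also an assumption):

import numpy as np

def compute_dice(pred, true, n_out):
    # Hypothetical sketch: one Dice score per class label in [0, n_out).
    dices = np.zeros(n_out)
    for c in range(n_out):
        p = (pred == c)
        t = (true == c)
        denom = p.sum() + t.sum()
        if denom > 0:
            dices[c] = 2.0 * np.logical_and(p, t).sum() / denom
        else:
            dices[c] = 1.0
    return dices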
Example #6
        ls_vx = []
        ls_pred = []
        brain_batches = data_gen.generate_single_atlas(atlas_id, None, region_centroids, batch_size, True)

        vx_all, pred_all = net.predict_from_generator(brain_batches, scaler, pred_functions)

        # Construct the predicted image
        img_true = data_gen.atlases[atlas_id][1]
        img_pred = create_img_from_pred(vx_all, pred_all, img_true.shape)

        # Compute the dice coefficient and the error
        non_zo = img_pred.nonzero() or img_true.nonzero()
        pred = img_pred[non_zo]
        true = img_true[non_zo]
        dice_regions = compute_dice(pred, true, n_out)
        err_global = error_rate(pred, true)

        end_time = time.clock()
        print "It took {} seconds to evaluate the whole brain.".format(end_time - start_time)
        print "The mean dice is {}".format(dice_regions.mean())
        print "The error rateis {}".format(err_global)

        # Save the results
        errors[atlas_id] = err_global
        dices[atlas_id, :] = dice_regions

        # Diff Image
        img_diff = (img_pred == img_true).astype(np.uint8)
        img_diff += 1
        img_diff[img_pred == 0] = 0
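
Example #6 rebuilds a full label volume from the per-voxel predictions with create_img_from_pred, whose source is not included here. Given that vx_all is an (n_vx, 3) array of voxel coordinates and pred_all holds one predicted label per voxel, a plausible sketch (an assumption, not the project's actual implementation) is:

import numpy as np

def create_img_from_pred(vx_all, pred_all, shape):
    # Hypothetical sketch: scatter each predicted label into a volume of the
    # given shape; voxels that were never predicted keep the background label 0.
    img = np.zeros(shape, dtype=pred_all.dtype)
    img[vx_all[:, 0], vx_all[:, 1], vx_all[:, 2]] = pred_all
    return img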