Example #1
def print_prediction(config):
    cache_m = CacheManager()

    if not os.path.isfile(cache_m.fileLocation('test_pred.pik')):
        return None

    #Load predictions
    (expected, Y_pred, nclasses) = cache_m.load('test_pred.pik')
    y_pred = np.argmax(Y_pred, axis=1)

    #Output metrics
    if nclasses > 2:
        f1 = metrics.f1_score(expected, y_pred, average='weighted')
    else:
        f1 = metrics.f1_score(expected, y_pred, pos_label=1)
    print("F1 score: {0:.2f}".format(f1))

    m_conf = PrintConfusionMatrix(y_pred, expected, nclasses, config, "TILs")

    #ROC AUC
    #Get positive scores (binary only)
    if nclasses == 2:
        scores = Y_pred.transpose()[1]
        fpr, tpr, thresholds = metrics.roc_curve(expected, scores, pos_label=1)
        print("AUC: {0:f}".format(metrics.roc_auc_score(expected, scores)))

    print("Accuracy: {0:.3f}".format(m_conf[nclasses + 2][nclasses]))

    #ROC details only exist in the binary case
    if nclasses == 2 and config.verbose > 1:
        print("False positive rates: {0}".format(fpr))
        print("True positive rates: {0}".format(tpr))
        print("Thresholds: {0}".format(thresholds))
Example #2
class KNet(GenericModel):
    """
    Implements abstract methods from GenericModel.
    Model is the same as in: https://keras.io/examples/mnist_cnn/
    """
    def __init__(self, config, ds, name=None):
        super().__init__(config, ds, name=name)
        if name is None:
            self.name = "KerasNet"
        self._modelCache = "{0}-model.h5".format(self.name)
        self._weightsCache = "{0}-weights.h5".format(self.name)
        self._mgpu_weightsCache = "{0}-mgpu-weights.h5".format(self.name)

        self.cache_m = CacheManager()
        self.cache_m.registerFile(
            os.path.join(config.model_path, self._modelCache),
            self._modelCache)
        self.cache_m.registerFile(
            os.path.join(config.weights_path, self._weightsCache),
            self._weightsCache)
        self.cache_m.registerFile(
            os.path.join(config.weights_path, self._mgpu_weightsCache),
            self._mgpu_weightsCache)

        self.single = None
        self.parallel = None

    def get_model_cache(self):
        """
        Returns path to model cache
        """
        return self.cache_m.fileLocation(self._modelCache)

    def get_weights_cache(self):
        """
        Returns path to weights cache
        """
        return self.cache_m.fileLocation(self._weightsCache)

    def get_mgpu_weights_cache(self):
        """
        Returns path to multi-GPU weights cache
        """
        return self.cache_m.fileLocation(self._mgpu_weightsCache)

    def build(self, **kwargs):

        model, parallel_model = self._build(**kwargs)

        self.single = model
        self.parallel = parallel_model

        return (model, parallel_model)

    def _build(self, **kwargs):
        """
        @param data_size <int>: size of the training dataset
        @param training <boolean>: build the model in training mode (default True)
        @param feature <boolean>: build the model as a feature extractor (default False)
        """
        width, height, channels = self._check_input_shape()

        if 'data_size' in kwargs:
            self.data_size = kwargs['data_size']

        training = kwargs.get('training', True)
        feature = kwargs.get('feature', False)

        if backend.image_data_format() == 'channels_first':
            input_shape = (channels, height, width)
        else:
            input_shape = (height, width, channels)

        self.cache_m = CacheManager()

        model = self._build_architecture(input_shape, training, feature)

        #Reuse the learning rate saved by a previous training run, if any
        lr_cache = "{0}_learning_rate.txt".format(self.name)
        self.cache_m.registerFile(os.path.join(self._config.cache, lr_cache),
                                  lr_cache)
        l_rate = 0.0005
        if os.path.isfile(self.cache_m.fileLocation(
                lr_cache)) and not self._config.new_net:
            l_rate = float(self.cache_m.read(lr_cache))
            if self._config.info:
                print("Found previous learning rate: {0}".format(l_rate))

        #opt = optimizers.SGD(lr=l_rate, decay=1.5e-4, momentum=0.9, nesterov=True)
        #opt = optimizers.Adam(lr = l_rate)
        opt = optimizers.Adadelta()

        #Return parallel model if multiple GPUs are available
        parallel_model = None

        if self._config.gpu_count > 1:
            with tf.device('/cpu:0'):
                model.compile(loss='categorical_crossentropy',
                              optimizer=opt,
                              metrics=['accuracy'])

            parallel_model = multi_gpu_model(model,
                                             gpus=self._config.gpu_count)
            parallel_model.compile(
                loss='categorical_crossentropy',
                optimizer=opt,
                metrics=['accuracy'],
                #options=p_opt,
                #run_metadata=p_mtd
            )
        else:
            model.compile(
                loss='categorical_crossentropy',
                optimizer=opt,
                metrics=['accuracy'],
                #options=p_opt,
                #run_metadata=p_mtd
            )

        return (model, parallel_model)

    def _build_architecture(self, input_shape, training=None, feature=False):

        model = Sequential()
        model.add(
            Convolution2D(32,
                          kernel_size=(3, 3),
                          activation='relu',
                          input_shape=input_shape))
        model.add(Convolution2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(self._ds.nclasses, activation='softmax'))

        return model
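# A self-contained sketch of the network _build_architecture above produces,
# mirroring the Keras mnist_cnn example referenced in the KNet docstring. It
# leaves out the project's config, CacheManager, learning-rate cache and
# multi-GPU handling; the input shape and class count are illustrative
# assumptions.
from keras import backend, optimizers
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, Dropout, Flatten, Dense

nclasses = 2                                    # stand-in for self._ds.nclasses
width, height, channels = 28, 28, 1             # stand-in for _check_input_shape()
if backend.image_data_format() == 'channels_first':
    input_shape = (channels, height, width)
else:
    input_shape = (height, width, channels)

model = Sequential()
model.add(Convolution2D(32, kernel_size=(3, 3), activation='relu',
                        input_shape=input_shape))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(nclasses, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.Adadelta(),
              metrics=['accuracy'])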
Example #3
class Inception(GenericModel):
    """
    Implements abstract methods from GenericModel.
    Model is the same as in: https://github.com/keras-team/keras-applications/blob/master/keras_applications/inception_resnet_v2.py
    Adapted to provide a Bayesian model
    """
    def __init__(self, config, ds, name=None):
        super().__init__(config, ds, name=name)
        if name is None:
            self.name = "Inception"
        self._modelCache = "{0}-model.h5".format(self.name)
        self._weightsCache = "{0}-weights.h5".format(self.name)
        self._mgpu_weightsCache = "{0}-mgpu-weights.h5".format(self.name)

        self.cache_m = CacheManager()
        self.cache_m.registerFile(
            os.path.join(config.model_path, self._modelCache),
            self._modelCache)
        self.cache_m.registerFile(
            os.path.join(config.weights_path, self._weightsCache),
            self._weightsCache)
        self.cache_m.registerFile(
            os.path.join(config.weights_path, self._mgpu_weightsCache),
            self._mgpu_weightsCache)

        self.single = None
        self.parallel = None

    def get_model_cache(self):
        """
        Returns path to model cache
        """
        return self.cache_m.fileLocation(self._modelCache)

    def get_weights_cache(self):
        """
        Returns path to weights cache
        """
        return self.cache_m.fileLocation(self._weightsCache)

    def get_mgpu_weights_cache(self):
        """
        Returns path to multi-GPU weights cache
        """
        return self.cache_m.fileLocation(self._mgpu_weightsCache)

    def get_npweights_cache(self, add_ext=False):
        """
        Returns path to the numpy weights cache.

        @param add_ext <boolean>: add numpy file extension to file name.
        """
        if add_ext:
            return "{}.npy".format(
                self.cache_m.fileLocation(self._weightsCache).split('.')[0])
        else:
            return self.cache_m.fileLocation(self._weightsCache).split('.')[0]

    def get_npmgpu_weights_cache(self, add_ext=False):
        """
        Returns path to the numpy multi-GPU weights cache

        @param add_ext <boolean>: add numpy file extension to file name.
        """
        if add_ext:
            return "{}.npy".format(
                self.cache_m.fileLocation(
                    self._mgpu_weightsCache).split('.')[0])
        else:
            return self.cache_m.fileLocation(
                self._mgpu_weightsCache).split('.')[0]

    def register_ensemble(self, m):
        self._model_n = m
        self._weightsCache = "{0}-EM{1}-weights.h5".format(self.name, m)
        self._mgpu_weightsCache = "{0}-EM{1}-mgpu-weights.h5".format(
            self.name, m)
        self._modelCache = "{0}-EM{1}-model.h5".format(self.name, m)

        self.cache_m.registerFile(
            os.path.join(self._config.weights_path, self._weightsCache),
            self._weightsCache)
        self.cache_m.registerFile(
            os.path.join(self._config.weights_path, self._mgpu_weightsCache),
            self._mgpu_weightsCache)
        self.cache_m.registerFile(
            os.path.join(self._config.model_path, self._modelCache),
            self._modelCache)

    def return_model_n(self):
        if hasattr(self, '_model_n'):
            return self._model_n
        else:
            return -1

    def build(self, **kwargs):
        """
        @param pre_trained <boolean>: returned model should be pre-trained or not
        @param data_size <int>: size of the training dataset
        """
        model, parallel_model = self._build(**kwargs)

        self.single = model
        self.parallel = parallel_model

        return (model, parallel_model)

    def build_extractor(self, **kwargs):
        """
        Builds a feature extractor.
        
        Weights should be loaded by caller!

        Keyword arguments:
        preload_w: return model with weights already loaded? True -> Yes
        parallel: return parallel model (overrides the gpu_count check)? True -> Yes
        """
        #Weight loading for feature extraction is done later by the requesting party
        kwargs['preload_w'] = False

        if 'parallel' in kwargs and not kwargs['parallel']:
            s, p = self._build(**kwargs)
            return (s, None)
        else:
            return self._build(**kwargs)

    def build_ensemble(self, **kwargs):
        """
        Builds an ensemble of M Inception models.

        Weights are loaded here because of the way ensembles should be built.

        Default build: averages the outputs of the corresponding softmax layers

        @param npfile <boolean>: loads weights from numpy files
        """

        npfile = kwargs.get('npfile', False)

        s_models = []
        p_models = []
        for m in range(self._config.emodels):
            self.register_ensemble(m)
            single, parallel = self._build(**kwargs)

            #Load weights for the member registered above (paths were updated
            #by register_ensemble)
            if parallel is not None:
                if npfile and hasattr(self, 'get_npmgpu_weights_cache'):
                    parallel.set_weights(
                        np.load(self.get_npmgpu_weights_cache(),
                                allow_pickle=True))
                    if self._config.info:
                        print("[Inception] loaded ensemble weights: {}".format(
                            self.get_npmgpu_weights_cache()))
                elif os.path.isfile(self.get_mgpu_weights_cache()):
                    parallel.load_weights(self.get_mgpu_weights_cache(),
                                          by_name=True)
                    if self._config.info:
                        print("[Inception] loaded ensemble weights: {}".format(
                            self.get_mgpu_weights_cache()))

            if npfile and hasattr(self, 'get_npweights_cache'):
                single.set_weights(
                    np.load(self.get_npweights_cache(), allow_pickle=True))
                if self._config.info:
                    print("[Inception] loaded ensemble weights: {}".format(
                        self.get_npweights_cache()))
            elif os.path.isfile(self.get_weights_cache()):
                single.load_weights(self.get_weights_cache(), by_name=True)
            else:
                if self._config.info:
                    print(
                        "[Inception] Could not load ensemble weights (model {})"
                        .format(m))
                single = None
            s_models.append(single)
            p_models.append(parallel)

        s_inputs = [inp for s in s_models for inp in s.inputs]
        s_outputs = [out for s in s_models for out in s.outputs]
        p_models = [p for p in p_models if p is not None]
        if len(p_models) > 0:
            p_inputs = [inp for p in p_models for inp in p.inputs]
            p_outputs = [out for p in p_models for out in p.outputs]
        else:
            p_inputs = None
            p_outputs = None

        #Build the ensemble output from individual models
        s_model, p_model = None, None
        ##Single GPU model
        x = Average()(s_outputs)
        s_model = Model(inputs=s_inputs, outputs=x)

        ##Parallel model
        if p_inputs is not None:
            x = Average()(p_outputs)
            p_model = Model(inputs=p_inputs, outputs=x)

        return s_model, p_model

    def _build(self, **kwargs):

        width, height, channels = self._check_input_shape()

        if 'data_size' in kwargs:
            self.data_size = kwargs['data_size']

        training = kwargs.get('training', True)
        feature = kwargs.get('feature', False)
        preload = kwargs.get('preload_w', True)

        allocated_gpus = kwargs.get('allocated_gpus')
        if allocated_gpus is None:
            allocated_gpus = self._config.gpu_count

        if backend.image_data_format() == 'channels_first':
            input_shape = (channels, height, width)
        else:
            input_shape = (height, width, channels)

        self.cache_m = CacheManager()

        model = self._build_architecture(input_shape, training, feature,
                                         preload)

        #Reuse the learning rate saved by a previous training run, if any
        lr_cache = "{0}_learning_rate.txt".format(self.name)
        self.cache_m.registerFile(os.path.join(self._config.cache, lr_cache),
                                  lr_cache)
        l_rate = 0.00005
        if os.path.isfile(self.cache_m.fileLocation(
                lr_cache)) and not self._config.new_net:
            l_rate = float(self.cache_m.read(lr_cache))
            if self._config.info:
                print("Found previous learning rate: {0}".format(l_rate))

        #opt = optimizers.SGD(lr=l_rate, decay=1.5e-4, momentum=0.9, nesterov=True)
        opt = optimizers.Adam(lr=l_rate)
        #opt = optimizers.Adadelta(lr=l_rate)

        #Return parallel model if multiple GPUs are available
        parallel_model = None

        if allocated_gpus > 1:
            with tf.device('/cpu:0'):
                model.compile(loss='categorical_crossentropy',
                              optimizer=opt,
                              metrics=['accuracy'])
            parallel_model = multi_gpu_model(model, gpus=allocated_gpus)
            parallel_model.compile(
                loss='categorical_crossentropy',
                optimizer=opt,
                metrics=['accuracy'],
                #options=p_opt,
                #run_metadata=p_mtd
            )
        else:
            model.compile(
                loss='categorical_crossentropy',
                optimizer=opt,
                metrics=['accuracy'],
                #options=p_opt,
                #run_metadata=p_mtd
            )

        return (model, parallel_model)

    def _build_architecture(self,
                            input_shape,
                            training=None,
                            feature=False,
                            preload=True):
        from . import inception_resnet_v2

        kwargs = {
            'training': training,
            'feature': feature,
            'custom_top': False,
            'preload': preload,
            'batch_n': self._config.gpu_count <= 1
        }

        inp = Input(shape=input_shape)

        inception_body = inception_resnet_v2.InceptionResNetV2(
            include_top=False,
            weights='imagenet',
            input_tensor=inp,
            input_shape=input_shape,
            pooling='avg',
            classes=self._ds.nclasses,
            **kwargs)

        return inception_body
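# A self-contained sketch of the softmax-averaging pattern build_ensemble uses:
# the outputs of M independently built models are merged with an Average layer
# into a single Keras Model. Tiny dense members and toy shapes stand in for
# the Inception networks built by _build; all names and sizes here are
# illustrative.
from keras.layers import Input, Dense, Average
from keras.models import Model

def make_member(i, input_dim=16, nclasses=2):
    inp = Input(shape=(input_dim,), name="in_{}".format(i))
    out = Dense(nclasses, activation='softmax',
                name="softmax_{}".format(i))(inp)
    return Model(inputs=inp, outputs=out)

members = [make_member(i) for i in range(3)]
e_inputs = [inp for m in members for inp in m.inputs]
e_outputs = [out for m in members for out in m.outputs]
ensemble = Model(inputs=e_inputs, outputs=Average()(e_outputs))
ensemble.compile(loss='categorical_crossentropy', optimizer='adam',
                 metrics=['accuracy'])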
Example #4
class VGG16(GenericModel):
    """
    Implements abstract methods from GenericModel.
    Produces a VGG16 model as implemented by Keras, with convolutional layers.
    FC layers are substituted by Conv2D layers, as defined in:
    https://github.com/ALSM-PhD/quip_classification/blob/master/NNFramework_TF/sa_networks/vgg.py
    """
    def __init__(self, config, ds, name=None):
        super().__init__(config, ds, name=name)
        if name is None:
            self.name = "VGG16_A1"
        self._modelCache = "{0}-model.h5".format(self.name)
        self._weightsCache = "{0}-weights.h5".format(self.name)
        self._mgpu_weightsCache = "{0}-mgpu-weights.h5".format(self.name)
        self.cache_m = CacheManager()
        self.cache_m.registerFile(
            os.path.join(config.model_path, self._modelCache),
            self._modelCache)
        self.cache_m.registerFile(
            os.path.join(config.weights_path, self._weightsCache),
            self._weightsCache)
        self.cache_m.registerFile(
            os.path.join(config.weights_path, self._mgpu_weightsCache),
            self._mgpu_weightsCache)

    def get_model_cache(self):
        """
        Returns path to model cache
        """
        return self.cache_m.fileLocation(self._modelCache)

    def get_weights_cache(self):
        """
        Returns path to weights cache
        """
        return self.cache_m.fileLocation(self._weightsCache)

    def get_mgpu_weights_cache(self):
        """
        Returns path to multi-GPU weights cache
        """
        return self.cache_m.fileLocation(self._mgpu_weightsCache)

    def build(self, **kwargs):
        """
        Returns a VGG16 model instance; the final fully-connected layers are substituted by Conv2Ds
        
        @param pre_trained <boolean>: returned model should be pre-trained or not
        """
        width, height, channels = self._check_input_shape()

        if backend.image_data_format() == 'channels_first':
            input_shape = (channels, height, width)
        else:
            input_shape = (height, width, channels)

        if 'data_size' in kwargs:
            self.data_size = kwargs['data_size']

        self.cache_m = CacheManager()

        model = self._build_architecture(input_shape)

        #Reuse the learning rate saved by a previous training run, if any
        lr_cache = "{0}_learning_rate.txt".format(self.name)
        self.cache_m.registerFile(os.path.join(self._config.cache, lr_cache),
                                  lr_cache)
        l_rate = 0.0005
        if os.path.isfile(self.cache_m.fileLocation(
                lr_cache)) and not self._config.new_net:
            l_rate = float(self.cache_m.read(lr_cache))
            if self._config.info:
                print("Found previous learning rate: {0}".format(l_rate))

        sgd = optimizers.SGD(lr=l_rate,
                             decay=1.5e-4,
                             momentum=0.9,
                             nesterov=True)
        #adam = optimizers.Adam(lr = l_rate)

        #Return parallel model if multiple GPUs are available
        parallel_model = None

        if self._config.gpu_count > 1:
            with tf.device('/cpu:0'):
                model.compile(loss='categorical_crossentropy',
                              optimizer=sgd,
                              metrics=['accuracy'])

            parallel_model = multi_gpu_model(model,
                                             gpus=self._config.gpu_count)
            parallel_model.compile(
                loss='categorical_crossentropy',
                optimizer=sgd,
                metrics=['accuracy'],
                #options=p_opt,
                #run_metadata=p_mtd
            )
        else:
            model.compile(
                loss='categorical_crossentropy',
                optimizer=sgd,
                metrics=['accuracy'],
                #options=p_opt,
                #run_metadata=p_mtd
            )

        self.single = model
        self.parallel = parallel_model

        return (model, parallel_model)

    def _build_architecture(self, input_shape):
        original_vgg16 = vgg16.VGG16(
            weights=self.cache_m.fileLocation('vgg16_weights_notop.h5'),
            include_top=False,
            input_shape=input_shape)

        #Freeze initial layers, except for the last 3:
        #for layer in original_vgg16.layers[:-2]:
        #    layer.trainable = False

        model = Sequential()
        model.add(original_vgg16)
        model.add(
            Convolution2D(4096, (7, 7),
                          strides=1,
                          padding='valid',
                          kernel_initializer='he_normal'))
        model.add(Activation('relu'))
        model.add(Dropout(0.75))
        model.add(
            Convolution2D(4096, (1, 1),
                          strides=1,
                          padding='valid',
                          kernel_initializer='he_normal'))
        model.add(Activation('relu'))
        model.add(Dropout(0.75))
        model.add(
            Convolution2D(self._ds.nclasses, (1, 1),
                          strides=1,
                          padding='valid',
                          kernel_initializer='he_normal'))
        model.add(Flatten())
        model.add(Dense(self._ds.nclasses))
        model.add(Activation('softmax'))

        return model
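# A standalone sketch of the learning-rate caching pattern the build() methods
# above rely on: if a previous run saved its learning rate to a small text
# file, reuse it; otherwise fall back to the default. Plain file I/O stands in
# for the project's CacheManager; the helper names are hypothetical.
import os

def load_cached_lr(cache_dir, name, default=0.0005, new_net=False):
    lr_cache = os.path.join(cache_dir, "{0}_learning_rate.txt".format(name))
    if os.path.isfile(lr_cache) and not new_net:
        with open(lr_cache) as fd:
            return float(fd.read().strip())
    return default

def save_lr(cache_dir, name, l_rate):
    lr_cache = os.path.join(cache_dir, "{0}_learning_rate.txt".format(name))
    with open(lr_cache, 'w') as fd:
        fd.write(str(l_rate))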
Example #5
    def run_test(self, model, x_test=None, y_test=None, load_full=True):
        """
        This should be executed after a model has been trained
        """

        cache_m = CacheManager()
        split = None
        if os.path.isfile(cache_m.fileLocation('split_ratio.pik')):
            split = cache_m.load('split_ratio.pik')
        else:
            print(
                "[Predictor] A previously trained model and dataset should exist. No previously defined spliting found."
            )
            return Exitcodes.RUNTIME_ERROR

        #Data passed in as parameters takes priority; if None, load metadata as configured
        if x_test is None or y_test is None:
            if self._config.testdir is None:
                #Load sampled data if required by command line
                if self._config.sample < 1.0:
                    _, _, (x_test, y_test) = self._ds.split_metadata(
                        split=split,
                        data=self._ds.sample_metadata(self._config.sample))
                else:
                    _, _, (x_test, y_test) = self._ds.split_metadata(split)
            else:
                x_test, y_test = self._ds._run_dir(self._config.testdir)

        if self._config.verbose > 0:
            unique, count = np.unique(y_test, return_counts=True)
            l_count = dict(zip(unique, count))
            if len(unique) > 2:
                print("Test items:")
                print("\n".join([
                    "label {0}: {1} items".format(key, l_count[key])
                    for key in unique
                ]))
            else:
                if 1 not in l_count:
                    l_count[1] = 0
                print(
                    "Test labels: {0} are 0; {1} are 1;\n - {2:.2f} are positives"
                    .format(l_count[0], l_count[1],
                            (l_count[1] / (l_count[0] + l_count[1]))))
            print("Test set: {} items".format(len(y_test)))

        X, Y = self._ds.load_data(data=(x_test, y_test), keepImg=self._keep)
        if self._config.verbose > 1:
            print("Y original ({1}):\n{0}".format(Y, Y.shape))
        Y = to_categorical(Y, self._ds.nclasses)

        #Session setup: the config must be passed when the session is created;
        #assigning to an existing session's .config attribute has no effect
        ses_config = tf.ConfigProto(
            device_count={
                "CPU": self._config.cpu_count,
                "GPU": self._config.gpu_count
            },
            intra_op_parallelism_threads=self._config.cpu_count
            if self._config.gpu_count == 0 else self._config.gpu_count,
            inter_op_parallelism_threads=self._config.cpu_count
            if self._config.gpu_count == 0 else self._config.gpu_count,
            log_device_placement=self._verbose > 1)
        sess = tf.Session(config=ses_config)
        K.set_session(sess)

        #During test phase multi-gpu mode is not used (maybe done later)
        if self._ensemble:
            #Weights should be loaded during ensemble build
            if hasattr(model, 'build_ensemble'):
                #build_ensemble returns (single, parallel); use the single model here
                pred_model, _ = model.build_ensemble(training=False, npfile=True)
            else:
                if self._config.info:
                    print(
                        '[Predictor] Model not prepared to build ensembles, implement or choose other model'
                    )
                return None
        elif load_full and os.path.isfile(model.get_model_cache()):
            try:
                pred_model = load_model(model.get_model_cache())
                if self._config.info:
                    print("Model loaded from: {0}".format(
                        model.get_model_cache()))
            except ValueError:
                pred_model, _ = model.build(training=False, preload_w=False)
                pred_model.load_weights(model.get_weights_cache())
                if self._config.info:
                    print("Model weights loaded from: {0}".format(
                        model.get_weights_cache()))
        elif os.path.isfile(model.get_weights_cache()):
            pred_model, _ = model.build(training=False, preload_w=False)
            pred_model.load_weights(model.get_weights_cache())
            if self._config.info:
                print("Model weights loaded from: {0}".format(
                    model.get_weights_cache()))

        else:
            if self._config.info:
                print("No trained model or weights file found")
            return None

        bsize = self._config.batch_size
        #Number of batches needed to cover the whole test set
        stp = int(np.ceil(len(X) / bsize))

        image_generator = ImageDataGenerator(
            samplewise_center=self._config.batch_norm,
            samplewise_std_normalization=self._config.batch_norm)

        if self._ensemble:
            if self._config.tdim is not None:
                fix_dim = self._config.tdim
            else:
                fix_dim = self._ds.get_dataset_dimensions()[0][
                    1:]  #Only smallest image dimensions matter here
            test_generator = SingleGenerator(
                dps=(X, Y),
                classes=self._ds.nclasses,
                dim=fix_dim,
                batch_size=self._config.batch_size,
                image_generator=image_generator,
                extra_aug=self._config.augment,
                shuffle=False,
                verbose=self._verbose,
                input_n=self._config.emodels)
        else:
            test_generator = image_generator.flow(x=X,
                                                  y=Y,
                                                  batch_size=bsize,
                                                  shuffle=False)

        if self._config.progressbar:
            l = tqdm(desc="Making predictions...", total=stp)

        Y_pred = np.zeros((len(X), self._ds.nclasses), dtype=np.float32)
        for i in range(stp):
            start_idx = i * bsize
            example = test_generator.next()
            Y_pred[start_idx:start_idx + bsize] = pred_model.predict_on_batch(
                example[0])
            if self._config.progressbar:
                l.update(1)
            elif self._config.info:
                print("Batch prediction ({0}/{1})".format(i, stp))
            if self._config.verbose > 1:
                if not np.array_equal(Y[start_idx:start_idx + bsize],
                                      example[1]):
                    print(
                        "Datasource label ({0}) and batch label ({1}) differ".
                        format(Y[start_idx:start_idx + bsize], example[1]))

        del X
        del test_generator

        if self._config.progressbar:
            l.close()

        y_pred = np.argmax(Y_pred, axis=1)
        expected = np.argmax(Y, axis=1)

        if self._config.verbose > 0:
            if self._config.verbose > 1:
                np.set_printoptions(threshold=np.inf)
                print("Predicted probs ({1}):\n{0}".format(
                    Y_pred, Y_pred.shape))
            #print("Y ({1}):\n{0}".format(Y,Y.shape))
            print("expected ({1}):\n{0}".format(expected, expected.shape))
            print("Predicted ({1}):\n{0}".format(y_pred, y_pred.shape))

        #Save predictions
        cache_m.dump((expected, Y_pred, self._ds.nclasses), 'test_pred.pik')

        #Output metrics
        print_prediction(self._config)
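# A self-contained sketch of the batched prediction loop in run_test: the step
# count is the ceiling of len(X)/batch_size, and each batch of predictions is
# written into a preallocated slice of Y_pred. A trivial predict function and
# random data stand in for pred_model and the Keras generator; everything here
# is illustrative.
import numpy as np

nclasses, bsize = 2, 4
X = np.random.rand(10, 8).astype(np.float32)   # 10 samples, 8 features each

def fake_predict_on_batch(batch):
    #Stand-in for pred_model.predict_on_batch: uniform class probabilities
    return np.full((len(batch), nclasses), 1.0 / nclasses, dtype=np.float32)

stp = int(np.ceil(len(X) / bsize))             # 3 batches for 10 samples
Y_pred = np.zeros((len(X), nclasses), dtype=np.float32)
for i in range(stp):
    start_idx = i * bsize
    batch = X[start_idx:start_idx + bsize]
    Y_pred[start_idx:start_idx + bsize] = fake_predict_on_batch(batch)

y_pred = np.argmax(Y_pred, axis=1)             # hard class predictions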