def _predict(self,
                 data_generator_function,
                 steps_per_epoch,
                 include_datum=True):
        # Always request the datum from the generator; it is only returned
        # to the caller when include_datum is True.
        data_generator = data_generator_function(include_datum=True)
        enqueuer = GeneratorEnqueuer(data_generator, pickle_safe=False)
        enqueuer.start(workers=self._WORKERS, max_q_size=self._MAX_Q_SIZE)

        caption_results = []
        datum_results = []
        for _ in tqdm(range(steps_per_epoch)):
            generator_output = None
            while enqueuer.is_running():
                if not enqueuer.queue.empty():
                    generator_output = enqueuer.queue.get()
                    break
                else:
                    sleep(self._WAIT_TIME)

            X, y, datum_batch = generator_output
            captions_pred_str = self._predict_batch(X, y)
            caption_results += captions_pred_str
            datum_results += datum_batch

        enqueuer.stop()

        if include_datum:
            return zip(caption_results, datum_results)
        else:
            return caption_results
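All of these snippets share the same queue-polling idiom. Here is a minimal, self-contained sketch of it, assuming the Keras 2.0-era GeneratorEnqueuer API used throughout (constructor taking pickle_safe, start(workers, max_q_size), and a public queue); the import path and the toy generator are assumptions for illustration:

import time
from keras.utils.data_utils import GeneratorEnqueuer

def counting_generator():
    # Toy infinite generator standing in for a real batch generator.
    i = 0
    while True:
        yield i
        i += 1

enqueuer = GeneratorEnqueuer(counting_generator(), pickle_safe=False)
enqueuer.start(workers=1, max_q_size=10)
try:
    for _ in range(3):
        batch = None
        # Poll until a batch is ready or the workers stop.
        while enqueuer.is_running():
            if not enqueuer.queue.empty():
                batch = enqueuer.queue.get()
                break
            time.sleep(0.01)
        print(batch)
finally:
    enqueuer.stop()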
Example #3
def get_batch(num_workers=10, **kwargs):
    # Initialize before the try block so the finally clause never hits an
    # unbound name if GeneratorEnqueuer construction raises.
    enqueuer = None
    try:
        enqueuer = GeneratorEnqueuer(generator(**kwargs), pickle_safe=True)
        enqueuer.start(max_q_size=24, workers=num_workers)
        generator_output = None
        while True:
            while enqueuer.is_running():
                if not enqueuer.queue.empty():
                    # print(enqueuer.queue.qsize())
                    generator_output = enqueuer.queue.get()
                    break
                else:
                    time.sleep(0.01)
            yield generator_output
            generator_output = None
    finally:
        if enqueuer is not None:
            enqueuer.stop()
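For context, get_batch is consumed as an ordinary iterator; the keyword argument below (batch_size) is a hypothetical placeholder for whatever generator(**kwargs) actually accepts:

# Hypothetical usage; batch_size is just forwarded to generator(**kwargs).
batches = get_batch(num_workers=4, batch_size=32)
batch = next(batches)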
Example #5
    def predict(self, test_gen, tag='pred'):
        if self.cf.pred_model:
            # TODO model predict method for other tasks
            print('\n > Predicting the model...')

            if self.cf.problem_type == 'segmentation':
                # Load best trained model
                self.model.load_weights(self.cf.weights_file)

                # Create output directory
                if not os.path.exists(
                        os.path.join(self.cf.savepath, 'Predictions')):
                    os.makedirs(os.path.join(self.cf.savepath, 'Predictions'))

                # Create a data generator
                enqueuer = GeneratorEnqueuer(test_gen, wait_time=0.05)
                enqueuer.start(workers=1, max_queue_size=1)
                output_generator = enqueuer.get()

                # Process the dataset
                start_time = time.time()
                for i in range(
                        int(
                            math.ceil(self.cf.dataset.n_images_test /
                                      float(self.cf.batch_size_test)))):

                    # Get data for this minibatch
                    data = next(output_generator)
                    x_true = data[0]
                    y_true = data[1].astype('int32')

                    # Get prediction for this minibatch
                    y_pred = self.model.predict(x_true)

                    # Reshape y_true and compute the y_pred argmax
                    if K.image_dim_ordering() == 'th':
                        print(
                            'Predict method not implemented for th dim ordering.'
                        )
                        return
                    else:
                        # Find the most probable class of each pixel
                        y_pred = np.argmax(y_pred, axis=2)
                        y_true = np.argmax(y_true, axis=2)

                        # Reshape from (?,172800) to (?, 360, 480)
                        # print('Acc with shapes y_pred={} and y_true{}: {}'.format(y_pred.shape,y_true.shape,np.mean(np.ravel(y_pred==y_true))))
                        y_true = np.reshape(y_true,
                                            (x_true.shape[0], x_true.shape[1],
                                             x_true.shape[2]))
                        y_pred = np.reshape(y_pred,
                                            (x_true.shape[0], x_true.shape[1],
                                             x_true.shape[2]))
                        # print('Acc with shapes y_pred={} and y_true{}: {}'.format(y_pred.shape,y_true.shape,np.mean(np.ravel(y_pred==y_true))))

                    # Save output images
                    save_img3(x_true, y_true, y_pred,
                              os.path.join(self.cf.savepath, 'Predictions'), 0,
                              self.cf.dataset.color_map,
                              self.cf.dataset.classes, tag + str(i),
                              self.cf.dataset.void_class)

                # Stop data generator
                if enqueuer is not None:
                    enqueuer.stop()
            else:
                print('Predict method not implemented.')
                return

            total_time = time.time() - start_time
            fps = float(self.cf.dataset.n_images_test) / total_time
            s_p_f = total_time / float(self.cf.dataset.n_images_test)
            print('   Predicting time: {}. FPS: {}. Seconds per Frame: {}'.
                  format(total_time, fps, s_p_f))
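Unlike the surrounding examples, this one targets a slightly newer Keras API: wait_time moves into the constructor, start() takes max_queue_size instead of max_q_size, and enqueuer.get() returns a generator that hides the queue polling. A sketch of that variant, under the same assumptions:

# Newer-API variant (assumed Keras >= 2.0.5): get() does the waiting itself.
enqueuer = GeneratorEnqueuer(test_gen, wait_time=0.05)
enqueuer.start(workers=1, max_queue_size=1)
output_generator = enqueuer.get()
try:
    batch = next(output_generator)  # blocks until a batch is available
finally:
    enqueuer.stop()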
Example #6
def fit_generator(self, generator,
                  steps_per_epoch,
                  epochs=1,
                  verbose=1,
                  callbacks=None,
                  validation_data=None,
                  validation_steps=None,
                  class_weight=None,
                  max_q_size=10,
                  workers=1,
                  pickle_safe=False,
                  initial_epoch=0):
    """Fits the model on data yielded batch-by-batch by a Python generator.

    The generator is run in parallel to the model, for efficiency.
    For instance, this allows you to do real-time data augmentation
    on images on CPU in parallel to training your model on GPU.

    # Arguments
        generator: a generator.
            The output of the generator must be either
            - a tuple (inputs, targets)
            - a tuple (inputs, targets, sample_weights).
            All arrays should contain the same number of samples.
            The generator is expected to loop over its data
            indefinitely. An epoch finishes when `steps_per_epoch`
            samples have been seen by the model.
        steps_per_epoch: Total number of steps (batches of samples)
            to yield from `generator` before declaring one epoch
            finished and starting the next epoch. It should typically
            be equal to the number of unique samples of your dataset
            divided by the batch size.
        epochs: integer, total number of iterations on the data.
        verbose: verbosity mode, 0, 1, or 2.
        callbacks: list of callbacks to be called during training.
        validation_data: this can be either
            - a generator for the validation data
            - a tuple (inputs, targets)
            - a tuple (inputs, targets, sample_weights).
        validation_steps: Only relevant if `validation_data`
            is a generator. Total number of steps (batches of samples)
            to yield from `generator` before stopping.
        class_weight: dictionary mapping class indices to a weight
            for the class.
        max_q_size: maximum size for the generator queue
        workers: maximum number of processes to spin up
            when using process based threading
        pickle_safe: if True, use process based threading.
            Note that because
            this implementation relies on multiprocessing,
            you should not pass
            non picklable arguments to the generator
            as they can't be passed
            easily to children processes.
        initial_epoch: epoch at which to start training
            (useful for resuming a previous training run)

    # Returns
        A `History` object.

    # Example

    ```python
        def generate_arrays_from_file(path):
            while 1:
                f = open(path)
                for line in f:
                    # create numpy arrays of input data
                    # and labels, from each line in the file
                    x1, x2, y = process_line(line)
                    yield ({'input_1': x1, 'input_2': x2}, {'output': y})
                f.close()

        model.fit_generator(generate_arrays_from_file('/my_file.txt'),
                            steps_per_epoch=10000, epochs=10)
    ```

    # Raises
        ValueError: In case the generator yields
            data in an invalid format.
    """
    wait_time = 0.01  # in seconds
    epoch = initial_epoch

    do_validation = bool(validation_data)
    self._make_train_function()
    if do_validation:
        self._make_test_function()

    # python 2 has 'next', 3 has '__next__'
    # avoid any explicit version checks
    val_gen = (hasattr(validation_data, 'next') or
               hasattr(validation_data, '__next__'))
    if val_gen and not validation_steps:
        raise ValueError('When using a generator for validation data, '
                         'you must specify a value for '
                         '`validation_steps`.')

    out_labels = self.metrics_names
    callback_metrics = out_labels + ['val_' + n for n in out_labels]

    # prepare callbacks
    self.history = cbks.History()
    callbacks = [cbks.BaseLogger()] + (callbacks or []) + [self.history]
    if verbose:
        callbacks += [cbks.ProgbarLogger(count_mode='steps')]
    callbacks = cbks.CallbackList(callbacks)

    # it's possible to callback a different model than self:
    if hasattr(self, 'callback_model') and self.callback_model:
        callback_model = self.callback_model
    else:
        callback_model = self
    callbacks.set_model(callback_model)
    callbacks.set_params({
        'epochs': epochs,
        'steps': steps_per_epoch,
        'verbose': verbose,
        'do_validation': do_validation,
        'metrics': callback_metrics,
    })
    callbacks.on_train_begin()

    if do_validation and not val_gen:
        if len(validation_data) == 2:
            val_x, val_y = validation_data
            val_sample_weight = None
        elif len(validation_data) == 3:
            val_x, val_y, val_sample_weight = validation_data
        else:
            raise ValueError('validation_data should be a tuple '
                             '`(val_x, val_y, val_sample_weight)` '
                             'or `(val_x, val_y)`. Found: ' +
                             str(validation_data))
        val_x, val_y, val_sample_weights = self._standardize_user_data(
            val_x, val_y, val_sample_weight)
        for cbk in callbacks:
            cbk.validation_data = val_x + [val_y, val_sample_weights]
    enqueuer = None

    try:
        enqueuer = GeneratorEnqueuer(generator, pickle_safe=pickle_safe)
        enqueuer.start(max_q_size=max_q_size, workers=workers)

        callback_model.stop_training = False
        while epoch < epochs:
            callbacks.on_epoch_begin(epoch)
            steps_done = 0
            batch_index = 0
            while steps_done < steps_per_epoch:
                generator_output = None
                while enqueuer.is_running() or not enqueuer.queue.empty():
                    if not enqueuer.queue.empty():
                        generator_output = enqueuer.queue.get()
                        break
                    else:
                        time.sleep(wait_time)

                # Added by ZZ: if the enqueuer has shut down without
                # yielding anything, skip this step instead of failing below.
                if not enqueuer.is_running() and enqueuer.queue.empty() and generator_output is None:
                    continue

                if not hasattr(generator_output, '__len__'):
                    raise ValueError('output of generator should be '
                                     'a tuple `(x, y, sample_weight)` '
                                     'or `(x, y)`. Found: ' +
                                     str(generator_output))
                if len(generator_output) == 2:
                    x, y = generator_output
                    sample_weight = None
                elif len(generator_output) == 3:
                    x, y, sample_weight = generator_output
                else:
                    raise ValueError('output of generator should be '
                                     'a tuple `(x, y, sample_weight)` '
                                     'or `(x, y)`. Found: ' +
                                     str(generator_output))
                # build batch logs
                batch_logs = {}
                if isinstance(x, list):
                    batch_size = x[0].shape[0]
                elif isinstance(x, dict):
                    batch_size = list(x.values())[0].shape[0]
                else:
                    batch_size = x.shape[0]
                batch_logs['batch'] = batch_index
                batch_logs['size'] = batch_size
                callbacks.on_batch_begin(batch_index, batch_logs)

                outs = self.train_on_batch(x, y,
                                           sample_weight=sample_weight,
                                           class_weight=class_weight)

                if not isinstance(outs, list):
                    outs = [outs]
                for l, o in zip(out_labels, outs):
                    batch_logs[l] = o

                callbacks.on_batch_end(batch_index, batch_logs)

                # Construct epoch logs.
                epoch_logs = {}
                batch_index += 1
                steps_done += 1

                # Epoch finished.
                if steps_done >= steps_per_epoch and do_validation:
                    if val_gen:
                        val_outs = self.evaluate_generator(
                            validation_data,
                            validation_steps,
                            max_q_size=max_q_size,
                            workers=workers,
                            pickle_safe=pickle_safe)
                    else:
                        # No need for try/except because
                        # data has already been validated.
                        val_outs = self.evaluate(
                            val_x, val_y,
                            batch_size=batch_size,
                            sample_weight=val_sample_weights,
                            verbose=0)
                    if not isinstance(val_outs, list):
                        val_outs = [val_outs]
                    # Same labels assumed.
                    for l, o in zip(out_labels, val_outs):
                        epoch_logs['val_' + l] = o

            callbacks.on_epoch_end(epoch, epoch_logs)
            epoch += 1
            if callback_model.stop_training:
                break

    finally:
        if enqueuer is not None:
            enqueuer.stop()

    callbacks.on_train_end()
    return self.history
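A hedged usage sketch for this fit_generator; make_batches is a hypothetical infinite batch generator, and the step counts are illustrative:

# Illustrative call; make_batches() is assumed to loop over its data forever.
history = model.fit_generator(
    make_batches('train.txt'),
    steps_per_epoch=1000,
    epochs=10,
    validation_data=make_batches('val.txt'),
    validation_steps=100,  # required whenever validation_data is a generator
    max_q_size=10,
    workers=2,
    pickle_safe=False)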
Example #7
    def predict(self, test_gen, tag='pred', prob=False, weights=None):
        if self.cf.pred_model:
            # TODO fix model predict method; the body below is disabled
            # until then.
            print('Predict method not implemented.')
            return
            print('\n > Predicting the model...')
            # Load best trained model
            # self.model.load_weights(os.path.join(self.cf.savepath, "weights.hdf5"))
            if weights is None:
                self.model.load_weights(self.cf.weights_file)
            else:
                self.model.load_weights(weights)

            # Create a data generator
            enqueuer = GeneratorEnqueuer(test_gen)
            enqueuer.start(max_q_size=1, nb_worker=1)
            #            data_gen_queue, _stop, _generator_threads = GeneratorEnqueuer(test_gen, max_q_size=1)

            # Process the dataset
            start_time = time.time()
            predictions = np.zeros(
                (self.cf.dataset.n_images_test, self.cf.target_size_test[0],
                 self.cf.target_size_test[1], self.cf.dataset.n_classes))
            true = np.zeros(
                (self.cf.dataset.n_images_test, self.cf.target_size_test[0],
                 self.cf.target_size_test[1], 1))

            #            for _ in range(int(math.ceil(self.cf.dataset.n_images_train/float(self.cf.batch_size_test)))):
            for i in range(
                    int(
                        math.ceil(self.cf.dataset.n_images_test /
                                  float(self.cf.batch_size_test)))):

                # Get data for this minibatch
                generator_output = None
                while enqueuer.is_running():
                    if not enqueuer.queue.empty():
                        generator_output = enqueuer.queue.get()
                        break
                    else:
                        time.sleep(0.01)

                x_true = generator_output[0]
                y_true = generator_output[1].astype('int32')

                # Get prediction for this minibatch
                y_pred = self.model.predict(x_true)

                # Compute the argmax
                if not prob:
                    y_pred = np.argmax(y_pred, axis=1)

                predictions[i:i + y_pred.shape[0]] = y_pred
                true[i:i + y_pred.shape[0]] = y_true

                # # Reshape y_true
                # y_true = np.reshape(y_true, (y_true.shape[0], y_true.shape[2],
                #                              y_true.shape[3]))
                #
                # save_img3(x_true, y_true, y_pred, self.cf.savepath, 0,
                #           self.cf.dataset.color_map, self.cf.dataset.classes, tag+str(_), self.cf.dataset.void_class)

            # Stop data generator
            if enqueuer is not None:
                enqueuer.stop()

            total_time = time.time() - start_time
            fps = float(self.cf.dataset.n_images_test) / total_time
            s_p_f = total_time / float(self.cf.dataset.n_images_test)
            print('   Predicting time: {}. FPS: {}. Seconds per Frame: {}'.
                  format(total_time, fps, s_p_f))
            return predictions, true, self.model.metrics_names
    def on_epoch_end(self, epoch, logs={}):
        self.nepoch = epoch
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))
        enqueuer = GeneratorEnqueuer(self.val_gen, pickle_safe=False)
        enqueuer.start(nb_worker=1, max_q_size=5, wait_time=0.05)
        plt.ioff()
        plt.plot(self.losses, 'b-')
        plt.plot(self.val_losses, 'c-')
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.ylim(0, 0.2)
        #plt.draw()
        plt.pause(0.02)
        plt.savefig(self.directory + '_loss.png')
        #print self.losses[0]
        #if self.count >100:
        #    self.count = 0
        #pprint(self.model, indent=2)

        # Get data for this minibatch
        data = None
        while enqueuer.is_running():
            if not enqueuer.queue.empty():
                data = enqueuer.queue.get()
                break
            else:
                plt.pause(0.05)
        #data = data_gen_queue.get()
        x_true = data[0]
        #print (x_true.shape)
        img = x_true[0].transpose(1, 2, 0)
        #            plt.imshow(img)
        #            plt.show()
        #            plt.pause(0.01)
        #            y_true = data[1]
        #            img = y_true[0].transpose(1,2,0)
        #            plt.imshow(img)
        #            plt.show()
        #            plt.pause(0.01)
        #print (y_true.shape)

        # Get prediction for this minibatch
        y_pred = self.model.predict(x_true)

        # Reshape y_true and compute the y_pred argmax
        #            if K.image_dim_ordering() == 'th':
        #                y_pred = np.argmax(y_pred, axis=1)
        #                y_true = np.array(y_true).transpose(0,3,1,2)
        #            else:
        #                y_pred = np.argmax(y_pred, axis=3)
        for i in range(y_pred.shape[0]):
            img = (y_pred[i] - np.min(y_pred[i]))

            img = (img / np.max(img)).transpose(1, 2, 0)
            #print (img.shape)
            #y_pred = y_pred[0]
            gt = np.array(x_true[i]).transpose(1, 2, 0)
            #print (gt.shape)
            #y_pred = y_pred.transpose(1,2,0)
            #y_gt = y_gt.transpose(1,2,0)
            result = np.concatenate((gt, img), axis=1)
            #print np.max(y_pred), np.min(y_pred), np.max(y_gt), np.min(y_gt)
            #plt.imshow(result)
            #plt.show()
            #pause(0.01)
            oname = os.path.join(self.directory,
                                 str(self.nepoch) + '_' + str(i) + '.jpg')
            #print oname
            #time.sleep(5)
            #self.count += 1
            cv2.imwrite(oname, cv2.cvtColor(result * 255, cv2.COLOR_RGB2BGR))


        # Stop data generator
        if enqueuer is not None:
            enqueuer.stop()
        '''
            val_samples =20
            pred = self.model.predict_generator(self.val_gen, val_samples)
            #plt.imshow(y_pred[0,:,:,:].transpose(1,2,0))
            #plt.show()
            #plt.pause(0.01)
            #print (y_pred.shape)
            #y_pred = self.model.predict(self.model.validation_data[0][self.count:self.count+1,:,:,:])
            #print y_pred.shape
            #print type(y_pred)
            for i in range(val_samples):
                y_pred = (pred[i,:,:,:] - np.min(pred[i,:,:,:]))
            
                y_pred = y_pred / np.max(y_pred)
                y_pred = y_pred[0]
                y_gt = model.validation_data[1][self.count]
                y_pred = y_pred.transpose(1,2,0)
                y_gt = y_gt.transpose(1,2,0)
                result = np.concatenate((y_gt,y_pred ), axis=1)
            #print np.max(y_pred), np.min(y_pred), np.max(y_gt), np.min(y_gt)
            #plt.imshow(result)
            #plt.show()
            #pause(0.01)
            oname = os.path.join(self.directory, str(self.count)+'_'+str(self.nepoch)+'_'+'.jpg')
            #print oname
            #time.sleep(5)
            self.count += 1
            cv2.imwrite(oname, result*255)
            '''
        return
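This on_epoch_end belongs on a keras.callbacks.Callback subclass; here is a minimal sketch of how it might be wired into training, with hypothetical val_gen and directory attributes (assumptions about what the original class stores, not confirmed by the source):

from keras.callbacks import Callback

class PlotPredictions(Callback):
    # Hypothetical wrapper for the method above.
    def __init__(self, val_gen, directory):
        super(PlotPredictions, self).__init__()
        self.val_gen = val_gen
        self.directory = directory
        self.losses = []
        self.val_losses = []
        self.nepoch = 0

    # ... on_epoch_end as defined above ...

model.fit_generator(train_gen, steps_per_epoch=100, epochs=10,
                    callbacks=[PlotPredictions(val_gen, 'plots')])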
            for _ in range(5):
                x_test, y_test = my_gen_train.next()
                y_pred = netG.predict(x_test)
                draw_batch_images(x_test, y_pred, index_pro=111, is_save=False)

                x_pred = netF.predict(y_test)
                draw_batch_images(x_pred, y_test, index_pro=222, is_save=False)
            exit()

        print("load total samples:" + str(data_total_len))
        nb_batches = int(data_total_len / batch_size)
        progress_bar = Progbar(target=nb_batches)

        try:
            enqueuer = GeneratorEnqueuer(my_gen_train, pickle_safe=pickle_safe)
            enqueuer.start(max_q_size=max_q_size, workers=workers)

            true_dis_sample = np.ones((batch_size, patch_size, patch_size, 1))
            false_dis_sample = np.zeros(
                (batch_size, patch_size, patch_size, 1))
            true_false_dis_sample = np.concatenate(
                [true_dis_sample, false_dis_sample])

            for epoch in range(nb_epochs):
                print('Epoch {} of {}'.format(epoch, nb_epochs))
                print("    ")

                for index in range(0, nb_batches):
                    progress_bar.update(index)
                    generator_output = None
                    while enqueuer.is_running():
                        if not enqueuer.queue.empty():
                            generator_output = enqueuer.queue.get()
                            break
                        else:
                            time.sleep(0.01)
Example #10
    def predict(self, test_gen, tag='pred', max_q_size=10, workers=1, pickle_safe=False, wait_time = 0.01):
        if self.cf.pred_model and test_gen is not None:
            print('\n > Predicting the model...')
            aux = 'image_result'
            result_path = os.path.join(self.cf.savepath, aux)
            if not os.path.exists(result_path):
                os.makedirs(result_path)
            # Load best trained model
            # self.model.load_weights(os.path.join(self.cf.savepath, "weights.hdf5"))
            self.model.load_weights(self.cf.weights_file)
            
            if self.cf.problem_type == 'detection':
                priors = self.cf.dataset.priors
                anchors = np.array(priors)
                thresh = 0.6
                nms_thresh = 0.3
                classes = self.cf.dataset.classes
                # Create a data generator
                data_gen_queue = GeneratorEnqueuer(test_gen, pickle_safe=pickle_safe)
                data_gen_queue.start(workers, max_q_size)
                # Process the dataset
                start_time = time.time()
                image_counter = 1
                # Iterate over the test set (n_images_test, matching the
                # FPS computation below, rather than n_images_train).
                for _ in range(int(math.ceil(self.cf.dataset.n_images_test/float(self.cf.batch_size_test)))):
                    data = None
                    while data_gen_queue.is_running():
                        if not data_gen_queue.queue.empty():
                            data = data_gen_queue.queue.get()
                            break
                        else:
                            time.sleep(wait_time)               
                    x_true = data[0]
                    y_true = data[1].astype('int32')
    
                    # Get prediction for this minibatch
                    y_pred = self.model.predict(x_true)
                    if self.cf.model_name == "yolo" or self.cf.model_name == "tiny-yolo" or self.cf.model_name == "yolt":
                        for i in range(len(y_pred)):
                            #Process the YOLO output to obtain final BBox per image                  
                            boxes = yolo_postprocess_net_out(y_pred[i], anchors, classes, thresh, nms_thresh)
                            #Draw the Bbox in the image to visualize
                            im = yolo_draw_detections(boxes, x_true[i], anchors, classes, thresh, nms_thresh)
                            out_name = os.path.join(result_path, 'img_' + str(image_counter).zfill(4)+ '.png')
                            scipy.misc.toimage(im).save(out_name)
                            image_counter = image_counter+1
                    elif self.cf.model_name == "ssd":
                        results = self.cf.bbox_util.detection_out(y_pred)
                        for j in range(len(results)):
                            # Parse the outputs.
                            if np.any(results[j]):
                                det_label = results[j][:, 0]
                                det_conf = results[j][:, 1]
                                det_xmin = results[j][:, 2]
                                det_ymin = results[j][:, 3]
                                det_xmax = results[j][:, 4]
                                det_ymax = results[j][:, 5]
                            
                                # Get detections with confidence higher than 0.6.
                                top_indices = [i for i, conf in enumerate(det_conf) if conf >= thresh]
                                top_conf = det_conf[top_indices]
                                top_label_indices = det_label[top_indices].tolist()
                                top_xmin = det_xmin[top_indices]
                                top_ymin = det_ymin[top_indices]
                                top_xmax = det_xmax[top_indices]
                                top_ymax = det_ymax[top_indices]
                                out_name = os.path.join(result_path, 'img_' + str(image_counter).zfill(4)+ '.png')
                                if top_indices:
                                    im = self.cf.bbox_util.ssd_draw_detections(top_conf, top_label_indices, top_xmin, top_ymin,
                                                                           top_xmax, top_ymax, x_true[j], classes, out_name)
                                
                                #scipy.misc.toimage(im).save(out_name)
                                #im.savefig(out_name)
                                image_counter = image_counter+1
                    else:
                        raise ValueError("No model name defined or valid: " + self.model_name)
                # Stop data generator
                data_gen_queue.stop()

            total_time = time.time() - start_time
            fps = float(self.cf.dataset.n_images_test) / total_time
            s_p_f = total_time / float(self.cf.dataset.n_images_test)
            print('   Predicting time: {}. FPS: {}. Seconds per Frame: {}'.format(total_time, fps, s_p_f))