def do(self):
        """Load the JSON config file and build the train graph.

        Verifies that the configured config file exists, loads it, and then
        delegates to ``build_train_graph`` (diagram is both shown and saved).

        :raises AssertionError: if the configured config file does not exist
        """
        save_diagram = True
        show_diagram = True
        config_file = self.config.get_data('config_file')

        # check config file (use the already-fetched local instead of
        # re-reading it from the config store a second time)
        if not check_if_file_exists(config_file):
            raise AssertionError('The given config file "%s" does not exist.' %
                                 config_file)

        # load config file
        self.start_timer('load json config file')
        self.config.load_json_from_config_file(config_file)
        self.finish_timer('load json config file')

        # build train graph
        self.build_train_graph(show_diagram, save_diagram)
    def do(self):
        """Run an endless evaluation service on the configured folder.

        Loads the config, verifies the best model file and the evaluation
        directory, optionally empties that directory (after asking the user),
        loads the model and then polls the folder forever: every file that
        appears is evaluated and deleted again.  Never returns; stop with
        Ctrl+C / SIGINT.

        :raises AssertionError: if the evaluation path is not a directory, or
            could not be emptied
        """
        # some configs
        show_image = True

        # load config file
        self.start_timer('load json config file')
        self.config.load_json_from_config_file(
            self.config.get_data('config_file'))
        self.finish_timer('load json config file')

        # rebuild model dict and persist it back to disk
        self.config.rebuild_model_dict()
        self.start_timer('save json config file')
        self.config.save_json()
        self.finish_timer('save json config file')

        # get some configs
        model_file = self.config.get_data('model_file_best')['model_file']
        evaluation_path = self.config.get_data('evaluation_path')

        # check model file
        check_if_file_exists(model_file)

        # the given evaluation path must be an existing directory
        if not os.path.isdir(evaluation_path):
            raise AssertionError(
                'The given evaluation path "%s" must be a direcory.' %
                evaluation_path)

        # if the directory already contains files, list them and ask the user
        # for permission to delete them (abort otherwise)
        number_of_files = sum(
            [len(files) for r, d, files in os.walk(evaluation_path)])
        if number_of_files > 0:
            question = 'The directory is not empty and contains %d files. Should I empty it?' % number_of_files
            negative = 'Cancelled by user.'

            # print files
            click.echo('\ncontent')
            click.echo('-------')
            for r, _, files in os.walk(evaluation_path):
                for file in files:
                    click.echo('- %s/%s' % (r, file))
            click.echo('-------')

            # ask to delete these files
            positive = self.query_yes_no('\n%s' % question)

            # cancel if the files should not be deleted
            if not positive:
                if negative is not None:
                    click.echo(negative)
                sys.exit()

            # clear folder
            clear_folder(evaluation_path)
            click.echo('Folder "%s" cleared.' % evaluation_path)

            # check given evaluation path again; clear_folder may have failed
            # silently, so insist on an empty directory before serving
            number_of_files = sum(
                [len(files) for r, d, files in os.walk(evaluation_path)])
            if number_of_files > 0:
                raise AssertionError(
                    'The given evaluation path "%s" must be empty.' %
                    evaluation_path)

        # load model
        self.start_timer('load model file %s' % model_file)
        model = self.load_model(model_file)
        self.finish_timer('load model file %s' % model_file)

        click.echo('')
        click.echo(
            'Ready for evaluation. Now add the images to be evaluated to the folder "%s"...'
            % self.config.get_data('evaluation_path'))
        click.echo('')

        # start service: poll the folder forever; each iteration handles at
        # most one file (the first one listdir returns)
        while True:
            # wait some time
            time.sleep(0.5)

            # get evaluation files
            files = os.listdir(evaluation_path)

            # predict if we found some images
            if len(files) > 0:
                evaluation_file = '%s/%s' % (evaluation_path, files[0])

                # skip entries that look like in-progress browser downloads
                # (Chrome appends ".crdownload" while still writing) and try
                # again on the next poll
                if 'crdownload' in evaluation_file:
                    continue

                self.evaluate_file(model, evaluation_file, show_image)
                os.remove(evaluation_file)
    def get_evaluation_data(self,
                            model_file,
                            data_path,
                            files_validation,
                            evaluate_type,
                            save_evaluation_file=True):
        """Evaluate every collected validation file and return the result dict.

        The result is cached as ``evaluation-file-<evaluate_type>.json`` next
        to the config file; if that cache already exists it is returned
        directly without loading the model or re-evaluating anything.

        :param model_file: path to the model file to load
        :param data_path: root path of the evaluation data
        :param files_validation: validation file list passed to
            ``get_evaluation_files``
        :param evaluate_type: tag embedded in the cache file name
        :param save_evaluation_file: write the JSON cache when True
        :return: dict with 'root_path', 'classes', per-file 'data' records and
            'top_k' correctly/incorrectly classified lists
        """
        # some needed variables
        root_path = os.path.dirname(self.config.get_data('config_file'))
        json_file = os.path.join(root_path,
                                 'evaluation-file-%s.json' % evaluate_type)

        # use already calculated json file if it exists
        if os.path.isfile(json_file):
            with open(json_file) as f:
                return json.load(f)

        # collect all evaluation files
        evaluation_files = self.get_evaluation_files(data_path,
                                                     files_validation)

        # data array
        data = {
            'root_path': root_path,
            'classes': [],
            'data': {},
            'top_k': {
                'correctly_classified_top_1': [],
                'incorrectly_classified_top_1': [],
                'correctly_classified_top_5': [],
                'incorrectly_classified_top_5': []
            }
        }

        # check model file
        check_if_file_exists(model_file)

        # load model
        self.start_timer('load model file "%s"' % model_file)
        model = self.load_model(model_file)
        self.finish_timer('load model file "%s"' % model_file)

        # evaluate all collected files
        # (the original code ran an identical loop beforehand whose results
        # were discarded, so every file was evaluated twice; that redundant
        # pass has been removed)
        for evaluation_file in evaluation_files:
            evaluation_data = self.evaluate_file(model, evaluation_file)
            data['classes'] = evaluation_data['classes']

            # drop bulky/duplicated keys before storing the per-file record
            del evaluation_data['prediction_overview']
            del evaluation_data['classes']

            # index the record by its path relative to the root path
            evaluation_data['evaluation_file'] = evaluation_data[
                'evaluation_file'].replace('%s/' % data['root_path'], '')
            index_key = evaluation_data['evaluation_file']

            data['data'][index_key] = evaluation_data

            if evaluation_data['is_top_1']:
                data['top_k']['correctly_classified_top_1'].append(index_key)
            else:
                data['top_k']['incorrectly_classified_top_1'].append(index_key)

            if evaluation_data['is_top_5']:
                data['top_k']['correctly_classified_top_5'].append(index_key)
            else:
                data['top_k']['incorrectly_classified_top_5'].append(index_key)

        # save evaluation file
        if save_evaluation_file:
            self.start_timer('Write json file "%s"' % json_file)
            with open(json_file, 'w') as outfile:
                json.dump(data, outfile, indent=4)
            self.finish_timer('Write json file "%s"' % json_file)

        return data
# Example #4
    def do(self):
        """Serve an HTTP prediction endpoint for one or two configured models.

        Loads the 'flower' model config (and optionally a second 'food'
        config), loads each best model file, registers prediction hooks and
        properties on ``SimpleHTTPRequestHandler`` and serves HTTP requests
        until interrupted with Ctrl+C.
        """
        # prepare some vars
        model_files = {}  # NOTE(review): never written or read below
        models = {}

        config_file = self.config.get_data('config_file')
        config_file_2 = self.config.get_data('config_file_2')

        # add config (in that moment flower)
        models['flower'] = {
            'json_config': self.loadConfig(config_file).get_json()
        }

        # load second config (in that moment food)
        if config_file_2 is not None:
            models['food'] = {
                'json_config': self.loadConfig(config_file_2).get_json()
            }

        # iterate through all models
        for model_type in models:
            # load json config
            self.config.load_json(models[model_type]['json_config'], True)

            # get best model paths
            model_path = self.config.get_data('model_file_best')['model_file']

            # check all model files
            check_if_file_exists(model_path)

            # load models; in debug mode the model is skipped (kept as None)
            self.start_timer('load model "%s" file %s' %
                             (model_type, model_path))
            model = None if self.config.get('debug') else self.load_model(
                model_path)
            self.finish_timer('load model "%s" file %s' %
                              (model_type, model_path))

            # save model and file
            models[model_type]['model_path'] = model_path
            models[model_type]['model'] = model

        # set hooks: the request handler dispatches these lambdas with the
        # loaded models dict as argument
        SimpleHTTPRequestHandler.set_hook('POST_prediction', {
            'lambda': self.POST_prediction_hook,
            'arguments': [models]
        })
        SimpleHTTPRequestHandler.set_hook('POST_prediction_get_model', {
            'lambda': self.POST_prediction_get_model_hook,
            'arguments': [models]
        })
        SimpleHTTPRequestHandler.set_hook('GET_prediction_get_model', {
            'lambda': self.GET_prediction_get_model_hook,
            'arguments': [models]
        })
        SimpleHTTPRequestHandler.set_property(
            'root_data_path',
            get_root_data_path(self.config.get_data('config_file')))
        SimpleHTTPRequestHandler.set_property('root_data_path_web', '/')
        SimpleHTTPRequestHandler.set_property('root_project_path',
                                              get_root_project_path())

        click.echo('')
        click.echo('Ready for evaluation. Now upload some images...')
        click.echo('')

        try:
            # SSL is hard-coded off; the branch below is currently dead code
            use_ssl = False
            port = self.config.get('port_ssl',
                                   'http') if use_ssl else self.config.get(
                                       'port', 'http')
            ip = self.config.get('bind_ip', 'http')
            httpd = HTTPServer((ip, port), SimpleHTTPRequestHandler)
            print('Webserver started on port %s:%d..' % (ip, port))

            # activate ssl (openssl req -newkey rsa:2048 -new -nodes -keyout key.pem -out csr.pem)
            # NOTE(review): ssl.wrap_socket was deprecated in 3.7 and removed
            # in Python 3.12 — migrate to ssl.SSLContext.wrap_socket; confirm
            # the target Python version before enabling use_ssl
            if use_ssl:
                httpd.socket = ssl.wrap_socket(httpd.socket,
                                               keyfile='./key.pem',
                                               certfile='./csr.pem',
                                               server_side=True)

            httpd.serve_forever()

        except KeyboardInterrupt:
            # NOTE(review): if the interrupt fires before HTTPServer() is
            # constructed, httpd is unbound here — verify shutdown path
            print('^C received, shutting down the web server')
            httpd.socket.close()
    def do(self):
        """Train (or continue training) the configured model.

        Loads either a fresh transfer-learning model or, when 'continue' is
        set, a previously saved model file; prepares image/train/validation
        generators; trains; then saves the model, the config JSON and an
        accuracy diagram (optionally shown on screen).
        """
        show_diagram = True
        model_file = None

        # continue training from a previously saved model if requested
        if self.config.gettl('continue'):
            model_file = self.config.get_data('model_source')
            click.echo('Continue learning from model %s' % model_file)

        # load the model: fresh transfer-learning model, or the saved file
        if model_file is None:
            self.start_timer('load model "%s"' % self.config.getml('transfer_learning_model'))
            model = self.get_model()
            self.finish_timer('load model "%s"' % self.config.getml('transfer_learning_model'))
        else:
            self.start_timer('load model file %s' % model_file)
            check_if_file_exists(model_file)
            model = self.load_model(model_file)
            self.finish_timer('load model file %s' % model_file)

        # preparations (image generator, train & validation generator)
        self.start_timer('preparations')
        image_train_generator = self.get_image_generator()
        image_val_generator = self.get_image_generator(image_train_generator)
        train_generator = self.get_train_generator(image_train_generator)
        validation_generator = self.get_validation_generator(image_val_generator)

        def group_files_by_class(filenames):
            # group generator file names by their first path component (the
            # class folder); remaining path parts are joined with spaces
            # (replaces the two identical copy-pasted loops of the original)
            grouped = {}
            for file in filenames:
                p = pathlib.Path(file)
                grouped.setdefault(p.parts[0], []).append(' '.join(p.parts[1:]))
            return grouped

        files = {
            'train': group_files_by_class(train_generator.filenames),
            'validation': group_files_by_class(validation_generator.filenames)
        }

        self.config.set_environment('files', files)
        self.finish_timer('preparations')

        # prints out some informations
        if self.config.get('verbose'):
            click.echo('LAYERS')
            click.echo('------')
            for i, layer in enumerate(model.layers):
                print(i, ': ', layer.name, '(trainable)' if layer.trainable else '(not trainable)')
            click.echo('------\n\n')

            click.echo('CLASSES')
            click.echo('-------')
            click.echo(train_generator.class_indices)
            click.echo('-------\n\n')

        # save the config to import within dl4j
        self.start_timer('save config')
        self.config.save_json()
        self.finish_timer('save config')

        # train the model
        self.start_timer('fit')
        history = self.train(model, train_generator, validation_generator)
        self.finish_timer('fit')

        # save the model to import within dl4j
        self.start_timer('save model')
        self.config.save_model(model)
        self.finish_timer('save model')

        # save config data from model to import within dl4j
        # NOTE(review): history keys 'acc'/'val_acc' match older Keras
        # versions; newer Keras emits 'accuracy'/'val_accuracy' — confirm
        self.config.rebuild_model_dict()
        self.start_timer('save config')
        self.config.set_environment('classes', train_generator.class_indices, flip=True, flip_as_array=True)
        self.config.set_environment('accuracies_trained', history.history['acc'], flip=True, flip_as_array=True)
        self.config.set_environment('accuracies_validated', history.history['val_acc'], flip=True, flip_as_array=True)
        self.config.save_json()
        self.finish_timer('save config')

        # save accuracy diagram
        plt.plot(history.history['acc'], label='train')
        plt.plot(history.history['val_acc'], label='test')
        plt.legend()
        plt.savefig(self.config.get_data('accuracy_file'))

        # show accuracy diagram
        if show_diagram:
            plt.show()
    def do(self):
        """Evaluate the configured evaluation path with the best model.

        When the path is a directory, the whole directory is evaluated via
        ``evaluate_path``; when it is a single file, that one file is
        evaluated via ``evaluate_file``.  Anything else raises.

        :raises AssertionError: if the evaluation path is neither a directory
            nor a file
        """
        show_image = True
        save_image = True

        # load config file
        self.start_timer('load json config file')
        self.config.load_json_from_config_file(self.config.get_data('config_file'))
        self.finish_timer('load json config file')

        # rebuild model dict
        self.config.rebuild_model_dict()
        self.start_timer('save json config file')
        self.config.save_json()
        self.finish_timer('save json config file')

        # get some configs
        model_file = self.config.get_data('model_file_best')['model_file']
        evaluation_path = self.config.get_data('evaluation_path')

        # check model file
        check_if_file_exists(model_file)

        def load_best_model():
            # load the best model file (timed)
            self.start_timer('load model file "%s"' % model_file)
            loaded = self.load_model(model_file)
            self.finish_timer('load model file "%s"' % model_file)
            return loaded

        # the given evaluation path is a folder with files inside
        if os.path.isdir(evaluation_path):
            model = load_best_model()

            # build the generators
            self.start_timer('preparations')
            image_val_generator = self.get_image_generator()
            validation_generator = self.get_validation_generator(image_val_generator)
            self.finish_timer('preparations')

            # evaluate the given path
            self.start_timer('evaluation')
            self.evaluate_path(
                model,
                validation_generator,
                evaluation_path,
                show_image,
                save_image
            )
            self.finish_timer('evaluation')
            return

        # anything that is neither a directory nor a file is rejected
        if not os.path.isfile(evaluation_path):
            raise AssertionError('Unknown given path "%s"' % evaluation_path)

        # the given evaluation path is a single file
        check_if_file_exists(evaluation_path)
        model = load_best_model()
        self.evaluate_file(model, evaluation_path, show_image, save_image)