    def create_all_off_files(self):
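        """Generate an OFF file for every filtration value in the stored simplex tree."""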
        path = "{0}/docs/SIMPLEX_TREES/".format(utils.get_module_path())
        if not os.path.isdir(path):
            os.makedirs(path)

        filename = "{0}{1}".format(path, "simplex_tree_20.02.19__16.20.47.txt")
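        # NOTE: the filename is hard-coded to one previously generated dump.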

        filt_list = SimplexTreeFileParser.get_filtration_values(filename)

        for filt in filt_list:
            # flist = [i for i in filt_list if i != filt]

            # Other predicates, e.g. FiltrationLowerThanPredicate(filt) or
            # FiltrationGreaterThanPredicate(filt), could be appended here.
            filtration_predicates = [FiltrationEqualPredicate(filt)]
            for predicate in filtration_predicates:
                self.create_off_file(path, filename, predicate)

            del filtration_predicates

            # for filt_end in flist:
            #     predicate = FiltrationOnOpenIntervalPredicate(filt, filt_end)
            #     self.create_off_file(path, filename, predicate)
            #     del predicate

        return

    def save_metrics(self):
        """Append every collected metric to a timestamped text report."""
        path = "{0}/docs/CLASSIFIER_EVALUATION/".format(
            utils.get_module_path())
        # Ensure the output directory exists (mirrors save_picture below).
        if not os.path.isdir(path):
            os.makedirs(path)
        file_name = time.strftime("{0}_{1}_{2}_%y.%m.%d__%H.%M.%S.txt".format(
            path, self.method_name, "metrics"))

        with open(file_name, "w") as fmetrics:
            for metric in self.metrics_list:
                metric.save_to_file(fmetrics)

    def save_picture(self, title):
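        """Save the current matplotlib figure as a timestamped PNG."""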
        path = "{0}/docs/CLASSIFIER_EVALUATION/{1}/".format(
            utils.get_module_path(), self.selection_type)
        if not os.path.isdir(path):
            os.makedirs(path)
        file_name = time.strftime("{0}_{1}_{2}_%y.%m.%d__%H.%M.%S.png".format(
            path, self.method_name, title))

        plt.title(title)
        plt.savefig(file_name)

    def show_off(self, index_off=None):
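        """Open a generated OFF file in Geomview, defaulting to the most recent one."""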
        filepath = "{0}/docs/SIMPLEX_TREES/".format(utils.get_module_path())
        all_off_files = utils.get_all_filenames(root_path=filepath,
                                                file_pattern=".off")

        if len(all_off_files) == 0:
            return

        if index_off is None or not 0 <= index_off < len(all_off_files):
            file_name = all_off_files[-1]  # fall back to the newest file
        else:
            file_name = all_off_files[index_off]

        os.system("geomview {0}".format(file_name))
Example #5
    def test_module_update(self):
        """Run the Odoo server with --test-enable and assert a clean exit."""
        module_name = os.path.basename(utils.get_module_path())
        props = utils.get_module_unit_test_conf()
        try:
            utils.run_odoo_server([
                '-c',
                os.path.join(props['odoo_server_folder'],
                             'openerp-server.conf'), '-d', props['db_name'],
                '-u', module_name, '--stop-after-init', '--test-enable'
            ])
        except SystemExit as e:
            if e.code < 0:
                raise  # negative codes signal abnormal termination
            else:
                self.assertEqual(e.code, 0)
    def load_dataset(self):
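        """Reset self.dataset and load the dataset selected by dataset_type."""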
        if self.dataset:
            self.dataset.clear()
            del self.dataset
            self.dataset = []

        if self.dataset_type == IRIS:
            self.load_iris()
        elif self.dataset_type == SWISSROLL:
            self.load_swiss_roll()
        elif self.dataset_type == DAILY_AND_SPORTS:
            self.load_daily_and_sport_activities()
        elif self.dataset_type == LIGHT_CURVES:
            self.load_light_curves()
        else:
            self.from_csv_file(
                "{0}/dataset/iris.csv".format(utils.get_module_path()))

        self.assign_tags()
Example #7
def run_unit_tests(module_name, dbname, position=runs_at_install):
    """
    :returns: ``True`` if all of ``module_name``'s tests succeeded, ``False``
              if any of them failed.
    :rtype: bool
    """
    _logger.info('CURRENT_MODULE %s running tests.', module_name)
    global current_test
    current_test = module_name
    mods = toExtends.get_test_modules(module_name)
    threading.currentThread().testing = True
    r = True
    suite = unittest.TestSuite()
    for m in mods:
        suite.addTests(unittest.TestLoader().loadTestsFromModule(m))

    if suite.countTestCases():
        t0 = time.time()
        t0_sql = openerp.sql_db.sql_counter
        _logger.info('%s running tests.', module_name)
        
        report_file = os.path.join(
            utils.get_module_path(), 
            utils.get_module_unit_test_conf()['test_module_report_file']
        )
        
        with open(report_file, 'wb') as outfile:
            runner = HTMLTestRunner.HTMLTestRunner(
                stream=outfile,
                title="Testing module: {0}".format(module_name),
                verbosity=2)
            result = runner.run(suite)
        if time.time() - t0 > 5:
            _logger.log(25, "%s tested in %.2fs, %s queries", module_name, time.time() - t0, openerp.sql_db.sql_counter - t0_sql)
        if not result.wasSuccessful():
            r = False
            _logger.error("Module %s: %d failures, %d errors", module_name, len(result.failures), len(result.errors))

    current_test = None
    threading.currentThread().testing = False
    return r
# -*- encoding: utf-8 -*-
# @author -  Alexander Escalona Fernández <*****@*****.**>

import os
import unittest
import json
import utils

utils.populate_sys_path()
props = utils.get_module_unit_test_conf()
project_path = utils.get_module_path()

import HTMLTestRunner
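# Replace OpenERP's stock test runner with the HTML-report version
# defined in override_openerp_modules_module.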
import override_openerp_modules_module as override
import openerp.modules.module as toExtends
toExtends.run_unit_tests = override.run_unit_tests

test_loader = unittest.TestLoader()
suite = test_loader.discover('.', pattern='test_*.py')

module_name = utils.get_module_metadata()['name']

report_file = os.path.join(project_path,
                           props['test_module_update_process_report_file'])
report_title = "Testing module UPDATING process: {0}".format(module_name)
with open(report_file, 'wb') as outfile:
    runner = HTMLTestRunner.HTMLTestRunner(stream=outfile,
                                           title=report_title,
                                           verbosity=2)
    runner.run(suite)
    def execute(self):
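        """Compare TDABC and k-NN decision boundaries on Iris for several fold sizes."""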
        tdabc_iris = TDABasedClassifier4IPYNB(dataset_name=IRIS, dim_2_read=2)
        # tdabc_iris.load_data()
        # y_real, y_pred = tdabc_iris.execute()
        #
        # print("y_real\n\n", y_real)
        # print("y_pred\n\n", y_pred)

        # Load the data, split it, and build the evaluation grid.
        tdabc_iris.load_data()
        tdabc_iris.split_dataset()

        training = [i for i in tdabc_iris.training]
        # Numeric tag index of each training point, used later for coloring.
        test = [
            tdabc_iris.tags_position[tdabc_iris.tags_training[i]]
            for i in tdabc_iris.tags_training
        ]

        # tdabc_iris.draw_data()
        # The dataset was loaded with only two features (dim_2_read=2),
        # so no column slicing is needed here.
        X = np.array(tdabc_iris.dataset)

        x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
        y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
        xx, yy = np.meshgrid(np.linspace(x_min, x_max, 5),
                             np.linspace(y_min, y_max, 5))
        v = np.c_[xx.ravel(), yy.ravel()]
        values = [[a, b] for a, b in v]
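        # Each grid point in values will be classified by both TDABC and
        # k-NN to render the decision boundaries.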

        print("values: =====>>> ", values)

        fold_sizes = [5, 10, 15, 20, 25, 30, 35]
        for split_data in fold_sizes:

            tdabc_results, knn_results, algorithms = tdabc_iris.execute(
                set_2_classify=values, split_data=split_data)

            ########################

            titles = ["TDABC decision boundaries", "k-NN decision boundaries"]

            # data_plotter = DatasetPlotter(tdabc_iris.dataset)
            cm = plt.cm.RdBu
            cm_bright = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
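            # One bright color per Iris class.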
            values = np.array(values)
            _xxx = np.array(training)

            # Plot the predicted probabilities: a color is assigned to each
            # point in the mesh [x_min, x_max] x [y_min, y_max].

            # plt.show()

            # just plot the dataset first

            for algorithm_idx in algorithms:
                plt.figure(figsize=(15, 5))
                algorithm_name = algorithms[algorithm_idx]
                tdabc_alg_result = tdabc_results[algorithm_idx]
                (knn_classifier, knn_alg_result) = knn_results[algorithm_idx]
                ttags = knn_classifier.training_tags
                ttraining = knn_classifier.training
                # ttraining.extend(knn_classifier.new_data)
                xxx = np.array(ttraining)

                # plt.figure(figsize=(10, 5))
                plt.subplot(1, 3, 1)

                plt.scatter(_xxx[:, 0],
                            _xxx[:, 1],
                            c=np.array(['#FF0000', '#00FF00',
                                        '#0000FF'])[test],
                            edgecolors="k",
                            cmap=cm_bright,
                            label=tdabc_iris.labels)

                # Plot the testing points

                plt.xlabel('Sepal length')
                plt.ylabel('Sepal width')
                plt.xlim(xx.min(), xx.max())
                plt.ylim(yy.min(), yy.max())
                plt.xticks(())
                plt.yticks(())
                plt.title("{0}".format("Iris Dataset"))

                plt.tight_layout()
                plt.title("Iris Dataset")

                for i, clf in enumerate((tdabc_alg_result, knn_alg_result)):
                    # Assign a color to each grid point in the mesh
                    # [x_min, x_max] x [y_min, y_max].
                    plt.subplot(1, 3, i + 2)

                    Z = np.array(clf)

                    # Put the result into a color plot
                    _Z = Z.reshape(xx.shape)
                    plt.contourf(xx, yy, _Z, cmap=cm, alpha=.8)

                    # Plot the training points

                    plt.scatter(xxx[:, 0],
                                xxx[:, 1],
                                c=np.array(['#FF0000', '#00FF00',
                                            '#0000FF'])[ttags],
                                edgecolors="k",
                                cmap=cm_bright,
                                label=tdabc_iris.labels)

                    # Plot the testing points

                    plt.scatter(values[:, 0],
                                values[:, 1],
                                c=np.array(["r", "g", "b"])[Z],
                                edgecolors="k",
                                alpha=0.5,
                                label=tdabc_iris.labels)
                    plt.xlabel('Sepal length')
                    plt.ylabel('Sepal width')
                    plt.xlim(xx.min(), xx.max())
                    plt.ylim(yy.min(), yy.max())
                    plt.xticks(())
                    plt.yticks(())
                    plt.title("{0}".format(titles[i]))

                plt.tight_layout()

                path = "{0}/docs/DATA_GRAPHICS/comparison/".format(
                    utils.get_module_path())
                if not os.path.isdir(path):
                    os.makedirs(path)
                file_name = time.strftime(
                    "{0}_{1}_%y.%m.%d__%H.%M.%S.png".format(
                        path, algorithm_name))

                # plt.legend(fontsize=20)
                plt.savefig(file_name)
Example #10
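
# Module-level error codes and their human-readable messages.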
E_DATANOTFOUND = -104
ES_DATANOTFOUND = "data not found"
E_INVALIDCHAR = -105
ES_INVALIDCHAR = "invalid char"
E_NOTSUPPORT = 106
ES_NOTSUPPORT = "not support"

def get_log_path(base_path, dir_name):
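    """Create dir_name under base_path if needed and return the full path."""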
    path = os.path.join(base_path, dir_name)
    if not os.path.exists(path):
        os.mkdir(path)
        os.chmod(path, 0777)
    return path

# Set up the log directories
log_path = get_log_path(utils.get_module_path(), "log")
crash_path = get_log_path(log_path, "crash")
rlog_path = get_log_path(log_path, "rlog")

# Configure the loggers
logging.config.fileConfig("logging.conf")
logger = logging.getLogger("common")
logger_exception = logging.getLogger("exception")
logger_detail = logging.getLogger("detail")

# Force the default encoding to UTF-8 (Python 2 only)
print sys.getdefaultencoding()
if sys.getdefaultencoding() != 'utf-8':
    reload(sys)
    sys.setdefaultencoding('utf-8')
    logger.critical("setdefaultencoding to utf-8")