Example #1
    def get_router_manual_list() -> list:
        """
        Read the Router Manual Config file

        :return: list of Router objects read from the file
        """
        output = ConfigManager.get_router_manual_config()

        router_list = []

        for i, router_info in enumerate(output):
            if len(router_info) != 9:
                Logger().error(
                    "List must be length of 9 but has a length of {0}".format(
                        len(router_info)))
                return

            try:
                v = Router(i, router_info['Name'], router_info['Id'],
                           router_info['IP'], router_info['IP_Mask'],
                           router_info['CONFIG_IP'],
                           router_info['CONFIG_IP_MASK'],
                           router_info['Username'], router_info['Password'],
                           router_info['PowerSocket'])
                router_list.append(v)

            except Exception as ex:
                Logger().error(
                    "Error at building the list of Router's\nError: {0}".
                    format(ex))

        return router_list
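Judging from the constructor call above, each entry of the manual config is expected to be a dict with exactly these nine keys (values here are hypothetical):

    router_info = {
        'Name': 'router-1',
        'Id': 1,
        'IP': '192.168.1.1',
        'IP_Mask': 24,
        'CONFIG_IP': '192.168.1.1',
        'CONFIG_IP_MASK': 24,
        'Username': 'root',
        'Password': 'secret',
        'PowerSocket': 0,
    }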
Example #2
    def import_firmwares(self, release_model: str):
        """
        Imports the stored Firmwares, so the firmware_handler can use them.
        :param release_model: stable, beta, experimental
        """
        path = self.FIRMWARE_PATH + '/' + release_model + '/' + self.UPDATE_TYPE + '/'
        Logger().debug("Import Firmwares from '" + path + "'", 2)
        count = 0

        try:
            files = os.listdir(path)
        except Exception:
            Logger().debug(
                "No Firmwares available for download at path '" + path + "'",
                3)
            return

        for firmware_name in files:
            try:
                freifunk_verein = firmware_name.split('-')[1]
                firmware_version = firmware_name.split('-')[2]
                file = path + firmware_name
                url = self.url + '/' + release_model + '/' + self.UPDATE_TYPE + '/' + firmware_name
                self.firmwares.append(
                    Firmware(firmware_name, firmware_version, freifunk_verein,
                             release_model, file, url))
                count += 1
            except Exception:
                Logger().warning("[-] Couldn't import " + firmware_name, 3)
                continue
        Logger().debug(str(count) + " Firmwares imported", 3)
Example #3
	def get_invest_month(self, cost):
		bottom = VauleConfig.fluctuation_bottom
		top = VauleConfig.fluctuation_top
		rate = random.uniform(bottom, top)
		msg = '[fluctuation_rate]{}'.format(rate)
		Logger.log(msg, tag_name=LogConst.FLUCTUATION_RATE)
		return cost * (1+rate)
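The same fluctuation step in isolation (a minimal sketch; the bounds are hypothetical stand-ins for VauleConfig.fluctuation_bottom/fluctuation_top, and the Logger call is dropped):

    import random

    FLUCTUATION_BOTTOM = -0.05  # hypothetical lower bound
    FLUCTUATION_TOP = 0.05      # hypothetical upper bound

    def get_invest_month(cost):
        # Draw a uniform monthly fluctuation rate and apply it to the cost.
        rate = random.uniform(FLUCTUATION_BOTTOM, FLUCTUATION_TOP)
        return cost * (1 + rate)

    print(get_invest_month(1000.0))  # e.g. 1023.7 when rate = 0.0237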
Example #4
 def run(self):
     Logger().info("Start WebServer on port " + str(WebServer.PORT_WEBSERVER) + " ...", 1)
     try:
         self.httpd.serve_forever()
     except Exception as e:
         Logger().debug("[-] WebServer couldn't get started", 2)
         raise e
Example #5
    class TestNotifier(object):
        def __init__(self):
            self.service_name = 'test_service'
            self.meta = meta
            self.request_body = {
                'jobId': 1,
                'stepId': 1,
                'batchId': 0,
                'retry': 0,
                'status': 3,
                'message': 'RUNNING',
                'groupName': 'TestGroupName'
            }
            self.is_notifiable = True
            #self.is_notifiable = False
            self.logger = Logger(log_level="info",
                                 vendor_key=20,
                                 retailer_key=100)
            self.logger.info(
                'tttttttttttttttteeeeeeeeeeeeeeeeeeessssssssssstttttttttttttt')

        @Notifier
        def test(self):
            print('yoyo')

        @Notifier
        def test_failure(self):
            raise RuntimeError('Calling error.')

        def main(self):
            self.test()
            #self.test_failure()
            print('done')
Example #6
class ImportCSV(SparkOperator):
    '''import csv format data'''


    OP_NAME = 'import-csv'
    OP_CATEGORY = 'data-import'

    def __init__(self):
        super(ImportCSV, self).__init__()
        self.op_input_num = 0
        self.op_output_num = 1
        self.op_status = OperatorStatus.INIT
        self.op_script_location = 'resources/spark_operators/data_import/import_csv.py'
        self.op_backend = 'spark'

        self.input_path = None
        self.delimiter = None

    def init_operator(self, op_json_param):
        self.op_json_param = op_json_param
        self.op_running_mode = self.op_json_param['running-mode'] if 'running-mode' in self.op_json_param else 'script'
        self.op_local = bool(self.op_json_param['local']) if 'local' in self.op_json_param else True

        if self.op_local:
            self.op_script_location = os.getcwd() + '/' + self.op_script_location

        self.op_working_directory = self.op_json_param['op-working-directory'] if 'op-working-directory' in self.op_json_param else None 
        self.op_logger = Logger(self.op_working_directory + '/log/import-csv_' + str(self.op_json_param['op-index']))

        self.input_path = self.op_json_param['input-path']
        self.delimiter = self.op_json_param['delimiter'] if 'delimiter' in self.op_json_param else ','

    def run_function_mode(self):
        return self.op_status

    def run_script_mode(self):
        run_command = 'spark-submit --master '
        if self.op_local:
            run_command = run_command + 'local[2] ' 

        self.op_result.append(self.op_working_directory + 'output/' + str(self.op_json_param['op-index']) + '-output')
        run_command = run_command + self.op_script_location + ' ' + self.input_path + ' ' + self.op_result[0] + ' ' + self.delimiter
        sub_proc = subprocess.Popen(run_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        
        for line in iter(sub_proc.stdout.readline, b''):
            self.op_logger.info(line)

        sub_proc.stdout.close()
        sub_proc.wait()
        self.op_status = sub_proc.returncode
        return self.op_status

    def azkaban_script(self):
        run_command = 'spark-submit --master '
        if self.op_local:
            run_command = run_command + 'local[2] '
            
        self.op_result.append(self.op_working_directory + 'output/' + str(self.op_json_param['op-index']) + '-output')
        run_command = run_command + self.op_script_location + ' ' + self.input_path + ' ' + self.op_result[0] + ' ' + self.delimiter
        return run_command
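With hypothetical parameter values (cwd '/cwd', input '/data/in.csv', working directory '/work/', op-index 3, default delimiter), the command assembled above comes out of this shape, with the delimiter passed as the third script argument:

    spark-submit --master local[2] /cwd/resources/spark_operators/data_import/import_csv.py /data/in.csv /work/output/3-output ,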
Example #7
def year_end_tax(salary):
    taxRates = {
        36000: 0.03,
        144000: 0.1,
        300000: 0.2,
        420000: 0.25,
        660000: 0.3,
        960000: 0.35,
        99999999999: 0.45
    }
    START = 0 * 12

    temp = salary - START
    idvTax = 0
    preBar = 0
    for bar, rate in taxRates.items():
        if temp <= 0:
            break
        if temp - (bar - preBar) <= 0:
            idvTax += rate * temp
            break
        else:
            idvTax += rate * (bar - preBar)
        temp -= bar - preBar
        preBar = bar

    msg = "税前年收入: {} 年纳税额: {} 税后年收入: {}".format(salary, idvTax,
                                                salary - idvTax)
    Logger.log(msg, tag_name=LogConst.YEAR_END_INCOME)

    return salary - idvTax
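A quick standalone check of the progressive bracket walk above (same rates; the Logger call is dropped):

    def year_end_tax_sketch(salary):
        tax_rates = {36000: 0.03, 144000: 0.1, 300000: 0.2, 420000: 0.25,
                     660000: 0.3, 960000: 0.35, 99999999999: 0.45}
        remaining = salary
        tax = 0
        prev_bar = 0
        for bar, rate in tax_rates.items():
            span = bar - prev_bar          # width of this bracket
            if remaining <= span:
                tax += rate * remaining    # partial bracket, done
                break
            tax += rate * span             # full bracket
            remaining -= span
            prev_bar = bar
        return salary - tax

    # 200000 -> 1080 + 10800 + 11200 = 23080 tax, 176920 net
    assert abs(year_end_tax_sketch(200000) - 176920.0) < 1e-6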
Example #8
 def _verified_download(self, firmware: Firmware, hash_firmware: str,
                        max_attempts: int) -> bool:
     """
     Downloads the given Firmware and checks if the download was correct and if the hash is matching
     :param firmware:
     :param hash_firmware:
     :param max_attempts: max number of attemps to download a firmware
     :return: bool
     """
     valid_download = False
     count = 0
     while not valid_download and count < max_attempts:
         valid_download = self._download_file(firmware.url, firmware.file,
                                              firmware.release_model)
         if valid_download:
             valid_download = firmware.check_hash(hash_firmware)
         count += 1
     if not valid_download:
         Logger().debug(
             "[-] The Firmware(" + firmware.name +
             ") couldn't be downloaded", 3)
         return False
     else:
         Logger().debug(
             "[+] The Firmware(" + firmware.name +
             ") was successfully downloaded", 3)
         return True
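The retry pattern above in isolation (a sketch; download and check_hash are stand-in callables, not the project's API):

    def verified_download(download, check_hash, max_attempts):
        # Retry until a download succeeds AND its hash matches,
        # or the attempt budget is exhausted.
        for _ in range(max_attempts):
            if download() and check_hash():
                return True
        return False

    # Usage with hypothetical callables: succeeds on the third attempt.
    attempts = iter([False, False, True])
    print(verified_download(lambda: next(attempts), lambda: True, 3))  # True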
Example #9
def svr_xml_converter(raw_data):
    '''

    This method converts the supplied xml file-object to a python dictionary.

    @raw_data, generally a file (or json string) containing the raw dataset(s),
        to be used when computing a corresponding model. If this argument is a
        file, it needs to be closed.

    @list_observation_label, is a list containing dependent variable
        labels.

    '''

    feature_count = None
    list_dataset = []
    list_observation_label = []
    logger = Logger(__name__, 'error', 'error')

    # convert xml file to python 'dict'
    dataset = xmltodict.parse(raw_data)

    # build 'list_dataset'
    for observation in dataset['dataset']['observation']:
        for key in observation:
            if key == 'criterion':
                observation_label = observation['criterion']
                list_observation_label.append(observation[key])
            elif key == 'predictor':
                for predictor in observation[key]:
                    predictor_label = predictor['label']
                    predictor_value = predictor['value']

                    validate_value = Validate_Dataset(predictor_value)
                    validate_value.validate_value()
                    list_error_value = validate_value.get_errors()
                    if list_error_value:
                        logger.log(list_error_value)
                        return None
                    else:
                        list_dataset.append({
                            'dep_variable_label':
                            str(observation_label),
                            'indep_variable_label':
                            str(predictor_label),
                            'indep_variable_value':
                            predictor_value
                        })

        # generalized feature count in an observation
        if not feature_count:
            feature_count = len(observation['predictor'])

    # save observation labels, and return
    raw_data.close()
    return {
        'dataset': list_dataset,
        'observation_labels': list_observation_label,
        'feature_count': feature_count
    }
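A minimal sketch of the structure the loop above expects from xmltodict (hypothetical data; note that with a single <observation> element the parsed value is a dict rather than a list):

    import xmltodict

    sample = '''<dataset>
      <observation>
        <criterion>0.5</criterion>
        <predictor><label>a</label><value>1</value></predictor>
        <predictor><label>b</label><value>2</value></predictor>
      </observation>
    </dataset>'''
    parsed = xmltodict.parse(sample)
    observation = parsed['dataset']['observation']
    print(observation['criterion'])                         # '0.5'
    print([p['value'] for p in observation['predictor']])   # ['1', '2']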
Example #10
    def send_data(self, local_file: str, remote_file: str):
        """
        Sends Data via sftp to the RemoteSystem

        :param local_file: Path to the local file
        :param remote_file: Path on the Router, where the file should be saved
        """
        try:
            # TODO: use this once sftp is installed on the Router
            '''
            sftp = self.ssh.open_sftp()
            sftp.put(local_file, remote_file)
            sftp.close()
            '''
            command = 'sshpass -p' + str(self.remote_system.usr_password) + ' scp ' + local_file + ' ' + \
                      str(self.remote_system.usr_name) + '@' + str(self.remote_system.ip) + ':' + remote_file
            os.system(command)

            # TODO: Paramiko_scp has to be installed
            '''
            scp = SCPClient(self.ssh.get_transport())
            scp.put(local_file, remote_file)
            '''
            Logger().debug("[+] Sent data '" + local_file + "' to RemoteSystem '" +
                           str(self.remote_system.usr_name) + "@" + str(self.remote_system.ip) +
                           ":" + remote_file + "'", 2)
        except Exception as e:
            Logger().error("[-] Couldn't send '" + local_file + "' to RemoteSystem '" +
                           str(self.remote_system.usr_name) + "@" + str(self.remote_system.ip) +
                           ":" + remote_file + "'", 2)
            Logger().error(str(e), 2)
Example #11
    def __init__(self, nsp_name: str, ipdb: IPDB):
        """
        Creats a namespace for a specific vlan_iface

        :param nsp_name:
        :param vlan_iface_name:
        :param ipdb: IPDB is a transactional database, containing records, representing network stack objects.
                    Any change in the database is not reflected immidiately in OS, but waits until commit() is called.
        """
        Logger().debug("Create Namespace ...", 2)
        self.nsp_name = nsp_name
        self.id = id
        self.vlan_iface_name = ""
        self.vlan_iface_ip = "0.0.0.0"
        self.ipdb = ipdb
        self.ipdb_netns = None
        try:
            self.ipdb_netns = IPDB(nl=NetNS(nsp_name))
            netns.setns(nsp_name)
            self.ipdb_netns.interfaces['lo'].up().commit()
            Logger().debug("[+] Namespace(" + nsp_name + ") successfully created", 3)
            # self.encapsulate_interface()
        except Exception as e:
            Logger().debug("[-] Couldn't create Namespace(" + nsp_name + ")", 3)
            for tb in traceback.format_tb(sys.exc_info()[2]):
                Logger().error(tb, 3)
            Logger().error(str(e), 3)
            self.remove()
Example #12
    def create_interface(self,
                         vlan_iface_ip: str = None,
                         vlan_iface_ip_mask: int = None):
        """
         Creats a virtual interface on a existing interface (like eth0)

        :param vlan_iface_ip: ip of the virtual interface
        :param vlan_iface_ip_mask: network-mask of the virtual interface
        """
        Logger().debug("Create VLAN Interface ...", 2)
        try:
            link_iface = self.ipdb.interfaces[self.link_iface_name]
            with self.ipdb.create(kind="vlan",
                                  ifname=self.vlan_iface_name,
                                  link=link_iface,
                                  vlan_id=self.vlan_iface_id).commit() as i:
                if vlan_iface_ip:
                    i.add_ip(vlan_iface_ip, vlan_iface_ip_mask)
                i.mtu = 1400
            if not vlan_iface_ip:
                self._wait_for_ip_assignment()
                vlan_iface_ip = self._get_ipv4_from_dictionary(
                    self.ipdb.interfaces[self.vlan_iface_name])
            Logger().debug(
                "[+] " + self.vlan_iface_name + " created with: Link=" +
                self.link_iface_name + ", VLAN_ID=" + str(self.vlan_iface_id) +
                ", IP=" + vlan_iface_ip, 3)
        except Exception as e:
            Logger().debug(
                "[-] " + self.vlan_iface_name + " couldn't be created", 3)
            Logger().error(str(e), 3)
Example #13
def do_tax(salary, gongjijin_rate=0.07):
    res_insuranced = after_insurance(salary, gongjijin_rate=gongjijin_rate)
    res = after_tax(res_insuranced)
    msg = 'salary {} after_insurance {} after tax {}'.format(
        salary, res_insuranced, res)
    Logger.log(msg, tag_name=LogConst.MONTHLY_INCOME)
    return res
Example #14
    def generate_report(self):
        if self._steps_output is not None:
            for step_output in self._steps_output:
                self._ordered_steps[step_output.get_title()[:step_output.get_title().index('_', 4)]]\
                    .append(step_output)

        for count, group in enumerate(TextPrinter._OWASP_STEP_GROUPS):
            Logger.get_logger().info(
                '\n[{0}] {1}'.format(group,
                                     TextPrinter._OWASP_STEP_GROUPS_DESC[count]),
                bold=True)
            self._ordered_steps[group].sort(key=lambda x: x.get_title())
            for step_output in self._ordered_steps[group]:
                output_msg = '[{0}] {1}: {2}'.format(
                    step_output.get_title(), step_output.get_description(),
                    step_output.get_msg())
                status = step_output.get_status()
                if status == OwaspStepBase.status.error.value:
                    Logger.get_logger().error(output_msg)
                elif status == OwaspStepBase.status.warning.value:
                    Logger.get_logger().warn(output_msg)
                elif status == OwaspStepBase.status.passed.value:
                    Logger.get_logger().passed(output_msg)
                elif status == OwaspStepBase.status.info.value:
                    Logger.get_logger().info(output_msg)
Example #15
    def __init__(self, host=None, user=None, passwd=None):
        '''@__init__

        This constructor is responsible for defining class variables.

        '''

        self.db_settings = Database()
        self.list_error = []
        self.proceed = True

        # database logger
        self.logger = Logger(__name__, 'database', 'database', 'debug')

        # host address
        if host:
            self.host = host
        else:
            self.host = self.db_settings.get_db_host()

        # sql username for above host address
        if user:
            self.user = user
        else:
            self.user = self.db_settings.get_db_username()

        # sql password for above username
        if passwd:
            self.passwd = passwd
        else:
            self.passwd = self.db_settings.get_db_password()
Example #16
    def __init__(self, stock_id, db_conn=None, redis_conn=None):
        self.stock_id = stock_id
        self.stock_name = 'Not Set'
        self.pair_queue = PairQueue(stock_id)
        self.logger = Logger('engine')
        self.db_conn = db_conn
        self.limit = 2
        self.close_price = 10
        now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
        self.stock_name = 'sss'

        if redis_conn is None:
            self.r = redis.Redis()
        else:
            self.r = redis_conn
        cursor = self.db_conn.cursor()
        try:
            cursor.execute('select stock_name from stock_set where stock_id=%s',
                           [str(self.stock_id)])
            result = cursor.fetchall()[0]
            self.stock_name = result[0]
            cursor.execute('select gains,decline,status,close_price from stock_state where stock_id=%s',[str(self.stock_id)])
            result = cursor.fetchall()
            self.limit = result[0][0]
            self.gains = self.limit
            self.decline = result[0][1]
            status = int(result[0][2])
            self.close_price = float(result[0][3])
            self.last_price = self.close_price
            self.exist = True
            self.r.hset(stock_id, 'engine_exist', True)
            if status == 1:
                self.redis_init(True,True,self.gains,self.decline,self.close_price)
                self.on = True
                print(str(stock_id) + " is running")
            else:
                self.redis_init(False,True,self.gains,self.decline,self.close_price)
                self.on = False
                print(str(stock_id) + " is pending")
            # self.last_price = float(self.r.hget(self.stock_id,'newest_price').decode('utf-8'))

            # self.close_price = self.last_price
            # if self.close_price == 0:
            #     self.close_price = 10
        except Exception as e:
            self.redis_init(False,False,0,0,0)
            self.close_price = 0
            self.on = False
            self.last_price = 0
            print(str(stock_id) + " fails: " + str(e))
            self.exist = False
        finally:
            self.set_open_price()
            # cursor.execute('insert into today_stock (stock_id,stock_name,price,date) values (%s,%s,%s,%s)',
            #                [self.stock_id, self.stock_name, self.close_price, now])
            self.db_conn.commit()
            cursor.close()
Example #17
 def call(self, cfg, dataset, train_brick):
     acc = train_brick.metric['train_accurency'].get_avg()
     loss = train_brick.metric['train_loss'].get_avg()
     message = f'[{train_brick.epoch}/{cfg.MAX_EPOCH} {train_brick.step}]  acc={acc[0]}/{acc[1]}  loss={loss[0]}/{loss[1]}'
     Logger.debug(message,
                  show_type=Logger.LOG_STYLE.DEFAULT,
                  forground=Logger.LOG_FRONT_COLOR.GREEN,
                  background=Logger.LOG_BACK_COLOR.DEFAULT)
Example #18
    def test_log(self):
        """
        Tests log
        :return: Test results
        """
        Logger().log(20, "Log from Logger() {0}".format(Logger()))
        Logger().close()

        self.assertEqual(True, True)
Example #19
 def join(self):
     Logger().info("Stop WebServer ...", 1)
     time.sleep(2)
     try:
         self.httpd.shutdown()
         Logger().debug("[+] WebServer successfully stoped", 2)
     except Exception as e:
         Logger().debug("[-] WebServer couldn't stoped", 2)
         Logger().error(str(e), 1)
Example #20
 def process_year_end(self):
     income = self.income_manager.calc_income_year_end()
     outcome = self.outcome_manager.calc_outcome_year_end()
     invest = income - outcome
     msg = '[income]{} [outcome]{} [invest]{}'.format(
         income, outcome, invest)
     Logger.log(msg, tag_name=LogConst.EXPECTATION_LOG_NAME)
     self.invest_manager.throw_money(income)
     self.print_account()
Example #21
    def test_warning(self):
        """
        Tests warning
        :return: Test results
        """
        Logger().warning("Warning from Logger() {0}".format(Logger()))
        Logger().close()

        self.assertEqual(True, True)
Example #22
    def test_debug(self):
        """
        Tests debug
        :return: Test results
        """
        Logger().debug("Debug from Logger() {0}".format(Logger()))
        Logger().close()

        self.assertEqual(True, True)
Example #23
    def test_info(self):
        """
        Tests info
        :return: Test results
        """
        Logger().info("Hello from logger: {0}".format(Logger()))
        Logger().close()

        self.assertEqual(True, True)
Example #24
    def test_log_level_tab(self):
        """
        Test log level tab
        :return: Test results
        """
        tabs = Logger().get_log_level_tab(2)
        Logger().close()

        self.assertEqual(True, tabs == "\t\t")
Example #25
    def test_error(self):
        """
        Tests error
        :return: Test results
        """
        Logger().error("Error from Logger() {0}".format(Logger()))
        Logger().close()

        self.assertEqual(True, True)
Example #26
def create_app():
    # define configuration
    with open('hiera/settings.yaml', 'r') as stream:
        try:
            # local variables
            app = Flask(
                __name__,
                template_folder='interface/templates',
                static_folder='interface/static'
            )
            settings = yaml.safe_load(stream)

            # register blueprint
            app.register_blueprint(blueprint)

            # local logger: used for this module
            root = settings['general']['root']
            LOG_PATH = root + '/' + settings['webserver']['flask_log_path']
            HANDLER_LEVEL = settings['application']['log_level']

            # flask attributes: accessible across application
            app.config.update(
                HOST=settings['general']['host'],
                PORT_REDIS=settings['redis']['port'],
                ROOT=settings['general']['root'],
                DB_LOG_PATH=settings['database']['log_path'],
                DB_ML=settings['database']['name'],
                DB_USERNAME=settings['database']['username'],
                DB_PASSWORD=settings['database']['password'],
                LOG_LEVEL=HANDLER_LEVEL,
                FLASK_LOG_PATH=settings['webserver']['flask_log_path'],
                ERROR_LOG_PATH=settings['application']['error_log_path'],
                WARNING_LOG_PATH=settings['application']['warning_log_path'],
                INFO_LOG_PATH=settings['application']['info_log_path'],
                DEBUG_LOG_PATH=settings['application']['debug_log_path'],
                MODEL_TYPE=settings['application']['model_type']
            )
        except yaml.YAMLError as error:
            logger = Logger('error', 'yaml')
            logger.log(error)

    # log handler: requires the below logger
    formatter = logging.Formatter(
        "[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
    handler = RotatingFileHandler(LOG_PATH, maxBytes=10000000, backupCount=5)
    handler.setLevel(HANDLER_LEVEL)
    handler.setFormatter(formatter)
    app.logger.addHandler(handler)

    # logger: complements the log handler
    log = logging.getLogger('werkzeug')
    log.setLevel(logging.DEBUG)
    log.addHandler(handler)

    # return
    return app
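Typical use of a factory like this (a hypothetical entry point; the host and port values are assumptions, not taken from settings.yaml above):

    if __name__ == '__main__':
        app = create_app()
        app.run(host='0.0.0.0', port=5000)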
Example #27
    def test_close(self):
        """
        Tests close
        :return: Test results
        """
        logger = Logger()
        logger.setup()
        logger.close()

        self.assertEqual(False, logger.is_loaded)
Example #28
	def print_details(self):
		msg = '{}: total borrowed {:.2f} total repaid {:.2f} monthly payment {:.2f} years {} rate {} discount {}' \
			  ''.format(self.tag_name,
						self.loan_money,
						self.final_repayment(),
						self.repay_per_month(),
						self.years,
						self.year_percent,
						self.discount)
		Logger.log(msg, tag_name=LogConst.LOAN_LOG_NAME)
Example #29
def test_import_stock():
    log = Logger("import_data.log")
    try:
        stock_data = BasicData()
        stock_num = stock_data.import_stock()
        log.print_info("导入" + str(stock_num) + "条股票数据")
        log.write_info("导入" + str(stock_num) + "条股票数据")
    except Exception as err:
        log.print_error("导入股票基础信息错误:" + str(err))
        log.write_error("导入股票基础信息错误:" + str(err))
Example #30
    def test_is_loaded(self):
        """
        Tests is_loaded
        :return: Test results
        """
        Logger().setup()
        result = Logger().is_loaded
        Logger().close()

        self.assertEqual(True, result)
Example #31
    def test_logger(self):
        """
        Tests logger
        :return: Test results
        """
        result = Logger().logger

        self.assertEqual(True, Logger().logger is result)

        Logger().close()
Example #32
 def __init__(self,
              service=None,
              endpoint=None,
              api_key=None,
              export=EXPORT_CLIPBOARD):
     self.export = export
     self.api_request = ApiRequest(service=service,
                                   endpoint=endpoint,
                                   api_key=api_key)
     self.logger = Logger().get_logger()
Example #33
def svr_xml_converter(raw_data):
    """

    This method converts the supplied xml file-object to a python dictionary.

    @raw_data, generally a file (or json string) containing the raw dataset(s),
        to be used when computing a corresponding model. If this argument is a
        file, it needs to be closed.

    @list_observation_label, is a list containing dependent variable
        labels.

    """

    feature_count = None
    list_dataset = []
    list_observation_label = []
    logger = Logger(__name__, "error", "error")

    # convert xml file to python 'dict'
    dataset = xmltodict.parse(raw_data)

    # build 'list_dataset'
    for observation in dataset["dataset"]["observation"]:
        for key in observation:
            if key == "criterion":
                observation_label = observation["criterion"]
                list_observation_label.append(observation[key])
            elif key == "predictor":
                for predictor in observation[key]:
                    predictor_label = predictor["label"]
                    predictor_value = predictor["value"]

                    validate_value = Validate_Dataset(predictor_value)
                    validate_value.validate_value()
                    list_error_value = validate_value.get_errors()
                    if list_error_value:
                        logger.log(list_error_value)
                        return None
                    else:
                        list_dataset.append(
                            {
                                "dep_variable_label": str(observation_label),
                                "indep_variable_label": str(predictor_label),
                                "indep_variable_value": predictor_value,
                            }
                        )

        # generalized feature count in an observation
        if not feature_count:
            feature_count = len(observation["predictor"])

    # save observation labels, and return
    raw_data.close()
    return {"dataset": list_dataset, "observation_labels": list_observation_label, "feature_count": feature_count}
Example #34
    def test_setup(self):
        """
        Tests setup
        :return: Test results
        """
        logger = Logger()

        logger.setup(10)

        self.assertEqual(True, logger.is_loaded)

        logger.close()
Example #35
    def test_debug_level(self):
        """
        Test debug level
        :return: Test results
        """
        logger = Logger()
        logger.setup(10, 10, 10, "logger.log", "", "", 5, None)
        level = logger.max_detail_log_level()
        logger.close()

        self.assertEqual(True, level == 5)
Example #36
def svm_xml_converter(raw_data):
    '''@svm_xml_converter

    This method converts the supplied xml file-object to a python dictionary.

    @raw_data, generally a file (or json string) containing the raw dataset(s),
        to be used when computing a corresponding model. If this argument is a
        file, it needs to be closed.

    @list_observation_label, is a list containing dependent variable
        labels.

    '''

    feature_count = None
    list_dataset = []
    list_observation_label = []
    logger = Logger(__name__, 'error', 'error')

    # convert xml file to python 'dict'
    dataset = xmltodict.parse(raw_data)

    # build 'list_dataset'
    for observation in dataset['dataset']['observation']:
        observation_label = observation['dependent-variable']

        validate = Validate_Dataset(observation_label)
        validate.validate_label()

        list_error = validate.get_errors()
        if list_error:
            logger.log(list_error)
            return None
        else:
            list_observation_label.append(observation_label)

        for feature in observation['independent-variable']:
            feature_label = feature['label']
            feature_value = feature['value']

            validate_label = Validate_Dataset(feature_label)
            validate_value = Validate_Dataset(feature_value)

            validate_label.validate_label()
            validate_value.validate_value()

            list_error_label = validate_label.get_errors()
            list_error_value = validate_value.get_errors()
            if list_error_label or list_error_value:
                logger.log(list_error_label)
                logger.log(list_error_value)
                return None
            else:
                list_dataset.append({
                    'dep_variable_label': observation_label,
                    'indep_variable_label': feature_label,
                    'indep_variable_value': feature_value
                })

        # generalized feature count in an observation
        if not feature_count:
            feature_count = len(observation['independent-variable'])

    # save observation labels, and return
    raw_data.close()
    return {
        'dataset': list_dataset,
        'observation_labels': list_observation_label,
        'feature_count': feature_count
    }
Example #37
 def process_response(self, request, response, spider):
     Logger.log({"response_status": response.status, "url": response.url}, "response")
     return response
Example #38
def svm_csv_converter(raw_data):
    '''

    This method converts the supplied csv file-object, intended for an svm
    model, to a python dictionary.

    @raw_data, generally a file (or json string) containing the raw dataset(s),
        to be used when computing a corresponding model. If this argument is a
        file, it needs to be closed.

    @list_observation_label, is a list containing dependent variable labels.

    Note: we use the 'Universal Newline Support' with the 'U' parameter when
          opening 'raw_data'. This allows newlines to be understood regardless
          of whether the newline character was created in OSX, Windows, or Linux.

    Note: since 'row' is a list, with one comma-delimited string element, the
          following line is required in this method:

          row = row[0].split(',')

    '''

    feature_count = None
    list_dataset = []
    list_observation_label = []
    list_feature_label = []
    logger = Logger(__name__, 'error', 'error')

    # open temporary 'csvfile' reader object
    dataset_reader = csv.reader(
        raw_data,
        delimiter=' ',
        quotechar='|'
    )

    # iterate first row of csvfile
    for row in islice(dataset_reader, 0, 1):

        # iterate each column in a given row
        row_indep_label = row[0].split(',')
        for value in islice(row_indep_label, 1, None):
            list_feature_label.append(str(value))

    # iterate all rows of csvfile
    for dep_index, row in enumerate(islice(dataset_reader, 0, None)):

        # iterate first column of each row (except first)
        row_dep_label = row[0].split(',')
        for value in row_dep_label[:1]:
            list_observation_label.append(str(value))

        # generalized feature count in an observation
        row_indep_variable = row[0].split(',')
        if not feature_count:
            feature_count = len(row_indep_variable) - 1

        # iterate each column in a given row
        for indep_index, value in enumerate(
            islice(row_indep_variable, 1, None)
        ):

            try:
                validate = Validate_Dataset(value)
                validate.validate_value()

                list_error = validate.get_errors()
                if list_error:
                    logger.log(list_error)
                    return None
                else:
                    value = float(value)
            except Exception as error:
                logger.log(error)
                return False

            list_dataset.append({
                'dep_variable_label': list_observation_label[dep_index],
                'indep_variable_label': list_feature_label[indep_index],
                'indep_variable_value': value
            })

    # close file, save observation labels, and return
    raw_data.close()
    return {
        'dataset': list_dataset,
        'observation_labels': list_observation_label,
        'feature_count': feature_count
    }
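The second note in the docstring above can be checked directly: with delimiter=' ', a comma-separated line parses as a single field, so each row is a one-element list that must be re-split (a minimal sketch):

    import csv

    rows = list(csv.reader(['dep,a,b', '1,2,3'], delimiter=' ', quotechar='|'))
    print(rows[0])                  # ['dep,a,b'] -- one field per row
    print(rows[1][0].split(','))    # ['1', '2', '3']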
Example #39
def svm_json_converter(raw_data, is_json):
    '''

    This method converts the supplied json file-object to a python
    dictionary.

    @raw_data, generally a file (or json string) containing the raw dataset(s),
        to be used when computing a corresponding model. If this argument is a
        file, it needs to be closed.

    @is_json, flag indicating 'raw_data' is a json string.

    @observation_labels, is a list containing dependent variable labels.

    '''

    # local variables
    feature_count = None
    list_dataset = []
    observation_labels = []
    logger = Logger(__name__, 'error', 'error')

    # web-interface
    if not is_json:
        dataset = json.load(raw_data)

        for observation_label in dataset:
            # variables
            observations = dataset[observation_label]

            # dependent variable with single observation
            if type(observations) == dict:
                for feature_label, feature_value in observations.items():
                    # validation
                    validate_fvalue = Validate_Dataset(feature_value)
                    validate_fvalue.validate_value()

                    if validate_fvalue.get_errors():
                        logger.log(validate_fvalue.get_errors())
                    else:
                        # restructured data
                        list_dataset.append({
                            'dep_variable_label': str(observation_label),
                            'indep_variable_label': str(feature_label),
                            'indep_variable_value': feature_value
                        })

                # generalized feature count in an observation
                if not feature_count:
                    feature_count = len(observations)

            # dependent variable with multiple observations
            elif type(observations) == list:
                for observation in observations:
                    for feature_label, feature_value in observation.items():
                        # validation
                        validate_fvalue = Validate_Dataset(feature_value)
                        validate_fvalue.validate_value()

                        if validate_fvalue.get_errors():
                            logger.log(validate_fvalue.get_errors())
                        else:
                            # restructured data
                            list_dataset.append({
                                'dep_variable_label': str(observation_label),
                                'indep_variable_label': str(feature_label),
                                'indep_variable_value': feature_value
                            })

                    # generalized feature count in an observation
                    if not feature_count:
                        feature_count = len(observation)

            # list of observation label
            observation_labels.append(observation_label)

    # programmatic-interface
    else:
        dataset = raw_data
        observation_label = raw_data[0]

        # list of observation label
        observation_labels.append(observation_label)

        # dependent variable with single observation
        if type(raw_data[1]) == dict:
            for label, feature in raw_data[1].items():
                # validation
                validate_fvalue = Validate_Dataset(feature)
                validate_fvalue.validate_value()

                if validate_fvalue.get_errors():
                    logger.log(validate_fvalue.get_errors())
                else:
                    # restructured data
                    list_dataset.append({
                        'dep_variable_label': str(observation_label),
                        'indep_variable_label': str(label),
                        'indep_variable_value': feature
                    })

            # generalized feature count in an observation
            if not feature_count:
                feature_count = len(raw_data[1])

        # dependent variable with multiple observations
        if type(raw_data[1]) == list:
            for feature_set in raw_data[1]:
                for feature_label, feature_value in feature_set.items():
                    # validation
                    validate_fvalue = Validate_Dataset(feature_value)
                    validate_fvalue.validate_value()

                    if validate_fvalue.get_errors():
                        logger.log(validate_fvalue.get_errors())
                    else:
                        # restructured data
                        list_dataset.append({
                            'dep_variable_label': str(observation_label),
                            'indep_variable_label': str(feature_label),
                            'indep_variable_value': feature_value
                        })

                # generalized feature count in an observation
                if not feature_count:
                    feature_count = len(feature_set)

    # close file
    if not is_json:
        raw_data.close()

    # save observation labels, and return
    return {
        'dataset': list_dataset,
        'observation_labels': observation_labels,
        'feature_count': feature_count
    }
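A minimal sketch of the two input shapes the web-interface branch above distinguishes (hypothetical data; StringIO stands in for the uploaded file-object):

    import json
    from io import StringIO

    # dependent variable with a single observation: the value is a dict
    single = StringIO(json.dumps({'cat': {'height': 10, 'weight': 4}}))
    # dependent variable with multiple observations: the value is a list of dicts
    multiple = StringIO(json.dumps({'cat': [{'height': 10}, {'height': 11}]}))
    print(json.load(single)['cat'])    # {'height': 10, 'weight': 4}
    print(json.load(multiple)['cat'])  # [{'height': 10}, {'height': 11}]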
Example #40
def svr_json_converter(raw_data, is_json):
    '''@svr_json_converter

    This method converts the supplied json file-object to a python
    dictionary.

    @raw_data, generally a file (or json string) containing the raw dataset(s),
        to be used when computing a corresponding model. If this argument is a
        file, it needs to be closed.

    @is_json, flag indicating 'raw_data' is a json string.

    @observation_labels, is a list containing dependent variable labels.

    '''

    # local variables
    feature_count = None
    list_dataset = []
    observation_labels = []
    logger = Logger(__name__, 'error', 'error')

    # web-interface
    if not is_json:
        dataset = json.load(raw_data)
        for criterion, predictors in dataset.items():
            observation_label = criterion

            # list of observation label
            observation_labels.append(criterion)

            # criterion with single observation
            if type(predictors) == dict:
                for label, predictor in predictors.items():
                    # validation (part 1)
                    validate_predictor = Validate_Dataset(str(predictor))
                    validate_predictor.validate_value()

                    if validate_predictor.get_errors():
                        logger.log(validate_predictor.get_errors())
                    else:
                        # restructured data
                        list_dataset.append({
                            'dep_variable_label': observation_label,
                            'indep_variable_label': str(label),
                            'indep_variable_value': predictor
                        })

                # generalized feature count in an observation
                if not feature_count:
                    feature_count = len(predictors)

            # criterion with multiple observations
            if type(predictors) == list:
                for criterion in predictors:
                    for label, predictor in criterion.items():
                        # validation (part 1)
                        validate_predictor = Validate_Dataset(predictor)
                        validate_predictor.validate_value()

                        if validate_predictor.get_errors():
                            logger.log(validate_predictor.get_errors())
                        else:
                            # restructured data
                            list_dataset.append({
                                'dep_variable_label': str(observation_label),
                                'indep_variable_label': str(label),
                                'indep_variable_value': predictor
                            })

                        # generalized feature count in an observation
                        if not feature_count:
                            feature_count = len(criterion.items())

    # programmatic-interface
    else:
        dataset = raw_data

        for criterion, predictors in dataset.items():
            # list of observation label
            observation_labels.append(criterion)

            # criterion with single observation
            if type(predictors) == dict:
                for label, predictor in predictors.items():
                    # validation (part 1)
                    validate_predictor = Validate_Dataset(predictor)
                    validate_predictor.validate_value()

                    if validate_predictor.get_errors():
                        logger.log(validate_predictor.get_errors())
                    else:
                        # restructured data
                        list_dataset.append({
                            'dep_variable_label': str(criterion),
                            'indep_variable_label': str(label),
                            'indep_variable_value': predictor
                        })

                # generalized feature count in an observation
                if not feature_count:
                    feature_count = len(predictors.items())

            # criterion with multiple observations
            if type(predictors) == list:
                for single_predictors in predictors:
                    for label, predictor in single_predictors.items():
                        # validation (part 1)
                        validate_predictor = Validate_Dataset(predictor)
                        validate_predictor.validate_value()

                        if validate_predictor.get_errors():
                            logger.log(validate_predictor.get_errors())
                        else:
                            # restructured data
                            list_dataset.append({
                                'dep_variable_label': str(criterion),
                                'indep_variable_label': str(label),
                                'indep_variable_value': predictor
                            })

                    # generalized feature count in an observation
                    if not feature_count:
                        feature_count = len(single_predictors.items())

    # close file
    if not is_json:
        raw_data.close()

    # save observation labels, and return
    return {
        'dataset': list_dataset,
        'observation_labels': observation_labels,
        'feature_count': feature_count
    }
Example #41
def svm_model(kernel_type, session_id, feature_request, list_error):
    '''@svm_model

    This method generates an svm prediction using the provided prediction
    feature input(s), and the stored corresponding model, within the NoSQL
    datastore.

    @grouped_features, a matrix of observations, where each nested vector,
        or python list, is a collection of features within the containing
        observation.

    @encoded_labels, observation labels (dependent variable labels),
        encoded into a unique integer representation.

    '''

    # local variables
    dataset = feature_request.get_dataset(session_id)
    feature_count = feature_request.get_count(session_id)
    label_encoder = preprocessing.LabelEncoder()
    logger = Logger(__name__, 'error', 'error')

    # get dataset
    if dataset['error']:
        logger.log(dataset['error'])
        list_error.append(dataset['error'])
        dataset = None
    else:
        dataset = numpy.asarray(dataset['result'])

    # get feature count
    if feature_count['error']:
        logger.log(feature_count['error'])
        list_error.append(feature_count['error'])
        feature_count = None
    else:
        feature_count = feature_count['result'][0][0]

    # check dataset integrity, build model
    if len(dataset) % feature_count == 0:
        features_list = dataset[:, [[0], [2], [1]]]
        current_features = []
        grouped_features = []
        observation_labels = []
        feature_labels = []

        # group features into observation instances, record labels
        for index, feature in enumerate(features_list):
            if not (index+1) % feature_count == 0:
                # observation labels
                current_features.append(feature[1][0])

                # general feature labels in every observation
                if not len(feature_labels) == feature_count:
                    feature_labels.append(feature[2][0])
            else:
                # general feature labels in every observation
                if not len(feature_labels) == feature_count:
                    feature_labels.append(feature[2][0])

                current_features.append(feature[1][0])
                grouped_features.append(current_features)
                observation_labels.append(feature[0][0])
                current_features = []

        # convert observation labels to a unique integer representation
        label_encoder = preprocessing.LabelEncoder()
        label_encoder.fit(dataset[:, 0])
        encoded_labels = label_encoder.transform(observation_labels)

        # create svm model
        clf = svm.SVC(kernel=kernel_type)
        clf.fit(grouped_features, encoded_labels)

        # get svm title, and cache (model, encoded labels, title)
        entity = Retrieve_Entity()
        title = entity.get_title(session_id)['result'][0][0]
        Cache_Model(clf).cache(
            'svm_rbf_model',
            str(session_id) + '_' + title
        )
        Cache_Model(label_encoder).cache('svm_rbf_labels', session_id)
        Cache_Hset().cache('svm_rbf_title', session_id, title)

        # cache svm feature labels, with respect to given session id
        Cache_Hset().cache(
            'svm_rbf_feature_labels',
            str(session_id),
            json.dumps(feature_labels)
        )

        # return error(s) if exists
        return {'error': list_error}
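The dataset[:, [[0], [2], [1]]] step above reorders the three columns and adds a trailing axis, which is why values are read as feature[0][0], feature[1][0], and feature[2][0]. A minimal numpy sketch with hypothetical values:

    import numpy as np

    dataset = np.array([['cat', 'height', '10'],
                        ['dog', 'height', '20']])
    features_list = dataset[:, [[0], [2], [1]]]
    print(features_list.shape)       # (2, 3, 1): columns reordered to 0, 2, 1
    print(features_list[0][0][0])    # 'cat'    (observation label)
    print(features_list[0][1][0])    # '10'     (the value column)
    print(features_list[0][2][0])    # 'height' (the feature label column)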
Example #42
def sv_model(model, kernel_type, session_id, feature_request, list_error):
    '''

    This method generates an sv (i.e. svm, or svr) model using feature data,
    retrieved from the database. The generated model, is then stored within the
    NoSQL datastore.

    @grouped_features, a matrix of observations, where each nested vector,
        or python list, is a collection of features within the containing
        observation.
    @encoded_labels, observation labels (dependent variable labels),
        encoded into a unique integer representation.

    '''

    # local variables
    dataset = feature_request.get_dataset(session_id, model)
    get_feature_count = feature_request.get_count(session_id)
    label_encoder = preprocessing.LabelEncoder()
    logger = Logger(__name__, 'error', 'error')
    list_model_type = current_app.config.get('MODEL_TYPE')

    # get dataset
    if dataset['error']:
        logger.log(dataset['error'])
        list_error.append(dataset['error'])
        dataset = None
    else:
        dataset = numpy.asarray(dataset['result'])

    # get feature count
    if get_feature_count['error']:
        logger.log(get_feature_count['error'])
        list_error.append(get_feature_count['error'])
        feature_count = None
    else:
        feature_count = get_feature_count['result'][0][0]

    # check dataset integrity, build model
    if len(dataset) % feature_count == 0:
        features_list = dataset[:, [[0], [2], [1]]]
        current_features = []
        grouped_features = []
        observation_labels = []
        feature_labels = []

        # group features into observation instances, record labels
        for index, feature in enumerate(features_list):
            # svm: observation labels
            if model == list_model_type[0]:
                current_features.append(feature[1][0])

                if (index+1) % feature_count == 0:
                    grouped_features.append(current_features)
                    observation_labels.append(feature[0][0])
                    current_features = []

            # svr: observation labels
            elif model == list_model_type[1]:
                current_features.append(float(feature[1][0]))

                if (index+1) % feature_count == 0:
                    grouped_features.append(current_features)
                    observation_labels.append(float(feature[0][0]))
                    current_features = []

            # general feature labels in every observation
            if not len(feature_labels) == feature_count:
                feature_labels.append(feature[2][0])

        # case 1: svm model
        if model == list_model_type[0]:
            # convert observation labels to a unique integer representation
            label_encoder = preprocessing.LabelEncoder()
            label_encoder.fit(dataset[:, 0])
            encoded_labels = label_encoder.transform(observation_labels)

            # create model
            clf = svm.SVC(kernel=kernel_type, probability=True)

            # cache encoded labels
            Cache_Model(label_encoder).cache(model + '_labels', session_id)

            # fit model
            clf.fit(grouped_features, encoded_labels)

        # case 2: svr model
        elif model == list_model_type[1]:
            # create model
            clf = svm.SVR(kernel=kernel_type)

            # fit model
            clf.fit(grouped_features, observation_labels)

            # compute, and cache coefficient of determination
            r2 = clf.score(grouped_features, observation_labels)
            Cache_Hset().cache(
                model + '_r2',
                session_id,
                r2
            )

        # get title
        entity = Retrieve_Entity()
        title = entity.get_title(session_id)['result'][0][0]

        # cache model, title
        Cache_Model(clf).cache(
            model + '_model',
            str(session_id) + '_' + title
        )
        Cache_Hset().cache(model + '_title', session_id, title)

        # cache feature labels, with respect to given session id
        Cache_Hset().cache(
            model + '_feature_labels',
            str(session_id),
            json.dumps(feature_labels)
        )

        # return error(s) if exists
        return {'error': list_error}