Example #1
	def parse(self, s):
		if not s:
			return

		s = str(s)
		buf = io.StringIO(s)

		config = ConfigParser()
		config.read_file(buf)

		# Read all data from the configuration file into the _config dict.
		for section in config.sections():
			items = dict(config.items(section))

			if section == "DEFAULT":
				section = "main"

			try:
				self._config[section].update(items)
			except KeyError:
				self._config[section] = items

		# Update the logger, because the logging configuration may
		# have been altered.
		logger.setup_logging(self)
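A minimal usage sketch for the `parse` method above (the `Config` container name and the INI text are assumptions; the snippet's class initialises `self._config` elsewhere):

# Hypothetical driver; assumes the enclosing class (here called Config)
# sets up self._config as a dict and that logger.setup_logging accepts it.
ini_text = """
[pakfire]
loglevel = debug
"""

cfg = Config()       # assumed container class from this snippet
cfg.parse(ini_text)  # parsed sections are merged into cfg._config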
def main():
    setup_logging(__file__)

    try:
        all_stocks = StockInfo.objects().timeout(False)
    except Exception as e:
        logging.error("Error when querying StockInfo: " + str(e))
        raise
Example #3
	def run(self):
		"""
			Runs the pakfire daemon with provided settings.
		"""
		# Read the configuration file for the daemon.
		self.config = config.ConfigDaemon()
		logger.setup_logging(self.config)

		# Create daemon instance.
		d = daemon.PakfireDaemon(self.config)
		try:
			d.run()

		# We cannot just kill the daemon, it needs a smooth shutdown.
		except (SystemExit, KeyboardInterrupt):
			d.shutdown()
def main():
    root_path = os.path.dirname(os.path.abspath(__file__))
    os.chdir(root_path)
    result = do_input_validation()
    result.verbose = True
    setup_logging(logfile="bt_report.log", scrnlog=result.verbose)
    logger.info("Running Script with -> Vertica Server: %s, Vertica User: %s, Customer ID: %s, DeviceType ID: %s, "
                "Date: %s, CSV File: %s, Verbose: %s" % (result.server, result.username, result.customer,
                                                         result.device_type_id, result.start_date, result.csv_file,
                                                         result.verbose))
    date_list, end_date = get_dates(start_date=result.start_date)
    date_formatter = datetime.strptime(end_date, TIME_FORMAT_MDY)
    year_month_format = "%s%02d" % (date_formatter.year, date_formatter.month)
    report_name = "50_Network Intrusion Prevention_Detection Service Report_%s" % year_month_format
    report_output_file_path = get_output_file_path(base_path=root_path, template_file=report_name)
    logger.info("Report Generation Started. Result file: %s" % report_output_file_path)
    # print report_output_file_path
    try:
        vertica_db_instance = VerticaDatabase(
            server=result.server,
            user=result.username,
            password=result.password
        )
        final_data_dict, severity_records, top_5_alarms = fetch_records(
            db_instance=vertica_db_instance,
            customer_id=result.customer,
            device_type_ids=result.device_type_id,
            start_date=result.start_date,
            end_date=end_date,
            date_range=date_list,
            csv_file_path=result.csv_file
        )
        # print top_5_alarms
        # print final_data_dict
        workbook = ExcelWriter(report_output_file_path)
        sheet_name = "50_NIDS_IPS_Report_%s" % year_month_format
        workbook.write_to_document_file(sheet_name=sheet_name, date_str=end_date)
        workbook.write_data_worksheet(sheet_name="DATA", data=final_data_dict, top_alarms=top_5_alarms)
        workbook.draw_top5_charts(sheet_name="TOP 5")
        workbook.write_main_worksheet(sheet_name="MAIN", data=severity_records, start_date=result.start_date,
                                      end_date=end_date)
        workbook.close()
        logger.info("Report Generation Completed. Result file: %s" % report_output_file_path)
        print("Report Generation Completed. Result file: %s" % report_output_file_path)
    except Exception, ex:
        logger.exception(ex)
        sys.exit()
    def __init__(self, service=None):
        """ Constructor to create ServiceInstanceTemplate object

        @param service: service object on which ServiceInstanceTemplate has to be configured
        """
        super(ServiceInstanceTemplate, self).__init__()
        self.log = logger.setup_logging(self.__class__.__name__)
        self.schema_class = 'service_instance_template_schema.ServiceInstanceTemplateSchema'
        self.set_connection(service.get_connection())
        self.set_create_endpoint("/si/service/" + str(service.id) + "/serviceinstancetemplate")
        self.id = None
        self.update_as_post = False
    def __init__(self, vsm=None):
        """ Constructor to create EventThresholds object

        @param vsm object on which EventThresholds object has to be configured
        """
        super(EventThresholds, self).__init__()
        self.log = logger.setup_logging(self.__class__.__name__)
        self.schema_class = 'event_thresholds_schema.EventThresholdsSchema'
        self.set_connection(vsm.get_connection())
        self.set_create_endpoint("/firewall/stats/eventthresholds")
        self.set_read_endpoint("/firewall/stats/eventthresholds")

        self.create_as_put = True
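A hedged usage sketch for the constructor above (`connect_to_vsm` stands in for whatever yields an object exposing `get_connection()`; it is not part of the snippet):

vsm = connect_to_vsm("nsx-manager.example.com", "admin", "secret")  # assumed helper
thresholds = EventThresholds(vsm)
# create_as_put presumably makes the create call use HTTP PUT against
# /firewall/stats/eventthresholds rather than POST.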
Example #7
    def __init__(self, config):
        """
        class to parse configuration json file. Handles hyperparameters for training, initializations of modules, checkpoint saving
        and logging module.
        :param config: Dict containing configurations, hyperparameters for training. contents of `config.json` file for example.
        :param resume: String, path to the checkpoint being loaded.
        :param modification: Dict keychain:value, specifying position values to be replaced from config dict.
        :param run_id: Unique Identifier for training processes. Used to save checkpoints and training log. Timestamp is being used as default
        """
        # load config file and apply modification
        run_id = config.run_id
        if config.device is not None:
            os.environ["CUDA_VISIBLE_DEVICES"] = config.device

        self._config = config

        # set save_dir where trained model and log will be saved.
        save_dir = Path(self.config['trainer']['save_dir'])

        exper_name = self.config['name']
        if run_id is None: # use timestamp as default run-id
            run_id = datetime.now().strftime(r'%m%d_%H%M%S')
        self._save_dir = save_dir / exper_name / 'models' #/ run_id
        self._log_dir = save_dir / exper_name / 'log' #/ run_id

        # make directory for saving checkpoints and log.
        # exist_ok = run_id == ''
        exist_ok = os.path.isdir(self.save_dir)
        self.save_dir.mkdir(parents=True, exist_ok=exist_ok)
        self.log_dir.mkdir(parents=True, exist_ok=exist_ok)

        # configure logging module
        setup_logging(self.log_dir)
        self.log_levels = {
            0: logging.WARNING,
            1: logging.INFO,
            2: logging.DEBUG
        }
    def __init__(self, config, resume=None, modification=None, run_id=None):
        """
        class to parse configuration json file. Handles hyperparameters for training, initializations of modules, checkpoint saving
        and logging module.
        :param config: Dict containing configurations, hyperparameters for training. contents of `config.json` file for example.
        :param resume: String, path to the checkpoint being loaded.
        :param modification: Dict keychain:value, specifying position values to be replaced from config dict.
        :param run_id: Unique Identifier for training processes. Used to save checkpoints and training log. Timestamp is being used as default
        """
        # load config file and apply modification
        self._config = _update_config(config, modification)
        self.resume = resume

        # set save_dir where trained model and log will be saved.
        save_dir = Path(self.config['trainer']['save_dir'])    # root directory for saved models

        exper_name = self.config['name']    # experiment name
        if run_id is None:  # use timestamp as default run-id
            run_id = datetime.now().strftime(r'%m%d_%H%M%S')    # run id: MMDD_HHMMSS
        self._save_dir = save_dir / 'models' / exper_name / run_id    # checkpoint directory
        self._log_dir = save_dir / 'log' / exper_name / run_id    # log directory

        # make directory for saving checkpoints and log.
        exist_ok = run_id == ''
        self.save_dir.mkdir(parents=True, exist_ok=exist_ok)    # create the directories
        self.log_dir.mkdir(parents=True, exist_ok=exist_ok)

        # save updated config file to the checkpoint dir
        write_json(self.config, self.save_dir / 'config.json')    # save the updated config next to the checkpoints

        # configure logging module
        setup_logging(self.log_dir)    # configure logging according to logger_config.json
        self.log_levels = {
            0: logging.WARNING,
            1: logging.INFO,
            2: logging.DEBUG
        }
    def __init__(self, config, resume=None, modification=None, run_id=None):
        """
        class to parse configuration json file. Handles hyperparameters for training,
        initializations of modules, checkpoint saving and logging module.

        :param config: Dict containing configurations, hyperparameters for training.
            contents of `config.json` file for example.
        :param resume: String, path to the checkpoint being loaded.
        :param modification: Dict keychain:value, specifying position values to
            be replaced from config dict.
        :param run_id: Unique Identifier for training processes.
            Used to save checkpoints and training log. Timestamp used as default
        """
        # load config file and apply modification
        self._config = _update_config(config, modification)
        self.resume = resume

        # set save_dir where trained model and log will be saved.
        save_dir = Path(self.config["trainer"]["save_dir"])

        exper_name = self.config["name"]
        if run_id is None:  # use timestamp as default run-id
            run_id = datetime.now().strftime(r"%m%d_%H%M%S")
        self._save_dir = save_dir / "models" / exper_name / run_id
        self._log_dir = save_dir / "log" / exper_name / run_id

        # make directory for saving checkpoints and log.
        exist_ok = run_id == ""
        self.save_dir.mkdir(parents=True, exist_ok=exist_ok)
        self.log_dir.mkdir(parents=True, exist_ok=exist_ok)

        # save updated config file to the checkpoint dir
        write_json(self.config, self.save_dir / "config.json")

        # configure logging module
        setup_logging(self.log_dir)
        self.log_levels = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG}
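In the template projects these constructors come from, `log_levels` is usually consumed by a small accessor; a sketch along those lines (the method name and default verbosity are assumptions):

    def get_logger(self, name, verbosity=2):
        # Translate a 0/1/2 verbosity flag into a logging level and return
        # a named logger; assumes setup_logging() above installed handlers.
        assert verbosity in self.log_levels, (
            "verbosity option {} is invalid. Valid options are {}.".format(
                verbosity, list(self.log_levels.keys())))
        logger = logging.getLogger(name)
        logger.setLevel(self.log_levels[verbosity])
        return logger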
Example #10
    def __init__(self, args, mode, timestamp=True):
        args = args.parse_args()
        if args.device:
            os.environ["CUDA_VISIBLE_DEVICES"] = args.device

        if args.resume:
            self.resume = Path(args.resume)
            self.cfg_fname = self.resume / 'config.json'
            # self.cfg_fname = Path(args.config)
        else:
            msg_no_cfg = "Config file must be specified"
            assert args.config is not None, msg_no_cfg
            self.resume = None
            self.cfg_fname = Path(args.config)

        self._config = read_json(self.cfg_fname)

        if mode == 'train':
            save_dir = Path(self.config['trainer']['save_dir'])
            if self.resume:
                self._save_dir = self.resume
                self._log_dir = self.resume
            else:
                self._save_dir = save_dir / args.logdir
                self._log_dir = save_dir / args.logdir
        else:
            if args.mode == 'val':
                save_dir = Path(self.config['trainer']['save_dir'])
                self._save_dir = self.resume / "val"
                self._log_dir = self.resume / "val"
                self._config['data_loader']['args']['mode'] = 'val'
            elif args.mode == 'test':
                save_dir = Path(self.config['trainer']['save_dir'])
                self._save_dir = self.resume / "test"
                self._log_dir = self.resume / "test"
                self._config['data_loader']['args']['mode'] = 'test'

        #model_name = self.cfg_fname.parent.stem
        #exper_name = f"{model_name}-{self.cfg_fname.stem}"
        #self._save_dir = save_dir / logdir
        #self._log_dir = save_dir / logdir
        #self._exper_name = exper_name
        self._args = args

        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.log_dir.mkdir(parents=True, exist_ok=True)
        
        write_json(self.config, self._save_dir / 'config.json')
        self.log_path = setup_logging(self.log_dir)
Example #11
    def __init__(self, args):
        args = args.parse_args()
        if args.device:
            os.environ["CUDA_VISIBLE_DEVICES"] = args.device
        if args.resume:
            self.resume = Path(args.resume)
            self.cfg_fname = self.resume.parent / 'config.json'
        else:
            self.resume = None
            self.cfg_fname = Path(args.config)
        assert self.cfg_fname is not None, "Configuration file needs to be specified. Add '-c config.json', for example."

        self.config = read_json(self.cfg_fname)
        self.exper_name = self.config['name']

        # set save_dir where trained model and log will be saved.
        save_dir = Path(self.config['trainer']['save_dir'])
        timestamp = datetime.now().strftime(
            '%m%d_%H%M%S')  # if timestamp else ''

        self.save_dir = save_dir / 'models' / self.exper_name / timestamp
        self.log_dir = save_dir / 'log' / self.exper_name / timestamp

        self.save_dir.mkdir(parents=True)
        self.log_dir.mkdir(parents=True)

        # copy the config file to the checkpoint dir # NOTE: str() can be removed from here on python 3.6
        copyfile(str(self.cfg_fname), str(self.save_dir / 'config.json'))

        # configure logging module
        setup_logging(self.log_dir)
        self.log_levels = {
            0: logging.WARNING,
            1: logging.INFO,
            2: logging.DEBUG
        }
    def __init__(self, line_sz, total_sz, logger=None, console=True, filelog=False, level=logging.ERROR):
        assert isinstance(line_sz, int) \
            and isinstance(total_sz, int) \
            and line_sz > 0 \
            and total_sz > 0
        self.line_size = line_sz
        self.total_size = total_sz
        self.line_count = total_sz // line_sz  # integer number of lines
        if logger is None:
            from logger import setup_logging
            self.logger = setup_logging(console=console, logfile='mem_simulation.log', filelog=filelog, level=level)
        else:
            self.logger = logger
        self.line_szbit = int(math.log(self.line_size, 2))
        self.total_szbit = int(math.log(self.total_size, 2))
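A hedged instantiation of the constructor above (the class name `MemSim` is a placeholder; sizes should be powers of two so the bit-width math is exact):

# 64-byte lines over a 64 KiB space: line_szbit = 6, total_szbit = 16.
sim = MemSim(line_sz=64, total_sz=64 * 1024)
sim.logger.info("cache lines: %d", sim.line_count)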
Example #13
    def __init__(self, args, timestamp=True):
        args = args.parse_args()
        if args.device:
            os.environ["CUDA_VISIBLE_DEVICES"] = args.device

        if args.resume:
            self.resume = Path(args.resume)
            self.cfg_fname = self.resume.parent / 'config.json'
            # self.cfg_fname = Path(args.config)
        else:
            msg_no_cfg = "Config file must be specified"
            assert args.config is not None, msg_no_cfg
            self.resume = None
            self.cfg_fname = Path(args.config)

        self._config = read_json(self.cfg_fname)

        if "trainer" in self.config:
            save_dir = Path(self.config['trainer']['save_dir'])
        else:
            save_dir = Path(self.config['tester']['save_dir'])

        timestamp = datetime.now().strftime(
            r"%Y-%m-%d_%H-%M-%S") if timestamp else ""

        model_name = self.cfg_fname.parent.stem
        exper_name = f"{model_name}-{self.cfg_fname.stem}"
        self._save_dir = save_dir / 'models' / exper_name / timestamp
        self._log_dir = save_dir / 'log' / exper_name / timestamp
        self._exper_name = exper_name
        self._args = args

        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.log_dir.mkdir(parents=True, exist_ok=True)

        write_json(self.config, self._save_dir / 'config.json')
        self.log_path = setup_logging(self.log_dir)
Example #14
from logger import setup_logging
from message_handler import render_msg, parse_data
import asyncio
from random import randrange

log = setup_logging(__name__)


class ChatClient(asyncio.Protocol):
    message = 'This is the message. It will be echoed.'
    client = 'CLIENT{:0>3}'.format(randrange(0,1000))
    transport = None

    def connection_made(self, transport):
        self.transport = transport

        log.debug("Setting up connection!")

        # Do Register
        self.register()

        res = True
        while res:
            res = self.send_msg()

        log.debug("Disconnecting from server!")
        self.transport.close()

    def data_received(self, data):
        mstring = data.decode()
        log.debug('Data received: [{}]'.format(mstring))
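A sketch of wiring `ChatClient` into an event loop (host and port are placeholders; the pre-3.10 `get_event_loop` style matches this snippet's vintage):

loop = asyncio.get_event_loop()
# create_connection instantiates ChatClient once the TCP connection is up.
coro = loop.create_connection(ChatClient, '127.0.0.1', 8888)
loop.run_until_complete(coro)
loop.run_forever()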
Example #15
"""
This will write the timestamp, meter power value, PV power value, sum of the powers to a CSV file.
"""

import csv
import os.path
import config as cfg
import logger
from datetime import datetime

simulator_log = logger.setup_logging("pv_simulator")


async def write(file_path: str, data: dict) -> None:
    """
    :param file_path: path to the file
    :param data: data with fields ['timestamp', 'meter_power_value', 'PV_power_value', 'sum_of_powers']
    :return: None
    """
    simulator_log.info("File writer requested for filepath %s" % file_path)
    new_file = True
    if os.path.isfile(file_path):
        new_file = False
    with open(file_path, mode='a+', newline='') as csv_file:
        fieldnames = cfg.CSV_FIELDS
        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
        if new_file:
            writer.writeheader()
        writer.writerow(data)
        simulator_log.info("File writer finished writing to file path %s" % file_path)


if __name__ == '__main__':
    # Argument parsing
    parser = argparse.ArgumentParser(
        description='Start and stop a generator based on conditions')

    parser.add_argument('-d',
                        '--debug',
                        help='set logging level to debug',
                        action='store_true')
    parser.add_argument('-r',
                        '--retries',
                        help='Retries on error',
                        default=300,
                        type=int)

    args = parser.parse_args()

    print('-------- dbus_generator, v' + softwareversion + ' is starting up --------')
    logger = setup_logging(args.debug)

    # Have a mainloop, so we can send/receive asynchronous calls to and from dbus
    DBusGMainLoop(set_as_default=True)

    generator = DbusGenerator(args.retries)
    # Start and run the mainloop
    mainloop = gobject.MainLoop()
    mainloop.run()
def main():
    # parse arguments
    aparser = argparse.ArgumentParser(description='Convert SystemRDL files to outputs like RTL')
    aparser.add_argument('files', type=str, nargs='+', help='Input RDL files')
    aparser.add_argument('--perl-preproc', action='store_true',
                         dest='perl_pp', help='Enable perl preprocessing')
    aparser.add_argument('--print', action='store_true',
                         dest='v_print', help='Print register information')
    aparser.add_argument('--lang', choices=['verilog', 'vhdl'], default='vhdl',
                         dest='lang', help='Select target language')
    log_level_nos = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL]
    log_level_names = [logging.getLevelName(i).lower() for i in log_level_nos]
    log_levels = dict(zip(log_level_names, log_level_nos))
    aparser.add_argument('--log-level', choices=log_level_names, default='INFO',
                         dest='log_level', type=str.lower, help='Select log level')
    aparser.add_argument('--debug', action='store_true',
                         dest='debug', help=argparse.SUPPRESS)
    args = aparser.parse_args()

    # setup logger
    logger.setup_logging(log_levels[args.log_level], args.debug, None)
    log = logging.getLogger()

    # preprocessing
    log.info('Start preprocessing..')
    data = ''
    line_infos = []
    for fn in args.files:
        (data_pp, ln) = preproc(fn, args.perl_pp)
        data += data_pp
        line_infos += ln
    log.info('Preprocessing done.')

    log.handlers[0].formatter.line_info = Common.flatten(line_infos, [])

    # parsing
    log.info('Start parsing..')
    inputstream = antlr4.InputStream(data)

    lexer = SystemRDLLexer(inputstream)
    lexer.removeErrorListeners()
    lexer._listeners = [CustomErrorListener()]

    stream = antlr4.CommonTokenStream(lexer)
    parser = SystemRDLParser(stream)
    parser._listeners = [CustomErrorListener()]

    tree = parser.root()
    listener = Listener(parser)
    walker = antlr4.ParseTreeWalker()
    walker.walk(listener, tree)
    log.info('Parsing done.')
    for am in listener.addrmaps:
        log.info(f'{am.def_id} assigned address space till 0x{am.last_addr:x}')
        if args.v_print:
            am.pprint()
        log.info(f'Generating RTL for AddrMap {am.def_id}..')
        rtl_gen.generate_rtl(args.lang, am, listener.signals, listener.internal_signals)

    log.info('Done.')
    logging.shutdown()
def main():
    setup_logging(__file__)
    if not is_weekend():
        logging.info("Start Collect %s Trading Data" % datetime.date.today())
        collect_stock_daily_trading()
        logging.info("Collect %s Trading Data Success" % datetime.date.today())
Example #19
def main():
    setup_logging(__file__, logging.WARNING)
    if not is_weekend():
        logging.info('Start Collect %s Trading Data' % datetime.date.today())
        collect_stock_daily_trading()
        logging.info('Collect %s Trading Data Success' % datetime.date.today())
Example #20
import config
import daemon
import logger
import packages
import repository
import server
import transaction
import util

from system import system
from constants import *
from i18n import _

# Initialize a very simple logging that is removed when a Pakfire instance
# is started.
logger.setup_logging()

class Cli(object):
	pakfire = base.Pakfire

	def __init__(self):
		self.parser = argparse.ArgumentParser(
			description = _("Pakfire command line interface."),
		)

		self.parse_common_arguments()

		self.parser.add_argument("--root", metavar="PATH",
			default="/",
			help=_("The path where pakfire should operate in."))
def main():
    setup_logging(__file__, logging.WARNING)
    if not is_weekend():
        logging.info('Start Collect %s Trading Data' % datetime.date.today())
        collect_stock_daily_trading()
        logging.info('Collect %s Trading Data Success' % datetime.date.today())
    def __init__(self, args, options='', timestamp=True, slave_mode=False):
        # slave_mode - when calling the config parser from an existing process, we
        # avoid reinitialising the logger and ignore sys.argv when argparsing.

        # parse default and custom cli options
        for opt in options:
            args.add_argument(*opt.flags, default=None, type=opt.type)

        if slave_mode:
            args = args.parse_args(args=[])
        else:
            args = args.parse_args()

        if args.device:
            os.environ["CUDA_VISIBLE_DEVICES"] = args.device

        if args.resume and not slave_mode:
            self.resume = Path(args.resume)
            # self.cfg_fname = self.resume.parent / 'config.json'
        else:
            msg_no_cfg = "Config file must be specified"
            assert args.config is not None, msg_no_cfg
            self.resume = None
        self.cfg_fname = Path(args.config)

        config = self.load_config(self.cfg_fname)
        self._config = _update_config(config, options, args)

        if self._config.get("eval_config", False):
            # validate path to evaluation file
            eval_cfg_path = self._config.get("eval_config")
            msg = f"eval_config was specified, but `{eval_cfg_path}` does not exist"
            assert Path(self._config.get("eval_config")).exists(), msg

        # set save_dir where trained model and log will be saved.
        if "tester" in self.config:
            save_dir = Path(self.config['tester']['save_dir'])
        else:
            save_dir = Path(self.config['trainer']['save_dir'])
        timestamp = datetime.now().strftime(r"%Y-%m-%d_%H-%M-%S") if timestamp else ""

        if slave_mode:
            timestamp = f"{timestamp}-eval-worker"

        exper_name = self.set_exper_name(args, config=config)

        if getattr(args, "group_id", False):
            subdir = Path(args.group_id) / f"seed-{args.group_seed}" / timestamp
        else:
            subdir = timestamp

        self._save_dir = save_dir / 'models' / exper_name / subdir
        self._web_log_dir = save_dir / 'web' / exper_name / subdir
        self._log_dir = save_dir / 'log' / exper_name / subdir
        self._exper_name = exper_name
        self._args = args

        # if set, remove all previous experiments with the current config
        if vars(args).get("purge_exp_dir", False):
            for dirpath in (self._save_dir, self._log_dir, self._web_log_dir):
                config_dir = dirpath.parent
                existing = list(config_dir.glob("*"))
                print(f"purging {len(existing)} directories from config_dir...")
                tic = time.time()
                os.system(f"rm -rf {config_dir}")
                print(f"Finished purge in {time.time() - tic:.3f}s")

        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.log_dir.mkdir(parents=True, exist_ok=True)

        # save updated config file to the checkpoint dir
        write_json(self.config, self.save_dir / 'config.json')

        # configure logging module
        if not slave_mode:
            self.log_path = setup_logging(self.log_dir)

        self.log_levels = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG}
Example #23
from message_handler import *
from logger import setup_logging

import asyncio
from itertools import count

# DEBUG STUFF
_d_conn_counter = 0  # Counter for conns.
_d_conn_rev = 0

# Logging
log = setup_logging(__name__)

# List of all connections
connections = {}


class ChatServer(asyncio.Protocol):
    name = None  # Name of client
    transport = None  # Transport i/f to

    def connection_made(self, transport):
        global _d_conn_counter
        _d_conn_counter += 1

        host, port = transport.get_extra_info('peername')
        log.debug("Connection made from {1}:{2}. Total = {0}".format(
            _d_conn_counter, host, port))

        # Storing this connection...
        self.transport = transport
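A sketch of serving this protocol (address and port are placeholders, mirroring the client bootstrap shown earlier):

loop = asyncio.get_event_loop()
# Each incoming TCP connection gets its own ChatServer instance.
server = loop.run_until_complete(
    loop.create_server(ChatServer, '127.0.0.1', 8888))
try:
    loop.run_forever()
finally:
    server.close()
    loop.run_until_complete(server.wait_closed())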
import os
import sys
import unittest

# our own packages
test_dir = os.path.dirname(__file__)
sys.path.insert(1, os.path.join(test_dir, '..', 'ext', 'velib_python', 'test'))
sys.path.insert(1, os.path.join(test_dir, '..'))
import dbus_systemcalc
import vedbus
from logger import setup_logging
from mock_dbus_monitor import MockDbusMonitor
from mock_dbus_service import MockDbusService
from mock_settings_device import MockSettingsDevice


dbus_systemcalc.logger = setup_logging()


class TestSystemCalcBase(unittest.TestCase):
	def __init__(self, methodName='runTest'):
		unittest.TestCase.__init__(self, methodName)
		self._service = MockDbusService('com.victronenergy.system')
		self._system_calc = dbus_systemcalc.SystemCalc(\
			lambda x: MockDbusMonitor(x), \
			lambda x: MockDbusService(x), \
			lambda x, y: MockSettingsDevice(x, y))
		self._monitor = self._system_calc._dbusmonitor
		self._service = self._system_calc._dbusservice
		self._settings = self._system_calc._settings

	def _update_values(self):
#coding:utf-8
__author__ = 'liuyix'

from logger import setup_logging

logger = setup_logging(console=True, logfile='locate_src.log', filelog=True)


def find_src(trace_name, filter_name, result_file):
    import linecache

    with open(filter_name) as filter_file:
        with open(result_file, 'w') as result:
            last_src_info = []
            for line in filter_file:
                if line.isspace():
                    continue
                if not line.split()[0].strip().isdigit():
                    result.write(line)
                else:
                    src_info = linecache.getline(trace_name, int(line.split()[-1])).split(' : ')[-3:]
                    if src_info != last_src_info:
                        last_src_info = src_info
                        result.write(line.rstrip() + ' ' + ' '.join(src_info))
                    else:
                        result.write(line)


def usage():
    help_message = 'locate_src.py <modified trace file> filtered_file result_file'
    print(help_message)
Example #26
from core_modules.election_news_classifier.election_classifier import SemanticFeatureExtractor
from logger import setup_logging

__author__ = 'pralav'
def classify_news(path='/news_classified.pkl', articles=None, logging=None):
    vec_name="new_vectorizers.pkl"
    mod_name="new_models.pkl"
    kbest_name="new_kbests.pkl"
    sem = SemanticFeatureExtractor(logging=logging,vec_name=vec_name,model_name=mod_name,kbest_name=kbest_name)
    # sem.train_examples()
    sem.classify_db_articles_batch(reset=False,limit=5000)

if __name__ == '__main__':

    logging=setup_logging()
    classify_news(logging=logging)
Example #27
	def __del__(self):
		# Reset logging.
		logger.setup_logging()
        if hybrid:
            csvhelper.write(['spm total', 'spm block', 'spm miss', 'cache total', 'cache line', 'cache miss'])
            test_hybrid(p, csvhelper)
        if cache:
            csvhelper.write(['cache size', 'cache line', 'miss'])
            test_cache(p, csvhelper)
    trace_fobj.close()


def simple_trace():
    trace='./simple_matrix/thread0-trace.out'
    trace_file = open(trace, 'r')
    file_line_count = wccount(trace)
    trace_begin = int(68365.0 / 268569.0 * file_line_count)
    trace_end = int(151920.0 / 268569.0 * file_line_count)
    phase = trace_file.readlines()[trace_begin:trace_end]

    logger.info("line count: %d", file_line_count)
    logger.info("begin: %d, end: %d", trace_begin, trace_end)
    return phase
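`wccount` is defined elsewhere in that project; a simple stand-in with the same contract (the body is an assumption):

def wccount(filename):
    # Count lines the way `wc -l` would.
    with open(filename) as f:
        return sum(1 for _ in f)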

if __name__ == "__main__":
    import sys

    from logger import setup_logging
    logger = setup_logging(console=False, logfile='mem_simulation.log')
    #do_simple_test()
    #test_cache()
    #filter_result_parser()
    #extract_phases()
    simulate_main()
import datetime
import calendar
import os
import sys

# our own packages
test_dir = os.path.dirname(__file__)
sys.path.insert(0, test_dir)
sys.path.insert(1, os.path.join(test_dir, '..', 'ext', 'velib_python', 'test'))
sys.path.insert(1, os.path.join(test_dir, '..'))
import dbus_generator
import gobject
from logger import setup_logging
from mock_dbus_monitor import MockDbusMonitor
from mock_dbus_service import MockDbusService
from mock_settings_device import MockSettingsDevice

dbus_generator.logger = setup_logging()


class MockGenerator(dbus_generator.Generator):
	def _create_dbus_monitor(self, *args, **kwargs):
		return MockDbusMonitor(*args, **kwargs)

	def _create_settings(self, *args, **kwargs):
		return MockSettingsDevice(*args, **kwargs)

	def _create_dbus_service(self):
		return MockDbusService('com.victronenergy.generator.startstop0')


class TestGeneratorBase(unittest.TestCase):
	def __init__(self, methodName='runTest'):
Example #30
import logging
import numpy as np
import pandas as pd

from sklearn.metrics import classification_report
from logger import setup_logging
from classifier.classifier import IsReturningCustomerClassifier as Classifier

setup_logging(
    filename="main"
)
logger = logging.getLogger(__name__)


def get_feature_importance(model):
    """
        Get feature importances
        Args:
            model  : model

        Returns:
            feature importances
    """
    importances = model.feature_important

    return importances


def get_classification_report(actual, predictions):
    """
        Get classification report, f1 score
Example #31
            self.utils.save_file_joblib(
                params, "%s/%s_%s.npy" % (out_path, model_name, e))
            self.utils.save_file_joblib(
                enc_params,
                "%s/encoding_%s_%s.npy" % (out_path, model_name, e))
            all_params = (self.max_chars, self.num_filters, self.encoder_size,
                          self.decoder_size, self.char_vocab_size,
                          self.grad_clip, self.norm_constraint,
                          self.learning_rate)
            self.utils.save_file_joblib(
                all_params,
                "%s/encoding_%s_%s_global.npy" % (out_path, model_name, e))


if __name__ == '__main__':
    logging = setup_logging(save_path=LOG_PATH + "/training.log")
    parser = argparse.ArgumentParser(description='Trainer')

    parser.add_argument('-m',
                        '--model_name',
                        help='Model Name',
                        dest='model_name',
                        default='tweet_word')
    parser.add_argument('-f',
                        '--num_filters',
                        help='No. of filters',
                        dest='num_filters',
                        default=256,
                        type=int)
    parser.add_argument('-L',
                        '--encoder_len',
    def __init__(self, args, options='', timestamp=True):
        # parse default and custom cli options
        for opt in options:
            args.add_argument(*opt.flags, default=None, type=opt.type)
        args = args.parse_args()

        if args.device:
            os.environ["CUDA_VISIBLE_DEVICES"] = args.device

        self.bert_config_path = None

        if args.resume:
            self.resume = Path(args.resume)
            self.cfg_fname = self.resume.parent / 'config.json'
            self.bert_config_path = str(self.resume.parent / 'BertConfig.json')
        else:
            msg_no_cfg = "Configuration file need to be specified. Add '-c config.json', for example."
            assert args.config is not None, msg_no_cfg
            self.resume = None
            self.cfg_fname = 'config' / Path(args.config)

        # load config file and apply custom cli options
        config = read_json(self.cfg_fname)
        self._config = _update_config(config, options, args)

        # set save_dir where trained model and log will be saved.
        self.base_save_dir = Path(self.config['trainer']['save_dir'])
        self.exper_name = self.config['processor']['args']['data_name'] + \
            '_' + self.config['arch']['type']

        timestamp = datetime.now().strftime(
            r'%m%d_%H%M%S') if timestamp else ''
        self._save_dir = self.base_save_dir / 'models' / self.exper_name / timestamp
        self._log_dir = self.base_save_dir / 'log' / self.exper_name / timestamp

        self.log_dir.mkdir(parents=True, exist_ok=True)

        # configure logging module
        setup_logging(self.log_dir)
        self.log_levels = {
            0: logging.WARNING,
            1: logging.INFO,
            2: logging.DEBUG
        }

        setup_seed(self._config['seed'])

        self.debug_mode = args.debug if "debug" in args else False
        self.all = args.all if "all" in args else False
        self.reset = args.reset if "reset" in args else False
        self.search_mode = args.searchMode if "searchMode" in args else "disable"

        self.gradient_accumulation_steps = self.config['trainer'][
            'gradient_accumulation_steps']

        if self.search_mode != 'disable':
            self.config['trainer']['tensorboardX'] = False

        if self.all:
            self.config["data_loader"]["args"]["validation_split"] = 0.0

        if self.debug_mode:
            self.config["trainer"]["epochs"] = 2
def main():
    setup_logging(__file__, logging.WARNING)
    date = setup_argparse()
    logging.info('Start Collect %s Trading Data' % datetime.date.today())
    collect_stock_daily_trading(date)
    logging.info('Collect %s Trading Data Success' % datetime.date.today())
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
config
"""
import sys
import yaml
import logging
reload(sys)
sys.setdefaultencoding("utf-8")
from logger import setup_logging
setup_logging()


class YamlConfigReader(object):
    """
    YamlConfigReader
    """
    def __init__(self, config_file):
        self.logger = logging.getLogger("YamlConfigReader")
        self.config_file = config_file
        self.config_dict = self.load_conf(self.config_file)

    def load_conf(self, config_file):
        """
        load_conf
        """
        try:
            with open(config_file, 'rt') as f:
                config_dict = yaml.safe_load(f)
				r = v
			else:
				r += v
	return r


if __name__ == "__main__":
	# Argument parsing
	parser = argparse.ArgumentParser(
		description='Converts readings from AC-Sensors connected to a VE.Bus device in a pvinverter ' +
					'D-Bus service.'
	)

	parser.add_argument("-d", "--debug", help="set logging level to debug",
					action="store_true")

	args = parser.parse_args()

	print("-------- dbus_systemcalc, v" + softwareVersion + " is starting up --------")
	logger = setup_logging(args.debug)

	# Have a mainloop, so we can send/receive asynchronous calls to and from dbus
	DBusGMainLoop(set_as_default=True)

	systemcalc = SystemCalc()

	# Start and run the mainloop
	logger.info("Starting mainloop, responding only on events")
	mainloop = gobject.MainLoop()
	mainloop.run()
Example #36
import os

import boto3
import requests
import pytz
from Crypto.Cipher import AES
from Crypto.Util import Counter
from logger import setup_logging
from pyspark.sql import Row, SparkSession
from pyspark.sql.types import *
from pyspark.sql import functions as F

keys_map = {}  # for caching dks key

the_logger = setup_logging(
    log_level=os.environ["ADG_LOG_LEVEL"].upper()
    if "ADG_LOG_LEVEL" in os.environ else "INFO",
    log_path="${log_path}",
)


class CollectionData:
    def __init__(self, collection_name, staging_hive_table, tag_value):
        self.collection_name = collection_name
        self.staging_hive_table = staging_hive_table
        self.tag_value = tag_value


def main():
    database_name = get_staging_db_name()
    secrets_response = retrieve_secrets()
    collections = get_collections(secrets_response)
    stock_info.business_scope = survey_table[32].text
    stock_info.company_introduce = survey_table[31].text
    stock_info.area = survey_table[23].text

    core_concept_url = core_concept.format(query_id)
    concept_html = send_request(core_concept_url)
    concept_soup = BeautifulSoup(concept_html, 'lxml').find('div', class_='summary').find('p').text
    stock_info.market_plate = concept_soup.replace(u'要点一:所属板块 ', '').replace(u'。', '').split(u',')
    stock_info.update_time = datetime.datetime.now()
    stock_info.save()


def start_collect_detail():
    try:
        all_stocks = StockInfo.objects().timeout(False)
    except Exception as e:
        logging.error('Error when querying StockInfo: ' + str(e))
        raise

    for i in all_stocks:
        try:
            collect_company_survey(i)
        except Exception as e:
            logging.error('Error when collecting %s data: %s' % (i.stock_number, e))
        time.sleep(random.random())


if __name__ == '__main__':
    setup_logging(__file__)
    start_collect_detail()
Example #38
def create_app(dev_server=False):
    app = Flask(__name__)
    app.config.from_envvar('SETTINGS_FILE')
    app.jinja_options['extensions'].append('jinja2.ext.do')

    if install_logging:
        logger.setup_logging(app)

    for extension in (cdn, csrf, cache, db, mail, assets, toolbar):
        extension.init_app(app)

    migrate.init_app(app, db, render_as_batch=True)

    login_manager.setup_app(app, add_context_processor=True)
    app.login_manager.login_view = 'users.login'

    from models.user import User
    from models import site_state, feature_flag

    @login_manager.user_loader
    def load_user(userid):
        user = User.query.filter_by(id=userid).first()
        if user:
            _request_ctx_stack.top.user_email = user.email
        return user

    if app.config.get('TICKETS_SITE'):
        gocardless.environment = app.config['GOCARDLESS_ENVIRONMENT']
        gocardless.set_details(
            app_id=app.config['GOCARDLESS_APP_ID'],
            app_secret=app.config['GOCARDLESS_APP_SECRET'],
            access_token=app.config['GOCARDLESS_ACCESS_TOKEN'],
            merchant_id=app.config['GOCARDLESS_MERCHANT_ID'])

        stripe.api_key = app.config['STRIPE_SECRET_KEY']

        @app.before_request
        def load_per_request_state():
            site_state.get_states()
            feature_flag.get_db_flags()

    if app.config.get('DEBUG'):
        # Prevent staging site from being displayed on Google
        @app.after_request
        def send_noindex_header(response):
            response.headers['X-Robots-Tag'] = 'noindex, nofollow'
            return response

        # Prevent DB connections and random numbers being shared
        ppid = os.getpid()

        @app.before_request
        def fix_shared_state():
            if os.getpid() != ppid:
                db.engine.dispose()
                random.seed()

    @app.before_request
    def simple_cache_warning():
        if not dev_server and app.config.get('CACHE_TYPE', 'null') == 'simple':
            logging.warning(
                'Per-process cache being used outside dev server - refreshing will not work'
            )

    @app.after_request
    def send_security_headers(response):
        use_hsts = app.config.get('HSTS', False)
        if use_hsts:
            max_age = app.config.get('HSTS_MAX_AGE', 3600 * 24 * 7 * 4)
            response.headers[
                'Strict-Transport-Security'] = 'max-age=%s' % max_age

        response.headers['X-Frame-Options'] = 'deny'
        response.headers['X-Content-Type-Options'] = 'nosniff'

        return response

    @app.errorhandler(404)
    def handle_404(e):
        return render_template('errors/404.html'), 404

    @app.errorhandler(500)
    def handle_500(e):
        return render_template('errors/500.html'), 500

    from apps.common import load_utility_functions
    load_utility_functions(app)

    from apps.base import base
    from apps.users import users
    from apps.tickets import tickets
    from apps.payments import payments
    from apps.cfp import cfp
    from apps.cfp_review import cfp_review
    from apps.schedule import schedule
    from apps.arrivals import arrivals
    from apps.admin import admin
    app.register_blueprint(base)
    app.register_blueprint(users)
    app.register_blueprint(tickets)
    app.register_blueprint(payments)
    app.register_blueprint(cfp)
    app.register_blueprint(cfp_review, url_prefix='/cfp-review')
    app.register_blueprint(schedule)
    app.register_blueprint(arrivals, url_prefix='/arrivals')
    app.register_blueprint(admin, url_prefix='/admin')

    return app
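A hedged way to run the factory locally (the settings path is an assumed example; `install_logging` comes from the enclosing module):

import os

os.environ.setdefault('SETTINGS_FILE', 'config/development.cfg')  # assumed path
app = create_app(dev_server=True)
app.run(debug=True)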
Example #39
import logging

from queue import QueueManager, Queue
from logger import setup_logging

if __name__ == '__main__':
    os.chdir(os.path.dirname(os.path.abspath(sys.argv[0])))
    try:
        from tendo import singleton
        me = singleton.SingleInstance()
    except Exception as e:
        print("Another instance of script is running. Exiting!!!")
        sys.exit(2)
        
    try:
        setup_logging("Notification", os.path.join("data", "log"), logfile="queue.log", scrnlog = False)
        logger = logging.getLogger("Notification")
        
        logger.info("cron job initiated")
        if len(sys.argv) <= 1:
            #process Queue
            logger.info("Processing queue")
            queue_manager = QueueManager()
            for case_id, queue_item in queue_manager.get_current_queue().items():
                logger.info("Processing Queue. Case ID: %s", case_id)
                queue_item.data.print_data()
                main_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 
                                  "main.py")
                print(os.system("python '%s' -i %s" % (main_file_path, str(case_id))))
        elif len(sys.argv) == 2:
            if sys.argv[1].lower() == "--export":
Example #40
        self.logger.info(
            "Writing %s failed ingestion(s) out to %s" % (len(self.failed_ingestions), outfile))
        writer.writerow(dict((fn,fn) for fn in fieldnames))
        for f in self.failed_ingestions:
            writer.writerow(f)

if __name__ == '__main__':
    # Separate the task and arguments.
    task, args = sys.argv[1], sys.argv[2:]

    # Setup Logging
    log_file_name = "_".join((
        "ingestion", task, datetime.today().strftime('%Y_%m_%d_%H_%M_%S'),
        )) + ".log"
    logger.setup_logging(
        log_file_name=log_file_name,
        send_mail="-no-email" not in args and EMAIL['enabled'])
    main_logger = logging.getLogger('Main')

    # If the -h argument is passed at the command line, display the internal documentation and exit.
    if "-h" in sys.argv:
        sys.stdout.write(INTERNAL_DOCUMENTATION)
        log_and_exit(0)

    # Run the task with the arguments.
    perform = Task(args)
    if task in Task.valid_tasks:
        main_logger.info("-")
        main_logger.info(
            "Running ingestion task '%s' with command-line arguments '%s'" % (
                task, " ".join(args)))
Example #41
def main(config):
    cfg_trainer = config['trainer_colab'] if config['colab'] else config['trainer']
    run_id = config['resume'].split('/')[-2]
    file_name = config['resume'].split('/')[-1].split('.')[0]
    output_dir = os.path.join(cfg_trainer['output_dir'], run_id, file_name)
    # Create the output directory if needed; if it already existed, empty it.
    if os.path.exists(output_dir):
        rmdir(output_dir, remove_parent=False)
    else:
        os.makedirs(output_dir, exist_ok=True)
    setup_logging(output_dir)
    logger = logging.getLogger('test')

    use_gpu = cfg_trainer['n_gpu'] > 0 and torch.cuda.is_available()
    device = torch.device('cuda:0' if use_gpu else 'cpu')
    map_location = "cuda:0" if use_gpu else torch.device('cpu')

    datamanager, _ = build_datamanager(config['type'], config['data'])

    model, _ = build_model(config,
                           num_classes=len(
                               datamanager.datasource.get_attribute()))

    logger.info('Loading checkpoint: {} ...'.format(config['resume']))
    checkpoint = torch.load(config['resume'], map_location=map_location)

    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    model.to(device)

    preds = []
    labels = []

    with tqdm(total=len(datamanager.get_dataloader('test'))) as epoch_pbar:
        with torch.no_grad():
            for batch_idx, (data, _labels) in enumerate(
                    datamanager.get_dataloader('test')):
                data, _labels = data.to(device), _labels.to(device)

                out = model(data)

                _preds = torch.sigmoid(out)
                preds.append(_preds)
                labels.append(_labels)
                epoch_pbar.update(1)
    preds = torch.cat(preds, dim=0)
    labels = torch.cat(labels, dim=0)
    preds = preds.cpu().numpy()
    labels = labels.cpu().numpy()

    # # get best threshold
    # from sklearn.metrics import roc_curve, auc, precision_recall_curve

    # precision = dict()
    # recall = dict()
    # thresholds_pr = dict()
    # pr_auc = dict()
    # best_threshold = dict()

    # fpr = dict()
    # tpr = dict()
    # roc_auc = dict()
    # thresholds_roc = dict()

    # for i in range(len(datamanager.datasource.get_attribute())):
    #     precision[i], recall[i], thresholds_pr[i] = precision_recall_curve(labels[:, i], preds[:, i])
    #     pr_auc[i] = auc(recall[i], precision[i])
    #     best_threshold[i] = np.argmax((2 * precision[i] * recall[i]) / (precision[i] + recall[i]))

    #     fpr[i], tpr[i], thresholds_roc[i] = roc_curve(labels[:, i], preds[:, i])
    #     roc_auc[i] = auc(fpr[i], tpr[i])

    #     fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2)
    #     ax1.plot(recall[i], precision[i], label='Precision-Recall Curve, mAP: %f' % pr_auc[i])
    #     ax1.scatter(
    #         recall[i][best_threshold[i]],
    #         precision[i][best_threshold[i]],
    #         marker='o',
    #         color='black',
    #         label='Best threshold %f' % (thresholds_pr[i][best_threshold[i]]))

    #     ax1.set_xlabel('Recall')
    #     ax1.set_ylabel('Precision')
    #     ax1.set_title('Attribute: %s' % datamanager.datasource.get_attribute()[i])
    #     # ax1.legend(loc="lower right")

    #     fig, ax2 = plt.subplots(122)
    #     ax2.plot(fpr[i], tpr[i], label='ROC curve (area = %0.2f)' % (roc_auc[i]))
    #     ax2.plot([0, 1], [0, 1], 'k--')
    #     ax2.scatter(fpr[i][best_threshold[i]], tpr[i][best_threshold[i]], marker='o', color='black', label='Best threshold %f' % (thresholds[i][best_threshold[i]]))
    #     ax2.set_xlim([0.0, 1.0])
    #     ax2.set_ylim([0.0, 1.05])
    #     ax2.set_xlabel('False Positive Rate')
    #     ax2.set_ylabel('True Positive Rate')
    #     ax2.set_title('Attribute: %s' % datamanager.datasource.get_attribute()[i])
    #     # ax2.legend(loc="lower right")

    # plt.show()

    result_label, result_instance = recognition_metrics(labels, preds)
    log_test(logger.info, datamanager.datasource.get_attribute(),
             datamanager.datasource.get_weight('test'), result_label,
             result_instance)
Example #42
from flask_assets import Environment, Bundle
from flask_wtf import CsrfProtect

import gocardless
import stripe

import logging
import logger

logging.basicConfig(level=logging.NOTSET)

app = Flask(__name__)
csrf = CsrfProtect(app)
app.config.from_envvar('SETTINGS_FILE')

logger.setup_logging(app)

db = SQLAlchemy(app)
mail = Mail(app)
login_manager = LoginManager()

assets = Environment(app)
css_all = Bundle('css/main.css',
                  output='gen/packed.css', filters='cssmin')
css_admin = Bundle('css/admin.css',
                   output='gen/admin-packed.css', filters='cssmin')
css_print = Bundle('css/print.css',
                   output='gen/print-packed.css', filters='cssmin')
assets.register('css_all', css_all)
assets.register('css_admin', css_admin)
assets.register('css_print', css_print)
Example #43
from flask_assets import Environment, Bundle
from flask_wtf import CsrfProtect

import gocardless
import stripe

import logging
import logger

logging.basicConfig(level=logging.NOTSET)

app = Flask(__name__)
csrf = CsrfProtect(app)
app.config.from_envvar('SETTINGS_FILE')

logger.setup_logging(app)

db = SQLAlchemy(app)
mail = Mail(app)
login_manager = LoginManager()

assets = Environment(app)
css_all = Bundle('css/main.css', output='gen/packed.css', filters='cssmin')
css_admin = Bundle('css/admin.css',
                   output='gen/admin-packed.css',
                   filters='cssmin')
css_print = Bundle('css/print.css',
                   output='gen/print-packed.css',
                   filters='cssmin')
assets.register('css_all', css_all)
assets.register('css_admin', css_admin)
Example #44
def create_app(dev_server=False):
    app = Flask(__name__)
    app.config.from_envvar('SETTINGS_FILE')
    app.jinja_options['extensions'].append('jinja2.ext.do')

    if install_logging:
        logger.setup_logging(app)

    for extension in (cdn, csrf, cache, db, mail, assets, toolbar):
        extension.init_app(app)

    migrate.init_app(app, db, render_as_batch=True)

    login_manager.setup_app(app, add_context_processor=True)
    app.login_manager.login_view = 'users.login'

    from models.user import User
    from models import site_state, feature_flag

    @login_manager.user_loader
    def load_user(userid):
        user = User.query.filter_by(id=userid).first()
        if user:
            _request_ctx_stack.top.user_email = user.email
        return user

    if app.config.get('TICKETS_SITE'):
        gocardless.environment = app.config['GOCARDLESS_ENVIRONMENT']
        gocardless.set_details(app_id=app.config['GOCARDLESS_APP_ID'],
                               app_secret=app.config['GOCARDLESS_APP_SECRET'],
                               access_token=app.config['GOCARDLESS_ACCESS_TOKEN'],
                               merchant_id=app.config['GOCARDLESS_MERCHANT_ID'])

        stripe.api_key = app.config['STRIPE_SECRET_KEY']

        @app.before_request
        def load_per_request_state():
            site_state.get_states()
            feature_flag.get_db_flags()

    if app.config.get('DEBUG'):
        # Prevent staging site from being displayed on Google
        @app.after_request
        def send_noindex_header(response):
            response.headers['X-Robots-Tag'] = 'noindex, nofollow'
            return response

        # Prevent DB connections and random numbers being shared
        ppid = os.getpid()
        @app.before_request
        def fix_shared_state():
            if os.getpid() != ppid:
                db.engine.dispose()
                random.seed()

    @app.before_request
    def simple_cache_warning():
        if not dev_server and app.config.get('CACHE_TYPE', 'null') == 'simple':
            logging.warning('Per-process cache being used outside dev server - refreshing will not work')

    @app.after_request
    def send_security_headers(response):
        use_hsts = app.config.get('HSTS', False)
        if use_hsts:
            max_age = app.config.get('HSTS_MAX_AGE', 3600 * 24 * 7 * 4)
            response.headers['Strict-Transport-Security'] = 'max-age=%s' % max_age

        response.headers['X-Frame-Options'] = 'deny'
        response.headers['X-Content-Type-Options'] = 'nosniff'

        return response

    @app.errorhandler(404)
    def handle_404(e):
        return render_template('errors/404.html'), 404

    @app.errorhandler(500)
    def handle_500(e):
        return render_template('errors/500.html'), 500

    from apps.common import load_utility_functions
    load_utility_functions(app)

    from apps.base import base
    from apps.users import users
    from apps.tickets import tickets
    from apps.payments import payments
    from apps.cfp import cfp
    from apps.cfp_review import cfp_review
    from apps.schedule import schedule
    from apps.arrivals import arrivals
    from apps.admin import admin
    app.register_blueprint(base)
    app.register_blueprint(users)
    app.register_blueprint(tickets)
    app.register_blueprint(payments)
    app.register_blueprint(cfp)
    app.register_blueprint(cfp_review, url_prefix='/cfp-review')
    app.register_blueprint(schedule)
    app.register_blueprint(arrivals, url_prefix='/arrivals')
    app.register_blueprint(admin, url_prefix='/admin')

    return app
Example #45
    def __init__(self, config, modification=None, timestamp=None):
        # load config file and apply modification
        self._config = _update_config(config, modification)

        # logger
        self.log_levels = {
            0: logging.WARNING,
            1: logging.INFO,
            2: logging.DEBUG
        }

        verbosity = self['trainer']['verbosity']
        msg_verbosity = 'verbosity option {} is invalid. Valid options are {}.'.format(
            verbosity, self.log_levels.keys())
        assert verbosity in self.log_levels, msg_verbosity

        self._logger = logging.getLogger('default')
        self._logger.setLevel(self.log_levels[verbosity])

        # set save_dir where trained model and log will be saved.
        run_id = timestamp

        save_dir = Path(self.config['trainer']['save_dir'])
        exper_name = self.config['name']
        run_dir = save_dir / exper_name / run_id

        self._dir = run_dir
        self._log_dir = run_dir / 'log'
        self._save_dir = run_dir / 'models'
        self._tensors_dir = run_dir / 'tensors'
        self._samples_dir = run_dir / 'samples'

        self._im_dir = run_dir / 'images'
        self._fields_dir = run_dir / 'fields'
        self._grids_dir = run_dir / 'grids'
        self._norms_dir = run_dir / 'norms'

        # segmentation IDs
        self.structures_dict = {
            'left_thalamus': 10,
            'left_caudate': 11,
            'left_putamen': 12,
            'left_pallidum': 13,
            'brain_stem': 16,
            'left_hippocampus': 17,
            'left_amygdala': 18,
            'left_accumbens': 26,
            'right_thalamus': 49,
            'right_caudate': 50,
            'right_putamen': 51,
            'right_pallidum': 52,
            'right_hippocampus': 53,
            'right_amygdala': 54,
            'right_accumbens': 58
        }

        # make directories for saving checkpoints and log.
        exist_ok = run_id == ''

        self.log_dir.mkdir(parents=True, exist_ok=exist_ok)
        self.save_dir.mkdir(parents=True, exist_ok=exist_ok)
        self.tensors_dir.mkdir(parents=True, exist_ok=exist_ok)
        self.samples_dir.mkdir(parents=True, exist_ok=exist_ok)

        samples_VI_dir = self.samples_dir / 'VI'
        samples_MCMC_dir = self.samples_dir / 'MCMC'

        samples_VI_dir.mkdir(parents=True, exist_ok=exist_ok)
        samples_MCMC_dir.mkdir(parents=True, exist_ok=exist_ok)

        self.im_dir.mkdir(parents=True, exist_ok=exist_ok)
        self.fields_dir.mkdir(parents=True, exist_ok=exist_ok)
        self.grids_dir.mkdir(parents=True, exist_ok=exist_ok)
        self.norms_dir.mkdir(parents=True, exist_ok=exist_ok)

        # configure logging
        setup_logging(self.log_dir)

        # save updated config file to the checkpoint dir
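        # config_str is the config JSON flattened onto a single line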
        self.config_str = json.dumps(self.config, indent=4,
                                     sort_keys=False).replace('\n', '')
        write_json(self.config, run_dir / 'config.json')
Exemple #46
0
        if not is_duplicate(smt):
            smt.save()


def is_duplicate(smt):
    duplicate_data = SMT.objects(Q(stock_number=smt.stock_number) &
                                 Q(stock_name=smt.stock_name) &
                                 Q(rz_net_buy_amount=smt.rz_net_buy_amount) &
                                 Q(rq_repay_volume=smt.rq_repay_volume) &
                                 Q(rq_sell_volume=smt.rq_sell_volume) &
                                 Q(rq_remaining_volume=smt.rq_remaining_volume) &
                                 Q(rz_repay_amount=smt.rz_repay_amount) &
                                 Q(rz_buy_amount=smt.rz_buy_amount) &
                                 Q(rz_remaining_amount=smt.rz_remaining_amount))
    return bool(duplicate_data)


if __name__ == '__main__':
    setup_logging(__file__, logging.WARNING)
    logging.info('Start to collect stock margin trading')
    for url in rzrq_api:
        try:
            collect_margin_trading(url)
        except Exception as e:
            logging.error('Collect margin trading %s failed: %s' % (url, e))
    logging.info('Collect stock margin trading success')
parser = argparse.ArgumentParser(
    description='dummy dbus service'
)

parser.add_argument("-n", "--name",
    help="the D-Bus service you want me to claim",
    type=str, default="com.victronenergy.battery.socketcan_can0")

parser.add_argument("-i", "--instance",
    help="the DeviceInstance this service should use",
    type=int, default=0)

args = parser.parse_args()

print(__file__ + " is starting up, use -h argument to see optional arguments")
logger = setup_logging(debug=True)

# Have a mainloop, so we can send/receive asynchronous calls to and from dbus
DBusGMainLoop(set_as_default=True)

s = DbusDummyService(
    servicename=args.name,
    deviceinstance=args.instance,
    paths={
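        # every alarm path starts out cleared (0 = no alarm)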
        '/Alarms/CellImbalance': {'initial': 0},
        '/Alarms/HighChargeCurrent': {'initial': 0},
        '/Alarms/HighChargeTemperature': {'initial': 0},
        '/Alarms/HighDischargeCurrent': {'initial': 0},
        '/Alarms/HighTemperature': {'initial': 0},
        '/Alarms/HighVoltage': {'initial': 0},
        '/Alarms/InternalFailure': {'initial': 0},
        self.logger.info(
            "Writing %s failed ingestion(s) out to %s" % (len(self.failed_ingestions), outfile))
        writer.writerow({fn: fn for fn in fieldnames})  # write the header row
        for f in self.failed_ingestions:
            writer.writerow(f)

if __name__ == '__main__':
    # Separate the task and arguments.
    task, args = sys.argv[1], sys.argv[2:]

    # Setup Logging
    log_file_name = "_".join((
        "ingestion", task, datetime.today().strftime('%Y_%m_%d_%H_%M_%S'),
        )) + ".log"
    logger.setup_logging(
        log_file_name=log_file_name,
        send_mail="-no-email" not in args and EMAIL['enabled'],
        info_to_console="-v" in args)
    main_logger = logging.getLogger('Main')
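    # silence the chatty requests library below WARNING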
    logging.getLogger("requests").setLevel(logging.WARNING)

    # If the -h argument is passed at the command line, display the internal documentation and exit.
    if "-h" in sys.argv:
        sys.stdout.write(INTERNAL_DOCUMENTATION)
        log_and_exit(0)

    # Run the task with the arguments.
    perform = Task(args)
    if task in Task.valid_tasks:
        task_start_time = datetime.now()
        main_logger.info(
            "Running ingestion task '%s' with command-line arguments '%s'" % (
import simulation
import step
import cuboid
import vessel
import options
import process
import grid
import scoring
import entity

# these modules act as singletons, not classes
import toolbox
import computer
import logger

logger.setup_logging()

try:
    reload
except NameError:
    # Python 3: reload is no longer a builtin
    from importlib import reload

# inject test_shim before we load the domain
from sys import modules
try:
    test_shim = modules['test_shim']
except KeyError:
    import test_shim as test_shim

reload(cuboid)
Exemple #50
0
    def __del__(self):
        # Reset logging.
        logger.setup_logging()
    def __init__(self, args, options='', timestamp=True):
        # parse default and custom cli options
        for opt in options:
            args.add_argument(*opt.flags, default=None, type=opt.type)
        args = args.parse_args()
        self._name = None
        self._out_name = None

        if hasattr(args, 'device'):
            if args.device:
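                # restrict the visible GPUs before any CUDA context is created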
                os.environ["CUDA_VISIBLE_DEVICES"] = args.device

        self.cfg_fname = None
        if hasattr(args, 'resume'):
            if args.resume:
                self.resume = Path(args.resume)
                if hasattr(args, 'config') and args.config is not None:
                    self.cfg_fname = Path(args.config)
                else:
                    self.cfg_fname = self.resume.parent / 'config.json'

        if self.cfg_fname is None:
            if hasattr(args, 'config'):
                msg_no_cfg = "Configuration file needs to be specified. Add '-c config.json', for example."
                assert args.config is not None, msg_no_cfg
                self.resume = None
                self.cfg_fname = Path(args.config)

        if hasattr(args, 'name'):
            if args.name:
                self._name = str(args.name)

        if hasattr(args, 'oname'):
            if args.oname:
                self._out_name = str(args.oname)

        # load config file and apply custom cli options
        config = read_json(self.cfg_fname)
        self._config = _update_config(config, options, args)

        # set save_dir where trained model and log will be saved.
        save_dir = Path(self.config['trainer']['save_dir'])
        timestamp = datetime.now().strftime(
            r'%d%m%y_%H%M%S') if timestamp else ''

        exper_name = self.config['name']
        self._save_dir = save_dir / 'models' / exper_name / timestamp
        self._log_dir = save_dir / 'log' / exper_name / timestamp
        self._temp_dir = save_dir / 'temp' / exper_name / timestamp

        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.log_dir.mkdir(parents=True, exist_ok=True)
        self.temp_dir.mkdir(parents=True, exist_ok=True)

        # save updated config file to the checkpoint dir
        write_json(self.config, self.save_dir / 'config.json')

        # configure logging module
        setup_logging(self.log_dir)
        self.log_levels = {
            0: logging.WARNING,
            1: logging.INFO,
            2: logging.DEBUG
        }
Exemple #52
0
def main():
    signal.signal(signal.SIGINT, exit)

    parser = argparse.ArgumentParser(
        description='Publishes values from the D-Bus to an MQTT broker')
    parser.add_argument('-d',
                        '--debug',
                        help='set logging level to debug',
                        action='store_true')
    parser.add_argument('-q',
                        '--mqtt-server',
                        nargs='?',
                        default=None,
                        help='name of the mqtt server')
    parser.add_argument('-u',
                        '--mqtt-user',
                        default=None,
                        help='mqtt user name')
    parser.add_argument('-P',
                        '--mqtt-password',
                        default=None,
                        help='mqtt password')
    parser.add_argument(
        '-c',
        '--mqtt-certificate',
        default=None,
        help='path to CA certificate used for SSL communication')
    parser.add_argument('-b', '--dbus', default=None, help='dbus address')
    parser.add_argument('-k',
                        '--keep-alive',
                        default=60,
                        help='keep alive interval in seconds',
                        type=int)
    parser.add_argument(
        '-i',
        '--init-broker',
        action='store_true',
        help='Tries to setup communication with VRM MQTT broker')
    args = parser.parse_args()

    print("-------- dbus_mqtt, v{} is starting up --------".format(
        SoftwareVersion))
    logger = setup_logging(args.debug)

    # This allows us to use gobject code in new threads
    gobject.threads_init()

    mainloop = gobject.MainLoop()
    # Have a mainloop, so we can send/receive asynchronous calls to and from dbus
    DBusGMainLoop(set_as_default=True)
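    # a non-positive --keep-alive disables keep-alive pings entirely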
    keep_alive_interval = args.keep_alive if args.keep_alive > 0 else None
    handler = DbusMqtt(mqtt_server=args.mqtt_server,
                       ca_cert=args.mqtt_certificate,
                       user=args.mqtt_user,
                       passwd=args.mqtt_password,
                       dbus_address=args.dbus,
                       keep_alive_interval=keep_alive_interval,
                       init_broker=args.init_broker,
                       debug=args.debug)

    # Handle SIGUSR1 and dump a stack trace
    signal.signal(signal.SIGUSR1, dumpstacks)

    # Start and run the mainloop
    try:
        mainloop.run()
    except KeyboardInterrupt:
        pass
    try:
        all_stocks = StockInfo.objects()
    except Exception as e:
        logging.error('Error when querying StockInfo: ' + str(e))
        raise

    stocks_count = len(all_stocks)
    skip = 0

    while skip < stocks_count:
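        # page through StockInfo in query_step-sized batches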
        try:
            stocks = StockInfo.objects().skip(skip).limit(query_step)
        except Exception as e:
            logging.error('Error when querying skip %s StockInfo: %s' % (skip, e))
            stocks = []

        for i in stocks:
            try:
                collect_company_survey(i)
            except Exception as e:
                logging.error('Error when collecting %s data: %s' % (i.stock_number, e))
            time.sleep(random.random())
        skip += query_step


if __name__ == '__main__':
    setup_logging(__file__, logging.WARNING)
    logging.info('Start to collect stock detail info')
    start_collect_detail()
    logging.info('Collect stock detail info success')
def log():
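    # level 40 corresponds to logging.ERROR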
    logger.setup_logging(40, False, None)
    yield logging.getLogger()
    logging.shutdown()