Example #1
 def add_fault(uuid, json):
     faultobj = Root.create_object_from_json_extract_classname(json)
     if faultobj is None:
         print(f"fail creating obj from uuid={uuid} json={json}")
         return 0
     Configuration.set_fault(uuid, faultobj)
     return uuid
Example #2
 def faults(self, fault_id=None):
     method = cherrypy.request.method
     params = cherrypy.request.params
     json = None if method == 'DELETE' else cherrypy.request.json
     print(f'fault_id={fault_id}')
     print(f'method={method}')
     print(f'params={params}')
     print(f'json={json}')
     if method == 'GET':
         if fault_id is None:
             ids = {'faults ids': Configuration.get_all_faults_ids()}
             return ids
         else:
             # look the fault up in the shared Configuration store;
             # indexing self.faults here would index this handler method itself
             return Configuration.get_fault_by_uuid(fault_id=fault_id)
     if method in ('POST', 'CREATE', 'PUT'):
         fault_id = str(uuid.uuid4())
         faultobj = Root.create_object_from_json_extract_classname(json)
         if faultobj:
             Configuration.set_fault(fault_id, faultobj)
         else:
             fault_id = "0"
         return {'fault_id': fault_id}
     if method == 'DELETE':
         removed_uuid = Configuration.remove_fault(fault_id)
         if removed_uuid:
             cherrypy.response.status = 200
         else:
             cherrypy.response.status = 404
         return {'fault_id': fault_id}
Example #3
def main():

    strategy = get_strategy()
    args_namespace = strategy.create_argument_parser(False).parse_args()
    strategy_params_special = strategy.get_strategy_params(args_namespace)

    events_log_file = '{}/events.log'.format(args_namespace.output_directory)

    strategy_params = dict(stop_loss_pips=args_namespace.stop_loss,
                           take_profit_pips=args_namespace.take_profit)
    strategy_params.update(strategy_params_special)

    configuration = Configuration(data_handler_name=OandaDataHandler,
                                  execution_handler_name=OandaExecutionHandler)
    configuration.set_option(Configuration.OPTION_ACCOUNT_ID,
                             os.environ.get('OANDA_API_ACCOUNT_ID'))
    configuration.set_option(Configuration.OPTION_ACCESS_TOKEN,
                             os.environ.get('OANDA_API_ACCESS_TOKEN'))
    configuration.set_option(Configuration.OPTION_TIMEFRAME,
                             args_namespace.time_frame)
    configuration.set_option(
        Configuration.OPTION_NUMBER_OF_BARS_PRELOAD_FROM_HISTORY,
        max(strategy_params['sma_short_period'],
            strategy_params['sma_long_period']))

    trading = Trading(args_namespace.output_directory,
                      list(args_namespace.symbols), 0, configuration,
                      DataHandlerFactory(), ExecutionHandlerFactory(),
                      Portfolio, strategy, FixedPositionSize(0.01),
                      TextLogger(events_log_file), [Trading.LOG_TYPE_EVENTS],
                      strategy_params, 'equity.csv', 'trades.csv')

    trading.run()
    trading.print_performance()
Example #4
def main():

    strategy = get_strategy()
    args_namespace = strategy.create_argument_parser(False).parse_args()

    events_log_file = '{}/events.log'.format(args_namespace.output_directory)

    strategy_params = strategy.get_strategy_params(args_namespace)
    strategy_params['send_notifications'] = True
    strategy_params['webhook'] = os.environ.get('WEBHOOK_PINBAR_NOTIFIER')

    configuration = Configuration(data_handler_name=OandaDataHandler,
                                  execution_handler_name=OandaExecutionHandler)
    configuration.set_option(
        Configuration.OPTION_NUMBER_OF_BARS_PRELOAD_FROM_HISTORY, '0')

    configuration.set_option(Configuration.OPTION_ACCOUNT_ID,
                             os.environ.get('OANDA_API_ACCOUNT_ID'))
    configuration.set_option(Configuration.OPTION_ACCESS_TOKEN,
                             os.environ.get('OANDA_API_ACCESS_TOKEN'))
    configuration.set_option(Configuration.OPTION_TIMEFRAME,
                             args_namespace.time_frame)

    trading = Trading(args_namespace.output_directory,
                      list(args_namespace.symbols), 0, configuration,
                      DataHandlerFactory(), ExecutionHandlerFactory(),
                      Portfolio, strategy, FixedPositionSize(0.01),
                      TextLogger(events_log_file), [Trading.LOG_TYPE_EVENTS],
                      strategy_params, 'equity.csv', 'trades.csv')

    trading.run()
    trading.print_performance()
Example #5
	def __init__(self, job, eventdriver):
		# unique job name
		self.job = job

		# import data map config, such as MAP_ITORANGE
		self.config = Configuration()
		self.config.import_internal_config(job)
		self.config.print_config()

		# refresh config
		# config_listener = threading.Thread(target=self.config.update_config)
		# config_listener.setDaemon(True)
		# config_listener.start()

		self._lcurl = Lcurl()
		self._logger = Nlog()

		# event driver
		self._eventdriver = eventdriver
		self._eventdriver.add_event_listener(job, self.process)

		try:
			pull_strategy = self.config.CONFIG['GLOBAL']['JOB'][job]['PULL_STRATEGY']
			print('[PULL STRATEGY] %s' % pull_strategy)
			if pull_strategy is not None:
				pull_method = getattr(self, 'pull_from_' + pull_strategy)  # resolve the bound method without eval
				setattr(self, 'pull', pull_method)
		except Exception as e:
			raise InternalError('[pusher constructor ERROR]', e)
Example #6
def run_backtest_instance(args_namespace, events_log_file, heartbeat, sl, tp,
                          short_window, long_window, equity_filename,
                          trained_model_file):

    trades_filename = 'trades.csv'

    strategy_params = dict(stop_loss_pips=sl,
                           take_profit_pips=tp,
                           trained_model_file=trained_model_file,
                           sma_short_period=short_window,
                           sma_long_period=long_window)

    configuration = Configuration(
        data_handler_name=HistoricCSVDataHandler,
        execution_handler_name=SimulatedExecutionHandler)
    configuration.set_option(Configuration.OPTION_CSV_DIR,
                             args_namespace.data_directory)

    backtest = Backtest(args_namespace.output_directory,
                        args_namespace.symbols,
                        args_namespace.initial_capital_usd, heartbeat,
                        args_namespace.start_date, configuration,
                        DataHandlerFactory(), ExecutionHandlerFactory(),
                        Portfolio, EurUsdDailyForecastStrategy,
                        FixedPositionSize(0.5), TextLogger(events_log_file),
                        [Backtest.LOG_TYPE_EVENTS], strategy_params,
                        equity_filename, trades_filename)
    backtest.run()

    return backtest.stats
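A direct call might look like the sketch below; the parameter values and file names are purely illustrative, and args_namespace is assumed to come from the strategy's argument parser as in the other examples.

# illustrative values only (hypothetical stop-loss/take-profit pips, SMA windows and file names)
stats = run_backtest_instance(args_namespace,
                              events_log_file='out/events.log',
                              heartbeat=0,
                              sl=30, tp=60,
                              short_window=10, long_window=50,
                              equity_filename='equity_10_50.csv',
                              trained_model_file='model.pkl')
print(stats)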
Example #7
def main():

    # Load, prepare and validate configuration
    config = Configuration(args)

    # Initiate OpenStreetMap helper containing data
    data = OsmConnector(config)

    # Refresh argument option calls
    if args.refresh_routes:
        data.get_routes(refresh=True)
    elif args.refresh_stops:
        data.get_stops(refresh=True)
    elif args.refresh_osm:
        data.get_routes(refresh=True)
        data.get_stops(refresh=True)
    elif args.refresh_schedule_source:
        config.get_schedule_source(refresh=True)
    elif args.refresh_all:
        data.get_routes(refresh=True)
        data.get_stops(refresh=True)
        config.get_schedule_source(refresh=True)

    # Define (transitfeed) object for GTFS creation
    feed = transitfeed.Schedule()

    # Initiate creators for GTFS components through an object factory
    factory = CreatorFactory(config)
    agency_creator = factory.get_agency_creator()
    feed_info_creator = factory.get_feed_info_creator()
    routes_creator = factory.get_routes_creator()
    stops_creator = factory.get_stops_creator()
    schedule_creator = factory.get_schedule_creator()
    trips_creator = factory.get_trips_creator()

    # Add data to feed
    agency_creator.add_agency_to_feed(feed)
    feed_info_creator.add_feed_info_to_feed(feed)
    stops_creator.add_stops_to_feed(feed, data)
    routes_creator.add_routes_to_feed(feed, data)
    schedule_creator.add_schedule_to_data(data)
    trips_creator.add_trips_to_feed(feed, data)

    # Remove unused data from feed
    stops_creator.remove_unused_stops_from_feed(feed)
    routes_creator.remove_unused_routes_from_feed(feed)

    # Validate GTFS
    feed.Validate(transitfeed.ProblemReporter())

    # Write GTFS
    feed.WriteGoogleTransitFeed(config.output)

    sys.exit()
Example #8
def start_charybdisfs(source: str,  # noqa: C901  # ignore "is too complex" message
                      target: str,
                      debug: bool,
                      rest_api: bool,
                      rest_api_port: int,
                      mount: bool,
                      static_enospc: bool,
                      static_enospc_probability: float) -> None:
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG if debug else logging.INFO, format=LOG_FORMAT)

    if not rest_api and not mount:
        raise click.UsageError(message="can't run --no-rest-api and --no-mount simultaneously")

    if debug:
        sys.addaudithook(sys_audit_hook)

    if static_enospc:
        static_enospc_probability = max(0, min(100, round(static_enospc_probability * 100)))
        LOGGER.info("Going to add ENOSPC fault for all syscalls with probability %s%%", static_enospc_probability)
        enospc_fault = ErrorFault(sys_call=SysCall.ALL, probability=static_enospc_probability, error_no=errno.ENOSPC)
        Configuration.add_fault(fault_id=generate_fault_id(), fault=enospc_fault)
        LOGGER.debug("Faults added: %s", Configuration.get_all_faults())

    if rest_api:
        api_server_thread = \
            threading.Thread(target=start_charybdisfs_api_server,
                             kwargs={"port": rest_api_port, },
                             name="RestServerApi",
                             daemon=True)
        api_server_thread.start()
        atexit.register(stop_charybdisfs_api_server)

    if mount:
        if source is None or target is None:
            raise click.BadArgumentUsage("both source and target parameters are required for CharybdisFS mount")

        fuse_options = set(pyfuse3.default_options)
        fuse_options.add("fsname=charybdisfs")
        if debug:
            fuse_options.add("debug")

        operations = CharybdisOperations(source=source)

        pyfuse3.init(operations, target, fuse_options)
        atexit.register(pyfuse3.close)

    try:
        if mount:
            trio.run(pyfuse3.main)
        else:
            api_server_thread.join()
    except KeyboardInterrupt:
        LOGGER.info("Interrupted by user...")
        sys.exit(0)
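The same entry point can also be exercised directly; a minimal sketch with assumed paths and a 10% ENOSPC probability (in the project this is presumably driven by a click command line instead).

# assumed example values; every syscall fails with ENOSPC 10% of the time
start_charybdisfs(source="/data/source", target="/mnt/charybdisfs",
                  debug=False, rest_api=True, rest_api_port=8080,
                  mount=True, static_enospc=True, static_enospc_probability=0.1)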
Example #9
    def faults(self,
               fault_id: Optional[FaultID] = None
               ):  # noqa: C901  # ignore "is too complex" message
        method = cherrypy.request.method

        sys.audit("charybdisfs.api", method, fault_id, cherrypy.request)

        if method == "GET":
            if fault_id is None:
                return {"faults_ids": Configuration.get_all_faults_ids()}
            if fault := Configuration.get_fault_by_uuid(fault_id=fault_id):
                return {"fault_id": fault_id, "fault": fault.to_dict()}
            raise cherrypy.NotFound()
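A hedged sketch of mounting such a handler with CherryPy's default dispatcher; the wrapper class name and config below are assumptions for illustration, not taken from the project.

import cherrypy

class FaultsApi:  # hypothetical wrapper class
    @cherrypy.expose
    @cherrypy.tools.json_out()
    def faults(self, fault_id=None):
        ...  # handler body as above

# GET /faults         -> {"faults_ids": [...]}
# GET /faults/<uuid>  -> {"fault_id": "<uuid>", "fault": {...}}
cherrypy.quickstart(FaultsApi(), '/', {'/': {'tools.json_in.on': True}})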
Example #10
def main():
    strategy = get_strategy()
    args_namespace = strategy.create_argument_parser().parse_args()
    strategy_params_special = strategy.get_strategy_params(args_namespace)

    heartbeat = 0

    events_log_file = '{}/events.log'.format(args_namespace.output_directory)

    strategy_params = dict(stop_loss_pips=args_namespace.stop_loss,
                           take_profit_pips=args_namespace.take_profit)
    strategy_params.update(strategy_params_special)

    configuration = Configuration(data_handler_name=OandaDataHandler,
                                  execution_handler_name=OandaExecutionHandler)

    configuration.set_option(Configuration.OPTION_ACCOUNT_ID,
                             os.environ.get('OANDA_API_ACCOUNT_ID'))
    configuration.set_option(Configuration.OPTION_ACCESS_TOKEN,
                             os.environ.get('OANDA_API_ACCESS_TOKEN'))
    configuration.set_option(Configuration.OPTION_TIMEFRAME,
                             TimeFrame.TIME_FRAME_S5)

    trading = Trading(args_namespace.output_directory, args_namespace.symbols,
                      heartbeat, configuration, DataHandlerFactory(),
                      ExecutionHandlerFactory(), Portfolio, get_strategy(),
                      FixedPositionSize(0.01), TextLogger(events_log_file),
                      [Trading.LOG_TYPE_EVENTS], strategy_params, 'equity.csv',
                      'trades.csv')

    trading.run()
    trading.print_performance()
Example #11
def main():

    # Load, prepare and validate configuration
    config = Configuration(args)

    # Initiate OpenStreetMap helper containing data
    data = OsmConnector(config)

    # Refresh argument option calls
    if args.refresh_routes:
        data.get_routes(refresh=True)
    elif args.refresh_stops:
        data.get_stops(refresh=True)
    elif args.refresh_schedule_source:
        config.get_schedule_source(refresh=True)
    elif args.refresh_all:
        data.get_routes(refresh=True)
        data.get_stops(refresh=True)

    # Define (transitfeed) object for GTFS creation
    feed = transitfeed.Schedule()

    # Initiate creators for GTFS components through an object factory
    factory = CreatorFactory(config)
    agency_creator = factory.get_agency_creator()
    feed_info_creator = factory.get_feed_info_creator()
    routes_creator = factory.get_routes_creator()
    stops_creator = factory.get_stops_creator()
    schedule_creator = factory.get_schedule_creator()
    trips_creator = factory.get_trips_creator()

    # Add data to feed
    agency_creator.add_agency_to_feed(feed)
    feed_info_creator.add_feed_info_to_feed(feed)
    stops_creator.add_stops_to_feed(feed, data)
    routes_creator.add_routes_to_feed(feed, data)
    schedule_creator.add_schedule_to_data(data)
    trips_creator.add_trips_to_feed(feed, data)

    # Validate GTFS
    feed.Validate(transitfeed.ProblemReporter())

    # Write GTFS
    feed.WriteGoogleTransitFeed(config.output)

    # Add feed_info.txt to GTFS
    add_feed_info(feed, config.output)

    sys.exit()
Example #12
def main() -> None:
    # create the argument parser
    ap = argparse.ArgumentParser(prog="CryptoHFT.py")
    ap.add_argument("-w", "--warranty",
                    help="shows the details about the warranty.",
                    action="store_true")
    ap.add_argument("-c", "--conditions",
                    help="shows the details about the redistribution.",
                    action="store_true")
    ap.add_argument("-i", "--init",
                    help="create a new configuration.",
                    action="store_true")

    # retrieve the arguments
    args = ap.parse_args()

    # check if the -w flag or the -c flag has been written
    shownWarrantyOrConditions = False
    if args.conditions:
        printConditions()
        shownWarrantyOrConditions = True

    if args.warranty:
        printWarranty()
        shownWarrantyOrConditions = True

    if shownWarrantyOrConditions:
        sys.exit(0)

    # shows the license
    printLicense()

    # check if it's necessary to create a new configuration file
    if args.init:
        Configuration.createConfiguration(CONFIG_FILE_PATH)

    # load the configuration
    userConfig = Configuration(CONFIG_FILE_PATH)

    # create the bot object
    bot = Bot(userConfig)

    # setup the bot
    bot.setup()

    # enter the trade loop
    bot.tradeLoop()
Example #13
    def update_config(cls, valid=None):
        preferences = Dict['preferences'] or {}

        # If no validation provided, use last stored result or assume true
        if valid is None:
            valid = preferences.get('valid', True)

        preferences['valid'] = valid

        Configuration.process(preferences)

        # Ensure preferences dictionary is stored
        Dict['preferences'] = preferences
        Dict.Save()

        log.info('Preferences updated %s', preferences)
        EventManager.fire('preferences.updated', preferences)
Example #14
def main(env):
    # event driver loop
    eventdriver = EventDriver(maxtask=20)
    eventdriver.start(True)

    # import global config
    config = Configuration()
    config.import_global_config(env)

    task_config = config.CONFIG['GLOBAL']['JOB']
    for i in task_config:
        # add task when key 'DO_MR2P' equals 1
        if int(task_config[i].get("DO_MR2P", 0)) == 0 \
                or "MR2P_PROCESSOR" not in task_config[i]:
            continue
        new_task = eval(task_config[i]['MR2P_PROCESSOR'] + "(i, eventdriver)")
        new_task.run()
Example #15
    def update_config(cls, valid=None):
        preferences = Dict['preferences'] or {}

        # If no validation provided, use last stored result or assume true
        if valid is None:
            valid = preferences.get('valid', True)

        preferences['valid'] = valid

        Configuration.process(preferences)

        # Ensure preferences dictionary is stored
        Dict['preferences'] = preferences
        Dict.Save()

        log.info('Preferences updated %s', preferences)
        EventManager.fire('preferences.updated', preferences)
Example #16
def main(env):
    # import global config
    config = Configuration()
    config.import_global_config(env)

    task_config = config.CONFIG['GLOBAL']['JOB']
    task_list = []
    for job_name in task_config:
        # add task when key 'DO_MR2P' equals 1
        if int(task_config[job_name].get("DO_MR2P", 0)) == 0 \
                or "MR2P_PROCESSOR" not in task_config[job_name]:
            continue
        new_task = eval(task_config[job_name]['MR2P_PROCESSOR'] + "(job_name)")
        task_list.append(new_task)

    o = scheduler("mr2p", task_list)
    o.run()
Example #17
    def create_from_settings(configuration: Configuration,
                             data_handler: DataHandler,
                             events_per_symbol: Dict[str, queue.Queue],
                             logger: Logger) -> ExecutionHandler:

        if configuration.execution_handler_name == SimulatedExecutionHandler:
            return ExecutionHandlerFactory.create_historic_csv_execution_handler(
                data_handler, events_per_symbol)

        if configuration.execution_handler_name == OandaExecutionHandler:
            return ExecutionHandlerFactory.create_oanda_execution_handler(
                data_handler, events_per_symbol,
                configuration.get_option(Configuration.OPTION_ACCOUNT_ID),
                configuration.get_option(Configuration.OPTION_ACCESS_TOKEN),
                logger)

        raise Exception('Unknown ExecutionHandler for {}'.format(
            configuration.execution_handler_name))
Example #18
    def create_from_settings(configuration: Configuration, events_per_symbol: Dict[str, queue.Queue],
                             symbol_list: list, logger: Logger) -> DataHandler:

        if configuration.data_handler_name == HistoricCSVDataHandler:
            csv_dir = Configuration.OPTION_CSV_DIR
            return DataHandlerFactory.create_historic_csv_data_handler(events_per_symbol, symbol_list,
                                                                       configuration.get_option(csv_dir))

        if configuration.data_handler_name == OandaDataHandler:
            bars_from_history = Configuration.OPTION_NUMBER_OF_BARS_PRELOAD_FROM_HISTORY
            access_token = Configuration.OPTION_ACCESS_TOKEN
            timeframe = Configuration.OPTION_TIMEFRAME

            return DataHandlerFactory.create_oanda_data_handler(events_per_symbol, symbol_list,
                                                                configuration.get_option(access_token),
                                                                configuration.get_option(timeframe),
                                                                int(configuration.get_option(bars_from_history)),
                                                                logger)

        raise Exception('Unknown DataHandler for {}'.format(configuration.data_handler_name))
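A hedged sketch of using the factory for the Oanda case; the symbol, timeframe string and queue wiring are illustrative, and create_from_settings is assumed to be callable as a static method, as its signature suggests.

import os
import queue

events_per_symbol = {'EUR_USD': queue.Queue()}   # hypothetical symbol

configuration = Configuration(data_handler_name=OandaDataHandler,
                              execution_handler_name=OandaExecutionHandler)
configuration.set_option(Configuration.OPTION_ACCESS_TOKEN,
                         os.environ.get('OANDA_API_ACCESS_TOKEN'))
configuration.set_option(Configuration.OPTION_TIMEFRAME, 'M15')  # illustrative timeframe value
configuration.set_option(Configuration.OPTION_NUMBER_OF_BARS_PRELOAD_FROM_HISTORY, '50')

data_handler = DataHandlerFactory.create_from_settings(configuration, events_per_symbol,
                                                       ['EUR_USD'], TextLogger('events.log'))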
Example #19
    def update_config(cls, valid=None):
        preferences = Dict['preferences'] or {}

        # If no validation provided, use last stored result or assume true
        if valid is None:
            valid = preferences.get('valid', True)

        preferences['valid'] = valid

        Configuration.process(preferences)

        # Ensure preferences dictionary is stored
        Dict['preferences'] = preferences
        Dict.Save()

        # Update plex.metadata.py `Matcher` preferences
        Matcher.set_caper(preferences['matcher'] == 'plex_extended')
        Matcher.set_extend(preferences['matcher'] == 'plex_extended')

        log.info('Preferences updated %s', preferences)
Example #20
def run():
    log.info("=== Singularity ===")
    args = init_args_parser()
    log.info(f'Current environment: {args.env}')
    config = Configuration(args.env)
    log.info("=== Spark project is running... ===")
    log.info("== Configurations ==")

    log.info(f'app_name: {config.app_name}')
    log.info(f'input_mongodb_uri: {config.input_mongodb_uri}')
    log.info(f'output_mongodb_uri: {config.output_mongodb_uri}')
    log.info(f'jars_dir: {config.jars_dir}')
Example #21
File: pusher.py Project: Dkner/mr2p
    def __init__(self, job):
        # unique job name
        self.job = job

        # import data map config, such as MAP_ITORANGE
        global_config = Configuration()
        self.config = copy.deepcopy(global_config)
        self.config.import_internal_config(job)
        self.config.print_config()

        self._lcurl = Lcurl()
        self._loop = None
Example #22
def main():

    strategy = get_strategy()
    args_namespace = strategy.create_argument_parser(True).parse_args()
    strategy_params_special = strategy.get_strategy_params(args_namespace)

    events_log_file = '{}/events.log'.format(args_namespace.output_directory)

    strategy_params = dict(stop_loss_pips=args_namespace.stop_loss,
                           take_profit_pips=args_namespace.take_profit)
    strategy_params.update(strategy_params_special)

    configuration = Configuration(
        data_handler_name=HistoricCSVDataHandler,
        execution_handler_name=SimulatedExecutionHandler)
    configuration.set_option(Configuration.OPTION_CSV_DIR,
                             args_namespace.data_directory)

    backtest = Backtest(
        args_namespace.output_directory,
        args_namespace.symbols,
        args_namespace.initial_capital_usd,
        0,
        args_namespace.start_date,
        configuration,
        DataHandlerFactory(),
        ExecutionHandlerFactory(),
        Portfolio,
        strategy,
        FixedPositionSize(0.5),
        TextLogger(events_log_file),
        [Backtest.LOG_TYPE_EVENTS],
        strategy_params,
        'equity.csv',
        'trades.csv',
    )

    backtest.run()
    backtest.print_performance()
Example #23
def main():

    strategy = get_strategy()
    args_namespace = strategy.create_argument_parser(True).parse_args()

    events_log_file = '{}/events.log'.format(args_namespace.output_directory)

    strategy_params = strategy.get_strategy_params(args_namespace)
    strategy_params['send_notifications'] = False
    strategy_params['webhook'] = ''

    configuration = Configuration(
        data_handler_name=HistoricCSVDataHandler,
        execution_handler_name=SimulatedExecutionHandler)

    configuration.set_option(Configuration.OPTION_CSV_DIR,
                             args_namespace.data_directory)

    simulation = Backtest(
        args_namespace.output_directory,
        args_namespace.symbols,
        args_namespace.initial_capital_usd,
        0,
        args_namespace.start_date,
        configuration,
        DataHandlerFactory(),
        ExecutionHandlerFactory(),
        Portfolio,
        strategy,
        FixedPositionSize(0.5),
        TextLogger(events_log_file),
        [Backtest.LOG_TYPE_EVENTS],
        strategy_params,
        'equity.csv',
        'trades.csv',
    )

    simulation.run()
    simulation.print_performance()
Example #24
def main(env):
    config = Configuration()
    config.import_global_config(env)
    task_config = config.CONFIG['GLOBAL']['JOB']
    task_list = []
    if not isinstance(task_config, dict):
        return
    else:
        for i in task_config:
            # add task when key 'DO_MR2P' equals 1
            if not "DO_MR2P" in task_config or not "MR2P_PROCESSOR" in task_config[
                    i]:
                continue
            elif '0' == task_config[i]['DO_MR2P']:
                continue
            new_task = eval(task_config[i]['MR2P_PROCESSOR'] + "(i)")
            processor_num = int(task_config[i].get('PROCESSOR_NUM', 1))
            for i in range(processor_num):
                task_list.append(new_task)

    o = scheduler("mr2p", task_list)
    o.run()
Example #25
	def __init__(self, name, job_list):
		self.config = Configuration()
		# refresh config
		config_listener = threading.Thread(target=self.config.update_config)
		config_listener.daemon = True  # setDaemon() is deprecated in favour of the daemon attribute
		config_listener.start()

		# job_name => job(subclass of pusher), flag(start or pause), process
		self._register = {}
		for job in job_list:
			self._register[job.job] = {
				'class': job,
				'process': None
			}
Example #26
def init():
    init_dependencies()
    log.info("=== Singularity ===")
    args = init_args_parser()
    log.info(f'Current environment: {args.env}')
    config = Configuration(args.env)
    log.info("=== Spark project is running... ===")
    log.info("== Configurations ==")

    log.info(f'app_name: {config.app_name}')
    log.info(f'input_mongodb_uri: {config.input_mongodb_uri}')
    log.info(f'output_mongodb_uri: {config.output_mongodb_uri}')
    log.info(f'jars_dir: {config.jars_dir}')
    log.info(f'master: {config.master}')

    sp = SparkManager(config)
    sp.run()
Example #27
    def __init__(self, labels, input_folder, mode):
        super().__init__()

        # init UI state
        self.title = 'PyQt5 - Annotation tool for assigning image classes'
        self.left = 200
        self.top = 100
        self.width = 1100
        self.height = 770
        # the image panel should be roughly square to avoid issues with differing aspect ratios
        self.img_panel_width = 650
        self.img_panel_height = 650

        # state variables
        self.configuration = Configuration(
            input_folder,
            labels,
            mode,
        )

        self.counter = 0
        self.assigned_labels = {}

        # initialize list to save all label buttons
        self.label_buttons = []

        # Initialize Labels
        self.image_box = QLabel(self)
        self.img_name_label = QLabel(self)
        self.progress_bar = QLabel(self)
        self.curr_image_headline = QLabel('Current image', self)
        self.csv_note = QLabel(
            '(csv will be also generated automatically after closing the app)',
            self)
        self.csv_generated_message = QLabel(self)
        self.show_next_checkbox = QCheckBox(
            "Automatically show next image when labeled", self)
        self.generate_xlsx_checkbox = QCheckBox("Also generate .xlsx file",
                                                self)

        # create label folders
        if mode == 'copy' or mode == 'move':
            self.create_label_folders(labels, self.configuration.input_folder)

        # init UI
        self.init_ui()
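Launching this window requires the usual PyQt5 boilerplate; a short sketch, where ImageLabeler is a hypothetical name for whatever QWidget subclass defines this __init__, and the labels and folder are illustrative.

import sys
from PyQt5.QtWidgets import QApplication

app = QApplication(sys.argv)
window = ImageLabeler(labels=['cat', 'dog'], input_folder='images/', mode='copy')  # hypothetical class name
window.show()
sys.exit(app.exec_())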
Example #28
def main():

    # Load, prepare and validate configuration
    config = Configuration()

    # Initiate gateways for communication through an object factory
    factory = GatewayFactory(config)
    sms_gateway = factory.get_sms_gateway()
    you_gateway = factory.get_you_gateway()

    # Check if the flag to only run one direction is set.
    # If nothing is set, both will run (default behaviour).
    if args.you2sms:
        _you2sms(sms_gateway, you_gateway)
    elif args.sms2you:
        _sms2you(sms_gateway, you_gateway)
    else:
        _sms2you(sms_gateway, you_gateway)
        _you2sms(sms_gateway, you_gateway)

    sys.exit()
Example #29
def run(p_port=None, verbose=False, config_path=None):
    global config
    config = Configuration(config_path=config_path)
    path = config.get_log_file_path()
    # choose the log file path
    if path:
        log.add_handler(path)
    config.set_verbose(verbose)
    # re-check the lock because the previous check may have been a false positive
    pid = os.getpid()
    file_lock_path = "/tmp/SeaGoat.lock"
    if is_lock(file_lock_path):
        sys.exit(-1)

    with open(file_lock_path, "w") as file_lock:
        file_lock.write("%s" % pid)

    if not p_port:
        port = config.get_tcp_output_config()
    else:
        port = p_port

    # Create and register the service
    server = jsonrpc_server.JsonrpcServer(port)
    try:
        server.register()
    except BaseException as e:
        log.printerror_stacktrace(logger, e)
        server.close()
        sys.exit(1)

    # Start the server
    logger.info('Serving on port %s - pid %s', port, pid)
    logger.info('Waiting command')
    try:
        server.run()
    except (KeyboardInterrupt, SystemExit):
        logger.info("Close SeaGoat. See you later!")
    except BaseException:
        raise
    finally:
        server.close()
        # remove the lock file
        os.remove(file_lock_path)
Example #30
File: nlog.py Project: Dkner/mr2p
	def __init__(self):
		config = Configuration().CONFIG['GLOBAL']
		self._ip = config['NLOG']['ip']
		self._port = int(config['NLOG']['port'])
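This constructor assumes the global configuration carries an NLOG section shaped roughly as below; the keys come from the code above, the values are illustrative.

# expected shape of Configuration().CONFIG['GLOBAL'] for Nlog (illustrative values)
CONFIG = {
    'GLOBAL': {
        'NLOG': {
            'ip': '127.0.0.1',
            'port': '514',
        },
    },
}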
Example #31
import sys
sys.path.append("..")
import getopt
from core.configuration import Configuration
from plugins.company2b import Company2b

if __name__ == '__main__':
    optlist, args = getopt.getopt(sys.argv[1:], 'e:', ['env='])
    env = ''
    for k, v in optlist:
        if k in ('-e', '--env'):
            env = v
    if env != '':
        config = Configuration()
        config.import_global_config(env)
        script = Company2b('CNLINFO')
        script.run()
    else:
        print('[ERROR] missing env config')
Example #32
import sys
sys.path.append("..")
import getopt
from core.configuration import Configuration
from plugins.pusher import pusher

if __name__ == '__main__':
	optlist, args = getopt.getopt(sys.argv[1:], 'e:j:', ['env=','job=','skip='])
	opt_dict = dict(optlist)
	try:
		config = Configuration()
		config.import_global_config(opt_dict['--env'])
		script = pusher(opt_dict['--job'])
		script.mongo2redis(opt_dict.get('--skip', 0))
	except Exception as e:
		print(e)
Example #33
 def remove_fault(uuid):
     uuid_removed = Configuration.remove_fault(uuid)
     return uuid_removed
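Taken together with Examples #1 and #2, a hedged end-to-end sketch of the fault store; the payload shape is an assumption based on the ErrorFault fields seen in Example #8.

import uuid as uuid_module

fault_uuid = str(uuid_module.uuid4())
payload = '{"classname": "ErrorFault", "sys_call": "ALL", "error_no": 28, "probability": 100}'  # hypothetical schema

registered = add_fault(fault_uuid, payload)     # returns fault_uuid on success, 0 on failure
print(Configuration.get_all_faults_ids())       # should now include fault_uuid
removed = remove_fault(fault_uuid)              # returns the removed uuid (falsy if unknown)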