Example #1
def main():

  config.load_config('{}/{}'.format('config', 'test_config.ini'))

  formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
  file_handler = RotatingFileHandler(getattr(config, 'cfg').get('box', 'log_file'),
                                     maxBytes=5242880, backupCount=4)
  file_handler.setLevel(logging.DEBUG)
  file_handler.setFormatter(formatter)
  root_logger = logging.getLogger()
  root_logger.addHandler(file_handler)
  root_logger.setLevel(logging.DEBUG)

  try:
    camera = CameraController()
    camera.initialize()

    camera.capture_and_download('test1')
    code_scanner = CodeScanner()
    code_information = code_scanner.scan_image('/home/pi/pictures/test1.jpg')
    if code_information is not None:
      print('Decoded from QRCode: "{}"'.format(code_information.decode('utf8')))
    else:
      print('No decodable QRCode found')
    camera.close()
  except ConnectionError as e:
    print('Connection Error "%s"' % e)
  except CaptureError as e:
    print('Capture Error "%s"' % e)
Example #2
def connect(file_name, environment_name):
    config = load_config(file_name)
    environment_websocket = None
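    # first pass: look for the environment in the local configuration file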
    for environment in config['environments']:
        if environment['name'] == environment_name:
            master = Master(
                environment_name,
                C2Manager(
                    Factory(PROVIDER_AWS_WEBSOCKETS,
                            {'url': environment['url']}),
                    environment['master-password'], environment_name))
            master.show_c2_menu()
            return
    if environment_websocket is None:
        print(
            '[-] "{}" environment not found in the "{}" configuration file. Do you want to sync "{}" configuration file from AWS? Y/n'
            .format(environment_name, file_name, file_name))
        user_input = input()
        if user_input.lower() == 'y' or user_input == '':
            sync_environments(file_name)
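    # second pass: reload the configuration and look for the environment again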
    config = load_config(file_name)
    for environment in config['environments']:
        if environment['name'] == environment_name:
            master = Master(
                environment_name,
                C2Manager(
                    Factory(PROVIDER_AWS_WEBSOCKETS,
                            {'url': environment['url']}),
                    environment['master-password'], environment_name))
            master.show_c2_menu()
            return

    print(
        '[-] {} environment not found in the {} configuration file. Add it manually to config file or create a new environment with -d (--deploy) option'
        .format(environment_name, file_name))
Example #3
def load_test_config():
    config = load_config("tests/static/config.json")
    config[CONFIG_EVALUATOR] = load_config(
        "tests/static/evaluator_config.json", False)
    config[CONFIG_TRADING_TENTACLES] = load_config(
        "tests/static/trading_config.json", False)
    init_config_time_frame_for_tests(config)
    return config
Example #4
def load_test_config():
    config = load_config(f"{TEST_CONFIG_FOLDER}/config.json")
    config[CONFIG_EVALUATOR] = load_config(
        f"{TEST_CONFIG_FOLDER}/evaluator_config.json", False)
    config[CONFIG_TRADING_TENTACLES] = load_config(
        f"{TEST_CONFIG_FOLDER}/trading_config.json", False)
    init_config_time_frame_for_tests(config)
    return config
Example #5
    def reload_tentacle_config(config):
        config[CONFIG_EVALUATOR] = load_config(CONFIG_EVALUATOR_FILE_PATH,
                                               False)
        if config[CONFIG_EVALUATOR] is None:
            raise ConfigEvaluatorError

        config[CONFIG_TRADING_TENTACLES] = load_config(
            CONFIG_TRADING_FILE_PATH, False)
        if config[CONFIG_TRADING_TENTACLES] is None:
            raise ConfigTradingError

        return config
Example #6
def main():

  config.load_config('{}/{}'.format('../config', 'test_config.ini'))

  formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
  file_handler = RotatingFileHandler(getattr(config, 'cfg').get('box', 'log_file'),
                                     maxBytes=5242880, backupCount=4)
  file_handler.setLevel(logging.DEBUG)
  file_handler.setFormatter(formatter)
  root_logger = logging.getLogger()
  root_logger.addHandler(file_handler)


  global cnt
  cnt = 6

  # set by the gpio_controllers module:
  #GPIO.setmode(GPIO.BCM)
  leds = LedController()
  leds.initialize()

  inputs = InputController()
  inputs.initialize(cb_door, cb_start)
  #inputs.register_start_button_cb(cb_start)

  try:
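    # flash red, orange, green and blue in sequence while the counter is positive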
    while True:
      while cnt > 0:
        cnt -= 1
        leds.switch_red(True)
        time.sleep(0.05)
        leds.switch_red(False)
        leds.switch_orange(True)
        time.sleep(0.05)
        leds.switch_orange(False)
        leds.switch_green(True)
        time.sleep(0.05)
        leds.switch_green(False)
        leds.switch_blue(True)
        time.sleep(0.05)
        leds.clear_all()
        time.sleep(0.05)
      time.sleep(0.01)

  except KeyboardInterrupt:
    leds.clear_all()
    print()
    print('Stopped by Ctrl-C')
  except Exception as e:
    print('caught error: {}'.format(e))
  finally:
    GPIO.cleanup()
Example #7
 def load_config(self):
     config_file = self.get_config_file_name()
     # try with this class name
     if os.path.isfile(config_file):
         self.social_config = load_config(config_file)
     else:
         # if it's not possible, try with any super-class' config file
         for super_class in self.get_parent_evaluator_classes(SocialEvaluator):
             super_class_config_file = super_class.get_config_file_name()
             if os.path.isfile(super_class_config_file):
                 self.social_config = load_config(super_class_config_file)
                 return
     # set default config if nothing found
     if not self.social_config:
         self.set_default_config()
Example #9
def inference(config_path, use_zoom_tta, f):
    config = load_config(config_path)
    model = get_model(config.model.name, config.model.pretrained_model_path)
    make_output_dir(config, f)

    if use_zoom_tta:
        tta_zoom_list = [1.0, 0.9, 0.8]
    else:
        tta_zoom_list = [config.data.tta_zoom]

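    # run inference once per zoom factor and save the predictions to a pickle file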
    for tta_zoom in tta_zoom_list:
        test_dataset = ImetDataset(
            batch_size=config.eval.batch_size,
            mode="test",
            img_size=config.data.img_size,
            tta_zoom=tta_zoom,
            valid_csv=config.data.valid_csv).get_loader()

        pickle_path = os.path.join(
            config.path.out, "{}_{}_{}.pickle".format(
                config.model.name,
                os.path.basename(config.model.pretrained_model_path),
                tta_zoom))

        inference_for_submit(test_dataset,
                             model,
                             config.data.img_size,
                             pickle_name=pickle_path)
Example #10
def validate(config_path, use_zoom_tta, f):
    config = load_config(config_path)
    model = get_model(config.model.name, config.model.pretrained_model_path)
    make_output_dir(config, f)

    if use_zoom_tta:
        tta_zoom_list = [1.0, 0.9, 0.8]
    else:
        tta_zoom_list = [config.data.tta_zoom]

    for tta_zoom in tta_zoom_list:
        valid_loader = ImetDataset(
            batch_size=config.eval.batch_size,
            mode="valid",
            img_size=config.data.img_size,
            tta_zoom=tta_zoom,
            valid_csv=config.data.valid_csv).get_loader()

        pickle_path = os.path.join(
            config.path.out, "{}_{}_{}.pickle".format(
                config.model.name,
                os.path.basename(config.model.pretrained_model_path),
                tta_zoom))
        valid_one_epoch(valid_loader,
                        model,
                        flip_tta=True,
                        pickle_name=pickle_path)

    search(config.path.out)
Example #11
def test_load_config():
    result = load_config("tests/static/config.json")
    assert "crypto-currencies" in result
    assert "services" in result
    assert "exchanges" in result
    assert "trading" in result
    assert "Bitcoin" in result["crypto-currencies"]
Example #12
def request_aws_credentials(file_name):
    global aws_id
    global aws_secret
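    # reuse credentials already provided earlier in this run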
    if aws_id is not None and aws_secret is not None:
        return aws_id, aws_secret

    aws_steps_shown = False
    config = load_config(file_name)
    if 'aws_id' not in config or config['aws_id'] == '':
        print('[*] AWS ID not found in the "{}" configuration file.'.format(
            file_name))
        print('[*] Access Key ID needed')
        show_aws_key_steps()
        aws_steps_shown = True
        access_key_id = input('[*] Access Key ID: ')
        aws_id = access_key_id
    else:
        aws_id = config['aws_id']
        print('[*] Access Key ID: {}'.format(aws_id))

    if not aws_steps_shown:
        print('[*] Secret key needed')
        show_aws_key_steps()
    aws_secret = getpass('[*] Insert your Secret Key: ')

    if not check_aws_credentials(aws_id, aws_secret):
        exit(1)
    else:
        print('[+] Credentials OK.')

    return aws_id, aws_secret
Example #13
 def _load_previous_state_metadata(self, target_exchanges, config):
     if path.isfile(self.save_file):
         try:
             potential_previous_state = load_config(self.save_file)
             if isinstance(potential_previous_state, dict):
                 if not self._check_required_values(
                         potential_previous_state):
                     return False
                 # check data
                 found_currencies_prices = {
                     currency: False
                     for currency in ConfigManager.get_all_currencies(
                         config)
                 }
                 if not self._check_exchange_data(config,
                                                  found_currencies_prices):
                     return False
                 if not self._check_missing_symbols(
                         found_currencies_prices):
                     return False
             if not self._check_no_missing_exchanges(target_exchanges):
                 return False
         except Exception as e:
             self.logger.warning(f"{self.ERROR_MESSAGE}{e}")
             return False
         return True
     else:
         return False
Example #14
def train(config_path, f):
    config = load_config(config_path)
    make_output_dir(config, f)

    train_loader = ImetDataset(batch_size=config.train.batch_size,
                               mode="train",
                               img_size=config.data.img_size,
                               train_csv=config.data.train_csv).get_loader()
    valid_loader = ImetDataset(batch_size=config.eval.batch_size,
                               mode="valid",
                               img_size=config.data.img_size,
                               valid_csv=config.data.valid_csv).get_loader()
    model = get_model(config.model.name, config.model.pretrained_model_path,
                      config.model.multi)
    optimizer = torch.optim.Adam(model.parameters(), lr=config.train.lr)
    train_total = len(train_loader.dataset)
    scheduler = CyclicLRWithRestarts(optimizer,
                                     config.train.batch_size,
                                     train_total,
                                     restart_period=2,
                                     t_mult=1.)
    criterion = FocalLoss()

    for epoch in range(config.train.num_epochs):
        scheduler.step()
        train_one_epoch(train_loader, model, optimizer, scheduler, criterion,
                        epoch, config.path.out)
        valid_one_epoch(valid_loader, model, epoch)
Example #15
    def __new__(cls):
        if cls._instance is None:
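            # first call: create the singleton instance and open the MySQL connection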
            cls._instance = object.__new__(cls)
            # TODO: take this value from config.json
            config_obj = config.load_config()
            # db_config = {'database': config_obj.database_name, 'host': config_obj.db_host,
            # 'password': config_obj.db_password, 'port': config_obj.db_port, 'user': config_obj.db_username}
            db_config = {
                'database': 'rbac',
                'host': '127.0.0.1',
                'password': '******',
                'port': 3306,
                'user': '******'
            }
            try:
                logObj.debug('connecting to MySQL database...')
                connection = MySQL._instance.connection = mysql.connector.connect(
                    **db_config)
                cursor = MySQL._instance.cursor = connection.cursor()
                cursor.execute('SELECT VERSION()')
                db_version = cursor.fetchone()
            except Exception as error:
                logObj.error(
                    'Error: connection not established {}'.format(error))
                MySQL._instance = None
            else:
                logObj.info('connection established\n{}'.format(db_version[0]))

        return cls._instance
Example #16
 def load_config(self):
     config_file = self.get_config_file_name()
     if os.path.isfile(config_file):
         self.set_default_config()
         self.specific_config = {**self.specific_config, **load_config(config_file)}
     else:
         self.set_default_config()
Example #17
def remove(file_name, environment_name):
    global aws_id, aws_secret

    print('[!] Are you sure you want to delete "{}" environment? y/N'.format(
        environment_name))
    user_input = input()

    if user_input.lower() != 'y':
        exit()

    aws_id, aws_secret = request_aws_credentials(file_name)

    config = load_config(file_name)
    if 'environments' not in config:
        config['environments'] = []
    environment_to_delete = None
    for environment in config['environments']:
        if environment['name'] == environment_name:
            environment_to_delete = environment
            break
    if environment_to_delete is None:
        print(
            '[!] There is no "{}" environment in your local "{}" configuration file. Do you want to sync the config file with AWS? Y/n'
            .format(environment_name, file_name))
        user_input = input()
        if user_input.lower() == 'y' or user_input == '':
            sync_environments(file_name)
            time.sleep(1)

    config = load_config(file_name)
    environment_to_delete = None
    for environment in config['environments']:
        if environment['name'] == environment_name:
            environment_to_delete = environment
            break
    if environment_to_delete is None:
        print(
            '[!] "{}" environment not found. Do you want to try to delete it anyway? y/N'
            .format(environment_name))
        user_input = input()
        if user_input.lower() != 'y':
            return

    print('[+] Removing "{}" environment from AWS'.format(environment_name))
    AwsDeplpyment().deploy(aws_id, aws_secret, environment_name, None, True)
    remove_environment_from_config(environment_name, file_name)
Example #18
 def check_config(config_file):
     try:
         valid, e = ConfigManager.validate_config_file(
             load_config(config_file=config_file, error=True))
         if not valid:
             raise e
     except Exception as e:
         raise e
Example #19
    def load_config(self):
        config_file = self.get_config_file_name()
        # try with this class name
        if os.path.isfile(config_file):
            self.trading_config = load_config(config_file)

        # set default config if nothing found
        if not self.trading_config:
            self.set_default_config()
Example #20
 def get_specific_config(cls, raise_exception=True, raw_file=False):
     try:
         if raw_file:
             with open(cls.get_config_file_name()) as file:
                 return file.read()
         else:
             return load_config(cls.get_config_file_name())
     except Exception as e:
         if raise_exception:
             raise e
Example #21
def create_blank_config_using_loaded_one(loaded_config, other_config=None):
    new_config = other_config if other_config else load_config()
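    # copy the trading tentacles, risk, starting portfolio, fees and evaluator settings from the loaded config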
    trading_tentacles_config = deepcopy(loaded_config[CONFIG_TRADING_TENTACLES])
    risk = deepcopy(loaded_config[CONFIG_TRADING][CONFIG_TRADER_RISK])
    starting_portfolio = deepcopy(loaded_config[CONFIG_SIMULATOR][CONFIG_STARTING_PORTFOLIO])
    fees_config = deepcopy(loaded_config[CONFIG_SIMULATOR][CONFIG_SIMULATOR_FEES])
    new_config[CONFIG_TRADING_TENTACLES] = trading_tentacles_config
    new_config[CONFIG_TRADING][CONFIG_TRADER_RISK] = risk
    new_config[CONFIG_SIMULATOR][CONFIG_STARTING_PORTFOLIO] = starting_portfolio
    new_config[CONFIG_SIMULATOR][CONFIG_SIMULATOR_FEES] = fees_config
    new_config[CONFIG_EVALUATOR] = deepcopy(loaded_config[CONFIG_EVALUATOR])
    add_config_default_backtesting_values(new_config)
    return new_config
Example #22
    def config_health_check(config):
        # 1 ensure api key encryption
        should_replace_config = False
        if CONFIG_EXCHANGES in config:
            for exchange, exchange_config in config[CONFIG_EXCHANGES].items():
                for key in CONFIG_EXCHANGE_ENCRYPTED_VALUES:
                    try:
                        if not ConfigManager._handle_encrypted_value(
                                key, exchange_config, verbose=True):
                            should_replace_config = True
                    except Exception as e:
                        get_logger().error(
                            f"Exception when checking exchange config encryption: {e}"
                        )
                        get_logger().exception(e)

        # 2 ensure single trader activated
        try:
            trader_enabled = ConfigManager.get_trader_enabled(config)
            if trader_enabled:
                simulator_enabled = ConfigManager.get_trader_simulator_enabled(
                    config)
                if simulator_enabled:
                    get_logger().error(
                        f"Impossible to activate a trader simulator additionally to a real trader, "
                        f"simulator deactivated.")
                    config[CONFIG_SIMULATOR][CONFIG_ENABLED_OPTION] = False
                    should_replace_config = True
        except KeyError as e:
            get_logger().error(
                f"KeyError when checking traders activation: {e}. Activating trader simulator."
            )
            get_logger().exception(e)
            config[CONFIG_SIMULATOR][CONFIG_ENABLED_OPTION] = True
            config[CONFIG_TRADER][CONFIG_ENABLED_OPTION] = False
            should_replace_config = True

        # 3 save fixed config if necessary
        if should_replace_config:
            try:
                ConfigManager.save_config(
                    get_user_config(),
                    config,
                    TEMP_RESTORE_CONFIG_FILE,
                    json_data=ConfigManager.dump_json(config))
                return config
            except Exception as e:
                get_logger().error(
                    f"Save of the health checked config failed : {e}, will use the initial config"
                )
                return load_config(error=False, fill_missing_fields=True)
Example #23
async def create_app(mode):
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    app = web.Application()
    app['config'] = load_config(mode)

    app.on_startup.append(init_pg)
    app.on_cleanup.append(close_pg)

    aiohttp_debugtoolbar.setup(app)
    fernet_key = fernet.Fernet.generate_key()
    secret_key = base64.urlsafe_b64decode(fernet_key)
    setup(app, EncryptedCookieStorage(secret_key))
    setup_routers(app)

    return app
Example #24
def test_get_market_pair():
    config = load_config("tests/static/config.json")

    pair, inverted = ConfigManager.get_market_pair(
        config, config[CONFIG_TRADING][CONFIG_TRADER_REFERENCE_MARKET])
    assert pair == ""
    assert inverted is False

    pair, inverted = ConfigManager.get_market_pair(config, "")
    assert pair == ""
    assert inverted is False

    pair, inverted = ConfigManager.get_market_pair(config, "VEN")
    assert pair == "VEN/BTC"
    assert inverted is False

    pair, inverted = ConfigManager.get_market_pair(config, "USDT")
    assert pair == "BTC/USDT"
    assert inverted is True

    pair, inverted = ConfigManager.get_market_pair(config, "XBT")
    assert pair == ""
    assert inverted is False

    # now change config reference market
    config[CONFIG_TRADING][CONFIG_TRADER_REFERENCE_MARKET] = "USDT"

    pair, inverted = ConfigManager.get_market_pair(config, "BTC")
    assert pair == "BTC/USDT"
    assert inverted is False

    pair, inverted = ConfigManager.get_market_pair(config, "VEN")
    assert pair == ""
    assert inverted is False

    config[CONFIG_TRADING].pop(CONFIG_TRADER_REFERENCE_MARKET)

    # now use config/__init__.py reference market
    pair, inverted = ConfigManager.get_market_pair(config, "ADA")
    assert pair == "ADA/BTC"
    assert split_symbol(pair)[1] == DEFAULT_REFERENCE_MARKET
    assert inverted is False

    config.pop(CONFIG_TRADING)
    pair, inverted = ConfigManager.get_market_pair(config, "ADA")
    assert pair == ""
    assert inverted is False
Example #25
def list_environments(file_name):
    if not config_exists(file_name):
        print('[-] Configuration file "{}" not found'.format(file_name))
        exit(1)
    else:
        config = load_config(file_name)
        if 'environments' not in config or len(config['environments']) == 0:
            print('[!] No environments found in the {} configuration file'.
                  format(file_name))
        else:
            print('\nEnvironments from {} configuration file:\n'.format(
                file_name))
            for environment in config['environments']:
                print('- {} ({}) | {}'.format(environment['name'],
                                              environment['url'],
                                              environment['master-password']))
            print()
Example #26
    def __init__(self):
        # Logger
        fileConfig('config/logging_config.ini')
        self.logger = logging.getLogger(self.__class__.__name__)
        sys.excepthook = self.log_uncaught_exceptions

        # Version
        self.logger.info("Version : {0}".format(VERSION))

        # Config
        self.logger.info("Load config file...")
        self.config = load_config()

        # Advanced
        AdvancedManager.create_class_list(self.config)

        # Interfaces
        self.web_app = WebApp(self.config)
        if self.web_app.enabled():
            self.web_app.start()

        # Debug tools
        self.performance_analyser = None
        if CONFIG_DEBUG_OPTION_PERF in self.config and self.config[
                CONFIG_DEBUG_OPTION_PERF]:
            self.performance_analyser = PerformanceAnalyser()

        # TODO : CONFIG TEMP LOCATION
        self.time_frames = [
            TimeFrames.THIRTY_MINUTES, TimeFrames.ONE_HOUR,
            TimeFrames.FOUR_HOURS, TimeFrames.ONE_DAY
        ]
        self.exchanges = [ccxt.binance]

        # Add services to self.config[CONFIG_CATEGORY_SERVICES]
        ServiceCreator.create_services(self.config)

        # Notifier
        self.config[CONFIG_NOTIFICATION_INSTANCE] = Notification(self.config)

        self.symbols_threads_manager = []
        self.exchange_traders = {}
        self.exchange_trader_simulators = {}
        self.exchanges_list = {}
        self.symbol_evaluator_list = []
        self.dispatchers_list = []
Example #27
def deploy(file_name, environment_name):
    global aws_id, aws_secret

    if not config_exists(file_name):
        print('[*] Configuration file "{}" not found, creating one.'.format(
            file_name))
        create_config_file(file_name)
    else:
        print('[*] Loading configuration file "{}"'.format(file_name))

    config = load_config(file_name)
    if environment_exists(environment_name, file_name):
        print(
            '[-] The "{}" environment was created before as indicated in the "{}" configuration file. Indicate another name with the -n (--env-name) option or delete it from the configuration file.'
            .format(environment_name, file_name))
        exit(1)

    aws_id, aws_secret = request_aws_credentials(file_name)

    print('[+] Creating infrastructure in AWS')

    master_password = random_string(16)
    url = AwsDeplpyment().deploy(aws_id, aws_secret, environment_name,
                                 master_password)

    if 'environments' not in config:
        config['environments'] = []

    print('[+] Saving "{}" configuration file'.format(file_name))
    add_environment_to_config(
        {
            'name': environment_name,
            'url': url,
            'master-password': master_password
        }, file_name)

    print('[+] Now you can connect to your new environment:')
    connection_command = '\n\tpython3 wsc2.py -c -n {}'.format(
        environment_name)
    if file_name != DEFAULT_CONFIG_FILE_NAME:
        connection_command = '\n\tpython3 wsc2.py -c -n {} -f {}'.format(
            environment_name, file_name)

    print(connection_command)
    print()
Example #28
def sync_environments(file_name):
    global aws_id, aws_secret

    config = load_config(file_name)
    aws_id, aws_secret = request_aws_credentials(file_name)

    if 'aws_id' not in config:
        config['aws_id'] = aws_id

    aws_environments = AwsDeplpyment().get_all_environments(aws_id, aws_secret)
    environments = []
    for item in aws_environments:
        environments.append({
            'name': item['environment'],
            'url': item['url'],
            'master-password': item['master_password']
        })
    config['environments'] = environments
    save_config(config, file_name)
    print('[+] Configuration file synchronized with AWS')
    list_environments(file_name)
Example #29
    global LOGOBJ
    userIp = request.remote_addr
    uUid = utils.getuUid()
    LOGOBJ.debug("accessing /login by :" + str(userIp) + str(uUid))
    userName = ""
    passWord = ""
    authSuccess = False
    #if already logged in or if auth is not required we need to proceed to next page
    if CONFIGOBJ.auth_enabled:
        userName = str(request.form.get('username'))
        passWord = str(request.form.get('password'))
        #TODO: call post sql query with given username and password
        #like
        #authSuccess = sqlQuery()
        authSuccess = True
        if authSuccess:
            #TODO: call the rbac server next layer i.e resource page
            LOGOBJ.debug("Successful!")
            return {'username': userName, 'password': passWord}


if __name__ == '__main__':
    global CONFIGOBJ
    global LOGOBJ
    LOGOBJ = logger.getLogger()
    CONFIGOBJ = config.load_config()
    db_utils.create_table()
    app.run(host='0.0.0.0',
            port=CONFIGOBJ.rbac_port,
            debug=True,
            threaded=True)
Example #30
from anchor.anchorbox_generate import AnchorGenerator
from anchor.predBox_and_imageBox import distance2bbox, bbox2distance
from anchor.anchor_assigner import Assigner
from utils.bbox_distribution import BoxesDistribution
from loss.giou_loss import GIoULoss
from loss.distribution_focal_loss import DistributionFocalLoss
from loss.quality_focal_loss import QualityFocalLoss
from loss.multi_iou_cal import MultiIoUCal
from config.config import load_config, cfg
from loss.L1L2loss import Regularization
import torch
from torch import optim
import torch.nn as nn

if __name__ == '__main__':
    """config"""
    load_config(cfg, "./config/config.yaml")
    print(cfg)
    headerNum = len(cfg.model.featSizes)
    device = torch.device('cuda:0')
    """dataset"""
    trainData = ListDataset(trainAnnoPath=cfg.dir.trainAnnoDir,
                            trainImgPath=cfg.dir.trainImgDir,
                            netInputSizehw=cfg.model.netInput,
                            augFlag=cfg.data.augment,
                            normalize=cfg.data.normalize,
                            imgChannelNumber=cfg.model.imgChannelNumber)
    trainLoader = torch.utils.data.DataLoader(
        trainData,
        collate_fn=collate_function,
        batch_size=cfg.train.batchSize,
        shuffle=True,
Example #31
def train(all_batch=[], read_from_file=True, section=[]):
    if read_from_file:
        with open(config.get_pkl_path("train"), "rb") as f:
            train_word_batches, train_char_batches, train_char_len_batches, train_pos_tag_batches, train_entity_batches, train_toi_batches, train_word_origin_batches = pickle.load(
                f)
        with open(config.get_pkl_path("test"), "rb") as f:
            test_word_batches, test_char_batches, test_char_len_batches, test_pos_tag_batches, test_entity_batches, test_toi_batches, test_word_origin_batches = pickle.load(
                f)
    else:
        train_word_batches, train_char_batches, train_char_len_batches, train_pos_tag_batches, train_entity_batches, train_toi_batches, train_toi_batch_layer0, train_toi_batch_layer1 = all_batch[
            0]
        dev_word_batches, dev_char_batches, dev_char_len_batches, dev_pos_tag_batches, dev_entity_batches, dev_toi_batches, dev_toi_batch_layer0, dev_toi_batch_layer1 = all_batch[
            1]
        test_word_batches, test_char_batches, test_char_len_batches, test_pos_tag_batches, test_entity_batches, test_toi_batches, test_toi_batch_layer0, test_toi_batch_layer1 = all_batch[
            2]

    misc_config = pickle.load(open(config.get_pkl_path("config"), "rb"))
    config.load_config(misc_config)

    ner_model = TOI_BERT(config)
    if config.if_DTE:
        ner_model.load_vector()

    if section:
        config.layer_maxlen = section

    if config.if_gpu and torch.cuda.is_available():
        ner_model = ner_model.cuda()

    evaluate = Evaluate(ner_model, config)

    parameters = filter(lambda p: p.requires_grad, ner_model.parameters())
    optimizer = create_opt(parameters, config.opt, config.lr)

    best_model = None
    best_per = 0
    pre_loss = 100000
    train_all_batches = list(
        zip(train_word_batches, train_char_batches, train_char_len_batches,
            train_pos_tag_batches, train_entity_batches, train_toi_batches,
            train_word_origin_batches))
    tokenizer = BertTokenizer.from_pretrained(
        f"bert-{config.bert_config}-uncased")
    bert_model = BertModel.from_pretrained(
        f"{config.bert_path}{config.bert_config}")
    bert_model.cuda()
    bert_model.eval()
    for parameter in bert_model.parameters():
        parameter.requires_grad = False

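    # main training loop: optimize on the training batches, then evaluate F1 on the test batches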
    for e_ in range(config.epoch):
        print("Epoch:", e_ + 1)

        cur_time = time.time()
        if config.if_shuffle:
            shuffle(train_all_batches)
        losses = []
        ner_model.train()
        config.mode = 'Train'
        runtimeModel = ModelInRuntime.instance(
            (ner_model, bert_model, tokenizer, config,
             len(train_all_batches) + len(test_word_batches)))
        runtimeModel.model = ner_model

        for each_batch in tqdm(train_all_batches):
            optimizer.zero_grad()
            runtimeModel.setTrainData(each_batch)
            result, _, aim = runtimeModel.runClassification()
            loss = ner_model.calc_loss(result, aim)
            loss.backward()
            optimizer.step()
            losses.append(loss.data.cpu().numpy())

        sub_loss = np.mean(losses)
        print(f'Avg loss = {sub_loss:.4f}')
        print(f"Training step took {time.time() - cur_time:.0f} seconds")
        if e_ >= 0:
            print("dev:")
            cls_f1 = evaluate.get_f1(
                zip(test_word_batches, test_char_batches,
                    test_char_len_batches, test_pos_tag_batches,
                    test_entity_batches, test_toi_batches,
                    test_word_origin_batches), bert_model)
            if cls_f1 > best_per and cls_f1 > config.score_th:
                best_per = cls_f1
                model_path = config.get_model_path(
                ) + f"/epoch{e_ + 1}_f1_{cls_f1:.4f}.pth"
                torch.save(ner_model.state_dict(), model_path)
                print("model save in " + model_path)
            print('\n\n')
        if sub_loss >= pre_loss:
            adjust_learning_rate(optimizer)
        pre_loss = sub_loss
Example #32
File: main.py Project: esann/capp
def generate_the_objects():
    db_objects = [create_empty_object(get_next_id(), _config, root_object_id), \
                  create_empty_object(get_next_id(), _config, root_object_id), \
                  create_empty_object(get_next_id(), _config, root_object_id), \
                  create_empty_object(get_next_id(), _config, root_object_id)]
    for o in db_objects:
        yield o

    return

_config = {}
root_object_id = None

if __name__ == '__main__':

    _config = load_config()
    init_engine()
    root_object_id = _config[ROOT_OBJECT_ID]

    the_object.test_the_object_module(_config)
    # objects = [o for o in generate_the_objects()]
    #
    # root_folder_link = create_empty_object(get_next_id(), _config)
    # set_data(root_folder_link, DATATYPE_OWNER_LINK)
    #
    # root_folder = create_empty_object(get_next_id(), _config)
    # add_link(root_folder, root_folder_link[ID], None)
    # set_data(root_folder, DATATYPE_NAME, "root")
    #
    # folder = create_empty_object(get_next_id(), _config)
    # add_link(folder, root_folder_link[ID], root_folder[ID])