def get_config(src_path, target, toolchain_name):
    # Convert src_path to a list if needed
    src_paths = [src_path] if type(src_path) != ListType else src_path
    # We need to remove all paths which are repeated to avoid
    # multiple compilations and linking with the same objects
    src_paths = [src_paths[0]] + list(set(src_paths[1:]))

    # Create the configuration object
    config = Config(target, src_paths)

    # If the 'target' argument is a string, convert it to a target instance
    if isinstance(target, str):
        try:
            target = TARGET_MAP[target]
        except KeyError:
            raise KeyError("Target '%s' not found" % target)

    # Toolchain instance
    try:
        toolchain = TOOLCHAIN_CLASSES[toolchain_name](target, options=None,
                                                      notify=None, macros=None,
                                                      silent=True,
                                                      extra_verbose=False)
    except KeyError:
        raise KeyError("Toolchain %s not supported" % toolchain_name)

    # Scan src_path for config files
    resources = toolchain.scan_resources(src_paths[0])
    for path in src_paths[1:]:
        resources.add(toolchain.scan_resources(path))

    config.add_config_files(resources.json_files)
    return config.get_config_data()
def __init__(self, asymptomatic_ratio: float, hospitalized_ratio: float):
    self._config = Config()
    self.illness_days_mean = self._config.get('virus', 'infectious_days_mean')
    self.illness_days_std = self._config.get('virus', 'infectious_days_std')
    self.transmission_probability = self._config.get(
        'virus', 'transmission_probability')
    self.asymptomatic_ratio = asymptomatic_ratio
    self.hospitalized_ratio = hospitalized_ratio
    self.mild_symptoms_ratio = 1 - hospitalized_ratio - asymptomatic_ratio

    input_data = InputData()
    mean_periodic_interactions = self._config.get(
        'population', 'mean_periodic_interactions')
    mean_stochastic_interactions = self._config.get(
        'population', 'mean_stochastic_interactions')
    mean_interactions = mean_periodic_interactions + mean_stochastic_interactions

    # Basic reproduction number estimate: daily contacts (scaled up by the
    # travel ratio) times the infectious period times the per-contact
    # transmission probability
    self.R = (1 + input_data.mean_travel_ratio) * \
        mean_interactions * self.illness_days_mean * self.transmission_probability

    logging.info(
        f'Initialized the {self.__class__.__name__} virus with R0={self.R:.4f}')
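# A quick numeric sanity check of the R estimate above, using hypothetical
# values in place of the real config entries (a sketch, not the project's data):
mean_travel_ratio = 0.05            # hypothetical InputData().mean_travel_ratio
mean_interactions = 3 + 5           # hypothetical periodic + stochastic interactions
illness_days_mean = 10              # hypothetical infectious_days_mean
transmission_probability = 0.02     # hypothetical transmission_probability
R = (1 + mean_travel_ratio) * mean_interactions * illness_days_mean * transmission_probability
print(f'R0 = {R:.4f}')              # -> R0 = 1.6800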
def test_PSA_overrides(target_start_size):
    target, start, size = target_start_size
    set_targets_json_location()
    config = Config(target)
    roms = config.get_all_active_memories(ROM_ALL_MEMORIES)
    assert "ROM" in roms
    assert roms["ROM"] == [start, size]
def train(model, dataset_train, dataset_val):
    """Train the model."""
    Config.dump(os.path.join(model.log_dir, 'config.yml'))

    # Image augmentation
    # http://imgaug.readthedocs.io/en/latest/source/augmenters.html
    augmentation = iaa.SomeOf((0, 2), [
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        iaa.OneOf([iaa.Affine(rotate=90),
                   iaa.Affine(rotate=180),
                   iaa.Affine(rotate=270)]),
        iaa.Multiply((0.8, 1.5)),
        iaa.GaussianBlur(sigma=(0.0, 5.0))
    ])

    # If starting from imagenet, train heads only for a bit
    # since they have random weights
    logging.info('Train network heads')
    model.fit(dataset_train, dataset_val,
              Config.TRAINING.LEARNING.RATE,
              20, 'heads', augmentation=augmentation)

    logging.info('Train all layers')
    model.fit(dataset_train, dataset_val,
              Config.TRAINING.LEARNING.RATE,
              40, 'all', augmentation=augmentation)
def main(argv):
    # defaults
    M = 'yolov3'
    gpu = '0'
    path = None
    try:
        opts, args = getopt.getopt(argv[1:], 'hm:p:g:', ['m=', 'path=', 'gpu='])
    except getopt.GetoptError:
        print(argv[0] + ' -m <M> -p <path> -g <gpu>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(argv[0] + ' -m <M> -p <path> -g <gpu>')
            sys.exit(0)
        elif opt in ('-m', '--m'):
            M = arg
        elif opt in ('-p', '--path'):
            path = arg
        elif opt in ('-g', '--gpu'):
            gpu = arg
    if path is None:
        raise ValueError('you should specify the model path via '
                         '[python convertor -p /path/to/model/weights]')
    print('model: [%s], gpu: [%s], weights: [%s]' % (M, gpu, path))

    os.environ["CUDA_VISIBLE_DEVICES"] = gpu
    config = Config(M)
    config.mgn = True
    model = MODELS(config=config).model
    load_weights_by_name(model, path)
    load_weights_by_name(model, os.path.join(parpath, 'pretrained_weights/%s.h5' % M))
    saved_path = os.path.join(parpath, 'saved_weights/%s_reid.h5' % M)
    print('weights saving to %s' % saved_path)
    model.save_weights(saved_path)
def __init__(self, recipe_kind='Notset', read_list='ReadList.txt', url=None, debug=False):
    u"""
    Configuration entries are separated by the '$' sign; entries on the same
    line are merged into a single e-book.
    :param recipe_kind:
    :param read_list: default value: ReadList.txt
    :param url:
    :param debug:
    :return:
    """
    self.recipe_kind = recipe_kind
    self.read_list = read_list
    self.url = url
    log.warning_log(u"website type: " + str(self.recipe_kind) + '\n')

    import logging
    if debug is True:
        Debug.logger.setLevel(logging.DEBUG)
    else:
        Debug.logger.setLevel(logging.INFO)
    Debug.logger.debug(u"read_list: " + str(self.read_list))
    Debug.logger.debug(u"url: " + str(self.url))
    Debug.logger.debug(u"recipe type: " + str(recipe_kind))

    Path.init_base_path(recipe_kind)  # set up the base path
    Path.init_work_directory()        # create the working directories
    self.init_database()              # initialize the database
    Config._load()
def run_simulation() -> TimeSeriesResult:
    config = Config()
    population_size = 450000
    virus = Virus.from_string(config.get('virus', 'name'))
    n_days = config.get('simulation_days')

    population_centre = PopulationCentreBase(
        name='Mock city',
        longitude=17.1,
        latitude=48.15,
        populations=[PopulationBase(int(population_size / 10), virus)
                     for _ in range(10)],
        virus=virus)

    population_centre.infect(50)

    for day_i in range(n_days):
        if day_i % 10 == 0:
            logging.info(f'day: {day_i}')
        population_centre.next_day()

    return TimeSeriesResult(
        simulation_days=population_centre.simulation_days,
        infected=population_centre.infected,
        unaffected=population_centre.unaffected,
        immune=population_centre.immune,
        dead=population_centre.dead,
        new_cases=population_centre.new_cases)
def scan_and_copy_resources(self, prj_paths, trg_path, relative=False):
    # Copy only the files for the required target and toolchain
    lib_builds = []
    # Create the configuration object
    if isinstance(prj_paths, basestring):
        prj_paths = [prj_paths]
    config = Config(self.target, prj_paths)
    for src in ["lib", "src"]:
        resources = self.__scan_and_copy(join(prj_paths[0], src), trg_path)
        for path in prj_paths[1:]:
            resources.add(self.__scan_and_copy(join(path, src), trg_path))
        lib_builds.extend(resources.lib_builds)

        # The repository files
        # for repo_dir in resources.repo_dirs:
        #     repo_files = self.__scan_all(repo_dir)
        #     for path in prj_paths:
        #         self.toolchain.copy_files(repo_files, trg_path, rel_path=join(path, src))

    # The libraries builds
    for bld in lib_builds:
        build_url = open(bld).read().strip()
        lib_data = self.build_url_resolver(build_url)
        lib_path = lib_data["path"].rstrip("\\/")
        self.__scan_and_copy(lib_path, join(trg_path, lib_data["name"]))

        # Create a .hg dir in the mbed build dir so it's ignored when versioning
        hgdir = join(trg_path, lib_data["name"], ".hg")
        mkdir(hgdir)
        fhandle = file(join(hgdir, "keep.me"), "a")
        fhandle.close()

    if not relative:
        # Final scan of the actual exported resources
        resources = self.toolchain.scan_resources(trg_path)
        resources.relative_to(trg_path, self.DOT_IN_RELATIVE_PATH)
    else:
        # Use the prj_dir (source, not destination)
        resources = self.toolchain.scan_resources(prj_paths[0])
        for path in prj_paths[1:]:
            resources.add(self.toolchain.scan_resources(path))

    # Load the resources into the config system, which might expand/modify
    # them based on config data
    self.resources = config.load_resources(resources)

    if hasattr(self, "MBED_CONFIG_HEADER_SUPPORTED") and self.MBED_CONFIG_HEADER_SUPPORTED:
        # Add the configuration file to the target directory
        self.config_header = self.toolchain.MBED_CONFIG_FILE_NAME
        config.get_config_data_header(join(trg_path, self.config_header))
        self.config_macros = []
        self.resources.inc_dirs.append(".")
    else:
        # And add the configuration macros to the toolchain
        self.config_macros = config.get_config_data_macros()
def __init__(self, name='root', log_path=LOG_PATH):
    # Name the log file by date
    c = Config().get('log')
    self.logname = os.path.join(log_path, '%s.log' % time.strftime('%Y_%m_%d'))
    self.logger = logging.getLogger(name)
    self.logger.setLevel(logging.DEBUG)
    # Log output format
    pattern = c.get('pattern') if c and c.get('pattern') \
        else '[%(asctime)s]-%(name)s-%(levelname)s: %(message)s'
    self.formatter = logging.Formatter(pattern)
def build_all(config: Config) -> List[BuildReport]:
    """
    Checks the index for any changed files

    :param config:
    :return:
    """
    try:
        pass
    finally:
        config.save_index()
    return []
def run_simulation() -> GeographicalResult:
    config = Config()
    virus = Virus.from_string(config.get('virus', 'name'))
    n_days = config.get('simulation_days')

    cities = city_factory(virus)
    results = GeographicalResult()
    input_data = InputData()

    for day_i in range(n_days):
        if day_i % 10 == 0:
            logging.info(f'day: {day_i}')
        for i, city_i in enumerate(cities):
            migrations_smeared = np.random.poisson(input_data.get_migration_row(i))
            for j, city_j in enumerate(cities):
                if i == j:
                    continue
                health_states, interaction_multiplicities = city_i.get_travelling(
                    migrations_smeared[j])
                if len(interaction_multiplicities) == 0:
                    continue
                # Count transmissions from the travellers' interactions
                n_infected = 0
                for interaction_i in range(interaction_multiplicities.max()):
                    transmission_mask = (
                        interaction_multiplicities > interaction_i) * (
                        np.random.random(len(interaction_multiplicities))
                        < virus.transmission_probability)
                    n_infected += health_states[transmission_mask].astype(int).sum()
                city_j.infect(n_infected)
        for city in cities:
            city.next_day()

    for city in cities:
        results.add_result(city.to_dict())
    return results
def get_config(src_path, target, toolchain_name):
    # Convert src_path to a list if needed
    src_paths = [src_path] if type(src_path) != ListType else src_path
    # We need to remove all paths which are repeated to avoid
    # multiple compilations and linking with the same objects
    src_paths = [src_paths[0]] + list(set(src_paths[1:]))

    # Create the configuration object
    config = Config(target, src_paths)

    # If the 'target' argument is a string, convert it to a target instance
    if isinstance(target, str):
        try:
            target = TARGET_MAP[target]
        except KeyError:
            raise KeyError("Target '%s' not found" % target)

    # Toolchain instance
    try:
        toolchain = TOOLCHAIN_CLASSES[toolchain_name](target, options=None,
                                                      notify=None, macros=None,
                                                      silent=True,
                                                      extra_verbose=False)
    except KeyError:
        raise KeyError("Toolchain %s not supported" % toolchain_name)

    # Scan src_path for config files
    resources = toolchain.scan_resources(src_paths[0])
    for path in src_paths[1:]:
        resources.add(toolchain.scan_resources(path))

    # Update configuration files until adding features creates no changes
    prev_features = set()
    while True:
        # Update the configuration with any .json files found while scanning
        config.add_config_files(resources.json_files)

        # Add features while we find new ones
        features = config.get_features()
        if features == prev_features:
            break

        for feature in features:
            if feature in resources.features:
                resources += resources.features[feature]

        prev_features = features
    config.validate_config()

    cfg, macros = config.get_config_data()
    features = config.get_features()
    return cfg, macros, features
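# A minimal usage sketch of get_config, mirroring the call made by the
# configuration tests later in this file; "source_dir" and "K64F" are
# hypothetical placeholders, not values from the original code:
cfg, macros, features = get_config("source_dir", "K64F", "GCC_ARM")
for name, param in cfg.items():
    print("%s = %s" % (name, param))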
def get_brand_ids(self):
    con = cx_Oracle.connect(Config().get_oracle_connect())
    cur = con.cursor()
    # Use a bind variable rather than string formatting to avoid SQL injection
    cur.execute(
        "select brand_id from m_product_brand t where t.userid=:userid",
        userid=self.userid)
    return cur.fetchall()
def getmobile(self):
    con = cx_Oracle.connect(Config().get_oracle_connect())
    cur = con.cursor()
    # Use a bind variable rather than string formatting to avoid SQL injection
    cur.execute(
        "select mobile from user_info t where t.userid=:userid",
        userid=self.userid)
    return cur.fetchone()[0]
def get_product_id(self):
    con = cx_Oracle.connect(Config().get_oracle_connect())
    cur = con.cursor()
    # Use a bind variable rather than string formatting to avoid SQL injection
    cur.execute(
        "select product_id from product t where t.merchantid="
        "(select merchantid from user_merchant u where u.userid=:userid)",
        userid=self.userid)
    return cur.fetchone()[0]
class TestBaiDu(unittest.TestCase):
    url = Config().get('url')
    excel = DATA_PATH + '/test_data.xlsx'

    def setUp(self):
        self.dr = browser(browser_type='firefox')

    def tearDown(self):
        self.dr.quit()

    def test_search_0(self):
        baidu = BaiDuMainPage(self.dr)
        baidu.open(self.url)
        logger.info('open url')
        baidu.search('selenium')
        baidu.save_screen_shot(REPORT_PATH)
        logger.info('search selenium')
def __init__(self):
    redis_config = Config().get('redis')
    redis_host = redis_config['host']
    redis_port = redis_config['port']
    pool = redis.ConnectionPool(host=redis_host, port=redis_port,
                                decode_responses=True)
    self.r = redis.Redis(connection_pool=pool)
def get_username_from_db(self, levelname):
    con = cx_Oracle.connect(Config().get_oracle_connect())
    cur = con.cursor()
    # Use a bind variable rather than string formatting to avoid SQL injection
    cur.execute(
        "select levelid from user_level t where t.levelname=:levelname",
        levelname=levelname)
    return cur.fetchone()[0]
def test_config(name):
    """Run a particular configuration test

    :param name: test name (same as directory name)
    """
    test_dir = join(root_dir, name)
    test_data = json.load(open(data_path(test_dir)))
    targets_json = os.path.join(test_dir, "targets.json")
    set_targets_json_location(targets_json if isfile(targets_json) else None)
    for target, expected in test_data.items():
        try:
            cfg, macros, features = get_config(test_dir, target, "GCC_ARM")
            res = compare_config(cfg, expected)
            assert not res, res
            expected_macros = expected.get("expected_macros", None)
            expected_features = expected.get("expected_features", None)
            if expected_macros is not None:
                macros = Config.config_macros_to_macros(macros)
                assert sorted(expected_macros) == sorted(macros)
            if expected_features is not None:
                assert sorted(expected_features) == sorted(features)
        except ConfigException as e:
            err_msg = str(e)
            if "exception_msg" not in expected:
                assert not err_msg, "Unexpected Error: %s" % e
            else:
                assert expected["exception_msg"] in err_msg
def test_bl_too_large(self, mock_intelhex_offset, mock_exists, mock_isabs):
    """
    Create a bootloader that is too large to fit in ROM and test that an
    exception is generated.
    :param mock_intelhex_offset: mock intel hex
    :param mock_exists: mock the file exists call
    :param mock_isabs: mock the isabs call
    :return:
    """
    cfg = Config('NRF52_DK')
    mock_exists.return_value = True
    mock_isabs.return_value = True

    # Set up the hex file: fill a region larger than the target's ROM
    bl = IntelHex()
    start_addr = 0x0
    end_addr = 0x88000
    for v in range(start_addr, end_addr):
        bl[v] = v & 0xFF  # keep values within byte range
    mock_intelhex_offset.return_value = bl

    cfg.target.bootloader_img = True
    ce = False
    if cfg.has_regions:
        try:
            for r in list(cfg.regions):
                print(r)
        except ConfigException as e:
            print("args %s" % (e.args,))
            if e.args[0] == "bootloader segments don't fit within rom":
                ce = True
    self.assertTrue(ce)
def test_parameters_and_config_macros_to_macros():
    """
    Test that checks that parameter-generated macros override set macros
    """
    params = {
        "test_lib.parameter_with_macro": ConfigParameter(
            "parameter_with_macro",
            {
                "macro_name": "CUSTOM_MACRO_NAME",
                "value": 1
            },
            "test_lib",
            "library"
        )
    }
    macros = {
        "CUSTOM_MACRO_NAME": ConfigMacro(
            "CUSTOM_MACRO_NAME=2",
            "dummy",
            "application"
        )
    }
    macro_list = Config._parameters_and_config_macros_to_macros(params, macros)
    assert macro_list == ["CUSTOM_MACRO_NAME=1"]
def scan_and_copy_resources(self, prj_paths, trg_path, relative=False):
    # Copy only the files for the required target and toolchain
    lib_builds = []
    # Create the configuration object
    cfg = Config(self.target, prj_paths)
    for src in ['lib', 'src']:
        resources = reduce(add, [self.__scan_and_copy(join(path, src), trg_path)
                                 for path in prj_paths])
        lib_builds.extend(resources.lib_builds)

        # The repository files
        for repo_dir in resources.repo_dirs:
            repo_files = self.__scan_all(repo_dir)
            for path in prj_paths:
                self.toolchain.copy_files(repo_files, trg_path,
                                          rel_path=join(path, src))

    # The libraries builds
    for bld in lib_builds:
        build_url = open(bld).read().strip()
        lib_data = self.build_url_resolver(build_url)
        lib_path = lib_data['path'].rstrip('\\/')
        self.__scan_and_copy(lib_path, join(trg_path, lib_data['name']))

        # Create a .hg dir in the mbed build dir so it's ignored when versioning
        hgdir = join(trg_path, lib_data['name'], '.hg')
        mkdir(hgdir)
        fhandle = file(join(hgdir, 'keep.me'), 'a')
        fhandle.close()

    if not relative:
        # Final scan of the actual exported resources
        self.resources = self.toolchain.scan_resources(trg_path)
        self.resources.relative_to(trg_path, self.DOT_IN_RELATIVE_PATH)
    else:
        # Use the prj_dir (source, not destination)
        self.resources = reduce(add, [self.toolchain.scan_resources(path)
                                      for path in prj_paths])

    # Add all JSON files discovered during scanning to the configuration object
    cfg.add_config_files(self.resources.json_files)

    # Get data from the configuration system
    self.config_macros = cfg.get_config_data_macros()
def init_config(recipe_kind):
    if recipe_kind == 'zhihu':
        # TODO: if another site ever needs a login, remove this hard-coding
        login = Login(recipe_kind='zhihu')
    else:
        return
    # NOTE: set Config.remember_account to false before release!
    # Login is required the first time; afterwards the cookie is reused.
    # After a successful login, the account is recorded automatically.
    if Config.remember_account_set:
        Debug.logger.info(u'Detected settings file, using it.')
        Config.picture_quality = 1
    else:
        log.warning_log(u"Please login...")
        login.start()
        Config.picture_quality = 1
        Config.remember_account_set = True
        Config._save()
def get_config_header(self):
    if self.config_data is None:
        return None
    config_file = join(self.build_dir, self.MBED_CONFIG_FILE_NAME)
    if not exists(config_file):
        with open(config_file, "wt") as f:
            f.write(Config.config_to_header(self.config_data))
    return config_file
def getmerchantid(self):
    u"""Fetch the merchant_id for this user from the database."""
    con = cx_Oracle.connect(Config().get_oracle_connect())
    cur = con.cursor()
    # Use a bind variable rather than string formatting to avoid SQL injection
    cur.execute(
        "select merchantid from user_merchant t where t.userid=:userid",
        userid=self.userid)
    return cur.fetchone()[0]
def __init__(self, dataset, detector='yolov3', experiment_name='default',
             overwrite=False):
    self.detector = detector
    self.dataset = dataset
    self.overwrite = overwrite
    self.experiment_name = experiment_name
    self.checkdir()
    self.config = Config(detector)
    self.DA = DA('validation', self.config)
def get_not_receive_order_from_db(self):
    import cx_Oracle
    con = cx_Oracle.connect(Config().get_oracle_connect())
    cur = con.cursor()
    # Use a bind variable rather than string formatting to avoid SQL injection
    cur.execute(
        "select order_id from m_order t where t.meruserid=:userid and t.status='4'",
        userid=self.userid)
    return cur.fetchone()[0]
def __init__(self, size: int, virus: Virus):
    config = Config()
    self._size = size
    self._indexes = np.arange(size)

    self._ill = np.zeros(size).astype(bool)
    self._illness_days = np.ones(size) * -1
    self._illness_days_start = np.ones(size) * -1
    self._health_condition = np.random.random(size)
    self._need_hospitalization = np.zeros(size).astype(bool)
    self._hospitalization_start = np.random.poisson(
        config.get('hospitalization_start'), size)
    self._hospitalization_percentage = config.get('hospitalization_percentage')
    self._is_new_case = np.zeros(size).astype(bool)
    self._is_immune = np.zeros(size).astype(bool)
    self._is_alive = np.ones(size).astype(bool)
    self._infectious_start = np.random.poisson(
        config.get('infectious_start'), size)

    self._mean_stochastic_interactions = config.get(
        'population', 'mean_stochastic_interactions')
    self._mean_periodic_interactions = config.get(
        'population', 'mean_periodic_interactions')

    self._virus = virus
    self._day_i = 0
def __process_params(config: Config):
    try:
        opts, args = getopt.getopt(sys.argv[1:], "cdg:hk:r:ty",
                                   ["cleanup-silently", "debug",
                                    "github-repo-id=", "help", "gpg-key=",
                                    "local-repo=", "test", "dry-run"])
    except getopt.GetoptError:
        __print_cmd_help()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-c", "--cleanup-silently"):
            log_info("[ARGS] --cleanup-silently: Will silently reset/clean your working copy "
                     "automatically. You will not be asked anymore. Use with caution!")
            config.cleanup_silently = True
        elif opt in ("-d", "--debug"):
            log_info("[ARGS] --debug: The script will require user interactions for each step.")
            config.debug = True
        elif opt in ("-g", "--github-repo-id"):
            log_info("[ARGS] GitHub repository to release against is set to " + arg)
            config.github_repo = arg
        elif opt in ("-h", "--help"):
            __print_cmd_help()
            sys.exit(0)
        elif opt in ("-k", "--gpg-key"):
            log_info("[ARGS] GPG key for code signing is set to " + arg)
            config.gpg_keyname = arg
        elif opt in ("-r", "--local-repo"):
            log_info("[ARGS] local repository set to " + arg)
            config.root_path = arg
        elif opt in ("-t", "--test"):
            log_info("[ARGS] --test: Script runs on a different repo for testing purposes. "
                     "Does not require any user interaction to speed up.")
            config.test_run = True
        elif opt in ("-y", "--dry-run"):
            config.dry_run = True
            log_info("[ARGS] --dry-run: No changes will be made on the Git repo.")
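# A sketch of how this parser might be driven from a script entry point,
# assuming Config() can be constructed without arguments (an assumption, not
# shown in the original); the command line below is hypothetical:
#     python release.py -g myorg/myrepo -r /path/to/checkout --dry-run
if __name__ == '__main__':
    config = Config()
    __process_params(config)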
def main():
    args = parse_args()
    if not os.path.isdir(args.save_prefix):
        os.makedirs(args.save_prefix)
    config = Config(args.config_filename)
    roidb = get_dataset(args.dataset, args)
    sym = get_network(args.network, args, config, 'train')
    train_net(sym, roidb, args, config)
def get_order_ids_from_db(self):
    import cx_Oracle
    con = cx_Oracle.connect(Config().get_oracle_connect())
    cur = con.cursor()
    # Use a bind variable rather than string formatting to avoid SQL injection
    cur.execute("select order_id from m_order t where t.userid=:userid",
                userid=self.userid)
    return [row[0] for row in cur.fetchall()]
def scan_and_copy_resources(self, prj_paths, trg_path, relative=False):
    # Copy only the files for the required target and toolchain
    lib_builds = []
    # Create the configuration object
    config = Config(self.target, prj_paths)
    for src in ['lib', 'src']:
        resources = reduce(add, [self.__scan_and_copy(join(path, src), trg_path)
                                 for path in prj_paths])
        lib_builds.extend(resources.lib_builds)

        # The repository files
        for repo_dir in resources.repo_dirs:
            repo_files = self.__scan_all(repo_dir)
            for path in prj_paths:
                self.toolchain.copy_files(repo_files, trg_path,
                                          rel_path=join(path, src))

    # The libraries builds
    for bld in lib_builds:
        build_url = open(bld).read().strip()
        lib_data = self.build_url_resolver(build_url)
        lib_path = lib_data['path'].rstrip('\\/')
        self.__scan_and_copy(lib_path, join(trg_path, lib_data['name']))

        # Create a .hg dir in the mbed build dir so it's ignored when versioning
        hgdir = join(trg_path, lib_data['name'], '.hg')
        mkdir(hgdir)
        fhandle = file(join(hgdir, 'keep.me'), 'a')
        fhandle.close()

    if not relative:
        # Final scan of the actual exported resources
        resources = self.toolchain.scan_resources(trg_path)
        resources.relative_to(trg_path, self.DOT_IN_RELATIVE_PATH)
    else:
        # Use the prj_dir (source, not destination)
        resources = self.toolchain.scan_resources(prj_paths[0])
        for path in prj_paths[1:]:
            resources.add(self.toolchain.scan_resources(path))

    # Load the resources into the config system, which might expand/modify
    # them based on config data
    self.resources = config.load_resources(resources)

    # And add the configuration macros to the toolchain
    self.config_macros = config.get_config_data_macros()
def get_default_config(source_dir, target_name):
    if target_name in TARGET_CONFIGS:
        config_name = TARGET_CONFIGS[target_name]['default_test_configuration']
        if config_name == "NONE":
            return None
        return join(CONFIG_DIR, CONFIG_MAP[config_name])
    elif Config.find_app_config(source_dir):
        return None
    elif (target_name in TARGET_MAP
          and 'EMAC' in TARGET_MAP[target_name].device_has):
        return join(CONFIG_DIR, CONFIG_MAP["ETHERNET"])
    else:
        return None
def get_config_header(self):
    if self.config_processed:
        # This function was already called; return its result
        return self.config_file
    # The config file is located in the build directory
    self.config_file = join(self.build_dir, self.MBED_CONFIG_FILE_NAME)
    # If the file exists, read its current content into prev_data
    if exists(self.config_file):
        with open(self.config_file, "rt") as f:
            prev_data = f.read()
    else:
        prev_data = None
    # Get the current configuration data
    crt_data = Config.config_to_header(self.config_data) if self.config_data else None
    # 'changed' indicates if a configuration change was detected
    changed = False
    if prev_data is not None:  # a previous mbed_config.h exists
        if crt_data is None:  # no configuration data, so mbed_config.h needs to be removed
            remove(self.config_file)
            self.config_file = None  # this means "config file not present"
            changed = True
        elif crt_data != prev_data:  # different content of config file
            with open(self.config_file, "wt") as f:
                f.write(crt_data)
            changed = True
    else:  # a previous mbed_config.h does not exist
        if crt_data is not None:  # there's configuration data available
            with open(self.config_file, "wt") as f:
                f.write(crt_data)
            changed = True
        else:
            self.config_file = None  # this means "config file not present"
    # If there was a change in configuration, rebuild everything
    self.build_all = changed
    # Make sure that this function will only return the location of the
    # configuration file for subsequent calls, without trying to manipulate
    # its content in any way
    self.config_processed = True
    return self.config_file
def get_config_macros(self):
    return Config.config_to_macros(self.config_data) if self.config_data else []
def build_library(src_paths, build_path, target, toolchain_name,
                  dependencies_paths=None, options=None, name=None, clean=False,
                  archive=True, notify=None, verbose=False, macros=None,
                  inc_dirs=None, inc_dirs_ext=None, jobs=1, silent=False,
                  report=None, properties=None, extra_verbose=False,
                  project_id=None):
    """ src_path: the path of the source directory
    build_path: the path of the build directory
    target: ['LPC1768', 'LPC11U24', 'LPC2368']
    toolchain: ['ARM', 'uARM', 'GCC_ARM', 'GCC_CR']
    library_paths: List of paths to additional libraries
    clean: Rebuild everything if True
    notify: Notify function for logs
    verbose: Write the actual tools command lines if True
    inc_dirs: additional include directories which should be included in the build
    inc_dirs_ext: additional include directories which should be copied to the library directory
    """
    if type(src_paths) != ListType:
        src_paths = [src_paths]

    # The first path will give the name to the library
    project_name = basename(src_paths[0] if src_paths[0] != "." and src_paths[0] != "./"
                            else getcwd())
    if name is None:
        # Use a default project name based on the project folder name
        name = project_name

    if report is not None:
        start = time()

        # If project_id is specified, use that over the default name
        id_name = project_id.upper() if project_id else name.upper()
        description = name
        vendor_label = target.extra_labels[0]
        cur_result = None
        prep_report(report, target.name, toolchain_name, id_name)
        cur_result = create_result(target.name, toolchain_name, id_name, description)

        if properties is not None:
            prep_properties(properties, target.name, toolchain_name, vendor_label)

    for src_path in src_paths:
        if not exists(src_path):
            error_msg = "The library source folder does not exist: %s" % src_path
            if report is not None:
                cur_result["output"] = error_msg
                cur_result["result"] = "FAIL"
                add_result_to_report(report, cur_result)
            raise Exception(error_msg)

    try:
        # Toolchain instance
        toolchain = TOOLCHAIN_CLASSES[toolchain_name](target, options,
                                                      macros=macros, notify=notify,
                                                      silent=silent,
                                                      extra_verbose=extra_verbose)
        toolchain.VERBOSE = verbose
        toolchain.jobs = jobs
        toolchain.build_all = clean

        toolchain.info("Building library %s (%s, %s)" % (name, target.name, toolchain_name))

        # Scan resources
        resources = None
        for path in src_paths:
            # Scan resources
            resource = toolchain.scan_resources(path)

            # Copy headers, objects and static libraries - all files needed for a static lib
            toolchain.copy_files(resource.headers, build_path, rel_path=resource.base_path)
            toolchain.copy_files(resource.objects, build_path, rel_path=resource.base_path)
            toolchain.copy_files(resource.libraries, build_path, rel_path=resource.base_path)
            if resource.linker_script:
                toolchain.copy_files(resource.linker_script, build_path,
                                     rel_path=resource.base_path)

            # Extend the resources collection
            if not resources:
                resources = resource
            else:
                resources.add(resource)

        # Add additional include directories if necessary
        if inc_dirs:
            if type(inc_dirs) == ListType:
                resources.inc_dirs.extend(inc_dirs)
            else:
                resources.inc_dirs.append(inc_dirs)

        # Add extra include directories / files which are required by the library.
        # These files usually are not in the same directory as the source files,
        # so the previous scan will not include them.
        if inc_dirs_ext is not None:
            for inc_ext in inc_dirs_ext:
                resources.add(toolchain.scan_resources(inc_ext))

        # Dependencies include paths
        if dependencies_paths is not None:
            for path in dependencies_paths:
                lib_resources = toolchain.scan_resources(path)
                resources.inc_dirs.extend(lib_resources.inc_dirs)

        if archive:
            # Use a temporary path when building an archive
            tmp_path = join(build_path, '.temp')
            mkdir(tmp_path)
        else:
            tmp_path = build_path

        # Handle configuration
        config = Config(target)
        # Update the configuration with any .json files found while scanning
        config.add_config_files(resources.json_files)
        # And add the configuration macros to the toolchain
        toolchain.add_macros(config.get_config_data_macros())

        # Compile sources
        for path in src_paths:
            src = toolchain.scan_resources(path)
            objects = toolchain.compile_sources(src, abspath(tmp_path), resources.inc_dirs)
            resources.objects.extend(objects)

        if archive:
            toolchain.build_library(objects, build_path, name)

        if report is not None:
            end = time()
            cur_result["elapsed_time"] = end - start
            cur_result["output"] = toolchain.get_output()
            cur_result["result"] = "OK"
            add_result_to_report(report, cur_result)

    except Exception as e:
        if report is not None:
            end = time()

            if isinstance(e, ToolException):
                cur_result["result"] = "FAIL"
            elif isinstance(e, NotSupportedException):
                cur_result["result"] = "NOT_SUPPORTED"

            cur_result["elapsed_time"] = end - start

            toolchain_output = toolchain.get_output()
            if toolchain_output:
                cur_result["output"] += toolchain_output

            add_result_to_report(report, cur_result)

        # Let the exception propagate
        raise e
def test_tree(full_name, name):
    failed = 0
    sys.path.append(full_name)
    if "test_data" in sys.modules:
        del sys.modules["test_data"]
    import test_data
    for target, expected in test_data.expected_results.items():
        sys.stdout.write("%s:'%s'(%s) " % (name, expected["desc"], target))
        sys.stdout.flush()
        err_msg = None
        try:
            # Use 'set_targets_json_location' to remove the previous custom
            # targets from the target list
            set_targets_json_location(Target._Target__targets_json_location)
            cfg, macros, features = get_config(full_name, target, "GCC_ARM")
            macros = Config.config_macros_to_macros(macros)
        except ConfigException as e:
            err_msg = e.message
        if err_msg:
            if expected.has_key("exception_msg"):
                if err_msg.find(expected["exception_msg"]) == -1:
                    print "FAILED!"
                    sys.stderr.write("    Unexpected error message!\n")
                    sys.stderr.write("    Expected: '%s'\n" % expected["exception_msg"])
                    sys.stderr.write("    Got: '%s'\n" % err_msg)
                    failed += 1
                else:
                    print "OK"
            else:
                print "FAILED!"
                sys.stderr.write("    Error while getting configuration!\n")
                sys.stderr.write("    " + err_msg + "\n")
                failed += 1
        else:
            res = compare_config(cfg, expected)
            expected_macros = expected.get("expected_macros", None)
            expected_features = expected.get("expected_features", None)
            if res:
                print "FAILED!"
                sys.stdout.write("    " + res + "\n")
                failed += 1
            elif expected_macros is not None:
                if sorted(expected_macros) != sorted(macros):
                    print "FAILED!"
                    sys.stderr.write("    List of macros doesn't match\n")
                    sys.stderr.write("    Expected: '%s'\n" % ",".join(sorted(expected_macros)))
                    sys.stderr.write("    Got: '%s'\n" % ",".join(sorted(macros)))
                    failed += 1
                else:
                    print "OK"
            elif expected_features is not None:
                if sorted(expected_features) != sorted(features):
                    print "FAILED!"
                    sys.stderr.write("    List of features doesn't match\n")
                    sys.stderr.write("    Expected: '%s'\n" % ",".join(sorted(expected_features)))
                    sys.stderr.write("    Got: '%s'\n" % ",".join(sorted(features)))
                    failed += 1
                else:
                    print "OK"
            else:
                print "OK"
    sys.path.remove(full_name)
    return failed
def add_fib_at_start(arginput):
    input_file = arginput + ".bin"
    file_name_hex = arginput + "_fib.hex"
    file_name_bin = arginput + ".bin"

    # Read in the binary file
    input_hex_file = intelhex.IntelHex()
    input_hex_file.padding = 0x00
    input_hex_file.loadbin(input_file, offset=FLASH_BASE)

    output_hex_file = intelhex.IntelHex()
    output_hex_file.padding = 0x00

    # Get the starting and ending address
    addresses = input_hex_file.addresses()
    addresses.sort()
    start_end_pairs = list(ranges(addresses))
    regions = len(start_end_pairs)

    if regions == 1:
        start, end = start_end_pairs[0]
    else:
        start = min(min(start_end_pairs))
        end = max(max(start_end_pairs))

    assert start >= FLASH_BASE, ("Error - start 0x%x less than beginning of "
                                 "user flash area" % start)

    # Compute the checksum over the range (don't include data at the location of the crc)
    size = end - start + 1
    data = input_hex_file.tobinarray(start=start, size=size)
    crc32 = binascii.crc32(data) & 0xFFFFFFFF

    fw_rev = FW_REV

    checksum = (start + size + crc32 + fw_rev) & 0xFFFFFFFF

    print("Writing FIB: base 0x%08X, size 0x%08X, crc32 0x%08X, fw rev 0x%08X, "
          "checksum 0x%08X" % (start, size, crc32, fw_rev, checksum))

    # Expected initial values used by DAPLink to validate that this is a valid
    # bin file. Added as dummy values here because the FIB area precedes the
    # application area; the bootloader will ignore these dummy values.
    #   0x00 stack pointer (RAM address)
    #   0x04 Reset vector (FLASH address)
    #   0x08 NMI_Handler (FLASH address)
    #   0x0C HardFault_Handler (FLASH address)
    #   0x10 dummy
    dummy_sp = 0x3FFFFC00
    dummy_reset_vector = 0x00003625
    dummy_nmi_handler = 0x00003761
    dummy_hardfault_handler = 0x00003691
    dummy_blank = 0x00000000

    # Expected FIB structure:
    # typedef struct fib {
    #     uint32_t base;     /**< Base offset of firmware, indicating what flash the
    #                             firmware is in. (will never be 0x11111111) */
    #     uint32_t size;     /**< Size of the firmware */
    #     uint32_t crc;      /**< CRC32 for firmware correctness check */
    #     uint32_t rev;      /**< Revision number */
    #     uint32_t checksum; /**< Check-sum of information block */
    # } fib_t, *fib_pt;

    fib_start = FIB_BASE
    dummy_fib_size = 20
    fib_size = 20
    trim_size = 24
    user_code_start = FLASH_BASE
    trim_area_start = TRIM_BASE

    # Write the dummy vectors to the file in little endian
    output_hex_file[fib_start + 0] = (dummy_sp >> 0) & 0xFF
    output_hex_file[fib_start + 1] = (dummy_sp >> 8) & 0xFF
    output_hex_file[fib_start + 2] = (dummy_sp >> 16) & 0xFF
    output_hex_file[fib_start + 3] = (dummy_sp >> 24) & 0xFF
    output_hex_file[fib_start + 4] = (dummy_reset_vector >> 0) & 0xFF
    output_hex_file[fib_start + 5] = (dummy_reset_vector >> 8) & 0xFF
    output_hex_file[fib_start + 6] = (dummy_reset_vector >> 16) & 0xFF
    output_hex_file[fib_start + 7] = (dummy_reset_vector >> 24) & 0xFF
    output_hex_file[fib_start + 8] = (dummy_nmi_handler >> 0) & 0xFF
    output_hex_file[fib_start + 9] = (dummy_nmi_handler >> 8) & 0xFF
    output_hex_file[fib_start + 10] = (dummy_nmi_handler >> 16) & 0xFF
    output_hex_file[fib_start + 11] = (dummy_nmi_handler >> 24) & 0xFF
    output_hex_file[fib_start + 12] = (dummy_hardfault_handler >> 0) & 0xFF
    output_hex_file[fib_start + 13] = (dummy_hardfault_handler >> 8) & 0xFF
    output_hex_file[fib_start + 14] = (dummy_hardfault_handler >> 16) & 0xFF
    output_hex_file[fib_start + 15] = (dummy_hardfault_handler >> 24) & 0xFF
    output_hex_file[fib_start + 16] = (dummy_blank >> 0) & 0xFF
    output_hex_file[fib_start + 17] = (dummy_blank >> 8) & 0xFF
    output_hex_file[fib_start + 18] = (dummy_blank >> 16) & 0xFF
    output_hex_file[fib_start + 19] = (dummy_blank >> 24) & 0xFF

    # Write the FIB to the file in little endian
    output_hex_file[fib_start + 20] = (start >> 0) & 0xFF
    output_hex_file[fib_start + 21] = (start >> 8) & 0xFF
    output_hex_file[fib_start + 22] = (start >> 16) & 0xFF
    output_hex_file[fib_start + 23] = (start >> 24) & 0xFF
    output_hex_file[fib_start + 24] = (size >> 0) & 0xFF
    output_hex_file[fib_start + 25] = (size >> 8) & 0xFF
    output_hex_file[fib_start + 26] = (size >> 16) & 0xFF
    output_hex_file[fib_start + 27] = (size >> 24) & 0xFF
    output_hex_file[fib_start + 28] = (crc32 >> 0) & 0xFF
    output_hex_file[fib_start + 29] = (crc32 >> 8) & 0xFF
    output_hex_file[fib_start + 30] = (crc32 >> 16) & 0xFF
    output_hex_file[fib_start + 31] = (crc32 >> 24) & 0xFF
    output_hex_file[fib_start + 32] = (fw_rev >> 0) & 0xFF
    output_hex_file[fib_start + 33] = (fw_rev >> 8) & 0xFF
    output_hex_file[fib_start + 34] = (fw_rev >> 16) & 0xFF
    output_hex_file[fib_start + 35] = (fw_rev >> 24) & 0xFF
    output_hex_file[fib_start + 36] = (checksum >> 0) & 0xFF
    output_hex_file[fib_start + 37] = (checksum >> 8) & 0xFF
    output_hex_file[fib_start + 38] = (checksum >> 16) & 0xFF
    output_hex_file[fib_start + 39] = (checksum >> 24) & 0xFF

    # Pad the rest of the FIB area
    for i in range(fib_start + dummy_fib_size + fib_size, trim_area_start):
        output_hex_file[i] = 0xFF

    # Read in configuration data from the config parameter in targets.json
    configData = Config('NCS36510')
    paramData = configData.get_target_config_data()

    for v in paramData.values():
        if v.name == "target.mac-addr-high":
            mac_addr_high = int(v.value, 16)
        elif v.name == "target.mac-addr-low":
            mac_addr_low = int(v.value, 16)
        elif v.name == "target.32KHz-clk-trim":
            clk_32k_trim = int(v.value, 16)
        elif v.name == "target.32MHz-clk-trim":
            clk_32m_trim = int(v.value, 16)
        elif v.name == "target.rssi-trim":
            rssi = int(v.value, 16)
        elif v.name == "target.txtune-trim":
            txtune = int(v.value, 16)
        else:
            print("Not a valid param")

    # Write the trim data to the file in little endian
    output_hex_file[trim_area_start + 0] = mac_addr_low & 0xFF
    output_hex_file[trim_area_start + 1] = (mac_addr_low >> 8) & 0xFF
    output_hex_file[trim_area_start + 2] = (mac_addr_low >> 16) & 0xFF
    output_hex_file[trim_area_start + 3] = (mac_addr_low >> 24) & 0xFF
    output_hex_file[trim_area_start + 4] = mac_addr_high & 0xFF
    output_hex_file[trim_area_start + 5] = (mac_addr_high >> 8) & 0xFF
    output_hex_file[trim_area_start + 6] = (mac_addr_high >> 16) & 0xFF
    output_hex_file[trim_area_start + 7] = (mac_addr_high >> 24) & 0xFF
    output_hex_file[trim_area_start + 8] = clk_32k_trim & 0xFF
    output_hex_file[trim_area_start + 9] = (clk_32k_trim >> 8) & 0xFF
    output_hex_file[trim_area_start + 10] = (clk_32k_trim >> 16) & 0xFF
    output_hex_file[trim_area_start + 11] = (clk_32k_trim >> 24) & 0xFF
    output_hex_file[trim_area_start + 12] = clk_32m_trim & 0xFF
    output_hex_file[trim_area_start + 13] = (clk_32m_trim >> 8) & 0xFF
    output_hex_file[trim_area_start + 14] = (clk_32m_trim >> 16) & 0xFF
    output_hex_file[trim_area_start + 15] = (clk_32m_trim >> 24) & 0xFF
    output_hex_file[trim_area_start + 16] = rssi & 0xFF
    output_hex_file[trim_area_start + 17] = (rssi >> 8) & 0xFF
    output_hex_file[trim_area_start + 18] = (rssi >> 16) & 0xFF
    output_hex_file[trim_area_start + 19] = (rssi >> 24) & 0xFF
    output_hex_file[trim_area_start + 20] = txtune & 0xFF
    output_hex_file[trim_area_start + 21] = (txtune >> 8) & 0xFF
    output_hex_file[trim_area_start + 22] = (txtune >> 16) & 0xFF
    output_hex_file[trim_area_start + 23] = (txtune >> 24) & 0xFF

    # Pad the rest of the trim area with 0xFF
    for i in range(trim_area_start + trim_size, user_code_start):
        output_hex_file[i] = 0xFF

    # Merge the two hex files
    output_hex_file.merge(input_hex_file, overlap='error')

    # Write out the file(s)
    output_hex_file.tofile(file_name_hex, 'hex')
    output_hex_file.tofile(file_name_bin, 'bin')
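# Usage sketch ("build/app" is a hypothetical path): given build/app.bin,
# the call below writes build/app_fib.hex and rewrites build/app.bin with
# the FIB and trim data prepended to the application image.
add_fib_at_start("build/app")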