def rm_env(env_name):
    """Alias for remove_env: drop the conda environment if it is present.

    Args:
        env_name (str): name of the conda environment to delete.
    """
    if not env_exists(env_name):
        return
    logger.info("Removing environment: {0}".format(env_name))
    remove_env(env_name)
def test_model(model_name, source_name, env_name, batch_size, vep=False):
    """kipoi test ...

    Create a fresh conda environment for the model, run ``kipoi test``
    inside it and fail if the run emitted any WARNING log lines.

    Args:
        model_name (str): model to test
        source_name (str): source name
        env_name (str): conda environment to (re)create for the test
        batch_size (int): batch size forwarded to ``kipoi test``
        vep (bool): if True, add ``--vep`` to the environment installation

    Raises:
        AssertionError: if either subprocess exits with a non-zero code.
        ValueError: if one or more WARNING lines were observed in the logs.
    """
    if env_exists(env_name):
        logger.info("Environment {0} exists. Removing it.".format(env_name))
        remove_env(env_name)

    # TODO - if the model is a Keras model, print the Keras config file
    # and note which config file got used

    # create the model test environment
    cmd = "kipoi"
    args = ["env", "create",
            "--source", source_name,
            "--env", env_name,
            model_name]
    if vep:
        # Add --vep to environment installation (before the model name)
        args.insert(-1, "--vep")
    returncode = _call_command(cmd, args, use_stdout=True)
    assert returncode == 0

    # run the tests in the environment
    cmd = get_kipoi_bin(env_name)
    args = ["test",
            "--batch_size", str(batch_size),
            "--source", source_name,
            model_name]
    # New, modified path for conda. Source activate namely does the following:
    # - CONDA_DEFAULT_ENV=${env_name}
    # - CONDA_PREFIX=${env_path}
    # - PATH=$conda_bin:$PATH
    new_env = os.environ.copy()
    new_env['PATH'] = os.path.dirname(cmd) + os.pathsep + new_env['PATH']

    returncode, logs = _call_command(cmd, args, use_stdout=True,
                                     return_logs_with_stdout=True,
                                     env=new_env)
    assert returncode == 0

    # detect WARNING in the output log. The colored "WARNING" prefix is
    # loop-invariant, so build it once instead of once per log line.
    warn_start = escape_codes[default_log_colors['WARNING']] + \
        'WARNING' + escape_codes['reset']
    warn = 0
    for line in logs:
        if line.startswith(warn_start):
            logger.error("Warning present: {0}".format(line))
            warn += 1
    if warn > 0:
        raise ValueError("{0} warnings were observed for model {1}".
                         format(warn, model_name))
def rm_env(env_name):
    """Alias for remove_env: drop the conda environment and its env-db entries.

    Args:
        env_name (str): name of the conda environment to delete.
    """
    from kipoi.conda.env_db import get_model_env_db
    if env_exists(env_name):
        logger.info("Removing environment: {0}".format(env_name))
        remove_env(env_name)
        # remove the matching entries from the environment db. Snapshot the
        # matches first, then use a plain loop: db.remove() is called purely
        # for its side effect (a list comprehension here was an anti-pattern).
        db = get_model_env_db()
        db_entries = [e for e in db.get_all() if e.create_args.env == env_name]
        for entry in db_entries:
            db.remove(entry)
        db.save()
def test_model(model_name, source_name, env_name, batch_size):
    """kipoi test ...

    Create a fresh conda environment for the model, run ``kipoi test``
    inside it and fail if the run emitted any WARNING log lines.

    Args:
        model_name (str): model to test
        source_name (str): source name
        env_name (str): conda environment to (re)create for the test
        batch_size (int): batch size forwarded to ``kipoi test``

    Raises:
        AssertionError: if either subprocess exits with a non-zero code.
        ValueError: if one or more WARNING lines were observed in the logs.
    """
    if env_exists(env_name):
        logger.info("Environment {0} exists. Removing it.".format(env_name))
        remove_env(env_name)

    # TODO - if the model is a Keras model, print the Keras config file
    # and note which config file got used

    # create the model test environment
    cmd = "kipoi"
    args = ["env", "create",
            "--source", source_name,
            "--env", env_name,
            model_name]
    returncode = _call_command(cmd, args, use_stdout=True)
    assert returncode == 0

    # run the tests in the environment
    cmd = get_kipoi_bin(env_name)
    args = ["test",
            "--batch_size", str(batch_size),
            "--source", source_name,
            model_name]
    returncode, logs = _call_command(cmd, args, use_stdout=True,
                                     return_logs_with_stdout=True)
    assert returncode == 0

    # detect WARNING in the output log. The colored "WARNING" prefix is
    # loop-invariant, so build it once instead of once per log line.
    warn_start = escape_codes[default_log_colors['WARNING']] + \
        'WARNING' + escape_codes['reset']
    warn = 0
    for line in logs:
        if line.startswith(warn_start):
            logger.error("Warning present: {0}".format(line))
            warn += 1
    if warn > 0:
        raise ValueError("{0} warnings were observed for model {1}".
                         format(warn, model_name))
def test_model(model_name, caplog):
    """kipoi test ... (pytest entry point)

    Build a dedicated "test-" conda environment for ``model_name``, run
    ``kipoi test`` inside it and assert that nothing at WARNING level or
    above was logged.
    """
    caplog.set_level(logging.INFO)

    source_name = "kipoi"
    assert source_name == "kipoi"

    # prepend "test-" to the standard kipoi env name
    env_name = "test-" + conda_env_name(model_name, model_name, source_name)

    # if the environment already exists, remove it
    if env_exists(env_name):
        print("Removing the environment: {0}".format(env_name))
        remove_env(env_name)

    # create the model test environment
    create_args = ["kipoi", "env", "create",
                   "--source", source_name,
                   "--env", env_name,
                   model_name]
    returncode = subprocess.call(args=create_args)
    assert returncode == 0

    batch_size = str(2) if model_name == "basenji" else str(4)

    # run the tests in the environment
    test_args = [get_kipoi_bin(env_name), "test",
                 "--batch_size", batch_size,
                 "--source", source_name,
                 model_name]
    returncode = subprocess.call(args=test_args)
    assert returncode == 0

    # there shouldn't be any warning
    for record in caplog.records:
        assert record.levelname not in ['WARN', 'WARNING', 'ERROR', 'CRITICAL']
def create_model_env(model_name, source_name, env_name, vep=False):
    """Create a conda environment for the given model via ``kipoi env create``.

    Args:
        model_name (str): model to install the environment for
        source_name (str): source name
        env_name (str): name of the conda environment to (re)create
        vep (bool): if True, add ``--vep`` to the environment installation
    """
    # drop a stale environment first
    if env_exists(env_name):
        logger.info("Environment {0} exists. Removing it.".format(env_name))
        remove_env(env_name)

    # TODO - if the model is a Keras model, print the Keras config file
    # and note which config file got used

    # create the model test environment
    create_args = ["env", "create",
                   "--source", source_name,
                   "--env", env_name,
                   model_name]
    if vep:
        # Add --vep to environment installation (keep the model name last)
        create_args.insert(-1, "--vep")
    returncode = _call_command("kipoi", create_args, use_stdout=True)
    assert returncode == 0
def cli_test_source(command, raw_args):
    """Runs test on the model source.

    Args:
        command (str): must be "test-source"
        raw_args (list): raw CLI arguments to parse

    Exits the process: status 1 on failure (no models found, missing
    models.yaml, failed model tests), status 0 on the informational
    short-circuits (nothing modified, dry run, no common envs).
    """
    assert command == "test-source"
    # setup the arg-parsing
    parser = argparse.ArgumentParser(
        'kipoi {}'.format(command),
        description='Test models in model source')
    parser.add_argument('source', default="kipoi",
                        help='Which source to test')
    parser.add_argument('--git-range', nargs='+',
                        help='''Git range (e.g. commits or something like
                        "master HEAD" to check commits in HEAD vs master, or just "HEAD" to
                        include uncommitted changes). All models modified within this range will
                        be tested.''')
    parser.add_argument('-n', '--dry_run', action='store_true',
                        help='Dont run model testing')
    parser.add_argument('-b', '--batch_size', default=4, type=int,
                        help='Batch size')
    parser.add_argument('-x', '--exitfirst', action='store_true',
                        help='exit instantly on first error or failed test.')
    parser.add_argument(
        '-k', default=None,
        help='only run tests which match the given substring expression')
    parser.add_argument('-c', '--clean_env', action='store_true',
                        help='clean the environment after running.')
    parser.add_argument('--vep', action='store_true',
                        help='Install the vep dependency.')
    parser.add_argument('--common_env', action='store_true',
                        help='Test models in common environments.')
    parser.add_argument('--all', action='store_true',
                        help="Test all models in the source")
    args = parser.parse_args(raw_args)
    # --------------------------------------------
    source = kipoi.get_source(args.source)
    all_models = all_models_to_test(source)
    if args.k is not None:
        # NOTE(review): re.match anchors at the start of the model name,
        # while the -k help text promises a "substring expression"
        # (re.search semantics) - confirm which is intended.
        all_models = [x for x in all_models if re.match(args.k, x)]

    if len(all_models) == 0:
        logger.info("No models found in the source")
        sys.exit(1)

    if args.all:
        test_models = all_models
        logger.info('Testing all models:\n- {0}'.format(
            '\n- '.join(test_models)))
    else:
        test_models = restrict_models_to_test(all_models, source,
                                              args.git_range)
        if len(test_models) == 0:
            logger.info("No model modified according to git, exiting.")
            sys.exit(0)
        logger.info('{0}/{1} models modified according to git:\n- {2}'.format(
            len(test_models), len(all_models), '\n- '.join(test_models)))
    # Sort the models alphabetically
    test_models = sorted(test_models)

    # Parse the repo config
    cfg_path = get_file_path(source.local_path, "config",
                             extensions=[".yml", ".yaml"],
                             raise_err=False)
    if cfg_path is not None:
        cfg = kipoi.specs.SourceConfig.load(cfg_path, append_path=False)
        logger.info("Found config {0}:\n{1}".format(cfg_path, cfg))
    else:
        cfg = None

    if args.dry_run:
        logger.info(
            "-n/--dry_run enabled. Skipping model testing and exiting.")
        sys.exit(0)

    # TODO - make sure the modes are always tested in the same order?
    # - make sure the keras config doesn't get cluttered

    # Test common environments
    if args.common_env:
        logger.info("Installing common environments")
        env_dir = "shared/envs"
        import yaml
        models_yaml_path = os.path.join(source.local_path, env_dir,
                                        "models.yaml")
        if not os.path.exists(models_yaml_path):
            logger.error(
                "{} doesn't exists when installing the common environment".
                format(models_yaml_path))
            sys.exit(1)
        # Re-use the already computed path and close the handle (the previous
        # version rebuilt the path and leaked the open file object).
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; prefer yaml.safe_load if this file may not be
        # fully trusted.
        with open(models_yaml_path) as fh:
            model_envs = yaml.load(fh)
        test_envs = {
            get_common_env(m, model_envs)
            for m in test_models
            if get_common_env(m, model_envs) is not None
        }
        if len(test_envs) == 0:
            logger.info("No common environments to test")
            sys.exit(0)
        logger.info(
            "Installing environments covering the following models: \n{}".
            format(yaml.dump(model_envs)))
        for env in test_envs:
            if env_exists(env):
                logger.info("Common environment already exists: {}. "
                            "Skipping the installation".format(env))
            else:
                logger.info("Installing environment: {}".format(env))
                create_model_env(os.path.join(env_dir, env), args.source, env,
                                 vep=args.vep)

    logger.info("Running {0} tests..".format(len(test_models)))
    failed_models = []
    for i, m in enumerate(test_models, 1):
        print('-' * 20)
        print("{0}/{1} - model: {2}".format(i, len(test_models), m))
        print('-' * 20)
        try:
            if not args.common_env:
                # Prepend "test-" to the standard kipoi env name
                env_name = "test-" + conda_env_name(m, source=args.source)
                # Test
                test_model(m, args.source, env_name,
                           get_batch_size(cfg, m, args.batch_size),
                           args.vep, create_env=True)
            else:
                # figure out the common environment name
                env_name = get_common_env(m, model_envs)
                if env_name is None:
                    # skip if none was found
                    logger.info("Common environment not found for {}".
                                format(m))
                    continue
                # ---------------------------
                # Test
                print("test_model...")
                test_model(m, args.source, env_name,
                           get_batch_size(cfg, m, args.batch_size),
                           args.vep, create_env=False)
        except Exception as e:
            logger.error("Model {0} failed: {1}".format(m, e))
            failed_models += [m]
            if args.exitfirst:
                if args.clean_env and not args.common_env:
                    rm_env(env_name)
                sys.exit(1)
        finally:
            if args.clean_env and not args.common_env:
                rm_env(env_name)
    print('-' * 40)
    if failed_models:
        logger.error("{0}/{1} tests failed for models:\n- {2}".format(
            len(failed_models), len(test_models),
            "\n- ".join(failed_models)))
        sys.exit(1)
    logger.info('All tests ({0}) passed'.format(len(test_models)))