def test_write_to_cp437_output(self):
    """Check writing to a cp437 output (e.g. Windows console)."""
    byte_sink = io.BytesIO()
    # Emulate a Windows console: a cp437 text wrapper over a byte stream.
    console = io.TextIOWrapper(io.BufferedWriter(byte_sink), encoding="cp437")
    massedit.edit_files(["tests.py"], expressions=["line[:10]"],
                        output=console)
    self.assertIsNotNone(byte_sink.getvalue())
def test_bad_module(self):
    """Test error when a bad module is passed to the command."""
    captured = LogInterceptor(massedit.log)
    with self.assertRaises(ImportError):
        massedit.edit_files(['tests.py'], functions=['bong:modify'])
    self.assertEqual(captured.log, "failed to import bong\n")
def test_bad_module(self):
    """Check error when the function module is not found."""
    interceptor = LogInterceptor(massedit.log)
    with self.assertRaises(ImportError):
        massedit.edit_files(['tests.py'], functions=['bong:modify'])
    expected_log = 'failed to import bong\n'
    self.assertEqual(interceptor.log, expected_log)
def test_missing_function_name(self):
    """Check error when the function is empty but not the module."""
    captured = LogInterceptor(massedit.log)
    with self.assertRaises(AttributeError):
        massedit.edit_files(["tests.py"], functions=["massedit:"])
    expected = ("'massedit:' is not a callable function: "
                "'dict' object has no attribute 'massedit'\n")
    self.assertEqual(captured.log, expected)
def test_empty_function(self):
    """Test empty argument."""
    sink = LogInterceptor(massedit.log)
    with self.assertRaises(AttributeError):
        massedit.edit_files(["tests.py"], functions=[":"])
    expected = ("':' is not a callable function: "
                "'dict' object has no attribute ''\n")
    self.assertEqual(sink.log, expected)
def test_bad_function_name(self):
    """Check error when the function name is not valid."""
    interceptor = LogInterceptor(massedit.log)
    with self.assertRaises(AttributeError):
        massedit.edit_files(['tests.py'], functions=['massedit:bad_fun'])
    # Only a substring match: the full message embeds an object repr.
    self.assertIn("has no attribute 'bad_fun'\n", interceptor.log)
def update(self, host, zone, ip):
    """Point the ansible_host entry for host.zone at the given ip."""
    click.echo(" - Updating record [{}.{}] with ip [{}]".format(
        host, zone, ip))
    # NOTE(review): the '.' between host and zone is an unescaped regex
    # metacharacter — presumably harmless for these names, but confirm.
    expression = ("re.sub('({}.{}).*(ansible_ssh_host=)?(.*)', "
                  "r'\\1 \\2ansible_host={}', line)").format(host, zone, ip)
    massedit.edit_files(['hosts'], [expression], dry_run=False)
def test_missing_function_name(self):
    """Check error when the function is empty but not the module."""
    log_capture = LogInterceptor(massedit.log)
    with self.assertRaises(AttributeError):
        massedit.edit_files(['tests.py'], functions=['massedit:'])
    prefix = "'massedit:' is not a callable function: "
    detail = "'dict' object has no attribute 'massedit'\n"
    self.assertEqual(log_capture.log, prefix + detail)
def test_bad_function(self):
    """Check error when the function name is not valid."""
    captured = LogInterceptor(massedit.log)
    with self.assertRaises(AttributeError):
        massedit.edit_files(['tests.py'], functions=['massedit:bong'])
    expected = ("'massedit:bong' is not a callable function: "
                "'module' object has no attribute 'bong'\n")
    self.assertEqual(captured.log, expected)
def test_empty_function(self):
    """Check error when function and module are empty."""
    interceptor = LogInterceptor(massedit.log)
    with self.assertRaises(AttributeError):
        massedit.edit_files(['tests.py'], functions=[':'])
    message = ("':' is not a callable function: "
               "'dict' object has no attribute ''\n")
    self.assertEqual(interceptor.log, message)
def test_empty_function(self):
    """Test empty argument."""
    log_capture = LogInterceptor(massedit.log)
    with self.assertRaises(AttributeError):
        massedit.edit_files(['tests.py'], functions=[':'])
    self.assertEqual(
        log_capture.log,
        "':' is not a callable function: 'dict' object has no attribute ''\n")
def test_write_to_cp437_output(self):
    """Check writing to a cp437 output (e.g. Windows console)."""
    backing = io.BytesIO()
    # A cp437 text wrapper over a raw byte buffer stands in for the
    # Windows console.
    wrapper = io.TextIOWrapper(io.BufferedWriter(backing), encoding='cp437')
    massedit.edit_files(['tests.py'], expressions=['line[:10]'],
                        output=wrapper)
    self.assertIsNotNone(backing.getvalue())
def test_exec_option(self):
    """Check trivial call using executable."""
    diff_output = io.StringIO()
    next(massedit.get_paths(["tests.py"]))
    massedit.edit_files(["tests.py"], executables=["head -1"],
                        output=diff_output)
    diff_lines = diff_output.getvalue().split("\n")
    self.assertEqual(diff_lines[3], "-#!/usr/bin/env python")
    self.assertEqual(diff_lines[-1], "+#!/usr/bin/env python+")
def walk_and_replace(find, replace):
    """Apply re.sub(find, replace) to each line of every file under ./Working/."""
    for path, _dirs, files in os.walk(os.path.relpath("./Working/")):
        for filename in files:
            target = os.path.join(path, filename)
            # NOTE(review): output=os.devnull passes a path *string*, not a
            # file object — confirm massedit accepts that, otherwise wrap
            # it in open().
            massedit.edit_files(
                [target],
                ["re.sub('%s', '%s', line)" % (find, replace)],
                dry_run=False, output=os.devnull)
def test_wrong_number_of_argument(self):
    """Test passing function that has the wrong number of arguments."""
    captured = LogInterceptor(massedit.log)
    with self.assertRaises(ValueError):
        massedit.edit_files(['tests.py'],
                            functions=['massedit:get_function'])
    expected = ("'massedit:get_function' is not a callable function: "
                "function should take 2 arguments: lines, file_name\n")
    self.assertEqual(captured.log, expected)
def test_exec_option(self):
    """Check trivial call using executable."""
    sink = io.StringIO()
    command = 'head -1'
    next(massedit.get_paths(['tests.py']))
    massedit.edit_files(['tests.py'], executables=[command], output=sink)
    lines = sink.getvalue().split("\n")
    self.assertEqual(lines[3], '-#!/usr/bin/env python')
    self.assertEqual(lines[-1], '+#!/usr/bin/env python+')
def test_exec_option(self):
    """Check trivial call using executable."""
    captured = io.StringIO()
    next(massedit.get_paths(['tests.py']))
    massedit.edit_files(['tests.py'], executables=['head -1'],
                        output=captured)
    actual_lines = captured.getvalue().split('\n')
    self.assertEqual(actual_lines[3], '-#!/usr/bin/env python')
    self.assertEqual(actual_lines[-1], '+#!/usr/bin/env python+')
def test_exec_option(self):
    """Check trivial call using executable."""
    sink = io.StringIO()
    # head(1) does not exist on Windows; PowerShell's gc -head is the
    # equivalent.
    if platform.system() == "Windows":
        command = "powershell -c gc -head 1"
    else:
        command = "head -1"
    next(massedit.get_paths(["tests.py"]))
    massedit.edit_files(["tests.py"], executables=[command], output=sink)
    lines = sink.getvalue().split("\n")
    self.assertEqual(lines[3], "-#!/usr/bin/env python")
    self.assertEqual(lines[-1], "+#!/usr/bin/env python+")
def test_error_in_function(self):
    """Check error when the function triggers an exception."""
    def divide_by_zero(unused):  # pylint: disable=W0613
        """Simulates division by zero."""
        raise ZeroDivisionError()
    sink = io.StringIO()
    # Silence logging while the deliberate failure propagates.
    massedit.log.disabled = True
    with self.assertRaises(ZeroDivisionError):
        massedit.edit_files(['tests.py'], [], [divide_by_zero], output=sink)
    massedit.log.disabled = False
def test_error_in_function(self):
    """Check error when the function triggers an exception."""
    def divide_by_zero(*_):
        """Simulates division by zero."""
        raise ZeroDivisionError()
    stream = io.StringIO()
    # Suppress log noise around the intentionally failing call.
    massedit.log.disabled = True
    with self.assertRaises(ZeroDivisionError):
        massedit.edit_files(['tests.py'], [], [divide_by_zero],
                            output=stream)
    massedit.log.disabled = False
def update_alpr(self): filenames = ['/storage/projects/alpr/config/openalpr.conf'] search_regex = self.name + '\s*=.*' replacement = self.name + ' = ' + str(self.value) devnull = open(os.devnull, 'w') massedit_command = "re.sub('%s', '%s', line)" % (search_regex, replacement) massedit.edit_files(filenames, [ massedit_command ], dry_run=False, output=devnull) devnull.close() print " -- Setting - OpenALPR Config is being updated: " + replacement
def test_exec_option(self):
    """Check trivial call using executable."""
    stream = io.StringIO()
    on_windows = platform.system() == 'Windows'
    # Windows has no head(1); PowerShell's gc -head 1 is equivalent.
    execname = 'powershell -c gc -head 1' if on_windows else 'head -1'
    next(massedit.get_paths(['tests.py']))
    massedit.edit_files(['tests.py'], executables=[execname], output=stream)
    actual = stream.getvalue().split("\n")
    self.assertEqual(actual[3], '-#!/usr/bin/env python')
    self.assertEqual(actual[-1], '+#!/usr/bin/env python+')
def test_file_option(self):
    """Test processing of a file."""
    def add_header(data, _):
        """Add header on top of the file."""
        yield "header on top\n"
        for line in data:
            yield line
    sink = io.StringIO()
    massedit.edit_files(['tests.py'], [], [add_header], output=sink)
    # The diff's third line shows the inserted header.
    self.assertEqual(sink.getvalue().split("\n")[3], "+header on top")
def test_file_option(self):
    """Check processing with function."""
    def add_header(data):
        """Adds header at the beginning of the data."""
        yield 'header on top\n'
        for line in data:
            yield line
    stream = io.StringIO()
    massedit.edit_files(['tests.py'], [], [add_header], output=stream)
    # Line 3 of the diff output is the added header.
    header_line = stream.getvalue().split('\n')[3]
    self.assertEqual(header_line, '+header on top')
def update_alpr(self): filenames = ['/storage/projects/alpr/config/openalpr.conf'] search_regex = self.name + '\s*=.*' replacement = self.name + ' = ' + str(self.value) devnull = open(os.devnull, 'w') massedit_command = "re.sub('%s', '%s', line)" % (search_regex, replacement) massedit.edit_files(filenames, [massedit_command], dry_run=False, output=devnull) devnull.close() print " -- Setting - OpenALPR Config is being updated: " + replacement
def test_process_subdirectory_dry_run_with_one_change(self):
    """Check that ommiting -w option does not change the files."""
    output = io.StringIO()
    processed_files = massedit.edit_files(
        ["*.txt"],
        expressions=["re.sub('text 1', 'blah blah 1', line)"],
        start_dirs=self.workspace.top_dir,
        output=output,
    )
    # Only the file that contains 'text 1' is reported as processed.
    self.assertEqual(processed_files, self.file_names[1:2])
    index = {}
    for ii, file_name in enumerate(self.file_names):
        with io.open(file_name) as fh:
            new_lines = fh.readlines()
        # Dry run: every file must still hold its original content.
        # NOTE(review): unicode() is a Python 2 builtin — confirm this
        # module targets Python 2 or defines a compatibility alias.
        self.assertEqual(new_lines, ["some text " + unicode(ii)])
        index[file_name] = ii
    actual = output.getvalue()
    # Expected diff text for each processed file; the missing newline
    # between the '-' and '+' lines matches files written without a
    # trailing newline.
    expected = "".join(
        [
            textwrap.dedent(
                """\
                --- {}
                +++ <new>
                @@ -1 +1 @@
                -some text {}+some blah blah {}"""
            ).format(file_name, index[file_name], index[file_name])
            for file_name in processed_files
        ]
    )
    self.assertEqual(actual, expected)
def test_api(self):
    """Check simple replacement via api."""
    file_base_name = os.path.basename(self.file_name)
    processed = massedit.edit_files(
        [file_base_name],
        ["re.sub('Dutch', 'Guido', line)"],
        [],
        start_dirs=self.workspace.top_dir,
        dry_run=False,
    )
    self.assertEqual(processed, [self.file_name])
    with io.open(self.file_name, "r") as new_file:
        new_lines = new_file.readlines()
    original_lines = zen.splitlines(True)
    self.assertEqual(len(new_lines), len(original_lines))
    n_lines = len(new_lines)
    for line_no in range(n_lines):
        if line_no == 16:
            # The edited line: 'Dutch' has become 'Guido'.
            self.assertEqual(
                new_lines[line_no - 1],
                "Although that way may not be obvious "
                "at first unless you're Guido.\n")
        else:
            # Every other line is untouched.
            self.assertEqual(new_lines[line_no - 1],
                             original_lines[line_no - 1])
def _update_configs(self):
    """Patch the installed streamer/encoder sample configs for this demo.

    Resolves both packages' install prefixes via `ros2 pkg prefix`, then
    rewrites stream names, topics, and encoder settings in place with
    massedit.
    """
    self.log('Updating configuration')
    self._streamer_prefix = sp.check_output(
        'ros2 pkg prefix kinesis_video_streamer',
        shell=True).strip().decode('utf-8')
    streamer_config_path = os.path.join(self._streamer_prefix, 'share',
                                        'kinesis_video_streamer', 'config',
                                        'sample_config.yaml')
    self._encoder_prefix = sp.check_output(
        'ros2 pkg prefix h264_video_encoder',
        shell=True).strip().decode('utf-8')
    encoder_config_path = os.path.join(self._encoder_prefix, 'share',
                                       'h264_video_encoder', 'config',
                                       'sample_configuration.yaml')
    # Raw strings below: '\#' and '\s' are invalid escape sequences in
    # plain literals (SyntaxWarning since Python 3.12); the byte content
    # is unchanged.
    massedit.edit_files([
        streamer_config_path,
    ], [
        "re.sub('stream_name:(.*)', 'stream_name: %s', line)" %
        (self._fq(self._video_stream_name), ),
        "re.sub('rekognition_data_stream:(.*)', 'rekognition_data_stream: %s', line)"
        % (self._fq(self._data_stream_name), ),
        # Un-comment the rekognition entries, then set their values.
        r"re.sub('\#\srekognition_data_stream:', 'rekognition_data_stream:', line)",
        "re.sub('topic_type:(.*)', 'topic_type: 3', line)",
        "re.sub('rekognition_topic_name:(.*)', 'rekognition_topic_name: /rekognition/results', line)",
        r"re.sub('\#\srekognition_topic_name:', 'rekognition_topic_name:', line)"
    ], dry_run=False)
    massedit.edit_files([
        encoder_config_path,
    ], [
        "re.sub('subscription_topic:(.*)', 'subscription_topic: %s', line)" %
        (self._encoder_subscription_topic, ),
        "re.sub('bitrate:(.*)', 'bitrate: 512000', line)",
        "re.sub('output_width:(.*)', 'output_width: 400', line)",
        r"re.sub('\#\soutput_width:', 'output_width:', line)",
        "re.sub('output_height:(.*)', 'output_height: 320', line)",
        r"re.sub('\#\soutput_height:', 'output_height:', line)",
        "re.sub('fps_numerator:(.*)', 'fps_numerator: 28', line)"
    ], dry_run=False)
def replace_config_env_matches(directory, patterns):
    """Substitute environment-variable values into config files in place.

    Args:
        directory: root directory searched (up to 10 levels deep).
        patterns: mapping of regex pattern -> environment variable name;
            each match of the pattern is replaced by the variable's value
            (empty string when unset).
    """
    compiled_patterns = []
    for pattern, replacement_env in patterns.items():
        # Escape double quotes so the value can be embedded in the
        # double-quoted re.sub expression below.
        # BUG FIX: the original used .replace('"', '\"'), which is a
        # no-op because '\"' == '"'; a value containing a quote would
        # break the generated expression.
        replacement = os.environ.get(replacement_env, '').replace('"', '\\"')
        compiled_patterns.append('re.sub("{}", "{}", line)'.format(
            pattern, replacement))
    # Only well-known config file extensions are touched.
    whitelisted_extensions = [
        'yml', 'yaml', 'json', 'properties', 'ini', 'csv'
    ]
    file_matches = [
        '*.{}'.format(extension) for extension in whitelisted_extensions
    ]
    massedit.edit_files(file_matches,
                        expressions=compiled_patterns,
                        start_dirs=directory,
                        dry_run=False,
                        max_depth=10)
def createapp(microservice_name):
    """Scaffold a new microservice package and register it in config.py.

    Creates the package directory with stub modules (crud, schemas,
    views, tests), a models subpackage wired up with automodinit, and
    prepends the new app to the APPS list in ./<project_name>/config.py.
    Prints an error if the directory already exists.
    """
    # Stub files created at the package root: filename -> initial content.
    stubs = {
        "__init__.py": "# Init your app.\n",
        "crud.py": "# Create your CRUD methods here.\n",
        "schemas.py": "# Create your pydantic schemas here.\n",
        "views.py": "# Create your endpoints here.\n",
        "tests.py": "# Create your tests here.\n",
    }
    try:
        Path(microservice_name).mkdir()
        for filename, content in stubs.items():
            with open(f"{microservice_name}/{filename}", "w") as io_file:
                io_file.write(content)
        Path(f"{microservice_name}/models").mkdir()
        with open(f"{microservice_name}/models/__init__.py", "w") as io_file:
            io_file.write(
                "__all__ = []\n"
                "# Don't modify the line above, or this line!\n"
                "import automodinit\n"
                "automodinit.automodinit(__name__, __file__, globals())\n"
                "del automodinit\n"
                "# Anything else you want can go after here, "
                "it won't get modified.\n")
        with open(f"{microservice_name}/models/models.py", "w") as io_file:
            io_file.write("# Create your SqlAlchemy models here.\n")
        # Insert the new app right after the opening of the APPS list.
        # NOTE(review): project_name is resolved from the enclosing module
        # scope — assumed to be defined by the surrounding CLI script.
        add_app_to_settings = (
            "re.sub(r'APPS: List\\[str] = \\[',"
            f"""'APPS: List[str] = ["{microservice_name}",', line)""")
        massedit.edit_files([f"./{project_name}/config.py"],
                            [add_app_to_settings],
                            dry_run=False)
        print(
            f'{microservice_name} has been created and APPS in "config.py"',
            "has been updated.",
        )
    except FileExistsError:
        # Typo fixed: message previously read "alredy exist".
        print(f"{microservice_name} already exists")
def test_api(self):
    """Check simple replacement via api."""
    base_name = os.path.basename(self.file_name)
    processed = massedit.edit_files([base_name],
                                    ["re.sub('Dutch', 'Guido', line)"], [],
                                    start_dirs=self.workspace.top_dir,
                                    dry_run=False)
    self.assertEqual(processed, [self.file_name])
    with io.open(self.file_name, "r") as new_file:
        new_lines = new_file.readlines()
    original_lines = zen.splitlines(True)
    self.assertEqual(len(new_lines), len(original_lines))
    n_lines = len(new_lines)
    expected_line_16 = ("Although that way may not be obvious "
                        "at first unless you're Guido.\n")
    for idx in range(n_lines):
        if idx != 16:
            # Untouched lines must match the original text verbatim.
            self.assertEqual(new_lines[idx - 1], original_lines[idx - 1])
        else:
            # The single edited line has 'Dutch' replaced with 'Guido'.
            self.assertEqual(new_lines[idx - 1], expected_line_16)
def setup(version, reps, search, maxruns):
    """Patch the generated *.src<version>.c sources with tuning parameters."""
    candidates = glob.glob('*.src%d.c' % version)
    filenames = [name for name in candidates if not name.startswith('_')]
    # NOTE(review): gcc_arch is resolved from the enclosing module scope.
    massedit.edit_files(
        filenames,
        ["re.sub(r'gcc ', 'gcc -Ofast -march=%s -mavx2 -m64 ', line)" %
         gcc_arch],
        dry_run=False)
    massedit.edit_files(
        filenames,
        ["re.sub(r'arg repetitions = 35;', 'arg repetitions = %d;', line)" %
         reps],
        dry_run=False)
    massedit.edit_files(
        filenames,
        ["re.sub(r\"arg algorithm = 'Randomsearch';\", \"arg algorithm = '%s';\", line)"
         % search],
        dry_run=False)
    massedit.edit_files(
        filenames,
        ["re.sub(r'arg total_runs = 10000;', 'arg total_runs = %d;', line)" %
         maxruns],
        dry_run=False)
def test_process_subdirectory_dry_run_with_one_change(self):
    """Check that ommiting -w option does not change the files."""
    output = io.StringIO()
    processed_files = massedit.edit_files(\
        ["*.txt"],
        expressions=["re.sub('text 1', 'blah blah 1', line)"],
        start_dirs=self.workspace.top_dir,
        output=output)
    # Only the file containing 'text 1' should be reported as processed.
    self.assertEqual(processed_files, self.file_names[1:2])
    index = {}
    for ii, file_name in enumerate(self.file_names):
        with io.open(file_name) as fh:
            new_lines = fh.readlines()
        # Dry run: the files on disk keep their original content.
        # NOTE(review): unicode() is a Python 2 builtin — confirm this
        # module targets Python 2 or provides an alias.
        self.assertEqual(new_lines, ["some text " + unicode(ii)])
        index[file_name] = ii
    actual = output.getvalue()
    # Build the expected unified-diff text for each processed file.
    expected = "".join([textwrap.dedent("""\
        --- {}
        +++ <new>
        @@ -1 +1 @@
        -some text {}+some blah blah {}""").format(\
        file_name, index[file_name], index[file_name])
        for file_name in processed_files])
    self.assertEqual(actual, expected)
recursive_albums_name_pattern = os.getenv("RECURSIVE_ALBUMS_NAME_PATTERN") overwrite = os.getenv("OVERWRITE") if overwrite == 'true': overwrite = True else: overwrite = False filenames = [ os.path.join(os.path.dirname(os.path.realpath(__file__)), "web", "package.json") ] massedit.edit_files(filenames, [ "re.sub(r'^.*\"homepage\":.*$', ' \"homepage\": \"" + http_root + "\",', line)" ], dry_run=False) output_photos_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "web", "public", "static", "_gallery") output_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "web", "src", "_gallery") external_root = os.path.join(http_root, "static", "_gallery", "albums") # TODO check exists input_photo_path = os.path.realpath(input_path) generator = SiteGenerator(site_name, input_photo_path, people_enabled, watermark_enabled, watermark_path, watermark_ratio, recursive_albums, recursive_albums_name_pattern,
def ecoliLongTermFLYCOP_oneConf(glu1, ac1, o21, glu2, ac2, o22,
                                fitFunc='MaxYield_MinTime', dirPlot='',
                                repeat=10):
    '''
    Run one FLYCOP configuration of the two-strain E. coli COMETS community.

    Call: avgFitness, sdFitness = ecoliLongTerm_oneConf(glu1,ac1,o21,glu2,ac2,o22)

    INPUTS:
        glu1: lower bound of glucose uptake in model 1.
        ac1: lower bound of acetate uptake in model 1.
        o21: lower bound of oxygen uptake in model 1.
        glu2: lower bound of glucose uptake in model 2.
        ac2: lower bound of acetate uptake in model 2.
        o22: lower bound of oxygen uptake in model 2.
        fitFunc: name of the fitness function to optimize.
        dirPlot: copy of the graphs with several run results.
        repeat: number of runs with the same configuration.

    OUTPUT:
        avgFitness: average fitness of 'repeat' COMETS runs with the same
            configuration (COMETS is not deterministic).
        sdFitness: standard deviation of fitness over the 'repeat' runs.
    '''
    # Build the template COBRA model on first use.
    if not (os.path.exists('ModelsInput/iJO1366py_tmp.mat')):
        initialize_models()

    # Determine initial biomasses.
    biomass1 = 0.01
    biomass2 = 0.01
    print("Fitness function:" + fitFunc)

    # Single GEMs parameter modifications
    # ===================================
    if not (os.path.exists('ecoli_1_tmp.mat.cmt')):
        # 1.1.- [COBRApy] Establish modifications in model 1.
        model = cobra.io.load_matlab_model('ModelsInput/iJO1366py_tmp.mat')
        model.reactions.get_by_id('EX_glc__D(e)').bounds = (-1000, 1000)
        model.reactions.get_by_id('EX_o2(e)').bounds = (-1000, 1000)
        # Uptake limits go on the transport reactions with the sign
        # flipped (uptake parameters are negative).
        model.reactions.get_by_id('GLCtex_copy1').bounds = (0, -glu1)
        model.reactions.get_by_id('O2tex').bounds = (0, -o21)
        if (ac1 <= 0):
            # Non-positive ac1: strain 1 may consume acetate up to -ac1.
            model.reactions.get_by_id('ACtex').lower_bound = -1000
            model.reactions.get_by_id('ACtex').upper_bound = -ac1
            model.reactions.get_by_id('EX_ac(e)').bounds = (ac1, 1000)
        else:
            # Positive ac1: strain 1 may secrete acetate up to ac1.
            model.reactions.get_by_id('ACtex').lower_bound = -ac1
            model.reactions.get_by_id('ACtex').upper_bound = 1000
            model.reactions.get_by_id('EX_ac(e)').bounds = (-1000, ac1)
        cobra.io.save_matlab_model(model, 'ecoli_1_tmp.mat', 'model')
        del (model)

        # 1.2.- [COBRApy] Establish modifications in model 2 (same scheme
        # as model 1, using glu2/o22/ac2).
        model = cobra.io.load_matlab_model('ModelsInput/iJO1366py_tmp.mat')
        model.reactions.get_by_id('EX_glc__D(e)').bounds = (-1000, 1000)
        model.reactions.get_by_id('EX_o2(e)').bounds = (-1000, 1000)
        model.reactions.get_by_id('GLCtex_copy1').bounds = (0, -glu2)
        model.reactions.get_by_id('O2tex').bounds = (0, -o22)
        if (ac2 <= 0):
            model.reactions.get_by_id('ACtex').lower_bound = -1000
            model.reactions.get_by_id('ACtex').upper_bound = -ac2
            model.reactions.get_by_id('EX_ac(e)').bounds = (ac2, 1000)
        else:
            model.reactions.get_by_id('ACtex').lower_bound = -ac2
            model.reactions.get_by_id('ACtex').upper_bound = 1000
            model.reactions.get_by_id('EX_ac(e)').bounds = (-1000, ac2)
        cobra.io.save_matlab_model(model, 'ecoli_2_tmp.mat', 'model')
        del (model)

        # 2.- [python] Convert both COBRA models to COMETS format.
        # NOTE(review): indentation was lost in the original; these two
        # calls are assumed to be inside the guard above, since they
        # produce the .cmt file the guard tests for — confirm.
        mat_to_comets('ecoli_1_tmp.mat')
        mat_to_comets('ecoli_2_tmp.mat')

    # Community parameter modifications
    # =================================
    # 4.- [shell script] Write automatically the COMETS parameter about
    # initial biomass of strains (XXX/YYY placeholders in the layout).
    massedit.edit_files(['ecoliLongTerm_layout_template.txt'],
                        ["re.sub(r'XXX','" + str(biomass1) + "',line)"],
                        dry_run=False)
    massedit.edit_files(['ecoliLongTerm_layout_template.txt'],
                        ["re.sub(r'YYY','" + str(biomass2) + "',line)"],
                        dry_run=False)

    # 5.- [COMETS by command line] Run COMETS.
    if not (os.path.exists('IndividualRunsResults')):
        os.makedirs('IndividualRunsResults')
    totfitness = 0
    sumTotBiomass = 0
    sumTotYield = 0
    fitnessList = []

    # To repeat X times, due to random behaviour in COMETS:
    for i in range(repeat):
        with open("output.txt", "w") as f:
            subprocess.call(['./comets_scr', 'comets_script_template'],
                            stdout=f)

        # 6.- [R call] Run script to generate one graph: strains versus
        # metabolite/s.
        subprocess.call([
            '../../Scripts/plot_biomassX2_vs_2mediaItem.sh', 'template',
            'glc_D', 'ac', 'Ecoli1', 'Ecoli2'
        ])

        # 7.- Compute fitness (measure to optimize):
        print('computing fitness...')

        # 7.1.- Determine endCycle: when glucose and acetate are exhausted.
        with open("biomass_vs_glc_D_ac_template.txt", "r") as sources:
            lines = sources.readlines()
        iniPointV = lines[0].split()
        iniBiomass = float(iniPointV[1]) + float(iniPointV[2])
        totGlc = float(iniPointV[3])
        endGlcCycle = 0
        for line in lines:
            endCycle = int(line.split()[0])
            glcConc = float(line.split()[3])
            acConc = float(line.split()[4])
            # Remember the first cycle at which glucose hits zero.
            if ((endGlcCycle == 0) and (glcConc == 0.0)):
                endGlcCycle = endCycle
            # Stop when both carbon sources are exhausted...
            if ((glcConc == 0.0) and (acConc == 0.0)):
                break
            # ...or when glucose is gone and neither strain takes up acetate.
            if ((glcConc == 0.0) and (ac1 >= 0) and (ac2 >= 0)):
                break
        endPointV = lines[endCycle].split()  # NOTE(review): currently unused.

        # 7.2.- Compute first element fitness: maximize biomass yield.
        # biomass yield = sum(increment in biomass per strain (i.e. biomass
        # final point - biomass initial point)) / initial concentration of
        # glucose in the media (total glucose, because the end of our
        # experiment is after glucose finished). In gDW/mmol.
        # Final biomass is taken as the per-strain maximum over the run.
        finalBiomass1 = 0
        finalBiomass2 = 0
        count = 0
        for line in lines:
            if (float(line.split()[1]) > finalBiomass1):
                finalBiomass1 = float(line.split()[1])
            if (float(line.split()[2]) > finalBiomass2):
                finalBiomass2 = float(line.split()[2])
            if (count > endCycle):
                break
            count = count + 1
        finalBiomass = finalBiomass1 + finalBiomass2
        biomassYieldNew = float(
            (finalBiomass - iniBiomass) /
            (totGlc * 0.1801559))  # molecular weigth glucose per mmol
        # For normalizing yield.
        MaximumYield = 0.6

        # 7.3.- Compute second element fitnes: minimize time.
        fitTime = 1 - (float(endCycle) / float(240))

        # 7.4.- Compute joint fitness, as a 50% each element.
        if (fitFunc == 'Yield'):
            fitness = (biomassYieldNew / MaximumYield)  # Normalizing Yield
        elif (fitFunc == 'MaxYield_MinTime'):
            fitness = 0.5 * (biomassYieldNew /
                             MaximumYield) + 0.5 * fitTime  # Normalizing yield
        elif (fitFunc == 'YieldNewScattered'):
            # (biomass^4)*10: To spread values from ~0.45-0.55 values to
            # 0.5 to 1.
            fitness = (biomassYieldNew**4) * 10
        elif (fitFunc == 'MaxYieldNewScattered_MinTime'):
            fitness = 0.5 * ((biomassYieldNew**4) * 10) + 0.5 * fitTime
        elif (fitFunc == 'Biomass'):
            fitness = float(finalBiomass - iniBiomass)
        elif (fitFunc == 'MaxBiomass_MinTime'):
            fitness = 0.5 * (float(finalBiomass - iniBiomass)) + 0.5 * fitTime
        elif ((fitFunc == 'GR') or (fitFunc == 'MaxGR_MinTime')):
            # Growth-rate fitness: find each model's objective reaction
            # index in the COMETS file, then read its flux at the cycle
            # before glucose exhaustion from the flux log.
            numRxnGR1 = int(
                subprocess.check_output([
                    'egrep -A1 "OBJECTIVE" ecoli_1_tmp.mat.cmt | tail -1 | tr -d \[:space:\]'
                ], shell=True))
            numRxnGR2 = int(
                subprocess.check_output([
                    'egrep -A1 "OBJECTIVE" ecoli_2_tmp.mat.cmt | tail -1 | tr -d \[:space:\]'
                ], shell=True))
            try:
                GR1 = float(
                    subprocess.check_output([
                        'egrep "fluxes\{"' + str(endGlcCycle - 1) +
                        '"\}\{1\}\{1\}\{1\}" flux_log_template.txt | cut -d"=" -f2 | cut -d" " -f'
                        + str(numRxnGR1 + 1)
                    ], shell=True))
            except:
                GR1 = 0.0
            try:
                GR2 = float(
                    subprocess.check_output([
                        'egrep "fluxes\{"' + str(endGlcCycle - 1) +
                        '"\}\{1\}\{1\}\{2\}" flux_log_template.txt | cut -d"=" -f2 | cut -d" " -f'
                        + str(numRxnGR2 + 1)
                    ], shell=True))
            except:
                GR2 = 0.0
            fitGR = (GR1 + GR2) / 2
            if (fitFunc == 'GR'):
                fitness = fitGR
            elif (fitFunc == 'MaxGR_MinTime'):
                fitness = 0.5 * fitGR + 0.5 * fitTime

        # To avoid unrealistic cases, because with 10mM of glc the strains
        # can't reach more than ~1 gr/L. I'm not sure if the relation is
        # lineal with less or more glucose, but this solution is better
        # than >1, which it will be very ad-hoc to totGlc=10.
        # if(float(finalBiomass-iniBiomass) > (totGlc/10)):
        if (float(finalBiomass - iniBiomass) > 1.03):
            # Given that with both strains with WT, total biomass=1.028.
            fitness = 0

        # Compute acetate uptake.
        numRxnExAc = 37  # Position EX_ac(e) in .mat.cmt - Position first rxn + 1
        # flux in cycle 2 (the first one is usually 0)
        uptakeAc1 = float(
            subprocess.check_output([
                'egrep "fluxes\{2\}\{1\}\{1\}\{1\}" flux_log_template.txt | cut -d"=" -f2 | cut -d" " -f'
                + str(numRxnExAc)
            ], shell=True))
        uptakeAc2 = float(
            subprocess.check_output([
                'egrep "fluxes\{2\}\{1\}\{1\}\{2\}" flux_log_template.txt | cut -d"=" -f2 | cut -d" " -f'
                + str(numRxnExAc)
            ], shell=True))

        print(" Total biomass: " + str(round(finalBiomass, 6)) +
              " in cycle " + str(endCycle) + ". Biomass yield=" +
              str(round(biomassYieldNew, 6)))
        totfitness = totfitness + fitness
        fitnessList.append(fitness)
        sumTotBiomass = sumTotBiomass + finalBiomass
        sumTotYield = sumTotYield + biomassYieldNew

        # Copy individual solution.
        file = 'IndividualRunsResults/' + 'biomass_vs_glc_D_ac_run' + str(
            i) + '_' + str(fitness) + '_' + str(endCycle) + '.pdf'
        shutil.move('biomass_vs_glc_D_ac_template_plot.pdf', file)
        if (dirPlot != ''):
            file2 = dirPlot + 'biomass_vs_glc_D_ac_' + str(glu1) + '_' + str(
                ac1) + '_' + str(o21) + '_' + str(
                    glu2) + '_' + str(ac2) + '_' + str(o22) + '_' + str(
                        round(uptakeAc1, 1)) + '_' + str(round(
                            uptakeAc2, 1)) + '_run' + str(i) + '_' + str(
                                fitness) + '_' + str(endCycle) + '.pdf'
            shutil.copy(file, file2)
        file = 'IndividualRunsResults/' + 'total_biomass_log_run' + str(
            i) + '.txt'
        shutil.move('total_biomass_log_template.txt', file)
        file = 'IndividualRunsResults/' + 'media_log_run' + str(i) + '.txt'
        shutil.move('media_log_template.txt', file)
        file = 'IndividualRunsResults/' + 'flux_log_run' + str(i) + '.txt'
        shutil.move('flux_log_template.txt', file)

    # Aggregate over all repeats.
    avgfitness = totfitness / repeat
    sdfitness = statistics.stdev(fitnessList)
    avgBiomass = sumTotBiomass / repeat
    avgYield = sumTotYield / repeat
    print(
        "Fitness_function\tconfiguration\tfitness\tsd\tavg.Biomass\tavg.Yield\tendCycle"
    )
    print(fitFunc + "\t" + str(glu1) + ',' + str(ac1) + ',' + str(o21) + ',' +
          str(glu2) + ',' + str(ac2) + ',' + str(o22) + ',' +
          str(round(uptakeAc1, 1)) + ',' + str(round(uptakeAc2, 1)) + "\t" +
          str(round(avgfitness, 6)) + "\t" + str(sdfitness) + "\t" +
          str(round(avgBiomass, 6)) + "\t" + str(round(avgYield, 6)) + "\t" +
          str(endCycle))
    with open(dirPlot + "configurationsResults" + fitFunc + ".txt",
              "a") as myfile:
        myfile.write(
            "Fitness_function\tconfiguration\tfitness\tsd\tavg.Biomass\tavg.Yield\tendCycle\n"
        )
        myfile.write(fitFunc + "\t" + str(glu1) + ',' + str(ac1) + ',' +
                     str(o21) + ',' + str(glu2) + ',' + str(ac2) + ',' +
                     str(o22) + ',' + str(round(uptakeAc1, 1)) + ',' +
                     str(round(uptakeAc2, 1)) + "\t" +
                     str(round(avgfitness, 6)) + "\t" + str(sdfitness) +
                     "\t" + str(round(avgBiomass, 6)) + "\t" +
                     str(round(avgYield, 6)) + "\t" + str(endCycle) + "\n")
    print("Avg.fitness(sd):\t" + str(avgfitness) + "\t" + str(sdfitness) +
          "\n")
    # Highly variable configurations are treated as unreliable.
    if (sdfitness > 0.1):
        avgfitness = 0.0
    return avgfitness, sdfitness
def coGrowth4EcoliFLYCOP_oneConf(biomass1,
                                 biomass2,
                                 biomass3,
                                 biomass4,
                                 arg,
                                 lys,
                                 met,
                                 phe,
                                 fitFunc='ratioGRavgGR',
                                 dirPlot='',
                                 repeat=3):
    '''
    Evaluate one configuration of the 4-strain E.coli cross-feeding consortium.

    Call: avgFitness, sdFitness = coGrowth4Ecoli_oneConf(biomass1,biomass2,biomass3,biomass4,arg,lys,met,phe)

    INPUTS: biomass1: biomass of Ecoli Ec1, according to [Chan,2017]. In: {met, lys}, Out:arg
            biomass2: biomass of Ecoli Ec2, according to [Chan,2017]. In: {arg, phe}, Out:lys
            biomass3: biomass of Ecoli Ec3, according to [Chan,2017]. In: {arg, phe}, Out:met
            biomass4: biomass of Ecoli Ec4, according to [Chan,2017]. In: {met, lys}, Out:phe
            arg: rate of arginine to secrete by Ec1, as proportion of BOF flux.
            lys: rate of lysine to secrete by Ec2, as proportion of BOF flux.
            met: rate of methionine to secrete by Ec3, as proportion of BOF flux.
            phe: rate of phenylalanine to secrete by Ec4, as proportion of BOF flux.
            fitFunc: fitness function to optimize.
            dirPlot: copy of the graphs with several run results.
            repeat: number of runs with the same configuration.
    OUTPUT: avgFitness: average fitness of 'repeat' COMETS runs with the same configuration (due to it is not deterministic)
            sdFitness: standard deviation of fitness during 'repeat' COMETS runs (see above)
    '''
    if not (os.path.exists('ModelsInput/iAF1260_Ec1.mat')):
        initialize_models()
    title = str(biomass1) + '_' + str(biomass2) + '_' + str(
        biomass3) + '_' + str(biomass4)
    maxBiomass = 2  # maximum to normalize increment in biomass in fitBiomass as part of fitness function.
    iniBiomass = 0.1  # total initial biomass; split among the 4 strains by the given proportions
    mass1 = iniBiomass * biomass1
    mass2 = iniBiomass * biomass2
    mass3 = iniBiomass * biomass3
    mass4 = iniBiomass * biomass4
    print("Fitness function:" + fitFunc)

    # Single GEMs parameter modifications
    # ===================================
    # Only build the modified models once; the .cmt file is the marker that
    # this configuration was already prepared.
    if not (os.path.exists('iAF1260_Ec1_tmp.mat.cmt')):
        # 1.1.- [COBRApy] Establish modifications in model 1
        model = cobra.io.load_matlab_model('ModelsInput/iAF1260_Ec1.mat')
        # New: add the extracellular(e) metabolite to secrete to the right side of BOF rxn,
        # and the same amount of intracellular(c) metabolite to the left side of BOF rxn.
        # Preserving open bounds in EX_* and *tex rxns.
        # [Harcombe, 2014] "A mutant S. enterica model was constructed that excreted
        # methionine at a rate consistent with empirical observations. To achieve this,
        # we added on the right side of the growth reaction 0.5 mmol/gDW of excreted
        # extracellular methionine, balanced by an equal amount of intracellular
        # methionine consumed (at the left side of the reaction equation)."
        idBOFrxn = list(model.objective.keys())[0].id
        coeff = arg
        # 'subtract_metabolite' adds a term with the metabolite coefficient with the
        # given value*-1. So, on the right side you put -coeff (-*-1=+) and on the
        # left side coeff (+*-1=-).
        model.reactions.get_by_id(idBOFrxn).subtract_metabolites(
            {model.metabolites.get_by_id('arg-L[c]'): coeff})
        model.reactions.get_by_id(idBOFrxn).subtract_metabolites(
            {model.metabolites.get_by_id('arg-L[e]'): -coeff})
        cobra.io.save_matlab_model(model, 'iAF1260_Ec1_tmp.mat', 'model')
        del (model)

        # 1.2.- [COBRApy] Establish modifications in model 2
        model = cobra.io.load_matlab_model('ModelsInput/iAF1260_Ec2.mat')
        idBOFrxn = list(model.objective.keys())[0].id
        coeff = lys
        model.reactions.get_by_id(idBOFrxn).subtract_metabolites(
            {model.metabolites.get_by_id('lys-L[c]'): coeff})
        model.reactions.get_by_id(idBOFrxn).subtract_metabolites(
            {model.metabolites.get_by_id('lys-L[e]'): -coeff})
        cobra.io.save_matlab_model(model, 'iAF1260_Ec2_tmp.mat', 'model')
        del (model)

        # 1.3.- [COBRApy] Establish modifications in model 3
        model = cobra.io.load_matlab_model('ModelsInput/iAF1260_Ec3.mat')
        idBOFrxn = list(model.objective.keys())[0].id
        coeff = met
        model.reactions.get_by_id(idBOFrxn).subtract_metabolites(
            {model.metabolites.get_by_id('met-L[c]'): coeff})
        model.reactions.get_by_id(idBOFrxn).subtract_metabolites(
            {model.metabolites.get_by_id('met-L[e]'): -coeff})
        cobra.io.save_matlab_model(model, 'iAF1260_Ec3_tmp.mat', 'model')
        del (model)

        # 1.4.- [COBRApy] Establish modifications in model 4
        model = cobra.io.load_matlab_model('ModelsInput/iAF1260_Ec4.mat')
        idBOFrxn = list(model.objective.keys())[0].id
        coeff = phe
        model.reactions.get_by_id(idBOFrxn).subtract_metabolites(
            {model.metabolites.get_by_id('phe-L[c]'): coeff})
        model.reactions.get_by_id(idBOFrxn).subtract_metabolites(
            {model.metabolites.get_by_id('phe-L[e]'): -coeff})
        cobra.io.save_matlab_model(model, 'iAF1260_Ec4_tmp.mat', 'model')
        del (model)

        # 2.- [python] Convert the modified .mat models to COMETS format
        mat_to_comets('iAF1260_Ec1_tmp.mat')
        mat_to_comets('iAF1260_Ec2_tmp.mat')
        mat_to_comets('iAF1260_Ec3_tmp.mat')
        mat_to_comets('iAF1260_Ec4_tmp.mat')

        # Community parameter modifications
        # =================================
        # 4.- [shell script] Write automatically the COMETS parameter about initial
        # biomass of 4 strains, depending on proportions. Each placeholder
        # (WWW/XXX/YYY/ZZZ) in the layout template is replaced in place.
        massedit.edit_files(['coGrowth4Ecoli_layout_template.txt'],
                            ["re.sub(r'WWW','" + str(mass1) + "',line)"],
                            dry_run=False)
        massedit.edit_files(['coGrowth4Ecoli_layout_template.txt'],
                            ["re.sub(r'XXX','" + str(mass2) + "',line)"],
                            dry_run=False)
        massedit.edit_files(['coGrowth4Ecoli_layout_template.txt'],
                            ["re.sub(r'YYY','" + str(mass3) + "',line)"],
                            dry_run=False)
        massedit.edit_files(['coGrowth4Ecoli_layout_template.txt'],
                            ["re.sub(r'ZZZ','" + str(mass4) + "',line)"],
                            dry_run=False)

    # 5.- [COMETS by command line] Run COMETS
    # Compute no.rxn Growth Rate: the line after "OBJECTIVE" in each .cmt file
    # holds the 1-based index of the biomass (growth) reaction.
    numRxnGR1 = int(
        subprocess.check_output([
            'egrep -A1 "OBJECTIVE" iAF1260_Ec1_tmp.mat.cmt | tail -1 | tr -d \[:space:\]'
        ], shell=True))
    numRxnGR2 = int(
        subprocess.check_output([
            'egrep -A1 "OBJECTIVE" iAF1260_Ec2_tmp.mat.cmt | tail -1 | tr -d \[:space:\]'
        ], shell=True))
    numRxnGR3 = int(
        subprocess.check_output([
            'egrep -A1 "OBJECTIVE" iAF1260_Ec3_tmp.mat.cmt | tail -1 | tr -d \[:space:\]'
        ], shell=True))
    numRxnGR4 = int(
        subprocess.check_output([
            'egrep -A1 "OBJECTIVE" iAF1260_Ec4_tmp.mat.cmt | tail -1 | tr -d \[:space:\]'
        ], shell=True))
    if not (os.path.exists('IndividualRunsResults')):
        os.makedirs('IndividualRunsResults')
    totfitness = 0
    sumTotCycle = 0
    sumAvgGR = 0
    sumAvgRatioGR = 0
    sumAvgFitBiomass = 0
    sumGR1 = 0
    sumGR2 = 0
    sumGR3 = 0
    sumGR4 = 0
    fitnessList = []

    # To repeat X times, due to random behaviour in COMETS:
    for i in range(repeat):
        with open("output.txt", "w") as f:
            subprocess.call(['./comets_scr', 'comets_script_template'],
                            stdout=f)

        # 6.- [R call] Run script to generate one graph: 4 strains versus 4 aminoacids
        subprocess.call([
            '../../Scripts/plot_biomassX4_vs_4mediaItem.sh', 'template',
            'arg-L', 'lys-L', 'met-L', 'phe-L', 'Ec1', 'Ec2', 'Ec3', 'Ec4'
        ])
        subprocess.call([
            '../../Scripts/plot_biomassX4_vs_mediaItem.sh', 'template',
            'glc-D', 'Ec1', 'Ec2', 'Ec3', 'Ec4'
        ])

        # 7.- Compute fitness (measure to optimize):
        print('computing fitness...')
        # 7.1.- Determine endCycle: When glc is exhausted
        with open("biomass_vs_glc-D_template.txt", "r") as sourcesGLC:
            linesGLC = sourcesGLC.readlines()
            endCycle = 0
            for lineGLC in linesGLC:
                glcConc = float(lineGLC.split()[5])
                if (glcConc < 0.001):
                    endCycle = int(lineGLC.split()[0])
                    break
            # Glucose never ran out: use the last cycle in the log.
            if (endCycle == 0):
                endCycle = int(lineGLC.split()[0])

        # Compute cycle of exponential grow: growth rates are averaged over
        # the window [85% of endCycle, endCycle-1].
        expCycle = int(endCycle - 1)
        iniExpCycle = int(0.85 * endCycle)
        numCycles = int(expCycle - iniExpCycle)

        # 7.2.- Compute fitness elements
        finalBiomassV = linesGLC[expCycle].split()
        finalBiomass = float(finalBiomassV[1]) + float(
            finalBiomassV[2]) + float(finalBiomassV[3]) + float(
                finalBiomassV[4])
        GR1 = 0
        GR2 = 0
        GR3 = 0
        GR4 = 0
        # Each strain's growth rate: grep its flux lines in the exponential
        # window, take the BOF flux column, and average the positive values.
        # Any parsing/shell failure is treated as "no growth" (GR=0).
        try:
            GR1 = float(
                subprocess.check_output([
                    'egrep "fluxes\{.*\}\{1\}\{1\}\{1\}" flux_log_template.txt | egrep -A'
                    + str(numCycles) + ' "fluxes\{"' + str(iniExpCycle) +
                    '"\}\{1\}\{1\}\{1\}" | cut -d"=" -f2 | cut -d" " -f' +
                    str(numRxnGR1 + 1) +
                    ' | awk \'{if($1>0){sum+=$1}} END {if(NR>0){print sum/NR}}\''
                ], shell=True))
            if (GR1 < 0.0):
                GR1 = 0.0
        except:
            GR1 = 0.0
        try:
            GR2 = float(
                subprocess.check_output([
                    'egrep "fluxes\{.*\}\{1\}\{1\}\{2\}" flux_log_template.txt | egrep -A'
                    + str(numCycles) + ' "fluxes\{"' + str(iniExpCycle) +
                    '"\}\{1\}\{1\}\{2\}" | cut -d"=" -f2 | cut -d" " -f' +
                    str(numRxnGR2 + 1) +
                    ' | awk \'{if($1>0){sum+=$1}} END {if(NR>0){print sum/NR}}\''
                ], shell=True))
            if (GR2 < 0.0):
                GR2 = 0.0
        except:
            GR2 = 0.0
        try:
            GR3 = float(
                subprocess.check_output([
                    'egrep "fluxes\{.*\}\{1\}\{1\}\{3\}" flux_log_template.txt | egrep -A'
                    + str(numCycles) + ' "fluxes\{"' + str(iniExpCycle) +
                    '"\}\{1\}\{1\}\{3\}" | cut -d"=" -f2 | cut -d" " -f' +
                    str(numRxnGR3 + 1) +
                    ' | awk \'{if($1>0){sum+=$1}} END {if(NR>0){print sum/NR}}\''
                ], shell=True))
            if (GR3 < 0.0):
                GR3 = 0.0
        except:
            GR3 = 0.0
        try:
            GR4 = float(
                subprocess.check_output([
                    'egrep "fluxes\{.*\}\{1\}\{1\}\{4\}" flux_log_template.txt | egrep -A'
                    + str(numCycles) + ' "fluxes\{"' + str(iniExpCycle) +
                    '"\}\{1\}\{1\}\{4\}" | cut -d"=" -f2 | cut -d" " -f' +
                    str(numRxnGR4 + 1) +
                    ' | awk \'{if($1>0){sum+=$1}} END {if(NR>0){print sum/NR}}\''
                ], shell=True))
            if (GR4 < 0.0):
                GR4 = 0.0
        except:
            GR4 = 0.0

        print("exp cycle range (" + str(iniExpCycle) + "," + str(expCycle) +
              "): GR1: " + str(GR1) + " GR2: " + str(GR2) + " GR3: " +
              str(GR3) + " GR4: " + str(GR4))
        avgGR = float((GR1 + GR2 + GR3 + GR4) / 4)
        #
        # ratioGR: mean pairwise growth-rate balance over the 6 strain pairs.
        sumRatioGR = 0
        sumRatioGR = sumRatioGR + compute_ratioGR(GR1, GR2)
        sumRatioGR = sumRatioGR + compute_ratioGR(GR1, GR3)
        sumRatioGR = sumRatioGR + compute_ratioGR(GR1, GR4)
        sumRatioGR = sumRatioGR + compute_ratioGR(GR2, GR3)
        sumRatioGR = sumRatioGR + compute_ratioGR(GR2, GR4)
        sumRatioGR = sumRatioGR + compute_ratioGR(GR3, GR4)
        ratioGR = float(sumRatioGR / 6)
        #
        fitBiomass = float((finalBiomass - iniBiomass) / maxBiomass)
        # Weighted combination of the fitness components, per fitFunc.
        if (fitFunc == 'ratioGRavgGR'):
            fitness = float(0.5 * ratioGR + 0.5 * avgGR)
        elif (fitFunc == 'ratioGR'):
            fitness = ratioGR
        elif (fitFunc == 'ratioGRratioBiomass'):
            fitness = float(0.5 * ratioGR + 0.5 * fitBiomass)
        elif (fitFunc == 'ratioGR40_Biomass60'):
            fitness = float(0.4 * ratioGR + 0.6 * fitBiomass)
        elif (fitFunc == 'ratioGR30_Biomass70'):
            fitness = float(0.3 * ratioGR + 0.7 * fitBiomass)
        elif (fitFunc == 'ratioGR20_Biomass80'):
            fitness = float(0.2 * ratioGR + 0.8 * fitBiomass)

        print(" Total fitness: " + str(round(fitness, 6)) + ", avgGR: " +
              str(round(avgGR, 6)) + ", ratioGR: " + str(round(ratioGR, 6)) +
              " in cycle " + str(iniExpCycle) + " to " + str(expCycle))
        totfitness = totfitness + fitness
        fitnessList.append(fitness)
        sumTotCycle = sumTotCycle + expCycle
        sumAvgGR = sumAvgGR + avgGR
        sumAvgRatioGR = sumAvgRatioGR + ratioGR
        sumAvgFitBiomass = sumAvgFitBiomass + fitBiomass
        sumGR1 = sumGR1 + GR1
        sumGR2 = sumGR2 + GR2
        sumGR3 = sumGR3 + GR3
        sumGR4 = sumGR4 + GR4

        # Copy individual solution
        file = 'IndividualRunsResults/' + 'biomass_run' + str(i) + '_' + str(
            fitness) + '_' + str(expCycle) + '.pdf'
        shutil.move('biomass_vs_arg-L_lys-L_met-L_phe-L_template_plot.pdf',
                    file)
        if (dirPlot != ''):
            file2 = dirPlot + 'biomass_' + str(biomass1) + '_' + str(
                biomass2) + '_' + str(biomass3) + '_' + str(
                    biomass4) + '_' + str(arg) + '_' + str(lys) + '_' + str(
                        met) + '_' + str(phe) + '_run' + str(i) + '_' + str(
                            fitness) + '_' + str(expCycle) + '.pdf'
            shutil.copy(file, file2)
        file = 'IndividualRunsResults/' + 'total_biomass_log_run' + str(
            i) + '.txt'
        shutil.move('total_biomass_log_template.txt', file)
        file = 'IndividualRunsResults/' + 'media_log_run' + str(i) + '.txt'
        shutil.move('media_log_template.txt', file)
        file = 'IndividualRunsResults/' + 'flux_log_run' + str(i) + '.txt'
        shutil.move('flux_log_template.txt', file)

    # Aggregate the 'repeat' runs.
    avgfitness = totfitness / repeat
    sdfitness = statistics.stdev(fitnessList)
    avgAvgGR = sumAvgGR / repeat
    avgRatioGR = sumAvgRatioGR / repeat
    avgFitBiomass = sumAvgFitBiomass / repeat
    avgCycle = sumTotCycle / repeat
    avgGR1 = sumGR1 / repeat
    avgGR2 = sumGR2 / repeat
    avgGR3 = sumGR3 / repeat
    avgGR4 = sumGR4 / repeat
    print(
        "Fitness_function\tconfiguration\tfitness\tsd\tavgGR\tratioGR\tfitBiomass\tGR1\tGR2\tGR3\tGR4\texpCycle"
    )
    print(fitFunc + "\t" + str(biomass1) + ',' + str(biomass2) + ',' +
          str(biomass3) + ',' + str(biomass4) + ',' + str(arg) + ',' +
          str(lys) + ',' + str(met) + ',' + str(phe) + "\t" +
          str(round(avgfitness, 6)) + "\t" + str(round(sdfitness, 6)) + "\t" +
          str(round(avgAvgGR, 6)) + "\t" + str(round(avgRatioGR, 6)) + "\t" +
          str(round(avgFitBiomass, 6)) + "\t" + str(round(avgGR1, 6)) + "\t" +
          str(round(avgGR2, 6)) + "\t" + str(round(avgGR3, 6)) + "\t" +
          str(round(avgGR4, 6)) + "\t" + str(round(avgCycle, 1)))
    with open(dirPlot + "configurationsResults" + fitFunc + ".txt",
              "a") as myfile:
        myfile.write(
            "Fitness_function\tconfiguration\tfitness\tsd\tavgGR\tratioGR\tfitBiomass\tGR1\tGR2\tGR3\tGR4\texpCycle\n"
        )
        myfile.write(fitFunc + "\t" + str(biomass1) + ',' + str(biomass2) +
                     ',' + str(biomass3) + ',' + str(biomass4) + ',' +
                     str(arg) + ',' + str(lys) + ',' + str(met) + ',' +
                     str(phe) + "\t" + str(round(avgfitness, 6)) + "\t" +
                     str(round(sdfitness, 6)) + "\t" +
                     str(round(avgAvgGR, 6)) + "\t" +
                     str(round(avgRatioGR, 6)) + "\t" +
                     str(round(avgFitBiomass, 6)) + "\t" +
                     str(round(avgGR1, 6)) + "\t" + str(round(avgGR2, 6)) +
                     "\t" + str(round(avgGR3, 6)) + "\t" +
                     str(round(avgGR4, 6)) + "\t" + str(round(avgCycle, 1)) +
                     "\n")
    print("Avg.fitness(sd):\t" + str(avgfitness) + "\t" + str(sdfitness) +
          "\n")
    # High variance across repeats means an unreliable configuration:
    # discard it by zeroing the fitness.
    if (sdfitness > 0.1):
        avgfitness = 0.0
    return avgfitness, sdfitness
def test_exception_on_bad_patterns(self):
    """Verify that passing a bare string where a file list is expected fails.

    A plain string ('test') instead of a list of file names must make
    edit_files raise TypeError.
    """
    self.assertRaises(TypeError, massedit.edit_files, 'test', [], [])
def create_cluster(ctx, base, network, cluster_name, pd_count, tikv_count, tidb_version): cluster_dir = os.path.join(ctx.obj['etc'], cluster_name) # config pdservices = [{ 'name': 'pd_{}_{}'.format(cluster_name, index), 'base': os.path.abspath(base), 'image': 'pingcap/pd:latest', } for index in range(pd_count)] tikvservices = [{ 'name': 'tikv_{}_{}'.format(cluster_name, index), 'base': os.path.abspath(base), 'image': 'pingcap/tikv:latest', } for index in range(tikv_count)] tidb = { 'base': os.path.abspath(base), 'image': 'pingcap/tidb:{}'.format(tidb_version), } monitor = {'base': os.path.abspath(base)} # ensure dir exists ensure_dir(base) ensure_dir(os.path.join(base, 'data')) ensure_dir(os.path.join(base, 'logs')) ensure_dir(cluster_dir) # create config shutil.copytree('./config', os.path.join(base, 'config')) # edit some configs filenames = [ os.path.join( base, 'config/grafana/provisioning/datasources/datasources.yaml') ] massedit.edit_files(filenames, [ "re.sub(r'prometheus:9090', 'prometheus_{}:9090', line)".format( cluster_name) ], dry_run=False) filenames = [os.path.join(base, 'prometheus.yml')] massedit.edit_files(filenames, [ "re.sub(r'pushgateway:9091', 'pushgateway_{}:9091', line)".format( cluster_name) ], dry_run=False) # keep docker-compose.yml tp = Jinja2(__name__) content = tp.render_template('/docker_compose.jinja', network=network, pdservices=pdservices, tikvservices=tikvservices, tidb=tidb, monitor=monitor, cluster_name=cluster_name) filename = os.path.join(cluster_dir, 'docker-compose.yml') with open(filename, 'w') as f: f.write(content) project = project_from_options(cluster_dir, {}) project.up(detached=True) click.echo( click.style('cluster {} created'.format(cluster_name), fg='green'))
def synKtPHAFLYCOP_oneConf(sucrPer=30,
                           biomassSynecho=3.5,
                           biomassKT=0.1,
                           nh4=18,
                           fitFunc='MaxPHA',
                           maxCycles=1000,
                           dirPlot='',
                           repeat=3):
    '''
    Evaluate one configuration of the Synechococcus / P. putida KT PHA consortium.

    Call: avgFitness, sdFitness = cleaning_oneConf(sucrPer,biomassSynecho,biomassKT,nh4)

    INPUTS: sucrPer: %sucrose (in function of carbon source)
            biomassSynecho: Initial biomass synecho
            biomassKT: Initial biomass putida (or ratio among both)
            nh4: initial NH4 in the media
            fitFunc: fitness function to optimize.
            maxCycles: cycles in COMETS run.
            dirPlot: copy of the graphs with several run results.
            repeat: number of runs with the same configuration.
    OUTPUT: avgFitness: average fitness of 'repeat' COMETS runs with the same configuration (due to it is not deterministic)
            sdFitness: standard deviation of fitness during 'repeat' COMETS runs (see above)
    '''
    if not (os.path.exists('ModelsInput/iSynecho_cscBandSPS_over.mat')):
        initialize_models()
    # Determine initial biomasses.
    biomass1 = biomassSynecho
    biomass2 = biomassKT
    maxBiomass = 5  # maximum to normalize increment in biomass in fitBiomass as part of fitness function.
    maxPha = 25  # maximum to normalize PHA production as part of fitness function.
    if (maxCycles > -1):
        maxCycles2 = int(maxCycles)
    else:
        maxCycles2 = 1000
    print("Fitness function:" + fitFunc)

    # Single GEMs parameter modifications
    # ===================================
    # Only build the modified models once; the .cmt file marks a prepared run.
    if not (os.path.exists('strain_1_tmp.mat.cmt')):
        # 1.1.- [COBRApy] Establish modifications in model 1
        model = cobra.io.load_matlab_model(
            'ModelsInput/iSynecho_cscBandSPS_over.mat')
        # To un-limit the sucrose production, for the flux variability analysis
        model.reactions.get_by_id('SUCRtex').bounds = (-1000, 1000)
        dictSucrValue = cobra.flux_analysis.variability.flux_variability_analysis(
            model, {'EX_sucr(e)'}, fraction_of_optimum=1 - (sucrPer / 100))
        sucrLimit = dictSucrValue['EX_sucr(e)']['maximum']
        # Pin sucrose secretion at the FVA-derived rate for this %sucrose.
        model.reactions.get_by_id('SUCRtex').bounds = (sucrLimit, 1000)
        model.reactions.get_by_id('EX_sucr(e)').bounds = (sucrLimit, sucrLimit)
        cobra.io.save_matlab_model(model, 'strain_1_tmp.mat', 'model')
        del (model)

        # 1.2.1.- [COBRApy] Establish modifications in model 2.1 (when KT is growing)
        # To put the same uptakes in transporters (*tex) that in Exchange reactions,
        # due to COMETS limits with *tex rxn's rather than EX_ rxn's.
        model = cobra.io.load_matlab_model(
            'ModelsInput/iJN1411_sucr_notNO3_PHA.mat')
        model.reactions.get_by_id('EX_sucr(e)').bounds = (-3.1, 0)
        model.reactions.get_by_id('SUCRtex').bounds = (0, 3.1)
        cobra.io.save_matlab_model(model, 'strain_2_tmp.mat', 'model')

        # 1.2.2.- [COBRApy] Establish modifications in model 2.2 (when NH4 is
        # exhausted and it produces PHA)
        # To put the same uptakes in transporters (*tex) that in Exchange reactions,
        # due to COMETS limits with *tex rxn's rather than EX_ rxn's.
        model = cobra.io.load_matlab_model(
            'ModelsInput/iJN1411_sucr_notNO3_PHA.mat')
        model.reactions.get_by_id('EX_nh4(e)').bounds = (0, 0)
        model.reactions.get_by_id('EX_sucr(e)').bounds = (-3.1, 0)
        model.reactions.get_by_id('SUCRtex').bounds = (0, 3.1)
        model.reactions.get_by_id('C80aPHAtex').bounds = (0, 1000)
        model.reactions.get_by_id('DM_C80aPHA').bounds = (0, 0)
        # PHA production proportional to sucrose intakes
        coeff = float(
            1.83 / 3.1
        )  # 0.59032258: 3.1 sucr generates 1.83 PHA in the single model in COBRA
        model.reactions.get_by_id('SUCRtex').subtract_metabolites(
            {model.metabolites.get_by_id('C80aPHA[c]'): coeff})
        model.reactions.get_by_id('SUCRtex').subtract_metabolites(
            {model.metabolites.get_by_id('C80aPHA[e]'): -coeff})
        cobra.io.save_matlab_model(model, 'strain_2_b_tmp.mat', 'model')
        del (model)

        # 2.- [python] Convert the modified .mat models to COMETS format
        mat_to_comets('strain_1_tmp.mat')
        mat_to_comets('strain_2_tmp.mat')
        mat_to_comets('strain_2_b_tmp.mat')

        # Community parameter modifications
        # =================================
        # 4.- [shell script] Write automatically the COMETS parameter about initial
        # biomass of 3 strains, depending on proportions, and initial media
        # concentrations (placeholders XXX/YYY/XXNH4XX in the layout template).
        massedit.edit_files(['synKtPHA_layout_template.txt'],
                            ["re.sub(r'XXX','" + str(biomass1) + "',line)"],
                            dry_run=False)
        massedit.edit_files(['synKtPHA_layout_template.txt'],
                            ["re.sub(r'YYY','" + str(biomass2) + "',line)"],
                            dry_run=False)
        massedit.edit_files(['synKtPHA_layout_template.txt'],
                            ["re.sub(r'XXNH4XX','" + str(nh4) + "',line)"],
                            dry_run=False)
    # end-if building models

    # 5.- [COMETS by command line] Run COMETS
    if not (os.path.exists('IndividualRunsResults')):
        os.makedirs('IndividualRunsResults')
    totfitness = 0
    sumPha = 0
    sumSucr = 0
    fitnessList = []

    # To repeat X times, due to random behaviour in COMETS. In this synKtPHA case,
    # repeat could be 1, because we assume sucrose must be produced by Synecho
    # before KT takes it, so we fix the strain models run per cycle to
    # 1)Synecho 2)KT.
    for i in range(repeat):
        # Phase 1: run COMETS until NH4 exhaustion (KT growth phase).
        with open("output1.txt", "w") as f:
            subprocess.call(['./comets_scr', 'comets_script_template'],
                            stdout=f)

        # 6.- [R call] Run script to generate one graph:
        title = str(sucrPer) + '-' + str(biomass1) + '-' + str(
            biomass2) + '-' + str(nh4)
        print(title)
        subprocess.call([
            "../../Scripts/plot_biomassX2_vs_3mediaItem.sh 'template1' 'sucr' 'nh4' 'C80aPHA' '"
            + str(50) + "' '" + str(title) +
            "' 'blue' 'black' 'darkmagenta' 'Synecho' 'KT'"
        ], shell=True)

        # 7.1.- Determine endCycle: when nh4 is exhausted
        with open("biomass_vs_sucr_nh4_C80aPHA_template1.txt",
                  "r") as sources:
            lines = sources.readlines()
            iniPointV = lines[0].split()
            iniBiomass = float(iniPointV[1]) + float(iniPointV[2])
            endCycle = 0
            for line in lines:
                #endCycle=int(line.split()[0])
                nh4Conc = float(line.split()[4])
                if (nh4Conc < float(0.01)):
                    endCycle = int(line.split()[0])
                    break
            if (endCycle == 0):
                endCycle = int(line.split()[0])
                return 0, 0  # If after 72h the NH4 is not exhausted, PHA will not be generated!!
        finalBiomassV = lines[endCycle].split()
        biomass1New = float(finalBiomassV[1])
        biomass2New = float(finalBiomassV[2])

        # Get metabolite value in endCycle (at the end of first phase) in a
        # python dictionary
        subprocess.call([
            "../../Scripts/get_media_composition_oneCycle.sh 'template1' '" +
            str(endCycle) + "'"
        ], shell=True)
        fileMedia = "media_cycle_" + str(endCycle) + ".txt"
        metDict = {}
        with open(fileMedia) as f:
            reader = csv.reader(f, delimiter='\t')
            metDict = {rows[0]: rows[1] for rows in reader}

        # Read and write new layout: phase-2 layout starts from the phase-1
        # end state (biomasses and media), swapping in the PHA-producing model.
        if os.path.exists(
                'synKtPHA_layout_template2.txt'):  # delete previous content
            os.remove("synKtPHA_layout_template2.txt")
        with open("synKtPHA_layout_template.txt", "r") as layIn:
            with open("synKtPHA_layout_template2.txt", "a") as layOut:
                lines = layIn.readlines()
                for line in lines:
                    if 'model_file' in line:
                        layOut.write(
                            "model_file\tstrain_1_tmp.mat.cmt\tstrain_2_b_tmp.mat.cmt\n"
                        )
                    elif '[e]' in line:
                        met = re.sub('\[e\]', r'', line.split()[0])
                        layOut.write("\t\t\t" + met + "[e]\t" + metDict[met] +
                                     "\n")
                    elif line.startswith('\t\t0\t0'):
                        layOut.write("\t\t0\t0\t" + str(biomass1New) + "\t" +
                                     str(biomass2New) + "\n")
                    elif 'maxCycles' in line:
                        layOut.write(" maxCycles = " + str(maxCycles2) + "\n")
                    elif 'totalbiomasslogname' in line:
                        layOut.write(
                            " totalbiomasslogname = total_biomass_log_template2.txt\n"
                        )
                    elif 'medialogname' in line:
                        layOut.write(
                            " medialogname = media_log_template2.txt\n")
                    elif 'fluxlogname' in line:
                        layOut.write(
                            " fluxlogname = flux_log_template2.txt\n")
                    else:  # if not line to change, directly copy them
                        layOut.write(line)
        # Rename files to avoid to change COMETS files
        shutil.move('synKtPHA_layout_template.txt',
                    'synKtPHA_layout_template1.txt')
        shutil.move('synKtPHA_layout_template2.txt',
                    'synKtPHA_layout_template.txt')

        # 2nd COMETS run (phase 2: PHA production)
        with open("output2.txt", "w") as f:
            subprocess.call(['./comets_scr', 'comets_script_template'],
                            stdout=f)
        # [R call] Run script to generate one graph:
        title = str(sucrPer) + '-' + str(biomass1New) + '-' + str(
            biomass2New) + '-' + str(nh4)
        print(title)
        subprocess.call([
            "../../Scripts/plot_biomassX2_vs_3mediaItem.sh 'template2' 'sucr' 'nh4' 'C80aPHA' '"
            + str(maxCycles2 / 10) + "' '" + str(title) +
            "' 'blue' 'black' 'darkmagenta' 'Synecho' 'KT'"
        ], shell=True)

        # Generate combined files (biomass, flux, media) with output COMETS 1 and 2:
        # A.-biomass
        # head 1-n template1.txt + template2.txt (without header or the first line wiht the same values)
        subprocess.call([
            "head -n" + str(endCycle + 1) +
            " total_biomass_log_template1.txt > total_biomass_log_template.txt"
        ], shell=True)
        subprocess.call([
            "tail -n" + str(maxCycles2) +
            " total_biomass_log_template2.txt > temp_biomass2.txt"
        ], shell=True)
        count = endCycle + 1
        with open("temp_biomass2.txt", "r") as fin:
            with open("total_biomass_log_template.txt", "a") as fout:
                lines = fin.readlines()
                for line in lines:
                    # Renumber phase-2 cycles to follow phase-1's last cycle.
                    fout.write(
                        str(count) + "\t" + str(line.split()[1]) + "\t" +
                        str(line.split()[2]) + "\n")
                    count = count + 1
        os.remove("temp_biomass2.txt")

        # B.-media
        #egrep media_n+1{1} --> no.line
        #head 1-no.line template1.txt + template 2.txt, without header and replacing media_i by i=i+n
        # First file fragment
        cmd = "grep -n 'media_" + str(
            endCycle + 1) + "{1}' media_log_template1.txt | cut -d: -f1"
        numLine1 = int(
            subprocess.check_output(cmd, shell=True).decode('utf-8').strip())
        subprocess.call([
            "head -n" + str(numLine1 - 1) +
            " media_log_template1.txt > media_log_template.txt"
        ], shell=True)
        # Second file fragment, replacing no.cycles consecutive to the last in
        # first fragment
        cmd = "grep -n 'media_1{1}' media_log_template2.txt | cut -d: -f1"
        numLine2 = int(
            subprocess.check_output(cmd, shell=True).decode('utf-8').strip())
        cmd = "wc -l media_log_template2.txt | cut -d' ' -f1"
        totLines2 = int(
            subprocess.check_output(cmd, shell=True).decode('utf-8').strip())
        subprocess.call([
            "tail -n" + str(totLines2 - numLine2 + 1) +
            " media_log_template2.txt > temp_media2.txt"
        ], shell=True)
        for x in range(
                1, maxCycles2 +
                1):  # +1 because max in range(1,range) is no.iterations+1
            subprocess.call([
                "egrep '^media_" + str(x) +
                "\{' temp_media2.txt | sed 's/media_" + str(x) + "{/media_" +
                str(x + endCycle) + "{/' >> media_log_template.txt"
            ], shell=True)
        os.remove("temp_media2.txt")

        # C.-fluxes
        # fluxes{cycle}{1}{1}{modelNumber}
        # First file fragment
        cmd = "grep -n 'fluxes{" + str(
            endCycle + 1) + "}{1}{1}{1}' flux_log_template1.txt | cut -d: -f1"
        numLine1 = int(
            subprocess.check_output(cmd, shell=True).decode('utf-8').strip())
        subprocess.call([
            "head -n" + str(numLine1 - 1) +
            " flux_log_template1.txt > flux_log_template.txt"
        ], shell=True)
        # Second file fragment, replacing no.cycles consecutive to the last in
        # first fragment
        # Not to remove any cycle, because the first one is different, given the
        # last biomass and media composition in first part. Here there isn't cycle=0.
        for x in range(1, maxCycles2 + 1):
            subprocess.call([
                "egrep '^fluxes\{" + str(x) +
                "\}' flux_log_template2.txt | sed 's/fluxes{" + str(x) +
                "}/fluxes{" + str(x + endCycle) +
                "}/' >> flux_log_template.txt"
            ], shell=True)

        # Plot combined
        title = str(sucrPer) + '-' + str(biomass1) + '-' + str(
            biomass2) + '-' + str(nh4)
        print(title)
        subprocess.call([
            "../../Scripts/plot_biomassX2_vs_4mediaItem.sh 'template' 'so4' 'no3' 'pi' 'hco3' '"
            + str(100) + "' '" + str(title) +
            "' 'blue' 'cyan' 'black' 'darkmagenta' 'Synecho' 'KT'"
        ], shell=True)

        # 7.- Compute fitness (measure to optimize):
        # 7.1.- Determine endCycle: maxCycle
        cmd = "wc -l total_biomass_log_template.txt | cut -d' ' -f1"
        maxCycle = -1 + int(
            subprocess.check_output(cmd, shell=True).decode('utf-8').strip())
        with open("biomass_vs_so4_no3_pi_hco3_template.txt", "r") as sources:
            lines = sources.readlines()
            iniPointV = lines[0].split()
            iniBiomass = float(iniPointV[1]) + float(iniPointV[2])
            if (maxCycles > -1):
                endCycle = maxCycles2
            else:  # compute end when no3, so4 or pi is exhausted
                endCycle = 0
                for line in lines:
                    so4Conc = float(line.split()[3])
                    no3Conc = float(line.split()[4])
                    piConc = float(line.split()[5])
                    #hco3Conc=float(line.split()[6])
                    if (so4Conc < float(0.0001) or no3Conc < float(0.0001)
                            or piConc < float(0.0001)):  #
                        #if(hco3Conc<float(0.0001)):
                        endCycle = int(line.split()[0])
                        break
                if (endCycle == 0):
                    endCycle = maxCycles2
        finalBiomassV = lines[endCycle].split()

        # To measure products
        subprocess.call([
            "../../Scripts/plot_biomassX2_vs_3mediaItem.sh 'template' 'sucr' 'nh4' 'C80aPHA' '"
            + str(float(endCycle / 10)) + "' '" + str(title) +
            "' 'blue' 'black' 'darkmagenta' 'Synecho' 'KT'"
        ], shell=True)
        with open("biomass_vs_sucr_nh4_C80aPHA_template.txt", "r") as sources:
            lines = sources.readlines()
            finalLineV = lines[endCycle].split()
            totSucr = float(finalLineV[3])
            totPha = float(finalLineV[5])
        print(str(totPha) + " PHA in cycle " + str(endCycle))
        finalBiomass = float(finalBiomassV[1]) + float(finalBiomassV[2])

        # 7.2.- Compute fitness: maximize PHA
        fitTime = 1 - (float(endCycle) / float(maxCycle))
        fitBiomass = float((finalBiomass - iniBiomass) / maxBiomass)
        fitPHA = float(totPha / maxPha)
        if (fitFunc == 'MaxPHA'):
            fitness = fitPHA
        elif (fitFunc == 'PHA_Biomass'):
            fitness = float(0.5 * fitPHA + 0.5 * fitBiomass)
        print(" Fitness: " + str(round(fitness, 6)) + " in cycle " +
              str(endCycle))
        totfitness = totfitness + fitness
        fitnessList.append(fitness)
        sumPha = sumPha + totPha
        sumSucr = sumSucr + totSucr

        # Copy individual solution
        file = 'IndividualRunsResults/' + 'biomass_vs_sucr_nh4_C80aPHA_run' + str(
            i) + '_' + str(fitness) + '_' + str(endCycle) + '.pdf'
        shutil.move('biomass_vs_sucr_nh4_C80aPHA_template_plot.pdf', file)
        if (dirPlot != ''):
            file2 = dirPlot + 'biomass_' + str(sucrPer) + '_' + str(
                biomass1) + '_' + str(biomass2) + '_' + str(
                    nh4) + '_run' + str(i) + '_' + str(fitness) + '_' + str(
                        endCycle) + '.pdf'
            shutil.move(file, file2)
        file = 'IndividualRunsResults/' + 'total_biomass_log_run' + str(
            i) + '.txt'
        shutil.move('total_biomass_log_template.txt', file)
        file = 'IndividualRunsResults/' + 'media_log_run' + str(i) + '.txt'
        shutil.move('media_log_template.txt', file)
        file = 'IndividualRunsResults/' + 'flux_log_run' + str(i) + '.txt'
        shutil.move('flux_log_template.txt', file)

    # Aggregate the 'repeat' runs.
    avgfitness = totfitness / repeat
    if (repeat > 1):
        sdfitness = statistics.stdev(fitnessList)
    else:
        sdfitness = 0.0
    avgPha = sumPha / repeat
    avgSucr = sumSucr / repeat
    print(
        "Fitness_function\tconfiguration\tfitness\tsd\tC80aPHA(mM)\tsucr(mM)\tendCycle"
    )
    print(fitFunc + "\t" + str(sucrPer) + "," + str(biomassSynecho) + "," +
          str(biomassKT) + "," + str(nh4) + "\t" + str(round(avgfitness, 6)) +
          "\t" + str(sdfitness) + "\t" + str(round(avgPha, 6)) + "\t" +
          str(round(avgSucr, 6)) + "\t" + str(endCycle))
    with open(dirPlot + "configurationsResults" + fitFunc + ".txt",
              "a") as myfile:
        myfile.write(
            "Fitness_function\tconfiguration\tfitness\tsd\tC80aPHA(mM)\tsucr(mM)\tendCycle\n"
        )
        myfile.write(fitFunc + "\t" + str(sucrPer) + "," +
                     str(biomassSynecho) + "," + str(biomassKT) + "," +
                     str(nh4) + "\t" + str(round(avgfitness, 6)) + "\t" +
                     str(sdfitness) + "\t" + str(round(avgPha, 6)) + "\t" +
                     str(round(avgSucr, 6)) + "\t" + str(endCycle) + "\n")
    print("Avg.fitness(sd):\t" + str(avgfitness) + "\t(" + str(sdfitness) +
          ")\n")
    # High variance across repeats means an unreliable configuration:
    # discard it by zeroing the fitness.
    if (sdfitness > 0.1):
        avgfitness = 0.0
    return avgfitness, sdfitness
def remove(self, host, zone):
    """Strip the connection variables from the inventory record for host.zone.

    Rewrites the 'hosts' file in place via massedit, replacing each matching
    line with just the '<host>.<zone>' prefix (group 1 of the pattern).
    """
    import re  # local import: only needed to escape the user-supplied values

    click.echo(" - Removing record [{}.{}]".format(host, zone))
    filenames = ['hosts']
    # Escape host and zone so regex metacharacters in them (zones routinely
    # contain dots, e.g. 'example.com') match literally, and make the
    # host/zone separator an escaped dot instead of the any-character '.'.
    # Without this, the pattern could match and clobber unrelated lines.
    regex = "re.sub('({}\\.{}).*(ansible_ssh_host=)?(.*)', r'\\1', line)".format(
        re.escape(host), re.escape(zone))
    massedit.edit_files(filenames, [regex], dry_run=False)