def run (out=None, omit_unicode_experiment=False): if (out is None): out = sys.stdout out.write(boost.python.platform_info) tag = libtbx.env.under_dist("boost", "TAG") if (op.isfile(tag)): tag = open(tag).read().strip() else: tag = None print >> out, "boost/TAG:", tag print >> out, "os.name:", os.name print >> out, "sys.platform:", sys.platform print >> out, "sys.byteorder:", sys.byteorder print >> out, "platform.platform():", platform.platform() print >> out, "platform.architecture():", platform.architecture() for attr in ["division_by_zero", "invalid", "overflow"]: attr = "floating_point_exceptions.%s_trapped" % attr print >> out, "%s:" % attr, eval("boost.python.%s" % attr) print >> out, "number of processors:", introspection.number_of_processors( return_value_if_unknown="unknown") introspection.machine_memory_info().show(out=out) try: import thread except ImportError: print >> out, "import thread: NO" else: print >> out, "import thread: OK" print "Division operator semantics: %s division" % (div_probe() / 0) c = getattr(boost.python.ext, "str_or_unicode_as_char_list", None) if (c is not None and not omit_unicode_experiment): print >> out, '"hello" =', c("hello") print >> out, 'u"hello" =', c(u"hello") e = u"\u00C5".encode("utf-8", "strict") print >> out, 'u"\u00C5" =', c(u"\u00C5"), 'as utf-8 =', c(e) print >> out, "LATIN CAPITAL LETTER A WITH RING ABOVE =", e from libtbx.utils import format_cpu_times print format_cpu_times()
def run(args):
  """For each selected space group, loop over unimodular change-of-basis
  matrices and count the "special" ones: pure origin shifts that are NOT
  allowed origin shifts (so the transformed group differs from the original).
  The observed counts are checked against the module-level `special` dict.
  """
  if ("--full" in args):
    to_do = range(1, 230 + 1)  # all 230 space group numbers
  elif ("--special" in args):
    to_do = sorted(special.keys())
  else:
    to_do = [75, 151]  # quick default subset
  for space_group_number in to_do:
    sgi = sgtbx.space_group_info(number=space_group_number)
    sgi.show_summary(prefix="")
    sys.stdout.flush()
    n_special = 0
    for m in scitbx.math.unimodular_generator(range=1).all():
      cb_op = sgtbx.change_of_basis_op(sgtbx.rt_mx(sgtbx.rot_mx(m,1), 1)) \
        .new_denominators(12, 144)
      cb_sgi = sgi.change_basis(cb_op=cb_op)
      cb_op_ref = cb_sgi.change_of_basis_op_to_reference_setting()
      ref_sgi = cb_sgi.change_basis(cb_op=cb_op_ref)
      # Round trip back to the reference setting must recover the group.
      assert ref_sgi.group() == sgi.group()
      c = cb_op_ref.c()
      if (c.r().is_unit_mx() and c.t().num() != (0, 0, 0)):
        # Pure translation part: a candidate non-trivial origin shift.
        n_special += 1
        cb_ref_sgi = sgi.change_basis(cb_op=cb_op_ref)
        print " cb_op=%s -> %s" % (str(cb_op.c()), cb_ref_sgi.type().
          universal_hermann_mauguin_symbol())
        sys.stdout.flush()
        # verify that c.t() is not an allowed origin shift
        assert cb_ref_sgi.group() != sgi.group()
    assert special.get(space_group_number, 0) == n_special
  print format_cpu_times()
def run(args):
  """Exercise libtbx.utils helpers; expects to be called with no arguments."""
  assert len(args) == 0
  exercise_forward_compatibility()
  exercise_misc()
  assert utils.sequence_index_dict(["a", "b"]) == {"a": 0, "b": 1}
  # flat_list flattens arbitrarily nested lists; bare scalars are wrapped.
  assert utils.flat_list(0) == [0]
  assert utils.flat_list([1, 2, 3]) == [1, 2, 3]
  assert utils.flat_list([1, [2, 3, 4], 3]) == [1, 2, 3, 4, 3]
  assert utils.flat_list([1, [2, 3, 4], [[3, 4], [5, 6]]]) \
    == [1, 2, 3, 4, 3, 4, 5, 6]
  # format_exception() must render type and message of the active exception.
  try: raise RuntimeError("Trial")
  except KeyboardInterrupt: raise
  except Exception:
    assert utils.format_exception() == "RuntimeError: Trial"
  else: raise Exception_expected  # sentinel presumably defined at module level
  # For assertion failures the formatted text should name this file and line.
  try: assert 1 == 2
  except KeyboardInterrupt: raise
  except Exception:
    s = utils.format_exception()
    assert s.startswith("AssertionError: ")
    assert s.find("tst_utils.py line ") >= 0
  else: raise Exception_expected
  exercise_indented_display()
  exercise_approx_equal()
  exercise_file_utils()
  exercise_dir_utils()
  print utils.format_cpu_times()
def run():
  """Rebuild the rotamer and Ramachandran pickle caches from the
  rotarama data directory, restoring the CWD afterwards (the rebuild
  presumably changes directory -- hence the save/restore)."""
  initial_current_working_directory = os.getcwd()
  rotamer_data_dir = rotamer_eval.find_rotarama_data_dir(optional=True)
  if rotamer_data_dir is None:
    print ' Rebuilding rotarama library skipped. Needs rotamer library.'
    return
  target_db = rotamer_eval.open_rotarama_dlite(
    rotarama_data_dir=rotamer_data_dir)
  # rebuild_pickle_files(data_dir=rotamer_data_dir,
  #   file_prefix="rota500-",
  #   target_db=target_db,
  #   amino_acids=rotamer_eval.aminoAcids)
  rebuild_pickle_files(data_dir=rotamer_data_dir,
    file_prefix="rota8000-",
    target_db=target_db,
    amino_acids=rotamer_eval.aminoAcids)
  #
  ramachandran_data_dir = rotamer_eval.find_rotarama_data_dir()
  target_db = rotamer_eval.open_rotarama_dlite(
    rotarama_data_dir=ramachandran_data_dir)
  rebuild_pickle_files(data_dir=rotamer_data_dir,
    file_prefix="rama8000-",
    target_db=target_db,
    amino_acids=ramachandran_eval.aminoAcids_8000)
  # rebuild_pickle_files(data_dir=rotamer_data_dir,
  #   file_prefix="rama500-",
  #   target_db=target_db,
  #   amino_acids=ramachandran_eval.aminoAcids)
  os.chdir(initial_current_working_directory)
  print format_cpu_times()
def run(args): assert len(args) == 0 have_cma_es = libtbx.env.has_module("cma_es") if not have_cma_es: print "Skipping some tests: cma_es module not available or not configured." print names = ["easom", "rosenbrock", "ackley", "rastrigin"] for name in names: print "****", name, "****" if name == "easom": start = flex.double([0.0, 0.0]) else: start = flex.double([4, 4]) for ii in range(1): test_lbfgs(name) if have_cma_es: test_cma_es(name) test_differential_evolution(name) test_cross_entropy(name) test_simplex(name) test_dssa(name) print print from libtbx.utils import format_cpu_times print format_cpu_times()
def pdb_inp_generator(file_infos, chunk_n, chunk_i): import iotbx.pdb from libtbx.utils import format_cpu_times import time print "len(file_infos):", len(file_infos) sys.stdout.flush() t0_total = time.time() try: for i_file, file_info in enumerate(file_infos): if (i_file % chunk_n != chunk_i): continue print "i_file:", i_file, file_info.name sys.stdout.flush() pdb_code = pdb_code_from_file_name(file_name=file_info.name) try: yield pdb_info( chunk_n=chunk_n, chunk_i=chunk_i, file_info=file_info, pdb_code=pdb_code, pdb_inp=iotbx.pdb.input(file_name=file_info.name)) except KeyboardInterrupt: raise except Exception: report_exception(file_name=file_name) finally: sys.stderr.flush() print "total time: %.2f" % (time.time() - t0_total) print format_cpu_times() sys.stdout.flush()
def run(): if (not libtbx.env.has_module("reduce")): print "Reduce not installed" return exercise_adopting_ref_tors_restraints_h() print format_cpu_times()
def run():
  """Drive the iotbx.pdb test battery; "--verbose" on the command line is
  forwarded to the tests that accept it. Tests needing phenix_regression/pdb
  are skipped when that repository is absent."""
  verbose = "--verbose" in sys.argv[1:]
  exercise_extract_authors()
  exercise_pdb_input_error_handling()
  exercise_systematic_chain_ids()
  exercise_amino_acid_codes()
  exercise_validate_sequence()
  exercise_records()
  exercise_make_atom_with_labels()
  exercise_combine_unique_pdb_files()
  exercise_pdb_codes_fragment_files()
  exercise_format_records()
  exercise_format_and_interpret_cryst1()
  exercise_remark_290_interpretation()
  exercise_residue_name_plus_atom_names_interpreter()
  regression_pdb = libtbx.env.find_in_repositories(
    relative_path="phenix_regression/pdb", test=op.isdir)
  if regression_pdb is None:
    print "Skipping some tests: phenix_regression/pdb not available"
  else:
    exercise_format_fasta(regression_pdb=regression_pdb)
    exercise_merge_files_and_check_for_overlap(regression_pdb=regression_pdb)
    exercise_mtrix(regression_pdb=regression_pdb)
    exercise_mtrix_format_pdb_string()
    exercise_BIOMT()
    exercise_header_misc(regression_pdb=regression_pdb)
  # xray structure round trip both without and with anisotropic U.
  for use_u_aniso in (False, True):
    exercise_xray_structure(use_u_aniso, verbose=verbose)
  write_icosahedron()
  print format_cpu_times()
def run():
  """Drive the iotbx.pdb test battery (duplicate of the sibling run();
  "--verbose" is forwarded; regression-data tests skipped when absent)."""
  verbose = "--verbose" in sys.argv[1:]
  exercise_extract_authors()
  exercise_pdb_input_error_handling()
  exercise_systematic_chain_ids()
  exercise_amino_acid_codes()
  exercise_validate_sequence()
  exercise_records()
  exercise_make_atom_with_labels()
  exercise_combine_unique_pdb_files()
  exercise_pdb_codes_fragment_files()
  exercise_format_records()
  exercise_format_and_interpret_cryst1()
  exercise_remark_290_interpretation()
  exercise_residue_name_plus_atom_names_interpreter()
  regression_pdb = libtbx.env.find_in_repositories(
    relative_path="phenix_regression/pdb",
    test=op.isdir)
  if (regression_pdb is None):
    print "Skipping some tests: phenix_regression/pdb not available"
  else:
    exercise_format_fasta(regression_pdb=regression_pdb)
    exercise_merge_files_and_check_for_overlap(regression_pdb=regression_pdb)
    exercise_mtrix(regression_pdb=regression_pdb)
    exercise_mtrix_format_pdb_string()
    exercise_BIOMT()
    exercise_header_misc(regression_pdb=regression_pdb)
  for use_u_aniso in (False, True):
    exercise_xray_structure(use_u_aniso, verbose=verbose)
  write_icosahedron()
  print format_cpu_times()
def run(args):
  """Run the tardy joint-library tests; takes no arguments."""
  assert len(args) == 0
  exercise_joint_lib_six_dof_aja_simplified()
  exercise_with_test_cases_tardy_pdb()
  exercise_fixed_vertices()
  exercise_pickle()
  print format_cpu_times()
def run(args):
  """Exercise libtbx.utils helpers. The retrieve_unless_exists test only
  runs when explicitly requested via --exercise-retrieve-unless-exists
  (presumably because it touches the network or filesystem -- confirm)."""
  assert len(args) == 0
  if '--exercise-retrieve-unless-exists' in args:
    exercise_retrieve_unless_exists()
  else:
    print 'Skipping exercise_retrieve_unless_exists'
  exercise_forward_compatibility()
  exercise_misc()
  assert utils.sequence_index_dict(["a", "b"]) == {"a": 0, "b": 1}
  # flat_list flattens arbitrarily nested lists; bare scalars are wrapped.
  assert utils.flat_list(0) == [0]
  assert utils.flat_list([1,2,3]) == [1,2,3]
  assert utils.flat_list([1,[2,3,4],3]) == [1,2,3,4,3]
  assert utils.flat_list([1,[2,3,4],[[3,4],[5,6]]]) == [1,2,3,4,3,4,5,6]
  # format_exception() must render type and message of the active exception.
  try: raise RuntimeError("Trial")
  except KeyboardInterrupt: raise
  except Exception:
    assert utils.format_exception() == "RuntimeError: Trial"
  else: raise Exception_expected  # sentinel presumably defined at module level
  try: assert 1 == 2
  except KeyboardInterrupt: raise
  except Exception:
    s = utils.format_exception()
    assert s.startswith("AssertionError: ")
    assert s.find("tst_utils.py line ") >= 0
  else: raise Exception_expected
  exercise_indented_display()
  exercise_approx_equal()
  exercise_file_utils()
  exercise_dir_utils()
  print utils.format_cpu_times()
def run(args): assert len(args) == 0 have_cma_es = libtbx.env.has_module("cma_es") if (not have_cma_es): print "Skipping some tests: cma_es module not available or not configured." print names = ['easom', 'rosenbrock', 'ackley', 'rastrigin'] for name in names: print "****", name, "****" if name == 'easom': start = flex.double([0.0, 0.0]) else: start = flex.double([4, 4]) for ii in range(1): test_lbfgs(name) if (have_cma_es): test_cma_es(name) test_differential_evolution(name) test_cross_entropy(name) test_simplex(name) test_dssa(name) print print from libtbx.utils import format_cpu_times print format_cpu_times()
def exercise():
  """Run the least-squares test-problem suite (MINPACK-style functions).

  The `(0 or default_flag)` pattern is a deliberate per-test toggle: flip
  the 0 to 1 to force one test on while default_flag is False, or set
  default_flag False to disable all -- do not "simplify" it away.
  """
  verbose = "--verbose" in sys.argv[1:]
  exercise_cholesky()
  default_flag = True
  if (0 or default_flag):
    # full-rank linear problems, all shapes with m >= n
    for m in xrange(1,5+1):
      for n in xrange(1,m+1):
        linear_function_full_rank(m=m, n=n, verbose=verbose)
  if (0 or default_flag):
    for m in xrange(1,5+1):
      for n in xrange(1,m+1):
        linear_function_rank_1(m=m, n=n, verbose=verbose)
  if (0 or default_flag):
    for m in xrange(3,7+1):
      for n in xrange(3,m+1):
        linear_function_rank_1_with_zero_columns_and_rows(
          m=m, n=n, verbose=verbose)
  if (0 or default_flag):
    rosenbrock_function(m=2, n=2, verbose=verbose)
  if (0 or default_flag):
    helical_valley_function(m=3, n=3, verbose=verbose)
  if (0 or default_flag):
    powell_singular_function(m=4, n=4, verbose=verbose)
  if (0 or default_flag):
    freudenstein_and_roth_function(m=2, n=2, verbose=verbose)
  if (0 or default_flag):
    bard_function(m=15, n=3, verbose=verbose)
  if (0 or default_flag):
    kowalik_and_osborne_function(m=11, n=4, verbose=verbose)
  if (0 or default_flag):
    meyer_function(m=16, n=3, verbose=verbose)
  print format_cpu_times()
def run(args):
  """Same check as the sibling run(): count unimodular change-of-basis ops
  that reduce to pure (disallowed) origin shifts per space group, and
  compare against the module-level `special` dict."""
  if "--full" in args:
    to_do = range(1, 230 + 1)  # all 230 space group numbers
  elif "--special" in args:
    to_do = sorted(special.keys())
  else:
    to_do = [75, 151]  # quick default subset
  for space_group_number in to_do:
    sgi = sgtbx.space_group_info(number=space_group_number)
    sgi.show_summary(prefix="")
    sys.stdout.flush()
    n_special = 0
    for m in scitbx.math.unimodular_generator(range=1).all():
      cb_op = sgtbx.change_of_basis_op(
        sgtbx.rt_mx(sgtbx.rot_mx(m, 1), 1)).new_denominators(12, 144)
      cb_sgi = sgi.change_basis(cb_op=cb_op)
      cb_op_ref = cb_sgi.change_of_basis_op_to_reference_setting()
      ref_sgi = cb_sgi.change_basis(cb_op=cb_op_ref)
      # Round trip back to the reference setting must recover the group.
      assert ref_sgi.group() == sgi.group()
      c = cb_op_ref.c()
      if c.r().is_unit_mx() and c.t().num() != (0, 0, 0):
        n_special += 1
        cb_ref_sgi = sgi.change_basis(cb_op=cb_op_ref)
        print " cb_op=%s -> %s" % (
          str(cb_op.c()),
          cb_ref_sgi.type().universal_hermann_mauguin_symbol())
        sys.stdout.flush()
        # verify that c.t() is not an allowed origin shift
        assert cb_ref_sgi.group() != sgi.group()
    assert special.get(space_group_number, 0) == n_special
  print format_cpu_times()
def run(): import libtbx.option_parser as optparse parser = optparse.OptionParser() parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="be verbose") parser.add_option("-g", "--space_group", action="store", type="string", dest="space_group", help="space group symbol or number or all or all530") parser.add_option("-n", "--n_steps", action="store", type="int", dest="n_steps", default=NSteps, help="number of grid points in one" + " dimension of the asu box") parser.add_option("--groups_file", action="store", type="string", dest="groups_file", help="file containing space groups, one per line") (opts, args) = parser.parse_args() groups = [] if not opts.groups_file is None: tmp_file = open(opts.groups_file, "r") for line in tmp_file.readlines(): # newlines retained groups.append( line.strip()) # removes whitespace in the begining and end tmp_file.close() if (opts.space_group is None) & (len(groups) == 0): groups.extend(SpaceGroups) elif opts.space_group == "all": for isg in xrange(1, 231): groups.append(str(isg)) elif opts.space_group == "all530": it = cctbx.sgtbx.space_group_symbol_iterator() while (True): symbol = it.next() # TODO: the following does not work #if( symbol.number()==0 ): # break groups.append(symbol.hermann_mauguin()) if (symbol.number() == 230): break elif not opts.space_group is None: groups.append(opts.space_group) print >> cout, "Number of groups: ", len(groups) print >> cout, "Options= ", opts compare_groups(groups, opts.n_steps, opts.verbose) print format_cpu_times()
def run():
  """Check the C++ in-place charge flipping against an equivalent
  flex-level implementation on an unpadded copy of the same map."""
  structure = random_structure.xray_structure(
    sgtbx.space_group_info("P21/c"),
    elements=["Si"]*10,
    volume_per_atom=18.6,
    min_distance=1.2,
    general_positions_only=False)
  miller_set_f_obs = miller.build_set(
    crystal_symmetry=structure,
    anomalous_flag=True,
    d_min=0.8)
  f_obs = miller_set_f_obs.structure_factors_from_scatterers(
    xray_structure=structure,
    algorithm="direct").f_calc()
  fft_map = f_obs.fft_map(symmetry_flags=maptbx.use_space_group_symmetry)
  padded = fft_map.real_map()
  unpadded = fft_map.real_map_unpadded() # copy
  unpadded_1d = unpadded.as_1d() # 1D view => in-place
  mmm = flex.min_max_mean_double(unpadded_1d)
  # Three thresholds spanning the density range: below mean, mean, above mean.
  for delta in ((mmm.min + mmm.mean)/2, mmm.mean, (mmm.mean + mmm.max)/2):
    # in-place charge flipping
    ab_initio.ext.flip_charges_in_place(padded, delta)
    # same but on an unpadded copy using the flex tools
    flipped_selection = unpadded_1d < delta
    flipped = unpadded_1d.select(flipped_selection)
    flipped *= -1
    unpadded_1d.set_selected(flipped_selection, flipped)
    # Both routes must produce numerically identical maps.
    assert approx_equal(padded, unpadded, 1e-15)
  print format_cpu_times()
def run(): if (not libtbx.env.has_module("reduce")): print "Reduce not installed." return exercise_adopting_coord_restraints() exercise_adopting_coord_restraints_water() print format_cpu_times()
def exercise(): random.seed(0) for a1_in in xrange(0, 400, 15): for a2_in in xrange(0, 400, 15): for a3_in in xrange(0, 400, 15): exercise_core((a1_in, a2_in, a3_in)) for i_trial in xrange(1000): exercise_core([random.random()*360-180 for i in [0,1,2]]) print format_cpu_times()
def exercise():
  """Sweep exercise_core over a coarse 3-angle grid (0..395 step 15,
  deliberately past 360 to hit wrap-around), then 1000 random triples."""
  random.seed(0)  # deterministic random phase
  for a1_in in xrange(0, 400, 15):
    for a2_in in xrange(0, 400, 15):
      for a3_in in xrange(0, 400, 15):
        exercise_core((a1_in, a2_in, a3_in))
  # random angles uniformly in [-180, 180)
  for i_trial in xrange(1000):
    exercise_core([random.random() * 360 - 180 for i in [0, 1, 2]])
  print format_cpu_times()
def run(args):
  """Test minimum_covering_ellipsoid.compute: for known ellipsoid axis
  points under random rotation/translation, the recovered center, radii
  and rotation must match (rotation up to a point-group-432 symmetry op)."""
  assert len(args) == 0
  n_trials = 100
  from scitbx.math.minimum_covering_ellipsoid import compute as mce_compute
  from scitbx.array_family import flex
  from libtbx.test_utils import approx_equal, is_below_limit
  # XXX point group 222 should be sufficient, but 432 is currently needed
  point_group_432_rotation_matrices = [
    (1, 0, 0, 0, 1, 0, 0, 0, 1),
    (1, 0, 0, 0, 0, -1, 0, 1, 0),
    (1, 0, 0, 0, 0, 1, 0, -1, 0),
    (0, 0, 1, 0, 1, 0, -1, 0, 0),
    (0, 0, -1, 0, 1, 0, 1, 0, 0),
    (0, -1, 0, 1, 0, 0, 0, 0, 1),
    (0, 1, 0, -1, 0, 0, 0, 0, 1),
    (0, 0, 1, 1, 0, 0, 0, 1, 0),
    (0, 1, 0, 0, 0, 1, 1, 0, 0),
    (0, -1, 0, 0, 0, -1, 1, 0, 0),
    (0, 0, 1, -1, 0, 0, 0, -1, 0),
    (0, -1, 0, 0, 0, 1, -1, 0, 0),
    (0, 0, -1, -1, 0, 0, 0, 1, 0),
    (0, 0, -1, 1, 0, 0, 0, -1, 0),
    (0, 1, 0, 0, 0, -1, -1, 0, 0),
    (1, 0, 0, 0, -1, 0, 0, 0, -1),
    (-1, 0, 0, 0, 1, 0, 0, 0, -1),
    (-1, 0, 0, 0, -1, 0, 0, 0, 1),
    (0, 1, 0, 1, 0, 0, 0, 0, -1),
    (0, -1, 0, -1, 0, 0, 0, 0, -1),
    (0, 0, 1, 0, -1, 0, 1, 0, 0),
    (0, 0, -1, 0, -1, 0, -1, 0, 0),
    (-1, 0, 0, 0, 0, 1, 0, 1, 0),
    (-1, 0, 0, 0, 0, -1, 0, -1, 0)]
  def check(center, radii, rotation):
    # Six axis-end points of the ellipsoid, rotated and shifted.
    a, b, c = radii
    points_principal = flex.vec3_double([
      (-a, 0, 0), (a, 0, 0),
      (0, -b, 0), (0, b, 0),
      (0, 0, -c), (0, 0, c)])
    points = rotation * points_principal + center
    mce = mce_compute(points)
    assert approx_equal(mce.center, center)
    assert approx_equal(sorted(mce.radii), sorted(radii))
    assert approx_equal(mce.rotation.determinant(), 1)
    # Undo the recovered rotation; result must match points_principal up to
    # one of the 432 symmetry operations.
    points_mce = mce.rotation.inverse().elems * (points - mce.center)
    rms = []
    for r in point_group_432_rotation_matrices:
      rp = r * points_mce
      rms.append(rp.rms_difference(points_principal))
    assert is_below_limit(value=min(rms), limit=1e-8, eps=0)
  mt = flex.mersenne_twister(seed=0)
  check((0, 0, 0), (1, 2, 3), (1, 0, 0, 0, 1, 0, 0, 0, 1))
  for i_trial in xrange(n_trials):
    center = list(mt.random_double(size=3) * 8 - 4)
    radii = list(mt.random_double(size=3) * 3 + 0.1)
    rotation = mt.random_double_r3_rotation_matrix()
    check(center, radii, rotation)
  from libtbx.utils import format_cpu_times
  print format_cpu_times()
def tst_run_requiring_cns(args, call_back): import libtbx.path if (libtbx.path.full_command_path(command="cns") is None): print "Skipping tests: cns not available." else: debug_utils.parse_options_loop_space_groups( argv=args, call_back=call_back, show_cpu_times=False) from libtbx.utils import format_cpu_times print format_cpu_times()
def run():
  """Drive the atom-selection / f000 / link-problem tests; "--verbose"
  on the command line is forwarded where supported."""
  verbose = "--verbose" in sys.argv[1:]
  exercise_corrupt_cryst1()
  exercise_d_data_target_d_atomic_params()
  exercise_d_data_target_d_atomic_params2()
  exercise_get_atom_selections(verbose=verbose)
  exercise_f_000()
  exercise_detect_link_problems()
  print format_cpu_times()
def run():
  """Drive the f_model test battery; bulk-solvent scaling is exercised at
  two resolutions (d_min 2 and 4)."""
  for d_min in [2, 4]:
    exercise_5_bulk_sol_and_scaling(d_min=d_min)
  exercise_6_instantiate_consistency()
  exercise_f_model_no_scales()
  exercise_top_largest_f_obs_f_model_differences()
  exercise_1()
  exercise_2()
  exercise_4_f_hydrogens()
  print format_cpu_times()
def run():
  """Drive the f_model test battery (duplicate of the sibling run());
  bulk-solvent scaling at d_min 2 and 4."""
  for d_min in [2, 4]:
    exercise_5_bulk_sol_and_scaling(d_min = d_min)
  exercise_6_instantiate_consistency()
  exercise_f_model_no_scales()
  exercise_top_largest_f_obs_f_model_differences()
  exercise_1()
  exercise_2()
  exercise_4_f_hydrogens()
  print format_cpu_times()
def exercise():
  """Run the deepcopy/show_select/inflate/add test sequence.
  NOTE: there is no exercise_add_2a() call here -- whether that variant
  exists and was intentionally skipped is not visible from this file."""
  exercise_deepcopy_show_select()
  exercise_deepcopy_show_select_compare_arrays()
  exercise_inflate()
  exercise_add_1a()
  exercise_add_1b()
  exercise_add_1c()
  exercise_add_2b()
  exercise_add_2c()
  print format_cpu_times()
def run():
  """Check the asu reference table for all 230 space groups, then run the
  option-driven space-group loop and the remaining asu-mapping tests."""
  exercise_reference_table()
  for space_group_number in xrange(1,230+1):
    asu = reference_table.get_asu(space_group_number)
    exercise_shape_vertices(asu=asu, unit_cell=None)
  debug_utils.parse_options_loop_space_groups(
    sys.argv[1:], run_call_back, show_cpu_times=False)
  exercise_is_simple_interaction()
  exercise_non_crystallographic_asu_mappings()
  print format_cpu_times()
def run(args):
  """Compare the fftpack transforms with the cmplft/realft/hermft
  reference implementations. The only accepted argument is --verbose."""
  assert args in [[], ["--verbose"]]
  verbose = (len(args) != 0)
  compare_fftpack_with_cmplft_1d()
  compare_fftpack_with_cmplft_3d()
  compare_fftpack_with_realft_1d()
  compare_fftpack_with_hermft_1d()
  exercise_fftlib_real_complex_3d_real_imag_w(verbose=verbose)
  compare_large_3d_real_imag_w_complete_true_false(verbose=verbose)
  print format_cpu_times()
def exercise(args):
  """Run the symmetry-library extraction tests; skipped entirely when the
  ccp4io library data is not configured."""
  assert len(args) == 0
  if (extract_from_symmetry_lib.ccp4io_lib_data is None):
    print "Skipping iotbx/mtz/tst_extract_from_symmetry_lib.py:" \
      " ccp4io not available"
    return
  exercise_230()
  exercise_symop_lib_recycling()
  exercise_syminfo_lib_pdb_cryst1_recycling()
  print format_cpu_times()
def run():
  """Drive the extended f_model test battery, including the hydrogen and
  f_part1/f_part2 variants."""
  exercise_5_bulk_sol_and_scaling()
  exercise_6_instantiate_consistency()
  exercise_f_model_no_scales()
  exercise_top_largest_f_obs_f_model_differences()
  exercise_5_bulk_sol_and_scaling_and_H()
  exercise_1()
  exercise_2()
  exercise_3_f_part1_and_f_part2()
  exercise_4_f_hydrogens()
  print format_cpu_times()
def run(): exercise_reference_table() for space_group_number in xrange(1, 230 + 1): asu = reference_table.get_asu(space_group_number) exercise_shape_vertices(asu=asu, unit_cell=None) debug_utils.parse_options_loop_space_groups(sys.argv[1:], run_call_back, show_cpu_times=False) exercise_is_simple_interaction() exercise_non_crystallographic_asu_mappings() print format_cpu_times()
def exercise():
  """Run the monomer-library building-block tests, from atoms up through
  motifs and manipulations."""
  exercise_atom()
  exercise_bond()
  exercise_angle()
  exercise_dihedral()
  exercise_chirality()
  exercise_planarity()
  exercise_motif()
  exercise_alteration()
  exercise_manipulation()
  print format_cpu_times()
def run():
  """Build the monomer-library servers once and feed them to the numbered
  exercises. Note exercise_03 runs before exercise_02 -- presumably
  deliberate ordering (03 takes the servers, not the shared inputs)."""
  verbose = "--verbose" in sys.argv[1:]
  exercise_basic(verbose=verbose)
  mon_lib_srv = mmtbx.monomer_library.server.server()
  ener_lib = mmtbx.monomer_library.server.ener_lib()
  inputs = get_inputs(
    mon_lib_srv=mon_lib_srv, ener_lib=ener_lib, verbose=verbose)
  exercise_00(inputs=inputs, verbose=verbose)
  exercise_01(inputs=inputs, verbose=verbose)
  exercise_03(mon_lib_srv=mon_lib_srv, ener_lib=ener_lib, verbose=verbose)
  exercise_02(inputs=inputs, verbose=verbose)
  print format_cpu_times()
def run(args): assert len(args) == 0 n_trials = 100 t0 = time.time() for i in xrange(1): exercise1(n_trials) exercise2() exercise3() exercise4() exercise5(n_trials) from libtbx.utils import format_cpu_times print format_cpu_times()
def loop_space_groups(
      argv,
      flags,
      call_back,
      symbols_to_stdout=True,
      symbols_to_stderr=False,
      show_cpu_times=True,
      **kwds):
  """Invoke call_back(flags, space_group_info, **kwds) for each selected
  space-group symbol and collect the results.

  Chunking: with --ChunkSize/--ChunkMember flags set, only every
  chunk_member-th symbol (mod chunk_size) is processed, so parallel jobs
  can split the symbol list.

  call_back may return either a bare result or a (continue_flag, result)
  pair; a False continue_flag stops the loop early.

  Returns the list of per-symbol call_back results.
  """
  call_back_results = []
  chunk_size = 1
  chunk_member = 0
  # flags default to False when unset; explicit "!= False" distinguishes that.
  if (flags.ChunkSize != False): chunk_size = int(flags.ChunkSize)
  if (flags.ChunkMember != False): chunk_member = int(flags.ChunkMember)
  assert chunk_size > 0 and chunk_member < chunk_size
  n_threads = int(flags.Threads)
  if n_threads > 1:
    print "** Warning: multi-threaded space-group looping disabled **"
  if (not flags.RandomSeed): random.seed(0)  # deterministic unless requested
  # flags.n presumably counts consumed flag arguments -- confirm upstream.
  if (len(argv) > 0 + flags.n):
    symbols = argv
  else:
    symbols = get_test_space_group_symbols(
      flags.AllSpaceGroups,
      flags.ChiralSpaceGroups,
      flags.AllSettings,
      flags.UnusualSettings)
  i_loop = -1
  for symbol in symbols:
    if (symbol.startswith("--")): continue  # skip option-style arguments
    i_loop += 1
    if (i_loop % chunk_size != chunk_member): continue
    space_group_info = sgtbx.space_group_info(symbol)
    sys.stdout.flush()
    if symbols_to_stderr:
      print >> sys.stderr, space_group_info
      sys.stderr.flush()
    if (symbols_to_stdout):
      print space_group_info
      sys.stdout.flush()
    call_back_result = call_back(flags, space_group_info, **kwds)
    sys.stdout.flush()
    # Accept both (continue_flag, result) pairs and bare results.
    try:
      continue_flag, call_back_result = call_back_result
    except TypeError:
      continue_flag, call_back_result = call_back_result, None
    call_back_results.append(call_back_result)
    if (continue_flag == False): break
  if (show_cpu_times):
    print format_cpu_times()
  sys.stdout.flush()
  return call_back_results
def run(args):
  """Run the ccp4_map tests, but only if the compiled extension module is
  present in the build's lib directory."""
  def have_ext():
    # Scan for the built extension rather than importing it blindly.
    for node in os.listdir(libtbx.env.under_build(path="lib")):
      if (node.startswith("iotbx_ccp4_map_ext")):
        return True
    return False
  if (not have_ext()):
    # XXX backward compatibility 2008-09-30
    print "Skipping iotbx_ccp4_map tests: extension not available"
  else:
    import iotbx.ccp4_map
    exercise(args=args)
    exercise_writer()
  print format_cpu_times()
def loop_space_groups(argv, flags, call_back, symbols_to_stdout=True,
      symbols_to_stderr=False, show_cpu_times=True, **kwds):
  """Invoke call_back(flags, space_group_info, **kwds) for each selected
  space-group symbol (duplicate of the sibling loop_space_groups).

  Supports chunked execution via --ChunkSize/--ChunkMember; call_back may
  return a bare result or a (continue_flag, result) pair, where a False
  continue_flag stops the loop. Returns the list of results.
  """
  call_back_results = []
  chunk_size = 1
  chunk_member = 0
  # flags default to False when unset; explicit "!= False" distinguishes that.
  if (flags.ChunkSize != False): chunk_size = int(flags.ChunkSize)
  if (flags.ChunkMember != False): chunk_member = int(flags.ChunkMember)
  assert chunk_size > 0 and chunk_member < chunk_size
  n_threads = int(flags.Threads)
  if n_threads > 1:
    print "** Warning: multi-threaded space-group looping disabled **"
  if (not flags.RandomSeed): random.seed(0)  # deterministic unless requested
  # flags.n presumably counts consumed flag arguments -- confirm upstream.
  if (len(argv) > 0 + flags.n):
    symbols = argv
  else:
    symbols = get_test_space_group_symbols(
      flags.AllSpaceGroups,
      flags.ChiralSpaceGroups,
      flags.AllSettings,
      flags.UnusualSettings)
  i_loop = -1
  for symbol in symbols:
    if (symbol.startswith("--")): continue  # skip option-style arguments
    i_loop += 1
    if (i_loop % chunk_size != chunk_member): continue
    space_group_info = sgtbx.space_group_info(symbol)
    sys.stdout.flush()
    if symbols_to_stderr:
      print >> sys.stderr, space_group_info
      sys.stderr.flush()
    if (symbols_to_stdout):
      print space_group_info
      sys.stdout.flush()
    call_back_result = call_back(flags, space_group_info, **kwds)
    sys.stdout.flush()
    # Accept both (continue_flag, result) pairs and bare results.
    try:
      continue_flag, call_back_result = call_back_result
    except TypeError:
      continue_flag, call_back_result = call_back_result, None
    call_back_results.append(call_back_result)
    if (continue_flag == False): break
  if (show_cpu_times):
    print format_cpu_times()
  sys.stdout.flush()
  return call_back_results
def exercise(args):
  """Run the pdb-input test suite once, or endlessly with --forever
  (soak-test mode)."""
  phenix_regression_pdb_file_names = get_phenix_regression_pdb_file_names()
  forever = "--forever" in args
  while True:
    exercise_hybrid_36()
    exercise_base_256_ordinal()
    exercise_columns_73_76_evaluator(
      pdb_file_names=phenix_regression_pdb_file_names)
    exercise_line_info_exceptions()
    exercise_pdb_input()
    exercise_input_pickling()
    exercise_xray_structure_simple()
    if (not forever): break
  print format_cpu_times()
def run(args):
  """Run the featherstone dynamics tests. With no argument, one dynamics
  step and silent output; with one integer argument, that many steps with
  output to stdout."""
  assert len(args) in [0, 1]
  if len(args) == 0:
    n_dynamics_steps = 1
    out = null_out()
  else:
    n_dynamics_steps = max(1, int(args[0]))
    out = sys.stdout
  #
  exercise_reference_impl_quick()
  exercise_featherstone_FDab(out=out)
  exercise_reference_impl_long(n_dynamics_steps=n_dynamics_steps, out=out)
  #
  print format_cpu_times()
def run(args):
  """Run the featherstone dynamics tests (duplicate of the sibling run()).
  No argument: one step, silent; one integer argument: that many steps,
  output to stdout."""
  assert len(args) in [0,1]
  if (len(args) == 0):
    n_dynamics_steps = 1
    out = null_out()
  else:
    n_dynamics_steps = max(1, int(args[0]))
    out = sys.stdout
  #
  exercise_reference_impl_quick()
  exercise_featherstone_FDab(out=out)
  exercise_reference_impl_long(n_dynamics_steps=n_dynamics_steps, out=out)
  #
  print format_cpu_times()
def run(args):
  """Exercise the rotamer iterator for every standard amino acid, compare
  dihedrals against semi-empirical reference PDBs (if available), and test
  terminus handling on the bundled .ent files.

  Positional arguments must be directories of semi-empirical rotamer PDBs;
  --verbose enables progress output.
  """
  verbose = False
  semi_emp_rotamer_pdb_dirs = []
  for arg in args:
    if (arg == "--verbose"):
      verbose = True
    else:
      assert op.isdir(arg)
      semi_emp_rotamer_pdb_dirs.append(arg)
  mon_lib_srv = mmtbx.monomer_library.server.server()
  amino_acid_resnames = sorted(
    iotbx.pdb.amino_acid_codes.one_letter_given_three_letter.keys())
  for resname in amino_acid_resnames:
    if (verbose):
      print "resname:", resname
    if resname == "UNK":
      # skipping UNK residue because there is no rotamers available for it
      continue
    pdb_inp = iotbx.pdb.input(
      file_name=op.join(
        protein_pdb_files,
        reference_pdb_file_name_lookup[resname]))
    pdb_hierarchy = pdb_inp.construct_hierarchy()
    exercise_server_rotamer_iterator(
      mon_lib_srv=mon_lib_srv,
      pdb_hierarchy=pdb_hierarchy,
      verbose=verbose)
    if (verbose):
      print
  if (len(semi_emp_rotamer_pdb_dirs) == 0):
    # Fall back to the regression repository when no dirs were given.
    pdb_dir = libtbx.env.find_in_repositories(
      relative_path="phenix_regression/semi_emp_rotamer_pdb")
    if (pdb_dir is None):
      print "Skipping compare_dihedrals(): semi_emp_rotamer_pdb not available."
    else:
      semi_emp_rotamer_pdb_dirs.append(pdb_dir)
  for pdb_dir in semi_emp_rotamer_pdb_dirs:
    compare_dihedrals(
      mon_lib_srv=mon_lib_srv,
      amino_acid_resnames=amino_acid_resnames,
      pdb_dir=pdb_dir,
      file_name_extension=".uhf_631dp.pdb",
      verbose=verbose)
  for file_name in os.listdir(protein_pdb_files):
    if (not file_name.endswith(".ent")):
      continue
    if (verbose):
      print file_name
    exercise_termini(
      mon_lib_srv=mon_lib_srv,
      pdb_file_name=op.join(protein_pdb_files, file_name))
  if (verbose):
    print
  exercise_pro_missing_hd1(mon_lib_srv=mon_lib_srv)
  print format_cpu_times()
  print "OK"
def run(args):
  """Exercise the rotamer iterator for standard amino acids (this variant
  also skips PYL and SEC), compare dihedrals against semi-empirical
  reference PDBs (if available), and test terminus handling.

  Positional arguments must be directories of semi-empirical rotamer PDBs;
  --verbose enables progress output.
  """
  verbose = False
  semi_emp_rotamer_pdb_dirs = []
  for arg in args:
    if (arg == "--verbose"):
      verbose = True
    else:
      assert op.isdir(arg)
      semi_emp_rotamer_pdb_dirs.append(arg)
  mon_lib_srv = mmtbx.monomer_library.server.server()
  amino_acid_resnames = sorted(
    iotbx.pdb.amino_acid_codes.one_letter_given_three_letter.keys())
  for resname in amino_acid_resnames:
    if (verbose):
      print "resname:", resname
    if resname in ["UNK", 'PYL', 'SEC']:
      # skipping UNK residue because there is no rotamers available for it
      continue
    pdb_inp = iotbx.pdb.input(
      file_name=op.join(
        protein_pdb_files,
        reference_pdb_file_name_lookup[resname]))
    pdb_hierarchy = pdb_inp.construct_hierarchy()
    exercise_server_rotamer_iterator(
      mon_lib_srv=mon_lib_srv,
      pdb_hierarchy=pdb_hierarchy,
      verbose=verbose)
    if (verbose):
      print
  if (len(semi_emp_rotamer_pdb_dirs) == 0):
    # Fall back to the regression repository when no dirs were given.
    pdb_dir = libtbx.env.find_in_repositories(
      relative_path="phenix_regression/semi_emp_rotamer_pdb")
    if (pdb_dir is None):
      print "Skipping compare_dihedrals(): semi_emp_rotamer_pdb not available."
    else:
      semi_emp_rotamer_pdb_dirs.append(pdb_dir)
  for pdb_dir in semi_emp_rotamer_pdb_dirs:
    compare_dihedrals(
      mon_lib_srv=mon_lib_srv,
      amino_acid_resnames=amino_acid_resnames,
      pdb_dir=pdb_dir,
      file_name_extension=".uhf_631dp.pdb",
      verbose=verbose)
  for file_name in os.listdir(protein_pdb_files):
    if (not file_name.endswith(".ent")):
      continue
    if (verbose):
      print file_name
    exercise_termini(
      mon_lib_srv=mon_lib_srv,
      pdb_file_name=op.join(protein_pdb_files, file_name))
  if (verbose):
    print
  exercise_pro_missing_hd1(mon_lib_srv=mon_lib_srv)
  print format_cpu_times()
  print "OK"
def run(args):
  """Drive the space-group-info test battery; "--verbose" is forwarded to
  the monoclinic cell-choice test."""
  exercise_change_of_basis_between_arbitrary_space_groups()
  exercise_sys_abs_equiv()
  exercise_allowed_origin_shift()
  exercise_generator_set()
  exercise_space_group_info()
  test_enantiomorphic_pairs()
  exercise_ss_continuous_shifts_are_principal()
  exercise_monoclinic_cell_choices(verbose="--verbose" in args)
  exercise_orthorhombic_hm_qualifier_as_cb_symbol()
  exercise_tensor_constraints()
  exercise_space_group_contains()
  exercise_inversion_centring()
  exercise_compare_cb_op_as_hkl()
  print format_cpu_times()
def run(args):
  """Run the iso-surface triangulation tests over several analytic maps
  (periodic, elliptic, hyperbolic, sinusoidal), checking the expected
  degenerate-edge sets."""
  verbose = "--verbose" in args
  grid_size = (50, 40, 30)
  test = triangulation_test_case(
    periodic(), grid_size, periodic=True,
    lazy_normals=False, descending_normals=False)
  test.run(iso_level=0.194, from_here=(-0.5, -0.5, -0.5),
    to_there=(1.5, 1.5, 1.5), verbose=verbose)
  """ For this one, the iso-surface passes through points at corners of the
  map, e.g. (1, 1, 0). That makes it interesting for that corner vertex ends
  up being part of only one triangle which is degenerate and the normal
  associated to that vertex is therefore undefined """
  test = triangulation_test_case(
    elliptic(), grid_size, periodic=False,
    lazy_normals=False, descending_normals=False)
  test.run(iso_level=3, from_here=None, to_there=None, verbose=verbose)
  # exactly one degenerate edge expected at this level (see note above body)
  assert test.degenerate_edges == [(2973, 2912)]
  test.run(iso_level=1.3, from_here=(0.3, 0.2, 0.4),
    to_there=(0.7, 0.8, 0.6), verbose=verbose)
  assert test.degenerate_edges == []
  test.run(iso_level=1.4, from_here=(-0.3, 0.2, 0.4),
    to_there=(0.7, 0.8, 1.6), verbose=verbose)
  assert test.degenerate_edges == []
  test = triangulation_test_case(
    elliptic(), grid_size, periodic=False,
    lazy_normals=True, descending_normals=False)
  test.run(iso_level=0.8, from_here=None, to_there=None, verbose=verbose)
  assert test.degenerate_edges == []
  test = triangulation_test_case(
    hyperbolic(), grid_size, periodic=False,
    lazy_normals=True, descending_normals=False)
  test.run(iso_level=0.2, from_here=None, to_there=None, verbose=verbose)
  assert test.degenerate_edges == []
  test = triangulation_test_case(
    sinusoidal(), grid_size, periodic=False,
    lazy_normals=False, descending_normals=True)
  test.run(iso_level=0.8, from_here=None, to_there=None, verbose=verbose)
  assert test.degenerate_edges == []
  print format_cpu_times()
def show_process_info(out):
  """Write a delimited process-status banner to `out`: virtual memory
  usage (when available), structure-factor global counters, and CPU times.

  :param out: writable stream; flushed before returning
  """
  print >> out, "\\/"*39
  introspection.virtual_memory_info().show_if_available(out=out, show_max=True)
  xray.structure_factors.global_counters.show(out=out)
  print >> out, format_cpu_times()
  print >> out, "/\\"*39
  out.flush()
def exercise(args):
  """Run exercise_2() once with a reproducible seed, or endlessly with
  --forever, bumping the seed each pass. --random_seed=N pins the first
  seed; otherwise one is drawn from flex."""
  forever = False
  random_seed = None
  for arg in args:
    if (arg == "--forever"):
      forever = True
    elif (arg.startswith("--random_seed=")):
      random_seed = int(arg.split("=", 1)[1])
  if (random_seed is None):
    random_seed = flex.get_random_seed()
  while True:
    # Print the seed so any failure can be reproduced exactly.
    print "random_seed:", random_seed
    random.seed(random_seed)
    flex.set_random_seed(value=random_seed)
    exercise_2()
    if (not forever): break
    random_seed += 1
  print format_cpu_times()