Example no. 1
    def each_group_should_have_six_restraints(self):
        results = parse.get_secondary_structure_restraints(
            content=self.ss, system=self.system, scaler=self.scaler
        )

        for group in results:
            self.assertEqual(len(group), 6)
Example no. 2
    def each_group_should_have_six_restraints(self):
        results = parse.get_secondary_structure_restraints(contents=self.ss,
                                                           system=self.system,
                                                           scaler=self.scaler)

        for group in results:
            self.assertEqual(len(group), 6)
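
Examples 1, 2, 6, and 7 are unittest methods that reference self.ss, self.system, and self.scaler without showing the fixture that builds them. The sketch below is a plausible setUp, assuming the MELD imports used in the later setup scripts; the class name, the constant scaler, and the all-helix secondary-structure string are illustrative assumptions, and it is the original fixture's string that makes the asserted counts (4 groups of 6 restraints) come out.

import unittest

from meld import parse, system  # assumed import layout, matching the setup scripts below


class TestSecondaryStructureRestraints(unittest.TestCase):  # hypothetical class name
    def setUp(self):
        # Build a small all-alanine system to attach restraints to.
        seq = parse.get_sequence_from_AA1(contents="AAAAAAAAAAAAAAAA")
        mol = system.ProteinMoleculeFromSequence(seq)
        builder = system.SystemBuilder()
        self.system = builder.build_system_from_molecules([mol])
        # A constant scaler is the simplest choice; the original fixture may differ.
        self.scaler = self.system.restraints.create_scaler("constant")
        # Placeholder secondary-structure string; the original fixture's content
        # determines the group and restraint counts asserted in the tests above.
        self.ss = "H" * 16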
Example no. 3
def setup_system():
    # get the sequence
    sequence = "AAAAAAAAAAAAAAAA"
    sequence = parse.get_sequence_from_AA1(contents=sequence)
    # create the system
    p = system.ProteinMoleculeFromSequence(sequence)
    b = system.SystemBuilder()
    s = b.build_system_from_molecules([p])
    s.temperature_scaler = system.LinearTemperatureScaler(0, 1, 300, 310)

    rest_scaler = s.restraints.create_scaler("nonlinear",
                                             alpha_min=0,
                                             alpha_max=1,
                                             factor=4.0)
    secondary = "H" * 16
    secondary_restraints = parse.get_secondary_structure_restraints(
        system=s, scaler=rest_scaler, contents=secondary)
    s.restraints.add_selectively_active_collection(secondary_restraints,
                                                   len(secondary_restraints))

    # create the options
    options = system.RunOptions()
    options.use_big_timestep = True

    # create a store
    store = vault.DataStore(s.n_atoms,
                            N_REPLICAS,
                            s.get_pdb_writer(),
                            block_size=BACKUP_FREQ)
    store.initialize(mode="w")
    store.save_system(s)
    store.save_run_options(options)

    # create and store the remd_runner
    l = ladder.NearestNeighborLadder(n_trials=1)
    policy = adaptor.AdaptationPolicy(1.0, 50, 100)
    a = adaptor.EqualAcceptanceAdaptor(n_replicas=N_REPLICAS,
                                       adaptation_policy=policy)
    remd_runner = master_runner.MasterReplicaExchangeRunner(N_REPLICAS,
                                                            max_steps=N_STEPS,
                                                            ladder=l,
                                                            adaptor=a)
    store.save_remd_runner(remd_runner)

    # create and store the communicator
    c = comm.MPICommunicator(s.n_atoms, N_REPLICAS)
    store.save_communicator(c)

    # create and save the initial states
    states = [gen_state(s) for i in range(N_REPLICAS)]
    states[1].alpha = 1.0
    store.save_states(states, 0)

    # save data_store
    store.save_data_store()

    return s.n_atoms
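
Every setup_system() example assumes a module preamble that is not shown: the MELD imports plus the N_REPLICAS, N_STEPS, and BACKUP_FREQ/BLOCK_SIZE constants, and a gen_state helper. The sketch below is a minimal version of that preamble, assuming the import layout of MELD setup scripts of this era; the constant values are placeholders, and the later examples additionally pull in numpy, glob, sys, ramps (LinearRamp), Monte Carlo movers (mc), and patchers, which are not guessed at here.

#!/usr/bin/env python
# Assumed preamble for the setup_system() examples; constant values are illustrative.
import numpy as np

from meld import comm, parse, system, vault
from meld.remd import adaptor, ladder, master_runner

N_REPLICAS = 4      # number of replica-exchange replicas (placeholder)
N_STEPS = 10000     # maximum number of exchange steps (placeholder)
BACKUP_FREQ = 100   # DataStore block size used in Examples 3-5 (placeholder)
BLOCK_SIZE = 100    # DataStore block size used in the remaining examples (placeholder)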
Example no. 4
def setup_system():
    # get the sequence
    sequence = "AAAAAAAAAAAAAAAA"
    sequence = parse.get_sequence_from_AA1(contents=sequence)
    # create the system
    p = system.ProteinMoleculeFromSequence(sequence)
    b = system.SystemBuilder()
    s = b.build_system_from_molecules([p])
    s.temperature_scaler = system.LinearTemperatureScaler(0, 1, 300, 310)

    rest_scaler = s.restraints.create_scaler(
        "nonlinear", alpha_min=0, alpha_max=1, factor=4.0
    )
    secondary = "H" * 16
    secondary_restraints = parse.get_secondary_structure_restraints(
        system=s, scaler=rest_scaler, contents=secondary
    )
    s.restraints.add_selectively_active_collection(
        secondary_restraints, len(secondary_restraints)
    )

    # create the options
    options = system.RunOptions()
    options.use_big_timestep = True

    # create a store
    store = vault.DataStore(
        s.n_atoms, N_REPLICAS, s.get_pdb_writer(), block_size=BACKUP_FREQ
    )
    store.initialize(mode="w")
    store.save_system(s)
    store.save_run_options(options)

    # create and store the remd_runner
    l = ladder.NearestNeighborLadder(n_trials=1)
    policy = adaptor.AdaptationPolicy(1.0, 50, 100)
    a = adaptor.EqualAcceptanceAdaptor(n_replicas=N_REPLICAS, adaptation_policy=policy)
    remd_runner = master_runner.MasterReplicaExchangeRunner(
        N_REPLICAS, max_steps=N_STEPS, ladder=l, adaptor=a
    )
    store.save_remd_runner(remd_runner)

    # create and store the communicator
    c = comm.MPICommunicator(s.n_atoms, N_REPLICAS)
    store.save_communicator(c)

    # create and save the initial states
    states = [gen_state(s) for i in range(N_REPLICAS)]
    states[1].alpha = 1.0
    store.save_states(states, 0)

    # save data_store
    store.save_data_store()

    return s.n_atoms
Example no. 5
def setup_system():
    # get the sequence
    sequence = 'AAAAAAAAAAAAAAAA'
    sequence = parse.get_sequence_from_AA1(contents=sequence)
    # create the system
    p = system.ProteinMoleculeFromSequence(sequence)
    b = system.SystemBuilder()
    s = b.build_system_from_molecules([p])
    s.temperature_scaler = system.LinearTemperatureScaler(0, 1, 300, 310)

    rest_scaler = s.restraints.create_scaler('nonlinear', alpha_min=0, alpha_max=1, factor=4.0)
    secondary = 'H'*16
    secondary_restraints = parse.get_secondary_structure_restraints(system=s, scaler=rest_scaler,
                                                                          contents=secondary)
    s.restraints.add_selectively_active_collection(secondary_restraints, len(secondary_restraints))

    # create com restraint
    com = s.restraints.create_restraint('com', rest_scaler, ramp=None,
                                        group1=[(1, 'CA')],
                                        group2=[(3, 'CA')],
                                        weights1=None,
                                        weights2=None,
                                        dims='xyz',
                                        force_const=100.,
                                        distance=0.5)
    s.restraints.add_as_always_active(com)

    # create absolute com restraint
    com = s.restraints.create_restraint('abs_com', rest_scaler, ramp=None,
                                        group=[(1, 'CA')],
                                        weights=None,
                                        dims='xyz',
                                        force_const=1.,
                                        position=[0., 0., 0.])
    s.restraints.add_as_always_active(com)

    # create the options
    options = system.RunOptions()

    # create a store
    store = vault.DataStore(s.n_atoms, N_REPLICAS, s.get_pdb_writer(), block_size=BACKUP_FREQ)
    store.initialize(mode='w')
    store.save_system(s)
    store.save_run_options(options)

    # create and store the remd_runner
    l = ladder.NearestNeighborLadder(n_trials=1)
    policy = adaptor.AdaptationPolicy(1.0, 50, 100)
    a = adaptor.EqualAcceptanceAdaptor(n_replicas=N_REPLICAS, adaptation_policy=policy)
    remd_runner = master_runner.MasterReplicaExchangeRunner(N_REPLICAS, max_steps=N_STEPS, ladder=l, adaptor=a)
    store.save_remd_runner(remd_runner)

    # create and store the communicator
    c = comm.MPICommunicator(s.n_atoms, N_REPLICAS)
    store.save_communicator(c)

    # create and save the initial states
    states = [gen_state(s) for i in range(N_REPLICAS)]
    states[1].alpha = 1.0
    store.save_states(states, 0)

    # save data_store
    store.save_data_store()

    return s.n_atoms
Example no. 6
    def test_adds_correct_number_of_groups(self):
        results = parse.get_secondary_structure_restraints(contents=self.ss, system=self.system,
                                                           scaler=self.scaler)

        self.assertEqual(len(results), 4)
Example no. 7
    def test_adds_correct_number_of_groups(self):
        results = parse.get_secondary_structure_restraints(contents=self.ss,
                                                           system=self.system,
                                                           scaler=self.scaler)

        self.assertEqual(len(results), 4)
Example no. 8
def setup_system():
    # load the sequence
    sequence = parse.get_sequence_from_AA1(filename='sequence.dat')
    n_res = len(sequence.split())

    # build the system
    p = system.ProteinMoleculeFromSequence(sequence)
    b = system.SystemBuilder()
    s = b.build_system_from_molecules([p])
    s.temperature_scaler = system.GeometricTemperatureScaler(0, 0.4, 300., 550.)

    #
    # Secondary Structure
    #
    ss_scaler = s.restraints.create_scaler('constant')
    ss_rests = parse.get_secondary_structure_restraints(filename='ss.dat', system=s, scaler=ss_scaler,
            torsion_force_constant=2.5, distance_force_constant=2.5)
    n_ss_keep = int(len(ss_rests) * 0.85)
    s.restraints.add_selectively_active_collection(ss_rests, n_ss_keep)

    #
    # Confinement Restraints
    #
    conf_scaler = s.restraints.create_scaler('constant')
    confinement_rests = []
    confinement_dist = (16.9*np.log(s.residue_numbers[-1])-15.8)/28
    for index in range(n_res):
        rest = s.restraints.create_restraint('confine', conf_scaler, res_index=index+1, atom_name='CA', radius=confinement_dist, force_const=250.0)
        confinement_rests.append(rest)
    s.restraints.add_as_always_active_list(confinement_rests)

    #
    # Distance Restraints
    #
    # High reliability
    #
    dist_scaler = s.restraints.create_scaler('nonlinear', alpha_min=0.4, alpha_max=1.0, factor=4.0)
    selected_dist = get_dist_restraints('contacts.dat', s, dist_scaler)
    n_high_keep = int(1.00 * len(selected_dist))
    s.restraints.add_selectively_active_collection(selected_dist, n_high_keep)

    #
    # create the options
    options = system.RunOptions()
    options.implicit_solvent_model = 'gbNeck2'
    options.use_big_timestep = True
    options.cutoff = 1.8

    options.use_amap = True
    options.amap_beta_bias = 1.0
    options.timesteps = 14286
    options.minimize_steps = 20000

    # create a store
    store = vault.DataStore(s.n_atoms, N_REPLICAS, s.get_pdb_writer(), block_size=BLOCK_SIZE)
    store.initialize(mode='w')
    store.save_system(s)
    store.save_run_options(options)

    # create and store the remd_runner
    l = ladder.NearestNeighborLadder(n_trials=48 * 48)
    policy = adaptor.AdaptationPolicy(2.0, 50, 50)
    a = adaptor.EqualAcceptanceAdaptor(n_replicas=N_REPLICAS, adaptation_policy=policy)

    remd_runner = master_runner.MasterReplicaExchangeRunner(N_REPLICAS, max_steps=N_STEPS, ladder=l, adaptor=a, ramp_steps=50)
    store.save_remd_runner(remd_runner)

    # create and store the communicator
    c = comm.MPICommunicator(s.n_atoms, N_REPLICAS)
    store.save_communicator(c)

    # create and save the initial states
    states = [gen_state(s, i) for i in range(N_REPLICAS)]
    store.save_states(states, 0)

    # save data_store
    store.save_data_store()

    return s.n_atoms
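
Examples 3-5 call gen_state(s) and the later scripts call gen_state(s, i), but the helper itself is never shown in any of the snippets. The sketch below is the two-argument form as it commonly appears in MELD setup scripts of this vintage, reproduced as an assumption rather than the authors' actual code; it relies on the system's internal _coordinates array and on system.SystemState, so treat both as unverified.

def gen_state(s, index):
    # Start every replica from the builder's coordinates, centered at the origin.
    pos = s._coordinates
    pos = pos - np.mean(pos, axis=0)
    vel = np.zeros_like(pos)
    # Spread the replicas evenly across the alpha ladder (0 = cold end, 1 = hot end).
    alpha = index / (N_REPLICAS - 1.0)
    energy = 0.0
    return system.SystemState(pos, vel, alpha, energy)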
Example no. 9
def setup_system():
    # load the sequence
    sequence = parse.get_sequence_from_AA1(filename='sequence.dat')
    n_res = len(sequence.split())

    # build the system
    p = system.ProteinMoleculeFromSequence(sequence)
    b = system.SystemBuilder()
    s = b.build_system_from_molecules([p])
    s.temperature_scaler = system.GeometricTemperatureScaler(
        0, 0.4, 300., 550.)

    #
    # Secondary Structure
    #
    ss_scaler = s.restraints.create_scaler('constant')
    ss_rests = parse.get_secondary_structure_restraints(
        filename='ss.dat',
        system=s,
        scaler=ss_scaler,
        torsion_force_constant=2.5,
        distance_force_constant=2.5)
    n_ss_keep = int(len(ss_rests) * 0.70)  # We enforce 70% of the restraints
    s.restraints.add_selectively_active_collection(ss_rests, n_ss_keep)

    #
    # Confinement Restraints
    #
    conf_scaler = s.restraints.create_scaler('constant')
    confinement_rests = []
    confinement_dist = (16.9 * np.log(s.residue_numbers[-1]) - 15.8) / 28.
    for index in range(n_res):
        rest = s.restraints.create_restraint('confine',
                                             conf_scaler,
                                             LinearRamp(0, 100, 0, 1),
                                             res_index=index + 1,
                                             atom_name='CA',
                                             radius=confinement_dist,
                                             force_const=250.0)
        confinement_rests.append(rest)
    s.restraints.add_as_always_active_list(confinement_rests)

    #
    # Distance Restraints
    #
    # High reliability
    #
    dist_scaler = s.restraints.create_scaler('nonlinear',
                                             alpha_min=0.4,
                                             alpha_max=1.0,
                                             factor=4.0)
    #contact80_dist = get_dist_restraints('target_contacts_over_80.dat', s, dist_scaler)
    #n_high_keep = int(0.80 * len(contact80_dist))
    #s.restraints.add_selectively_active_collection(contact80_dist, n_high_keep)

    #
    # Long
    #
    #contact60_dist = get_dist_restraints('target_contacts_over_60.dat', s, dist_scaler)
    #n_high_keep = int(0.60 * len(contact60_dist))
    #s.restraints.add_selectively_active_collection(contact60_dist, n_high_keep)

    #
    # Heuristic Restraints
    #
    subset = np.array(range(n_res)) + 1

    #
    # Hydrophobic
    #
    create_hydrophobes(s, scaler=dist_scaler, group_1=subset)

    #
    # Strand Pairing
    #
    sse, active = make_ss_groups(subset=subset)
    generate_strand_pairs(s, sse, active, subset=subset)

    # create the options
    options = system.RunOptions()
    options.implicit_solvent_model = 'gbNeck2'
    options.use_big_timestep = True
    options.cutoff = 1.8

    options.use_amap = True
    options.amap_beta_bias = 1.0
    options.timesteps = 14286
    options.minimize_steps = 20000

    # create a store
    store = vault.DataStore(s.n_atoms,
                            N_REPLICAS,
                            s.get_pdb_writer(),
                            block_size=BLOCK_SIZE)
    store.initialize(mode='w')
    store.save_system(s)
    store.save_run_options(options)

    # create and store the remd_runner
    l = ladder.NearestNeighborLadder(n_trials=48 * 48)
    policy = adaptor.AdaptationPolicy(2.0, 50, 50)
    a = adaptor.EqualAcceptanceAdaptor(n_replicas=N_REPLICAS,
                                       adaptation_policy=policy)

    remd_runner = master_runner.MasterReplicaExchangeRunner(N_REPLICAS,
                                                            max_steps=N_STEPS,
                                                            ladder=l,
                                                            adaptor=a)
    store.save_remd_runner(remd_runner)

    # create and store the communicator
    c = comm.MPICommunicator(s.n_atoms, N_REPLICAS)
    store.save_communicator(c)

    # create and save the initial states
    states = [gen_state(s, i) for i in range(N_REPLICAS)]
    store.save_states(states, 0)

    # save data_store
    store.save_data_store()

    return s.n_atoms
Example no. 10
def setup_system():
    # load the sequence
    protein_sequence = parse.get_sequence_from_AA1(filename='protein.dat')
    peptide_sequence = parse.get_sequence_from_AA1(filename='peptide.dat')

    n_res_protein = len(protein_sequence.split())
    n_res_peptide = len(peptide_sequence.split())

    # build the system
    protein = system.ProteinMoleculeFromSequence(protein_sequence)
    peptide = system.ProteinMoleculeFromSequence(peptide_sequence)
    protein.set_translation([100, 100, 150])
    peptide.set_translation([100, 150, 100])
    calcium1 = system.ProteinMoleculeFromSequence('CA')
    calcium2 = system.ProteinMoleculeFromSequence('CA')
    calcium3 = system.ProteinMoleculeFromSequence('CA')
    calcium4 = system.ProteinMoleculeFromSequence('CA')
    calcium1.set_translation([100, 105, 50])
    calcium2.set_translation([100, 110, 50])
    calcium3.set_translation([100, 115, 50])
    calcium4.set_translation([100, 120, 50])

    rdc_patcher = patchers.RdcAlignmentPatcher(n_tensors=1)
    ond_patcher = patchers.VirtualSpinLabelPatcher({
        17: 'OND',
        34: 'OND',
        42: 'OND',
        53: 'OND',
        86: 'OND',
        110: 'OND',
        117: 'OND',
        127: 'OND',
        143: 'OND',
        149: 'OND'
    })

    b = system.SystemBuilder()
    s = b.build_system_from_molecules(
        [protein, calcium1, calcium2, calcium3, calcium4, peptide],
        leap_header_cmds="source leaprc.water.tip3p",
        patchers=[rdc_patcher, ond_patcher])

    s.temperature_scaler = system.GeometricTemperatureScaler(
        0, 0.3, 300., 550.)

    ramp = s.restraints.create_scaler('linear_ramp',
                                      start_time=1,
                                      end_time=200,
                                      start_weight=0,
                                      end_weight=1)

    #
    # Secondary Structure
    #
    ss_scaler = s.restraints.create_scaler('constant')
    protein_ss_rests = parse.get_secondary_structure_restraints(
        filename='protein_ss.dat',
        system=s,
        scaler=ss_scaler,
        ramp=ramp,
        torsion_force_constant=2.5,
        distance_force_constant=2.5,
        min_secondary_match=5)

    peptide_ss_rests = parse.get_secondary_structure_restraints(
        filename='peptide_ss.dat',
        system=s,
        scaler=ss_scaler,
        ramp=ramp,
        torsion_force_constant=2.5,
        distance_force_constant=2.5,
        first_residue=int(n_res_protein) + 5)  # + 4 due to calciums
    # + 1 for 1-based indexing

    protein_ss_keep = int(len(protein_ss_rests) * 0.95)
    peptide_ss_keep = int(len(peptide_ss_rests) * 0.95)
    s.restraints.add_selectively_active_collection(protein_ss_rests,
                                                   protein_ss_keep)
    s.restraints.add_selectively_active_collection(peptide_ss_rests,
                                                   peptide_ss_keep)

    #
    # Confinement Restraints
    #
    conf_scaler = s.restraints.create_scaler('constant')
    confinement_rests = []
    n_res = n_res_protein + n_res_peptide + 4
    for index in range(1, n_res + 1):
        protein_rest = s.restraints.create_restraint('confine',
                                                     conf_scaler,
                                                     ramp=ramp,
                                                     res_index=index,
                                                     atom_name='CA',
                                                     radius=5,
                                                     force_const=250.0)
        confinement_rests.append(protein_rest)

    s.restraints.add_as_always_active_list(confinement_rests)

    #
    # Calcium restraints
    #
    scaler = s.restraints.create_scaler('nonlinear',
                                        alpha_min=0.5,
                                        alpha_max=1.0,
                                        factor=4.0)
    calcium_rests = get_dist_restraints('calcium_restraints.dat', s, scaler,
                                        ramp)
    n_keep_calcium = len(calcium_rests)
    s.restraints.add_selectively_active_collection(calcium_rests,
                                                   n_keep_calcium)

    #
    # PRE restraints
    #
    scaler_short = s.restraints.create_scaler('nonlinear',
                                              alpha_min=0.6,
                                              alpha_max=1.0,
                                              factor=4.0)
    scaler_medium = s.restraints.create_scaler('nonlinear',
                                               alpha_min=0.5,
                                               alpha_max=0.6,
                                               factor=4.0)
    scaler_long = s.restraints.create_scaler('nonlinear',
                                             alpha_min=0.4,
                                             alpha_max=0.5,
                                             factor=4.0)
    scalers = [scaler_short, scaler_medium, scaler_long]

    OND_list = [17, 34, 42, 53, 86, 110, 117, 127, 143, 149]
    for ond in OND_list:
        for length, i in zip(['short', 'medium', 'long'], range(3)):
            scaler = scalers[int(i)]
            pre_restraints = get_dist_restraints_pre(
                'rest_files/' + str(ond) + '-pre-' + length + '.dat', s,
                scaler, ramp)
            n_keep_pre = int(len(pre_restraints) * 0.90)
            s.restraints.add_selectively_active_collection(
                pre_restraints, n_keep_pre)

    #
    # RDC Restraints
    #
    rdc_scaler = s.restraints.create_scaler('nonlinear',
                                            alpha_min=0.3,
                                            alpha_max=0.4,
                                            factor=4.0,
                                            strength_at_alpha_max=1.0e-2)
    rdc_rests = parse.get_rdc_restraints(system=s,
                                         patcher=rdc_patcher,
                                         scaler=rdc_scaler,
                                         ramp=ramp,
                                         quadratic_cut=1.0,
                                         scale_factor=1.0e4,
                                         filename='rdc.dat')
    s.restraints.add_as_always_active_list(rdc_rests)

    # create the options
    options = system.RunOptions()
    options.implicit_solvent_model = 'obc'
    options.use_big_timestep = False
    options.cutoff = 1.8
    options.remove_com = True

    options.use_amap = True
    options.amap_beta_bias = 1.0
    options.timesteps = 25000
    options.minimize_steps = 5000

    # create a store
    store = vault.DataStore(s.n_atoms,
                            N_REPLICAS,
                            s.get_pdb_writer(),
                            block_size=BLOCK_SIZE)
    store.initialize(mode='w')
    store.save_system(s)
    store.save_run_options(options)

    # create and store the remd_runner
    l = ladder.NearestNeighborLadder(n_trials=48 * 48)
    policy_1 = adaptor.AdaptationPolicy(2.0, 50, 50)
    a = adaptor.EqualAcceptanceAdaptor(n_replicas=N_REPLICAS,
                                       adaptation_policy=policy_1)

    remd_runner = master_runner.MasterReplicaExchangeRunner(N_REPLICAS,
                                                            max_steps=N_STEPS,
                                                            ladder=l,
                                                            adaptor=a)
    store.save_remd_runner(remd_runner)

    # create and store the communicator
    c = comm.MPICommunicator(s.n_atoms, N_REPLICAS)
    store.save_communicator(c)

    # create and save the initial states
    states = [gen_state(s, i) for i in range(N_REPLICAS)]
    store.save_states(states, 0)

    # save data_store
    store.save_data_store()
    return s.n_atoms
Example no. 11
def setup_system():
    # create the system starting from coordinates in template.pdb
    templates = glob.glob('%s-sep.pdb' % (sys.argv[1]))
    p = system.ProteinMoleculeFromPdbFile(templates[0])
    b = system.SystemBuilder(forcefield="ff14sbside")
    # load non-standard AA force field params, bonds
    s = b.build_system_from_molecules([p])

    # Create temperature ladder
    s.temperature_scaler = system.GeometricTemperatureScaler(
        0.0, 0.5, 300., 500.)

    # Keep protein dimer conformation fairly constant
    dist_scaler = s.restraints.create_scaler('nonlinear',
                                             alpha_min=0.4,
                                             alpha_max=1.0,
                                             factor=4.0)

    const_scaler = s.restraints.create_scaler('constant')
    dist = keep_fixed_distance('%s-contacts.dat' % (sys.argv[1]),
                               s,
                               scaler=const_scaler)
    s.restraints.add_selectively_active_collection(dist, int(len(dist)))

    # Keep DNA hbonds
    #Read sequence file
    sequenceDNA = readSeq('%s-seq.dat' % (sys.argv[1]))
    #Generate hbondsDNA.dat
    make_hbond_restraint_file(sequenceDNA, 0)
    dist_scaler3 = s.restraints.create_scaler('nonlinear',
                                              alpha_min=0.9,
                                              alpha_max=1.0,
                                              factor=4.0)
    dist = keep_fixed_distance('hbondsDNA.dat', s, scaler=const_scaler)
    s.restraints.add_selectively_active_collection(dist, int(len(dist)))

    # Keep DNA close to starting conformation
    rest = make_cartesian_collections(s,
                                      const_scaler,
                                      range(1, 43),
                                      atoms=[
                                          "C1'", "C2", "C2'", "C3'", "C4",
                                          "C4'", "C5", "C5'", "C6", "C7", "C8",
                                          "DA3", "N1", "N2", "N3", "N4", "N6",
                                          "N7", "N9", "O2", "O3'", "O4", "O4'",
                                          "O5'", "O6", "OP1", "OP2", "P"
                                      ])
    # rest = make_cartesian_collections(s, const_scaler, range(1,16),atoms=["C1'", "C2", "C2'", "C3'", "C4", "C4'", "C5", "C5'", "C6", "N1", "N3", "O3'", "O4'"])
    #These are the common atoms to all DNA bases including ends:
    #C1' C2 C2' C3' C4 C4' C5 C5' C6 N1 N3 O3' O4' O5'
    s.restraints.add_as_always_active_list(rest)

    # Create Contacts between protein and DNA
    dom1 = get_dist_restraints('%s-DNA-contacts.dat' % (sys.argv[1]),
                               s,
                               scaler=dist_scaler)
    s.restraints.add_selectively_active_collection(dom1, int(len(dom1)))

    # Find Glycines and Restrain peptide within reasonable distance from DNA
    names = np.array(s.atom_names)
    resid = np.array(s.residue_numbers)
    # resnames = np.array(s.residue_names)
    select = names == 'CB'
    non_gly = resid[select]

    # scaler3 = s.restraints.create_scaler('nonlinear',alpha_min=0.7,alpha_max=1.0, factor=4.0, strength_at_alpha_min=1.0, strength_at_alpha_max=0.5)

    # conf_rest = []
    # group1 = []
    # group2 = []
    # for i in range(2,21):
    #     group1.append( (i,"O5'") )
    # for i in range(22,41):
    #     group1.append( (i,"O5'") )
    # for j in non_gly:
    #     group2.append( (j,"CB") )
    # positioner = s.restraints.create_scaler('linear_positioner',alpha_min=0.7, alpha_max=1.0, pos_min=10., pos_max=15.)
    # conf_rest.append(s.restraints.create_restraint('com', scaler3,ramp=LinearRamp(0,100,0,1),
    #                                                    force_const=75.0,group1=group1,group2=group2,
    #                                                    distance =positioner,weights1=None, weights2=None, dims='xyz'))
    # s.restraints.add_as_always_active_list(conf_rest)

    dist_scaler2 = s.restraints.create_scaler('nonlinear',
                                              alpha_min=0.7,
                                              alpha_max=1.0,
                                              factor=4.0)
    res_groups = get_distance_rests('%s-res_groups.dat' % (sys.argv[1]),
                                    s,
                                    scaler=dist_scaler2)
    s.restraints.add_selectively_active_collection(res_groups,
                                                   int(len(res_groups) - 10))

    #
    # Secondary Structure
    #
    ss_scaler = s.restraints.create_scaler('constant')
    ss_rests = parse.get_secondary_structure_restraints(
        filename='%s-ss.dat' % (sys.argv[1]),
        system=s,
        ramp=LinearRamp(0, 100, 0, 1),
        scaler=ss_scaler,
        torsion_force_constant=2.5,
        distance_force_constant=2.5)
    n_ss_keep = int(len(ss_rests) * 0.96)
    s.restraints.add_selectively_active_collection(ss_rests, n_ss_keep)

    # create the options
    options = system.RunOptions()
    options.implicit_solvent_model = 'gbNeck2'
    options.remove_com = False
    options.use_big_timestep = False  # MD timestep (3.3 fs)
    options.use_bigger_timestep = True  # MD timestep (4.0 fs)
    options.cutoff = 1.8  # cutoff in nm
    options.soluteDielectric = 1.
    #options.implicitSolventSaltConc = None

    options.use_amap = False  # correction to FF12SB
    options.amap_beta_bias = 1.0
    options.timesteps = 11111  # number of MD steps per exchange
    options.minimize_steps = 20000  # init minimization steps

    # create a store
    store = vault.DataStore(s.n_atoms,
                            N_REPLICAS,
                            s.get_pdb_writer(),
                            block_size=BLOCK_SIZE)
    store.initialize(mode='w')
    store.save_system(s)
    store.save_run_options(options)

    # create and store the remd_runner, sets up replica exchange details
    l = ladder.NearestNeighborLadder(n_trials=48)
    policy = adaptor.AdaptationPolicy(2.0, 50, 50)
    a = adaptor.EqualAcceptanceAdaptor(n_replicas=N_REPLICAS,
                                       adaptation_policy=policy)
    remd_runner = master_runner.MasterReplicaExchangeRunner(N_REPLICAS,
                                                            max_steps=N_STEPS,
                                                            ladder=l,
                                                            adaptor=a)
    store.save_remd_runner(remd_runner)

    # create and store the communicator
    c = comm.MPICommunicator(s.n_atoms, N_REPLICAS)
    store.save_communicator(c)

    # create and save the initial states
    # create and save the initial states, initialize each replica with a different template
    states = [gen_state_templates(i, templates) for i in range(N_REPLICAS)]
    store.save_states(states, 0)

    # save data_store
    store.save_data_store()

    return s.n_atoms
Example no. 12
def setup_system():

    # ECO settings
    #eco_cutoff = 0.8 # the distance (in nm) that qualifies as a connection in the graph
    eco_cutoff = 1.0  # the distance (in nm) that qualifies as a connection in the graph
    doing_eco_hydrophobe = True
    doing_eco_hbond = True
    doing_eco_2ndary = False
    doing_eco_strand_pairing = True
    doing_eco_knob = False
    doing_eco_evolutionary = False
    #eco_factor = 4.0 # the factor by which we multiply the eco energy adjustment
    eco_factor = 1  # the factor by which we multiply the eco energy adjustment
    eco_constant = 0.0  # In theory, these could be changed for any restraint
    eco_linear = 0.0

    # load the sequence
    sequence = parse.get_sequence_from_AA1(filename='sequence.dat')
    n_res = len(sequence.split())

    # build the system
    p = system.ProteinMoleculeFromSequence(sequence)
    b = system.SystemBuilder(forcefield="ff14sbside")
    s = b.build_system_from_molecules([p])
    s.temperature_scaler = system.GeometricTemperatureScaler(
        0, 0.6, 300., 450.)

    #
    # Secondary Structure
    #
    ss_scaler = s.restraints.create_scaler('constant')
    ss_rests = parse.get_secondary_structure_restraints(
        filename='ss.dat',
        system=s,
        ramp=LinearRamp(0, 100, 0, 1),
        scaler=ss_scaler,
        torsion_force_constant=2.5,
        distance_force_constant=2.5,
        doing_eco=doing_eco_2ndary,
        eco_factor=eco_factor,
        eco_constant=eco_constant,
        eco_linear=eco_linear)
    n_ss_keep = int(len(ss_rests) * 0.70)  # We enforce 70% of the restraints
    s.restraints.add_selectively_active_collection(ss_rests, n_ss_keep)

    #
    # Confinement Restraints
    #
    #conf_scaler = s.restraints.create_scaler('nonlinear', alpha_min=0.4, alpha_max=1.0, factor=4.0,strength_at_alpha_min=0.0, strength_at_alpha_max=1.0)
    #confinement_rests = []
    #confinement_dist = (16.9*np.log(s.residue_numbers[-1])-15.8)/28.*1.2
    #for index in range(n_res):
    #    rest = s.restraints.create_restraint('confine', conf_scaler, LinearRamp(0,100,0,1),res_index=index+1, atom_name='CA', radius=confinement_dist, force_const=250.0)
    #    confinement_rests.append(rest)
    #s.restraints.add_as_always_active_list(confinement_rests)

    #
    # Distance Restraints
    #
    dist_scaler = s.restraints.create_scaler('nonlinear',
                                             alpha_min=0.4,
                                             alpha_max=1.0,
                                             factor=4.0)

    # High reliability
    #
    #
    # Create Plateau kind of scalers
    #
    low_2 = make_CO_scaler(system=s,
                           scaler_type='plateaunonlinear',
                           alpha_min=0.70,
                           alpha_one=0.85,
                           alpha_two=0.85,
                           alpha_max=1.0,
                           strength_at_alpha_min=1.0,
                           strength_at_alpha_max=0.0)
    low_4 = make_CO_scaler(system=s,
                           scaler_type='plateaunonlinear',
                           alpha_min=0.55,
                           alpha_one=0.70,
                           alpha_two=0.70,
                           alpha_max=0.85,
                           strength_at_alpha_min=1.0,
                           strength_at_alpha_max=0.0)
    low_6 = s.restraints.create_scaler('plateaunonlinear',
                                       alpha_min=0.40,
                                       alpha_one=0.55,
                                       alpha_two=0.55,
                                       alpha_max=0.7,
                                       factor=4.0,
                                       strength_at_alpha_min=1.0,
                                       strength_at_alpha_max=0.0)
    low_8 = s.restraints.create_scaler('nonlinear',
                                       alpha_min=0.40,
                                       alpha_max=0.55,
                                       factor=4.0)
    #
    # Heuristic Restraints
    #
    subset = np.array(range(n_res)) + 1

    #
    # Hydrophobic
    #

    create_hydrophobes(s,
                       ContactsPerHydroph=1.2,
                       scaler=dist_scaler,
                       group_1=subset,
                       CO=False,
                       doing_eco=doing_eco_hydrophobe,
                       eco_factor=eco_factor,
                       eco_constant=eco_constant,
                       eco_linear=eco_linear)

    #create_hydrophobes(s,ContactsPerHydroph=1.2/4.,scaler=low_2,group_1=subset,CO=True)
    #create_hydrophobes(s,ContactsPerHydroph=1.2/2.,scaler=low_4,group_1=subset,CO=True)
    #create_hydrophobes(s,ContactsPerHydroph=1.2*3/4.,scaler=low_6,group_1=subset,CO=False)
    #create_hydrophobes(s,ContactsPerHydroph=1.2,scaler=low_8,group_1=subset,CO=False)

    #
    # HBonds
    #

    create_HydrogenBond(s,
                        HBPerResidue=0.10,
                        scaler=dist_scaler,
                        group_1=subset,
                        CO=False,
                        doing_eco=doing_eco_hbond,
                        eco_factor=eco_factor,
                        eco_constant=eco_constant,
                        eco_linear=eco_linear)

    #create_HydrogenBond(s,HBPerResidue=0.10/4.,scaler=low_2,group_1=subset,CO=True)
    #create_HydrogenBond(s,HBPerResidue=0.10/2.,scaler=low_4,group_1=subset,CO=True)
    #create_HydrogenBond(s,HBPerResidue=0.10*3/4.,scaler=low_6,group_1=subset,CO=False)
    #create_HydrogenBond(s,HBPerResidue=0.10,scaler=low_8,group_1=subset,CO=False)

    #
    # Strand Pairing
    #
    sse, active = make_ss_groups(subset=subset)
    try:

        generate_strand_pairs(s,
                              sse,
                              float(active),
                              subset=subset,
                              scaler=dist_scaler,
                              CO=False,
                              doing_eco=doing_eco_strand_pairing,
                              eco_factor=eco_factor,
                              eco_constant=eco_constant,
                              eco_linear=eco_linear)

        #generate_strand_pairs(s,sse,float(active)/4.,subset=subset,scaler=low_2,CO=True)
        #generate_strand_pairs(s,sse,float(active)/2.,subset=subset,scaler=low_4,CO=True)
        #generate_strand_pairs(s,sse,float(active)*3/4.,subset=subset,scaler=low_6,CO=False)
        #generate_strand_pairs(s,sse,float(active),subset=subset,scaler=low_8,CO=False)
    except:
        print "Not using Strand Pairing Heuristic"
        pass

    #
    # Evolutionary restraints
    #
    try:
        create_Evolution(s,
                         scaler=dist_scaler,
                         fname='evolution_contacts.dat',
                         doing_eco=doing_eco_evolutionary,
                         eco_factor=eco_factor,
                         eco_constant=eco_constant,
                         eco_linear=eco_linear)
    except:
        print "Not using Evolutionary restraints"
        pass

    #
    # Distance Restraints
    #
    #
    # Knob restraints
    #
    try:
        knobs, knob_accuracy = get_knob_restraints('Knob.data',
                                                   s,
                                                   scaler=dist_scaler,
                                                   doing_eco=doing_eco_knob,
                                                   eco_factor=eco_factor,
                                                   eco_constant=eco_constant,
                                                   eco_linear=eco_linear)
        n_knobs = int(len(knobs) * knob_accuracy)
        s.restraints.add_selectively_active_collection(knobs, n_knobs)
    except:
        print "Not using Knob-Socket predictions"
        pass

    # setup mcmc at startup
    movers = []
    n_atoms = s.n_atoms
    for i in range(1, n_res + 1):
        n = s.index_of_atom(i, 'N') - 1
        ca = s.index_of_atom(i, 'CA') - 1
        c = s.index_of_atom(i, 'C') - 1

        mover = mc.DoubleTorsionMover(n, ca, list(range(ca, n_atoms)), ca, c,
                                      list(range(c, n_atoms)))

        movers.append((mover, 1))

    sched = mc.MonteCarloScheduler(movers, n_res * 60)

    # create the options
    options = system.RunOptions()
    options.implicit_solvent_model = 'gbNeck2'
    options.use_big_timestep = False
    options.use_bigger_timestep = True
    options.cutoff = 1.8
    #options.eco_cutoff = eco_cutoff
    # set eco_output very high so that log file does not print
    options.eco_params = {
        'eco_cutoff': 1.0,
        'eco_output_freq': 10000000,
        'print_avg_eco': False,
        'print_eco_value_array': False,
    }

    options.use_amap = False
    options.amap_beta_bias = 1.0
    options.timesteps = 11111
    options.minimize_steps = 20000
    options.min_mc = sched
    options.make_alpha_carbon_list(s.atom_names)
    print "alpha_carbon_indeces:", options.alpha_carbon_indeces

    # create a store
    store = vault.DataStore(s.n_atoms,
                            N_REPLICAS,
                            s.get_pdb_writer(),
                            block_size=BLOCK_SIZE)
    store.initialize(mode='w')
    store.save_system(s)
    store.save_run_options(options)

    # create and store the remd_runner
    l = ladder.NearestNeighborLadder(n_trials=48 * 48)
    policy = adaptor.AdaptationPolicy(2.0, 50, 50)
    a = adaptor.EqualAcceptanceAdaptor(n_replicas=N_REPLICAS,
                                       adaptation_policy=policy)

    remd_runner = master_runner.MasterReplicaExchangeRunner(N_REPLICAS,
                                                            max_steps=N_STEPS,
                                                            ladder=l,
                                                            adaptor=a)
    store.save_remd_runner(remd_runner)

    # create and store the communicator
    c = comm.MPICommunicator(s.n_atoms, N_REPLICAS)
    store.save_communicator(c)

    # create and save the initial states
    states = [gen_state(s, i) for i in range(N_REPLICAS)]
    store.save_states(states, 0)

    # save data_store
    store.save_data_store()

    return s.n_atoms
Example no. 13
def setup_system():
    # load the sequence
    sequence = parse.get_sequence_from_AA1(filename='sequence.dat')
    n_res = len(sequence.split())

    # build the system
    p = system.ProteinMoleculeFromSequence(sequence)
    b = system.SystemBuilder(forcefield="ff14sbside")
    s = b.build_system_from_molecules([p])
    s.temperature_scaler = system.GeometricTemperatureScaler(0, 0.6, 300., 450.)

    #
    # Secondary Structure
    #
    ss_scaler = s.restraints.create_scaler('constant')
    ss_rests = parse.get_secondary_structure_restraints(filename='ss.dat', system=s,ramp=LinearRamp(0,100,0,1), scaler=ss_scaler,
            torsion_force_constant=2.5, distance_force_constant=2.5)
    n_ss_keep = int(len(ss_rests) * 1.0)  # We enforce 100% of the restraints
    s.restraints.add_selectively_active_collection(ss_rests, n_ss_keep)


    #
    # Distance Restraints
    #
    dist_scaler = s.restraints.create_scaler('nonlinear', alpha_min=0.4, alpha_max=1.0, factor=4.0)

    # High reliability
    #
    #
    old_protocol = s.restraints.create_scaler('nonlinear', alpha_min=0.40, alpha_max=1.00, factor=4.0)
    #
    # Heuristic Restraints
    #
    subset = np.array(list(range(n_res))) + 1

    #
    # Hydrophobic
    #
    create_hydrophobes(s,ContactsPerHydroph=1.2,scaler=old_protocol,group_1=subset,CO=False)
    #
    # Strand Pairing
    #
    sse,active = make_ss_groups(subset=subset)
    try:
        generate_strand_pairs(s,sse,float(active),subset=subset,scaler=old_protocol,CO=False)
    except:
        print("Not using Strand Pairing Heuristic")
        pass

    #
    # Evolutionary restraints
    #
    try:
        create_Evolution(s,accuracy=0.7,scaler=dist_scaler,fname='evolution_contacts.dat')
    except:
        print("Not using Evolutionary restraints")
        pass

    #
    # Distance Restraints
    #
    #
    # Knob restraints
    #
    try:
        knobs,knob_accuracy = get_knob_restraints('Knob.data',s,scaler=dist_scaler)
        n_knobs = int(len(knobs) * knob_accuracy) 
        s.restraints.add_selectively_active_collection(knobs,n_knobs)
    except:
        print("Not using Knob-Socket predictions")
        pass



    # setup mcmc at startup
    movers = []
    n_atoms = s.n_atoms
    for i in range(1, n_res + 1):
        n = s.index_of_atom(i, 'N') - 1
        ca = s.index_of_atom(i, 'CA') - 1
        c = s.index_of_atom(i, 'C') - 1

        mover = mc.DoubleTorsionMover(n, ca, list(range(ca, n_atoms)),
                                      ca, c, list(range(c, n_atoms)))

        movers.append((mover, 1))

    sched = mc.MonteCarloScheduler(movers, n_res * 60)

    # create the options
    options = system.RunOptions()
    options.implicit_solvent_model = 'gbNeck2'
    options.use_big_timestep = False
    options.use_bigger_timestep = True
    options.cutoff = 1.8

    options.use_amap = False
    options.amap_alpha_bias = 1.0
    options.amap_beta_bias = 1.0
    options.timesteps = 11111
    options.minimize_steps = 20000
    # for i in range(30):
    #     print("Heads up! using MC minimizer!")
    # options.min_mc = sched

    # create a store
    store = vault.DataStore(s.n_atoms, N_REPLICAS, s.get_pdb_writer(), block_size=BLOCK_SIZE)
    store.initialize(mode='w')
    store.save_system(s)
    store.save_run_options(options)

    # create and store the remd_runner
    l = ladder.NearestNeighborLadder(n_trials=100)
    policy = adaptor.AdaptationPolicy(2.0, 50, 50)
    a = adaptor.EqualAcceptanceAdaptor(n_replicas=N_REPLICAS, adaptation_policy=policy)

    remd_runner = master_runner.MasterReplicaExchangeRunner(N_REPLICAS, max_steps=N_STEPS, ladder=l, adaptor=a)
    store.save_remd_runner(remd_runner)

    # create and store the communicator
    c = comm.MPICommunicator(s.n_atoms, N_REPLICAS)
    store.save_communicator(c)

    # create and save the initial states
    states = [gen_state(s, i) for i in range(N_REPLICAS)]
    store.save_states(states, 0)

    # save data_store
    store.save_data_store()

    return s.n_atoms