Example #1
def reduce_raw_data(raw_data, qmax, bandwidth, level=0.05, q_background=None):
    print "delta_q is ", bandwidth
    if qmax > raw_data.q[-1]:
        qmax = raw_data.q[-1]
    ### Get rid of noisy signal at very low q ###
    qmin_indx = flex.max_index(raw_data.i)
    qmin = raw_data.q[qmin_indx]
    new_data = get_q_array_uniform_body(raw_data,
                                        q_min=qmin,
                                        q_max=qmax,
                                        level=level)
    qmax = new_data.q[-1]
    print "LEVEL=%f" % level, "and Q_MAX=%f" % qmax
    raw_q = raw_data.q[qmin_indx:]
    raw_i = raw_data.i[qmin_indx:]
    raw_s = raw_data.s[qmin_indx:]
    ### Take care of the background (set zero at very high q) ###
    if (q_background is not None):
        cutoff = flex.bool(raw_q > q_background)
        q_bk_indx = flex.last_index(cutoff, False)
        if (q_bk_indx < raw_q.size()):
            bkgrd = flex.mean(raw_i[q_bk_indx:])
            print "Background correction: I=I-background, where background=", bkgrd
            raw_i = flex.abs(raw_i - bkgrd)

    q = flex.double(range(int(
        (qmax - qmin) / bandwidth) + 1)) * bandwidth + qmin
    raw_data.i = flex.linear_interpolation(raw_q, raw_i, q)
    raw_data.s = flex.linear_interpolation(raw_q, raw_s, q)
    raw_data.q = q

    return raw_data
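
A hypothetical call sketch, not taken from the original sources: it assumes raw_data is an object with flex.double attributes q, i and s (for instance as returned by saxs_read_write.read_standard_ascii_qis in the later examples), and all numeric values and the file name are placeholders.

# Hypothetical usage of reduce_raw_data; file name and numbers are placeholders.
raw_data = saxs_read_write.read_standard_ascii_qis("experiment.qis")
reduced = reduce_raw_data(raw_data,
                          qmax=0.3,           # highest q to keep
                          bandwidth=0.002,    # step of the interpolated q grid
                          level=0.05,         # uniform-density criterion
                          q_background=0.28)  # mean I beyond this q is subtracted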
Example #2
def smear_data(I_s, q_s, delta_q):
    tmp_q1 = q_s - delta_q
    tmp_q2 = q_s + delta_q
    tmp_i1 = flex.linear_interpolation(q_s, I_s, tmp_q1[1:-2])
    tmp_i2 = flex.linear_interpolation(q_s, I_s, tmp_q2[1:-2])
    new_i = (tmp_i1 + tmp_i2) / 2.0
    return new_i
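
A minimal usage sketch, added here as an assumption rather than taken from the original source; it assumes flex comes from scitbx.array_family, as is standard in the cctbx ecosystem. Note that smear_data evaluates the interpolation only at tmp_q1[1:-2] and tmp_q2[1:-2], so the returned array is three elements shorter than the input grid.

# Usage sketch; the q grid and intensities below are placeholders.
from scitbx.array_family import flex

q_s = flex.double([0.01 * k for k in range(1, 201)])  # placeholder q grid
i_s = flex.exp(q_s * q_s * -100.0)                    # placeholder smooth intensity
i_smeared = smear_data(i_s, q_s, 0.005)               # average of I(q - delta_q) and I(q + delta_q)
# i_smeared has three fewer entries than q_s, since only tmp_q[1:-2] is interpolated.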
Example #3
    def reduction(self, data, np=50):
        qmin = max(data.q[0] - 0.01, 0.0)
        qmax = data.q[-1] + 0.01
        new_q = flex.double(range(np)) / float(np) * (qmax - qmin) + qmin
        data.i = flex.linear_interpolation(data.q, data.i, new_q)
        data.s = flex.linear_interpolation(data.q, data.s, new_q)
        data.q = new_q
        return data
Example #4
    def __init__(self,
                 data,
                 dmax,
                 n_param,
                 n_fst_pass,
                 n_trial=10,
                 n_simplex=10,
                 data_q=None):
        self.trials = []
        self.data = data
        self.dmax = max(data.q)
        if (data_q is None):
            self.data_q = flex.double(range(51)) / 100.0
        else:
            self.data_q = data_q
        self.integrator_obj = pr_tools.fast_integrator(self.dmax, self.data_q)
        self.new_r = flex.double(range(1, int(self.dmax + 1)))
        interpolated_pr = flex.linear_interpolation(data.q, data.i, self.new_r)
        #print list(interpolated_pr)
        self.i_expt_pr = self.integrator_obj.get_intensity_from_pr_array(
            interpolated_pr)

        for ii in range(n_trial):
            tmp_object = rcs_fitter(n_param,
                                    n_fst_pass,
                                    dmax,
                                    data,
                                    simplex_trial=n_simplex)
            self.trials.append(tmp_object)
        self.collect_scores()
Example #5
    def linear_interpolation(self, new_r):
        if (self.r[-1] < new_r[-1]):
            print "WARNING: xx Interpolation out of bounds xx "
            self.r.append(new_r[-1])
            self.pr.append(0)
        new_pr = flex.linear_interpolation(self.r, self.pr, new_r)
        return new_pr / (flex.sum(new_pr) * (new_r[1] - new_r[0]))
Example #6
def calibration(pr_1, pr_2, n_params):
    x1 = pr_1.r / pr_1.r[-1]
    x2 = pr_2.r / pr_2.r[-1]
    cdf_1 = pr_1.pr2cdf()
    cdf_2 = pr_2.pr2cdf()
    new_cdf_2 = flex.linear_interpolation(x2, cdf_2, x1).deep_copy()
    fitting = nonlinear_fit(new_cdf_2, cdf_1, n_params)
    solution = fitting.solution
    fn = chebyshev_polynome(n_params, -1.0, 1.0, solution)
    return fn
Example #7
def reduce_raw_data(raw_data, qmax, bandwidth, level=0.05, q_background=None):
    print
    print " ====  Data reduction ==== "
    print
    print "  Preprocessing of data increases efficiency of shape retrieval procedure."
    print
    print "   -  Interpolation stepsize                           :  %4.3e" % bandwidth
    print "   -  Uniform density criteria:  level is set to       :  %4.3e" % level
    print "                                 maximum q to consider :  %4.3e" % qmax
    qmin_indx = flex.max_index(raw_data.i)
    qmin = raw_data.q[qmin_indx]
    new_data = get_q_array_uniform_body(raw_data,
                                        q_min=qmin,
                                        q_max=qmax,
                                        level=level)
    qmax = new_data.q[-1]
    if qmax > raw_data.q[-1]:
        qmax = raw_data.q[-1]
    print "      Resulting q range to use in  search:   q start   :  %4.3e" % qmin
    print "                                             q stop    :  %4.3e" % qmax
    print
    raw_q = raw_data.q[qmin_indx:]
    raw_i = raw_data.i[qmin_indx:]
    raw_s = raw_data.s[qmin_indx:]
    ### Take care of the background (set zero at very high q) ###
    if (q_background is not None):
        cutoff = flex.bool(raw_q > q_background)
        q_bk_indx = flex.last_index(cutoff, False)
        if (q_bk_indx < raw_q.size()):
            bkgrd = flex.mean(raw_i[q_bk_indx:])
            print "Background correction: I=I-background, where background=", bkgrd
            raw_i = flex.abs(raw_i - bkgrd)

    q = flex.double(range(int(
        (qmax - qmin) / bandwidth) + 1)) * bandwidth + qmin
    raw_data.i = flex.linear_interpolation(raw_q, raw_i, q)
    raw_data.s = flex.linear_interpolation(raw_q, raw_s, q)
    raw_data.q = q

    return raw_data
Example #8
    def __init__(self,
                 start_pdb,
                 target_I,
                 ntotal,
                 nmodes,
                 max_rmsd,
                 backbone_scale,
                 prefix,
                 weight='i',
                 method='rtb',
                 log='tmp.log'):
        self.counter = 0
        self.nmode_init = ntotal
        self.method = method
        self.nmodes = nmodes
        self.topn = 10
        self.Niter = 0
        self.modes = flex.int(range(self.nmode_init)) + 7
        self.cutoff = 8
        self.weighted = True
        self.log = open(log, 'w')
        self.chi = open(prefix + '.chi', 'w')
        pdb_inp = pdb.input(file_name=start_pdb)
        crystal_symmetry = pdb_inp.xray_structure_simple().\
            cubic_unit_cell_around_centered_scatterers(
            buffer_size = 10).crystal_symmetry()
        self.pdb_processor = process_pdb_file_srv(
            crystal_symmetry=crystal_symmetry)

        self.expt = saxs_read_write.read_standard_ascii_qis(target_I)
        self.q = self.expt.q
        self.expt_I = self.expt.i
        self.expt_s = self.expt.s
        if (self.q.size() > 100):
            self.q = self.interpolation(self.q, n_pts=30)
            self.expt_I = flex.linear_interpolation(self.expt.q, self.expt.i,
                                                    self.q)
            self.expt_s = flex.linear_interpolation(self.expt.q, self.expt.s,
                                                    self.q)

        #if( weight=='i'):
        self.expt_s = self.expt_I

        for aa, bb, cc in zip(self.q, self.expt_I, self.expt_s):
            print aa, bb, cc

        start_name = start_pdb
        self.pdb = PDB(start_name, method=self.method)
        self.she_engine = she.she(start_name, self.q)
        self.natom = self.pdb.natm
        self.scale_factor = backbone_scale
        self.pdb.Hessian(self.cutoff, self.nmode_init, self.scale_factor)

        self.root = prefix
        self.scale = 0
        self.drmsd = max_rmsd
        if (self.method == 'rtb'):
            self.drmsd = self.drmsd * 2
        self.Rmax2 = self.natom * (self.drmsd)**2.0
        self.step_size = sqrt(self.Rmax2 / self.nmodes) * 6.0

        self.new_indx = flex.int(range(self.natom))
        self.stop = False
        self.minscore = 1e20
        self.minDev = 0  # minimum deviation of refined structures compared to the refined structure from the previous step
        self.optNum = 1  # number of iterations between geometry optimizations
        ### set running env for pulchra ###
        import libtbx.env_config
        env = libtbx.env_config.unpickle()
        self.pulchra = env.build_path + '/pulchra/exe/pulchra'
        self.iterate()
        self.log.close()
        self.chi.close()
Example #9
    def __init__(self,
                 start_pdb,
                 target_I,
                 max_rmsd,
                 backbone_scale,
                 prefix,
                 nstep_per_cycle=100,
                 method='ca',
                 weight='i',
                 log='tmp.log'):
        self.counter = 0
        self.topn = 3
        self.Niter = 0
        self.method = method
        self.cutoff = 12
        self.log = open(log, 'w')
        self.nstep_per_cycle = nstep_per_cycle
        self.pdb_obj = PDB(start_pdb, method=self.method)
        crystal_symmetry = self.pdb_obj.pdbi.xray_structure_simple().\
            cubic_unit_cell_around_centered_scatterers(
            buffer_size = 10).crystal_symmetry()
        self.pdb_processor = process_pdb_file_srv(
            crystal_symmetry=crystal_symmetry)

        self.expt = saxs_read_write.read_standard_ascii_qis(target_I)
        self.q = self.expt.q
        self.expt_I = self.expt.i
        self.expt_s = self.expt.s
        if (self.q.size() > 20):
            self.q = self.interpolation(self.q, n_pts=20)
            self.expt_I = flex.linear_interpolation(self.expt.q, self.expt.i,
                                                    self.q)
            self.expt_s = flex.linear_interpolation(self.expt.q, self.expt.s,
                                                    self.q)

        if (weight == 'i'):
            self.expt_s = flex.sqrt(self.expt_I)

        self.time_nm = 0
        self.time_she = 0
        self.she_engine = she.she(start_pdb, self.q)
        self.natom = self.pdb_obj.natm
        self.nbeads = self.pdb_obj.n_block
        self.scale_factor = backbone_scale
        time1 = time.time()
        self.time_nm += (time.time() - time1)

        self.root = prefix
        self.drmsd = max_rmsd
        self.step_size = self.drmsd * 3
        self.threshold = self.drmsd**2.0

        self.new_indx = flex.int(range(self.natom))
        self.stop = False
        self.minscore = 1e20
        self.minDev = 0  # minimum deviation of refined structures compared to the refined structure from the previous step
        self.optNum = 10  # number of iterations between geometry optimizations

        #self.estimate_init_weight()
        #self.restraint_weight *= 8  ## contribute 8x of chi initially
        self.iterate()
        self.log.close()

        print "time used for NM : %d" % self.time_nm
        print "time used for she: %d" % self.time_she
Example #10
    def __init__(self,
                 start_pdb,
                 target_I,
                 ntotal,
                 nmodes,
                 max_rmsd,
                 backbone_scale,
                 prefix,
                 weight='i',
                 method='rtb',
                 log='tmp.log'):
        self.counter = 0
        self.nmode_init = ntotal
        self.nmodes = 3  #nmodes
        self.method = method
        self.topn = 3
        self.Niter = 0
        self.modes = flex.int(range(self.nmode_init)) + 7
        self.cutoff = 12
        self.weighted = True
        self.log = open(log, 'w')
        pdb_inp = pdb.input(file_name=start_pdb)
        crystal_symmetry = pdb_inp.xray_structure_simple().\
            cubic_unit_cell_around_centered_scatterers(
            buffer_size = 10).crystal_symmetry()
        #    uc=cctbx.uctbx.unit_cell("300,300,300,90,90,90")
        #    crystal_symmetry=cctbx.crystal.symmetry(uc, 'P1')
        self.pdb_processor = process_pdb_file_srv(
            crystal_symmetry=crystal_symmetry)

        self.expt = saxs_read_write.read_standard_ascii_qis(target_I)
        self.q = self.expt.q
        self.expt_I = self.expt.i
        self.expt_s = self.expt.s
        if (self.q.size() > 50):
            self.q = self.interpolation(self.q, n_pts=50)
            self.expt_I = flex.linear_interpolation(self.expt.q, self.expt.i,
                                                    self.q)
            self.expt_s = flex.linear_interpolation(self.expt.q, self.expt.s,
                                                    self.q)

        if (weight == 'i'):
            self.expt_s = self.expt_I

        self.time_nm = 0
        self.time_she = 0
        start_name = start_pdb
        self.pdb = PDB(start_name, method=self.method)
        self.she_engine = she.she(start_name, self.q)
        self.natom = self.pdb.natm
        self.scale_factor = backbone_scale
        time1 = time.time()
        self.nmode = self.pdb.Hessian(self.cutoff, self.nmode_init,
                                      self.scale_factor)
        self.time_nm += (time.time() - time1)

        self.root = prefix
        self.drmsd = max_rmsd
        self.Rmax2 = self.natom * (self.drmsd)**2.0
        self.step_size = sqrt(self.Rmax2 / self.nmodes) * 5.0

        self.new_indx = flex.int(range(self.natom))
        self.stop = False
        self.minscore = 1e20
        self.minDev = 0  # minimum deviation of refined structures compared to the refined structure from the previous step
        self.optNum = 10  # number of iterations between geometry optimizations
        self.iterate()
        self.log.close()

        print "time used for NM : %d" % self.time_nm
        print "time used for she: %d" % self.time_she
Example #11
def run_single_pdb(params, log):
    group_size = params.refine.group_size
    max_num_fibonacci = 12

    target_data = saxs_read_write.read_standard_ascii_qis(params.refine.target)
    if (params.refine.data_type == 'pr'):
        dmax = int(target_data.q[-1] + 0.5)
        new_r = flex.double(range(dmax))
        target_data = flex.linear_interpolation(target_data.q, target_data.i,
                                                new_r)

    rbs = []
    center = []
    pdb_objects = []

    main_body = True

    pdb_inp = pdb.hierarchy.input(params.refine.model[0])
    cache = pdb_inp.hierarchy.atom_selection_cache()
    max_size = 0
    for item in params.refine.rigid_body:
        fix_location = item.fixed_position
        fix_orientation = item.fixed_orientation
        cache_selected = cache.selection(string=item.selection)
        pdb_obj = pdb_inp.hierarchy.atoms().select(cache_selected)
        size = pdb_obj.size()
        atom_indx = flex.int(range(0, size, group_size))
        xyz = flex.vec3_double()
        atoms = pdb_obj
        for a in atoms:
            xyz.append(a.xyz)
        if (size > max_size):
            max_size = size
            rbs.insert(
                0,
                rb(xyz, atom_indx, dmax, max_num_fibonacci, fix_location,
                   fix_orientation))
            pdb_objects.insert(0, pdb_obj)
        else:
            rbs.append(
                rb(xyz, atom_indx, dmax, max_num_fibonacci, fix_location,
                   fix_orientation))
            pdb_objects.append(pdb_obj)

    num_body = len(pdb_objects)

    shift = [(0, 0, 0)]
    if (params.refine.data_type == 'pr'):
        rb_eng = rbe.rb_engine(rbs, int(dmax))
    else:
        rb_eng = rbe.rb_engine(rbs, int(dmax), q_array=target_data.q)

    for ii in range(1, num_body):
        shift.append(
            flex.double(rbs[ii].center()) - flex.double(rbs[0].center()))
        #  rb_eng.rbs[ii].rotate_only(list( flex.random_double(3,) ), 10.0/180.0*pi)
        rb_eng.rbs[ii].translate_after_rotation(list(shift[ii]))

    filename = "initial_model.pdb"
    write_pdb_single(filename, num_body, rb_eng, pdb_inp, pdb_objects)

    refine = refine_rb(rb_eng,
                       target_data,
                       data_type=params.refine.data_type,
                       shift=shift,
                       both=True)

    solution = refine.solution
    refine.target(solution)

    filename = params.refine.output + ".pdb"
    write_pdb_single(filename, num_body, rb_eng, pdb_inp, pdb_objects)