Example #1
# Imports assumed by this example (the DIALS / dxtbx / cctbx toolkits):
from dxtbx.model import Experiment, ExperimentList, MosaicCrystalSauter2014
from scitbx import matrix

from dials.algorithms.refinement.prediction.managed_predictors import (
    ExperimentsPredictorFactory,
)
from dials.array_family import flex
from dials.model.data import Shoebox


def sequence_to_stills(experiments, reflections, params):
    assert len(reflections) == 1
    reflections = reflections[0]

    new_experiments = ExperimentList()
    new_reflections = flex.reflection_table()

    # This is the subset of columns needed to integrate
    for key in [
            "id",
            "imageset_id",
            "shoebox",
            "bbox",
            "intensity.sum.value",
            "intensity.sum.variance",
            "entering",
            "flags",
            "miller_index",
            "panel",
            "xyzobs.px.value",
            "xyzobs.px.variance",
    ]:
        if key in reflections:
            new_reflections[key] = type(reflections[key])()
        elif key == "imageset_id":
            assert len(experiments.imagesets()) == 1
            reflections["imageset_id"] = flex.int(len(reflections), 0)
            new_reflections["imageset_id"] = flex.int()
        elif key == "entering":
            reflections["entering"] = flex.bool(len(reflections), False)
            new_reflections["entering"] = flex.bool()
        else:
            raise RuntimeError(
                "Expected key not found in reflection table: %s" % key)

    for expt_id, experiment in enumerate(experiments):
        # Get the goniometer setting matrix
        goniometer_setting_matrix = matrix.sqr(
            experiment.goniometer.get_setting_rotation())
        goniometer_axis = matrix.col(experiment.goniometer.get_rotation_axis())
        step = experiment.scan.get_oscillation()[1]

        refls = reflections.select(reflections["id"] == expt_id)
        _, _, _, _, z1, z2 = refls["bbox"].parts()

        # Create an experiment for each scan point
        for i_scan_point in range(*experiment.scan.get_array_range()):
            if params.max_scan_points and i_scan_point >= params.max_scan_points:
                break
            # The A matrix is the goniometer setting matrix for this scan point
            # times the scan-varying A matrix at this scan point. Note that the
            # goniometer setting matrix for scan point zero will be the identity
            # matrix and represents the beginning of the oscillation.
            # For stills, the A matrix needs to be positioned at the midpoint of an
            # oscillation step. Hence, here the goniometer setting matrix is rotated
            # by a further half oscillation step.
            A = (goniometer_axis.axis_and_angle_as_r3_rotation_matrix(
                angle=experiment.scan.get_angle_from_array_index(i_scan_point)
                + (step / 2),
                deg=True,
            ) * goniometer_setting_matrix * matrix.sqr(
                experiment.crystal.get_A_at_scan_point(i_scan_point)))
            crystal = MosaicCrystalSauter2014(experiment.crystal)
            crystal.set_A(A)

            # Copy in mosaic parameters if available
            if params.output.domain_size_ang is None and hasattr(
                    experiment.crystal, "get_domain_size_ang"):
                crystal.set_domain_size_ang(
                    experiment.crystal.get_domain_size_ang())
            elif params.output.domain_size_ang is not None:
                crystal.set_domain_size_ang(params.output.domain_size_ang)

            if params.output.half_mosaicity_deg is None and hasattr(
                    experiment.crystal, "get_half_mosaicity_deg"):
                crystal.set_half_mosaicity_deg(
                    experiment.crystal.get_half_mosaicity_deg())
            elif params.output.half_mosaicity_deg is not None:
                crystal.set_half_mosaicity_deg(
                    params.output.half_mosaicity_deg)

            new_experiment = Experiment(
                detector=experiment.detector,
                beam=experiment.beam,
                crystal=crystal,
                imageset=experiment.imageset.as_imageset()
                [i_scan_point:i_scan_point + 1],
            )
            new_experiments.append(new_experiment)

            # Each reflection in a 3D shoebox can be found on multiple images.
            # Slice the reflections such that any reflection on this scan point
            # is included with this image
            new_id = len(new_experiments) - 1
            subrefls = refls.select((i_scan_point >= z1) & (i_scan_point < z2))
            for refl in subrefls.rows():
                assert i_scan_point in range(*refl["bbox"][4:6])

                new_sb = Shoebox()
                start = i_scan_point - refl["bbox"][4]  # z1
                new_sb.data = refl["shoebox"].data[start:start + 1, :, :]
                new_sb.background = refl["shoebox"].background[start:start + 1, :, :]
                new_sb.mask = refl["shoebox"].mask[start:start + 1, :, :]
                intensity = new_sb.summed_intensity()
                # keep the original shoebox bounds but reset the z values
                new_sb.bbox = tuple(list(refl["bbox"])[0:4] + [0, 1])
                new_sb.panel = refl["panel"]
                new_refl = {}
                new_refl["id"] = new_refl["imageset_id"] = new_id
                new_refl["shoebox"] = new_sb
                new_refl["bbox"] = new_sb.bbox
                new_refl["intensity.sum.value"] = intensity.observed.value
                new_refl["intensity.sum.variance"] = intensity.observed.variance
                for key in ["entering", "flags", "miller_index", "panel"]:
                    new_refl[key] = refl[key]
                centroid = new_sb.centroid_foreground_minus_background()
                new_refl["xyzobs.px.value"] = centroid.px.position
                new_refl["xyzobs.px.variance"] = centroid.px.variance
                new_reflections.append({})
                for key in new_refl:
                    new_reflections[key][-1] = new_refl[key]

    # Re-predict using the reflection slices and the stills predictors
    ref_predictor = ExperimentsPredictorFactory.from_experiments(
        new_experiments, force_stills=new_experiments.all_stills())
    new_reflections = ref_predictor(new_reflections)

    return (new_experiments, new_reflections)
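
# Minimal usage sketch (an assumption for illustration, not part of the original
# example): the file names are placeholders, and ``params`` is built from a plain
# namespace rather than the PHIL-derived object a DIALS command-line wrapper would
# normally supply.
if __name__ == "__main__":
    from types import SimpleNamespace

    from dxtbx.model.experiment_list import ExperimentListFactory

    params = SimpleNamespace(
        max_scan_points=None,
        output=SimpleNamespace(domain_size_ang=None, half_mosaicity_deg=None),
    )
    experiments = ExperimentListFactory.from_json_file("scan_varying.expt")
    reflections = [flex.reflection_table.from_file("scan_varying.refl")]
    stills_expts, stills_refls = sequence_to_stills(experiments, reflections, params)
    stills_expts.as_file("stills.expt")
    stills_refls.as_file("stills.refl")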
Example #2
# Imports assumed by this example (NumPy/SciPy plus the DIALS toolkits). The helper
# ``is_outlier`` (a robust z-score zinger filter) is defined elsewhere in the original
# module and is not shown here.
import numpy as np
from scipy.spatial import cKDTree

from dxtbx.model import ExperimentList

from dials.algorithms.indexing import assign_indices
from dials.algorithms.shoebox import MaskCode
from dials.array_family import flex
from dials.model.data import Shoebox


def tilt_fit(imgs, is_bg_pix, delta_q, photon_gain, sigma_rdout, zinger_zscore,
             exper, predicted_refls, sb_pad=0, filter_boundary_spots=False,
             minsnr=None, mintilt=None, plot=False, verbose=False, is_BAD_pix=None,
             min_strong=None, min_bg=10, min_dist_to_bad_pix=7, **kwargs):

    if is_BAD_pix is None:
        is_BAD_pix = np.zeros(np.array(is_bg_pix).shape, bool)

    predicted_refls['id'] = flex.int(len(predicted_refls), -1)
    predicted_refls['imageset_id'] = flex.int(len(predicted_refls), 0)
    El = ExperimentList()
    El.append(exper)
    predicted_refls.centroid_px_to_mm(El)
    predicted_refls.map_centroids_to_reciprocal_space(El)
    ss_dim, fs_dim = imgs[0].shape
    n_refl = len(predicted_refls)
    integrations = []
    variances = []
    coeffs = []
    new_shoeboxes = []
    tilt_error = []
    boundary = []
    detdist = exper.detector[0].get_distance()
    pixsize = exper.detector[0].get_pixel_size()[0]
    ave_wave = exper.beam.get_wavelength()

    bad_trees = {}
    unique_panels = set(predicted_refls["panel"])
    for p in unique_panels:
        panel_bad_pix = is_BAD_pix[p]
        ybad, xbad = np.where(panel_bad_pix)
        if ybad.size:
            bad_pts = list(zip(ybad, xbad))
            bad_trees[p] = cKDTree(bad_pts)
        else:
            bad_trees[p] = None

    sel = []
    for i_ref in range(len(predicted_refls)):
        ref = predicted_refls[i_ref]
        i_com, j_com, _ = ref['xyzobs.px.value']

        # which detector panel is this reflection on?
        i_panel = ref['panel']

        if bad_trees[i_panel] is not None:
            if bad_trees[i_panel].query_ball_point((i_com, j_com), r=min_dist_to_bad_pix):
                sel.append(False)
                integrations.append(None)
                variances.append(None)
                coeffs.append(None)
                new_shoeboxes.append(None)
                tilt_error.append(None)
                boundary.append(None)
                continue

        i1_a, i2_a, j1_a, j2_a, _, _ = ref['bbox']  # bbox of prediction

        i1_ = max(i1_a, 0)
        i2_ = min(i2_a, fs_dim-1)
        j1_ = max(j1_a, 0)
        j2_ = min(j2_a, ss_dim-1)

        # get the radial extent of the integration box in pixels
        Qmag = 2*np.pi*np.linalg.norm(ref['rlp'])  # magnitude of the momentum transfer of this RLP (physicist convention)
        rad1 = (detdist/pixsize) * np.tan(2*np.arcsin((Qmag-delta_q*.5)*ave_wave/4/np.pi))
        rad2 = (detdist/pixsize) * np.tan(2*np.arcsin((Qmag+delta_q*.5)*ave_wave/4/np.pi))
        bbox_extent = (rad2-rad1) / np.sqrt(2)   # rad2 - rad1 is the diagonal across the bbox
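        # The radii above follow from the physicist-convention momentum transfer
        # q = 4*pi*sin(theta)/lambda: solving for the scattering angle gives
        # 2*theta = 2*arcsin(q*lambda/(4*pi)), and (assuming a flat detector roughly
        # normal to the beam) the radial distance from the beam centre in pixels is
        # (detdist/pixsize)*tan(2*theta), so rad1 and rad2 are the radii of the
        # Qmag -/+ delta_q/2 shell on the detector.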
        i_com = i_com - 0.5
        j_com = j_com - 0.5
        i_low = int(i_com - bbox_extent/2.)
        i_high = int(i_com + bbox_extent/2.)
        j_low = int(j_com - bbox_extent/2.)
        j_high = int(j_com + bbox_extent/2.)

        i1_orig = max(i_low, 0)
        i2_orig = min(i_high, fs_dim-1)
        j1_orig = max(j_low, 0)
        j2_orig = min(j_high, ss_dim-1)

        i_low = i_low - sb_pad
        i_high = i_high + sb_pad
        j_low = j_low - sb_pad
        j_high = j_high + sb_pad

        i1 = max(i_low, 0)
        i2 = min(i_high, fs_dim-1)
        j1 = max(j_low, 0)
        j2 = min(j_high, ss_dim-1)

        i1_p = i1_orig - i1
        i2_p = i1_p + i2_orig-i1_orig
        j1_p = j1_orig - j1
        j2_p = j1_p + j2_orig-j1_orig

        if i1 == 0 or i2 == fs_dim - 1 or j1 == 0 or j2 == ss_dim - 1:
            boundary.append(True)
            if filter_boundary_spots:
                sel.append(False)
                integrations.append(None)
                variances.append(None)
                coeffs.append(None)
                new_shoeboxes.append(None)
                tilt_error.append(None)
                continue
        else:
            boundary.append(False)

        # get the image and mask
        shoebox_img = imgs[i_panel][j1:j2, i1:i2] / photon_gain  # NOTE: the gain is important here!
        dials_mask = np.zeros(shoebox_img.shape).astype(np.int32)

        # initially all pixels are valid
        dials_mask += MaskCode.Valid
        shoebox_mask = is_bg_pix[i_panel][j1:j2, i1:i2]
        badpix_mask = is_BAD_pix[i_panel][j1:j2, i1:i2]


        dials_mask[shoebox_mask] = dials_mask[shoebox_mask] + MaskCode.Background

        new_shoebox = Shoebox((i1_orig, i2_orig, j1_orig, j2_orig, 0, 1))
        new_shoebox.allocate()
        new_shoebox.data = flex.float(np.ascontiguousarray(shoebox_img[None, j1_p:j2_p, i1_p: i2_p]))
        #new_shoebox.data = flex.float(shoebox_img[None,])

        # get coordinates arrays of the image
        Y, X = np.indices(shoebox_img.shape)

        # determine if any more outliers are present in background pixels
        img1d = shoebox_img.ravel()
        mask1d = shoebox_mask.ravel()  # mask specifies which pixels are bg
        # out1d specifies which bg pixels are outliers (zingers)
        out1d = np.zeros(mask1d.shape, bool)
        out1d[mask1d] = is_outlier(img1d[mask1d].ravel(), zinger_zscore)
        out2d = out1d.reshape(shoebox_img.shape)

        # combine the outlier mask with the bad-pixel mask
        out2d = np.logical_or(out2d, badpix_mask)

        # these are the points we fit to: background pixels that are neither outliers nor masked as bad
        fit_sel = np.logical_and(~out2d, shoebox_mask)  # fit the plane to these points only

        if np.sum(fit_sel) < min_bg:
            integrations.append(None)
            variances.append(None)
            coeffs.append(None)
            new_shoeboxes.append(None)
            tilt_error.append(None)
            sel.append(False)
            continue

        # update the dials mask...
        dials_mask[fit_sel] = dials_mask[fit_sel] + MaskCode.BackgroundUsed

        # fast scan pixels, slow scan pixels, pixel values (corrected for gain)
        fast, slow, rho_bg = X[fit_sel], Y[fit_sel], shoebox_img[fit_sel]

        # do the fit of the background plane
        A = np.array([fast, slow, np.ones_like(fast)]).T
        # weights matrix:
        W = np.diag(1 / (sigma_rdout ** 2 + rho_bg))
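        # Weighted least squares for the background plane rho(x, y) = a*x + b*y + c:
        # with design matrix A = [x, y, 1] and per-pixel weights 1/var(rho)
        # (readout variance plus a Poisson term), the normal equations give
        # (a, b, c) = (A^T W A)^-1 A^T W rho_bg, computed below.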
        AWA = np.dot(A.T, np.dot(W, A))
        try:
            AWA_inv = np.linalg.inv(AWA)
        except np.linalg.LinAlgError:
            print ("WARNING: Fit did not work.. investigate reflection")
            print (ref)
            integrations.append(None)
            variances.append(None)
            coeffs.append(None)
            new_shoeboxes.append(None)
            tilt_error.append(None)
            sel.append(False)
            continue


        AtW = np.dot(A.T, W)
        a, b, c = np.dot(np.dot(AWA_inv, AtW), rho_bg)
        coeffs.append((a, b, c))

        # evaluate the fitted tilt-plane background over the full shoebox
        X1d = np.ravel(X)
        Y1d = np.ravel(Y)
        background = (X1d * a + Y1d * b + c).reshape(shoebox_img.shape)
        new_shoebox.background = flex.float(np.ascontiguousarray(background[None, j1_p: j2_p, i1_p:i2_p]))

        # vector of residuals
        r = rho_bg - np.dot(A, (a, b, c))
        Nbg = len(rho_bg)
        Nparam = 3
        r_fact = np.dot(r.T, np.dot(W, r)) / (Nbg - Nparam)
        var_covar = AWA_inv * r_fact
        abc_var = var_covar[0][0], var_covar[1][1], var_covar[2][2]
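        # r_fact is the reduced chi-square of the weighted fit; scaling (A^T W A)^-1 by it
        # gives the variance-covariance matrix of the plane coefficients, and its diagonal
        # supplies var(a), var(b), var(c) for the error propagation further down.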

        # place the strong spot mask in the expanded shoebox
        peak_mask = ref['shoebox'].mask.as_numpy_array()[0] == MaskCode.Valid + MaskCode.Foreground
        peak_mask_valid = peak_mask[j1_-j1_a:- j1_a + j2_, i1_-i1_a:-i1_a + i2_]
        peak_mask_expanded = np.zeros_like(shoebox_mask)
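        # The foreground mask was defined on the original prediction bbox, and
        # peak_mask_valid is its portion clipped to the detector (i1_:i2_, j1_:j2_),
        # whereas the new shoebox lives on the padded, clipped bbox (i1:i2, j1:j2).
        # Compute the overlap of the two boxes in detector coordinates and copy the
        # mask across that overlap only.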

        # overlap region
        i1_o = max(i1_, i1)
        i2_o = min(i2_, i2)
        j1_o = max(j1_, j1)
        j2_o = min(j2_, j2)

        pk_mask_istart = i1_o - i1_
        pk_mask_jstart = j1_o - j1_
        pk_mask_istop = peak_mask_valid.shape[1] - (i2_ - i2_o)
        pk_mask_jstop = peak_mask_valid.shape[0] - (j2_ - j2_o)
        peak_mask_overlap = peak_mask_valid[pk_mask_jstart: pk_mask_jstop, pk_mask_istart: pk_mask_istop]

        pk_mask_exp_i1 = i1_o - i1
        pk_mask_exp_j1 = j1_o - j1
        pk_mask_exp_i2 = peak_mask_expanded.shape[1] - (i2 - i2_o)
        pk_mask_exp_j2 = peak_mask_expanded.shape[0] - (j2 - j2_o)
        peak_mask_expanded[pk_mask_exp_j1: pk_mask_exp_j2, pk_mask_exp_i1: pk_mask_exp_i2] = peak_mask_overlap

        # update the dials mask
        dials_mask[peak_mask_expanded] = dials_mask[peak_mask_expanded] + MaskCode.Foreground

        p = X[peak_mask_expanded]  # fast scan coords
        q = Y[peak_mask_expanded]  # slow scan coords
        rho_peak = shoebox_img[peak_mask_expanded]  # pixel values

        Isum = np.sum(rho_peak - a*p - b*q - c)  # summed spot intensity

        var_rho_peak = sigma_rdout ** 2 + rho_peak  # include readout noise in the variance
        Ns = len(rho_peak)  # number of integrated peak pixels

        # variance propagated from tilt plane constants
        var_a_term = abc_var[0] * ((np.sum(p))**2)
        var_b_term = abc_var[1] * ((np.sum(q))**2)
        var_c_term = abc_var[2] * (Ns**2)
        tilt_error.append(var_a_term + var_b_term + var_c_term)

        # total variance of the spot
        var_Isum = np.sum(var_rho_peak) + var_a_term + var_b_term + var_c_term
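        # Error model: I = sum_i(rho_i - a*p_i - b*q_i - c) over the Ns foreground pixels,
        # so var(I) ~ sum_i var(rho_i) + (sum p)^2*var(a) + (sum q)^2*var(b) + Ns^2*var(c),
        # neglecting covariances between the fitted plane coefficients.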

        integrations.append(Isum)
        variances.append(var_Isum)
        new_shoebox.mask = flex.int(np.ascontiguousarray(dials_mask[None, j1_p:j2_p, i1_p:i2_p]))
        new_shoeboxes.append(new_shoebox)
        sel.append(True)

        if i_ref % 50 == 0 and verbose:
            print("Integrated refls %d / %d" % (i_ref+1, n_refl))


    #if filter_boundary_spots:
    #    sel = flex.bool([I is not None for I in integrations])
    boundary = np.array(boundary)[sel].astype(bool)
    integrations = np.array([I for I in integrations if I is not None])
    variances = np.array([v for v in variances if v is not None])
    coeffs = np.array([c for c in coeffs if c is not None])
    tilt_error = np.array([te for te in tilt_error if te is not None])

    #boundary = np.zeros(tilt_error.shape).astype(np.bool)

    predicted_refls = predicted_refls.select(flex.bool(sel))

    predicted_refls['resolution'] = flex.double(1 / np.linalg.norm(predicted_refls['rlp'], axis=1))
    predicted_refls['boundary'] = flex.bool(boundary)
    predicted_refls["intensity.sum.value.Leslie99"] = flex.double(integrations)
    predicted_refls["intensity.sum.variance.Leslie99"] = flex.double(variances)
    predicted_refls['shoebox'] = flex.shoebox([sb for sb in new_shoeboxes if sb is not None])
    idx_assign = assign_indices.AssignIndicesGlobal(tolerance=0.333)
    idx_assign(predicted_refls, El)
    
    return predicted_refls, coeffs, tilt_error, integrations, variances
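
# Minimal usage sketch (an assumption for illustration, not part of the original example):
# the file names are placeholders, and the all-True background mask below stands in for
# the real per-panel background masks that would normally come from spot finding.
if __name__ == "__main__":
    from dxtbx.model.experiment_list import ExperimentListFactory

    El_in = ExperimentListFactory.from_json_file("indexed.expt")
    exper = El_in[0]
    predictions = flex.reflection_table.from_file("predicted.refl")

    # one 2-D numpy image per detector panel
    imgs = [panel.as_numpy_array() for panel in exper.imageset.get_raw_data(0)]
    is_bg_pix = [np.ones(img.shape, bool) for img in imgs]  # placeholder background masks

    refls, tilt_abc, tilt_err, I_sum, I_var = tilt_fit(
        imgs, is_bg_pix,
        delta_q=0.07, photon_gain=1.0, sigma_rdout=3.0, zinger_zscore=5.0,
        exper=exper, predicted_refls=predictions, verbose=True)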