Example #1
def frames_and_literature_positions_to_atom14_pos(
        aatype: torch.Tensor,
        all_frames_to_global: affine.Rigids) -> affine.Vecs:
    """
	https://github.com/lupoglaz/alphafold/blob/2d53ad87efedcbbda8e67ab3be96af769dbeae7d/alphafold/model/all_atom.py#L532
	"""
    aatype = aatype.to(dtype=torch.long)
    device = all_frames_to_global.trans.x.device
    dtype = all_frames_to_global.trans.x.dtype

    restype_atom14_to_rigid_group = torch.from_numpy(
        residue_constants.restype_atom14_to_rigid_group).to(device=device)
    restype_atom14_rigid_group_positions = torch.from_numpy(
        residue_constants.restype_atom14_rigid_group_positions).to(
            device=device, dtype=dtype)
    restype_atom14_mask = torch.from_numpy(
        residue_constants.restype_atom14_mask).to(device=device)

    residx_to_group_idx = batched_gather(restype_atom14_to_rigid_group, aatype)
    group_mask = F.one_hot(residx_to_group_idx, 8).squeeze()
    map_atoms_to_global = affine.rigids_apply(
        lambda x: torch.sum(x[:, None, :] * group_mask, dim=-1),
        all_frames_to_global)

    lit_positions_tensor = batched_gather(restype_atom14_rigid_group_positions,
                                          aatype)
    lit_positions = affine.vecs_from_tensor(lit_positions_tensor)
    pred_positions = affine.rigids_mul_vecs(map_atoms_to_global, lit_positions)

    mask = batched_gather(restype_atom14_mask, aatype)
    pred_positions = affine.vecs_apply(lambda x: x * mask, pred_positions)
    return pred_positions
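
A minimal call sketch (not part of the original example), assuming the repository's affine helpers, residue_constants, and batched_gather are importable as in the snippet above, and that affine.rigids_from_tensor4x4 (used in Example #4 below) accepts an (N, 8, 4, 4) stack of homogeneous transforms; the shapes and values are purely illustrative:

import torch

num_res = 16                                        # illustrative residue count
aatype = torch.randint(0, 20, (num_res,))           # residue type indices
# Identity frames for all 8 rigid groups of each residue -> Rigids with (N, 8) fields.
frames_4x4 = torch.eye(4).reshape(1, 1, 4, 4).repeat(num_res, 8, 1, 1)
all_frames_to_global = affine.rigids_from_tensor4x4(frames_4x4)
atom14_pos = frames_and_literature_positions_to_atom14_pos(aatype, all_frames_to_global)
# atom14_pos is an affine.Vecs whose x/y/z components each have shape (N, 14).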
Example #2
	def find_structural_violations(self, batch: Dict[str, torch.Tensor], atom14_pred_positions: torch.Tensor) -> Dict[str, torch.Tensor]:
		"""
		https://github.com/lupoglaz/alphafold/blob/2d53ad87efedcbbda8e67ab3be96af769dbeae7d/alphafold/model/folding.py#L734
		"""
		connection_violations = protein.between_residue_bond_loss(
			pred_atom_positions=atom14_pred_positions,
			pred_atom_mask=batch['atom14_atom_exists'].to(dtype=torch.float32),
			residue_index=batch['residue_index'].to(dtype=torch.float32),
			aatype=batch['aatype'],
			tolerance_factor_soft=self.config.violation_tolerance_factor,
			tolerance_factor_hard=self.config.violation_tolerance_factor
		)
		atomtype_radius = atom14_pred_positions.new_tensor([residue_constants.van_der_waals_radius[name[0]] for name in residue_constants.atom_types])
		atom14_atom_radius = batch['atom14_atom_exists'] * batched_gather(atomtype_radius, batch['residx_atom14_to_atom37'])
		
		between_residue_clashes = protein.between_residue_clash_loss(
			atom14_pred_positions=atom14_pred_positions,
			atom14_atom_exists=batch['atom14_atom_exists'].to(dtype=torch.float32),
			atom14_atom_radius=atom14_atom_radius,
			residue_index=batch['residue_index'].to(dtype=torch.float32),
			overlap_tolerance_soft=self.config.clash_overlap_tolerance,
			overlap_tolerance_hard=self.config.clash_overlap_tolerance
		)

		restype_atom14_bounds = residue_constants.make_atom14_dists_bounds(
			overlap_tolerance=self.config.clash_overlap_tolerance,
			bond_length_tolerance_factor=self.config.violation_tolerance_factor
		)
		aatypes = batch['aatype'].to(dtype=torch.long)
		atom14_dists_lower_bound = batched_gather(torch.from_numpy(restype_atom14_bounds['lower_bound']).to(device=aatypes.device), aatypes)
		atom14_dists_upper_bound = batched_gather(torch.from_numpy(restype_atom14_bounds['upper_bound']).to(device=aatypes.device), aatypes)
		within_residue_violations = protein.within_residue_violations(
			atom14_pred_positions=atom14_pred_positions, 
			atom14_atom_exists=batch['atom14_atom_exists'].to(dtype=torch.float32), 
			atom14_dists_lower_bound=atom14_dists_lower_bound, 
			atom14_dists_upper_bound=atom14_dists_upper_bound,
			tighten_bounds_for_loss=0.0
		)
		per_residue_violations_mask = torch.max(torch.stack([
			connection_violations['per_residue_violation_mask'],
			torch.max(between_residue_clashes['per_atom_clash_mask'], dim=-1).values,
			torch.max(within_residue_violations['per_atom_violations'], dim=-1).values]), dim=0).values
		
		return {'between_residues': {
					'bonds_c_n_loss_mean': connection_violations['c_n_loss'],  # ()
					'angles_ca_c_n_loss_mean': connection_violations['ca_c_n_loss'],  # ()
					'angles_c_n_ca_loss_mean': connection_violations['c_n_ca_loss'],  # ()
					'connections_per_residue_loss_sum': connection_violations['per_residue_loss_sum'],  # (N)
					'connections_per_residue_violation_mask': connection_violations['per_residue_violation_mask'],  # (N)
					'clashes_mean_loss': between_residue_clashes['mean_loss'],  # ()
					'clashes_per_atom_loss_sum': between_residue_clashes['per_atom_loss_sum'],  # (N, 14)
					'clashes_per_atom_clash_mask': between_residue_clashes['per_atom_clash_mask']},  # (N, 14)
				'within_residues': {
					'per_atom_loss_sum': within_residue_violations['per_atom_loss_sum'],  # (N, 14)
					'per_atom_violations': within_residue_violations['per_atom_violations']},  # (N, 14),
				'total_per_residue_violations_mask': per_residue_violations_mask}  # (N)
Example #3
def atom14_to_atom37(atom14_data: torch.Tensor,
                     batch: Dict[str, torch.Tensor]) -> torch.Tensor:
    """
	https://github.com/lupoglaz/alphafold/blob/2d53ad87efedcbbda8e67ab3be96af769dbeae7d/alphafold/model/all_atom.py#L76
	"""
    assert atom14_data.ndimension() in [2, 3]
    assert 'residx_atom37_to_atom14' in batch
    assert 'atom37_atom_exists' in batch
    idxs = batch['residx_atom37_to_atom14'].to(dtype=torch.long)
    atom37_data = batched_gather(atom14_data,
                                 idxs,
                                 dim=1,
                                 no_batch_dims=len(
                                     atom14_data.shape[:1]))  #Double check
    if atom14_data.ndimension() == 2:
        atom37_data *= batch['atom37_atom_exists']
    elif atom14_data.ndimension() == 3:
        atom37_data *= batch['atom37_atom_exists'][:, :,
                                                   None].to(atom37_data.dtype)

    return atom37_data
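
A shape-only usage sketch, assuming batched_gather follows the gather-with-batch-dims semantics used throughout these examples; the index and mask tensors below are dummies, whereas in practice they come from the feature pipeline:

import torch

num_res = 16
atom14_data = torch.randn(num_res, 14, 3)            # per-atom coordinates
batch = {
    # Dummy mapping/mask tensors of the expected shapes.
    'residx_atom37_to_atom14': torch.zeros(num_res, 37, dtype=torch.long),
    'atom37_atom_exists': torch.ones(num_res, 37),
}
atom37_data = atom14_to_atom37(atom14_data, batch)   # -> (N, 37, 3)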
Example #4
def torsion_angles_to_frames(
        aatype: torch.Tensor, backb_to_global: affine.Rigids,
        torsion_angles_sin_cos: torch.Tensor) -> affine.Rigids:
    """
	https://github.com/lupoglaz/alphafold/blob/2d53ad87efedcbbda8e67ab3be96af769dbeae7d/alphafold/model/all_atom.py#L445
	"""
    assert aatype.ndimension() == 1
    assert backb_to_global.rot.xx.ndimension() == 1
    assert torsion_angles_sin_cos.ndimension() == 3
    assert torsion_angles_sin_cos.size(1) == 7
    assert torsion_angles_sin_cos.size(2) == 2
    device = torsion_angles_sin_cos.device
    dtype = torsion_angles_sin_cos.dtype

    frames = torch.from_numpy(
        residue_constants.restype_rigid_group_default_frame).to(device=device,
                                                                dtype=dtype)
    idxs = aatype.to(dtype=torch.long)
    m = batched_gather(frames, idxs)
    default_frames = affine.rigids_from_tensor4x4(m)

    sin_angles = torsion_angles_sin_cos[..., 0]
    cos_angles = torsion_angles_sin_cos[..., 1]
    num_res = aatype.size(0)
    sin_angles = torch.cat(
        [torch.zeros(num_res, 1, dtype=dtype, device=device), sin_angles],
        dim=-1)
    cos_angles = torch.cat(
        [torch.ones(num_res, 1, dtype=dtype, device=device), cos_angles],
        dim=-1)
    zeros = torch.zeros_like(sin_angles)
    ones = torch.ones_like(sin_angles)

    all_rots = affine.Rots(ones, zeros, zeros, zeros, cos_angles, -sin_angles,
                           zeros, sin_angles, cos_angles)

    all_frames = affine.rigids_mul_rots(default_frames, all_rots)

    chi2_frame_to_frame = affine.rigids_apply(lambda x: x[:, 5], all_frames)
    chi3_frame_to_frame = affine.rigids_apply(lambda x: x[:, 6], all_frames)
    chi4_frame_to_frame = affine.rigids_apply(lambda x: x[:, 7], all_frames)

    chi1_frame_to_backb = affine.rigids_apply(lambda x: x[:, 4], all_frames)
    chi2_frame_to_backb = affine.rigids_mul_rigids(chi1_frame_to_backb,
                                                   chi2_frame_to_frame)
    chi3_frame_to_backb = affine.rigids_mul_rigids(chi2_frame_to_backb,
                                                   chi3_frame_to_frame)
    chi4_frame_to_backb = affine.rigids_mul_rigids(chi3_frame_to_backb,
                                                   chi4_frame_to_frame)

    def _concat_frames(xall, x5, x6, x7):
        return torch.cat([xall[:, :5], x5[:, None], x6[:, None], x7[:, None]],
                         dim=-1)

    all_frames_to_backb = affine.rigids_apply(_concat_frames, all_frames,
                                              chi2_frame_to_backb,
                                              chi3_frame_to_backb,
                                              chi4_frame_to_backb)
    all_frames_to_global = affine.rigids_mul_rigids(
        affine.rigids_apply(lambda x: x[:, None], backb_to_global),
        all_frames_to_backb)
    return all_frames_to_global
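
A sketch of how this function feeds Example #1, under the assumption that affine.rigids_from_tensor4x4 also accepts an (N, 4, 4) tensor so that the backbone Rigids has the 1-D fields the asserts require; zero torsion angles are encoded here as (sin, cos) = (0, 1):

import torch

num_res = 16
aatype = torch.randint(0, 20, (num_res,))
# Identity backbone frames with 1-D Rigids fields, as required by the asserts.
backb_to_global = affine.rigids_from_tensor4x4(
    torch.eye(4).unsqueeze(0).repeat(num_res, 1, 1))
torsion_sin_cos = torch.zeros(num_res, 7, 2)
torsion_sin_cos[..., 1] = 1.0                        # cos = 1, sin = 0 for every angle
all_frames_to_global = torsion_angles_to_frames(aatype, backb_to_global,
                                                torsion_sin_cos)
# The result plugs directly into Example #1:
atom14_pos = frames_and_literature_positions_to_atom14_pos(aatype, all_frames_to_global)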
Example #5
def make_atom14_positions(protein):
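    """Constructs denser atom positions (14 dimensions instead of 37)."""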
    aatype = protein["aatype"]
    residx_atom14_mask = protein["atom14_atom_exists"]
    residx_atom14_to_atom37 = protein["residx_atom14_to_atom37"]
    all_atom_mask = protein["all_atom_mask"]
    all_atom_positions = protein["all_atom_positions"]
    residx_atom14_gt_mask = residx_atom14_mask * batched_gather(
        all_atom_mask,
        residx_atom14_to_atom37,
        dim=-1,
        no_batch_dims=len(all_atom_mask.shape[:-1]))

    residx_atom14_gt_positions = residx_atom14_gt_mask[
        ..., None] * batched_gather(all_atom_positions,
                                    residx_atom14_to_atom37,
                                    dim=-2,
                                    no_batch_dims=len(
                                        all_atom_positions.shape[:-2]))

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["atom14_gt_exists"] = residx_atom14_gt_mask
    protein["atom14_gt_positions"] = residx_atom14_gt_positions

    restype_3 = [
        residue_constants.restype_1to3[res]
        for res in residue_constants.restypes
    ]
    restype_3 += ['UNK']

    # One 14x14 identity per residue type; residues with ambiguous atoms get a
    # permutation matrix below that swaps the ambiguous pair (e.g. ASP OD1/OD2).
    all_matrices = {
        res: torch.eye(14,
                       dtype=all_atom_mask.dtype,
                       device=all_atom_mask.device)
        for res in restype_3
    }
    for resname, swap in residue_constants.residue_atom_renaming_swaps.items():
        correspondence = torch.arange(start=0,
                                      end=14,
                                      device=all_atom_mask.device)
        for source_atom, dest_atom in swap.items():
            source_index = residue_constants.restype_name_to_atom14_names[
                resname].index(source_atom)
            dest_index = residue_constants.restype_name_to_atom14_names[
                resname].index(dest_atom)
            correspondence[source_index] = dest_index
            correspondence[dest_index] = source_index
        renaming_matrix = all_atom_mask.new_zeros(14, 14)
        for index, corr in enumerate(correspondence):
            renaming_matrix[index, corr] = 1.0
        all_matrices[resname] = renaming_matrix

    renaming_matrices = torch.stack(
        [all_matrices[resname] for resname in restype_3])
    renaming_transform = renaming_matrices[aatype]
    atom14_alt_gt_positions = torch.einsum("...rac,...rab->...rbc",
                                           residx_atom14_gt_positions,
                                           renaming_transform)
    protein["atom14_alt_gt_positions"] = atom14_alt_gt_positions
    atom14_alt_gt_exists = torch.einsum("...ra,...rab->...rb",
                                        residx_atom14_gt_mask,
                                        renaming_transform)
    protein["atom14_alt_gt_exists"] = atom14_alt_gt_exists

    restype_atom14_is_ambiguous = all_atom_mask.new_zeros(21, 14)
    for resname, swap in residue_constants.residue_atom_renaming_swaps.items():
        for source_atom, dest_atom in swap.items():
            source_index = residue_constants.restype_name_to_atom14_names[
                resname].index(source_atom)
            dest_index = residue_constants.restype_name_to_atom14_names[
                resname].index(dest_atom)
            restype = residue_constants.restype_order[
                residue_constants.restype_3to1[resname]]
            restype_atom14_is_ambiguous[restype, source_index] = 1
            restype_atom14_is_ambiguous[restype, dest_index] = 1

    protein["atom14_atom_is_ambiguous"] = restype_atom14_is_ambiguous[aatype]
    return protein
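
A shape sketch of how the function might be called, with dummy tensors in place of real pipeline features; batched_gather and residue_constants are assumed importable as in the snippet:

import torch

num_res = 16
feats = {
    "aatype": torch.randint(0, 20, (num_res,)),
    "atom14_atom_exists": torch.ones(num_res, 14),
    "residx_atom14_to_atom37": torch.zeros(num_res, 14, dtype=torch.long),
    "all_atom_mask": torch.ones(num_res, 37),
    "all_atom_positions": torch.randn(num_res, 37, 3),
}
feats = make_atom14_positions(feats)
# Adds, among others:
#   atom14_gt_positions       (N, 14, 3)
#   atom14_alt_gt_positions   (N, 14, 3)
#   atom14_atom_is_ambiguous  (N, 14)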
Example #6
def atom37_to_torsion_angles(aatype: torch.Tensor,
                             all_atom_pos: torch.Tensor,
                             all_atom_mask: torch.Tensor,
                             placeholder_for_undefined=False):
    """https://github.com/lupoglaz/alphafold/blob/2d53ad87efedcbbda8e67ab3be96af769dbeae7d/alphafold/model/all_atom.py#L271"""
    aatype = torch.minimum(aatype.unsqueeze(dim=0), aatype.new_tensor([20]))
    all_atom_pos = all_atom_pos.unsqueeze(dim=0)
    all_atom_mask = all_atom_mask.unsqueeze(dim=0)
    num_batch, num_res = aatype.shape
    pad = torch.zeros(num_batch,
                      1,
                      37,
                      3,
                      dtype=torch.float32,
                      device=aatype.device)
    prev_all_atom_pos = torch.cat([pad, all_atom_pos[:, :-1, :, :]], dim=1)
    pad = torch.zeros(num_batch,
                      1,
                      37,
                      dtype=torch.float32,
                      device=aatype.device)
    prev_all_atom_mask = torch.cat([pad, all_atom_mask[:, :-1, :]], dim=1)

    pre_omega_atom_pos = torch.cat(
        [prev_all_atom_pos[:, :, 1:3, :], all_atom_pos[:, :, 0:2, :]], dim=-2)
    phi_atom_pos = torch.cat(
        [prev_all_atom_pos[:, :, 2:3, :], all_atom_pos[:, :, 0:3, :]], dim=-2)
    psi_atom_pos = torch.cat(
        [all_atom_pos[:, :, 0:3, :], all_atom_pos[:, :, 4:5, :]], dim=-2)
    pre_omega_mask = torch.prod(prev_all_atom_mask[:, :, 1:3],
                                dim=-1) * torch.prod(all_atom_mask[:, :, 0:2],
                                                     dim=-1)
    phi_mask = prev_all_atom_mask[:, :, 2] * torch.prod(
        all_atom_mask[:, :, 0:3], dim=-1)
    psi_mask = torch.prod(all_atom_mask[:, :, 0:3],
                          dim=-1) * all_atom_mask[:, :, 4]

    chi_atom_indices = get_chi_atom_indices().to(device=aatype.device)
    atom_indices = chi_atom_indices[aatype, :, :]
    chis_atom_pos = batched_gather(all_atom_pos, atom_indices, -2,
                                   len(atom_indices.shape[:-2]))

    chi_angles_mask = list(residue_constants.chi_angles_mask)
    chi_angles_mask.append([0.0, 0.0, 0.0, 0.0])
    chi_angles_mask = all_atom_mask.new_tensor(chi_angles_mask)

    chis_mask = chi_angles_mask[aatype, :]
    chi_angle_atoms_mask = batched_gather(all_atom_mask, atom_indices, -1,
                                          len(atom_indices.shape[:-2]))
    chi_angle_atoms_mask = torch.prod(chi_angle_atoms_mask, dim=-1)
    chis_mask = chis_mask * (chi_angle_atoms_mask.to(dtype=torch.float32))

    torsion_atoms_pos = torch.cat([
        pre_omega_atom_pos[..., None, :, :], phi_atom_pos[..., None, :, :],
        psi_atom_pos[..., None, :, :], chis_atom_pos
    ],
                                  dim=-3)
    torsion_angles_mask = torch.cat([
        pre_omega_mask[..., None], phi_mask[..., None], psi_mask[..., None],
        chis_mask
    ],
                                    dim=-1)

    torsion_frames = affine.rigids_from_3_points(
        point_on_neg_axis=affine.vecs_from_tensor(torsion_atoms_pos[...,
                                                                    1, :]),
        origin=affine.vecs_from_tensor(torsion_atoms_pos[..., 2, :]),
        point_on_xy_plane=affine.vecs_from_tensor(torsion_atoms_pos[...,
                                                                    0, :]))
    fourth_atom_rel_pos = affine.rigids_mul_vecs(
        affine.rigids_invert(torsion_frames),
        affine.vecs_from_tensor(torsion_atoms_pos[..., 3, :]))
    torsion_angles_sin_cos = torch.stack(
        [fourth_atom_rel_pos.z, fourth_atom_rel_pos.y], dim=-1)
    torsion_angles_sin_cos /= torch.sqrt(
        torch.sum(torch.square(torsion_angles_sin_cos), dim=-1, keepdim=True)
        + 1e-8)
    # Mirror psi, because it is computed from the O atom.
    torsion_angles_sin_cos *= torsion_angles_sin_cos.new_tensor(
        [1, 1, -1, 1, 1, 1, 1])[None, None, :, None]

    chi_is_ambiguous = torsion_angles_sin_cos.new_tensor(
        residue_constants.chi_pi_periodic)[aatype, ...]
    mirror_torsion_angles = torch.cat([
        all_atom_mask.new_ones(num_batch, num_res, 3),
        1.0 - 2.0 * chi_is_ambiguous
    ],
                                      dim=-1)
    alt_torsion_angles_sin_cos = torsion_angles_sin_cos * mirror_torsion_angles[
        ..., None]

    if placeholder_for_undefined:
        # Fixed placeholder values are substituted where a torsion angle is undefined.
        placeholder_torsions = torch.stack([
            torch.ones_like(torsion_angles_sin_cos[..., 0]),
            torch.zeros_like(torsion_angles_sin_cos[..., 0])
        ],
                                           dim=-1)
        torsion_angles_sin_cos = torsion_angles_sin_cos * torsion_angles_mask[..., None] \
            + placeholder_torsions * (1.0 - torsion_angles_mask[..., None])
        alt_torsion_angles_sin_cos = alt_torsion_angles_sin_cos * torsion_angles_mask[..., None] \
            + placeholder_torsions * (1.0 - torsion_angles_mask[..., None])
    return {
        'torsion_angles_sin_cos': torsion_angles_sin_cos,
        'alt_torsion_angles_sin_cos': alt_torsion_angles_sin_cos,
        'torsion_angles_mask': torsion_angles_mask
    }
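
A call sketch, assuming get_chi_atom_indices and batched_gather from the same repository are available; note that the function adds a leading batch dimension internally, so the outputs carry an extra dimension of size 1:

import torch

num_res = 16
aatype = torch.randint(0, 20, (num_res,))
all_atom_pos = torch.randn(num_res, 37, 3)
all_atom_mask = torch.ones(num_res, 37)
out = atom37_to_torsion_angles(aatype, all_atom_pos, all_atom_mask)
# out['torsion_angles_sin_cos']      -> (1, N, 7, 2)
# out['alt_torsion_angles_sin_cos']  -> (1, N, 7, 2)
# out['torsion_angles_mask']         -> (1, N, 7)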
Example #7
def atom37_to_frames(aatype: torch.Tensor, all_atom_positions: torch.Tensor,
                     all_atom_mask: torch.Tensor) -> Dict[str, torch.Tensor]:
    """https://github.com/lupoglaz/alphafold/blob/2d53ad87efedcbbda8e67ab3be96af769dbeae7d/alphafold/model/all_atom.py#L114"""
    aatype_in_shape = aatype.shape
    aatype = aatype.view(-1)
    batch_dims = len(aatype_in_shape[:-1])
    all_atom_positions = all_atom_positions.view(-1, 37, 3)
    all_atom_mask = all_atom_mask.view(-1, 37)
    restype_rigidbody_base_atom_names = np.full([21, 8, 3], '', dtype=object)
    restype_rigidbody_base_atom_names[:, 0, :] = ['C', 'CA', 'N']
    restype_rigidbody_base_atom_names[:, 3, :] = ['CA', 'C', 'O']
    for restype, restype_letter in enumerate(residue_constants.restypes):
        resname = residue_constants.restype_1to3[restype_letter]
        for chi_idx in range(4):
            if residue_constants.chi_angles_mask[restype][chi_idx]:
                atom_names = residue_constants.chi_angles_atoms[resname][
                    chi_idx]
                restype_rigidbody_base_atom_names[restype, chi_idx +
                                                  4, :] = atom_names[1:]

    restype_rigidgroup_mask = torch.zeros(21,
                                          8,
                                          dtype=torch.float32,
                                          device=all_atom_mask.device)
    restype_rigidgroup_mask[:, 0] = 1
    restype_rigidgroup_mask[:, 3] = 1
    restype_rigidgroup_mask[:20, 4:] = torch.tensor(
        residue_constants.chi_angles_mask, device=all_atom_mask.device)

    lookuptable = residue_constants.atom_order.copy()
    lookuptable[''] = 0
    restype_rigidbody_base_atom_names = np.vectorize(lambda x: lookuptable[x])(
        restype_rigidbody_base_atom_names)
    restype_rigidbody_base_atom_names = torch.from_numpy(
        restype_rigidbody_base_atom_names).to(device=aatype.device)
    residx_rigidgroup_base_atom37_idx = batched_gather(
        restype_rigidbody_base_atom_names,
        aatype,
        dim=-3,
        no_batch_dims=batch_dims)

    base_atom_pos = batched_gather(all_atom_positions,
                                   residx_rigidgroup_base_atom37_idx,
                                   dim=-2,
                                   no_batch_dims=len(
                                       all_atom_positions.shape[:-2]))

    gt_frames = affine.rigids_from_3_points(
        point_on_neg_axis=affine.vecs_from_tensor(base_atom_pos[:, :, 0, :]),
        origin=affine.vecs_from_tensor(base_atom_pos[:, :, 1, :]),
        point_on_xy_plane=affine.vecs_from_tensor(base_atom_pos[:, :, 2, :]))

    group_exists = batched_gather(restype_rigidgroup_mask,
                                  aatype,
                                  dim=-2,
                                  no_batch_dims=batch_dims)
    gt_atom_exists = batched_gather(all_atom_mask.to(dtype=torch.float32),
                                    residx_rigidgroup_base_atom37_idx,
                                    dim=-1,
                                    no_batch_dims=len(
                                        all_atom_positions.shape[:-2]))

    gt_exists = torch.min(gt_atom_exists, dim=-1)[0] * group_exists

    # Mirror the x- and z-axes of the backbone frame (rigid group 0), as in the
    # reference implementation.
    rots = torch.tile(
        torch.eye(3, dtype=all_atom_mask.dtype, device=all_atom_mask.device),
        (*((1, ) * batch_dims), 8, 1, 1))
    rots[0, 0, 0] = -1
    rots[0, 2, 2] = -1
    gt_frames = affine.rigids_mul_rots(gt_frames,
                                       affine.rots_from_tensor3x3(rots))
    restype_rigidgroup_is_ambiguous = all_atom_mask.new_zeros(21, 8)
    restype_rigidgroup_rots = torch.tile(
        torch.eye(3, dtype=all_atom_mask.dtype, device=all_atom_mask.device),
        (*((1, ) * batch_dims), 21, 8, 1, 1))

    for resname, _ in residue_constants.residue_atom_renaming_swaps.items():
        restype = residue_constants.restype_order[
            residue_constants.restype_3to1[resname]]
        chi_idx = int(sum(residue_constants.chi_angles_mask[restype]) - 1)
        restype_rigidgroup_is_ambiguous[restype, chi_idx + 4] = 1
        restype_rigidgroup_rots[restype, chi_idx + 4, 1, 1] = -1
        restype_rigidgroup_rots[restype, chi_idx + 4, 2, 2] = -1

    residx_rigidgroup_is_ambiguous = batched_gather(
        restype_rigidgroup_is_ambiguous,
        aatype,
        dim=-2,
        no_batch_dims=batch_dims)
    residx_rigidgroup_is_ambiguity_rot = batched_gather(
        restype_rigidgroup_rots, aatype, dim=-4, no_batch_dims=batch_dims)

    alt_gt_frames = affine.rigids_mul_rots(
        gt_frames,
        affine.rots_from_tensor3x3(residx_rigidgroup_is_ambiguity_rot))
    gt_frames_flat12 = affine.rigids_to_tensor_flat12(gt_frames)
    alt_gt_frames_flat12 = affine.rigids_to_tensor_flat12(alt_gt_frames)

    gt_frames_flat12 = gt_frames_flat12.reshape(*(aatype_in_shape + (8, 12)))
    alt_gt_frames_flat12 = alt_gt_frames_flat12.reshape(*(aatype_in_shape +
                                                          (8, 12)))
    gt_exists = gt_exists.reshape(*(aatype_in_shape + (8, )))
    group_exists = group_exists.reshape(*(aatype_in_shape + (8, )))
    residx_rigidgroup_is_ambiguous = residx_rigidgroup_is_ambiguous.reshape(
        *(aatype_in_shape + (8, )))
    return {
        'rigidgroups_gt_frames': gt_frames_flat12,
        'rigidgroups_gt_exists': gt_exists,
        'rigidgroups_group_exists': group_exists,
        'rigidgroups_group_is_ambiguous': residx_rigidgroup_is_ambiguous,
        'rigidgroups_alt_gt_frames': alt_gt_frames_flat12
    }
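
A shape sketch with dummy coordinates; masks of ones simply mark every atom as present, and the helper modules (affine, residue_constants, batched_gather, np for numpy) are assumed importable as in the snippet:

import torch

num_res = 16
aatype = torch.randint(0, 20, (num_res,))
out = atom37_to_frames(aatype,
                       torch.randn(num_res, 37, 3),
                       torch.ones(num_res, 37))
# out['rigidgroups_gt_frames']      -> (N, 8, 12)
# out['rigidgroups_gt_exists']      -> (N, 8)
# out['rigidgroups_alt_gt_frames']  -> (N, 8, 12)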