Example #1
def compute_kernel(representation,
                   zeta,
                   feature1,
                   feature2=None,
                   kernel_type='global',
                   time_estimate=0):
    if kernel_type == 'atomic':
        kernel = Kernel(representation,
                        name='Cosine',
                        target_type='Atom',
                        zeta=zeta)
    else:
        kernel = Kernel(representation,
                        name='Cosine',
                        target_type='Structure',
                        zeta=zeta)
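    # NOTE: the commented-out block below sketches a multiprocessing-based
    # progress-timer path; it references helpers (tqdm_timer, kernel_function,
    # rep2) that are not defined in this snippet, so it stays disabled.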
    # if(time_estimate!=0):
    #     manager = multiprocessing.Manager()
    #     return_dict = manager.dict()
    #     # timer = Thread(target=tqdm_timer, args=(time_estimate,))
    #     timer = multiprocessing.Process(target=tqdm_timer, args=(time_estimate,))
    #     timer.start()
    #     # kthread = Thread(target=kernel_function, args=(rep2, zeta,) if rep2!=None else (zeta,))
    #     kthread = multiprocessing.Process(target=kernel_function, args=(rep2, zeta,) if rep2!=None else (zeta,))
    #     kthread.start()
    #     kthread.join()
    #     print(return_dict.values())
    #     return return_dict.values()
    # else:
    if feature2 is not None:
        return kernel(feature2)
    else:
        return kernel(feature1)
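A minimal usage sketch for compute_kernel. The structure file name is a placeholder and the SphericalInvariants hyperparameters are borrowed from Example #8; both are illustrative assumptions, not part of the original example.

# Hypothetical usage of compute_kernel (sketch); the file name and
# hyperparameters below are placeholders.
import ase.io
from rascal.representations import SphericalInvariants

frames = ase.io.read("structures.xyz", ":")  # placeholder path
rep = SphericalInvariants(interaction_cutoff=3.5,
                          cutoff_smooth_width=0.5,
                          max_radial=6,
                          max_angular=6,
                          gaussian_sigma_type="Constant",
                          gaussian_sigma_constant=0.5,
                          soap_type="PowerSpectrum",
                          cutoff_function_type="ShiftedCosine",
                          normalize=True,
                          radial_basis="GTO")
features = rep.transform(frames)
# per-structure ('global') cosine kernel with zeta=2
K = compute_kernel(rep, zeta=2, feature1=features, kernel_type='global')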
Example #2
    def test_model_call(self):

        rep = SphericalInvariants(**self.hypers)

        features = rep.transform([self.frame])

        for target_type in ["Atom", "Structure"]:
            cosine_kernel = Kernel(rep,
                                   name="Cosine",
                                   target_type=target_type,
                                   zeta=2)
            cosine_kernel(features)

        # wrong name
        with self.assertRaises(RuntimeError):
            Kernel(rep, name="WrongName", target_type="Structure", zeta=2)
        with self.assertRaises(RuntimeError):
            Kernel(rep, name="cosine", target_type="Structure", zeta=2)
        # wrong target_type
        with self.assertRaises(RuntimeError):
            Kernel(rep, name="Cosine", target_type="WrongType", zeta=2)
        with self.assertRaises(RuntimeError):
            Kernel(rep, name="Cosine", target_type="structure", zeta=2)
        with self.assertRaises(RuntimeError):
            Kernel(rep, name="Cosine", target_type="atom", zeta=2)
        # wrong zeta
        with self.assertRaises(ValueError):
            Kernel(rep, name="Cosine", target_type="Structure", zeta=2.5)
        with self.assertRaises(ValueError):
            Kernel(rep, name="Cosine", target_type="Structure", zeta=-2)
Example #3
    def test_pickle(self):
        rep = SphericalInvariants(**self.hypers)
        cosine_kernel = Kernel(rep,
                               name="Cosine",
                               target_type="Structure",
                               zeta=2)
        serialized = pickle.dumps(cosine_kernel)
        cosine_kernel_ = pickle.loads(serialized)
        self.assertTrue(to_dict(cosine_kernel) == to_dict(cosine_kernel_))
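Besides pickling, kernel objects can also be written to and read back from disk with librascal's dump_obj/load_obj helpers, as done in Examples #5 and #7. A brief sketch, assuming the helpers are importable from rascal.utils and using a placeholder file name:

from rascal.utils import dump_obj, load_obj

dump_obj("cosine_kernel.json", cosine_kernel)  # kernel built as above; file name is a placeholder
cosine_kernel_restored = load_obj("cosine_kernel.json")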
Example #4
    def test_serialization(self):
        rep = SphericalInvariants(**self.hypers)

        for target_type in ["Atom", "Structure"]:
            cosine_kernel = Kernel(rep, name="Cosine", target_type=target_type, zeta=2)

            cosine_kernel_dict = to_dict(cosine_kernel)
            cosine_kernel_copy = from_dict(cosine_kernel_dict)
            cosine_kernel_copy_dict = to_dict(cosine_kernel_copy)

            self.assertTrue(cosine_kernel_dict == cosine_kernel_copy_dict)
Example #5
def compute_knm(job):
    sp = _decode(job.statepoint())
    st, lg = job.sp.start_structure, job.sp.n_structures
    frames = fromfile(job.sp.filename)[st:st + lg]

    X_pseudo = load_obj(job.fn(group['sparse_point_fn']))

    hypers = X_pseudo.representation._get_init_params()
    hypers['compute_gradients'] = job.sp.train_with_grad

    soap = SphericalInvariants(**hypers)
    kernel = Kernel(soap, **sp['kernel'])

    Nstructures = len(frames)
    Ngrad_stride = [0]
    Ngrads = 0
    for frame in frames:
        n_at = len(frame)
        Ngrad_stride.append(n_at * 3)
        Ngrads += n_at * 3
    Ngrad_stride = np.cumsum(Ngrad_stride) + Nstructures
    dump_obj(job.fn(group['kernel_fn']), kernel)

    if job.sp.train_with_grad:
        KNM = np.zeros((Nstructures + Ngrads, X_pseudo.size()))
    else:
        KNM = np.zeros((Nstructures, X_pseudo.size()))

    for i_frame, frame in enumerate(frames):
        en_row, grad_rows = compute(i_frame,
                                    frame,
                                    soap,
                                    kernel,
                                    X_pseudo,
                                    grad=job.sp.train_with_grad)
        KNM[i_frame] = en_row
        if job.sp.train_with_grad:
            KNM[Ngrad_stride[i_frame]:Ngrad_stride[i_frame + 1]] = grad_rows

    np.save(job.fn(group['knm_fn']), KNM)
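To make the row layout of KNM explicit: with train_with_grad enabled, the first Nstructures rows hold one energy row per frame and the remaining rows hold three gradient rows per atom, addressed through Ngrad_stride. A small worked sketch with two hypothetical frames of 3 and 2 atoms:

import numpy as np

n_atoms_per_frame = [3, 2]        # two hypothetical frames
Nstructures = len(n_atoms_per_frame)

Ngrad_stride = [0] + [3 * n for n in n_atoms_per_frame]
Ngrad_stride = np.cumsum(Ngrad_stride) + Nstructures
# Ngrad_stride == [2, 11, 17]:
#   rows 0..1   -> energy rows of frames 0 and 1
#   rows 2..10  -> 9 gradient rows (3 atoms x 3 Cartesian components), frame 0
#   rows 11..16 -> 6 gradient rows (2 atoms x 3 Cartesian components), frame 1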
Example #6
    def test_representation_gradient(self):
        """
        Test the get_features and get_features_gradient functions by computing
        the linear sparse kernel matrix and check that the exported features
        lead to the same kernel matrix as the reference method.
        """
        hypers = deepcopy(self.hypers)
        hypers["compute_gradients"] = True
        rep = SphericalInvariants(**hypers)

        features = rep.transform(self.frames)

        n_sparses = {1: 1, 6: 1, 8: 1, 14: 1, 15: 1, 20: 1, 24: 1}

        compressor = FPSFilter(rep, n_sparses, act_on="sample per species")
        X_pseudo = compressor.select_and_filter(features)

        xs = X_pseudo.get_features()
        n_sparse, n_feat = xs.shape
        masks = {sp: np.zeros(n_sparse, dtype=bool) for sp in n_sparses}
        ii = 0
        for sp, mask in masks.items():
            mask[ii:ii + n_sparses[sp]] = 1
            ii = ii + n_sparses[sp]

        zeta = 1
        kernel = Kernel(rep,
                        name="GAP",
                        zeta=zeta,
                        target_type="Structure",
                        kernel_type="Sparse")

        ij = features.get_gradients_info()
        n_atoms = len(np.unique(ij[:, 1]))
        n_neigh = ij.shape[0]

        KNM_ref = kernel(features, X_pseudo, (False, False))
        X = features.get_features(rep).reshape((n_atoms, n_feat))
        KNM = np.zeros((len(self.frames), n_sparse))
        ii = 0
        for iff, frame in enumerate(features):
            for at in frame:
                sp = at.atom_type
                KNM[iff, masks[sp]] += np.dot(X[ii], xs[masks[sp]].T)
                ii += 1
        self.assertTrue(np.allclose(KNM_ref, KNM))

        KNM_ref = kernel(features, X_pseudo, (True, False))

        X_der = features.get_features_gradient(rep).reshape(
            (n_neigh, 3, n_feat))

        KNM = np.zeros((n_atoms, 3, n_sparse))
        for ii, (i_frame, i, j, i_sp, j_sp) in enumerate(ij):
            sp = i_sp
            KNM[j, 0, masks[sp]] += np.dot(X_der[ii, 0], xs[masks[sp]].T)
            KNM[j, 1, masks[sp]] += np.dot(X_der[ii, 1], xs[masks[sp]].T)
            KNM[j, 2, masks[sp]] += np.dot(X_der[ii, 2], xs[masks[sp]].T)

        KNM = KNM.reshape((-1, n_sparse))

        self.assertTrue(np.allclose(KNM_ref, KNM))
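The manual reconstruction above relies on the kernel being linear in the features for zeta = 1: each sparse-kernel entry is a plain dot product between an environment's feature vector and a sparse point of the same species, summed over the structure,

    KNM[A, m] = sum_{i in A, species(i) == species(m)} x_i . x_m

which is exactly what the per-species masks implement.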
Example #7
def compute_benchmark(job):
    from rascal.models.krr import compute_sparse_kernel_gradients
    sp = _decode(job.statepoint())
    st, lg = job.sp.start_structure, job.sp.n_structures
    frames = fromfile(job.sp.filename)[st:st + lg]

    model = load_obj(job.fn(group['model_fn']))
    soap = model.get_representation_calculator()
    grads_timing = job.sp.grads_timing

    hypers = soap._get_init_params()
    hypers['compute_gradients'] = grads_timing
    soap = SphericalInvariants(**hypers)

    rc = sp['representation']['interaction_cutoff']
    nl_options = [
        dict(name='centers', args=[]),
        dict(name='neighbourlist', args=dict(cutoff=rc)),
        # dict(name='halflist', args=dict()),
        dict(name="centercontribution", args=dict()),
        dict(name='strict', args=dict(cutoff=rc))
    ]

    kernel = Kernel(soap, **sp['kernel'])

    N_ITERATIONS = sp['N_ITERATIONS']
    if grads_timing:
        tags = ['NL', 'rep with grad', 'pred energy', 'pred forces']
    else:
        tags = ['NL', 'rep', 'pred energy']

    timers = {k: Timer(tag=k, logger=None) for k in tags}
    if job.sp.name != 'qm9':
        frames = [
            make_supercell(frames[0],
                           job.sp.n_replication * np.eye(3),
                           wrap=True,
                           tol=1e-11)
        ]
    else:
        frames = frames[:100]

    if grads_timing:
        for ii in range(N_ITERATIONS):
            with timers['NL']:
                managers = AtomsList(frames, nl_options)
            sleep(0.1)
            with timers['rep with grad']:
                managers = soap.transform(managers)
            sleep(0.1)
            Y0 = model._get_property_baseline(managers)
            with timers['pred energy']:
                KNM = kernel(managers, model.X_train, (False, False))
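                # the result of the prediction is discarded on purpose;
                # only the kernel evaluation + weight contraction is timed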
                Y0 + np.dot(KNM, model.weights).reshape((-1))
            sleep(0.1)
            with timers['pred forces']:
                rep = soap._representation
                forces = -compute_sparse_kernel_gradients(
                    rep, model.kernel._kernel, managers.managers,
                    model.X_train._sparse_points, model.weights.reshape(
                        (1, -1)))
            sleep(0.1)
            managers, KNM = [], []
            del managers, KNM
            sleep(0.3)
    else:
        for ii in range(N_ITERATIONS):
            with timers['NL']:
                managers = AtomsList(frames, nl_options)
            sleep(0.1)
            with timers['rep']:
                managers = soap.transform(managers)
            sleep(0.1)
            Y0 = model._get_property_baseline(managers)
            with timers['pred energy']:
                KNM = kernel(managers, model.X_train, (False, False))
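                # the result of the prediction is discarded on purpose;
                # only the kernel evaluation + weight contraction is timed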
                Y0 + np.dot(KNM, model.weights).reshape((-1))
            sleep(0.1)

            managers, KNM = [], []
            del managers, KNM
            sleep(0.3)

    n_atoms = 0
    for frame in frames:
        n_atoms += len(frame)

    timings = []
    for tag in tags:
        data = timers[tag].dumps()
        data.update({'name': job.sp.name, 'n_atoms': n_atoms})
        timings.append(data)

    tojson(job.fn(group['benchmark_fn']), timings)
Example #8
def dump_reference_json():
    import ubjson
    import os
    from copy import copy
    sys.path.insert(0, os.path.join(root, 'build/'))
    sys.path.insert(0, os.path.join(root, 'tests/'))

    cutoffs = [3.5]
    gaussian_sigmas = [0.5]
    max_radials = [6]
    max_angulars = [6]
    soap_types = ["RadialSpectrum", "PowerSpectrum"]

    fn = os.path.join(inputs_path, "small_molecules-20.json")
    fn_to_write = os.path.join(
        'reference_data', "inputs", "small_molecules-20.json")
    start = 0
    length = 5
    representations = ['spherical_invariants']
    kernel_names = ['Cosine']
    target_types = ['Structure', 'Atom']
    dependant_args = dict(Cosine=[dict(zeta=1), dict(zeta=2), dict(zeta=4)])

    data = dict(filename=fn_to_write,
                start=start,
                length=length,
                cutoffs=cutoffs,
                gaussian_sigmas=gaussian_sigmas,
                max_radials=max_radials,
                soap_types=soap_types,
                kernel_names=kernel_names,
                target_types=target_types,
                dependant_args=dependant_args,
                rep_info=dict(spherical_invariants=[]))

    frames = read(fn, '{}:{}'.format(start, start + length))
    for representation_name in representations:
        for cutoff in cutoffs:
            print(fn, cutoff)
            data['rep_info'][representation_name].append([])
            for kernel_name in kernel_names:
                for target_type in target_types:
                    for kwargs in dependant_args[kernel_name]:
                        for soap_type in soap_types:
                            for gaussian_sigma in gaussian_sigmas:
                                for max_radial in max_radials:
                                    for max_angular in max_angulars:
                                        if 'RadialSpectrum' == soap_type:
                                            max_angular = 0

                                        hypers = {"interaction_cutoff": cutoff,
                                                  "cutoff_smooth_width": 0.5,
                                                  "max_radial": max_radial,
                                                  "max_angular": max_angular,
                                                  "gaussian_sigma_type": "Constant",
                                                  "gaussian_sigma_constant": gaussian_sigma,
                                                  "soap_type": soap_type,
                                                  "cutoff_function_type": "ShiftedCosine",
                                                  "normalize": True,
                                                  "radial_basis": "GTO"}
                                        soap = SphericalInvariants(**hypers)
                                        soap_vectors = soap.transform(frames)
                                        hypers_kernel = dict(name=kernel_name,
                                                             target_type=target_type)
                                        hypers_kernel.update(**kwargs)
                                        kernel = Kernel(soap, **hypers_kernel)
                                        kk = kernel(soap_vectors)
                                        # x = get_spectrum(hypers, frames)
                                        for aa in soap.nl_options:
                                            aa['initialization_arguments'] = aa['args']

                                        data['rep_info'][representation_name][-1].append(
                                            dict(kernel_matrix=kk.tolist(),
                                                 hypers_rep=copy(soap.hypers),
                                                 hypers_manager=copy(soap.nl_options),
                                                 hypers_kernel=copy(hypers_kernel)))

    with open(os.path.join(root, dump_path,
                           "kernel_reference.ubjson"), 'wb') as f:
        ubjson.dump(data, f)
Example #9
    def test_numerical_kernel_stress(self):
        """Tests if the numerical kernel stress on the python site matches the one
        on the cpp site."""

        with open(self.kernel_input_filename, "r") as f:
            kernel_inputs = json.load(f)

        kernel_inputs = [kernel_inputs[i] for i in self.selected_test_indices]
        for kernel_input in kernel_inputs:
            structures_filename = kernel_input["filename"]
            frames = ase.io.read(structures_filename,
                                 ":" + str(kernel_input["n_structures"]))
            h_disp = kernel_input["h"]

            selected_ids = kernel_input["selected_ids"]
            hypers = kernel_input["calculator"]
            # TODO(alex) the cutoff function is somewhat hard-coded here;
            #            a general helper translating the c++ parameters to
            #            their python equivalents would be more suitable
            #            (future work)
            calculator = SphericalInvariants(
                soap_type=hypers["soap_type"],
                radial_basis=hypers["radial_contribution"]["type"],
                max_radial=hypers["max_radial"],
                max_angular=hypers["max_angular"],
                cutoff_function_type=hypers["cutoff_function"]["type"],
                interaction_cutoff=hypers["cutoff_function"]["cutoff"]
                ["value"],
                cutoff_smooth_width=hypers["cutoff_function"]["smooth_width"]
                ["value"],
                gaussian_sigma_type=hypers["gaussian_density"]["type"],
                gaussian_sigma_constant=hypers["gaussian_density"]
                ["gaussian_sigma"]["value"],
                compute_gradients=hypers["compute_gradients"],
                normalize=hypers["normalize"],
            )
            kernel = Kernel(calculator,
                            kernel_type="Sparse",
                            **kernel_input["kernel"])
            for j in range(len(frames)):
                # we do this frame by frame to be able to use the function
                # `displace_strain_tensor` as in the
                # `test_displace_strain_tensor` test
                frame = frames[j]
                selected_id = selected_ids[j]
                managers = calculator.transform([frame])
                sparse_points = SparsePoints(calculator)
                sparse_points.extend(managers, [selected_id])

                # call the bound C++ function; the minus sign is needed
                # because the function returns the negative stress
                cpp_site_stress = -compute_numerical_kernel_gradients(
                    kernel, calculator, managers, sparse_points, h_disp,
                    True)[-6:]

                def compute_numerical_kernel_gradient_on_python_site():
                    python_site_stress = np.zeros((6, len(selected_id)))
                    for i in range(6):
                        frame_displaced_plus = displace_strain_tensor(
                            copy.deepcopy(frame),
                            self.matrix_indices_in_voigt_notation[i][0],
                            self.matrix_indices_in_voigt_notation[i][1],
                            h_disp,
                        )
                        managers = calculator.transform([frame_displaced_plus])
                        kernel_plus = kernel(managers, sparse_points)

                        frame_displaced_minus = displace_strain_tensor(
                            copy.deepcopy(frame),
                            self.matrix_indices_in_voigt_notation[i][0],
                            self.matrix_indices_in_voigt_notation[i][1],
                            -h_disp,
                        )
                        managers = calculator.transform(
                            [frame_displaced_minus])
                        kernel_minus = kernel(managers, sparse_points)

                        python_site_stress[i] = np.sum(
                            (kernel_plus - kernel_minus) / (2 * h_disp),
                            axis=0)
                    return python_site_stress / frame.get_volume()

                python_site_stress = compute_numerical_kernel_gradient_on_python_site()

                relative_error = compute_relative_error(
                    python_site_stress, cpp_site_stress)
                absolute_error = np.abs(python_site_stress - cpp_site_stress)
                passes_test = np.all(
                    np.logical_or(
                        relative_error < self.error_threshold,
                        absolute_error < self.error_threshold,
                    ))
                if not passes_test:
                    np.set_printoptions(suppress=True)
                    print("structures_filename:", structures_filename)
                    print("structure index:", j)
                    print()
                    print("relative_error:\n", relative_error)
                    print()
                    print("python_site_stress:\n", python_site_stress)
                    print("cpp_site_stress:\n", cpp_site_stress)

                self.assertTrue(passes_test)
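For reference, the quantity the Python-side helper approximates above is a central finite difference of the kernel with respect to strain, per Voigt component i:

    stress_i ≈ (1 / V) * sum_rows [K(strain_i = +h) - K(strain_i = -h)] / (2 * h)

where V is the cell volume and K(strain_i = ±h) is the kernel matrix recomputed after displacing the strain tensor by ±h in the i-th Voigt component; this is the formula the loop over the six components implements.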