Example #1
0
    def predict(self, datasets):
        """Run inference with the trained master NNP on each dataset.

        Restores the master network weights from
        ``prediction_config.load_dir``, then for every dataset builds a
        structure-specific HDNNP, copies the master parameters into it,
        and evaluates it with training/backprop disabled.

        Args:
            datasets: iterable of dataset objects exposing
                ``elemental_composition``, ``tag`` and ``property``.

        Returns:
            list[dict]: one dict per dataset containing ``'tag'`` plus one
            entry per predicted property (coefficient-scaled raw arrays).
        """
        model_cfg = self.model_config
        pred_cfg = self.prediction_config

        # Rebuild the master network and restore its serialized weights.
        master_nnp = MasterNNP(
            pred_cfg.elements, model_cfg.n_input,
            model_cfg.hidden_layers, model_cfg.n_output)
        chainer.serializers.load_npz(
            pred_cfg.load_dir / 'master_nnp.npz', master_nnp)

        results = []
        for dataset in datasets:
            # Per-structure network sharing the master's parameters.
            hdnnp = HighDimensionalNNP(
                dataset.elemental_composition,
                model_cfg.n_input, model_cfg.hidden_layers,
                model_cfg.n_output)
            hdnnp.sync_param_with(master_nnp)

            batch = chainer.dataset.concat_examples(dataset)
            inputs = [batch[f'inputs/{order}']
                      for order in range(pred_cfg.order + 1)]
            # Inference mode: no train-time behavior, no graph retained.
            with chainer.using_config('train', False), \
                 chainer.using_config('enable_backprop', False):
                predictions = hdnnp.predict(inputs, pred_cfg.order)

            entry = {'tag': dataset.tag}
            entry.update(
                (name, coeff * pred.data)
                for name, coeff, pred in zip(dataset.property.properties,
                                             dataset.property.coefficients,
                                             predictions))
            results.append(entry)
        return results
Example #2
0
    def start(self):
        """Convert a finished training run into a deployable model file.

        Loads the fitted preprocesses and the serialized master NNP from
        ``self.load_dir``, then dispatches to the writer for the requested
        output format (only ``'lammps'`` is handled here).
        """
        train_res = self.training_result
        data_cfg = self.dataset_config
        model_cfg = self.model_config

        # Re-instantiate each preprocess and restore its fitted state.
        preprocesses = []
        for name, args, kwargs in data_cfg.preprocesses:
            proc = PREPROCESS[name](*args, **kwargs)
            proc.load(self.load_dir / 'preprocess' / f'{name}.npz',
                      verbose=False)
            preprocesses.append(proc)

        # Rebuild the master network and load its trained weights.
        master_nnp = MasterNNP(train_res['training']['elements'],
                               train_res['model']['n_input'],
                               model_cfg.hidden_layers,
                               train_res['model']['n_output'])
        chainer.serializers.load_npz(self.load_dir / 'master_nnp.npz',
                                     master_nnp)

        if self.format == 'lammps':
            self.dump_for_lammps(preprocesses, master_nnp)
Example #3
0
    def train(self, dataset, comm=None):
        """Train the master NNP over each (training, test) split in *dataset*.

        For every split a structure-specific HDNNP is synchronized with the
        shared master network and trained via a chainer Trainer with early
        stopping; only rank 0 writes reports and the final master weights.

        Args:
            dataset: iterable yielding ``(training, test)`` dataset pairs;
                each exposes ``tag``, ``elemental_composition`` and
                ``property`` (assumption — inferred from attribute access
                below; confirm against the dataset class).
            comm: optional chainermn communicator; a 'naive' one is created
                over ``MPI.comm`` when omitted.

        Returns:
            dict: ``{'training_time': float, 'observation': list}``,
            presumably filled in by ``Manager`` during the run — TODO confirm.
        """
        mc = self.model_config
        tc = self.training_config
        if comm is None:
            comm = chainermn.create_communicator('naive', MPI.comm)
        result = {'training_time': 0.0, 'observation': []}

        # model and optimizer
        # The master holds one sub-network per element; its Adam optimizer is
        # wrapped for multi-node gradient exchange and given L1/L2 hooks.
        master_nnp = MasterNNP(
            tc.elements, mc.n_input, mc.hidden_layers, mc.n_output)
        master_opt = chainer.optimizers.Adam(tc.init_lr)
        master_opt = chainermn.create_multi_node_optimizer(master_opt, comm)
        master_opt.setup(master_nnp)
        master_opt.add_hook(chainer.optimizer_hooks.Lasso(tc.l1_norm))
        master_opt.add_hook(chainer.optimizer_hooks.WeightDecay(tc.l2_norm))

        for training, test in dataset:
            tag = training.tag
            properties = training.property.properties

            # iterators
            # Each MPI rank gets an equal share of the global batch size.
            train_iter = chainer.iterators.SerialIterator(
                training, tc.batch_size // MPI.size, repeat=True, shuffle=True)
            test_iter = chainer.iterators.SerialIterator(
                test, tc.batch_size // MPI.size, repeat=False, shuffle=False)

            # model
            # Per-structure network; parameters are copied from the master so
            # both are updated consistently by the Updater below.
            hdnnp = HighDimensionalNNP(
                training.elemental_composition,
                mc.n_input, mc.hidden_layers, mc.n_output)
            hdnnp.sync_param_with(master_nnp)
            # NOTE(review): bare chainer.Optimizer() is the abstract base —
            # presumably only used as a gradient carrier for 'main'; confirm
            # against Updater's implementation.
            main_opt = chainer.Optimizer()
            main_opt = chainermn.create_multi_node_optimizer(main_opt, comm)
            main_opt.setup(hdnnp)

            # loss function
            # tc.loss_function is a (name, kwargs) pair; the name is unused
            # here because self.loss_function is already resolved.
            _, kwargs = tc.loss_function
            loss_function = self.loss_function(hdnnp, properties, **kwargs)
            observation_keys = loss_function.observation_keys

            # triggers
            # Early stopping monitors the last validation observation key,
            # checked every `tc.interval` epochs, minimizing.
            interval = (tc.interval, 'epoch')
            stop_trigger = EarlyStoppingTrigger(
                check_trigger=interval,
                monitor=f'val/main/{observation_keys[-1]}',
                patients=tc.patients, mode='min',
                verbose=self.verbose, max_trigger=(tc.epoch, 'epoch'))

            # updater and trainer
            updater = Updater(train_iter,
                              {'main': main_opt, 'master': master_opt},
                              loss_func=loss_function.eval)
            out_dir = tc.out_dir / tag
            trainer = chainer.training.Trainer(updater, stop_trigger, out_dir)

            # extensions
            # Exponential LR decay on Adam's 'alpha', floored at tc.final_lr.
            trainer.extend(ext.ExponentialShift('alpha', 1 - tc.lr_decay,
                                                target=tc.final_lr,
                                                optimizer=master_opt))
            evaluator = chainermn.create_multi_node_evaluator(
                ext.Evaluator(test_iter, hdnnp, eval_func=loss_function.eval),
                comm)
            trainer.extend(evaluator, name='val')
            if tc.scatter_plot:
                trainer.extend(ScatterPlot(test, hdnnp, comm),
                               trigger=interval)
            # Reporting extensions only on rank 0 to avoid duplicate output.
            if MPI.rank == 0:
                if tc.log_report:
                    trainer.extend(ext.LogReport(log_name='training.log'))
                if tc.print_report:
                    trainer.extend(ext.PrintReport(
                        ['epoch', 'iteration']
                        + [f'main/{key}' for key in observation_keys]
                        + [f'val/main/{key}' for key in observation_keys]))
                if tc.plot_report:
                    trainer.extend(ext.PlotReport(
                        [f'main/{key}' for key in observation_keys],
                        x_key='epoch', postprocess=set_log_scale,
                        file_name='training_set.png', marker=None))
                    trainer.extend(ext.PlotReport(
                        [f'val/main/{key}' for key in observation_keys],
                        x_key='epoch', postprocess=set_log_scale,
                        file_name='validation_set.png', marker=None))

            # Manager handles snapshot/resume bookkeeping around the run.
            manager = Manager(tag, trainer, result, is_snapshot=True)
            if self.is_resume:
                manager.check_to_resume(self.resume_dir.name)
            if manager.allow_to_run:
                with manager:
                    trainer.run()

        # Persist the shared master weights once, from rank 0 only.
        if MPI.rank == 0:
            chainer.serializers.save_npz(
                tc.out_dir / 'master_nnp.npz', master_nnp)

        return result