Code example #1
def loadModel(self, filename):
    from keras.models import load_model
    # custom_objects_list (defined elsewhere in DeepJetCore) tells Keras how to
    # resolve the project's custom layers and losses during deserialization
    self.keras_model = load_model(filename,
                                  custom_objects=custom_objects_list)
    self.optimizer = self.keras_model.optimizer
    self.compiled = True
    if self.ngpus > 1:
        # with more than one GPU the model still has to be recompiled
        self.compiled = False
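The snippet relies on custom_objects_list, which is defined elsewhere in the project. In Keras, the custom_objects argument is simply a dictionary mapping the class names stored in the saved file to the Python classes that implement them, so a minimal, hypothetical version might look like this (the layer name below is a placeholder, not DeepJetCore's actual list):

from keras.models import load_model
from keras.layers import Layer

class MyCustomLayer(Layer):  # placeholder for a project-specific layer
    def call(self, inputs):
        return inputs

custom_objects_list = {'MyCustomLayer': MyCustomLayer}

model = load_model('some_model.h5', custom_objects=custom_objects_list)  # hypothetical file name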
Code example #2
File: training_base.py Project: dseith/DeepJetCore
    def setModel(self, model, **modelargs):
        if len(self.keras_inputs) < 1:
            raise Exception('setup data first')
        # 'model' is a callable that takes the list of Keras input tensors
        # and returns the built keras model
        self.keras_model = model(self.keras_inputs, **modelargs)
        if hasattr(self.keras_model, "_is_djc_keras_model"):  # compatibility
            self.keras_model.setInputShape(self.keras_inputs)
            self.keras_model.build(None)

        if len(self.keras_weight_model_path):
            # optionally transfer weights from a previously trained model
            from DeepJetCore.modeltools import apply_weights_where_possible, load_model
            self.keras_model = apply_weights_where_possible(
                self.keras_model, load_model(self.keras_weight_model_path))
        #try:
        #    self.keras_model=model(self.keras_inputs,**modelargs)
        #except BaseException as e:
        #    print('problem in setting model. Reminder: since DJC 2.0, NClassificationTargets and RegressionTargets must not be specified anymore')
        #    raise e
        if not self.keras_model:
            raise Exception('Setting model not successful')
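For context, the model argument passed to setModel is expected to be a callable that receives the list of Keras input tensors and returns a built model. A minimal, hypothetical model function in that style (layer sizes and names are purely illustrative) could be:

from tensorflow.keras.layers import Concatenate, Dense
from tensorflow.keras.models import Model

def my_dense_model(inputs, nodes=64):
    # 'inputs' is the list of Input tensors prepared by the training class
    x = Concatenate()(inputs) if len(inputs) > 1 else inputs[0]
    x = Dense(nodes, activation='relu')(x)
    outputs = Dense(1, activation='sigmoid')(x)
    return Model(inputs=inputs, outputs=outputs)

# train.setModel(my_dense_model, nodes=128)  # 'train' would be a training_base instance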
Code example #3
def get_model(modelname):
    # resolve the model name to a file path and load the saved model;
    # load_model and get_model_path are imported elsewhere in the project
    return load_model(get_model_path(modelname))
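get_model_path is not part of this excerpt; it presumably just resolves a model name to a file on disk. A hypothetical stand-in, assuming the models live in a fixed directory and that load_model comes from DeepJetCore.modeltools as in the other examples, might be:

import os
from DeepJetCore.modeltools import load_model  # assumption, not shown in the excerpt

MODEL_DIR = 'models'  # hypothetical location of the saved models

def get_model_path(modelname):
    # hypothetical helper: map a model name to its file under MODEL_DIR
    return os.path.join(MODEL_DIR, modelname + '.h5')

def get_model(modelname):
    return load_model(get_model_path(modelname))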
Code example #4
def _loadModel(self, filename):
    from tensorflow.keras.models import load_model
    # as in example #1, custom_objects_list resolves custom layers and losses,
    # but here the loaded model and its optimizer are returned instead of stored
    keras_model = load_model(filename, custom_objects=custom_objects_list)
    optimizer = keras_model.optimizer
    return keras_model, optimizer
Code example #5
#!/usr/bin/env python3

from argparse import ArgumentParser
from LossLayers import LossLayerBase

parser = ArgumentParser('')
parser.add_argument('inputFile')
parser.add_argument('outputFile')
args = parser.parse_args()

from DeepJetCore.modeltools import load_model

m = load_model(args.inputFile)

# switch off every loss layer so the saved model can be used for pure inference
for l in m.layers:
    if isinstance(l, LossLayerBase):
        print('deactivating layer', l)
        l.active = False

m.save(args.outputFile)
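With placeholder file names (the script's own file name is not part of this excerpt), it would be invoked as:

python3 deactivate_losses.py trained_with_losses.h5 inference_only.h5

Setting active to False presumably makes the loss layers pass data through without contributing a loss, so the resulting file is suitable for inference.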
Code example #6
    # excerpt from a predictor class; os, time, load_model, TrainDataGenerator
    # and the TrainData classes are assumed to be imported at module level
    def predict(self, model=None, model_path=None, output_to_file=True):
        if model_path is None:
            model_path = self.model_path

        if model is None:
            if not os.path.exists(model_path):
                raise FileNotFoundError('Model file not found: ' + model_path)

        assert model_path is not None or model is not None

        outputs = []
        if output_to_file:
            os.makedirs(self.predict_dir, exist_ok=True)

        if model is None:
            model = load_model(model_path)

        all_data = []
        for inputfile in self.input_data_files:

            use_inputdir = self.inputdir
            if inputfile[0] == "/":
                # an absolute path overrides the configured input directory
                use_inputdir = ""
            outfilename = "pred_" + os.path.basename(inputfile)

            print('predicting ', use_inputdir + '/' + inputfile)

            td = self.dc.dataclass()

            #also allows for inheriting classes now, like with tracks or special PU
            if not isinstance(td, TrainData_NanoML) and type(
                    td) is not TrainData_TrackML:
                raise RuntimeError(
                    "TODO: make sure this works for other traindata formats")

            if inputfile.endswith('djctd'):
                # already-converted DeepJetCore data can be read directly
                if self.unbuffered:
                    td.readFromFile(use_inputdir + "/" + inputfile)
                else:
                    td.readFromFileBuffered(use_inputdir + "/" + inputfile)
            else:
                print('converting ' + inputfile)
                td.readFromSourceFile(use_inputdir + "/" + inputfile,
                                      self.dc.weighterobjects,
                                      istraining=False)

            gen = TrainDataGenerator()
            # the batch size must be one, otherwise we would need to play tricks with the row splits later on
            gen.setBatchSize(1)
            gen.setSquaredElementsLimit(False)
            gen.setSkipTooLargeBatches(False)
            gen.setBuffer(td)

            num_steps = gen.getNBatches()
            generator = gen.feedNumpyData()

            dumping_data = []

            thistime = time.time()
            for _ in range(num_steps):
                data_in = next(generator)
                # forward pass; the model returns a dict of output tensors,
                # which are converted to numpy arrays for dumping
                predictions_dict = model(data_in[0])
                for k in predictions_dict.keys():
                    predictions_dict[k] = predictions_dict[k].numpy()
                features_dict = td.createFeatureDict(data_in[0])
                truth_dict = td.createTruthDict(data_in[0])

                dumping_data.append(
                    [features_dict, truth_dict, predictions_dict])

            totaltime = time.time() - thistime
            print('took approx', totaltime / num_steps,
                  's per endcap (also includes dict building)')

            td.clear()
            gen.clear()
            outfilename = os.path.splitext(outfilename)[0] + '.bin.gz'
            if output_to_file:
                td.writeOutPredictionDict(dumping_data,
                                          self.predict_dir + "/" + outfilename)
            outputs.append(outfilename)
            if not output_to_file:
                all_data.append(dumping_data)

        if output_to_file:
            with open(self.predict_dir + "/outfiles.txt", "w") as f:
                for l in outputs:
                    f.write(l + '\n')

        if not output_to_file:
            return all_data
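Stripped of the DeepJetCore-specific plumbing, the inner loop above follows a generic pattern: run the model on one batch at a time, convert every output tensor to numpy, and collect the results. A minimal sketch of that pattern (the model path and the batch generator are placeholders):

import tensorflow as tf

model = tf.keras.models.load_model('my_model.h5')   # placeholder path
results = []
for batch in batch_generator:                        # placeholder: yields model-ready input arrays
    preds = model(batch, training=False)             # forward pass only, no gradients
    if isinstance(preds, dict):
        preds = {k: v.numpy() for k, v in preds.items()}
    results.append(preds)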