Example #1
    def load_datasets(self):
        ''' load train and validation dataset '''
        self.train_data = data_frame.DataFrame(
            self.in_path + "_train.h5",
            output_label=self.class_label,
            phi_padding=self.phi_padding)
        self.val_data = data_frame.DataFrame(
            self.in_path + "_val.h5",
            output_label=self.class_label,
            phi_padding=self.phi_padding)

        self.num_classes = self.train_data.num_classes
Example #2
    def load_datasets(self):
        ''' load train and validation dataset '''
        self.train_data = data_frame.DataFrame(
            self.in_path + "_train.h5",
            output_label=self.class_label,
            variables=self.input_variables,
            n_particles=self.n_particles,
            normed_inputs=self.normed_inputs)
        self.val_data = data_frame.DataFrame(
            self.in_path + "_val.h5",
            output_label=self.class_label,
            variables=self.input_variables,
            n_particles=self.n_particles,
            normed_inputs=self.normed_inputs)

        self.num_classes = self.train_data.num_classes
Example #3
    def eval_model(self):
        # loading test examples
        self.test_data = data_frame.DataFrame(
            self.in_path + "_test.h5",
            output_label=self.class_label,
            phi_padding=self.phi_padding)

        self.target_names = [self.test_data.inverted_label_dict[i] for i in range(
            self.test_data.min_jets, self.test_data.max_jets+1)]

        self.test_eval = self.model.evaluate(
            self.test_data.X, self.test_data.one_hot)
        print("test loss:     {}".format( self.test_eval[0] ))
        for im, metric in enumerate(self.eval_metrics):
            print("test {}: {}".format( metric, self.test_eval[im+1] ))

        self.history = self.trained_model.history
        
        self.predicted_vector = self.model.predict(self.test_data.X)
        self.predicted_classes = np.argmax(self.predicted_vector, axis=1)

        # clamp predicted classes to the valid [min_jets, max_jets] range
        self.predicted_classes = np.clip(
            self.predicted_classes, self.test_data.min_jets, self.test_data.max_jets)

        self.confusion_matrix = confusion_matrix(
            self.test_data.Y, self.predicted_classes)
Example #4
    def _load_datasets(self):
        ''' load data set '''

        return data_frame.DataFrame(input_samples=self.input_samples,
                                    event_category=self.event_category,
                                    train_variables=self.train_variables,
                                    test_percentage=self.test_percentage,
                                    norm_variables=True,
                                    additional_cut=self.additional_cut)
Example #5
    def _load_datasets(self):
        ''' load dataset '''
        return data_frame.DataFrame(path_to_input_files=self.in_path,
                                    classes=self.event_classes,
                                    event_category=self.event_category,
                                    train_variables=self.train_variables,
                                    prenet_targets=self.prenet_targets,
                                    test_percentage=self.test_percentage,
                                    norm_variables=True,
                                    additional_cut=self.additional_cut)
Example #6
    def _load_datasets(self):
        ''' load data set '''

        return data_frame.DataFrame(
            input_samples       = self.input_samples,
            input_features      = self.inputs,
            target_features     = self.targets,
            feature_scaling     = self.feature_scaling,
            test_percentage     = self.test_percentage,
            val_percentage      = self.val_percentage)
Example #7
    def _load_datasets(self, shuffle_seed, balanceSamples):
        ''' load data set '''
        return data_frame.DataFrame(input_samples=self.input_samples,
                                    event_category=self.category_cutString,
                                    train_variables=self.train_variables,
                                    test_percentage=self.test_percentage,
                                    norm_variables=self.norm_variables,
                                    shuffleSeed=shuffle_seed,
                                    balanceSamples=balanceSamples,
                                    evenSel=self.evenSel,
                                    addSampleSuffix=self.addSampleSuffix)
Example #8
    def eval_model(self):
        self.test_data = data_frame.DataFrame(self.in_path + "_test.h5")

        # autoencoder evaluation: the inputs double as the reconstruction targets
        self.test_eval = self.model.evaluate(self.test_data.X,
                                             self.test_data.X)
        print("test loss: {}".format(self.test_eval[0]))
        for im, metric in enumerate(self.eval_metrics):
            print("test {}: {}".format(metric, self.test_eval[im + 1]))

        self.history = self.trained_model.history

        self.test_encoded_images = self.encoder.predict(self.test_data.X)
        self.test_decoded_images = self.model.predict(self.test_data.X)
Example #9
    def eval_model(self):
        # loading test examples
        self.test_data = data_frame.DataFrame(
            self.in_path + "_test.h5",
            output_label=self.class_label,
            one_hot=False,
            phi_padding=self.phi_padding)

        self.test_eval = self.model.evaluate(
            self.test_data.X, self.test_data.Y)
        print("test loss: {}".format( self.test_eval[0] ))
        for im, metric in enumerate(self.eval_metrics):
            print("test {}: {}".format( metric, self.test_eval[im+1] ))

        self.history = self.trained_model.history
        
        self.predicted_vector = self.model.predict(self.test_data.X)

        # round predictions to the nearest integer class (somewhat arbitrary);
        # so far this is only implemented for a single output neuron with integer targets
        self.predicted_classes = [int(val + 0.5) for val in self.predicted_vector[:, 0]]

        self.confusion_matrix = confusion_matrix(
            self.test_data.Y, self.predicted_classes)
Example #10
    def load_datasets(self):
        ''' load train and validation dataset '''
        self.train_data = data_frame.DataFrame(self.in_path + "_train.h5")
        self.val_data = data_frame.DataFrame(self.in_path + "_val.h5")
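
Note: every example constructs a project-local data_frame.DataFrame wrapper whose implementation is not shown here, and whose constructor arguments differ from example to example. The stub below is only a hypothetical sketch of the interface the snippets appear to rely on: the attribute names are taken directly from the code above, while the types and comments are assumptions for illustration.

# Hypothetical interface sketch, inferred from the examples above;
# this is NOT the project's actual data_frame module.
import numpy as np


class DataFrame:
    '''Attributes that the example code accesses on a DataFrame instance.'''

    X: np.ndarray                # input features passed to model.evaluate/predict
    Y: np.ndarray                # integer targets (e.g. jet multiplicity)
    one_hot: np.ndarray          # one-hot encoded targets
    num_classes: int             # number of output classes
    min_jets: int                # smallest target class present in the sample
    max_jets: int                # largest target class present in the sample
    inverted_label_dict: dict    # maps a class value back to its label name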