Example #1
	def fit(self, x_train, y_train, x_val, y_val, y_true):
		# abort when no GPU is available (is_gpu_available must be called)
		if not tf.test.is_gpu_available():
			print('error')
			exit()
		# x_val and y_val are only used to monitor the test loss and NOT for training  
		batch_size = 16
		nb_epochs = 2000

		# use at most one tenth of the training set as the mini-batch size
		mini_batch_size = int(min(x_train.shape[0] / 10, batch_size))

		start_time = time.time() 

		hist = self.model.fit(x_train, y_train, batch_size=mini_batch_size, epochs=nb_epochs,
			verbose=self.verbose, validation_data=(x_val,y_val), callbacks=self.callbacks)
		
		duration = time.time() - start_time

		self.model.save(self.output_directory+'last_model.hdf5')

		model = keras.models.load_model(self.output_directory+'best_model.hdf5')

		y_pred = model.predict(x_val)

		# convert the predictions from class probabilities to integer labels
		y_pred = np.argmax(y_pred, axis=1)

		save_logs(self.output_directory, hist, y_pred, y_true, duration)

		keras.backend.clear_session()
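
A note on the GPU check at the top of this example: tf.test.is_gpu_available() is deprecated in recent TensorFlow releases. A minimal sketch of an equivalent guard, assuming TensorFlow 2.1 or later (gpu_is_available is a hypothetical helper name):

import tensorflow as tf

def gpu_is_available():
    # True if at least one GPU device is visible to TensorFlow
    return len(tf.config.list_physical_devices('GPU')) > 0

if not gpu_is_available():
    print('error: no GPU available')
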
Example #2
    def fit(self, x_train, y_train, x_val, y_val, y_true):
        # if not tf.test.is_gpu_available:
        #     print('error')
        #     exit()
        # x_val and y_val are only used to monitor the test loss and NOT for training
        batch_size = 64
        nb_epochs = 1500

        mini_batch_size = int(min(x_train.shape[0] / 10, batch_size))

        start_time = time.time()

        hist = self.model.fit(x_train, y_train, batch_size=mini_batch_size, epochs=nb_epochs,
                              verbose=self.verbose, validation_data=(x_val, y_val), callbacks=self.callbacks)

        duration = time.time() - start_time

        self.model.save(self.output_directory + 'last_model.hdf5')

        y_pred = self.predict(x_val, y_true, x_train, y_train, y_val,
                              return_df_metrics=False)

        # save predictions
        np.save(self.output_directory + 'y_pred.npy', y_pred)

        # convert the predictions from class probabilities to integer labels
        y_pred = np.argmax(y_pred, axis=1)

        df_metrics = save_logs(self.output_directory, hist, y_pred, y_true, duration)

        keras.backend.clear_session()

        return df_metrics
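
In these examples the network outputs one score per class, so np.argmax(..., axis=1) reduces each row of predictions to the index of the most likely class. A small standalone illustration of that conversion (the probability values below are invented):

import numpy as np

# hypothetical softmax outputs for 3 samples and 4 classes
y_pred = np.array([[0.10, 0.70, 0.10, 0.10],
                   [0.30, 0.20, 0.40, 0.10],
                   [0.05, 0.05, 0.10, 0.80]])

labels = np.argmax(y_pred, axis=1)
print(labels)  # [1 2 3]
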
Example #3
    def fit(self, x, y, x_test, y_test, y_true):
        # abort when no GPU is available (is_gpu_available must be called)
        if not tf.test.is_gpu_available():
            print('error')
            exit()
        mini_batch_size = 16
        nb_epochs = 120

        x_train, x_val, y_train, y_val = \
            train_test_split(x, y, test_size=0.33)

        x_test = self.prepare_input(x_test)
        x_train = self.prepare_input(x_train)
        x_val = self.prepare_input(x_val)

        start_time = time.time()

        hist = self.model.fit(x_train,
                              y_train,
                              batch_size=mini_batch_size,
                              epochs=nb_epochs,
                              verbose=self.verbose,
                              validation_data=(x_val, y_val),
                              callbacks=self.callbacks)

        duration = time.time() - start_time

        self.model.save(self.output_directory + 'last_model.hdf5')

        model = keras.models.load_model(self.output_directory +
                                        'best_model.hdf5')

        y_pred = model.predict(x_test)

        # convert the predictions from class probabilities to integer labels
        y_pred = np.argmax(y_pred, axis=1)

        save_logs(self.output_directory,
                  hist,
                  y_pred,
                  y_true,
                  duration,
                  lr=False)

        keras.backend.clear_session()
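
Unlike the first two examples, this variant carves a validation set out of the training data with scikit-learn's train_test_split instead of validating on the test set. A minimal sketch of that split on toy data (shapes and values below are invented):

import numpy as np
from sklearn.model_selection import train_test_split

# toy dataset: 100 univariate series of length 50 with binary labels
x = np.random.randn(100, 50)
y = np.random.randint(0, 2, size=100)

# hold out roughly a third of the training data for validation
x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.33)
print(x_train.shape, x_val.shape)  # (67, 50) (33, 50)
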
Example #4
    def fit(self, x_train, y_train, x_test, y_test, y_true):
        # abort when no GPU is available (is_gpu_available must be called)
        if not tf.test.is_gpu_available():
            print('error')
            exit()
        nb_epochs = 1000
        batch_size = 256
        nb_classes = y_train.shape[1]

        # limit the number of augmented time series if the series are too long or too numerous
        if x_train.shape[1] > 500 or x_train.shape[0] > 2000 or x_test.shape[0] > 2000:
            self.warping_ratios = [1]
            self.slice_ratio = 0.9
        # increase the slice ratio if the series are too short
        if x_train.shape[1] * self.slice_ratio < 8:
            self.slice_ratio = 8 / x_train.shape[1]

        ####################
        ## pre-processing ##
        ####################

        # augment the series; tot_increase_num is the number of augmented copies per series
        x_train, y_train, x_test, y_test, tot_increase_num = self.pre_processing(x_train, y_train, x_test, y_test)

        print('Total increased number for each MTS: ', tot_increase_num)

        #########################
        ## done pre-processing ##
        #########################
        
        input_shape = x_train.shape[1:]
        model = self.build_model(input_shape, nb_classes)

        if self.verbose == True:
            model.summary()

        start_time = time.time()

        hist = model.fit(x_train, y_train, batch_size=batch_size, epochs=nb_epochs,
                         verbose=self.verbose, validation_data=(x_test, y_test), callbacks=self.callbacks)

        model.save(self.output_directory + 'last_model.hdf5')

        model = keras.models.load_model(self.output_directory + 'best_model.hdf5')

        y_pred = model.predict(x_test, batch_size=batch_size)
        # convert the predictions from class probabilities to integer labels
        y_pred = np.argmax(y_pred, axis=1)
        
        # aggregate the per-copy predictions: each original test series was expanded into
        # tot_increase_num augmented copies, so take the majority-vote label per series
        y_predicted = []
        test_num_batch = int(x_test.shape[0] / tot_increase_num)
        for i in range(test_num_batch):
            unique_value, sub_ind, correspond_ind, count = np.unique(
                y_pred[i * tot_increase_num:(i + 1) * tot_increase_num],
                return_index=True, return_inverse=True, return_counts=True)

            idx_max = np.argmax(count)
            predicted_label = unique_value[idx_max]

            y_predicted.append(predicted_label)

        y_pred = np.array(y_predicted)
        
        duration = time.time() - start_time
        
        save_logs(self.output_directory, hist, y_pred, y_true, duration)

        keras.backend.clear_session()
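
The final loop undoes the augmentation performed in pre_processing: every original test series was expanded into tot_increase_num augmented copies, and the per-copy predictions are collapsed back to one label per series by majority vote. A toy illustration of that voting step (labels and copy count below are invented):

import numpy as np

tot_increase_num = 4  # assumed number of augmented copies per test series
# predicted labels for 2 original series x 4 augmented copies each
y_pred = np.array([0, 0, 1, 0,
                   2, 2, 1, 2])

y_voted = []
for i in range(len(y_pred) // tot_increase_num):
    chunk = y_pred[i * tot_increase_num:(i + 1) * tot_increase_num]
    values, counts = np.unique(chunk, return_counts=True)
    y_voted.append(values[np.argmax(counts)])

print(np.array(y_voted))  # [0 2]
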