Example #1
import os
from datetime import date, datetime, timedelta

# Project modules (config, SQLAConnection, QueryGenerator, ReportGenerator,
# LinkGenerator, AnomalyData, AnomalySettings, KMeansSettings,
# KMeansClustering, PCAAnomalies) are assumed to be imported at module level.

def execute_scheme(self):
    #model = TimeSeriesClassificationNeuralNet(self.settings)
    #model = TimeSeriesPredictionNeuralNet(self.settings)
    connection = SQLAConnection()
    query_generator = QueryGenerator(
        self.settings.sensors,
        self.settings.start_date,
        self.settings.end_date
        )
    report_generator = ReportGenerator(self.settings)
    link_generator = LinkGenerator(self.settings)
    # note: query_generator and report_generator are created but not used below
    #data = RegularityData(link_generator, connection)
    data = AnomalyData(link_generator, connection)
    #data.generate_metadata_report(ReportGenerator(self.settings))
    #data.make_df()
    #data.save_df(name=self.settings.dataset_name)

    #data.find_discontinuities()
    #data.split_at_discontinuities()
    #data.plot_data()
    #data.add_temp()
    #data.save_dfs(name=self.settings.dataset_name)
    #data.load_dfs(date='2020-11-01')
    #data.load_extend_dfs(date='2020-11-13')

    # Load the cached DataFrames for the start date, then extend them day
    # by day: either a fixed number of days ahead ('for') or up to today
    # ('while').
    startdate = datetime.strptime('2020-11-01', config.dateformat)
    data.load_dfs(date=datetime.strftime(startdate, config.dateformat))
    dates_ahead = 4
    mode = 'while'
    if mode == 'for':
        for i in range(dates_ahead):
            data.load_extend_dfs(date=datetime.strftime(startdate + timedelta(days=i), config.dateformat))
    elif mode == 'while':
        tdate = startdate
        while tdate.date() != date.today():
            try:
                data.load_extend_dfs(date=datetime.strftime(tdate, config.dateformat))
            except FileNotFoundError:
                # no data cached for this day; skip it
                pass
            tdate = tdate + timedelta(days=1)
    data.purge_empty_dfs()
    data.preprocess()
    data.merge_dfs()
    #data.plot_data()
    #data.find_correlation()

    # Restrict the merged data to a time-of-day window, then locate anomaly
    # objects per sensor (or load previously saved results from disk).
    anomaly_settings = AnomalySettings()
    kmeans_settings = KMeansSettings()
    start_hour = '00:00:00'
    end_hour = '23:59:59'
    data.filter_hours(start_hour, end_hour)
    data.purge_empty_time_filtered_dfs()
    #data.plot_filtered_hours(plot_objects=False)
    data.set_object_settings(anomaly_settings)
    anomaly_name = f"{startdate}_{mode}_{start_hour}_{end_hour}_{anomaly_settings.anomaly_sensor}_anomaly"
    print(os.listdir(config.anomaly_path))
    print(anomaly_name)
    if f"{anomaly_name}.json" in os.listdir(config.anomaly_path):
        data.load_objects(name=f"{anomaly_name}.json")
        print(f"{anomaly_name} loaded")
    else:
        for feature in anomaly_settings.anomaly_sensor:
            #data.locate_anomalies_filtered_dfs(feature)
            data.locate_objects_dfs(feature)
            #data.save_plots(feature)
            #data.plot_filtered_hours(foi=feature)
        data.save_objects(name=anomaly_name)

    # Cluster the detected objects, then project them with PCA for
    # inspection and plotting.
    kmeans = KMeansClustering(data.objects, kmeans_settings)
    kmeans.fit_Kmeans()
    #sensor_prediction = SensorPrediction(data.anomalies, self.settings)
    data.plot_filtered_hours(foi='acc1_ch_x')  #, project_anomalies='acc1_ch_z')
    pca = PCAAnomalies(data.objects, self.settings)
    pca.fit_PCA()
    pca.save_pca(f'{anomaly_name}_pca')
    pca.set_labels(kmeans.send_labels())
    #pca.get_cov()
    #anomaly_key, df_number = pca.get_argmax(col='sigma')
    #data.plot_regularities()
    pca.plot_components_labels(n_categories=kmeans_settings.n_clusters)
    pca.scree_plot()
    pca.plot_hist_pca()
    #pca.plot_components_3d()
    pca.plot_components(features=['Duration', 'frequency'])
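
The day-by-day loading loop is the core of the data assembly step above. Here is a standalone sketch of that iteration pattern, with a hypothetical load_day() standing in for data.load_extend_dfs and an ISO day format assumed for config.dateformat:

from datetime import date, datetime, timedelta

DATEFORMAT = "%Y-%m-%d"  # assumption: config.dateformat is an ISO-style day format

def load_day(date_str):
    """Hypothetical stand-in for data.load_extend_dfs(date=...)."""
    print(f"loading {date_str}")

tdate = datetime.strptime("2020-11-01", DATEFORMAT)
while tdate.date() != date.today():
    try:
        load_day(tdate.strftime(DATEFORMAT))
    except FileNotFoundError:
        pass  # skip days with no cached data
    tdate += timedelta(days=1)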
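
The final stage clusters the detected objects and projects them for inspection. A minimal sketch of that stage, using scikit-learn's KMeans and PCA as stand-ins for the project's KMeansClustering and PCAAnomalies wrappers, on synthetic feature rows:

import numpy as np
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))  # synthetic: one feature row per detected object

kmeans = KMeans(n_clusters=3, n_init=10, random_state=0).fit(X)
labels = kmeans.labels_  # analogous to kmeans.send_labels()

pca = PCA(n_components=2).fit(X)
components = pca.transform(X)  # 2-D projection for the component plots
print(pca.explained_variance_ratio_)  # the numbers behind a scree plot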
Example #2

import EpipolarGeometry
import ImageFeaturesHomography
import KMeansClustering

# start task 1
task1 = ImageFeaturesHomography.ImageFeaturesHomography()
task1.start()

# start task 2
task2 = EpipolarGeometry.EpipolarGeometry()
task2.start()

# start task 3
task3 = KMeansClustering.KMeansClustering()
task3.start()
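
All three tasks follow the same construct-then-start() pattern. A hypothetical minimal task class showing the interface this driver script assumes (the real classes live in their own modules):

class ExampleTask:
    """Hypothetical sketch of the task interface assumed above."""

    def __init__(self):
        self.name = "example"

    def start(self):
        # In the real tasks this would load the input data, run the
        # algorithm (homography, epipolar geometry, k-means), and
        # display or save the results.
        print(f"running {self.name}")

task = ExampleTask()
task.start()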