Example #1
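# NOTE: the original example omits its imports; the stdlib imports below are
# assumed from the code. ReportElement, DateUnformat, JFileValidator and
# ChartGenerator are project-specific classes whose modules are not shown here.
import datetime

import simplejson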
class ReportElementValidator(ReportElement, object):
    """
    Class for setting permission
    """
    def __init__(self):
        super(ReportElementValidator, self).__init__('normal', None)


    def init(self, id):
        
        # init element
        super(ReportElementValidator, self).init(id)
        self._date_unformatter = DateUnformat()
        
        # do not save instances
        self._data['report_save_historical_instances_ind'] = 'N'
        # json file operation wrapper
        self._jfile = JFileValidator(self._path, self._id, self._data)
        self._instance_save_path = "%s%s/validation/" % (self._path, self._id)
        
        #get validation settings from valid.json generated by editor
        self.validation_data = self._jfile.get_validation_data()
        
        self._meas_interval = None
        
        if self.validation_data['measurement_interval_id']:
            res = self._db.Query("""SELECT interval_unit
                                    FROM measurement_interval
                                    WHERE measurement_interval_id = %s""",
                                 (self.validation_data['measurement_interval_id'], ))
            if res:
                self._meas_interval = self._db.record[0]['interval_unit']
                
        
        self.validation_data['data_fetch_command'] = self.validation_data['data_fetch_command'].strip().replace('\r\n','\n')
        if self.validation_data['source_database_connection_id']:
            self._data['source_database_connection_id'] = int(self.validation_data['source_database_connection_id'])
        else:
            self._data['source_database_connection_id'] = 0
            
        self._data['data_fetch_command'] = self.validation_data['data_fetch_command']
        self._data['data_fetch_method'] = self.validation_data['data_fetch_method']
        
        if self.validation_data['web_service_credentials_id']:
            self._data['web_service_credentials_id'] = int(self.validation_data['web_service_credentials_id'])
        else:
            self._data['web_service_credentials_id'] = 0
        
        if self.validation_data['measurement_interval_id']:
            self._data['measurement_interval_id'] = int(self.validation_data['measurement_interval_id'])
        else:
            self._data['measurement_interval_id'] = 0
            
        
        # set report segment
        if self.validation_data['segment_id'] and int(self.validation_data['segment_id']):
            self._data['segment_id'] = int(self.validation_data['segment_id'])
        else:
            self._data['segment_id'] = 0        
        
        # get segment data
        self._segment = self._get_segment()
        self._segment_values = self._get_segment_values()
        
        # get validation segment value id and set it as active
        if self.validation_data['segment_value_id'] and int(self.validation_data['segment_value_id']):
            segment_value_id = int(self.validation_data['segment_value_id'])
        else:
            segment_value_id = 0
        
        # check that the current segment value belongs to the report's segment values
        if segment_value_id and self._segment and any(segment['segment_value_id'] == segment_value_id for segment in self._segment_values):
            self._segment_value = list(segment for segment in self._segment_values if segment['segment_value_id'] == segment_value_id)[0]
        elif self._segment_values and self._segment:
            raise Exception("segment_value_id does not match segment_id")
        else:
            self._segment_value = list()
        
        if self._segment_value:
            self._segment_value_id = self._segment_value['segment_value_id']
        else: 
            self._segment_value_id = 0
        
        self._jfile.set_segment_value(self._segment_value)
        self._jfile.set_segment(self._segment)
    
    def _check_pre_saved_settings(self, fetch_settings):
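        """
        Return True when previously saved fetch settings (sql command, source
        connection id and segment id) match the current validation settings,
        so the corresponding saved dataset can be reused.
        """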
        if fetch_settings and \
                fetch_settings['sql'] and \
                fetch_settings['sql'] == self._data['data_fetch_command'] and \
                fetch_settings['source_database_connection_id'] == self._data['source_database_connection_id'] and \
                fetch_settings['segment_id'] == self._data['segment_id']:
            return True
        return False

    def get_saved_instance(self):
        """
        get clear data for building instance
        """
        # data built by the generator (the current, up-to-date data)
        generation_fetch_settings = self._jfile.get_generation_fetch_settings()
                
        # data built by last validation
        validation_fetch_settings = self._jfile.get_validation_fetch_settings()
        
        # data built by last validation which was saved
        last_validation_fetch_settings = self._jfile.get_last_validation_fetch_settings()
        
        pre_saved_info = list()

        data = {'instance': None, 'meas_time': None}
        
        meas_time = None
        if self.validation_data['measurement_time']:
            meas_time = self._date_unformatter.unformat(self.validation_data['measurement_time'])
            if not meas_time:
                raise Exception("Incorrect measurement_time date format.")
        
        
        if self._check_pre_saved_settings(generation_fetch_settings):
            #print 'take generated'
            if meas_time:
                res = self._db.Query("""SELECT report_data_set_instance_id
                            FROM report_data_set_instance
                        WHERE
                            `element_id`=%s
                            AND segment_value_id = %s
                            AND measurement_time = %s
                        ORDER BY measurement_time DESC
                        LIMIT 0, 1""",(self._id, self._segment_value_id, meas_time))
                if res:
                    #print 'has meas time'
                    data_set_instance = self._db.record[0]
                    generation_fetch_settings['saved_data'] = self._jfile.get_generated_dataset_instance(data_set_instance['report_data_set_instance_id'])
                else:
                    #print 'not has'
                    generation_fetch_settings['saved_data'] = None
            else:
                generation_fetch_settings['saved_data'] = self._jfile.get_generated_dataset()
            generation_fetch_settings['source'] = 'generated'
            pre_saved_info.append(generation_fetch_settings)
        
        if self._check_pre_saved_settings(validation_fetch_settings):
            #print 'take validated'
            validation_fetch_settings['saved_data'] = self._jfile.get_validated_dataset()
            validation_fetch_settings['source'] = 'validated'
            pre_saved_info.append(validation_fetch_settings)
        
        if self._check_pre_saved_settings(last_validation_fetch_settings):
            #print 'take last saved validated'
            last_validation_fetch_settings['saved_data'] = self._jfile.get_last_validated_dataset()
            last_validation_fetch_settings['source'] = 'last_saved'
            pre_saved_info.append(last_validation_fetch_settings)
        
        # no validated query at all
        if not pre_saved_info:
            raise Exception("Please validate query first")
         
        # get pre-saved items with data 
        pre_saved_info_with_data = [pre_saved for pre_saved in pre_saved_info if pre_saved['saved_data']]
        
        # sort by generation time
        pre_saved_info_with_data.sort(key=lambda item: item['saved_data']['generation_time'], reverse = True)
        
        # check validation measurement time
        if self._data['data_fetch_method'] == 'sql':
            if self.validation_data['measurement_time']:
                # measurement time is specified: keep only pre-saved data with
                # that exact measurement time
                pre_saved_info_with_data = [pre_saved for pre_saved in pre_saved_info_with_data if self._date_unformatter.unformat(pre_saved['saved_data']['meas_time']) == meas_time]
            else:
                # no measurement time specified: keep only pre-saved data that is
                # still current. (An earlier revision preferred freshly generated
                # data unconditionally here; that shortcut was disabled, so the
                # expiry filter below is always applied.)
                expired_date = None

                if self._data['max_time_before_expired_sec']:
                    expired_date = datetime.datetime.now() - datetime.timedelta(seconds=self._data['max_time_before_expired_sec'])
                elif self._meas_interval:
                    # derive the expiry window from the measurement interval unit
                    if self._meas_interval == 'minute':
                        expired_date = datetime.datetime.now() - datetime.timedelta(minutes=10)
                    elif self._meas_interval == 'hour':
                        expired_date = datetime.datetime.now() - datetime.timedelta(hours=1)
                    elif self._meas_interval == 'day':
                        expired_date = datetime.datetime.now() - datetime.timedelta(days=1)
                    elif self._meas_interval == 'week':
                        expired_date = datetime.datetime.now() - datetime.timedelta(weeks=1)
                    elif self._meas_interval == 'month':
                        expired_date = datetime.datetime.now() - datetime.timedelta(days=30)
                    elif self._meas_interval == 'quarter':
                        expired_date = datetime.datetime.now() - datetime.timedelta(days=90)
                    elif self._meas_interval == 'year':
                        expired_date = datetime.datetime.now() - datetime.timedelta(days=365)

                if expired_date:
                    pre_saved_info_with_data = [pre_saved for pre_saved in pre_saved_info_with_data if self._date_unformatter.unformat(pre_saved['saved_data']['generation_time']) >= expired_date]
        
        if pre_saved_info_with_data:
            # if pre-saved data exists
            self._outer_conn = self._get_outer_connection()
            data['instance'] = self._outer_conn.parse_collected_data(simplejson.loads(pre_saved_info_with_data[0]['saved_data']['instance']))
            
            instance, meas_time = self._fetch_data_from_source()
            data['meas_time'] = self._date_unformatter.unformat(pre_saved_info_with_data[0]['saved_data']['meas_time'])
        else:
            # no actual pre-saved data, let's fetch it
            instance, meas_time = self._fetch_data_from_source()
            
            
            data['instance'] = instance
            data['meas_time'] = meas_time
             
        """
        #check generation sql to be equal current validation sql
        if not data['instance'] and \
                generation_fetch_settings and \
                generation_fetch_settings['sql'] and \
                generation_fetch_settings['sql'] == self.validation_data['data_fetch_command']:
            saved_data = self._jfile.get_generated_dataset()
            if saved_data:
                data = saved_data
        
        #check last validation sql to be equal current validation sql
        if not data['instance'] and \
                validation_fetch_settings and \
                validation_fetch_settings['sql'] and \
                validation_fetch_settings['sql'] == self.validation_data['data_fetch_command']:
            saved_data = self._jfile.get_validated_dataset()
            if saved_data:
                data = saved_data
                
        #check last saved validation sql to be equal current validation sql
        if not data['instance'] and \
                last_validation_fetch_settings and \
                last_validation_fetch_settings['sql'] and \
                last_validation_fetch_settings['sql'] == self.validation_data['data_fetch_command']:
            saved_data = self._jfile.get_last_validated_dataset()
            if saved_data:
                data = saved_data
        
        # no data found get by the same sql query as current validation sql.
        # raise exception - sql query must be validated
        if data['instance'] is None:
            raise Exception("Please validate query first")
        """
        
        return data['instance'], data['meas_time']        
    
    def report_generation(self, id1):
        """
        generate all enabled report elements: dataset, pivots, charts
        """
        # get instance
        instance, meas_time = self.get_saved_instance()
        self._jfile.set_meas_time(meas_time)
        
        # create dataset instance
        data_set_instance = self._process_instance(instance, meas_time, update_columns = False, write_clear_headers = True, segment_value = self._segment_value)
        
        fetched_rows = len(instance['data'])
        
        #prepare data for charts
        all_data = dict()
        all_data[0] = data_set_instance.get_formatted_header_rows()
                                
        # create all pivots
        for pivot in self._pivots:
            #data_set_pivot_instance = self._process_pivot(pivot, data_set_instance, self._segment_value)
            data_set_pivot_instance = self._process_pivot(pivot, data_set_instance)
            all_data[pivot['report_data_set_pivot_id']] = data_set_pivot_instance.get_formatted_header_rows()
        
        # create all charts
        chart_gen = ChartGenerator()
        chart_gen.report_charts(self._id, self._segment_value_id, meas_time, 0, all_data, self._jfile, chart_id = 0)
        self._make_meta()
        return fetched_rows
  
    
    
    def data_generation(self, id1):
        """
        generate dataset
        """
        # get instance
        instance, meas_time = self.get_saved_instance()
        
        # create dataset instance
        self._process_instance(instance, meas_time, update_columns = False, write_clear_headers = True, segment_value = self._segment_value)
        fetched_rows = len(instance['data'])
        return fetched_rows
    
    
    def _fetch_data_from_source(self):
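        """
        Fetch a fresh instance from the outer data source, persist the raw
        fetch result and its fetch settings through the json wrapper, and
        return the (instance, meas_time) pair.
        """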
        #get last measurement time
        if self.validation_data['last_measurement_time']:
            # try to parse it from valid.json
            last_meas_time = self._date_unformatter.unformat(self.validation_data['last_measurement_time'])
            if not last_meas_time:
                raise Exception("Incorrect last_measurement_time date format.")
            """
            try:
                #try datetime
                last_meas_time = datetime.datetime.strptime(self.validation_data['last_measurement_time'], "%Y-%m-%d %H:%M:%S")
            except ValueError:
                try:
                    #try date
                    last_meas_time = datetime.datetime.strptime(self.validation_data['last_measurement_time'], "%Y-%m-%d")
                except ValueError:
                    raise Exception("Incorrect last_measurement_time date format.")
            """
        else:
            # get from db
            last_meas_time = self._get_last_meas_time()
        
        if self.validation_data['measurement_time']:
            # try to parse it from valid.json
            meas_time = self._date_unformatter.unformat(self.validation_data['measurement_time'])
            if not meas_time:
                raise Exception("Incorrect measurement_time date format.")
        else:
            meas_time = ''

        
        
        self._outer_conn = self._get_outer_connection()
       
        # get instance
        if not meas_time:        
            meas_time = last_meas_time
        if self._data['data_fetch_method'] == 'web service':
            meas_times = self._get_meas_times_web_service(last_meas_time)
            if meas_times['data']:
                meas_time = meas_times['data'][-1][0]

        instance = self._get_instance(meas_time, self._segment_value, last_meas_time)
        if self._data['data_fetch_method'] == 'web service':
            json_instance = self._outer_conn.get_json_result(meas_time)
        else:
            json_instance = self._outer_conn.get_json_result()
        
        # save data fetch
        self._jfile.save_data_fetch(
                                {'instance': json_instance, 
                                 'meas_time': meas_time.strftime('%Y-%m-%d %H:%M:%S'), 
                                 'generation_time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')})
    
        # save sql query
        self._jfile.save_fetch_settings({'sql': self._data['data_fetch_command'],
                                        'segment_id': self._data['segment_id'],
                                        'source_database_connection_id': self._data['source_database_connection_id'],
                                        })        
        return instance, meas_time     

    def data_fetch(self, id1):
        """
        fetch data from outer source
        """
        instance, meas_time = self._fetch_data_from_source()

        # process instance and generate dataset json
        self._process_instance(instance, meas_time, update_columns = False, write_clear_headers = True, segment_value = self._segment_value)
        
        fetched_rows = len(instance['data'])
        """        
        # save clear fetched data
        self._jfile.save_data_fetch(
                                    {'instance': json_instance, 
                                     'meas_time': meas_time.strftime('%Y-%m-%d %H:%M:%S'), 
                                     'generation_time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')})
        
        # save sql query
        self._jfile.save_fetch_settings({'sql': self._data['data_fetch_command'],
                                            'segment_id': self._data['segment_id'],
                                            'source_database_connection_id': self._data['source_database_connection_id'],
                                            })
        
        
        """
        return fetched_rows
    
    def metadata_update(self, id1):
        """
        update report metadata
        """
        # get instance
        instance, meas_time = self.get_saved_instance()
        
        if not instance:
            raise Exception("Cannot update metadata. Dataset is empty.")

        # force dataset class update titles
        self._data['last_report_column_metadata_update_time'] = None
        
        # process instance
        instance = self._process_instance(instance, meas_time, update_columns=True, write_clear_headers = True, segment_value = self._segment_value)
        
        # if validated data and its sql exist, store them as the last validated ones
        last_validation_fetch_settings = self._jfile.get_validation_fetch_settings()
        if last_validation_fetch_settings:
            last_validated_data = self._jfile.get_validated_dataset()
            if last_validated_data:
                self._jfile.save_last_validated_data_fetch(last_validated_data)
                self._jfile.save_last_validation_fetch_settings(last_validation_fetch_settings)
        
        return 0
    
    def pivot_generation(self, id1):
        """
        generate pivot and charts based on this pivot
        """
        # take all the pivots including disabled
        self._pivots = self._get_pivots(enabled_only = False)

        # check if there are any pivots
        if not self._pivots:
            raise Exception("Report has no pivots")
        if not id1:
            raise Exception("Pivot id is not specified")
        
        #get enabled charts
        self._charts = self._get_charts()
        
        # find specified pivot
        pivots = [pivot for pivot in self._pivots if pivot['report_data_set_pivot_id'] == id1]
        if not pivots:
            raise Exception("Incorrect pivot id")
        pivot = pivots[0]
        
        # get all chart based on current pivot
        pivot_charts = [chart for chart in self._charts if chart['report_data_set_pivot_id'] == id1]
        
        # get instance
        instance, meas_time = self.get_saved_instance()
        self._jfile.set_meas_time(meas_time)
        
        if not instance:
            raise Exception("Cannot create pivot. Dataset is empty.")

        # create dataset instance
        data_set_instance = self._process_instance(instance, meas_time, update_columns=False, write_clear_headers = True, segment_value = self._segment_value)

        # create pivot instance
        #data_set_pivot_instance = self._process_pivot(pivot, data_set_instance, self._segment_value)
        data_set_pivot_instance = self._process_pivot(pivot, data_set_instance)
        
        #prepare pivot data for charts
        pivot_formatted_header_rows = data_set_pivot_instance.get_formatted_header_rows()
        
        all_data = dict()
        all_data[pivot['report_data_set_pivot_id']] = pivot_formatted_header_rows
        
        chart_gen = ChartGenerator()

        for pivot_chart in pivot_charts:
            chart_gen.report_charts(self._id, self._segment_value_id, meas_time, 0, all_data, self._jfile, chart_id = pivot_chart['report_data_set_chart_id'])
        
        return len(pivot_formatted_header_rows['rows'])
    
    def chart_generation(self, id1):
        """
        generate chart 
        """
        return self._chart_process(id1, 'generate')

    def saving_chart(self, id1):
        """
        populate chart 'row values' 
        """
        return self._chart_process(id1, 'populate')

    def _chart_process(self, id1, command):
        """
        process chart 
        """
        # take all the pivots including disabled 
        self._pivots = self._get_pivots(enabled_only = False)
        
        # take all the charts including disabled 
        self._charts = self._get_charts(enabled_only = False)

        # check if there are any charts
        if not self._charts:
            raise Exception("Report has no charts")
        if not id1:
            raise Exception("Chart id is not specified")
        
        
        # find specified chart
        charts = [chart for chart in self._charts if chart['report_data_set_chart_id'] == id1]

        if not charts:
            raise Exception("Incorrect chart id")
        chart = charts[0]

        all_data = dict()
        
        
        # get instance
        instance, meas_time = self.get_saved_instance()
        self._jfile.set_meas_time(meas_time)
         
        
        if not instance:
            raise Exception("Cannot create chart. Dataset is empty.")
        
        # create dataset instance
        data_set_instance = self._process_instance(instance, meas_time, update_columns = False, write_clear_headers = True, segment_value = self._segment_value)
        
        
        if chart['report_data_set_pivot_id']:
            # get pivot data
            index = chart['report_data_set_pivot_id']
            pivots = [pivot for pivot in self._pivots if pivot['report_data_set_pivot_id'] == index]
            pivot = pivots[0]
            #data_set_pivot_instance = self._process_pivot(pivot, data_set_instance, self._segment_value)
            data_set_pivot_instance = self._process_pivot(pivot, data_set_instance)
            formatted_header_rows = data_set_pivot_instance.get_formatted_header_rows()
        else:
            # get dataset
            index = 0
            formatted_header_rows = data_set_instance.get_formatted_header_rows()

        if not formatted_header_rows:
            raise Exception("Cannot create chart. Dataset is empty.")

        #prepare data for charts
        all_data[index] = formatted_header_rows
        if command == 'generate':
            chart_gen = ChartGenerator()
            chart_gen.report_charts(self._id, self._segment_value_id, meas_time, 0, all_data, self._jfile, chart['report_data_set_chart_id'])
        elif command == 'populate':
            self._populate_row_values([chart], all_data)
        return 0

    def saving_report(self, id1):
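        """no-op in validation mode"""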
        return 0
    
    def restore_data(self, id1):
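        """no-op in validation mode"""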
        return 0
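
A minimal usage sketch (an assumption, not part of the original source): the element id, the call order, and the wiring done by the ReportElement base class (database connection, paths, settings) are all illustrative here.

# hypothetical driver code; element id 123 and the id1 arguments are assumptions
validator = ReportElementValidator()
validator.init(123)                      # load element settings and valid.json
rows = validator.data_fetch(0)           # fetch data from the configured source
print "fetched %d rows" % rows           # Python 2 print, matching the example
validator.report_generation(0)           # rebuild dataset, pivots and charts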