def parse_or_expression(self) -> Result:
    """Parse an OR expression.

    Grammar::

        or_exp := or_exp OR and_exp

    Left-associatively folds AND sub-expressions joined by OR tokens
    into a single ``Result`` whose ``name`` is the parenthesised
    expression text and whose ``value`` is the logical OR of the
    operand values.

    :return: the combined ``Result``; the (empty) sub-result unchanged
        when nothing could be parsed.
    :raises ParseException: when an OR token is not followed by a valid
        right-hand operand.
    """
    r1 = self.parse_and_expression()
    if not r1.name and not r1.value:
        # Nothing parsed at all: propagate the empty result as-is.
        return r1
    while True:
        token = self.pop_token()
        if token.type == TOKEN_TYPE['eof']:
            break
        elif token.type != TOKEN_TYPE['or']:
            # Token belongs to an outer rule: give it back to the caller.
            self.push_token(token)
            return r1
        r2 = self.parse_and_expression()
        if not r2.name and not r2.value:
            # Fix: interpolate the condition into the message; previously
            # the condition string was passed as an unused second
            # exception argument and the "%s" was never substituted.
            raise ParseException('invalid condition "%s"' % self.condstr)
        r1 = Result('({} or {})'.format(r1.name, r2.name),
                    r1.value or r2.value)
        logger.debug('[*] {}'.format(r1))
    return r1
def parse_and_expression(self) -> Result:
    """Parse an AND expression.

    Grammar::

        and_exp := and_exp AND n_exp

    Left-associatively folds NOT sub-expressions joined by AND tokens
    into a single ``Result`` (see ``parse_or_expression`` for the
    ``Result`` conventions). The ``-> Result`` annotation is added for
    consistency with the sibling parser methods.

    :return: the combined ``Result``; the (empty) sub-result unchanged
        when nothing could be parsed.
    :raises ParseException: when an AND token is not followed by a valid
        right-hand operand.
    """
    r1 = self.parse_not_expression()
    if not r1.name and not r1.value:
        # Nothing parsed at all: propagate the empty result as-is.
        return r1
    while True:
        token = self.pop_token()
        if token.type == TOKEN_TYPE['eof']:
            break
        if token.type != TOKEN_TYPE['and']:
            # Token belongs to an outer rule: give it back to the caller.
            self.push_token(token)
            return r1
        r2 = self.parse_not_expression()
        if not r2.name and not r2.value:
            # Fix: interpolate the condition into the message; previously
            # the condition string was passed as an unused second
            # exception argument and the "%s" was never substituted.
            raise ParseException('invalid condition "%s"' % self.condstr)
        r1 = Result('({} and {})'.format(r1.name, r2.name),
                    r1.value and r2.value)
        logger.debug('[*] {}'.format(r1))
    return r1
def send_meassurements(self, meassurements):
    """Post measurement payloads to the Monasca endpoint at ``self.url``.

    :param meassurements: object whose ``values`` attribute yields rows
        (presumably a pandas DataFrame -- TODO confirm against caller);
        each row is converted into a list of payload entries by
        ``self.payload_meassure``.

    Exits the process with status 1 on any request failure.
    """
    logger.info('Sending meassurements to Monasca...')
    # Fix: dropped the dead ``d = list()`` assignment that was
    # immediately overwritten by the comprehension.
    payload_lists = [self.payload_meassure(row) for row in meassurements.values]
    # One payload list per row; flatten into a single list for the POST.
    flatten_payload = [item for sublist in payload_lists for item in sublist]
    logger.debug('Payload: {}'.format(flatten_payload))
    try:
        # NOTE(review): no ``timeout=`` is passed, so the Timeout handler
        # below can never fire and the call may block indefinitely --
        # confirm whether a timeout should be added.
        r = requests.post(self.url, json=flatten_payload, headers=self.headers)
        r.raise_for_status()
    except requests.exceptions.HTTPError as errh:
        logger.error("Http Error: {}".format(errh))
        sys.exit(1)
    except requests.exceptions.ConnectionError as errc:
        logger.error("Error Connecting: {}".format(errc))
        sys.exit(1)
    except requests.exceptions.Timeout as errt:
        logger.error("Timeout Error: {}".format(errt))
        sys.exit(1)
    except requests.exceptions.RequestException as err:
        logger.error("OOps: Something Else: {}".format(err))
        sys.exit(1)
    logger.info(
        'Meassurements sent to Monasca, status code: {}'.format(r.status_code))
def get_token(self):
    """Request an authentication token from Keystone.

    Posts ``self.payload`` to ``self.url`` and returns the token carried
    in the ``X-Subject-Token`` response header. Exits the process with
    status 1 on any request failure.

    :return: the token string from the response headers.
    """
    logger.info('Requesting token to Keytone...')
    try:
        response = requests.post(self.url, json=self.payload, headers=self.headers)
        # Turn 4xx/5xx responses into HTTPError so they are logged below.
        response.raise_for_status()
    except requests.exceptions.HTTPError as http_err:
        logger.error("Http Error: {}".format(http_err))
        sys.exit(1)
    except requests.exceptions.ConnectionError as conn_err:
        logger.error("Error Connecting: {}".format(conn_err))
        sys.exit(1)
    except requests.exceptions.Timeout as timeout_err:
        logger.error("Timeout Error: {}".format(timeout_err))
        sys.exit(1)
    except requests.exceptions.RequestException as req_err:
        # Catch-all for any other requests-level failure.
        logger.error("OOps: Something Else: {}".format(req_err))
        sys.exit(1)
    token = response.headers['X-Subject-Token']
    logger.info('Token obtained, status code: {}'.format(response.status_code))
    logger.debug('Token: {}'.format(token))
    return token
def get_credentials(api):
    """Gets valid user credentials from storage.

    If nothing has been stored, or if the stored credentials are invalid,
    the OAuth2 flow is completed to obtain the new credentials.

    :param api: api - API from which we want to obtain the credentials.
    :return: Credentials, the obtained credential.
    """
    logger.info("Get Google credential for API {}".format(api))
    # If modifying these scopes, delete your previously saved credentials
    # at ./.credentials/sheets.googleapis.com.json
    scope = {
        'sheets': 'https://www.googleapis.com/auth/spreadsheets',
        'analytics': 'https://www.googleapis.com/auth/analytics.readonly',
        'analyticsreporting': 'https://www.googleapis.com/auth/analytics.readonly'
    }
    credential_folder = os.path.join(CODE_HOME, CREDENTIAL_DIR)
    logger.debug("Credential dir: {}".format(credential_folder))
    if not os.path.exists(credential_folder):
        try:
            os.makedirs(credential_folder)
        except OSError as e:
            # Fix: OSError has no ``.message`` attribute in Python 3, so
            # the handler itself crashed with AttributeError; log the
            # exception object instead. Execution deliberately continues
            # (best-effort), matching the original flow.
            logger.error(
                "Unable to create the corresponding directory: {}".format(e))
    credential_path = os.path.join(credential_folder, CREDENTIAL_FILE)
    if api == 'sheets':
        # Interactive user OAuth2 flow, cached in the credential file.
        store = Storage(credential_path)
        credentials = store.get()
        if not credentials or credentials.invalid:
            flow = client.flow_from_clientsecrets(CREDENTIAL, scope[api])
            flow.user_agent = APPLICATION_NAME
            credentials = tools.run_flow(flow, store, flags)
    else:
        # Analytics APIs authenticate via a service-account key file.
        credentials = ServiceAccountCredentials.from_json_keyfile_name(
            os.path.join(SERVICE_ACCOUNT_KEY_HOME, SERVICE_ACCOUNT_KEY),
            scopes=scope[api])
    return credentials
def parse_not_expression(self) -> Result:
    """Parse an optionally NOT-prefixed expression.

    Grammar (docstring corrected: the second alternative is a bare
    primary expression, with no NOT, as the code below shows)::

        n_exp := NOT n_exp | p_exp

    :return: a ``Result`` combining the negated name/value, the primary
        expression's result, or an empty false result at end of input.
    """
    token = self.pop_token()
    if token.type == TOKEN_TYPE['eof']:
        # End of input: nothing to negate -- empty, false result.
        return Result(name='', value=False)
    elif token.type != TOKEN_TYPE['not']:
        # No NOT prefix: hand the token back and parse a primary expression.
        self.push_token(token)
        return self.parse_primary_expression()
    # NOT found: recursively parse and negate the rest.
    r1 = self.parse_not_expression()
    r = Result('(not {})'.format(r1.name), not r1.value)
    logger.debug('[*] {}'.format(r))
    return r
def obtain(self):
    """Collect one measurement per metric for every registered source
    and persist the results.

    For each ``Source`` row an instance of the class named after the
    source is created and asked for each of the source's metrics; the
    values are stored as ``Measurement`` rows and committed once per
    source.
    """
    for source in db.query(Source):
        logger.info(source.name)
        # if source.name != 'Academy': continue
        metrics = db.query(Metric).filter_by(source_id=source.id).all()
        try:
            # HACK: instantiates the class whose name matches the source
            # name via eval(). Names come from the local DB here, but this
            # would execute arbitrary code if they were ever
            # user-controlled -- a registry/getattr lookup would be safer.
            op_source = eval('{}()'.format(source.name))
        except Exception as e:
            logger.error('source {} is not implemented'.format(
                source.name))
            logger.error(e)
            continue
        for metric in metrics:
            try:
                value = op_source.get_measurement(metric)
            except NotDefined:
                value = 'Not Defined'
            # NOTE(review): ``NotImplemented`` is the builtin constant,
            # not an exception class -- unless the project defines its own
            # NotImplemented exception, reaching this clause raises
            # TypeError; NotImplementedError may be what was meant.
            except NotImplemented:
                value = 'No Impl'
            except InvalidConection:
                value = 'No Connect'
            except Exception as e:
                # Any other failure is logged and recorded as 'No Access'.
                logger.error(e)
                value = 'No Access'
            params = {
                'metric_id': metric.id,
                'date': datetime.now(),
                # assumes get_measurement returns a string -- TODO confirm;
                # commas are stripped before storage
                'value': value.replace(',', '')
            }
            logger.debug(params)
            measurement = Measurement(**params)
            db.add(measurement)
        else:
            # for/else with no break: always commits once per source
            # after its metrics loop finishes.
            db.commit()
def calculate_statistics(self, a_list):
    """Build per-node SLA statistics from a list of ticket records.

    For every distinct ``hd_node`` value, computes the mean and standard
    deviation of the resolution/response times (logged only) and the
    percentage of tickets responded to within 24 hours and resolved
    within 2 days.

    :param a_list: iterable of records with ``hd_node``,
        ``time_resolve`` and ``time_response`` fields (times in seconds).
    :return: ``pandas.DataFrame`` with one summary row per node.
    """
    df = pd.DataFrame(a_list)
    nodes = df['hd_node'].unique()
    logger.debug('Dimension of the Dataframe: ({}, {})'.format(
        df.shape[0], df.shape[1]))
    # SLA thresholds: 24 hours for first response, 2 days for resolution.
    limit_response_time = 24 * 60 * 60
    limit_resolution_day = 2 * 24 * 60 * 60
    rows = list()
    for node in nodes:
        node_df = df[df['hd_node'] == node]
        resolve_count = node_df['time_resolve'].count()
        response_count = node_df['time_response'].count()
        # Mean and standard deviation of each timing column (logged only).
        resolve_mean = node_df["time_resolve"].mean()
        response_mean = node_df["time_response"].mean()
        resolve_std = node_df["time_resolve"].std()
        response_std = node_df["time_response"].std()
        logger.debug(
            'Node: {}, time_response_mean: {}, deviation_response: {}'.
            format(node, response_mean, response_std))
        logger.debug(
            'Node: {}, time_resolve_mean: {}, deviation_resolve: {}'.
            format(node, resolve_mean, resolve_std))
        logger.debug(
            'Node: {}, limit_response_time: {}, limit_resolution_day: {}'.
            format(node, limit_response_time, limit_resolution_day))
        # Tickets within the SLA thresholds.
        within_resolve = node_df[node_df['time_resolve'] <= limit_resolution_day]
        within_response = node_df[node_df['time_response'] <= limit_response_time]
        p_time_resolve = (within_resolve['time_resolve'].count()
                          * 1.0 / resolve_count * 100)
        p_time_response = (within_response['time_response'].count()
                           * 1.0 / response_count * 100)
        logger.info(
            'Node: {}, Percentage responded tickets in less than 24 working hours: {}'
            .format(node, p_time_response))
        logger.info(
            'Node: {}, Percentage resolved tickets in less than 2 working days: {}'
            .format(node, p_time_resolve))
        rows.append({
            'FIWARE Lab node': node,
            'Number Issues': resolve_count,
            '% Issues responded <24h': p_time_response,
            '% Issues resolved <2d': p_time_resolve
        })
    return pd.DataFrame(rows)