def run_algorithms(
        timeseries, timeseries_name, end_timestamp, full_duration,
        timeseries_file, skyline_app, algorithms):
    """
    Iteratively run algorithms.
    """
    results_dir = os.path.dirname(timeseries_file)

    if not os.path.exists(results_dir):
        os.makedirs(results_dir, mode=0o755)

    start_analysis = int(time.time())

    triggered_algorithms = []
    anomalous = False

    check_algorithms = []
    if str(algorithms) == "['all']":
        if skyline_app == 'analyzer':
            check_algorithms = ALGORITHMS
            logger.info('check_algorithms for analyzer - %s' % (str(check_algorithms)))
        if skyline_app == 'mirage':
            check_algorithms = MIRAGE_ALGORITHMS
            logger.info('check_algorithms for mirage - %s' % (str(check_algorithms)))
        if skyline_app == 'boundary':
            check_algorithms = algorithms
            logger.info('check_algorithms for boundary - %s' % (str(check_algorithms)))
        if skyline_app == 'crucible':
            ALGORITHMS.append('detect_drop_off_cliff')
            check_algorithms = ALGORITHMS
            logger.info('check_algorithms for crucible - %s' % (str(check_algorithms)))
    else:
        check_algorithms = algorithms
        logger.info('check_algorithms specified - %s' % (str(check_algorithms)))

    if not check_algorithms:
        logger.info('check_algorithms unknown - %s' % (str(check_algorithms)))
        ALGORITHMS.append('detect_drop_off_cliff')
        check_algorithms = ALGORITHMS
        logger.info('check_algorithms - %s' % (str(check_algorithms)))

    logger.info('checking algorithms - %s' % (str(check_algorithms)))

    # @added 20190611 - Feature #3106: crucible - skyline.consensus.anomalies.png
    # Plot Skyline anomalies if CONSENSUS is achieved
    anomalies = []

    for algorithm in check_algorithms:
        detected = ''
        try:
            x_vals = np.arange(len(timeseries))
            y_vals = np.array([y[1] for y in timeseries])
            # Match default graphite graph size
            plt.figure(figsize=(5.86, 3.08), dpi=100)
            plt.plot(x_vals, y_vals)

            # Start a couple datapoints in for the tail average
            for index in range(10, len(timeseries)):
                sliced = timeseries[:index]
                anomaly = globals()[algorithm](sliced, end_timestamp, full_duration)

                # Point out the datapoint if it's anomalous
                if anomaly:
                    plt.plot([index], [sliced[-1][1]], 'ro')
                    detected = "DETECTED"
                    # @added 20190611 - Feature #3106: crucible - skyline.consensus.anomalies.png
                    # Add the anomaly to the anomalies list to plot Skyline
                    # anomalies if CONSENSUS is achieved
                    anomalies.append([sliced[-1][0], sliced[-1][1], algorithm])

            if detected == "DETECTED":
                results_filename = join(results_dir + "/" + algorithm + "." + detected + ".png")
                # logger.info('ANOMALY DETECTED :: %s' % (algorithm))
                anomalous = True
                triggered_algorithms.append(algorithm)
            else:
                results_filename = join(results_dir + "/" + algorithm + ".png")

            plt.savefig(results_filename, dpi=100)
            # logger.info('%s :: %s' % (algorithm, results_filename))
            if python_version == 2:
                os.chmod(results_filename, 0o644)
            if python_version == 3:
                os.chmod(results_filename, mode=0o644)
        except:
            logger.error('error :: %s' % (traceback.format_exc()))
            logger.info(
                'info :: error thrown in algorithm running and plotting - %s' %
                (str(algorithm)))

    end_analysis = int(time.time())
    # @modified 20160814 - pyflaked
    # seconds_to_run = end_analysis - start_analysis
    # logger.info(
    #     'analysis of %s at a full duration of %s took %s seconds' %
    #     (timeseries_name, str(full_duration), str(seconds_to_run)))

    # @added 20190611 - Feature #3106: crucible - skyline.consensus.anomalies.png
    # Plot Skyline anomalies where CONSENSUS achieved and create file resources
    # skyline.anomalies_score.txt and skyline.anomalies.csv
    anomalies_score = []
    if anomalies:
        for ts, value, algo in anomalies:
            processed = False
            algorithms_triggered = []
            if anomalies_score:
                for i in anomalies_score:
                    if i[0] == ts:
                        processed = True
                        continue
            if processed:
                continue
            for w_ts, w_value, w_algo in anomalies:
                if w_ts == ts:
                    algorithms_triggered.append(w_algo)
            if algorithms_triggered:
                consensus = len(algorithms_triggered)
                anomalies_score.append([ts, value, consensus, algorithms_triggered])

        try:
            logger.info('info :: plotting skyline.consensus.anomalies.png')
            x_vals = np.arange(len(timeseries))
            y_vals = np.array([y[1] for y in timeseries])
            # Match default graphite graph size
            plt.figure(figsize=(5.86, 3.08), dpi=100)
            plt.plot(x_vals, y_vals)
            for index in range(10, len(timeseries)):
                anomaly = False
                sliced = timeseries[:index]
                for i in anomalies_score:
                    if sliced[-1][0] == i[0]:
                        if i[2] >= CONSENSUS:
                            anomaly = True
                # Point out the datapoint if it is anomalous according to
                # Skyline CONSENSUS
                if anomaly:
                    plt.plot([index], [sliced[-1][1]], 'ro')
            results_filename = join(results_dir + "/skyline.consensus.anomalies.png")
            plt.savefig(results_filename, dpi=100)
            if python_version == 2:
                os.chmod(results_filename, 0o644)
            if python_version == 3:
                os.chmod(results_filename, mode=0o644)
        except:
            logger.error('error :: %s' % (traceback.format_exc()))
            logger.error('error :: failed plotting skyline.consensus.anomalies.png')

        anomalies_filename = join(results_dir + "/skyline.anomalies_score.txt")
        write_data_to_file(skyline_app, anomalies_filename, 'w', str(anomalies_score))

        anomalies_csv = join(results_dir + "/skyline.anomalies.csv")
        try:
            with open(anomalies_csv, 'w') as fh:
                fh.write('timestamp,value,consensus_count,triggered_algorithms\n')
            for ts, value, consensus, algorithms_triggered in anomalies_score:
                try:
                    algos_str = str(algorithms_triggered)
                    triggered_algorithms = algos_str.replace(',', ' ')
                    line = '%s,%s,%s,%s\n' % (
                        str(ts), str(value), str(consensus), str(triggered_algorithms))
                    with open(anomalies_csv, 'a') as fh:
                        fh.write(line)
                except:
                    logger.error(traceback.format_exc())
                    logger.error('error :: could not write to file %s' % (anomalies_csv))
            if python_version == 2:
                os.chmod(anomalies_csv, 0o644)
            if python_version == 3:
                os.chmod(anomalies_csv, mode=0o644)
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: could not write to file %s' % (anomalies_csv))

    return anomalous, triggered_algorithms
def run_algorithms(
        timeseries, timeseries_name, end_timestamp, full_duration,
        timeseries_file, skyline_app, algorithms, alert_interval,
        add_to_panorama, padded_timeseries, from_timestamp):
    """
    Iteratively run algorithms.
    """
    results_dir = os.path.dirname(timeseries_file)

    if not os.path.exists(results_dir):
        os.makedirs(results_dir, mode=0o755)

    start_analysis = int(time.time())

    triggered_algorithms = []
    anomalous = False

    # @added 20200427 - Feature #3500: webapp - crucible_process_metrics
    #                   Feature #1448: Crucible web UI
    # Added a default alert_interval_discarded_anomalies_count so that
    # run_algorithms does not return as failed
    alert_interval_discarded_anomalies_count = 0

    check_algorithms = []
    if str(algorithms) == "['all']":
        if skyline_app == 'analyzer':
            check_algorithms = ALGORITHMS
            logger.info('check_algorithms for analyzer - %s' % (str(check_algorithms)))
        if skyline_app == 'mirage':
            check_algorithms = MIRAGE_ALGORITHMS
            logger.info('check_algorithms for mirage - %s' % (str(check_algorithms)))
        if skyline_app == 'boundary':
            check_algorithms = algorithms
            logger.info('check_algorithms for boundary - %s' % (str(check_algorithms)))
        if skyline_app == 'crucible':
            ALGORITHMS.append('detect_drop_off_cliff')
            check_algorithms = ALGORITHMS
            logger.info('check_algorithms for crucible - %s' % (str(check_algorithms)))
    else:
        check_algorithms = algorithms
        logger.info('check_algorithms specified - %s' % (str(check_algorithms)))

    if not check_algorithms:
        logger.info('check_algorithms unknown - %s' % (str(check_algorithms)))
        ALGORITHMS.append('detect_drop_off_cliff')
        check_algorithms = ALGORITHMS
        logger.info('check_algorithms - %s' % (str(check_algorithms)))

    logger.info('checking algorithms - %s on %s' % (
        str(check_algorithms), str(timeseries_file)))

    # @added 20190611 - Feature #3106: crucible - skyline.consensus.anomalies.png
    # Plot Skyline anomalies if CONSENSUS is achieved
    anomalies = []

    # @added 20200422 - Feature #3500: webapp - crucible_process_metrics
    #                   Feature #1448: Crucible web UI
    # Added padded_timeseries.  If the time series is padded then set
    # the range appropriately so that the padded period data points are not
    # analysed for anomalies
    default_range = 10
    if padded_timeseries:
        default_range = 0
        for ts, value in timeseries:
            if int(ts) < from_timestamp:
                default_range += 1
            else:
                break
        logger.info('padded_timeseries - default range set to %s for %s' % (
            str(default_range), str(timeseries_file)))

    for algorithm in check_algorithms:
        detected = ''
        try:
            x_vals = np.arange(len(timeseries))
            y_vals = np.array([y[1] for y in timeseries])
            # Match default graphite graph size
            plt.figure(figsize=(5.86, 3.08), dpi=100)
            plt.plot(x_vals, y_vals)

            # Start a couple datapoints in for the tail average
            # @modified 20200422 - Feature #3500: webapp - crucible_process_metrics
            #                      Feature #1448: Crucible web UI
            # If the time series is padded then use the appropriate range so
            # that the padded period data points are not analysed for anomalies
            # for index in range(10, len(timeseries)):
            for index in range(default_range, len(timeseries)):
                sliced = timeseries[:index]
                anomaly = globals()[algorithm](sliced, end_timestamp, full_duration)

                # Point out the datapoint if it's anomalous
                if anomaly:
                    plt.plot([index], [sliced[-1][1]], 'ro')
                    detected = "DETECTED"
                    # @added 20190611 - Feature #3106: crucible - skyline.consensus.anomalies.png
                    # Add the anomaly to the anomalies list to plot Skyline
                    # anomalies if CONSENSUS is achieved
                    anomalies.append([sliced[-1][0], sliced[-1][1], algorithm])

            if detected == "DETECTED":
                results_filename = join(results_dir + "/" + algorithm + "." + detected + ".png")
                logger.info('ANOMALY DETECTED :: with %s on %s' % (
                    algorithm, str(timeseries_file)))
                anomalous = True
                triggered_algorithms.append(algorithm)
            else:
                results_filename = join(results_dir + "/" + algorithm + ".png")

            try:
                plt.savefig(results_filename, dpi=100)
                logger.info('saved %s plot :: %s' % (algorithm, results_filename))
                if python_version == 2:
                    # @modified 20200327 - Branch #3262: py3
                    # os.chmod(results_filename, 0644)
                    os.chmod(results_filename, 0o644)
                if python_version == 3:
                    os.chmod(results_filename, mode=0o644)
            except:
                logger.error('error :: %s' % (traceback.format_exc()))
                logger.error('error :: failed to save %s for %s' % (
                    str(results_filename), str(timeseries_file)))
        except:
            logger.error('error :: %s' % (traceback.format_exc()))
            logger.error(
                'error :: error thrown in algorithm running and plotting - %s on %s' % (
                    str(algorithm), str(timeseries_file)))

    end_analysis = int(time.time())
    # @modified 20160814 - pyflaked
    # seconds_to_run = end_analysis - start_analysis
    # logger.info(
    #     'analysis of %s at a full duration of %s took %s seconds' %
    #     (timeseries_name, str(full_duration), str(seconds_to_run)))

    # @added 20200421 - Feature #3500: webapp - crucible_process_metrics
    #                   Feature #1448: Crucible web UI
    # Added last_anomaly_timestamp to apply alert_interval against and
    # alert_interval_discarded_anomalies.  If an alert_interval is passed,
    # Crucible will only report Skyline CONSENSUS anomalies if the time since
    # the last anomaly is not less than the specified alert_interval period.
    # This enables Crucible to mimic Analyzer and Mirage and apply an
    # EXPIRATION_TIME type methodology to identifying anomalies, like Analyzer
    # would.  This makes Crucible work SOMEWHAT like Analyzer, however it is
    # still a bit different, as with Crucible the time series grows, like a
    # new metric would.
    # Set last_anomaly_timestamp to the appropriate timestamp before the
    # alert_interval if alert_interval is set; if it is not set it does not
    # matter, as alert_interval and alert_interval_discarded_anomalies will
    # not be applied.
    # @modified 20200427 - Feature #3500: webapp - crucible_process_metrics
    #                      Feature #1448: Crucible web UI
    # Wrap the timeseries_start_timestamp assignment in try so that on failure
    # the process does not hang
    try:
        timeseries_start_timestamp = int(timeseries[0][0])
    except:
        logger.error('error :: %s' % (traceback.format_exc()))
        logger.error(
            'error :: failed to determine timeseries_start_timestamp from %s' %
            str(timeseries_file))
        timeseries_start_timestamp = 0

    # @modified 20200427 - Feature #3500: webapp - crucible_process_metrics
    #                      Feature #1448: Crucible web UI
    # if alert_interval:
    #     last_anomaly_timestamp = timeseries_start_timestamp
    if alert_interval and timeseries_start_timestamp:
        last_anomaly_timestamp = timeseries_start_timestamp - (alert_interval + 1)
    else:
        last_anomaly_timestamp = timeseries_start_timestamp

    alert_interval_discarded_anomalies = []

    # To apply alert_interval the anomalies object needs to be sorted by
    # timestamp, as the anomalies are added per algorithm, so they are not
    # timestamp ordered, but timestamp ordered per algorithm
    if anomalies and alert_interval:
        try:
            logger.info(
                'info :: last_anomaly_timestamp set to %s for alert_interval check on %s' % (
                    str(last_anomaly_timestamp), str(timeseries_file)))
            logger.info(
                'info :: sorting %s anomalies to apply alert_interval check on %s' % (
                    str(len(anomalies)), str(timeseries_file)))
            sorted_anomalies = sorted(anomalies, key=lambda x: x[0])
            anomalies = sorted_anomalies
            del sorted_anomalies
        except:
            logger.error('error :: %s' % (traceback.format_exc()))
            logger.error('error :: failed to create sorted_anomalies on %s' %
                         str(timeseries_file))

    # @added 20200817 - Feature #3682: SNAB - webapp - crucible_process - run_algorithms
    # Allow the user to pass the algorithms for run_algorithms to run
    use_consensus = 6
    try:
        try:
            from settings import CONSENSUS as use_consensus
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: failed to import CONSENSUS as use_consensus')
            use_consensus = 6
        if len(check_algorithms) <= use_consensus:
            use_consensus = len(check_algorithms)
            logger.info(
                'check_algorithms passed with the number of algorithms less than CONSENSUS, use_consensus set to %s' % (
                    str(use_consensus)))
    except:
        logger.error(traceback.format_exc())
        logger.error('error :: failed to set use_consensus')

    # @added 20190611 - Feature #3106: crucible - skyline.consensus.anomalies.png
    # Plot Skyline anomalies where CONSENSUS achieved and create file resources
    # skyline.anomalies_score.txt and skyline.anomalies.csv
    anomalies_score = []
    if anomalies:
        for ts, value, algo in anomalies:
            try:
                processed = False
                algorithms_triggered = []
                if anomalies_score:
                    for i in anomalies_score:
                        if i[0] == ts:
                            processed = True
                            continue
                if processed:
                    continue
                for w_ts, w_value, w_algo in anomalies:
                    if w_ts == ts:
                        algorithms_triggered.append(w_algo)
                # @added 20200421 - Feature #3500: webapp - crucible_process_metrics
                #                   Feature #1448: Crucible web UI
                # Added last_anomaly_timestamp to apply alert_interval against
                # and alert_interval_discarded_anomalies
                append_anomaly = True
                if algorithms_triggered:
                    consensus = len(algorithms_triggered)
                    # @modified 20200817 - Feature #3682: SNAB - webapp - crucible_process - run_algorithms
                    # if consensus >= CONSENSUS:
                    if consensus >= use_consensus:
                        current_anomaly_timestamp = int(ts)
                        if alert_interval and last_anomaly_timestamp:
                            time_between_anomalies = current_anomaly_timestamp - last_anomaly_timestamp
                            if time_between_anomalies < alert_interval:
                                try:
                                    discard_anomaly = [ts, value, consensus, algorithms_triggered]
                                    # This logs a lot if enabled
                                    # logger.info('debug :: time_between_anomalies %s is less than alert_interval %s, last_anomaly_timestamp set to %s and current_anomaly_timestamp is %s - discarding %s' % (
                                    #     str(time_between_anomalies), str(alert_interval),
                                    #     str(last_anomaly_timestamp),
                                    #     str(current_anomaly_timestamp), str(discard_anomaly)))
                                    alert_interval_discarded_anomalies.append(discard_anomaly)
                                    append_anomaly = False
                                except:
                                    logger.error(traceback.format_exc())
                                    logger.error(
                                        'error :: failed to append to alert_interval_discarded_anomalies on %s' %
                                        str(timeseries_file))
                    # @modified 20200421 - Feature #3500: webapp - crucible_process_metrics
                    #                      Feature #1448: Crucible web UI
                    # Only append if append_anomaly
                    # anomalies_score.append([ts, value, consensus, algorithms_triggered])
                    if append_anomaly:
                        anomalies_score.append([ts, value, consensus, algorithms_triggered])
                    # @modified 20200817 - Feature #3682: SNAB - webapp - crucible_process - run_algorithms
                    # if consensus >= CONSENSUS:
                    if consensus >= use_consensus:
                        last_anomaly_timestamp = int(ts)
            except:
                logger.error(traceback.format_exc())
                logger.error('error :: failed to process anomalies entry on %s' %
                             str(timeseries_file))

        # @added 20200421 - Feature #3500: webapp - crucible_process_metrics
        #                   Feature #1448: Crucible web UI
        # Added alert_interval_discarded_anomalies
        if alert_interval:
            if alert_interval_discarded_anomalies:
                logger.info(
                    'info :: discarded %s anomalies due to them being within the alert_interval period on %s' % (
                        str(len(alert_interval_discarded_anomalies)), str(timeseries_file)))
            else:
                logger.info(
                    'info :: no anomalies were discarded due to them being within the alert_interval period on %s' %
                    str(timeseries_file))

        try:
            logger.info('info :: plotting skyline.consensus.anomalies.png for %s' %
                        str(timeseries_file))
            x_vals = np.arange(len(timeseries))
            y_vals = np.array([y[1] for y in timeseries])
            # Match default graphite graph size
            plt.figure(figsize=(5.86, 3.08), dpi=100)
            plt.plot(x_vals, y_vals)
            for index in range(10, len(timeseries)):
                anomaly = False
                sliced = timeseries[:index]
                for i in anomalies_score:
                    if sliced[-1][0] == i[0]:
                        # @modified 20200817 - Feature #3682: SNAB - webapp - crucible_process - run_algorithms
                        # if i[2] >= CONSENSUS:
                        if i[2] >= use_consensus:
                            anomaly = True
                # Point out the datapoint if it is anomalous according to
                # Skyline CONSENSUS
                if anomaly:
                    plt.plot([index], [sliced[-1][1]], 'ro')
            results_filename = join(results_dir + "/skyline.consensus.anomalies.png")
            plt.savefig(results_filename, dpi=100)
            if python_version == 2:
                # @modified 20200327 - Branch #3262: py3
                # os.chmod(results_filename, 0644)
                os.chmod(results_filename, 0o644)
            if python_version == 3:
                os.chmod(results_filename, mode=0o644)
        except:
            logger.error('error :: %s' % (traceback.format_exc()))
            logger.error('error :: failed plotting skyline.consensus.anomalies.png for %s' %
                         str(timeseries_file))

        anomalies_filename = join(results_dir + "/skyline.anomalies_score.txt")
        try:
            logger.info('info :: creating anomalies_filename - %s for %s' % (
                anomalies_filename, str(timeseries_file)))
            write_data_to_file(skyline_app, anomalies_filename, 'w', str(anomalies_score))
        except:
            logger.error('error :: %s' % (traceback.format_exc()))
            logger.error('error :: failed creating anomalies_filename - %s for %s' % (
                anomalies_filename, str(timeseries_file)))

        anomalies_csv = join(results_dir + "/skyline.anomalies.csv")
        logger.info('info :: creating anomalies_csv - %s for %s' % (
            anomalies_csv, str(timeseries_file)))
        try:
            with open(anomalies_csv, 'w') as fh:
                fh.write('timestamp,value,consensus_count,triggered_algorithms\n')
            for ts, value, consensus, algorithms_triggered in anomalies_score:
                try:
                    algos_str = str(algorithms_triggered)
                    triggered_algorithms = algos_str.replace(',', ' ')
                    line = '%s,%s,%s,%s\n' % (
                        str(ts), str(value), str(consensus), str(triggered_algorithms))
                    with open(anomalies_csv, 'a') as fh:
                        fh.write(line)
                except:
                    logger.error(traceback.format_exc())
                    logger.error('error :: could not write to file %s for %s' % (
                        anomalies_csv, str(timeseries_file)))
            if python_version == 2:
                # @modified 20200327 - Branch #3262: py3
                # os.chmod(anomalies_csv, 0644)
                os.chmod(anomalies_csv, 0o644)
            if python_version == 3:
                os.chmod(anomalies_csv, mode=0o644)
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: could not write to file %s for %s' % (
                anomalies_csv, str(timeseries_file)))
        logger.info('info :: created anomalies_csv OK for %s' % str(timeseries_file))

        # @added 20200421 - Feature #3500: webapp - crucible_process_metrics
        #                   Feature #1448: Crucible web UI
        # Added alert_interval_discarded_anomalies
        alert_interval_discarded_anomalies_count = len(alert_interval_discarded_anomalies)
        if alert_interval_discarded_anomalies:
            alert_interval_discarded_anomalies_csv = join(
                results_dir + '/skyline.alert_interval_discarded_anomalies.csv')
            logger.info(
                'info :: writing %s alert_interval discarded anomalies to %s for %s' % (
                    str(len(alert_interval_discarded_anomalies)),
                    alert_interval_discarded_anomalies_csv, str(timeseries_file)))
            try:
                with open(alert_interval_discarded_anomalies_csv, 'w') as fh:
                    fh.write('timestamp,value,consensus,triggered_algorithms\n')
                for ts, value, consensus, algorithms_triggered in alert_interval_discarded_anomalies:
                    try:
                        line = '%s,%s,%s,%s\n' % (
                            str(ts), str(value), str(consensus), str(algorithms_triggered))
                        with open(alert_interval_discarded_anomalies_csv, 'a') as fh:
                            fh.write(line)
                    except:
                        logger.error(traceback.format_exc())
                        logger.error('error :: could not write to file %s for %s' % (
                            alert_interval_discarded_anomalies_csv, str(timeseries_file)))
                if python_version == 2:
                    os.chmod(alert_interval_discarded_anomalies_csv, 0o644)
                if python_version == 3:
                    os.chmod(alert_interval_discarded_anomalies_csv, mode=0o644)
            except:
                logger.error(traceback.format_exc())
                logger.error('error :: could not write to file %s for %s' % (
                    alert_interval_discarded_anomalies_csv, str(timeseries_file)))
    else:
        logger.info('0 anomalies found for %s' % str(timeseries_file))

    return anomalous, triggered_algorithms, alert_interval_discarded_anomalies_count
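
# Illustration: a minimal sketch of the alert_interval suppression applied
# above, in isolation.  apply_alert_interval is a hypothetical helper, not
# part of the module.  It assumes timestamp-sorted [ts, value, consensus,
# algorithms] entries that already met CONSENSUS; any anomaly closer than
# alert_interval seconds to the previous CONSENSUS anomaly is discarded,
# mimicking Analyzer's EXPIRATION_TIME behaviour.  Note that, as in the
# function above, last_anomaly_timestamp advances on every CONSENSUS
# anomaly, whether or not it was reported.
def apply_alert_interval(scored_anomalies, alert_interval, first_timestamp):
    # Seed last_anomaly_timestamp so the first anomaly is always reportable
    last_anomaly_timestamp = first_timestamp - (alert_interval + 1)
    reported, discarded = [], []
    for entry in scored_anomalies:
        current_anomaly_timestamp = int(entry[0])
        if current_anomaly_timestamp - last_anomaly_timestamp < alert_interval:
            discarded.append(entry)
        else:
            reported.append(entry)
        last_anomaly_timestamp = current_anomaly_timestamp
    return reported, discarded

reported, discarded = apply_alert_interval(
    [[1000, 42.0, 6, ['grubbs']],
     [1030, 44.0, 6, ['grubbs']],
     [2000, 99.0, 7, ['ks_test']]],
    alert_interval=300, first_timestamp=900)
print(len(reported), len(discarded))  # 2 1
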
def run_algorithms(
        timeseries, timeseries_name, end_timestamp, full_duration,
        timeseries_file, skyline_app, algorithms):
    """
    Iteratively run algorithms.
    """
    results_dir = os.path.dirname(timeseries_file)

    if not os.path.exists(results_dir):
        if python_version == 2:
            # Parse the mode string as base 8, os.makedirs expects an int mode
            mode_arg = int('0755', 8)
        if python_version == 3:
            mode_arg = 0o755
        os.makedirs(results_dir, mode_arg)

    start_analysis = int(time.time())

    triggered_algorithms = []
    anomalous = False

    check_algorithms = []
    if str(algorithms) == "['all']":
        if skyline_app == 'analyzer':
            check_algorithms = ALGORITHMS
        if skyline_app == 'mirage':
            check_algorithms = MIRAGE_ALGORITHMS
        if skyline_app == 'boundary':
            check_algorithms = algorithms
        if skyline_app == 'crucible':
            # list.append returns None, so append first and then assign
            ALGORITHMS.append('detect_drop_off_cliff')
            check_algorithms = ALGORITHMS
    else:
        check_algorithms = algorithms

    logger.info('checking algorithms - %s' % (str(check_algorithms)))

    for algorithm in check_algorithms:
        detected = ''
        try:
            x_vals = np.arange(len(timeseries))
            y_vals = np.array([y[1] for y in timeseries])
            # Match default graphite graph size
            plt.figure(figsize=(5.86, 3.08), dpi=100)
            plt.plot(x_vals, y_vals)

            # Start a couple datapoints in for the tail average
            for index in range(10, len(timeseries)):
                sliced = timeseries[:index]
                anomaly = globals()[algorithm](sliced, end_timestamp, full_duration)

                # Point out the datapoint if it's anomalous
                if anomaly:
                    plt.plot([index], [sliced[-1][1]], 'ro')
                    detected = "DETECTED"

            if detected == "DETECTED":
                results_filename = join(results_dir + "/" + algorithm + "." + detected + ".png")
                # logger.info('ANOMALY DETECTED :: %s' % (algorithm))
                anomalous = True
                triggered_algorithms.append(algorithm)
            else:
                results_filename = join(results_dir + "/" + algorithm + ".png")

            plt.savefig(results_filename, dpi=100)
            # logger.info('%s :: %s' % (algorithm, results_filename))
            if python_version == 2:
                mode_arg = int('0644', 8)
            if python_version == 3:
                # os.chmod expects an int mode, not the string '0o644'
                mode_arg = 0o644
            os.chmod(results_filename, mode_arg)
        except:
            logger.error('error :: %s' % (traceback.format_exc()))
            logger.info(
                'info :: error thrown in algorithm running and plotting - %s' %
                (str(algorithm)))

    end_analysis = int(time.time())
    # @modified 20160814 - pyflaked
    # seconds_to_run = end_analysis - start_analysis
    # logger.info(
    #     'analysis of %s at a full duration of %s took %s seconds' %
    #     (timeseries_name, str(full_duration), str(seconds_to_run)))

    return anomalous, triggered_algorithms
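
# Illustration: a self-contained sketch of the octal file-mode handling
# above.  Python 2 octal literals like 0644 are a syntax error on Python 3,
# which is why the code branches on python_version; int('0644', 8) and
# 0o644 both evaluate to the same integer (420), which is what os.chmod
# and os.makedirs expect.
import os
import sys
import tempfile

python_version = int(sys.version_info[0])
if python_version == 2:
    mode_arg = int('0644', 8)   # parse the string as base 8
if python_version == 3:
    mode_arg = 0o644            # 0o octal literal, valid on py2.6+ and py3

assert mode_arg == 420

fd, path = tempfile.mkstemp()
os.close(fd)
os.chmod(path, mode_arg)
os.remove(path)
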
def run_algorithms(
        timeseries, timeseries_name, end_timestamp, full_duration,
        timeseries_file, skyline_app, algorithms):
    """
    Iteratively run algorithms.
    """
    results_dir = os.path.dirname(timeseries_file)

    if not os.path.exists(results_dir):
        if python_version == 2:
            # Parse the mode string as base 8, os.makedirs expects an int mode
            mode_arg = int('0755', 8)
        if python_version == 3:
            mode_arg = 0o755
        os.makedirs(results_dir, mode_arg)

    start_analysis = int(time.time())

    triggered_algorithms = []
    anomalous = False

    check_algorithms = []
    if str(algorithms) == "['all']":
        if skyline_app == 'analyzer':
            check_algorithms = ALGORITHMS
        if skyline_app == 'mirage':
            check_algorithms = MIRAGE_ALGORITHMS
        if skyline_app == 'boundary':
            check_algorithms = algorithms
        if skyline_app == 'crucible':
            # list.append returns None, so append first and then assign
            ALGORITHMS.append('detect_drop_off_cliff')
            check_algorithms = ALGORITHMS
    else:
        check_algorithms = algorithms

    logger.info('checking algorithms - %s' % (str(check_algorithms)))

    for algorithm in check_algorithms:
        detected = ''
        try:
            x_vals = np.arange(len(timeseries))
            y_vals = np.array([y[1] for y in timeseries])
            # Match default graphite graph size
            plt.figure(figsize=(5.86, 3.08), dpi=100)
            plt.plot(x_vals, y_vals)

            # Start a couple datapoints in for the tail average
            for index in range(10, len(timeseries)):
                sliced = timeseries[:index]
                anomaly = globals()[algorithm](sliced, end_timestamp, full_duration)

                # Point out the datapoint if it's anomalous
                if anomaly:
                    plt.plot([index], [sliced[-1][1]], 'ro')
                    detected = "DETECTED"

            if detected == "DETECTED":
                results_filename = join(results_dir + "/" + algorithm + "." + detected + ".png")
                # logger.info('ANOMALY DETECTED :: %s' % (algorithm))
                anomalous = True
                triggered_algorithms.append(algorithm)
            else:
                results_filename = join(results_dir + "/" + algorithm + ".png")

            plt.savefig(results_filename, dpi=100)
            # logger.info('%s :: %s' % (algorithm, results_filename))
            if python_version == 2:
                mode_arg = int('0644', 8)
            if python_version == 3:
                # os.chmod expects an int mode, not the string '0o644'
                mode_arg = 0o644
            os.chmod(results_filename, mode_arg)
        except:
            logger.error('error :: %s' % (traceback.format_exc()))
            logger.info('info :: error thrown in algorithm running and plotting - %s' % (str(algorithm)))

    end_analysis = int(time.time())
    seconds_to_run = end_analysis - start_analysis
    # logger.info(
    #     'analysis of %s at a full duration of %s took %s seconds' %
    #     (timeseries_name, str(full_duration), str(seconds_to_run)))

    return anomalous, triggered_algorithms
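
# Illustration: a self-contained sketch of the expanding-slice evaluation
# pattern used in the analysis loop above.  Each algorithm is called with
# the time series as it would have existed at that point in time, so one
# pass replays the metric's history datapoint by datapoint.  tail_avg_spike
# is a hypothetical stand-in with the same (timeseries, end_timestamp,
# full_duration) signature as the real algorithms; it is not part of the
# module.
def tail_avg_spike(sliced, end_timestamp, full_duration):
    # Anomalous if the latest value is more than three times the mean of
    # the preceding (up to ten) datapoints
    tail = [value for _, value in sliced[-11:-1]]
    mean = sum(tail) / len(tail)
    return (sliced[-1][1] > 3 * mean) if mean else False

# Toy series: one spike at the 21st datapoint
timeseries = [(1000 + i * 60, 10.0 if i == 20 else 1.0) for i in range(30)]
for index in range(10, len(timeseries)):
    sliced = timeseries[:index]
    if tail_avg_spike(sliced, sliced[-1][0], 86400):
        print('anomalous at', sliced[-1][0])  # anomalous at 2200
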