Code example #1
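Jumps to the next merge conflict in the active view. The `run(self, edit)` signature suggests this is the body of a `sublime_plugin.TextCommand`; `settings` and `find_conflict` are helpers from the surrounding plugin.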
    def run(self, edit):
        # Reload settings
        settings.load()

        current_selection = self.view.sel()

        # Use the end of the current selection for the search, or use 0 if nothing is selected
        begin = 0
        if len(current_selection) > 0:
            begin = current_selection[-1].end()

        conflict_region = find_conflict(self.view, begin)
        if conflict_region is None:
            return

        # Add the region to the selection
        self.view.show_at_center(conflict_region)
        current_selection.clear()
        current_selection.add(conflict_region)
Code example #2
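Scans the current git repository for files with merge conflicts and presents them in a quick panel. The argument-less `run(self)` and the `sublime.*` calls suggest a `sublime_plugin.WindowCommand`; `settings`, `msgs`, and the `git_executable_available`/`determine_git_repo`/`get_conflict_files`/`show_quickpanel_selection` helpers come from the surrounding plugin.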
    def run(self):
        # Reload settings
        settings.load()

        # Ensure git executable is available
        if not self.git_executable_available():
            sublime.error_message(msgs.get('git_executable_not_found'))
            return

        self.git_repo = self.determine_git_repo()
        if not self.git_repo:
            sublime.status_message(msgs.get('no_git_repo_found'))
            return

        conflict_files = self.get_conflict_files()
        if not conflict_files:
            sublime.status_message(msgs.get('no_conflict_files_found', self.git_repo))
            return

        self.show_quickpanel_selection(conflict_files)
Code example #3
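Resolves the merge conflict at (or after) the first selection by keeping one side of it. `extract` presumably returns the text of the side chosen by `keep`, or `None` when that side is empty, in which case the conflict region is replaced with an empty string.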
    def run(self, edit, keep):
        # Reload settings
        settings.load()

        current_selection = self.view.sel()

        # Use the beginning of the current selection for the search, or use 0 if nothing is selected
        begin = 0
        if len(current_selection) > 0:
            begin = current_selection[0].begin()

        conflict_region = find_conflict(self.view, begin)
        if conflict_region is None:
            return

        replace_text = extract(self.view, conflict_region, keep)

        if not replace_text:
            replace_text = ""

        self.view.replace(edit, conflict_region, replace_text)
Code example #4
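The module-level `plugin_loaded()` hook that Sublime Text 3 calls once the plugin API is ready; it is used here to load the plugin's settings.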
def plugin_loaded():
    settings.load()
Code example #5
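A long-running collection-and-analysis loop: it parses command-line options, connects to a Z-Way home-automation server, samples device features plus total power once per `granularity` seconds, logs each sample, and feeds it to an `Algo` anomaly detector. `zway`, `settings`, `Algo`, `Datalog`, `get_features`, and `get_power` are assumed to be provided by the surrounding project.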
import argparse
import sys
import time

import numpy as np

# Assumed to come from the surrounding project: zway, settings, Algo,
# Datalog, get_features, get_power.


def main(argv):

    #===== Initialization =====#
    folder = "/ne_data/"
    # Parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('hostname',
                        type=str,
                        help="IP address or hostname of Z-way server host")
    parser.add_argument('-u',
                        '--username',
                        type=str,
                        help="Username for Z-way server host")
    parser.add_argument('-p',
                        '--password',
                        type=str,
                        help="Password for Z-way server host")
    parser.add_argument('-s',
                        '--sound',
                        action='store_true',
                        help="use sound as a feature in analysis")
    parser.add_argument('-f',
                        '--settings_file',
                        type=str,
                        help="load analysis settings from file")
    #parser.add_argument('-b', '--backup', action='store_true', help="start training on backup data")
    parser.add_argument(
        '-t',
        '--time_allign',
        action='store_true',
        help="collect data only at times which are multiples of the granularity"
    )
    parser.add_argument('-o',
                        '--collect_only',
                        action='store_true',
                        help="collect data but do not run analysis")
    args = parser.parse_args(argv[1:])

    # Initialize Zway server
    host = args.hostname
    if args.username and args.password:
        zserver = zway.Server(host,
                              username=args.username,
                              password=args.password)
    else:
        zserver = zway.Server(host)

    # Use default settings or read settings from settings file
    if args.settings_file is None:
        settings_dict = {
            "prefix": "ne_data",
            "granularity": 60,
            "training_window": 120,
            "training_interval": 60,
            "ema_alpha": 1.0,
            "severity_omega": 1.0,
            "severity_lambda": 3.719,
            "auto_regression": 0.0
        }
    else:
        try:
            settings_dict = settings.load(args.settings_file)
        except Exception as error:
            print "Error reading settings file.", error
            print " "
            exit(1)

    # Initialize Algo class
    prefix = settings_dict['prefix']
    granularity = int(settings_dict['granularity'])
    training_window = int(settings_dict['training_window'])
    training_interval = int(settings_dict['training_interval'])
    ema_alpha = float(settings_dict['ema_alpha'])
    severity_omega = float(settings_dict['severity_omega'])
    severity_lambda = float(settings_dict['severity_lambda'])
    auto_regression = int(settings_dict['auto_regression'])

    feature_names = zserver.device_IDs()
    num_features = len(feature_names)

    print "Num features: ", num_features
    print "w = %.3f, L = %.3f" % (severity_omega, severity_lambda)
    print "alpha: %.3f" % ema_alpha

    algo = Algo(num_features, training_window, training_interval)
    algo.set_severity(severity_omega, severity_lambda)
    algo.set_EWMA(ema_alpha)

    # Two Datalogs: one for data and one for results
    feature_names.append('total_power')
    print(feature_names)
    data_log = Datalog(folder, prefix, feature_names)

    results_header = ['target', 'prediction', 'anomaly']
    results_log = Datalog(folder, prefix + '_results', results_header)

    # Timing procedure (granularity was already read and int-converted above)
    goal_time = int(time.time())
    if args.time_allign:
        goal_time += granularity - (int(time.time()) % granularity)

    #===== Analysis =====#
    while True:

        # Timing procedures
        while goal_time > time.time():
            time.sleep(0.2)
        goal_time = goal_time + granularity

        # Data collection
        print "Recording sample at {}".format(goal_time)
        features = get_features(zserver)
        power = get_power()
        features.append(power)
        data_log.log(features[:], goal_time)

        # Do not run analysis if only collecting data
        if args.collect_only:
            continue

        features = np.array(features).flatten()
        target, pred, anomaly, zscore = algo.run(features)

        if anomaly is not None:
            results_log.log([target, pred, float(anomaly)])
            print(target, pred, anomaly)
        else:
            print(target, pred)

    # Clean-up if necessary (not reached: the loop above runs until the
    # process is interrupted)
    print("Ending analysis")
Code example #6
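An offline variant of the same analysis: it reads timestamped features and a target column from a CSV file, injects synthetic attacks into the second half of the target series, runs the `Algo` detector row by row, writes the results to an output CSV, and prints error and F1 metrics. `settings`, `Algo`, `filter_low_variance`, `scale_features`, `add_auto_regression`, `add_attacks`, `f1_scores`, `error_scores`, and `DATE_FORMAT` are assumed to come from the surrounding project.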
import argparse
import csv
import datetime as dt
import sys
import time

import numpy as np
import pandas as pd

# Assumed to come from the surrounding project: settings, Algo, DATE_FORMAT,
# filter_low_variance, scale_features, add_auto_regression, add_attacks,
# f1_scores, error_scores (matplotlib would be needed for the commented-out
# plotting at the end).


def main(argv):

    #np.seterr(all='print')

    # Parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('infile', type=str, help="name of input CSV file")
    parser.add_argument('outfile', type=str, help="name of output CSV file")
    parser.add_argument('-f',
                        '--settings_file',
                        type=str,
                        help="load analysis settings from file")
    args = parser.parse_args(argv[1:])

    infile = args.infile
    outfile = args.outfile
    print("Starting analysis on %s..." % infile)
    print("Results will be recorded in %s..." % outfile)

    # Use default settings or read settings from settings file
    if args.settings_file is None:
        settings_dict = {
            "granularity": 60,
            "training_window": 1440,
            "training_interval": 60,
            "ema_alpha": 1.0,
            "severity_omega": 1.0,
            "severity_lambda": 3.719,
            "auto_regression": 0.0
        }
    else:
        try:
            settings_dict = settings.load(args.settings_file)
        except Exception as error:
            print "Error reading settings file.", error
            print " "
            exit(1)

    training_window = int(settings_dict['training_window'])
    training_interval = int(settings_dict['training_interval'])
    ema_alpha = float(settings_dict['ema_alpha'])
    severity_omega = float(settings_dict['severity_omega'])
    severity_lambda = float(settings_dict['severity_lambda'])
    auto_regression = int(settings_dict['auto_regression'])

    # Collect data from CSV file
    df = pd.read_csv(infile)
    df = df.set_index(df.columns[0])
    timestamps = df.index.values
    targets = df[df.columns[-1]]
    features = df[df.columns[:-1]]

    # Pre-processing
    features, removed = filter_low_variance(features)
    print "\nRemoving features due to low variance:"
    for f in removed:
        print f

    num_features = len(features.columns) + auto_regression
    print "\nThe following {} features will be used:".format(num_features)
    for f in features.columns:
        print f
    if auto_regression > 0:
        print "Auto-regression: {}\n".format(auto_regression)

    # Add attacks to the data (power is last row)
    # Used for F1 calculation
    y = np.matrix(targets.values).T
    start = len(y) // 2  # integer division so the slice index stays an int
    y[start:], ground_truth = add_attacks(y[start:], 3, 30, 500)
    ground_truth = set([(i + start) for i in ground_truth])
    detected = set()

    X = np.matrix(features.values)
    X = scale_features(X)
    X = add_auto_regression(X, y, auto_regression)
    X = np.concatenate((X, y), 1)

    print ""

    # Initialize Algo class
    algo = Algo(num_features, training_window, training_interval)
    algo.set_severity(severity_omega, severity_lambda)
    algo.set_EWMA(ema_alpha)

    # Output lists
    results = [['timestamp', 'target', 'prediction', 'anomaly', 'zscore']]

    print "Num features: ", num_features
    print "w = %.3f, L = %.3f" % (severity_omega, severity_lambda)
    print "alpha: %.3f" % ema_alpha

    #==================== ANALYSIS ====================#
    print "Beginning analysis..."
    count = 0
    start_time = time.time()
    for count in range(len(timestamps)):

        # Get the next row of data
        cur_time = timestamps[count]

        if count % 120 == 0:
            print("Trying time %s" %
                  dt.datetime.fromtimestamp(cur_time).strftime(DATE_FORMAT))

        new_data = np.ravel(X[count, :])
        new_data = np.around(new_data, decimals=2)
        target, prediction, anomaly, zscore = algo.run(new_data)  # one detection step

        if prediction is not None:
            results.append([cur_time, target, prediction, anomaly, zscore])
            if anomaly:
                detected.add(count)


    #==================== RESULTS ====================#
    print "Runtime: %.4f" % (time.time() - start_time)

    # Save data for later graphing
    with open(outfile, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerows(results)

    # Remove headers for analysis and graphing
    timestamps, targets, predictions, anomalies, zscores = zip(*results)
    timestamps = timestamps[1:]
    targets = targets[1:]
    predictions = predictions[1:]
    anomalies = anomalies[1:]
    zscores = zscores[1:]

    precision, recall, f1_score = f1_scores(detected, ground_truth)
    PMSE_smoothed, PMSE, Re_MSE, SMSE = error_scores(targets, predictions)

    print "{}: {:.4f}".format("RMSE-score (smoothed)", PMSE_smoothed)
    print "{}: {:.4f}".format("RMSE-score (raw)", PMSE)
    print "{}: {:.4f}".format("Relative MSE", Re_MSE)
    print "{}: {:.4f}".format("SMSE", SMSE)
    print "{}: {:.4f}".format("Precision", precision)
    print "{}: {:.4f}".format("Recall", recall)
    print "{}: {:.4f}".format("F1-Score", f1_score)

    print "Ending analysis. See %s for results." % outfile
    """
    plt.figure()    
    plt.subplot(211)
    plt.plot(timestamps, targets, timestamps, predictions)
    plt.subplot(212)
    plt.plot(timestamps, zscores)
    plt.title("Targets and Predictions")
    plt.tight_layout()
    plt.show()    
    """
    """
    plt.subplot(312)
    error = [targets[i] - predictions[i] for i in range(len(targets))]
    plt.hist(np.ravel(error), 250, facecolor='green', alpha=0.75)
    plt.axis([-1000, 1000, 0, 10000])
    plt.title("Distribution of Errors")
    
    plt.subplot(313)
    plt.hist(np.ravel(pvalues), 50, facecolor='green', alpha=0.75)
    plt.title("Distribution of P-Values")
    plt.tight_layout()
    plt.show()
    """

    return results