import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical

# NOTE: `eps`, `seed`, `read_data()`, `transform()` and the optional `xai`
# module are assumed to be defined elsewhere in this project.


def LITNET_data(params):
    data_path = "/home/vibek/Anomanly_detection_packages/LITNET-2020/ALLinONE/"
    data_path_csv = data_path + "allFlows.csv"

    # Load the CSV into a dataframe; only the first 1099999 rows are read.
    # Change nrows according to your needs.
    data = pd.read_csv(data_path_csv, nrows=1099999)
    num_records, num_features = data.shape
    print("there are {} flow records with {} feature dimension".format(
        num_records, num_features))

    # There are white spaces in the column names, e.g. ' Destination Port',
    # so strip the whitespace from the column names.
    data = data.rename(columns=lambda x: x.strip())
    print('stripped column names')

    df_label = data['attack_a']
    print('df_label:\n', df_label)

    plt.figure(figsize=(12, 3))
    sns.countplot(x=data['attack_t'])
    plt.show()

    data = data.drop(columns=[
        'ID', 'ts_year', 'ts_month', 'ts_day', 'ts_hour', 'ts_min',
        'ts_second', 'te_year', 'te_month', 'te_day', 'te_hour', 'te_min',
        'te_second', 'pr', '_flag1', '_flag2', '_flag3', '_flag4', '_flag5',
        '_flag6', 'nh', 'nhb', 'ismc', 'odmc', 'idmc', 'osmc', 'mpls1',
        'mpls2', 'mpls3', 'mpls4', 'mpls5', 'mpls6', 'mpls7', 'mpls8', 'mpls9',
        'mpls10', 'ra', 'eng', 'tr', 'tcp_f_n_a', 'tcp_f_n_f', 'tcp_f_n_r',
        'tcp_f_n_p', 'tcp_f_n_u', 'tcp_src_tftp', 'tcp_src_kerb',
        'tcp_src_rpc', 'attack_t', 'attack_a'
    ])
    print('dropped bad columns')
    print('Data:\n', data)
    #return data

    nan_count = data.isnull().sum().sum()
    print('There are {} nan entries'.format(nan_count))

    if nan_count > 0:
        data.fillna(data.mean(), inplace=True)
        print('filled NAN')

    # Encode string-valued (categorical) features as integers:
    le = LabelEncoder()

    data['sa'] = le.fit_transform(data['sa'])
    data['da'] = le.fit_transform(data['da'])
    data['icmp_dst_ip_b'] = le.fit_transform(data['icmp_dst_ip_b'])
    data['icmp_src_ip'] = le.fit_transform(data['icmp_src_ip'])
    data['udp_dst_p'] = le.fit_transform(data['udp_dst_p'])
    data['tcp_f_s'] = le.fit_transform(data['tcp_f_s'])
    #data['tcp_f_n_a'] = le.fit_transform(data['tcp_f_n_a'])
    #data['tcp_f_n_f'] = le.fit_transform(data['tcp_f_n_f'])
    #data['tcp_f_n_r'] = le.fit_transform(data['tcp_f_n_r'])
    #data['tcp_f_n_p'] = le.fit_transform(data['tcp_f_n_p'])
    #data['tcp_f_n_u'] = le.fit_transform(data['tcp_f_n_u'])
    data['tcp_dst_p'] = le.fit_transform(data['tcp_dst_p'])
    data['tcp_src_dst_f_s'] = le.fit_transform(data['tcp_src_dst_f_s'])
    #data['tcp_src_tftp'] = le.fit_transform(data['tcp_src_tftp'])
    #data['tcp_src_kerb'] = le.fit_transform(data['tcp_src_kerb'])
    #data['tcp_src_rpc'] = le.fit_transform(data['tcp_src_rpc'])
    data['tcp_dst_p_src'] = le.fit_transform(data['tcp_dst_p_src'])
    data['smtp_dst'] = le.fit_transform(data['smtp_dst'])
    data['udp_p_r_range'] = le.fit_transform(data['udp_p_r_range'])
    data['p_range_dst'] = le.fit_transform(data['p_range_dst'])
    data['udp_src_p_0'] = le.fit_transform(data['udp_src_p_0'])

    print('Data_new:\n', data)

    data = data.astype(np.float32)
    print('data type:\n', data)
    mask = data == -1
    data[mask] = 0

    # -1 marks missing features; those entries were zeroed above so they do
    # not skew the statistics, and are re-zeroed again after normalising
    mean_i = np.mean(data, axis=0)
    min_i = np.min(data, axis=0)
    max_i = np.max(data, axis=0)
    # zero centered
    r = (max_i - min_i) + eps
    data = (data - mean_i) / r

    # Re-zero the missing (-1) features after scaling
    data[mask] = 0

    data = data.astype(float).apply(pd.to_numeric)
    print('converted to numeric\n', data)

    # Histogram to see the data distribution
    data.hist(figsize=(15, 18))  # (300, 300) inches was impractically large
    plt.show()

    # Make sure no NaN values (missing features) remain in the dataframe
    assert data.isnull().sum().sum() == 0, "There should not be any NaN values"
    X = data.values
    print("Value of X:\n", X.shape)

    # One-hot encode the (already numeric) labels
    Y = to_categorical(df_label)
    print('Value of Y:', Y.shape)
    # le was last fitted on a feature column, so derive the class names
    # from the label column itself
    classe_names = sorted(df_label.unique())
    print('classe_names:\n', classe_names)
    # Create training and test sets.
    # train_test_split returns (X_train, X_test, y_train, y_test).
    train_data, test_data, train_labels, test_labels = train_test_split(
        X, Y, test_size=0.35, random_state=seed)
    return train_data, train_labels, test_data, test_labels
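
# Every loader in this file repeats the same zero-centred min-max scaling
# with -1 treated as a missing-feature marker. A minimal reusable sketch of
# that pattern (a hypothetical helper, not part of the original code):
def normalize_zero_center(df, eps=1e-8, missing_value=-1):
    """Zero-centre and range-scale df, keeping missing entries at 0."""
    mask = df == missing_value
    df = df.mask(mask, 0)  # zero the missing entries before taking statistics
    r = (df.max(axis=0) - df.min(axis=0)) + eps  # avoid division by zero
    df = (df - df.mean(axis=0)) / r  # zero-centred, range-normalised
    df[mask] = 0  # re-zero the missing entries after scaling
    return df
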
def unsw_data_common(params):
    dataroot = "/home/vibek/Anomanly_detection_packages/UNSW-NB/"
    raw_data = read_data(dataroot, '*_unsw.csv')
    num_records, num_features = raw_data.shape
    print("there are {} flow records with {} feature dimension".format(
        num_records, num_features))

    # There are white spaces in the column names, e.g. ' Destination Port',
    # so strip the whitespace from the column names.
    data = raw_data.rename(columns=lambda x: x.strip())
    print('stripped unsw column names')
    #_ = xai.correlations(data, include_categorical=True)

    df_label = data['Label']

    #groups = xai.imbalance_plot(data, "attack_cat", "dur")

    data = data.drop(columns=[
        'proto', 'state', 'sloss', 'dloss', 'service', 'Sload', 'Dload',
        'swin', 'dwin', 'stcpb', 'dtcpb', 'smeansz', 'dmeansz', 'trans_depth',
        'res_bdy_len', 'Sjit', 'Djit', 'Stime', 'Ltime', 'Sintpkt', 'Dintpkt',
        'tcprtt', 'synack', 'ackdat', 'is_sm_ips_ports', 'ct_state_ttl',
        'ct_flw_http_mthd', 'is_ftp_login', 'ct_ftp_cmd', 'ct_srv_src',
        'ct_srv_dst', 'ct_dst_ltm', 'ct_src_ ltm', 'ct_src_dport_ltm',
        'ct_dst_sport_ltm', 'ct_dst_src_ltm', 'attack_cat', 'Label'
    ])
    print('dropped bad columns')

    nan_count = data.isnull().sum().sum()
    print('There are {} nan entries'.format(nan_count))

    if nan_count > 0:
        data.fillna(data.mean(), inplace=True)
        print('filled NAN')

    le = LabelEncoder()

    data['srcip'] = le.fit_transform(data['srcip'])
    data['dstip'] = le.fit_transform(data['dstip'])
    data['dur'] = le.fit_transform(data['dur'])
    data['sport'] = le.fit_transform(data['sport'])
    data['dsport'] = le.fit_transform(data['dsport'])
    #print(data)

    # Normalising all numerical features:
    cols_to_norm = list(data.columns.values)[:11]  # (currently unused)
    #print('cols_to_norm:\n', cols_to_norm)
    data = data.astype(np.float32)
    #print('data type:\n', data)

    mask = data == -1
    data[mask] = 0

    # -1 marks missing features; those entries were zeroed above so they do
    # not skew the statistics, and are re-zeroed again after normalising
    mean_i = np.mean(data, axis=0)
    min_i = np.min(data, axis=0)
    max_i = np.max(data, axis=0)
    # zero centered
    r = max_i - min_i + eps
    data = (data - mean_i) / r

    # Re-zero the missing (-1) features after scaling
    data[mask] = 0

    data = data.astype(float).apply(pd.to_numeric)
    print('converted to numeric')

    #histogram to see the data distribution
    data.hist(figsize=(15, 18))
    plt.show()

    # Make sure no NaN values (missing features) remain in the dataframe
    assert data.isnull().sum().sum() == 0, "There should not be any NaN values"
    X = data.values

    # One-hot encode the (already numeric) labels
    #df_label_final = df_label_final.apply(lambda x : transform(x))
    #le = LabelEncoder()
    #Y = le.fit_transform(df_label_final)
    Y = to_categorical(df_label)
    # le was last fitted on a feature column, so derive the class names
    # from the label column itself
    classe_names = sorted(df_label.unique())
    # Create training and test sets.
    # train_test_split returns (X_train, X_test, y_train, y_test).
    train_data, test_data, train_labels, test_labels = train_test_split(
        X, Y, test_size=0.35, random_state=seed)
    return train_data, train_labels, test_data, test_labels
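
# `read_data(dataroot, pattern)` is defined elsewhere in the project; judging
# by its use above it globs the matching CSV files and concatenates them into
# one DataFrame. A plausible minimal sketch (an assumption, not the original
# implementation):
def _read_data_sketch(dataroot, pattern):
    import glob
    import os
    files = sorted(glob.glob(os.path.join(dataroot, pattern)))
    # concatenate all matching CSVs into a single flow-record DataFrame
    return pd.concat((pd.read_csv(f) for f in files), ignore_index=True)
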
def IoT_data(params):
    dataroot = "/home/vibek/Anomanly_detection_packages/IoT-23/IoTScenarios/"
    raw_data = read_data(dataroot, '*.labeled.csv')
    num_records, num_features = raw_data.shape
    print("there are {} flow records with {} feature dimension".format(
        num_records, num_features))

    # There are white spaces in the column names, e.g. ' Destination Port',
    # so strip the whitespace from the column names.
    data = raw_data.rename(columns=lambda x: x.strip())
    print('stripped column names')

    df_label = data['label_s']
    print('df_label:\n', df_label)

    # regex=False so the literal '(empty)' strings are matched
    # (the parentheses would otherwise be treated as a regex group)
    df_label_1 = data['label_s'].str.replace('0   benign   0', 'BENIGN',
                                             regex=False)
    df_label_2 = df_label_1.str.replace('(empty)   Benign   0', 'BENIGN',
                                        regex=False)
    df_label_final = df_label_2.str.replace(
        '(empty)   Malicious   PartOfAHorizontalPortScan', 'Malicious',
        regex=False)
    print('df_label new:\n', df_label_final)

    data = data.drop(columns=[
        'id', 'proto_s', 'service_s', 'sip_s', 'dip_s', 'history_s',
        'connstate_s', 'label_s'
    ])
    print('dropped bad columns')

    nan_count = data.isnull().sum().sum()
    print('There are {} nan entries'.format(nan_count))

    if nan_count > 0:
        data.fillna(data.mean(), inplace=True)
        print('filled NAN')


    # Normalising all numerical features:
    cols_to_norm = list(data.columns.values)[:14]  # (currently unused)
    print('cols_to_norm:\n', cols_to_norm)
    data = data.astype(np.float32)
    print('data type:\n', data)
    mask = data == -1
    data[mask] = 0

    # -1 marks missing features; those entries were zeroed above so they do
    # not skew the statistics, and are re-zeroed again after normalising
    mean_i = np.mean(data, axis=0)
    min_i = np.min(data, axis=0)
    max_i = np.max(data, axis=0)
    # zero centered
    r = (max_i - min_i) + eps
    data = (data - mean_i) / r

    # Re-zero the missing (-1) features after scaling
    data[mask] = 0

    data = data.astype(float).apply(pd.to_numeric)
    print('converted to numeric\n', data)

    # Make sure no NaN values (missing features) remain in the dataframe
    assert data.isnull().sum().sum() == 0, "There should not be any NaN values"
    X = data.values
    print("Value of X:\n", X.shape)

    # Encode the string labels as integers, then one-hot encode them
    df_label_final = df_label_final.apply(transform)
    print("Value of df_label:\n", df_label_final.shape)
    le = LabelEncoder()
    Y = le.fit_transform(df_label_final)
    Y = to_categorical(Y)
    print('Value of Y:', Y.shape)
    classe_names = list(le.classes_)
    print('classe_names:\n', classe_names)
    # Create training and test sets.
    # train_test_split returns (X_train, X_test, y_train, y_test).
    train_data, test_data, train_labels, test_labels = train_test_split(
        X, Y, test_size=0.3, random_state=seed)
    return train_data, train_labels, test_data, test_labels
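
# Toy demonstration (not part of the original module) of the label pipeline
# used above: integer-encode the string labels, then one-hot encode them.
def _label_pipeline_demo():
    labels = pd.Series(['BENIGN', 'Malicious', 'BENIGN'])
    codes = LabelEncoder().fit_transform(labels)  # -> array([0, 1, 0])
    return to_categorical(codes)  # -> [[1., 0.], [0., 1.], [1., 0.]]
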
def IoT_data_common(params):
    dataroot = "/home/vibek/Anomanly_detection_packages/IoT-23/IoTScenarios/"
    raw_data = read_data(dataroot, '*.labeled.csv')
    num_records, num_features = raw_data.shape
    print("there are {} flow records with {} feature dimension".format(
        num_records, num_features))

    # There are white spaces in the column names, e.g. ' Destination Port',
    # so strip the whitespace from the column names.
    data = raw_data.rename(columns=lambda x: x.strip())
    print('stripped IoT column names')

    # (used only by the commented-out xai calls below)
    categorical_cols = [
        "sip_s", "sport_s", "dip_s", "duration_f", "dport_s", "ibytes_f",
        "obyte_f", "ipkt_f", "iipbytes_f", "opkt_f", "label_s"
    ]

    df_label = data['label_s']
    plt.figure(figsize=(12, 3))
    sns.countplot(x=data['label_s'])
    plt.show()
    #print('df_label:\n', df_label)
    #df_groups = xai.imbalance_plot(data, "label_s", categorical_cols=categorical_cols)
    #groups = xai.imbalance_plot(data, "label_s", "dport_s", categorical_cols=categorical_cols)
    #bal_df = xai.balance(data, "label_s", "dport_s", upsample=0.8, categorical_cols=categorical_cols)
    #_ = xai.correlations(data, include_categorical=True)

    # regex=False so the literal '(empty)' strings are matched
    df_label_1 = data['label_s'].str.replace('0   benign   0', 'BENIGN',
                                             regex=False)
    df_label_2 = df_label_1.str.replace('(empty)   Benign   0', 'BENIGN',
                                        regex=False)
    df_label_final = df_label_2.str.replace(
        '(empty)   Malicious   PartOfAHorizontalPortScan', 'Malicious',
        regex=False)
    #print('df_label new:\n', df_label_final)

    data = data.drop(columns=[
        'ts_f', 'id', 'proto_s', 'service_s', 'connstate_s', 'ilocal_f',
        'olocal_f', 'missedbytes_f', 'history_s', 'label_s'
    ])
    print('dropped bad columns')

    #data_common = data['sip_s','sport_s','dip_s','dport_s','duration_f','ibytes_f','obyte_f','ipkt_f','iipbytes_f','opkt_f','oipbytes_f']
    #print('select common columns:\n', data_common)

    nan_count = data.isnull().sum().sum()
    print('There are {} nan entries'.format(nan_count))

    if nan_count > 0:
        data.fillna(data.mean(), inplace=True)
        print('filled NAN')

    #lb = LabelBinarizer()
    le = LabelEncoder()

    data['sip_s'] = le.fit_transform(data['sip_s'])
    #print(data)

    data['dip_s'] = le.fit_transform(data['dip_s'])
    #print(data)

    # Normalising all numerical features:
    cols_to_norm = list(data.columns.values)[:11]  # (currently unused)
    #print('cols_to_norm:\n', cols_to_norm)
    data = data.astype(np.float32)
    #print('data type:\n', data)
    mask = data == -1
    data[mask] = 0

    # -1 marks missing features; those entries were zeroed above so they do
    # not skew the statistics, and are re-zeroed again after normalising
    mean_i = np.mean(data, axis=0)
    min_i = np.min(data, axis=0)
    max_i = np.max(data, axis=0)
    # zero centered
    r = max_i - min_i + eps
    data = (data - mean_i) / r

    # Re-zero the missing (-1) features after scaling
    data[mask] = 0

    data = data.astype(float).apply(pd.to_numeric)
    print('converted to numeric')

    #histogram to see the data distribution
    data.hist(figsize=(3, 5))
    plt.show()

    # Make sure no NaN values (missing features) remain in the dataframe
    assert data.isnull().sum().sum() == 0, "There should not be any NaN values"
    X = data.values

    # Encode the string labels as integers, then one-hot encode them
    df_label_final = df_label_final.apply(transform)
    #le = LabelEncoder()
    Y = le.fit_transform(df_label_final)
    Y = to_categorical(Y)
    classe_names = list(le.classes_)
    # Create training and test sets.
    # train_test_split returns (X_train, X_test, y_train, y_test).
    train_data, test_data, train_labels, test_labels = train_test_split(
        X, Y, test_size=0.35, random_state=seed)
    return train_data, train_labels, test_data, test_labels


def cicids_data_common(params):
    dataroot = "/home/vibek/Anomanly_detection_packages/CISIDS-2017/MachineLearningCSV/MachineLearningCVE/"
    raw_data = read_data(dataroot, '*.pcap_ISCX.csv')
    num_records, num_features = raw_data.shape
    print("there are {} flow records with {} feature dimension".format(
        num_records, num_features))

    # There are white spaces in the column names, e.g. ' Destination Port',
    # so strip the whitespace from the column names.
    data = raw_data.rename(columns=lambda x: x.strip())
    print('stripped cicids column names')

    df_label = data['Label']
    #print('df_label:\n', df_label)

    data = data.drop(columns=[
        'Flow ID', 'Fwd Header Length.1', 'Protocol', 'Timestamp',
        'Total Fwd Packets', 'Total Backward Packets',
        'Total Length of Fwd Packets', 'Total Length of Bwd Packets',
        'Fwd Packet Length Max', 'Fwd Packet Length Min',
        'Fwd Packet Length Mean', 'Fwd Packet Length Std',
        'Bwd Packet Length Max', 'Bwd Packet Length Min',
        'Bwd Packet Length Mean', 'Bwd Packet Length Std', 'Flow Bytes/s',
        'Flow Packets/s', 'Flow IAT Mean', 'Flow IAT Std', 'Flow IAT Max',
        'Flow IAT Min', 'Fwd IAT Total', 'Fwd IAT Mean', 'Fwd IAT Std',
        'Fwd IAT Max', 'Fwd IAT Min', 'Bwd IAT Total', 'Bwd IAT Mean',
        'Bwd IAT Std', 'Bwd IAT Max', 'Bwd IAT Min', 'Fwd PSH Flags',
        'Bwd PSH Flags', 'Fwd URG Flags', 'Bwd URG Flags', 'Fwd Header Length',
        'Bwd Header Length', 'Min Packet Length', 'Max Packet Length',
        'Packet Length Mean', 'Packet Length Std', 'Packet Length Variance',
        'FIN Flag Count', 'SYN Flag Count', 'RST Flag Count', 'PSH Flag Count',
        'ACK Flag Count', 'URG Flag Count', 'CWE Flag Count', 'ECE Flag Count',
        'Down/Up Ratio', 'Average Packet Size', 'Avg Fwd Segment Size',
        'Avg Bwd Segment Size', 'Fwd Avg Bytes/Bulk',
        'Fwd Avg Packets/Bulk', 'Fwd Avg Bulk Rate', 'Bwd Avg Bytes/Bulk',
        'Bwd Avg Packets/Bulk', 'Bwd Avg Bulk Rate', 'Init_Win_bytes_forward',
        'Init_Win_bytes_backward', 'act_data_pkt_fwd', 'min_seg_size_forward',
        'Active Mean', 'Active Std', 'Active Max', 'Active Min', 'Idle Mean',
        'Idle Std', 'Idle Max', 'Idle Min', 'Label'
    ])
    print('dropped bad columns')

    nan_count = data.isnull().sum().sum()
    print('There are {} nan entries'.format(nan_count))

    if nan_count > 0:
        data.fillna(data.mean(), inplace=True)
        print('filled NAN')

    #lb = LabelBinarizer()
    le = LabelEncoder()

    data['Source IP'] = le.fit_transform(data['Source IP'])
    data['Destination IP'] = le.fit_transform(data['Destination IP'])

    # Normalising all numerical features:
    cols_to_norm = list(data.columns.values)[:11]  # (currently unused)
    #print('cols_to_norm:\n', cols_to_norm)
    data = data.astype(np.float32)
    #print('data type:\n', data.shape)
    mask = data == -1
    data[mask] = 0

    # -1 marks missing features; those entries were zeroed above so they do
    # not skew the statistics, and are re-zeroed again after normalising
    mean_i = np.mean(data, axis=0)
    min_i = np.min(data, axis=0)
    max_i = np.max(data, axis=0)
    # zero centered
    r = max_i - min_i + eps
    data = (data - mean_i) / r

    # Re-zero the missing (-1) features after scaling
    data[mask] = 0

    data = data.astype(float).apply(pd.to_numeric)
    print('converted to numeric')

    #histogram to see the data distribution
    data.hist(figsize=(15, 18))
    plt.show()

    # Make sure no NaN values (missing features) remain in the dataframe
    assert data.isnull().sum().sum() == 0, "There should not be any NaN values"
    X = data.values

    # Encode the string labels as integers, then one-hot encode them
    df_label = df_label.apply(transform)
    #le = LabelEncoder()
    Y = le.fit_transform(df_label)
    Y = to_categorical(Y)
    classe_names = list(le.classes_)
    # Create training and test sets.
    # train_test_split returns (X_train, X_test, y_train, y_test).
    train_data, test_data, train_labels, test_labels = train_test_split(
        X, Y, test_size=0.35, random_state=seed)
    return train_data, train_labels, test_data, test_labels


def cicids_data(params):
    dataroot = "/home/vibek/Anomanly_detection_packages/CISIDS-2017/MachineLearningCSV/MachineLearningCVE_file/"
    raw_data = read_data(dataroot, '*.pcap_ISCX.csv')
    num_records, num_features = raw_data.shape
    print("there are {} flow records with {} feature dimension".format(
        num_records, num_features))

    # There are white spaces in the column names, e.g. ' Destination Port',
    # so strip the whitespace from the column names.
    data = raw_data.rename(columns=lambda x: x.strip())
    print('stripped column names')

    df_label = data['Label']

    # Plot the class imbalance before the label column is dropped
    groups = xai.imbalance_plot(data, "Label", "Flow Duration")

    data = data.drop(columns=['Flow Packets/s', 'Flow Bytes/s', 'Label'])
    print('dropped bad columns')

    nan_count = data.isnull().sum().sum()
    print('There are {} nan entries'.format(nan_count))

    if nan_count > 0:
        data.fillna(data.mean(), inplace=True)
        print('filled NAN')


    # Normalising all numerical features:
    cols_to_norm = list(data.columns.values)[:76]  # (currently unused)
    #print('cols_to_norm:\n', cols_to_norm)
    data = data.astype(np.float32)
    print(data)

    mask = data == -1
    data[mask] = 0

    # -1 marks missing features; those entries were zeroed above so they do
    # not skew the statistics, and are re-zeroed again after normalising
    mean_i = np.mean(data, axis=0)
    min_i = np.min(data, axis=0)
    max_i = np.max(data, axis=0)
    # zero centered
    r = max_i - min_i + eps
    data = (data - mean_i) / r

    # Re-zero the missing (-1) features after scaling
    data[mask] = 0

    data = data.astype(float).apply(pd.to_numeric)
    print('converted to numeric')

    # Make sure no NaN values (missing features) remain in the dataframe
    assert data.isnull().sum().sum() == 0, "There should not be any NaN values"
    X = data.values

    # Encode the string labels as integers, then one-hot encode them
    df_label = df_label.apply(transform)
    le = LabelEncoder()
    Y = le.fit_transform(df_label)
    Y = to_categorical(Y)
    classe_names = list(le.classes_)
    # Create training and test sets.
    # train_test_split returns (X_train, X_test, y_train, y_test).
    train_data, test_data, train_labels, test_labels = train_test_split(
        X, Y, test_size=0.3, random_state=seed)
    return train_data, train_labels, test_data, test_labels
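
# Minimal usage sketch (hypothetical; assumes the dataset paths above exist
# and that `seed` is set). Note the `params` argument is currently unused by
# these loaders.
if __name__ == '__main__':
    train_X, train_y, test_X, test_y = cicids_data(params={})
    print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)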