def preprocessing_data_mnist(x_dev, y_dev, x_test, y_test):

    print("x_dev : ", x_dev.shape, "\n")  #(60000, 28, 28)
    print("y_dev : ", y_dev.shape, "\n")  #(60000,)
    print("x_test : ", x_test.shape, "\n")  #(10000, 28, 28)
    print("y_test : ", y_test.shape, "\n")  #(10000,)

    #Simple preprocessing: normalization only; much more would be needed if the images had different dimensions!
    #Normalization: keras utils.normalize applies an L2 normalization along the given axis (each row is rescaled to unit norm)
    x_dev_n = utils.normalize(x_dev, axis=1)
    x_test_n = utils.normalize(x_test, axis=1)

    #Create the validation set: 20% of the dev set
    x_train_n, x_val_n, y_train, y_val = train_test_split(x_dev_n,
                                                          y_dev,
                                                          test_size=0.2,
                                                          random_state=1)

    #Reshape the features to the expected dimensions; -1 lets NumPy infer the batch size (len(x_train) would also work)
    x_train_n_r = x_train_n.reshape(-1, 28, 28, 1)
    x_val_n_r = x_val_n.reshape(-1, 28, 28, 1)
    x_test_n_r = x_test_n.reshape(-1, 28, 28, 1)

    #One-hot encode the labels
    y_train_c = to_categorical(y_train, num_classes=10)
    y_val_c = to_categorical(y_val, num_classes=10)
    y_test_c = to_categorical(y_test, num_classes=10)

    #Convert to NumPy arrays (to_categorical already returns arrays, but this keeps it explicit for TensorFlow)
    y_train_a = np.array(y_train_c)
    y_val_a = np.array(y_val_c)
    y_test_a = np.array(y_test_c)

    return (x_train_n_r, x_test_n_r, x_val_n_r), (y_train_a, y_test_a, y_val_a)
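
A minimal usage sketch for the function above (an assumption, not part of the original snippet): it presumes the standard tensorflow.keras MNIST loader plus the imports the function body relies on (numpy as np, tensorflow.keras utils, to_categorical, train_test_split).

# Hedged usage sketch for preprocessing_data_mnist (not from the original source).
from tensorflow.keras.datasets import mnist

(x_dev, y_dev), (x_test, y_test) = mnist.load_data()
(x_train_r, x_test_r, x_val_r), (y_train_a, y_test_a, y_val_a) = \
    preprocessing_data_mnist(x_dev, y_dev, x_test, y_test)
print(x_train_r.shape)  # expected (48000, 28, 28, 1) after the 80/20 split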
Example 2
    def prepare_input(self):
        resources = self.get_resources()
        upgrades = self.get_upgrades()
        in_progress = self.get_units_in_progress()
        friendly_unit_list = self.get_friendly_unit_list()
        enemy_unit_list = self.get_enemy_unit_list()

        input_data = []
        input_data.append(self.observation.game_loop)   # 1
        input_data += resources                         # 9
        input_data += upgrades                          # 26
        input_data += in_progress                       # 70
        input_data += friendly_unit_list                # 44
        input_data += enemy_unit_list                   # 44

        # print('Time step (step/seconds): ' + str(self.observation.game_loop) + '/' + str(self.observation.game_loop/22.4))
        # print('Resources (minerals, vespene, food(cap, used, army, workers), idle_workers, army_count, warp_gates): ' + str(resources[0]) + ', ' + str(resources[1]) + ', (' + str(resources[2]) + ', ' + str(resources[3]) + ', ' + str(resources[4]) + ', ' + str(resources[5]) + '), ' + str(resources[6]) + ', ' + str(resources[7]) + ', ' + str(resources[8]))
        # print('In progress: ' + str(self.in_progress_dic(in_progress)))
        # print('Upgrades: ' + str(self.upgrades_dic(upgrades)))
        # print('Friendly buildings: ' + str(self.buildings_dic(friendly_unit_list)))
        # print('Friendly units: ' + str(self.units_dic(friendly_unit_list)))
        # print('Enemy buildings: ' + str(self.buildings_dic(enemy_unit_list)))
        # print('Enemy units: ' + str(self.units_dic(enemy_unit_list)))

        input_data = normalize(input_data, axis=-1, order=2)
        # input_data = min_max_norm(input_data, self.maxes)

        return np.array(input_data)
Example 3
 def __getitem__(self, idx):
     # Use ordering of self.indexes for dataset
     inds = self.indexes[idx * self.batch_size:(idx + 1) * self.batch_size]
     batch_x, batch_y = [], []
     for i in inds:
         # Load data from .gz
         data = np.loadtxt(self.filenames[i])
         # Check and if necessary correct data length
         diff = len(data) - 12000
         if (diff < 0):
             data = np.pad(data, (0, abs(diff)), mode="constant")
         elif (diff > 0):
             data = data[:-diff]
         # Handles the shape of the chosen input data, adding a filter dimension.
         input_types = {
             TIME_SEQUENCE:
             lambda data: data.reshape(12000, 1),
             LOG_SPECTROGRAM:
             lambda data: clipped_log_spectrogram(data).reshape(129, 53, 1),
             LINEAR_SPECTROGRAM:
             lambda data: normalize(
                 get_spectrogram(data).reshape(129, 53, 1))
         }
         # Add data and label to batch arrays
         batch_x.append(input_types[self.input_type](data))
         batch_y.append(self.labels[i])
     output = np.array(batch_x), np.array(list(map(self.get_ys, batch_y)))
     return output
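
The __getitem__ above is clearly meant as the batch-assembly method of a keras Sequence-style data generator. Below is a minimal sketch of the surrounding class, assuming the attribute names taken from the method body (filenames, labels, batch_size, indexes, input_type, get_ys); the constructor and shuffling policy are assumptions, not the original code.

import numpy as np
from tensorflow.keras.utils import Sequence

class GzDataSequence(Sequence):
    # Hedged sketch: only __getitem__ above is from the original example.
    def __init__(self, filenames, labels, batch_size, input_type, get_ys):
        self.filenames = filenames
        self.labels = labels
        self.batch_size = batch_size
        self.input_type = input_type
        self.get_ys = get_ys
        self.indexes = np.arange(len(filenames))

    def __len__(self):
        # Number of batches per epoch
        return int(np.ceil(len(self.filenames) / self.batch_size))

    def on_epoch_end(self):
        # Reshuffle the sample order between epochs
        np.random.shuffle(self.indexes)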
Example 4
def obtain_dataset_wordvectors(dataset_file="",
                               labels_file="",
                               sequence_file="",
                               maxlen=1500):
    dataset = []
    sequences = []

    lengths = []
    for ix, i in tqdm(enumerate(open(dataset_file))):
        i = i.split()
        item = np.array([float(k) for k in i])
        dataset.append(item)
        lengths.append(len(item))

    for i in tqdm(open(sequence_file)):
        item = [[aa2int(k)] for k in i.strip()]
        sequences.append(item)

    sequences = pad_sequences(sequences,
                              maxlen=maxlen,
                              padding="post",
                              dtype="float32",
                              truncating="post")

    dataset = np.array(dataset)
    sequences = np.array(sequences)

    print(dataset.shape, sequences.shape, set(lengths))

    return normalize(dataset, axis=-1, order=2), sequences
Example 5
def classifyData(featuresList, model):

    npFeaturesList = np.array(featuresList, dtype='float64')
    normalizedFeaturesList = normalize(npFeaturesList, axis=1)
    predictionVect = model.predict(normalizedFeaturesList)
    prediction = int(np.round(np.average(np.argmax(predictionVect, axis=0))))
    return prediction
Example 6
def neural_network_sklearn(x_train, x_test, y_train, y_test):
    tf_x_train = np.asarray(x_train[:])
    tf_y_train = np.asarray(y_train[:])
    tf_x_test = np.asarray(x_test[:])
    tf_y_test = np.asarray(y_test[:])

    tf_x_train = normalize(tf_x_train, axis=1)
    tf_x_test = normalize(tf_x_test, axis=1)

    regr = MLPRegressor(random_state=1,
                        max_iter=50000,
                        learning_rate='adaptive').fit(tf_x_train, tf_y_train)
    regr.predict(tf_x_test[:2])

    score = regr.score(tf_x_test, tf_y_test)
    print("Neural Network Sklearn")
    print(score)
Example 7
def prepareForModel(input):
    """Normalizes and resizes `input` retaining number of inputs
    
    resizes = n_inputs x `input.shape[1]` x `input.shape[2]` x 1
    """

    input = utils.normalize(input, axis=1)
    input = input.reshape((len(input), input.shape[1], input.shape[2], 1))
    return input
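
A quick shape check for prepareForModel (a sketch, assuming tensorflow.keras utils is imported as `utils`, as in the other snippets on this page):

import numpy as np
from tensorflow.keras import utils

batch = np.random.rand(5, 28, 28)   # 5 grayscale images
prepared = prepareForModel(batch)
print(prepared.shape)               # (5, 28, 28, 1)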
Example 8
def down_and_normal(x, d):
    """
    Function for downsampling the data, normalizing 
    and improving numerical stability.
    """
    x = dec(x, d)  #downsampling
    fn = lambda a: a * 1e6  # improves numerical stability
    x = fn(x)
    x = normalize(x)
    return x.astype(np.float32)
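
The `dec` helper used above is not shown in the snippet; a plausible stand-in (an assumption, not the original implementation) based on scipy's anti-aliased decimation:

from scipy import signal

def dec(x, d):
    # Downsample by an integer factor d along the last axis (assumed behaviour)
    return signal.decimate(x, d, axis=-1)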
Example 9
    def __init__(self, train_x, test_x, train_y, test_y, classes):
        self.train_x_, self.test_x_ = normalize(train_x), normalize(test_x)

        train_x_pca_, test_x_pca_ = principle_components(self.train_x_, self.test_x_)
        self.train_x_pca_, self.test_x_pca_ = normalize(train_x_pca_), normalize(test_x_pca_)

        self.classes = classes

        print(train_y[0], train_y[2], train_y[len(train_y) - 2])

        def neg_1(x):
            new_ = [0.0 if i == 2.0 else i for i in x]
            new_2 = [1.0 if j == 3.0 else j for j in new_]
            return new_2

        train_y, test_y = list(neg_1(train_y)), list(neg_1(test_y))
        # train_y, test_y = list(train_y), list(test_y)

        print(train_y[0], train_y[2], train_y[len(train_y) - 2])
        self.train_y_cat, self.test_y_cat = to_categorical(train_y), to_categorical(test_y)
        print(self.train_y_cat[0], self.train_y_cat[2], self.train_y_cat[len(train_y) - 2])
Example 10
def run_predict_isAtk(df):
    '''
    Separate attacks traffic and normal traffic

    Parameters
    ----------
    df : DataFrame

    Return
    ------
    [Success]
        output : dict
            Normal traffic
        df2 : DataFrame
            Attack traffic to be analyse
    '''
    output = {}
    protocols = {
        num: name[8:]
        for name, num in vars(socket).items() if name.startswith("IPPROTO")
    }
    columnList = df.columns
    df1 = df.copy()
    logID = df.pop('ID')
    sourceIP = df.pop('SourceIP')
    protocol = df['Protocol'].values
    port_num = df['Dst Port'].values
    time = df[['Timestamp']].values
    df_test = normalize(df.values)
    model = load_model("modules/model/binary_class_classifier.h5")
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    predictions = model.predict_proba(df_test)

    df2 = DataFrame(columns=columnList)
    for p, q, r, s, t, sip in zip(predictions, logID, protocol, port_num, time,
                                  sourceIP):
        if argmax(p) == 0:
            output[int(q)] = {
                "IsAtk": argmax(p),
                "IP": sip,
                "Protocol": protocols[r],
                "Port": int(s),
                "Time": int(t[0])
            }

        else:
            row_index = df1.loc[df1["ID"] == q].index[0]
            df2.loc[df1.index[row_index]] = df1.iloc[row_index]

    return output, df2
Example 11
    def image_transformation(self, filename):
        img = Image.open(filename)
        size = (28, 28)
        img = img.resize(size)
        img.save(filename)
        img_array = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
        img_array = cv2.bitwise_not(img_array)

        

        img_array_norm  = utils.normalize(img_array, axis=1)

        return img_array_norm
def predict_result_extern_data(model, image, label):
    label_true = label
    image = cv2.resize(image, (28, 28), interpolation=cv2.INTER_AREA)
    #image = np.dot(image[...,:3], [0.299, 0.587, 0.114])
    image = image.reshape(1, 28, 28, 1)
    image = utils.normalize(image, axis=1)
    image = np.array(image)
    predict = model.predict(image)
    #label_pred = np.argmax(model.predict(y_test_a[index_test_set]))
    label_pred = np.argmax(predict)
    print("Data extern label : ", label_true)
    print("Modèle prédit : ", (predict), "donc : ", label_pred)
    plt.imshow(image)
    plt.show()
Example 13
	def down_and_normal(self, data1=True, data2=False, downsample_rate1=2, downsample_rate2=2, norm=True):
		"""
		Downsample and normalize the data.

		:param data1: (bool) apply to data1 or not.
		:param data2: (bool) apply to data2 or not.
		:param downsample_rate1: (int) downsample rate for data1.
		:param downsample_rate2: (int) downsample rate for data2.
		:param norm: (bool) whether to normalize.
		:returns: None; self.data1 / self.data2 are updated in place with the downsampled and/or normalized (n_trial * n_chans * n_samples) arrays.
		"""
		if data1 == False and data2 == False:
			raise ValueError(f"Require at least one data type to be True: data1:{data1}, data2:{data2}")
		else:
			fnc = lambda a: a * 1e6 # improves numerical stability
			if data1:
				self.downsample_rate1 = downsample_rate1
				if self.downsample_rate1 > 1:
					self.data1 = dec(self.data1, downsample_rate1) 
					self.downsampled[0] = True
				
				self.data1 = fnc(self.data1)
				if norm:
					self.data1 = normalize(self.data1)
					self.normalized[0] = True

			if data2:
				self.downsample_rate2 = downsample_rate2
				if self.downsample_rate2 > 1:
					self.data2 = dec(self.data2, downsample_rate2) 
					self.downsampled[1] = True
				
				self.data2 = fnc(self.data2)
				if norm:
					self.data2 = normalize(self.data2)
					self.normalized[1] = True
Example 14
 def __init__(self, target_shape, offset=None, data_format=None, **kwargs):
     '''
     Crop to target.
     If only one `offset` is set, then all dimensions are offset by this amount.
     '''
     super(CroppingLike2D, self).__init__(**kwargs)
     self.data_format = normalize(data_format)
     self.target_shape = target_shape
     if offset is None or offset == 'centered':
         self.offset = 'centered'
     elif isinstance(offset, int):
         self.offset = (offset, offset)
     elif hasattr(offset, '__len__'):
         if len(offset) != 2:
             raise ValueError('`offset` should have two elements. Found: ' +
                              str(offset))
         self.offset = offset
     self.input_spec = InputSpec(ndim=4)
Example 15
def identifyImageProbabilities(image_list, conv2d=False):
    global MODEL_PATH, DIMENSION

    model = load_model(MODEL_PATH)

    if (not isinstance(image_list, list)):
        image = cv2.resize(image_list, (DIMENSION, DIMENSION),
                           interpolation=cv2.INTER_AREA)
        image_list = [image]

    image_list_as_array = np.asarray(image_list)
    image_list_as_array = normalize(image_list_as_array, axis=1)

    if (conv2d):
        return model.predict(
            image_list_as_array.reshape(-1, DIMENSION, DIMENSION, 1))
    else:
        return model.predict(image_list_as_array)
def get_embedding(embedding_dim, batch_size, epochs):
    # build model
    x = Input(shape=(1,))
    o = Embedding(input_dim=997, output_dim=embedding_dim,
                  embeddings_initializer=he_normal(), name='embedding')(x)
    h = Dense(128, use_bias=False,
              kernel_initializer=he_normal(), activation='relu')(o)
    h = Dense(24 * 5, use_bias=False,
              kernel_initializer=he_normal(), activation='relu')(o)
    model = Model(inputs=x, outputs=h)
    model.compile(loss='mse', optimizer=Adam(3e-4))
    
    # train embedding weights
    hist = model.fit(np.arange(0, 997).reshape(-1, 1), normalize(embed_label.values),
                 batch_size=batch_size, epochs=epochs, shuffle=True, verbose=0)
    
    # output embedding vector
    areaEmbedding = model.get_weights()[0]
    
    return areaEmbedding, hist
Example 17
def make_triplet(alert, to_tpu: bool = False):
    """
        Feed in alert packet
    """
    cutout_dict = dict()

    for cutout in ('science', 'template', 'difference'):
        cutout_data = loads(
            dumps([alert[f'cutout{cutout.capitalize()}']['stampData']]))[0]

        # unzip
        with gzip.open(io.BytesIO(cutout_data), 'rb') as f:
            with fits.open(io.BytesIO(f.read())) as hdu:
                data = hdu[0].data
                # replace nans with zeros
                cutout_dict[cutout] = np.nan_to_num(data)
                # normalize
                cutout_dict[cutout] = normalize(cutout_dict[cutout])

        # pad to 63x63 if smaller
        shape = cutout_dict[cutout].shape
        if shape != (63, 63):
            # print(f'Shape of {candid}/{cutout}: {shape}, padding to (63, 63)')
            cutout_dict[cutout] = np.pad(cutout_dict[cutout],
                                         [(0, 63 - shape[0]),
                                          (0, 63 - shape[1])],
                                         mode='constant',
                                         constant_values=1e-9)

    triplet = np.zeros((63, 63, 3))
    triplet[:, :, 0] = cutout_dict['science']
    triplet[:, :, 1] = cutout_dict['template']
    triplet[:, :, 2] = cutout_dict['difference']

    if to_tpu:
        # Edge TPUs require additional processing
        triplet = np.rint(triplet * 128 + 128).astype(np.uint8).flatten()

    return triplet
def show_results(img_ext):
    img_ext = cv2.resize(img_ext, (28, 28), interpolation=cv2.INTER_AREA)
    #image = np.dot(image[...,:3], [0.299, 0.587, 0.114])
    #img_ext = img_ext.reshape(1, 28, 28, 1)
    img_ext = utils.normalize(img_ext, axis=1)
    img_ext = np.array(img_ext)
    plt.figure()
    plt.subplot(2, 3, 1)
    plt.title("MNIST : prediction 7 (99%)", color='blue', size=18)
    plt.imshow(x_test_n_r[0], cmap='plasma')
    plt.subplot(2, 3, 2)
    plt.title("MNIST : prediction 5 (95%)", color='blue', size=18)
    plt.imshow(x_test_n_r[52], cmap='plasma')
    plt.subplot(2, 3, 3)
    plt.title("MNIST : prediction 6 (91%)", color='blue', size=18)
    plt.imshow(x_test_n_r[54], cmap='plasma')
    plt.subplot(2, 3, 4)
    plt.title("MNIST : prediction 4 (96%)", color='blue', size=18)
    plt.imshow(x_test_n_r[56], cmap='plasma')
    plt.subplot(2, 3, 5)
    plt.title("Extern data : prediction 8 (95%)", color='blue', size=18)
    plt.imshow(img_ext, cmap='plasma')
    plt.show()
Example 19
    def get(self,
            split='train',
            preprocess='line_profile',
            categorical_labels=True):

        x, y = self.load_data(split)

        if preprocess == 'line_profile':
            x = self.get_line_profiles(x, x.shape[1])
        elif preprocess == 'flattened':
            x = x.reshape((x.shape[0], -1))
        elif preprocess == 'images':
            n, h, w = x.shape
            x = x.reshape((n, h, w, 1))
            x = x.astype('float32')
            x /= 255.0

        if categorical_labels:
            num_classes = len(np.unique(y))
            y = to_categorical(y, num_classes)

        x = normalize(x)

        return x, y
Example 20
        if (image_name.split('.')[1] == 'jpg'):
            image = cv2.imread(test_image_directory + image_name, 0)
            image = Image.fromarray(image)
            image = image.resize((SIZE, SIZE))
            X_test.append(np.array(image))

    masks = os.listdir(test_mask_directory)
    for i, mask_name in enumerate(masks):
        if (mask_name.split('.')[1] == 'jpg'):
            mask = cv2.imread(test_mask_directory + mask_name, 0)
            mask = Image.fromarray(mask)
            mask = mask.resize((SIZE, SIZE))
            Y_test.append(np.array(mask))

    X_test = np.expand_dims(X_test, axis=3)
    X_test = normalize(X_test, axis=1)
    Y_test = np.expand_dims(
        np.array(Y_test),
        axis=3) / 255.  #change the 255 if masks already normalized

    test_masks_cat = to_categorical(Y_test, num_classes=n_classes)
    y_test_cat = test_masks_cat.reshape(
        (Y_test.shape[0], Y_test.shape[1], Y_test.shape[2],
         n_classes))  #check this line

    x_train = np.expand_dims(x_train, axis=3)
    x_train = normalize(x_train, axis=1)
    y_train = np.expand_dims(
        np.array(y_train),
        axis=3) / 255.  #change the 255 if masks already normalized
# In[8]:


f_mnist = fashion_mnist


# In[59]:


(x_tr, y_tr), (x_te, y_te) = f_mnist.load_data()


# In[60]:


x_tr, x_te = normalize(x_tr), normalize(x_te)


# In[61]:


x_tr.shape


# In[62]:


x_tr[0].shape


# In[63]:
Example 22
    def __init__(self, scale_factor=2, data_format=None, **kwargs):
        super(SubPixelUpscaling, self).__init__(**kwargs)

        self.scale_factor = scale_factor
        self.data_format = normalize(data_format)
Example 23
def load_data_part_of_game(
    data_path,
    train,
    validation,
    test,
    time_start,
    time_end,
    maxes_path=None,
    seed=None,
):
    print('Loading data...')
    # Make list of the paths to all the replays
    data_paths = []
    for file in os.listdir(data_path):
        file_path = os.path.join(data_path, file)
        if os.path.isfile(file_path) and file.lower().endswith('.npy'):
            data_paths.append(file_path)

    if seed is not None:
        np.random.seed(seed)

    np.random.shuffle(data_paths)

    train_end = int(len(data_paths) * train)
    validation_end = int(len(data_paths) * (train + validation))

    train_paths = []
    for index in range(train_end):
        train_paths.append(data_paths[index])

    validation_paths = []
    for index in range(train_end, validation_end):
        validation_paths.append(data_paths[index])

    amount_train_data_points = 0
    for path in train_paths:
        for data_point in np.load(path):
            if len(data_point) == 248 and time_start <= data_point[0] < time_end:
                amount_train_data_points += 1

    amount_validation_data_points = 0
    for path in validation_paths:
        for data_point in np.load(path):
            if len(data_point) == 248 and time_start <= data_point[0] < time_end:
                amount_validation_data_points += 1

    data = []
    labels = []
    for path in data_paths:
        for data_point in np.load(path):
            if len(data_point) == 248 and time_start <= data_point[0] < time_end:
                data.append(data_point[:-54])
                labels.append(data_point[-54:])
    print('Data loaded.')

    if len(data) == 0:
        return np.array([]), np.array([]), np.array([]), np.array(
            []), np.array([]), np.array([]),

    print('Performing L2 normalization...')
    data = normalize(data, axis=-1, order=2)
    print('L2 normalization done.')
    # print('Performing min-max normalization...')
    # min_max_norm(data, maxes_path)
    # print('Min-max normalization done.')

    print('Splitting data...')
    train_end = amount_train_data_points
    validation_end = amount_train_data_points + amount_validation_data_points

    train_data = []
    train_labels = []
    for index in range(train_end):
        train_data.append(data[index])
        train_labels.append(labels[index])

    validation_data = []
    validation_labels = []
    for index in range(train_end, validation_end):
        validation_data.append(data[index])
        validation_labels.append(labels[index])

    test_data = []
    test_labels = []
    for index in range(validation_end, len(data)):
        test_data.append(data[index])
        test_labels.append(labels[index])
    print('Data split.')

    print(
        '_____________________________________________________________________________________'
    )
    print('Data meta data')
    print('{:20s} {:7d}'.format('# of games', len(data_paths)))
    print('{:20s} {:7d}'.format('# of data points', len(data)))
    print('Split seed: ' + str(seed))
    print(
        '-------------------------------------------------------------------------------------'
    )
    print('| {:25s} | {:25s} | {:25s} |'.format('Data', '# data points',
                                                '# data point dimensions'))
    print(
        '|---------------------------|---------------------------|---------------------------|'
    )
    print('| {:25s} | {:25d} | {:25d} |'.format('train_data shape',
                                                len(train_data),
                                                len(train_data[0])))
    print('| {:25s} | {:25d} | {:25d} |'.format('train_labels shape',
                                                len(train_labels),
                                                len(train_labels[0])))
    print('| {:25s} | {:25d} | {:25d} |'.format('validation_data shape',
                                                len(validation_data),
                                                len(validation_data[0])))
    print('| {:25s} | {:25d} | {:25d} |'.format('validation_labels shape',
                                                len(validation_labels),
                                                len(validation_labels[0])))
    print('| {:25s} | {:25d} | {:25d} |'.format('test_data shape',
                                                len(test_data),
                                                len(test_data[0])))
    print('| {:25s} | {:25d} | {:25d} |'.format('test_labels shape',
                                                len(test_labels),
                                                len(test_labels[0])))
    print(
        '-------------------------------------------------------------------------------------'
    )

    return np.array(train_data), np.array(train_labels), np.array(
        validation_data), np.array(validation_labels), np.array(
            test_data), np.array(test_labels)
Example 24
from tensorflow.keras.utils import normalize
import tensorflow.keras.datasets.mnist as mnist
import numpy as np
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = normalize(x_train), normalize(x_test)

x_train_2D, x_test_2D = x_train[:, np.newaxis, ...], x_test[:, np.newaxis, ...]

x_train = np.reshape(x_train, (60000, 28 * 28))
y_train_reformatted = np.zeros((60000, 10))
for i, correct in enumerate(y_train):
    y_train_reformatted[i, correct] = 1

x_test = np.reshape(x_test, (10000, 28 * 28))
y_test_reformatted = np.zeros((10000, 10))
for i, correct in enumerate(y_test):
    y_test_reformatted[i, correct] = 1
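
The manual one-hot loops above are equivalent to to_categorical, which several other snippets on this page already use; a quick check (a sketch, not from the original source):

from tensorflow.keras.utils import to_categorical

y_train_onehot = to_categorical(y_train, num_classes=10)
assert np.array_equal(y_train_onehot, y_train_reformatted)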
Example 25
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Activation, MaxPooling2D, Dropout
from tensorflow.keras.utils import normalize
import numpy as np
import tensorflow as tf
import pickle
import cv2
import time
# Load the dataset

X = pickle.load(open("X.pickle", "rb"))
Y = pickle.load(open("Y.pickle", "rb"))

modelname = "a-z{}".format(int(time.time()))
#board = TensorBoard(log_dir="logs/{}".format(modelname))

# Scale the data; normalize applies an L2 normalization along axis=1 (dividing by 255 is the usual alternative for image data)
X = normalize(X, axis=1)
print("({})".format(X.shape[0] / 2400) +
      str(X.shape))  # (2400,50,50,1) - n,y,x,c

model = Sequential()
model.add(Conv2D(64, (3, 3), input_shape=X.shape[1:]))  # 64 3,3
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))  #
model.add(Dropout(0.40))
model.add(Conv2D(128, (3, 3)))  # 5,5
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# EXTRA
model.add(Conv2D(64, (3, 3)))  # 5,5
Example 26
def run_predict_Atks(df):
    '''
    Identify Possible Attacks

    Parameters
    ----------
    df : DataFrame

    Return
    ------
        output : dict
            Labels Attack Traffic
    '''
    columnList = df.columns
    df1 = df.copy()

    output = {}
    protocols = {
        num: name[8:]
        for name, num in vars(socket).items() if name.startswith("IPPROTO")
    }
    atks = {
        0: 'Bot',
        1: 'Brute Force -Web',
        2: 'Brute Force -XSS',
        3: 'DDOS attack-HOIC',
        4: 'DDOS attack-LOIC-UDP',
        5: 'DoS attacks-GoldenEye',
        6: 'DoS attacks-Hulk',
        7: 'DoS attacks-SlowHTTPTest',
        8: 'DoS attacks-Slowloris',
        9: 'FTP-BruteForce',
        10: 'Infiltration',
        11: 'SQL Injection',
        12: 'SSH-Bruteforce'
    }

    logID = df.pop('ID')
    SourceIP = df.pop('SourceIP')
    time = df[['Timestamp']].values
    protocol = df[['Protocol']].values
    port = df[['Dst Port']].values
    df_test = normalize(df.values)
    model = load_model(
        "modules/model/Atk_multiclass_categorical_50_ep_80_bs.h5")
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    predictions = model.predict_proba(df_test)

    for l, sip, p1, p2, p3, t in zip(logID, SourceIP, predictions, protocol,
                                     port, time):
        output[int(l)] = {
            "IsAtk": 1,
            "IP": sip,
            "Protocol": protocols[int(p2)],
            "Port": int(p3),
            "Atk": atks[argmax(p1)],
            "Time": int(t[0])
        }
    return output
Example 27
def clipped_log_spectrogram(data):
    Sxx = get_spectrogram(data)
    Sxx[Sxx == 0] = 1
    min_Sxx = np.amin(Sxx)
    Sxx[Sxx == 1] = min_Sxx
    return normalize(np.log10(Sxx))
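
get_spectrogram is not defined in these snippets; a plausible stand-in (an assumption, not the original) based on scipy's spectrogram. The sample rate and the exact nperseg/noverlap that reproduce the (129, 53) shape referenced in Example 3 are not known from the source.

from scipy import signal

def get_spectrogram(data, fs=100):
    # Hedged stand-in: STFT power spectrogram; nperseg=256 gives 129 frequency bins.
    _, _, Sxx = signal.spectrogram(data, fs=fs, nperseg=256)
    return Sxx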
Example 28
import numpy as np

X = np.array(X)
y = np.array(y)
X_val = np.array(X_val)
y_val = np.array(y_val)


import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import normalize, to_categorical
from tensorflow.keras.optimizers import Adam

X = normalize(X)
X_val = normalize(X_val)

y = normalize(y)
y_val = normalize(y_val)

# print(y_val.shape)
# print(y.shape)

model = Sequential()

model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(y.shape[1], activation='sigmoid'))

opt = Adam(learning_rate=0.001)
Example 29
    epochs = range(len(loss))

    plt.figure()

    plt.plot(epochs, loss, 'b', label='Training loss')
    plt.plot(epochs, val_loss, 'r', label='Validation loss')
    plt.title(title)
    plt.legend()

    plt.show()


first = np.array(first).reshape(228, params.Fd, 4)
second = np.array(second)
first = normalize(first)
second /= 1000
model = Sequential()


model.add(LSTM(params.inputUnits, return_sequences=True, input_shape=(params.Fd, 4)))
model.add(LSTM(params.hideUnits, activation=params.activationFuncInHideLayer))
model.add(Dense(params.Fh, activation=params.activationFuncInOutputLayer))

model.compile(loss=params.funcError, optimizer=RMSprop(), metrics=['mae'])

history = model.fit(first, second, epochs=params.epoch, batch_size=params.batchSize, validation_split=params.validationSize)

plot_train_history(history, "Plot")

print("Пожалуйста, подождите")
Example 30
# =============================================================
# plt.imshow(TRAIN_IMAGS[24], cmap="gray")
# plt.show()
# plt.imshow(TRAIN_MASKS[24])
# plt.show()

# Assign labels & encode them.
labeler = LabelEncoder()
n, h, w = TRAIN_MASKS.shape
reshaped_masks = TRAIN_MASKS.reshape(-1, 1)
encoded_masks = labeler.fit_transform(reshaped_masks)
updated_masks = encoded_masks.reshape(n, h, w)

# Prepare training datasets.
TRAIN_IMAGS = np.expand_dims(TRAIN_IMAGS, axis=3)
TRAIN_IMAGS = normalize(TRAIN_IMAGS, axis=1)  # NOTE: Normalization step
input_masks = np.expand_dims(updated_masks, axis=3)

# Create training & testing datasets.
N_TEST = 0.1
x_train, x_test, y_train, y_test = train_test_split(TRAIN_IMAGS,
                                                    input_masks,
                                                    test_size=N_TEST,
                                                    random_state=0)

# NOTE: Sanity check
# print("Class values in the dataset are ... ", np.unique(y_train))

# Categorize by one-hot encoding.
masks_cat_train = to_categorical(y_train, num_classes=N_CLASSES)
y_train_cat = masks_cat_train.reshape(