Example #1
import json

import click

# `plugins`, `helpers`, and `settings` come from the surrounding project.


def plugins_command(status, context):
    response = plugins(status, context)

    if context == 'global':
        if response['plugins']['global']:
            click.echo('Global context!')
            click.echo('Exporting to firebase...')

            data = helpers.db.child('data').child(settings.HOSTNAME).shallow().get()

            if data:
                click.echo(f'Updating {settings.HOSTNAME} information...')
                if helpers.update_dataset(response):
                    click.echo('Export complete!')
                else:
                    click.echo('Something went wrong while exporting!')
            else:
                click.echo(f'Generating {settings.HOSTNAME} information...')
                if helpers.create_dataset(response):
                    click.echo('Export complete!')
                else:
                    click.echo('Something went wrong while exporting!')
        else:
            click.echo('Nothing to export')
    else:
        click.echo(json.dumps(response))
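The helpers used above are not shown. A minimal sketch of what `create_dataset` and `update_dataset` might look like, assuming the same pyrebase-style `db` handle that `plugins_command` already queries (the bodies here are an assumption; only the names and the boolean return contract come from the call sites):

# Hypothetical sketch; assumes the pyrebase-style `db` used above.
def create_dataset(response):
    # First export: create the per-host record.
    try:
        db.child('data').child(settings.HOSTNAME).set(response)
        return True
    except Exception:
        return False


def update_dataset(response):
    # Later exports: overwrite the existing per-host record.
    try:
        db.child('data').child(settings.HOSTNAME).update(response)
        return True
    except Exception:
        return False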
Example #2

from keras import backend as K
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten
from keras.models import Sequential
from sklearn.model_selection import train_test_split

import helpers

batch_size = 64
num_classes = 26
epochs = 40
img_rows, img_cols = 40, 30
detection2 = './detection-images/sss.png'
detection_label = 'S'

print('Start loading data.')
files, labels = helpers.load_chars74k_data()
X, y = helpers.create_dataset(files, labels)
print('Data has been loaded.')

x_train, x_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    random_state=2,
                                                    train_size=0.9)

if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
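The example stops after the reshape. Given the `Conv2D`, `MaxPooling2D`, `Dropout`, and `Flatten` imports, a small CNN presumably follows; a minimal sketch (the layer sizes are illustrative, and the labels are assumed to be one-hot encoded):

# Illustrative continuation; filter counts and dropout rates are assumptions.
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

# Assumes y_train/y_test are one-hot encoded (e.g. keras.utils.to_categorical).
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
          validation_data=(x_test, y_test))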
Example #3
# (Truncated comment illustrating the nested-list structure
# returned by create_dataset.)
#             [
#                 []
#             ],
#             [
#                 [
#                     []
#                 ]
#             ]
#         ]
#     ],
#     [],
#     [
#         [
#             [],
#             []
#         ]
#     ]
# ]

from helpers import create_dataset

dataset, edges = create_dataset(2)


def count_edges(data):
    # @TODO implement!
    pass


assert edges == count_edges(dataset)
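`count_edges` is left as an exercise. Assuming the dataset is the nested-list tree sketched in the comment above, where every sublist hangs off its parent by one edge, a plausible recursive implementation is:

def count_edges(data):
    # One edge per child list, plus the edges inside that child's subtree.
    # Assumes `data` is a (possibly empty) list of nested lists.
    return sum(1 + count_edges(child) for child in data)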
Example #4
        # Depth-first search that collects candidate routes into the
        # global `solutions` list.
        counter = 0
        solution = ['test']
        temp = 'start'
        count = find_houses(grid)
        dfs_move(grid, position, visited_houses, counter, solution, count, temp)
        check_solutions(count)

        # Pick the shortest of the collected solutions.
        solution = min(solutions, key=len)
        create_dataset(grid, solution, position)
        # solution = decision_tree_move(grid, position, clf)
        print(solution)
        while solution:
            # "Amount of garbage in the garbage truck: X/Y" (Polish UI text).
            display_text(myfont, DISPLAYSURF,
                         f"Ilość śmieci w śmieciarce: {garbage_amount}/{garbage_collector.container_capacity}", 600,
                         0)

            for house in houses:
                display_text(myfont, DISPLAYSURF, f"{house.garbage_amount}",
                             house.rect.x,
                             house.rect.y + 10)

            pygame.display.update()
            move = solution.pop(0)
            garbage_taken = 0
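The commented-out `decision_tree_move` call suggests that `create_dataset(grid, solution, position)` dumps the DFS route as training data for a decision-tree agent. A minimal sketch of that idea (the file name, row layout, and move names are all assumptions):

import csv


def create_dataset(grid, solution, position, path='dataset.csv'):
    # Hypothetical sketch: store one (x, y, move) row per step of the solved
    # route; a real implementation would also encode features from `grid`.
    deltas = {'up': (0, -1), 'down': (0, 1), 'left': (-1, 0), 'right': (1, 0)}
    x, y = position
    with open(path, 'a', newline='') as f:
        writer = csv.writer(f)
        for move in solution:
            writer.writerow([x, y, move])
            dx, dy = deltas.get(move, (0, 0))
            x, y = x + dx, y + dy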
Example #5
import math

import numpy as np
import pandas as pd
from keras.layers import Dense, LSTM
from keras.models import Sequential
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.preprocessing import MinMaxScaler

import helpers as h  # sliding-window helpers; the module path is assumed


def anomaly_uni_LSTM(lista_datos, desv_mse=0):
    temp = pd.DataFrame(lista_datos, columns=['values'])
    data_raw = temp.values.astype("float32")

    scaler = MinMaxScaler(feature_range=(0, 1))
    dataset = scaler.fit_transform(data_raw)

    print(data_raw)
    TRAIN_SIZE = 0.70

    train_size = int(len(dataset) * TRAIN_SIZE)
    test_size = len(dataset) - train_size
    train = dataset[0:train_size, :]
    test = dataset[train_size - 2:, :]

    # Create test and training sets for one-step-ahead regression.
    window_size = 1
    train_X, train_Y = h.create_dataset(train, window_size)
    test_X, test_Y = h.create_dataset(test, window_size)
    forecast_X, forecast_Y = h.create_dataset(dataset, window_size)

    train_X = np.reshape(train_X, (train_X.shape[0], 1, train_X.shape[1]))
    test_X = np.reshape(test_X, (test_X.shape[0], 1, test_X.shape[1]))
    forecast_X = np.reshape(forecast_X,
                            (forecast_X.shape[0], 1, forecast_X.shape[1]))

    # New LSTM engine.
    model = Sequential()
    model.add(LSTM(100, input_shape=(train_X.shape[1], train_X.shape[2])))
    model.add(Dense(1))
    model.compile(loss='mse', optimizer='adam')
    history = model.fit(train_X,
                        train_Y,
                        epochs=300,
                        batch_size=100,
                        validation_data=(test_X, test_Y),
                        verbose=0,
                        shuffle=False)

    yhat = model.predict(test_X)

    print("estoy")
    yhat_inverse = scaler.inverse_transform(yhat.reshape(-1, 1))
    testY_inverse = scaler.inverse_transform(test_Y.reshape(-1, 1))

    print(len(test_X))
    print(len(test_Y))
    lista_puntos = np.arange(train_size, train_size + test_size, 1)

    print(lista_puntos)
    testing_data = pd.DataFrame(yhat_inverse,
                                index=lista_puntos,
                                columns=['expected value'])

    rmse = math.sqrt(mean_squared_error(testY_inverse, yhat_inverse))
    mse = mean_squared_error(testY_inverse, yhat_inverse)
    mae = mean_absolute_error(testY_inverse, yhat_inverse)

    print("pasa")
    df_aler = pd.DataFrame()
    test = scaler.inverse_transform([test_Y])

    df_aler['real_value'] = test[0]

    df_aler['expected value'] = yhat_inverse
    df_aler['step'] = np.arange(0, len(yhat_inverse), 1)
    df_aler['mae'] = mae
    df_aler['mse'] = mse
    df_aler['anomaly_score'] = abs(df_aler['expected value'] -
                                   df_aler['real_value']) / df_aler['mae']

    # Restrict to alerts that fall within the last five steps of the series.
    df_aler_ult = df_aler[:5]
    df_aler_ult = df_aler_ult[df_aler_ult.index >= df_aler.index.max() - 4]
    if len(df_aler_ult) == 0:
        exists_anom_last_5 = 'FALSE'
    else:
        exists_anom_last_5 = 'TRUE'

    df_aler = df_aler[(df_aler['anomaly_score'] > 2)]

    # Min-max normalize the anomaly scores (avoid shadowing the builtins).
    score_max = df_aler['anomaly_score'].max()
    score_min = df_aler['anomaly_score'].min()
    df_aler['anomaly_score'] = ((df_aler['anomaly_score'] - score_min) /
                                (score_max - score_min))

    score_max = df_aler_ult['anomaly_score'].max()
    score_min = df_aler_ult['anomaly_score'].min()
    df_aler_ult['anomaly_score'] = ((df_aler_ult['anomaly_score'] - score_min) /
                                    (score_max - score_min))

    pred_scaled = model.predict(forecast_X)
    pred = scaler.inverse_transform(pred_scaled)

    print("el tamano de la preddicion")
    print(len(pred))

    print(pred)
    print('prediccion')

    engine_output = {}

    engine_output['rmse'] = str(rmse)
    engine_output['mse'] = int(mse)
    engine_output['mae'] = int(mae)
    engine_output['present_status'] = exists_anom_last_5
    engine_output['present_alerts'] = df_aler_ult.fillna(0).to_dict(
        orient='records')
    engine_output['past'] = df_aler.fillna(0).to_dict(orient='records')
    engine_output['engine'] = 'LSTM'
    df_future = pd.DataFrame(pred[len(pred) - 5:], columns=['value'])
    df_future['value'] = df_future.value.astype("float64")
    df_future['step'] = np.arange(len(lista_datos), len(lista_datos) + 5, 1)
    engine_output['future'] = df_future.to_dict(orient='records')
    print('got here')
    # testing_data['expected value'].astype("float64")
    testing_data['step'] = testing_data.index
    # testing_data.step.astype("float64")
    print('got here 2')
    engine_output['debug'] = testing_data.to_dict(orient='records')

    return engine_output
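`h.create_dataset` is the usual sliding-window helper for one-step-ahead regression, as the comment in the source says. A minimal sketch of such a helper, assuming that contract:

import numpy as np


def create_dataset(series, window_size=1):
    # Turn a scaled (n, 1) series into supervised pairs:
    # X[i] = series[i : i + window_size], Y[i] = series[i + window_size].
    X, Y = [], []
    for i in range(len(series) - window_size):
        X.append(series[i:i + window_size, 0])
        Y.append(series[i + window_size, 0])
    return np.array(X), np.array(Y)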
Example #6
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

import helpers

estimators = 2000
features = 50
# CPU cores for running the RandomForestClassifier.
cpu_cores = 4

print('Start loading data.')
files, labels = helpers.load_chars74k_data()
X, y = helpers.create_dataset(files, labels, with_denoising=True)
print('Data has been loaded.')

x_train, x_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    random_state=2,
                                                    train_size=0.8)

# Normalizing images.
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

print('Start training the model.')
model = RandomForestClassifier(n_estimators=estimators,
                               max_features=features,
                               verbose=True,
                               n_jobs=cpu_cores)
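The example is cut off here. Given the otherwise unused `accuracy_score` import, training and evaluation presumably follow; a minimal sketch of that continuation:

# Assumed continuation; the original listing is truncated above.
model.fit(x_train, y_train)

predictions = model.predict(x_test)
print('Accuracy:', accuracy_score(y_test, predictions))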