Esempio n. 1
0
class Rede_neural(object):
    """Small dense policy network: 16 inputs -> 64 (hard_sigmoid) -> 4 (softmax).

    If ``wids`` is given, the layers are zero-initialized and then
    overwritten with the supplied weight list; otherwise Keras
    initializes them itself and the resulting weights are cached.
    """

    def __init__(self, wids=None):
        # Fix: compare against None with ``is``, not ``==``.
        # NOTE(review): "m" is not a standard Keras initializer name and
        # will raise at build time — likely a truncated identifier
        # (e.g. "random_normal"); confirm against the original project.
        types = "m" if wids is None else "zeros"
        self.model = Sequential()
        self.model.add(
            Dense(64,
                  input_dim=16,
                  kernel_initializer=types,
                  bias_initializer=types))
        self.model.add(Activation('hard_sigmoid'))
        self.model.add(
            Dense(4, kernel_initializer=types, bias_initializer=types))
        self.model.add(Activation('softmax'))
        if wids is None:
            # No weights supplied: cache whatever Keras initialized.
            self.wids = self.model.get_weights()
        else:
            self.setWid(wids)

    def getWid(self):
        """Return the cached weight list."""
        return self.wids

    def setWid(self, wid):
        """Cache ``wid`` and push it into the underlying Keras model."""
        self.wids = wid
        self.model.set_weights(wid)

    def predict(self, X):
        """Flatten ``X`` into a single row and return the argmax class index.

        NOTE(review): np.matrix is deprecated in NumPy; kept here because
        the model's input handling may depend on the 2-D row shape.
        """
        X = np.matrix(X.flatten())
        Y = self.model.predict(X)
        return (Y.argmax())
Esempio n. 2
0
def make_model(meta):
    """Build and compile a Keras model from a ``meta`` parameter list.

    meta layout as read below: meta[0] is the number of LSTM layers; for
    layer l, meta[1 + l*2] is the LSTM width and meta[2 + l*2] > 0
    requests a Dropout(0.75) after it; meta[-1] is the width of the
    penultimate dense layer.  ``width``, ``height``, ``tf`` and the Keras
    layer names must be provided by the surrounding module.

    NOTE(review): ``Conv2D(1, )`` is missing the required kernel_size
    argument and will raise TypeError, and a 4-D Conv2D output feeds
    straight into LSTM (which expects 3-D input) — this example looks
    truncated or broken as scraped; confirm against the original source.
    """
    model = Sequential()
    model.add(InputLayer(input_shape=(width, height, 1)))
    model.add(Conv2D(1, ))  # NOTE(review): incomplete call — kernel_size missing
    for l in range(meta[0]):
        print("LSTM({})".format(meta[1 + l * 2]))
        model.add(LSTM(meta[1 + l * 2]))
        if meta[2 + l * 2] > 0:
            print("DROPOUT(0.75)")
            model.add(Dropout(0.75))

    print("Dense({})".format(meta[-1]))
    model.add(Dense(meta[-1], activation='relu'))
    model.add(Dropout(0.75))
    model.add(Dense(2, activation='softmax'))

    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=tf.keras.optimizers.Adam(),
                  metrics=['accuracy'])

    # Replace the freshly initialized weights with N(0, 1) samples.
    weights = model.get_weights()
    weights = [np.random.normal(size=w.shape) for w in weights]
    model.set_weights(weights)

    return model
Esempio n. 3
0
def make_model(meta):
    """Build and compile a CNN classifier from the ``meta`` parameter list.

    meta[0] holds the number of conv stages; each stage reads up to five
    values starting at offset 1 + stage*5: filters, kernel size, stride,
    pool size (<= 0 disables pooling) and pool stride.  meta[-1] > 0 adds
    a dense hidden layer of that width before the 2-way softmax head.
    After compilation every weight tensor is re-drawn from N(0, 1).
    """
    model = Sequential()
    model.add(InputLayer(input_shape=(width, height, 1)))

    for stage in range(meta[0]):
        base = 1 + stage * 5
        filters = meta[base]
        kernel = meta[base + 1]
        stride = meta[base + 2]
        pool = meta[base + 3]
        print("Conv2D({},{},{})".format(filters, kernel, stride))
        model.add(Conv2D(filters,
                         kernel_size=kernel,
                         strides=stride,
                         activation='relu'))
        if pool > 0:
            # Pool stride is read lazily so short meta lists without it
            # keep working when pooling is disabled.
            pool_stride = meta[base + 4]
            print("MaxPooling2D({},{})".format(pool, pool_stride))
            model.add(MaxPooling2D(pool_size=pool, strides=pool_stride))
        model.add(Dropout(0.1))

    model.add(Flatten())
    if meta[-1] > 0:
        print("Dense({})".format(meta[-1]))
        model.add(Dense(meta[-1], activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(2, activation='softmax'))

    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=tf.keras.optimizers.Adam(),
                  metrics=['accuracy'])

    # Discard the default initialization: draw every weight from N(0, 1).
    model.set_weights(
        [np.random.normal(size=w.shape) for w in model.get_weights()])

    return model
Esempio n. 4
0
q_net.compile(optimizer=adam(lr=stepsize), loss=mse, metrics=['accuracy'])

# Initialize the weights of the target network with θ⋆ = θ.
# target_net mirrors q_net's layer sizes: 10 -> 10 -> 2 linear Q-values.
target_net = Sequential([
    Dense(10, input_shape=(
        None,
        4,
    )),
    Activation('relu'),
    Dense(10),
    Activation('relu'),
    Dense(2),
    Activation('linear')
])
target_net.compile(optimizer=adam(lr=stepsize), loss=mse, metrics=['accuracy'])
# Copy the online network's current weights into the target network.
target_net.set_weights(q_net.get_weights())

# Initialize the initial state S0
cp_env = gym.make('CartPole-v1')
curr_state = cp_env.reset()

# for t = 0, 1, ..., T do
for t in range(num_iterations):
    # With probability epsilon, choose At uniformly at random from A
    # (epsilon-greedy exploration).
    if random.uniform(0, 1) < exploration_prob:
        action = cp_env.action_space.sample()
    # and with probability 1 − epsilon, choose At such that Qθ(St, At) = maxa ∈ A Qθ(St, a)
    else:
        action = np.argmax(
            # NOTE(review): the scraped source is truncated here, mid-call.
            np.squeeze(q_net.predict(curr_state.reshape(
                1,
Esempio n. 5
0
autoencoder.summary()

# Experiment 1: train on Gaussian-noise-corrupted inputs with clean targets,
# then denoise the noisy test set.
autoencoder.fit(
    x_train_gauss,  # noisy input
    x_train,  # clean target
    epochs=10,
    batch_size=20,
    shuffle=True)

gauss_preds = autoencoder.predict(x_test_gauss)
for i in range(10):
    # Save side-by-side samples: clean, noisy, and denoised prediction.
    array_to_img(x_test[i]).save('x_test_%d.png' % i)
    array_to_img(x_test_gauss[i]).save('x_test_gauss_%d.png' % i)
    array_to_img(gauss_preds[i]).save('x_test_gauss_pred_%d.png' % i)

# Reset to the initial weights so the second experiment starts from scratch.
autoencoder.set_weights(initial_weights)

# Experiment 2: same training, but with masking-noise-corrupted inputs.
autoencoder.fit(
    x_train_masked,  # noisy input
    x_train,  # clean target
    epochs=10,
    batch_size=20,
    shuffle=True)
masked_preds = autoencoder.predict(x_test_masked)
for i in range(10):
    array_to_img(x_test_masked[i]).save('x_test_masked_%d.png' % i)
    array_to_img(masked_preds[i]).save('x_test_masked_pred_%d.png' % i)
    class LSTMPredictor(object):
        """Point-wise LSTM forecaster.

        Buffers (value, point) observations via ``update``, rebuilds a
        saved Keras model with batch size 1 via ``load_model``, and
        predicts one step ahead for every window of the buffered series.

        NOTE(review): this class appears at the loop-body indent of the
        preceding scraped example; in the original project it is almost
        certainly a top-level class.
        """

        def __init__(self):
            self._neurons = 7    # LSTM hidden units (must match the saved model)
            self._batch = 1      # batch size 1 enables stateful point-wise prediction
            self._dataset = []   # buffered (value, point) pairs, fed via update()
            self._features = 1   # univariate series
            self._model = None   # rebuilt Keras model, set by load_model()
            self._std = 0.0      # anomaly threshold (thresholding currently disabled)
            self._seq = 0        # input window length, set by load_model()

        def reset(self):
            """Drop all buffered observations."""
            self._dataset = []

        def update(self, value, point):
            """Buffer one observation: ``value`` is the measurement, ``point`` its key."""
            self._dataset.append((value, point))

        def load_model(self, path, seq, std):
            """Load the model at ``path`` and rebuild it with batch size 1.

            The saved model may have been trained with a different batch
            size; a fresh stateful network with batch_input_shape fixed to
            (1, seq, features) is built and the trained weights copied in.
            """
            logger.info('build lstm finished')
            logger.info('start reloading lstm model')
            model = load_model(path)
            # Fix: the original assigned self._seq twice; once is enough.
            self._seq = seq
            self._std = std
            logger.info('rebuild lstm started')
            self._model = Sequential()
            self._model.add(
                LSTM(self._neurons,
                     batch_input_shape=(self._batch, seq, self._features),
                     stateful=True))
            self._model.add(Dense(units=1))
            # Copy the trained weights into the rebuilt topology.
            self._model.set_weights(model.get_weights())
            self._model.compile(loss='mean_squared_error', optimizer='adam')

            logger.info('after reloading lstm model')

        def predict(self):
            """Predict one step ahead for each window and return the predictions."""
            logger.info('begin predict')
            data = [value for value, _point in self._dataset]
            X, y = self.data_preparation(data)
            y_pred = self._model.predict(X, verbose=1)
            rmse = math.sqrt(mean_squared_error(y, y_pred))
            logger.info('error of prediction: ' + str(rmse))
            # Anomaly thresholding against self._std is currently disabled:
            #for i in range(len(y)):
            #    if abs(y[i]-y_pred[i]) > self._std:
            #        anomalies.append(self._dataset[i][1])  # add point in result
            logger.info('finish prediction')
            return y_pred

        def data_preparation(self, dataset):
            """Slide a window of length self._seq over ``dataset``.

            Returns (X, y) where X has shape (n_windows, seq, features) —
            the 3-D layout the LSTM input expects — and y holds the value
            immediately following each window.
            """
            logger.info('begin data preparation')
            logger.info('from lstm perspective ' + str(len(dataset)) +
                        ' points are stored in dataset')
            data = np.reshape(dataset, (len(dataset), self._features))
            n_samples = data.shape[0]
            X = []
            y = []

            for i in range(n_samples - self._seq):
                X.append(data[i:i + self._seq])
                y.append(data[i + self._seq])

            X = np.array(X)
            y = np.array(y)
            logger.info('end data preparation')
            return X, y