Example #1
def generate_seq(input_size, output_size, processor):
    # Simulate a 1000-sample tremor recording: ground-truth tremor and gyro readings
    env = TremorSim(1000)
    ground, data = env.generate_sim()
    x = np.arange(0, 1000)
    y = np.arange(0, 1000)

    # z-score normalize the raw gyro readings and the ground-truth tremor signal
    xs = stats.zscore([data[i].getGyroReading() for i in x])
    ys = stats.zscore([ground[i].getTremor() for i in y])

    plt.xlabel('Time [ms]', fontsize=10)
    plt.ylabel('Amplitude [rad/s]', fontsize=10)
    plt.plot(xs)
    plt.show()

    # Band-pass filter the sensor signal to isolate the tremor band (3-13 Hz)
    filt_x, _ = processor.Bandpass_Filter(xs, 3, 13, 5)

    dataset = np.reshape(filt_x, [-1, 1])
    gdataset = np.reshape(ys, [-1, 1])

    # gdataset = scaler.fit_transform(gdataset)
    # dataset = scaler.transform(dataset)

    # window into input sequences (X) and the target sequences that follow them (Y)
    testX, testY = create_dataset(dataset, gdataset, input_size, output_size)

    # reshape input to be [samples, time steps, features]
    testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))

    return testX, testY, ys
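
A minimal usage sketch (hypothetical call site; the SignalProcessor constructor and its 500 Hz argument are taken from Example #3 below, everything else is assumed):

# Hypothetical: build a 64-in / 64-out evaluation set from one simulated recording
processor = SignalProcessor(500)
testX, testY, ys = generate_seq(64, 64, processor)
print(testX.shape)  # (samples, 64, 1) -- [samples, time steps, features] for an LSTM
print(testY.shape)  # (samples, 64)    -- the target windows
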
Example #2
    def __init__(self,
                 batch_size=32,
                 INPUT_SIZE=64,
                 OUTPUT_SIZE=64,
                 shuffle=True):
        self.INPUT_SIZE = INPUT_SIZE
        self.OUTPUT_SIZE = OUTPUT_SIZE
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.processor = preprocess.SignalProcessor(500)
        self.train_size = ((10000 - INPUT_SIZE - OUTPUT_SIZE) // batch_size)
        self.max_step = self.train_size * batch_size
        self.env = TremorSim(10000)
        self.trainX, self.trainY = None, None

        # Initialize First Epoch
        self.on_epoch_end()
Example #3
def Test():
    env = TremorSim(200)
    ground, data = env.generate_sim()
    processor = SignalProcessor(500)

    fig = plt.figure(figsize=(8.0, 4.0))
    ax = fig.add_subplot(1, 1, 1)

    ax.set_title("Generated Simulation Data", fontsize=18)
    ax.set_ylabel("Gyro [rad/s]")
    ax.set_xlabel("Time [ms]")

    values = [x.getGyroReading() for x in data]
    gvalues = [y.getTremor() for y in ground]
    filtered, freq = processor.Bandpass_Filter(values, 3, 13, 5)

    # 200 samples plotted at a 2 ms spacing (400 ms at the 500 Hz sampling rate)
    plt.plot(np.arange(0, 400, 2), values)
    plt.plot(np.arange(0, 400, 2), filtered)
    plt.plot(np.arange(0, 400, 2), gvalues)
    plt.legend(['sensor(unfiltered)', 'sensor(filtered)', 'ground'],
               loc='upper left')

    plt.show()

    processor = SignalProcessor(500.0)

    fourier, freq = processor.Fourier(values)
    fig = plt.figure(figsize=(8, 4))
    ax = fig.add_subplot(1, 1, 1)

    ax.set_title("FFT Graph", fontsize=18)
    ax.set_ylabel("Amplitude")
    ax.set_xlabel("Frequency [Hz]")

    ax.plot(freq, 2.0 / len(fourier) * np.abs(fourier[:len(fourier) // 2]))
    plt.xlim(0, 50)

    plt.show()
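
For reference, a plain-NumPy sketch of the same single-sided amplitude spectrum; it assumes only the 500 Hz sampling rate passed to SignalProcessor and makes the 2/N scaling used in the plot explicit:

import numpy as np

fs = 500.0                              # assumed sampling rate (SignalProcessor(500.0))
signal = np.asarray(values)             # raw gyro trace from the example above
n = len(signal)

spectrum = np.fft.rfft(signal)          # one-sided FFT of the real-valued signal
freqs = np.fft.rfftfreq(n, d=1.0 / fs)  # matching frequency bins in Hz
amplitude = 2.0 / n * np.abs(spectrum)  # same 2/N amplitude scaling as the plot
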
Example #4
class DataGenerator(keras.utils.Sequence):
    def __init__(self,
                 batch_size=32,
                 INPUT_SIZE=64,
                 OUTPUT_SIZE=64,
                 shuffle=True):
        self.INPUT_SIZE = INPUT_SIZE
        self.OUTPUT_SIZE = OUTPUT_SIZE
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.processor = preprocess.SignalProcessor(500)  # 500 Hz signal processor
        # number of full batches of windows available from one 10000-sample simulation
        self.train_size = ((10000 - INPUT_SIZE - OUTPUT_SIZE) // batch_size)
        self.max_step = self.train_size * batch_size
        self.env = TremorSim(10000)  # simulator producing 10000-sample sequences
        self.trainX, self.trainY = None, None

        # Initialize First Epoch
        self.on_epoch_end()

    def __getitem__(self, index):
        'Generate one batch of data'
        X, Y = self.__data_generation(index)
        return X, Y

    def __data_generation(self, idx):
        # Shape one window as (1, INPUT_SIZE, 1) inputs and (1, OUTPUT_SIZE) targets
        return np.expand_dims(np.expand_dims(self.trainX[idx], axis=0),
                              axis=2), np.expand_dims(self.trainY[idx], axis=0)

    def get_sequence(self):
        ground, data = self.env.generate_sim()

        x = np.arange(0, 10000)
        y = np.arange(0, 10000)

        xs = stats.zscore([data[i].getGyroReading() for i in x])
        ys = stats.zscore([ground[i].getTremor() for i in y])

        # Bandpass Filter
        filt_x, _ = self.processor.Bandpass_Filter(xs, 3, 13, 5)

        # Reshape into (N, 1) column vectors for windowing
        dataset = np.reshape(filt_x, [-1, 1])
        gdataset = np.reshape(ys, [-1, 1])

        return [dataset, gdataset]

    def create_dataset(self, dataset, gdataset, input_size=1, output_size=1):
        # Choose a random starting offset so each epoch trains on a different slice
        if 10000 - input_size - output_size <= self.batch_size:
            const = 0
        else:
            const = np.random.randint(
                0,
                len(dataset) - (output_size + input_size + self.batch_size) - 1)
        dataX = np.empty((self.batch_size, input_size))
        dataY = np.empty((self.batch_size, output_size))
        for i in range(self.batch_size):
            a = np.array(dataset[i + const:(i + const + input_size), 0])
            b = np.array(gdataset[(i + const +
                                   input_size):(i + const + input_size +
                                                output_size), 0])
            dataX[i] = a
            dataY[i] = b
        return dataX, dataY

    def __len__(self):
        # Number of batches per epoch (one window per batch)
        return self.batch_size

    def on_epoch_end(self):
        # Resample a fresh simulated sequence and rebuild the windows for the next epoch
        seq, gt = self.get_sequence()
        self.trainX, self.trainY = self.create_dataset(seq, gt,
                                                       self.INPUT_SIZE,
                                                       self.OUTPUT_SIZE)
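
A minimal sketch of how such a Sequence would typically be passed to Keras training (model.fit on newer Keras, fit_generator on older releases); the model below is a placeholder with assumed layer sizes, only the input/output shapes follow the generator above:

# Hypothetical usage; not the project's actual network
import keras

gen = DataGenerator(batch_size=32, INPUT_SIZE=64, OUTPUT_SIZE=64)
model = keras.models.Sequential([
    keras.layers.LSTM(32, input_shape=(64, 1)),  # one (INPUT_SIZE, 1) window per batch
    keras.layers.Dense(64),                      # predicts OUTPUT_SIZE future samples
])
model.compile(optimizer='adam', loss='mse')
model.fit(gen, epochs=10)  # on_epoch_end() resamples the simulation every epoch
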
Example #5

class DataGeneratorBatch(keras.utils.Sequence):
    def __init__(self,
                 batch_size=32,
                 INPUT_SIZE=64,
                 OUTPUT_SIZE=64,
                 shuffle=True):
        self.INPUT_SIZE = INPUT_SIZE
        self.OUTPUT_SIZE = OUTPUT_SIZE
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.processor = preprocess.SignalProcessor(500)
        self.train_size = ((10000 - INPUT_SIZE - OUTPUT_SIZE) // batch_size)
        self.max_step = self.train_size * batch_size
        self.env = TremorSim(10000)
        self.trainX, self.trainY = None, None

        # Initialize First Epoch
        self.on_epoch_end()

    def __getitem__(self, index):
        'Generate one batch of data'
        X, y = self.__data_generation(index)

        # Add a leading batch dimension: X becomes (1, windows, INPUT_SIZE)
        return np.expand_dims(X, axis=0), y

    def __data_generation(self, idx):
        # Return the idx-th precomputed sliding-window dataset
        return self.trainX[idx], self.trainY[idx]

    def get_sequence(self):
        ground, data = self.env.generate_sim()

        x = np.arange(0, 10000)
        y = np.arange(0, 10000)

        xs = stats.zscore([data[i].getGyroReading() for i in x])
        ys = stats.zscore([ground[i].getTremor() for i in y])

        # Bandpass Filter
        filt_x, _ = self.processor.Bandpass_Filter(xs, 3, 13, 5)

        # Reshape into (N, 1) column vectors for windowing
        dataset = np.reshape(filt_x, [-1, 1])
        gdataset = np.reshape(ys, [-1, 1])

        return [dataset, gdataset]

    def create_dataset_batch(self):
        # Pre-build batch_size sliding-window datasets, each from a fresh simulation
        trainX = np.empty(
            (self.batch_size, 10000 - self.INPUT_SIZE - self.OUTPUT_SIZE,
             self.INPUT_SIZE))
        trainY = np.empty(
            (self.batch_size, 10000 - self.INPUT_SIZE - self.OUTPUT_SIZE,
             self.OUTPUT_SIZE))
        for i in range(self.batch_size):
            seq, gt = self.get_sequence()
            X, Y = self.create_dataset(seq, gt, self.INPUT_SIZE,
                                       self.OUTPUT_SIZE)
            trainX[i] = np.array(X)
            trainY[i] = np.array(Y)
        return trainX, trainY

    def create_dataset(self, dataset, gdataset, input_size=1, output_size=1):
        # Slide a window over the sequence: X holds past samples, Y the samples after them
        dataX, dataY = [], []
        for i in range(10000 - input_size - output_size):
            a = dataset[i:(i + input_size), 0]
            b = gdataset[(i + input_size):(i + input_size + output_size), 0]
            dataX.append(a)
            dataY.append(b)
        return np.array(dataX), np.array(dataY)

    def __len__(self):
        return self.batch_size

    def on_epoch_end(self):
        self.trainX, self.trainY = self.create_dataset_batch()
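
A small self-contained sanity check of the sliding-window split used by create_dataset, on a toy array: each X window of length input_size is immediately followed by its Y window of length output_size.

import numpy as np

toy = np.arange(12).reshape(-1, 1)     # stand-in for the (10000, 1) dataset
input_size, output_size = 3, 2
X, Y = [], []
for i in range(len(toy) - input_size - output_size):
    X.append(toy[i:i + input_size, 0])                             # past samples
    Y.append(toy[i + input_size:i + input_size + output_size, 0])  # future targets
print(X[0], Y[0])  # [0 1 2] [3 4]
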
Example #6
def LSTM():
    # Initialize Seeds
    np.random.seed(7)
    random.seed(7)

    # Initialize LSTM
    model = LSTM_Agent(args.is_training, LR, NUM_LAYERS, TIME_STEPS,
                       INPUT_SIZE, OUTPUT_SIZE, CELL_SIZE, args.batch_size,
                       KEEP_PROB, DROPOUT_IN)

    if args.debug is True:
        print("Debugging to Log File...")
        now = datetime.datetime.now()
        for handler in logging.root.handlers[:]:
            logging.root.removeHandler(handler)
        logging.basicConfig(
            filename="./logs/" +
            "log_{}.log".format(now.strftime("%m-%d-%Y-%H-%M-%S")),
            level=logging.INFO)

    sess = tf.Session()
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter("tf_logs", sess.graph)
    saver = tf.train.Saver(keep_checkpoint_every_n_hours=1, max_to_keep=1000)

    env = TremorSim(INPUT_SIZE + TIME_STEPS + OUTPUT_SIZE)
    scaler = MinMaxScaler(feature_range=(-1, 1))

    if args.load_model is True:
        load_model(saver, sess, args.load_model_folder)
    else:
        init = tf.global_variables_initializer()
        sess.run(init)
    # To view the summaries in TensorBoard, cd into this directory, run the command
    # below, and open http://0.0.0.0:6006/ in a browser:
    # $ tensorboard --logdir=tf_logs

    plt.ion()
    for episode in range(TRAINING_STEPS):
        xs, ys = get_batch_sequence(args.batch_size, env, scaler)
        feed_dict = {
            model.x: xs,
            model.y: ys,
        }

        _, cost, state, pred = sess.run(
            [model.train_op, model.loss, model.cell_final_state, model.pred],
            feed_dict=feed_dict)
        logging.info('Episode: {}, Loss: {}'.format(episode, round(cost, 4)))
        if episode % 10 == 0:
            print('Episode: {}, Loss: {}\n'.format(episode, round(cost, 4)))
        result = sess.run(merged, feed_dict)
        writer.add_summary(result, episode)

        if args.save_model is True and episode % 100 == 0:
            save_model(saver, sess, args.save_model_folder, episode // 100)
        if args.graph is True and episode % 10 == 0:
            # plotting
            plt.plot(np.arange(0, OUTPUT_SIZE * 2, 2), ys[0][0], 'r',
                     np.arange(0, OUTPUT_SIZE * 2, 2), pred[0][0:OUTPUT_SIZE],
                     'b--')
            plt.ylim((-1, 1))
            plt.show()
            plt.pause(5)
            plt.close()
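
For reference, a minimal sketch of the MinMaxScaler round-trip implied by the scaler created above (standard scikit-learn API on toy data; the actual scaling presumably happens inside get_batch_sequence, which is not shown here):

import numpy as np
from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler(feature_range=(-1, 1))
raw = np.random.randn(100, 1)                # stand-in for one gyro sequence
scaled = scaler.fit_transform(raw)           # mapped into [-1, 1] for the LSTM
restored = scaler.inverse_transform(scaled)  # back to the original scale for plotting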