Example #1
# imports inferred from usage; the spiker module paths follow the package
# layout seen in the other examples
from __future__ import print_function
import os

import numpy as np

from keras.callbacks import ModelCheckpoint, CSVLogger
from keras.utils import plot_model

import spiker
from spiker.models import resnet
from spiker.data import ddd17


def resnet_exp(model_name, data_name, channel_id, stages, blocks, filter_list,
               nb_epoch, batch_size, frame_cut):
    """Perform ResNet experiment."""
    model_path = os.path.join(spiker.SPIKER_EXPS, model_name)
    if not os.path.isdir(model_path):
        os.makedirs(model_path)
    else:
        raise ValueError("[MESSAGE] This experiment has been done before."
                         " Create a new config model if you need.")
    model_pic = os.path.join(model_path, model_name+"-model-pic.png")
    model_file_base = os.path.join(model_path, model_name)

    # print model info
    print("[MESSAGE] Model Name: %s" % (model_name))
    print("[MESSAGE] Number of epochs: %d" % (nb_epoch))
    print("[MESSAGE] Batch Size: %d" % (batch_size))
    print("[MESSAGE] Number of stages: %d" % (stages))
    print("[MESSAGE] Number of blocks: %d" % (blocks))

    # load data
    data_path = os.path.join(spiker.SPIKER_DATA, "ddd17",
                             data_name)
    if not os.path.isfile(data_path):
        raise ValueError("This dataset does not exist at %s" % (data_path))
    print("[MESSAGE] Dataset %s" % (data_path))
    assert len(frame_cut) == 2
    print("[MESSAGE] Frame cut: first at %d, last at %d"
            % (frame_cut[0], -frame_cut[1]))
    frames, steering = ddd17.prepare_train_data(
        data_path, y_name="accel",
        frame_cut=frame_cut)
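    # normalize: scale pixels to [0, 1], then zero-center with the global mean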
    frames /= 255.
    frames -= np.mean(frames, keepdims=True)
    num_samples = frames.shape[0]
    num_train = int(num_samples*0.7)
    X_train = frames[:num_train]
    Y_train = steering[:num_train]
    X_test = frames[num_train:]
    Y_test = steering[num_train:]

    del frames, steering

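    # keep a single channel unless channel_id == 2 (both channels);
    # judging by the other examples, channel 0 is DVS and channel 1 is APS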
    if channel_id != 2:
        X_train = X_train[:, :, :, channel_id][..., np.newaxis]
        X_test = X_test[:, :, :, channel_id][..., np.newaxis]

    print("[MESSAGE] Number of samples %d" % (num_samples))
    print("[MESSAGE] Number of train samples %d" % (X_train.shape[0]))
    print("[MESSAGE] Number of test samples %d" % (X_test.shape[0]))

    # setup image shape
    input_shape = (X_train.shape[1], X_train.shape[2], X_train.shape[3])

    # Build model
    model = resnet.resnet_builder(
        model_name=model_name, input_shape=input_shape,
        batch_size=batch_size,
        filter_list=filter_list, kernel_size=(3, 3),
        output_dim=1, stages=stages, blocks=blocks,
        bottleneck=False, network_type="regress")

    model.summary()
    plot_model(model, to_file=model_pic, show_shapes=True,
               show_layer_names=True)

    # configure optimizer
    #  def step_decay(epoch):
    #      "step decay callback."""
    #      if epoch >= 80 and epoch < 120:
    #          return float(0.01)
    #      elif epoch >= 120:
    #          return float(0.001)
    #      else:
    #          return float(0.1)

    #  sgd = optimizers.SGD(lr=0.0, momentum=0.9, nesterov=True)
    model.compile(loss='mean_squared_error',
                  optimizer="adam",
                  metrics=["mse"])
    print ("[MESSAGE] Model is compiled.")
    model_file = model_file_base + \
        "-{epoch:02d}-{val_mean_squared_error:.2f}.hdf5"
    checkpoint = ModelCheckpoint(model_file,
                                 monitor='val_mean_squared_error',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')
    #  scheduler = LearningRateScheduler(step_decay)

    csv_his_log = os.path.join(model_path, "csv_history.log")
    csv_logger = CSVLogger(csv_his_log, append=True)

    callbacks_list = [checkpoint, csv_logger]

    # training
    model.fit(
        x=X_train, y=Y_train,
        batch_size=batch_size,
        epochs=nb_epoch,
        validation_data=(X_test, Y_test),
        callbacks=callbacks_list)
Example #2
    [200, 2200],
    [500, 500],
    [200, 600],
    [500, 1500]]

# load steering angles, filtering out low-speed frames via speed_threshold
all_train_steering = np.zeros((0,))
for data_idx in range(30):
    # get path
    data_path = os.path.join(data_root_path, data_list[data_idx])
    print("Reading %s" % (data_path))

    # get steering
    train_steering = ddd17.prepare_train_data(
        data_path, y_name="steering", only_y=True,
        frame_cut=[frame_cuts[data_idx][0]*2, frame_cuts[data_idx][1]*2],
        data_portion="train",
        speed_threshold=15, data_type="float32")
    print ("Number of instance: %d" % (train_steering.shape[0]))

    all_train_steering = np.append(all_train_steering, train_steering, axis=0)

print ("Total number of instances: %d" % (all_train_steering.shape[0]))

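# compute a three-sigma band around the mean steering angle; values
# outside this band are treated as outliers and removed below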
steering_mean = np.mean(all_train_steering)
three_sigma = np.std(all_train_steering)*3
high_threshold = steering_mean+three_sigma
low_threshold = steering_mean-three_sigma

# keep only steering angles within the three-sigma band
filter_inds = np.logical_and(
    all_train_steering > low_threshold,
    all_train_steering < high_threshold)
Example #3
# imports inferred from usage (pickle, h5py, and Keras' load_model)
from __future__ import print_function
import os
import pickle

import h5py
import numpy as np

from keras.models import load_model

import spiker
from spiker.data import ddd17


def resnet_exp(model_name, data_name, channel_id, stages, blocks, filter_list,
               nb_epoch, batch_size, frame_cut):
    """Perform ResNet experiment."""
    model_path = os.path.join(spiker.HOME, "data", "exps", "ral-exps",
                              model_name)
    model_file_base = os.path.join(model_path, model_name)

    # print model info
    print("[MESSAGE] Model Name: %s" % (model_name))
    print("[MESSAGE] Number of epochs: %d" % (nb_epoch))
    print("[MESSAGE] Batch Size: %d" % (batch_size))
    print("[MESSAGE] Number of stages: %d" % (stages))
    print("[MESSAGE] Number of blocks: %d" % (blocks))

    # load data
    data_path = os.path.join(spiker.HOME, "data", "exps", "data", "ddd17",
                             data_name)
    if not os.path.isfile(data_path):
        raise ValueError("This dataset does not exist at %s" % (data_path))
    print("[MESSAGE] Dataset %s" % (data_path))
    assert len(frame_cut) == 2
    print("[MESSAGE] Frame cut: first at %d, last at %d" %
          (frame_cut[0] * 2, -frame_cut[1] * 2))
    frames, steering = ddd17.prepare_train_data(
        data_path,
        y_name="steering",
        frame_cut=[frame_cut[0] * 2, frame_cut[1] * 2],
        speed_threshold=15.)
    num_samples = frames.shape[0]
    num_train = int(num_samples * 0.7)

    # cache the raw train/test split to HDF5 so later runs reuse the same data
    save_path = os.path.join(spiker.HOME, "data", "exps", "data", "ddd17")
    train_file_name = os.path.join(save_path, model_name + ".hdf5")
    # only save once
    if not os.path.isfile(train_file_name):
        save_data = h5py.File(train_file_name, "w")
        train_group = save_data.create_group("train")
        test_group = save_data.create_group("test")

        train_group.create_dataset(name="frame",
                                   data=frames[:num_train],
                                   dtype=np.uint8)
        train_group.create_dataset(name="steering",
                                   data=steering[:num_train],
                                   dtype=np.float32)

        test_group.create_dataset(name="frame",
                                  data=frames[num_train:],
                                  dtype=np.uint8)
        test_group.create_dataset(name="steering",
                                  data=steering[num_train:],
                                  dtype=np.float32)

        save_data.flush()
        save_data.close()

    frames /= 255.
    frames -= np.mean(frames, keepdims=True)
    X_test = frames[num_train:]
    Y_test = steering[num_train:]

    del frames, steering

    if channel_id != 2:
        X_test = X_test[:, :, :, channel_id][..., np.newaxis]

    print("[MESSAGE] Number of samples %d" % (num_samples))
    print("[MESSAGE] Number of test samples %d" % (X_test.shape[0]))

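    # load the best checkpoint saved during training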
    model_file = model_file_base + "-best.hdf5"
    model = load_model(model_file)

    Y_predict = model.predict(X_test)

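    # persist ground truth and predictions together for later analysis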
    with open(model_file_base + "-prediction.pkl", "wb") as f:
        pickle.dump([Y_test, Y_predict], f)
Example #4
from __future__ import print_function
import os

import numpy as np
import matplotlib.pyplot as plt

import spiker
from spiker.models import utils
from spiker.data import ddd17

# load and process data
data_path = os.path.join(spiker.SPIKER_DATA, "ddd17",
                         "aug09/rec1502336427-export.hdf5")
frame_cut = [100, 400]
frames, steering = ddd17.prepare_train_data(
    data_path, y_name="steering",
    frame_cut=frame_cut)
frames /= 255.
frames -= np.mean(frames, keepdims=True)
num_samples = frames.shape[0]
num_train = int(num_samples*0.7)
X_train = frames[:num_train]
Y_train = steering[:num_train]
X_test = frames[num_train:]
Y_test = steering[num_train:]
del frames, steering

model_name_base = "steering-night-6-"
# load model
model_path = os.path.join(
    spiker.SPIKER_EXPS, model_name_base+"full",
Example #5
def resnet_exp(model_name, data_name, channel_id, stages, blocks, filter_list,
               nb_epoch, batch_size, frame_cut):
    """Perform ResNet experiment."""
    model_path = os.path.join(spiker.HOME, "data", "exps", "ral-exps",
                              model_name)
    if not os.path.isdir(model_path):
        os.makedirs(model_path)
    else:
        raise ValueError("[MESSAGE] This experiment has been done before."
                         " Create a new config model if you need.")
    model_file_base = os.path.join(model_path, model_name)

    # print model info
    print("[MESSAGE] Model Name: %s" % (model_name))
    print("[MESSAGE] Number of epochs: %d" % (nb_epoch))
    print("[MESSAGE] Batch Size: %d" % (batch_size))
    print("[MESSAGE] Number of stages: %d" % (stages))
    print("[MESSAGE] Number of blocks: %d" % (blocks))

    # load data
    data_path = os.path.join(spiker.HOME, "data", "exps", "data", "ddd17",
                             data_name)
    if not os.path.isfile(data_path):
        raise ValueError("This dataset does not exist at %s" % (data_path))
    print("[MESSAGE] Dataset %s" % (data_path))
    assert len(frame_cut) == 2
    print("[MESSAGE] Frame cut: first at %d, last at %d" %
          (frame_cut[0] * 2, -frame_cut[1] * 2))
    frames, steering = ddd17.prepare_train_data(
        data_path,
        y_name="steering",
        frame_cut=[frame_cut[0] * 2, frame_cut[1] * 2],
        speed_threshold=15.)
    frames /= 255.
    frames -= np.mean(frames, keepdims=True)
    num_samples = frames.shape[0]
    num_train = int(num_samples * 0.7)
    X_train = frames[:num_train]
    Y_train = steering[:num_train]
    X_test = frames[num_train:]
    Y_test = steering[num_train:]

    del frames, steering

    if channel_id != 2:
        X_train = X_train[:, :, :, channel_id][..., np.newaxis]
        X_test = X_test[:, :, :, channel_id][..., np.newaxis]

    print("[MESSAGE] Number of samples %d" % (num_samples))
    print("[MESSAGE] Number of train samples %d" % (X_train.shape[0]))
    print("[MESSAGE] Number of test samples %d" % (X_test.shape[0]))

    # setup image shape
    input_shape = (X_train.shape[1], X_train.shape[2], X_train.shape[3])

    # Build model
    model = resnet.resnet_builder(model_name=model_name,
                                  input_shape=input_shape,
                                  batch_size=batch_size,
                                  filter_list=filter_list,
                                  kernel_size=(3, 3),
                                  output_dim=1,
                                  stages=stages,
                                  blocks=blocks,
                                  bottleneck=False,
                                  network_type="regress")

    model.summary()
    model.compile(loss='mean_squared_error', optimizer="adam", metrics=["mse"])
    print("[MESSAGE] Model is compiled.")
    model_file = model_file_base + "-best.hdf5"
    checkpoint = ModelCheckpoint(model_file,
                                 monitor='val_mean_squared_error',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')

    csv_his_log = os.path.join(model_path, "csv_history.log")
    csv_logger = CSVLogger(csv_his_log, append=True)

    callbacks_list = [checkpoint, csv_logger]

    # training
    model.fit(x=X_train,
              y=Y_train,
              batch_size=batch_size,
              epochs=nb_epoch,
              validation_data=(X_test, Y_test),
              callbacks=callbacks_list)
Example #6
model_base = "-day-4-"
exp_type = "steering"
sensor_type = ["full", "dvs", "aps"]

load_prediction = os.path.join(spiker.SPIKER_EXTRA,
                               "pred" + model_base + "result-run-3")

if os.path.isfile(load_prediction):
    print("[MESSAGE] Prediction available")
    with open(load_prediction, "r") as f:
        (steer_full, steer_dvs, steer_aps) = pickle.load(f)
        f.close()
else:
    # export ground truth
    test_frames, _ = ddd17.prepare_train_data(data_path,
                                              y_name="steering",
                                              frame_cut=frame_cut)
    test_frames /= 255.
    test_frames -= np.mean(test_frames, keepdims=True)
    num_samples = test_frames.shape[0]
    num_train = int(num_samples * 0.7)
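    # reuse the same 70/30 split as training so X_test matches the frames
    # the models were validated on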
    X_test = test_frames[num_train:]
    del test_frames

    # steering full
    steer_full = get_prediction(X_test, exp_type, model_base, sensor_type[0],
                                "steering-day-4-full-103-0.02.hdf5")
    print("[MESSAGE] Steering Full")
    # steering dvs
    steer_dvs = get_prediction(X_test[:, :, :, 0][..., np.newaxis], exp_type,
                               model_base, sensor_type[1],