def default_lstm(self):
    """Build the default CNN + stacked-LSTM driving model.

    Consumes a sequence of ``self.seq_length`` frames, each of shape
    ``self.image_shape``, runs a time-distributed convolutional stack,
    then two LSTMs, and emits two scalar linear regression heads:
    steering angle and throttle.

    Returns:
        A compiled Keras ``Model`` mapping ``img_in`` to
        ``[angle_out, throttle_out]``, trained with MSE on both heads
        at equal (0.5 / 0.5) loss weight, Adam optimizer.
    """
    img_seq_shape = (self.seq_length, ) + self.image_shape
    img_in = Input(shape=img_seq_shape, name='img_in')

    x = img_in
    # Drop the top 60 rows of every frame (sky/horizon) before the convs.
    x = TD(Cropping2D(cropping=((60, 0), (0, 0))))(x)
    x = TD(Convolution2D(24, (5, 5), strides=(2, 2), activation='relu'))(x)
    x = TD(Convolution2D(32, (5, 5), strides=(2, 2), activation='relu'))(x)
    x = TD(Convolution2D(64, (3, 3), strides=(2, 2), activation='relu'))(x)
    x = TD(Convolution2D(64, (3, 3), strides=(1, 1), activation='relu'))(x)
    x = TD(Convolution2D(64, (3, 3), strides=(1, 1), activation='relu'))(x)
    x = TD(Flatten(name='flattened'))(x)
    x = TD(Dense(100, activation='relu'))(x)
    x = TD(Dropout(.1))(x)

    # Two stacked LSTMs: the first preserves the time axis, the second
    # collapses the sequence into a single feature vector.
    x = LSTM(128, return_sequences=True, name="LSTM_seq")(x)
    x = Dropout(.1)(x)
    x = LSTM(128, return_sequences=False, name="LSTM_out")(x)
    x = Dropout(.1)(x)
    x = Dense(50, activation='relu')(x)
    x = Dropout(.1)(x)

    # Continuous regression outputs.
    angle_out = Dense(1, activation='linear', name='angle_out')(x)
    # continous output of throttle
    throttle_out = Dense(1, activation='linear', name='throttle_out')(x)

    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])
    model.compile(optimizer='adam',
                  loss={
                      'angle_out': 'mean_squared_error',
                      'throttle_out': 'mean_squared_error'
                  },
                  loss_weights={
                      'angle_out': 0.5,
                      'throttle_out': .5
                  })
    return model
def rnn_lstm(seq_length=3, num_outputs=2, image_shape=(120, 160, 3)):
    """Build a Sequential CNN + stacked-LSTM model.

    Args:
        seq_length: number of frames per input sequence.
        num_outputs: width of the final linear output layer.
        image_shape: (height, width, channels) of each frame.

    Returns:
        An uncompiled Keras ``Sequential`` model whose input shape is
        ``(seq_length,) + image_shape`` and whose output is
        ``num_outputs`` linear units named ``model_outputs``.
    """
    img_seq_shape = (seq_length, ) + image_shape
    drop_out = 0.3

    # NOTE(review): the original built an unused `Input(batch_shape=...)`
    # tensor here; a Sequential model takes its input shape from the first
    # layer's `input_shape`, so the dead local has been removed.
    x = Sequential()
    # Trim 40 pixels off the top of every frame (sky/horizon).
    x.add(TD(Cropping2D(cropping=((40, 0), (0, 0))),
             input_shape=img_seq_shape))
    x.add(TD(BatchNormalization()))
    x.add(TD(Convolution2D(24, (5, 5), strides=(2, 2), activation='relu')))
    x.add(TD(Dropout(drop_out)))
    x.add(TD(Convolution2D(32, (5, 5), strides=(2, 2), activation='relu')))
    x.add(TD(Dropout(drop_out)))
    x.add(TD(Convolution2D(32, (3, 3), strides=(2, 2), activation='relu')))
    x.add(TD(Dropout(drop_out)))
    x.add(TD(Convolution2D(32, (3, 3), strides=(1, 1), activation='relu')))
    x.add(TD(Dropout(drop_out)))
    x.add(TD(MaxPooling2D(pool_size=(2, 2))))
    x.add(TD(Flatten(name='flattened')))
    x.add(TD(Dense(100, activation='relu')))
    x.add(TD(Dropout(drop_out)))

    # Sequence modelling: first LSTM keeps the time axis, second collapses it.
    x.add(LSTM(128, return_sequences=True, name="LSTM_seq"))
    x.add(Dropout(.1))
    x.add(LSTM(128, return_sequences=False, name="LSTM_out"))
    x.add(Dropout(.1))
    x.add(Dense(128, activation='relu'))
    x.add(Dropout(.1))
    x.add(Dense(64, activation='relu'))
    x.add(Dense(10, activation='relu'))
    x.add(Dense(num_outputs, activation='linear', name='model_outputs'))
    return x
def rnn_lstm(seq_length=3,
             num_outputs=2,
             input_shape=(120, 160, 3),
             roi_crop=(0, 0)):
    """Build a Sequential CNN + stacked-LSTM model (crop done upstream).

    Args:
        seq_length: number of frames per input sequence.
        num_outputs: width of the final linear output layer.
        input_shape: (height, width, channels) of each frame before the
            ROI adjustment.
        roi_crop: (top, bottom) crop already applied elsewhere; used only
            to adjust the expected input shape.

    Returns:
        An uncompiled Keras ``Sequential`` model ending in ``num_outputs``
        linear units named ``model_outputs``.
    """
    # We now expect that cropping is done elsewhere; we will adjust our
    # expected image size here.
    input_shape = adjust_input_shape(input_shape, roi_crop)

    img_seq_shape = (seq_length, ) + input_shape
    drop_out = 0.3

    # NOTE(review): the original built an unused `Input(batch_shape=...)`
    # tensor here; Sequential gets its input shape from the first layer's
    # `input_shape`, so the dead local has been removed.
    x = Sequential()
    x.add(
        TD(Convolution2D(24, (5, 5), strides=(2, 2), activation='relu'),
           input_shape=img_seq_shape))
    x.add(TD(Dropout(drop_out)))
    x.add(TD(Convolution2D(32, (5, 5), strides=(2, 2), activation='relu')))
    x.add(TD(Dropout(drop_out)))
    x.add(TD(Convolution2D(32, (3, 3), strides=(2, 2), activation='relu')))
    x.add(TD(Dropout(drop_out)))
    x.add(TD(Convolution2D(32, (3, 3), strides=(1, 1), activation='relu')))
    x.add(TD(Dropout(drop_out)))
    x.add(TD(MaxPooling2D(pool_size=(2, 2))))
    x.add(TD(Flatten(name='flattened')))
    x.add(TD(Dense(100, activation='relu')))
    x.add(TD(Dropout(drop_out)))

    # Sequence modelling: first LSTM keeps the time axis, second collapses it.
    x.add(LSTM(128, return_sequences=True, name="LSTM_seq"))
    x.add(Dropout(.1))
    x.add(LSTM(128, return_sequences=False, name="LSTM_fin"))
    x.add(Dropout(.1))
    x.add(Dense(128, activation='relu'))
    x.add(Dropout(.1))
    x.add(Dense(64, activation='relu'))
    x.add(Dense(10, activation='relu'))
    x.add(Dense(num_outputs, activation='linear', name='model_outputs'))
    return x
def rnn_lstm(seq_length=2, num_outputs=2, image_shape=(120, 2 * 160)):
    """Build a Sequential CNN + stacked-LSTM model for single-channel
    side-by-side (stereo) frames.

    Each frame arrives as a 2-D ``(120, 320)`` array; a Reshape layer adds
    the trailing channel axis before the convolutional stack.

    Args:
        seq_length: number of frames per input sequence.
        num_outputs: width of the final linear output layer.
        image_shape: 2-D (height, width) of each frame.

    Returns:
        An uncompiled Keras ``Sequential`` model ending in ``num_outputs``
        linear units named ``model_outputs``.
    """
    from tensorflow.python.keras.layers import LSTM
    from tensorflow.python.keras.models import Sequential
    from tensorflow.python.keras.layers.wrappers import TimeDistributed as TD

    drop_out = 0.3
    img_seq_shape = (seq_length, ) + image_shape

    # NOTE(review): removed the unused `concatenate` import and the unused
    # `Input(batch_shape=...)` tensor; Sequential takes its input shape from
    # the first layer, and the redundant `input_shape=` kwarg on the second
    # (Cropping2D) layer is ignored by Keras, so it was dropped as well.
    x = Sequential()
    # Add a trailing channel axis: (seq, H, W) -> (seq, H, W, 1).
    x.add(
        TD(Reshape(target_shape=image_shape + (1, )),
           input_shape=img_seq_shape))
    # Trim 40 pixels off the top of every frame (sky/horizon).
    x.add(TD(Cropping2D(cropping=((40, 0), (0, 0)))))
    x.add(TD(BatchNormalization()))
    x.add(TD(Conv2D(24, (5, 5), strides=(2, 2), activation='relu')))
    x.add(TD(Dropout(drop_out)))
    x.add(TD(Conv2D(32, (5, 5), strides=(2, 2), activation='relu')))
    x.add(TD(Dropout(drop_out)))
    x.add(TD(Conv2D(32, (3, 3), strides=(2, 2), activation='relu')))
    x.add(TD(Dropout(drop_out)))
    x.add(TD(Conv2D(32, (3, 3), strides=(1, 1), activation='relu')))
    x.add(TD(Dropout(drop_out)))
    x.add(TD(MaxPool2D(pool_size=(2, 2))))
    x.add(TD(Flatten(name='flattened')))
    x.add(TD(Dense(100, activation='relu')))
    x.add(TD(Dropout(drop_out)))

    # Sequence modelling: first LSTM keeps the time axis, second collapses it.
    x.add(LSTM(128, return_sequences=True, name="LSTM_seq"))
    x.add(Dropout(.1))
    x.add(LSTM(128, return_sequences=False, name="LSTM_out"))
    x.add(Dropout(.1))
    x.add(Dense(128, activation='relu'))
    x.add(Dropout(.1))
    x.add(Dense(64, activation='relu'))
    x.add(Dense(10, activation='relu'))
    x.add(Dense(num_outputs, activation='linear', name='model_outputs'))
    return x