Exemplo n.º 1
0
    def __init__(self):
        """Build and compile a fully-convolutional encoder/decoder with
        element-wise-maximum skip connections, for arbitrary-size RGB input.

        Stores the compiled model on `self.model` and a default batch
        size on `self.batch_size`.
        """
        inp = Input(shape=(None, None, 3))

        # Encoder: two same-padded ReLU convolutions.
        enc1 = Convolution2D(64, kernel_size=(3, 3), padding='same',
                             activation='relu')(inp)
        enc2 = Convolution2D(64, kernel_size=(5, 5), padding='same',
                             activation='relu')(enc1)

        # Decoder: transposed convolutions, each merged with the matching
        # encoder feature map via an element-wise maximum (skip connection).
        dec1 = Convolution2DTranspose(64, kernel_size=(3, 3), padding='same',
                                      activation='relu')(enc2)
        skip1 = merge.maximum([dec1, enc2])
        dec2 = Convolution2DTranspose(64, kernel_size=(3, 3), padding='same',
                                      activation='relu')(skip1)
        skip2 = merge.maximum([dec2, enc1])

        # Project back down to 3 channels (RGB output).
        out = Convolution2D(3, (5, 5), padding='same',
                            activation='relu')(skip2)

        self.model = Model(inputs=inp, outputs=out)
        self.model.compile(optimizer='adam',
                           loss='mean_squared_error',
                           metrics=['acc'])
        self.model.summary()
        self.batch_size = 128
Exemplo n.º 2
0
def model():
    """Build the NVIDIA-PilotNet-style regression network.

    Five ReLU convolutions and four ReLU dense layers, each followed by
    batch normalization, over a (66, 200, 1) grayscale input, ending in
    one linear output unit. Compiled with MSE loss and Adam(lr=0.001).
    Prints the summary and returns the compiled Sequential model.
    """
    net = Sequential()

    # (filters, kernel, stride) for each of the five conv stages.
    conv_specs = ((24, (5, 5), (2, 2)),
                  (36, (5, 5), (2, 2)),
                  (48, (5, 5), (2, 2)),
                  (64, (3, 3), (1, 1)),
                  (64, (3, 3), (1, 1)))

    for idx, (filters, kernel, stride) in enumerate(conv_specs):
        kwargs = {'kernel_size': kernel,
                  'strides': stride,
                  'activation': 'relu'}
        if idx == 0:
            # Only the first layer declares the input shape.
            kwargs['input_shape'] = (66, 200, 1)
        net.add(Convolution2D(24 if idx == 0 else conv_specs[idx][0], **kwargs))
        net.add(BatchNormalization())

    net.add(Flatten())

    # Fully connected head, each hidden layer batch-normalized.
    for units in (1164, 200, 50, 10):
        net.add(Dense(units, activation='relu'))
        net.add(BatchNormalization())

    # Output layer: a single continuous value.
    net.add(Dense(1))

    net.compile(loss="MSE", optimizer=Adam(lr=0.001))

    print(net.summary())
    return net
Exemplo n.º 3
0
def lstm_categorical(input_dimension=(7, 120, 160, 3)):
    """Sequence model: a time-distributed CNN feature extractor feeding
    an LSTM, with a 15-bin softmax steering head and a ReLU throttle head.

    Args:
        input_dimension: (seq_len, height, width, channels) of the image
            sequence input.

    Returns:
        Compiled Keras Model with outputs [angle_out, throttle_out].
    """
    # First layer, input layer; shape comes from camera resolution, RGB.
    img_in = Input(shape=(input_dimension), name='img_in')

    # BUG FIX: the Keras 1 form `Convolution2D(32, 8, 8, subsample=...)`
    # is a TypeError under the Keras 2 API used elsewhere in this file;
    # use kernel_size tuples and `strides` instead.
    x = TimeDistributed(
        Convolution2D(32, (8, 8), strides=(4, 4), activation='relu'))(img_in)
    x = TimeDistributed(
        Convolution2D(64, (4, 4), strides=(2, 2), activation='relu'))(x)
    x = TimeDistributed(Convolution2D(64, (3, 3), activation='relu'))(x)
    x = TimeDistributed(Flatten())(x)

    x = LSTM(512, activation='tanh')(x)

    # Steering: categorical distribution over 15 angle bins.
    angle_out = Dense(15, activation='softmax', name='angle_out')(x)

    # Throttle: a single continuous, non-negative value.
    throttle_out = Dense(1, activation='relu', name='throttle_out')(x)

    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])

    adam = Adam(lr=0.0001, clipnorm=1.0)
    model.compile(optimizer=adam,
                  loss={
                      'angle_out': 'categorical_crossentropy',
                      'throttle_out': 'mean_absolute_error'
                  },
                  loss_weights={
                      'angle_out': 0.9,
                      'throttle_out': .001
                  })

    return model
Exemplo n.º 4
0
def default_Q_categorical():
    """
    For pretrained DQN Q network.

    Three-conv feature extractor over a (120, 160, 4) frame stack, a
    512-unit dense layer, and 15 linear Q-value outputs (one per
    steering-angle bin). Returned uncompiled because it is not trained
    directly here.
    """
    model = Sequential()
    # BUG FIX: `subsample`/`border_mode` are Keras 1 keywords and raise
    # a TypeError under the Keras 2 API used elsewhere in this file;
    # the Keras 2 equivalents are `strides` and `padding`.
    model.add(
        Convolution2D(32, (8, 8),
                      strides=(4, 4),
                      padding='same',
                      input_shape=(120, 160, 4)))  #80*80*4
    model.add(Activation('relu'))
    model.add(Convolution2D(64, (4, 4), strides=(2, 2), padding='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, (3, 3), strides=(1, 1), padding='same'))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))

    # 15 categorical bins for Steering angles
    model.add(Dense(15, activation="linear"))

    # No training
    #adam = Adam(lr=1e-4)
    #model.compile(loss='mse',optimizer=adam)
    print("We finished building the Q model")

    return model
Exemplo n.º 5
0
def marks_linear():
    """Build a linear steering/throttle model.

    Crops the top 42 rows of a 120x160 RGB frame, applies five ReLU
    convolutions, two dropout-regularized linear dense layers, and two
    single-unit linear heads (angle_out, throttle_out). Compiled with
    equally weighted MSE losses.
    """
    img_in = Input(shape=(120, 160, 3), name='img_in')
    net = Cropping2D(cropping=((42, 0), (0, 0)))(img_in)  # drop top 42 rows
    #net = Lambda(lambda v: v / 127.5 - 1.)(net)  # normalize and re-center

    for filters, kernel, stride in ((24, (5, 5), (2, 2)),
                                    (32, (5, 5), (2, 2)),
                                    (64, (5, 5), (2, 2)),
                                    (64, (3, 3), (2, 2)),
                                    (64, (3, 3), (1, 1))):
        net = Convolution2D(filters, kernel, strides=stride,
                            activation='relu')(net)

    net = Flatten(name='flattened')(net)
    net = Dense(100, activation='linear')(net)
    net = Dropout(.1)(net)
    net = Dense(50, activation='linear')(net)
    net = Dropout(.1)(net)

    # Single linear unit for the steering angle.
    angle_out = Dense(1, activation='linear', name='angle_out')(net)

    # Single linear unit for the continuous throttle.
    throttle_out = Dense(1, activation='linear', name='throttle_out')(net)

    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])
    model.compile(optimizer='adam',
                  loss={'angle_out': 'mean_squared_error',
                        'throttle_out': 'mean_squared_error'},
                  loss_weights={'angle_out': 0.5, 'throttle_out': .5})
    print("Created default_linear model")

    return model
Exemplo n.º 6
0
def vgg_face_blank():
    """Assemble the (untrained) VGG-Face architecture as a Sequential
    model: five `convblock` stages plus fully-convolutional fc6/fc7/fc8
    heads, ending in a softmax over 2622 identities."""
    mdl = Sequential()
    # Dummy identity permutation whose only job is to declare the
    # (224, 224, 3) input shape. WARNING : 0 is the sample dim.
    mdl.add(Permute((1, 2, 3), input_shape=(224, 224, 3)))

    # Model body: five conv blocks with growing filter counts.
    for filters, block_id, depth in ((64, 1, 2),
                                     (128, 2, 2),
                                     (256, 3, 3),
                                     (512, 4, 3),
                                     (512, 5, 3)):
        for layer in convblock(filters, block_id, bits=depth):
            mdl.add(layer)

    # Model head: fully-convolutional equivalents of the VGG fc layers.
    mdl.add(Convolution2D(4096, kernel_size=(7, 7), activation='relu', name='fc6'))
    mdl.add(Dropout(0.5))
    mdl.add(Convolution2D(4096, kernel_size=(1, 1), activation='relu', name='fc7'))
    mdl.add(Dropout(0.5))
    mdl.add(Convolution2D(2622, kernel_size=(1, 1), activation='relu', name='fc8'))
    mdl.add(Flatten())
    mdl.add(Activation('softmax'))

    return mdl
Exemplo n.º 7
0
    def create_model(self, num_outputs, input_shape=(120, 160, 3), drop=0.2):
        """
        Define the NN layers for this model: five conv layers, each with
        spatial dropout, then two dropout-regularized dense layers into
        a softmax over `num_outputs` classes. Returns the uncompiled
        Model.
        """
        img_in = Input(shape=input_shape, name='img_in')

        net = img_in
        conv_layout = ((24, (5, 5), (2, 2)),
                       (32, (5, 5), (1, 1)),
                       (64, (5, 5), (1, 1)),
                       (64, (3, 3), (1, 1)),
                       (64, (3, 3), (1, 1)))
        for idx, (filters, kernel, stride) in enumerate(conv_layout, start=1):
            net = Convolution2D(filters, kernel, strides=stride,
                                activation='relu',
                                name="conv2d_%d" % idx)(net)
            net = SpatialDropout2D(drop)(net)

        net = Flatten(name='flattened')(net)
        net = Dense(100, activation='relu')(net)
        net = Dropout(drop)(net)
        net = Dense(50, activation='relu')(net)
        net = Dropout(drop)(net)

        outputs = Dense(num_outputs, activation='softmax', name="outputs")(net)

        return Model(inputs=img_in, outputs=outputs)
Exemplo n.º 8
0
def entrenamiento_red2():
	"""Build, train, and save CNN #2 (two conv/pool stages + dense head).

	Reads module-level globals: filtrosConv1/2, tamanio_filtro1/2,
	tamanio_pool, altura, longitud, clases, lr, pasos, epocas,
	pasos_validacion, and the generators imagen_entrenamiento_red2 /
	imagen_validacion_red2 — TODO confirm these are defined at import
	time. Saves the model and weights under ./modelo/red2/.
	"""
	cnn = Sequential()
	cnn.add(Convolution2D(filtrosConv1, tamanio_filtro1, padding='same', input_shape=(altura, longitud, 3), activation='relu'))
	cnn.add(MaxPooling2D(pool_size=tamanio_pool))
	cnn.add(Convolution2D(filtrosConv2, tamanio_filtro2, padding='same', activation='relu'))
	cnn.add(MaxPooling2D(pool_size=tamanio_pool))
	# The image is now a stack of feature maps; flatten it to a vector.
	cnn.add(Flatten()) 
	# After flattening, connect the fully-connected layers.
	cnn.add(Dense(256,activation='relu'))
	# Randomly switch off 50% of the dense layer's neurons at each step;
	# this is done to avoid over-fitting.
	cnn.add(Dropout(0.5))
	# Connect to the output layer.
	cnn.add(Dense(clases, activation='softmax'))
	# Parameters to optimize the algorithm.
	cnn.compile(loss='categorical_crossentropy', optimizer=optimizers.Adam(lr=lr), metrics=['accuracy'])

	cnn.fit_generator(imagen_entrenamiento_red2, steps_per_epoch=pasos, epochs=epocas, validation_data=imagen_validacion_red2, validation_steps=pasos_validacion)
	# NOTE: `dir` shadows the builtin of the same name.
	dir='./modelo/red2/'

	if not os.path.exists(dir):
		os.mkdir(dir)
	cnn.save('./modelo/red2/modelo.h5')
	cnn.save_weights('./modelo/red2/pesos.h5')
Exemplo n.º 9
0
def default_categorical():
    """default_categorical model from donkey car: cropped 120x160 RGB
    input, four conv layers, two dense layers, and a 3-way softmax over
    steering-angle bins; compiled with categorical cross-entropy."""
    img_in = Input(shape=(120, 160, 3), name='img_in')
    # Crop 30 rows off the top and 10 off the bottom of the frame.
    net = Cropping2D(cropping=((30, 10), (0, 0)))(img_in)

    for filters, kernel, stride in ((24, (5, 5), (2, 2)),
                                    (32, (5, 5), (2, 2)),
                                    (64, (5, 5), (2, 2)),
                                    (64, (3, 3), (1, 1))):
        net = Convolution2D(filters, kernel, strides=stride,
                            activation='relu')(net)
    # net = Convolution2D(64, (3, 3), strides=(1, 1), activation='relu')(net)

    net = Flatten(name='flattened')(net)
    net = Dense(100, activation='relu')(net)
    net = Dropout(.1)(net)
    net = Dense(50, activation='relu')(net)
    net = Dropout(.1)(net)

    # Categorical output over 3 angle bins.
    angle_out = Dense(3, activation='softmax', name='angle_cat_out')(net)

    # Continuous throttle output (disabled):
    # throttle_out = Dense(1, activation='relu', name='throttle_out')(net)

    model = Model(inputs=[img_in], outputs=[angle_out])
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
Exemplo n.º 10
0
def make_model():
    """Stereo-camera model: left and right 240x320 RGB frames are stacked
    channel-wise into a 6-channel volume, passed through five conv layers
    and two dense layers, producing a sigmoid angle head (via a 15-bin
    softmax) and a ReLU throttle head. Returned uncompiled.
    """
    left_image_in = Input(shape=(240, 320, 3), name='img_left')
    right_image_in = Input(shape=(240, 320, 3), name='img_right')
    # Concat into a 6 channel input volume.
    # BUG FIX: axis=2 concatenated along the *width* axis of the
    # channels-last (batch, height, width, channels) tensor, producing a
    # (240, 640, 3) volume; the channels axis is -1, which yields the
    # intended (240, 320, 6) volume.
    x = concatenate([left_image_in, right_image_in], axis=-1)
    # FCN layers
    x = Convolution2D(24, (5, 5), strides=(2, 2), activation='relu')(x)
    x = Convolution2D(32, (5, 5), strides=(2, 2), activation='relu')(x)
    x = Convolution2D(64, (5, 5), strides=(2, 2), activation='relu')(x)
    x = Convolution2D(64, (3, 3), strides=(2, 2), activation='relu')(x)
    x = Convolution2D(64, (3, 3), strides=(1, 1), activation='relu')(x)
    # Dense layers
    x = Flatten(name='flattened')(x)
    x = Dense(100, activation='relu')(x)
    x = Dropout(.1)(x)
    x = Dense(50, activation='relu')(x)
    x = Dropout(.1)(x)
    # Steering angle: 15-bin softmax reduced to one sigmoid value.
    angle = Dense(15, activation='softmax', name='angle_cat_out')(x)
    angle_out = Dense(1, activation='sigmoid', name='angle_out')(angle)
    # throttle output
    throttle_out = Dense(1, activation='relu', name='throttle_out')(x)
    model = Model(inputs=[left_image_in, right_image_in],
                  outputs=[angle_out, throttle_out])
    # model.compile(optimizer='adam',
    #               loss={'angle_out': 'mean_squared_error',
    #                     'throttle_out': 'mean_absolute_error'},
    #               loss_weights={'angle_out': 0.9, 'throttle_out': .01})
    return model
Exemplo n.º 11
0
def rnn_lstm(seq_length=3, num_outputs=2, image_shape=(120, 160, 3)):
    """Recurrent model: a time-distributed conv stack over a sequence of
    `seq_length` frames, two LSTM layers, and a dense head producing
    `num_outputs` linear values. Returned uncompiled.

    NOTE: the original also built an unused `Input(batch_shape=...)`
    tensor here; the Sequential model declares its own input_shape, so
    that dead local has been removed.
    """
    img_seq_shape = (seq_length, ) + image_shape
    drop_out = 0.3

    x = Sequential()
    x.add(TD(Cropping2D(cropping=((40, 0), (0, 0))),
             input_shape=img_seq_shape))  # trim 40 pixels off top
    x.add(TD(BatchNormalization()))
    x.add(TD(Convolution2D(24, (5, 5), strides=(2, 2), activation='relu')))
    x.add(TD(Dropout(drop_out)))
    x.add(TD(Convolution2D(32, (5, 5), strides=(2, 2), activation='relu')))
    x.add(TD(Dropout(drop_out)))
    x.add(TD(Convolution2D(32, (3, 3), strides=(2, 2), activation='relu')))
    x.add(TD(Dropout(drop_out)))
    x.add(TD(Convolution2D(32, (3, 3), strides=(1, 1), activation='relu')))
    x.add(TD(Dropout(drop_out)))
    x.add(TD(MaxPooling2D(pool_size=(2, 2))))
    x.add(TD(Flatten(name='flattened')))
    x.add(TD(Dense(100, activation='relu')))
    x.add(TD(Dropout(drop_out)))

    x.add(LSTM(128, return_sequences=True, name="LSTM_seq"))
    x.add(Dropout(.1))
    x.add(LSTM(128, return_sequences=False, name="LSTM_out"))
    x.add(Dropout(.1))
    x.add(Dense(128, activation='relu'))
    x.add(Dropout(.1))
    x.add(Dense(64, activation='relu'))
    x.add(Dense(10, activation='relu'))
    x.add(Dense(num_outputs, activation='linear', name='model_outputs'))

    return x
Exemplo n.º 12
0
    def init_model(self):
        """Build `self.cnn`, a VGG-style Sequential image classifier.

        Two conv/conv/pool/dropout stages (32 then 64 filters), one
        fully-connected layer, and a softmax over `self.n_classes`.
        Reads `self.shape` (input image shape) and `self.n_classes`;
        layer hyper-parameters come from the `constant` module.
        """
        self.cnn = Sequential()
        self.cnn.add(
            Convolution2D(32,
                          3,
                          padding=constant.PADDING_SAME,
                          input_shape=self.shape))
        self.cnn.add(Activation(constant.RELU_ACTIVATION_FUNCTION))
        # NOTE(review): under the Keras 2 signature Conv2D(filters,
        # kernel_size, strides), `Convolution2D(32, 3, 3)` means a 3x3
        # kernel with stride 3; if the Keras 1 meaning (3x3 kernel,
        # stride 1) was intended this should be `Convolution2D(32, (3, 3))`.
        # Confirm against the Keras version in use; same applies below.
        self.cnn.add(Convolution2D(32, 3, 3))
        self.cnn.add(Activation(constant.RELU_ACTIVATION_FUNCTION))
        self.cnn.add(MaxPooling2D(pool_size=(2, 2)))
        self.cnn.add(Dropout(constant.DROP_OUT_O_25))

        self.cnn.add(Convolution2D(64, 3, padding=constant.PADDING_SAME))
        self.cnn.add(Activation(constant.RELU_ACTIVATION_FUNCTION))
        self.cnn.add(Convolution2D(64, 3, 3))
        self.cnn.add(Activation(constant.RELU_ACTIVATION_FUNCTION))
        self.cnn.add(MaxPooling2D(pool_size=(2, 2)))
        self.cnn.add(Dropout(constant.DROP_OUT_O_25))

        self.cnn.add(Flatten())
        self.cnn.add(Dense(constant.NUMBER_FULLY_CONNECTED))
        self.cnn.add(Activation(constant.RELU_ACTIVATION_FUNCTION))
        self.cnn.add(Dropout(constant.DROP_OUT_0_50))
        self.cnn.add(Dense(self.n_classes))
        self.cnn.add(Activation(constant.SOFTMAX_ACTIVATION_FUNCTION))
        self.cnn.summary()
Exemplo n.º 13
0
def default_linear_master():
    """Donkeycar-style linear model: five conv layers over 120x160 RGB,
    two linear dense layers, and single-unit linear angle/throttle
    heads, compiled with equally weighted MSE losses."""
    img_in = Input(shape=(120, 160, 3), name='img_in')

    net = img_in
    for filters, kernel, stride in ((24, (5, 5), (2, 2)),
                                    (32, (5, 5), (2, 2)),
                                    (64, (5, 5), (2, 2)),
                                    (64, (3, 3), (2, 2)),
                                    (64, (3, 3), (1, 1))):
        net = Convolution2D(filters, kernel, strides=stride,
                            activation='relu')(net)

    net = Flatten(name='flattened')(net)
    net = Dense(100, activation='linear')(net)
    net = Dropout(.1)(net)
    net = Dense(50, activation='linear')(net)
    net = Dropout(.1)(net)

    # Single linear unit for the steering angle.
    angle_out = Dense(1, activation='linear', name='angle_out')(net)

    # Single linear unit for the continuous throttle.
    throttle_out = Dense(1, activation='linear', name='throttle_out')(net)

    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])
    model.compile(optimizer='adam',
                  loss={
                      'angle_out': 'mean_squared_error',
                      'throttle_out': 'mean_squared_error'
                  },
                  loss_weights={
                      'angle_out': 0.5,
                      'throttle_out': .5
                  })

    return model
Exemplo n.º 14
0
def default_n_linear(num_outputs):
    """Linear donkeycar model with `num_outputs` single-unit linear
    heads (named n_outputs0..), compiled with MSE. Crops 60 rows off the
    top and normalizes pixels to [-1, 1] before the conv stack."""
    img_in = Input(shape=(120, 160, 3), name='img_in')
    net = Cropping2D(cropping=((60, 0), (0, 0)))(img_in)  # trim 60 pixels off top
    net = Lambda(lambda v: v / 127.5 - 1.)(net)  # normalize and re-center

    for filters, kernel, stride in ((24, (5, 5), (2, 2)),
                                    (32, (5, 5), (2, 2)),
                                    (64, (5, 5), (1, 1)),
                                    (64, (3, 3), (1, 1)),
                                    (64, (3, 3), (1, 1))):
        net = Convolution2D(filters, kernel, strides=stride,
                            activation='relu')(net)

    net = Flatten(name='flattened')(net)
    net = Dense(100, activation='relu')(net)
    net = Dropout(.1)(net)
    net = Dense(50, activation='relu')(net)
    net = Dropout(.1)(net)

    outputs = [
        Dense(1, activation='linear', name='n_outputs' + str(i))(net)
        for i in range(num_outputs)
    ]

    model = Model(inputs=[img_in], outputs=outputs)
    model.compile(optimizer='adam', loss='mse')

    return model
Exemplo n.º 15
0
    def _residual_block(self, ip, id):
        """Residual block: conv -> BN -> ReLU -> conv -> BN, then an
        element-wise Add with the block input.

        Args:
            ip: input tensor.
            id: integer used to build unique layer names.

        Returns:
            The merged output tensor.
        """
        # Batch norm runs in training mode only when configured to train.
        mode = self.config.mode == 'train'
        channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
        init = ip

        x = Convolution2D(self.config.n, (3, 3),
                          activation='linear',
                          padding='same',
                          name='sr_res_conv_' + str(id) + '_1')(ip)

        x = BatchNormalization(axis=channel_axis,
                               name="sr_res_batchnorm_" + str(id) + "_1")(
                                   x, training=mode)
        x = Activation('relu', name="sr_res_activation_" + str(id) + "_1")(x)

        # BUG FIX: the second conv hard-coded 64 filters while the first
        # used self.config.n; the Add below requires both operands to
        # have matching channel counts, so any config with n != 64 would
        # fail. Use self.config.n consistently.
        x = Convolution2D(self.config.n, (3, 3),
                          activation='linear',
                          padding='same',
                          name='sr_res_conv_' + str(id) + '_2')(x)
        x = BatchNormalization(axis=channel_axis,
                               name="sr_res_batchnorm_" + str(id) + "_2")(
                                   x, training=mode)

        m = Add(name="sr_res_merge_" + str(id))([x, init])

        return m
Exemplo n.º 16
0
    def contruModelo(self):
        """Build and compile a three-stage conv/pool CNN ending in a
        softmax over `self.cantidad_acciones` actions, with MSE loss and
        RMSprop at learning rate `self.aprendizaje`."""
        cnn = Sequential()

        # First stage declares the input shape.
        cnn.add(
            Convolution2D(self.filtrosConv1,
                          self.tamano_filtro1,
                          padding="same",
                          input_shape=(self.altura, self.longitud, 3),
                          activation='relu'))
        cnn.add(MaxPooling2D(pool_size=self.tamano_pool))

        # The remaining two stages differ only in filter count and
        # kernel size.
        for filtros, tamano in ((self.filtrosConv2, self.tamano_filtro2),
                                (self.filtrosConv3, self.tamano_filtro3)):
            cnn.add(
                Convolution2D(filtros,
                              tamano,
                              padding="same",
                              activation='relu'))
            cnn.add(MaxPooling2D(pool_size=self.tamano_pool))

        cnn.add(Flatten())
        #cnn.add(Dense(16,activation='relu'))
        cnn.add(Dense(256, activation='relu'))  # sigmoid --- linear
        cnn.add(Dense(self.cantidad_acciones, activation='softmax'))  # tanh

        cnn.compile(loss='mse',
                    optimizer=optimizers.RMSprop(lr=self.aprendizaje))
        return cnn
    def create(self):
        """Build and compile the CNN classifier in `self.__classifier`:
        two conv/pool stages, a flatten, and a softmax over
        `self.__train_data_symbol_count` symbol classes."""
        # Step 1: convolution over the 50x50 single-channel input.
        self.__classifier.add(
            Convolution2D(5,
                          5,
                          input_shape=(50, 50, 1),
                          padding='same',
                          activation='relu'))

        # Step 2: pooling
        self.__classifier.add(MaxPooling2D(pool_size=(4, 4)))

        # Add a second convolutional layer. (The original also passed
        # input_shape=(50, 50, 1) here; Keras ignores input_shape on any
        # layer but the first in a Sequential model, so the misleading
        # argument has been removed.)
        self.__classifier.add(
            Convolution2D(15,
                          5,
                          padding='same',
                          activation='relu'))

        # Add another max pooling layer
        self.__classifier.add(MaxPooling2D(pool_size=(4, 4)))

        # Step 3: Flattening
        self.__classifier.add(Flatten())

        # Step 4: Full connection
        self.__classifier.add(
            Dense(units=self.__train_data_symbol_count, activation='softmax'))

        # Compiling the CNN
        self.__classifier.compile(optimizer='rmsprop',
                                  loss='categorical_crossentropy',
                                  metrics=['accuracy'])
def donkey_model():
    """Donkeycar-style network over 224x224 RGB with a single 5-unit
    linear head ('control_out'), compiled with categorical
    cross-entropy and accuracy as the metric."""
    img_in = Input(shape=(224, 224, 3), name='img_in')

    # Convolution2D class name is an alias for Conv2D.
    net = img_in
    for filters, kernel, stride in ((24, (5, 5), (2, 2)),
                                    (32, (5, 5), (2, 2)),
                                    (64, (5, 5), (2, 2)),
                                    (64, (3, 3), (2, 2)),
                                    (64, (3, 3), (1, 1))):
        net = Convolution2D(filters=filters, kernel_size=kernel,
                            strides=stride, activation='relu')(net)

    net = Flatten(name='flattened')(net)
    net = Dense(units=100, activation='linear')(net)
    net = Dropout(rate=.1)(net)
    net = Dense(units=50, activation='linear')(net)
    net = Dropout(rate=.1)(net)

    # NOTE(review): a linear activation is paired with
    # categorical_crossentropy below; that loss expects a probability
    # distribution (softmax) — confirm this pairing is intentional.
    control_out = Dense(units=5, activation='linear', name='control_out')(net)

    model = Model(inputs=[img_in], outputs=[control_out])

    model.compile(optimizer='adam',
                  loss={'control_out': 'categorical_crossentropy'},
                  metrics=['acc'])

    return model
def default_n_linear(num_outputs, input_shape=(256, 512, 3), roi_crop=(0, 0)):
    """Sequential linear model: five dropout-interleaved conv layers,
    two dense layers, and a `num_outputs`-unit linear output.

    `input_shape` is first adjusted for the region-of-interest crop via
    `adjust_input_shape`. Returned uncompiled.
    """
    drop = 0.1
    input_shape = adjust_input_shape(input_shape, roi_crop)

    net = Sequential()
    # First conv declares the input shape.
    net.add(Convolution2D(24, (5, 5), strides=(2, 2), activation='relu',
                          name="conv2d_1", input_shape=input_shape))
    net.add(Dropout(drop))
    for filters, kernel, stride, name in ((32, (5, 5), (2, 2), "conv2d_2"),
                                          (64, (5, 5), (2, 2), "conv2d_3"),
                                          (64, (3, 3), (1, 1), "conv2d_4"),
                                          (64, (3, 3), (1, 1), "conv2d_5")):
        net.add(Convolution2D(filters, kernel, strides=stride,
                              activation='relu', name=name))
        net.add(Dropout(drop))

    net.add(Flatten(name='flattened'))
    net.add(Dense(100, activation='relu'))
    net.add(Dropout(drop))
    net.add(Dense(50, activation='relu'))
    net.add(Dropout(drop))

    net.add(Dense(num_outputs))
    net.add(Activation('linear'))

    return net
Exemplo n.º 20
0
def default_n_linear(num_outputs, input_shape=(120, 160, 3), roi_crop=(0, 0)):
    """Functional linear model with `num_outputs` single-unit linear
    heads named n_outputs0.. ; returned uncompiled.

    Cropping is expected to have been done elsewhere, so only the
    expected input shape is adjusted here via `adjust_input_shape`.
    """
    drop = 0.1
    input_shape = adjust_input_shape(input_shape, roi_crop)

    img_in = Input(shape=input_shape, name='img_in')

    net = img_in
    for idx, (filters, kernel, stride) in enumerate(
            ((24, (5, 5), (2, 2)),
             (32, (5, 5), (2, 2)),
             (64, (5, 5), (2, 2)),
             (64, (3, 3), (1, 1)),
             (64, (3, 3), (1, 1))), start=1):
        net = Convolution2D(filters, kernel, strides=stride,
                            activation='relu',
                            name="conv2d_%d" % idx)(net)
        net = Dropout(drop)(net)

    net = Flatten(name='flattened')(net)
    net = Dense(100, activation='relu')(net)
    net = Dropout(drop)(net)
    net = Dense(50, activation='relu')(net)
    net = Dropout(drop)(net)

    outputs = [
        Dense(1, activation='linear', name='n_outputs' + str(i))(net)
        for i in range(num_outputs)
    ]

    return Model(inputs=[img_in], outputs=outputs)
Exemplo n.º 21
0
def cnn_model():
    """Three conv/BN/dropout/ReLU stages followed by flatten, dropout,
    and a softmax over `num_classes`. Built from the module-level
    HEIGTH/WIDTH/CHANNELS constants; returned uncompiled."""
    input_tensor = Input(shape=(HEIGTH, WIDTH, CHANNELS))

    # Three stride-2 conv stages: conv -> batch norm -> dropout -> ReLU.
    net = input_tensor
    for filters in (32, 64, 128):
        net = Convolution2D(filters=filters, kernel_size=(3, 3),
                            strides=(2, 2), padding='same')(net)
        net = BatchNormalization()(net)
        net = Dropout(0.2)(net)
        net = Activation('relu')(net)

    # Flatten and classify.
    net = Flatten()(net)
    net = Dropout(0.25)(net)
    net = Dense(num_classes, activation='softmax')(net)

    return tf.keras.Model(inputs=input_tensor, outputs=net)
Exemplo n.º 22
0
def residual_layer(x):
    """Two 3x3/256 convolutions (the first batch-normalized and
    ReLU-activated), whose output is concatenated with the input and
    passed through a final ReLU.

    NOTE(review): despite the name, this uses channel concatenation
    rather than the additive skip of a classic residual block — confirm
    that is intended.
    """
    out = Convolution2D(256, (3, 3), padding='SAME')(x)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Convolution2D(256, (3, 3), padding='SAME')(out)
    merged = merge.concatenate([out, x])
    return Activation('relu')(merged)
Exemplo n.º 23
0
def default_linear():
    """Default donkeycar linear model: five conv layers over 120x160
    RGB, two linear dense layers, and single-unit linear angle/throttle
    heads, compiled with equally weighted MSE losses."""
    img_in = Input(shape=(120, 160, 3), name='img_in')

    # Convolution2D class name is an alias for Conv2D.
    net = img_in
    for filters, kernel, stride in ((24, (5, 5), (2, 2)),
                                    (32, (5, 5), (2, 2)),
                                    (64, (5, 5), (2, 2)),
                                    (64, (3, 3), (2, 2)),
                                    (64, (3, 3), (1, 1))):
        net = Convolution2D(filters=filters, kernel_size=kernel,
                            strides=stride, activation='relu')(net)

    net = Flatten(name='flattened')(net)
    net = Dense(units=100, activation='linear')(net)
    net = Dropout(rate=.1)(net)
    net = Dense(units=50, activation='linear')(net)
    net = Dropout(rate=.1)(net)

    # Single linear unit for the steering angle.
    angle_out = Dense(units=1, activation='linear', name='angle_out')(net)

    # Single linear unit for the continuous throttle.
    throttle_out = Dense(units=1, activation='linear', name='throttle_out')(net)

    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])

    model.compile(optimizer='adam',
                  loss={'angle_out': 'mean_squared_error',
                        'throttle_out': 'mean_squared_error'},
                  loss_weights={'angle_out': 0.5, 'throttle_out': .5})

    return model
Exemplo n.º 24
0
def default_categorical():
    """Donkeycar default categorical model.

    Crops 45 rows off the top of a 120x160 RGB frame, runs five conv
    layers and two dropout-regularized dense layers, and outputs a
    15-bin softmax steering angle plus a ReLU throttle value. Returns
    the compiled Model.
    """
    # Local imports keep the function self-contained. (The original
    # imported Dense twice and pulled in merge / Reshape / MaxPooling2D /
    # BatchNormalization without using them; the unused names are gone.)
    from keras.layers import Input, Dense
    from keras.models import Model
    from keras.layers import Cropping2D, Convolution2D
    from keras.layers import Activation, Dropout, Flatten

    img_in = Input(shape=(120, 160, 3), name='img_in')
    x = img_in
    x = Cropping2D(cropping=((45, 0), (0, 0)))(x)
    x = Convolution2D(24, (5, 5), strides=(2, 2), activation='relu')(x)
    x = Convolution2D(32, (5, 5), strides=(2, 2), activation='relu')(x)
    x = Convolution2D(64, (5, 5), strides=(2, 2), activation='relu')(x)
    x = Convolution2D(64, (3, 3), strides=(1, 1), activation='relu')(x)
    x = Convolution2D(64, (3, 3), strides=(1, 1), activation='relu')(x)

    # Possibly add MaxPooling (will make it less sensitive to position in
    # image). Camera angle fixed, so may not be needed.

    x = Flatten(name='flattened')(x)      # flatten to 1D (fully connected)
    x = Dense(100, activation='relu')(x)  # 100 features, negatives zeroed
    x = Dropout(.1)(x)                    # drop 10% of neurons (anti-overfit)
    x = Dense(50, activation='relu')(x)   # 50 features, negatives zeroed
    x = Dropout(.1)(x)                    # drop 10% of neurons (anti-overfit)

    # Categorical output of the angle: softmax over 15 bins, each a
    # probability in 0.0-1.0; pick the best bin.
    angle_out = Dense(15, activation='softmax', name='angle_out')(x)

    # Continuous output of throttle: one positive number.
    throttle_out = Dense(1, activation='relu', name='throttle_out')(x)

    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])
    model.compile(optimizer='adam',
                  loss={'angle_out': 'categorical_crossentropy',
                        'throttle_out': 'mean_absolute_error'},
                  loss_weights={'angle_out': 0.9, 'throttle_out': .0001})

    return model
Exemplo n.º 25
0
 def nn(self):
     """Build, train, and return the disease-classification CNN.

     Two conv/pool stages, a 256-unit dense layer with dropout, and a
     softmax over `self.numerodenfermedades` classes; trained via
     `fit_generator` on `self.entrenamiento_generador` and validated on
     `self.validacion_generador`. Note: training runs as a side effect
     of calling this method.
     """
     nn = Sequential()
     nn.add(
         Convolution2D(self.filtroprimeravez,
                       self.filtrouno,
                       padding="same",
                       input_shape=(self.longituddelaimagen,
                                    self.alturadelaimagen, 3),
                       activation='relu'))
     nn.add(MaxPooling2D(pool_size=self.pulido))
     # Second conv stage; note no activation is specified here.
     nn.add(
         Convolution2D(self.filtrosegundavez,
                       self.filtrodos,
                       padding="same"))
     nn.add(MaxPooling2D(pool_size=self.pulido))
     nn.add(Flatten())
     nn.add(Dense(256, activation='relu'))
     nn.add(Dropout(0.1))
     nn.add(Dense(self.numerodenfermedades, activation='softmax'))
     nn.compile(loss='categorical_crossentropy',
                optimizer=optimizers.Adam(lr=self.lr),
                metrics=['accuracy'])
     nn.fit_generator(self.entrenamiento_generador,
                      steps_per_epoch=self.pasos,
                      epochs=self.pruebas,
                      validation_data=self.validacion_generador,
                      validation_steps=self.validacon)
     #nn.save('./modelo_lab_experimental/modelo_pezenfermo.h5')
     #nn.save_weights('./modelo_lab_experimental/pesospezenfermo.h5')
     return nn
def default_linear():
    "fully connected version of the default linear model"
    # 224x224 RGB in; a conv/pool pyramid reduces to a 5-channel map
    # that is flattened directly as the 'control_out' output (no dense
    # head). Compiled with categorical cross-entropy.
    img_in = Input(shape=(224, 224, 3), name='img_in')

    # Convolution2D class name is an alias for Conv2D.
    net = Convolution2D(filters=64, kernel_size=(5, 5), strides=(2, 2),
                        activation='elu')(img_in)  # output shape 110x110

    net = Convolution2D(filters=64, kernel_size=(3, 3), strides=(2, 2),
                        activation='elu')(net)  # output shape 27x27
    net = MaxPool2D(pool_size=(2, 2))(net)
    net = Convolution2D(filters=64, kernel_size=(3, 3), strides=(2, 2),
                        activation='relu')(net)  # output shape 7x7
    net = MaxPool2D(pool_size=(2, 2))(net)
    net = Convolution2D(filters=5, kernel_size=(3, 3), strides=(2, 2),
                        activation='relu')(net)
    net = MaxPool2D(pool_size=(2, 2))(net)  # output 5x5
    control_out = Flatten(name='control_out')(net)
    # control_out = Dense(units=4, activation='relu', name='control_out')(net)

    # Continuous throttle output, for later possibly:
    # throttle_out = Dense(units=1, activation='linear', name='throttle_out')(net)

    model = Model(inputs=[img_in], outputs=[control_out])

    model.compile(optimizer='adam',
                  loss={'control_out': 'categorical_crossentropy'},
                  metrics=['acc'])

    return model
Exemplo n.º 27
0
def rnn_lstm(seq_length=3, num_outputs=2, image_shape=(120,160,3)):
    """CNN + LSTM model over a fixed-length sequence of frames.

    Each frame passes through a TimeDistributed convolutional feature
    extractor, then the per-frame features feed two stacked LSTMs and a
    small dense head.

    Args:
      seq_length: number of frames in each input sequence.
      num_outputs: number of linear outputs (e.g. steering, throttle).
      image_shape: (H, W, C) shape of a single frame. Cropping, if any,
        is expected to be done elsewhere.

    Returns:
      An uncompiled Sequential model taking input of shape
      (seq_length,) + image_shape and emitting num_outputs linear values.
    """
    img_seq_shape = (seq_length,) + image_shape
    drop_out = 0.3

    # NOTE: the original built an unused `Input(batch_shape=img_seq_shape)`
    # layer that was never connected to the returned Sequential (and passed a
    # per-sample shape where a batch shape was expected); it has been removed.
    x = Sequential()
    # Per-frame (TimeDistributed) convolutional feature extractor.
    x.add(TD(Convolution2D(24, (5,5), strides=(2,2), activation='relu'), input_shape=img_seq_shape))
    x.add(TD(Dropout(drop_out)))
    x.add(TD(Convolution2D(32, (5,5), strides=(2,2), activation='relu')))
    x.add(TD(Dropout(drop_out)))
    x.add(TD(Convolution2D(32, (3,3), strides=(2,2), activation='relu')))
    x.add(TD(Dropout(drop_out)))
    x.add(TD(Convolution2D(32, (3,3), strides=(1,1), activation='relu')))
    x.add(TD(Dropout(drop_out)))
    x.add(TD(MaxPooling2D(pool_size=(2, 2))))
    x.add(TD(Flatten(name='flattened')))
    x.add(TD(Dense(100, activation='relu')))
    x.add(TD(Dropout(drop_out)))

    # Temporal aggregation: first LSTM keeps the sequence, second collapses it.
    x.add(LSTM(128, return_sequences=True, name="LSTM_seq"))
    x.add(Dropout(.1))
    x.add(LSTM(128, return_sequences=False, name="LSTM_fin"))
    x.add(Dropout(.1))

    # Dense head down to the linear outputs.
    x.add(Dense(128, activation='relu'))
    x.add(Dropout(.1))
    x.add(Dense(64, activation='relu'))
    x.add(Dense(10, activation='relu'))
    x.add(Dense(num_outputs, activation='linear', name='model_outputs'))

    return x
Exemplo n.º 28
0
def default_loc(num_outputs, num_locations, input_shape):
    '''
    Notes: this model depends on concatenate which failed on keras < 2.0.8
    '''

    drop = 0.5

    img_in = Input(shape=input_shape, name='img_in')

    # Convolutional trunk: (filters, kernel, strides, name) per layer,
    # each followed by dropout.
    conv_specs = [
        (24, (5, 5), (2, 2), "conv2d_1"),
        (32, (5, 5), (2, 2), "conv2d_2"),
        (64, (5, 5), (2, 2), "conv2d_3"),
        (64, (3, 3), (2, 2), "conv2d_4"),
        (64, (3, 3), (1, 1), "conv2d_5"),
    ]
    x = img_in
    for filters, kernel, strides, layer_name in conv_specs:
        x = Convolution2D(filters, kernel,
                          strides=strides,
                          activation='relu',
                          name=layer_name)(x)
        x = Dropout(drop)(x)

    x = Flatten(name='flattened')(x)
    x = Dense(100, activation='relu')(x)
    x = Dropout(drop)(x)

    z = Dense(50, activation='relu')(x)
    z = Dropout(.1)(z)

    # categorical output of the angle
    angle_out = Dense(15, activation='softmax', name='angle')(z)

    # categorical output of throttle
    throttle_out = Dense(20, activation='softmax', name='throttle')(z)

    # categorical output of location
    loc_out = Dense(num_locations, activation='softmax', name='loc')(z)

    # categorical output of lane (built but not wired into the active model
    # below — kept for the commented-out four-output variant)
    lane_out = Dense(2, activation='softmax', name='lane')(z)

    #model = Model(inputs=[img_in], outputs=[angle_out, throttle_out, loc_out, lane_out])
    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out, loc_out])

    return model
Exemplo n.º 29
0
    def add_model(self, input_data, target_data=None):
        """Implements core of model that transforms input_data into predictions.

        Treats the (num_steps, time_stamps) signal matrix as a multi-channel
        image, encodes it with three conv layers, runs a stack of residual
        blocks, then decodes through two upscale stages with additive skip
        connections back to the encoder features.

        Args:
          input_data: A tensor of shape (batch_size, num_steps, time_stamps).
          target_data: A tensor of shape (batch_size, num_steps, time_stamps);
            unused here.
        Returns:
          predict: A tensor of shape (batch_size, num_steps, time_stamps)
        """
        cfg = self.config
        height = cfg.num_steps
        width = cfg.time_stamps
        channels = cfg.channels
        batch_size = cfg.batch_size

        # Reshape to NCHW, then transpose to NHWC for channels-last conv layers.
        input_data = tf.reshape(input_data, [-1, channels, height, width])
        input_data = tf.transpose(input_data, perm=[0, 2, 3, 1])

        # Encoder; each stage is also kept as a skip connection for the decoder.
        skip0 = Convolution2D(64, (3, 3),
                              activation='relu',
                              padding='same',
                              name='sr_res_conv1')(input_data)
        skip1 = Convolution2D(64, (3, 3),
                              activation='relu',
                              padding='same',
                              strides=(2, 2),
                              name='sr_res_conv2')(skip0)
        skip2 = Convolution2D(64, (3, 3),
                              activation='relu',
                              padding='same',
                              strides=(2, 2),
                              name='sr_res_conv3')(skip1)

        # Residual trunk: blocks numbered 1 .. nb_residual + 1.
        net = self._residual_block(skip2, 1)
        for block_idx in range(cfg.nb_residual):
            net = self._residual_block(net, block_idx + 2)
        net = Add()([net, skip2])

        # Two upscale stages, each fused with the matching encoder feature map.
        net = self._upscale_block(net, 1)
        net = Add()([net, skip1])
        net = self._upscale_block(net, 2)
        net = Add()([net, skip0])

        output = Convolution2D(cfg.channels, (3, 3),
                               activation="linear",
                               padding='same',
                               name='sr_res_conv_final')(net)

        # Back to NCHW, then drop the channel axis.
        # NOTE(review): the final reshape to [batch_size, height, width] only
        # holds element counts when channels == 1 — confirm config.channels.
        prediction = tf.transpose(output, perm=[0, 3, 1, 2])
        prediction = tf.reshape(prediction, [batch_size, height, width])
        return prediction
Exemplo n.º 30
0
def Nvidia(num_outputs, input_shape, roi_crop):
    """NVIDIA PilotNet-style model with dropout after each conv layer.

    Args:
      num_outputs: number of independent scalar heads to emit.
      input_shape: (H, W, C) shape of the input image before cropping.
      roi_crop: crop spec applied to input_shape via adjust_input_shape.

    Returns:
      An uncompiled functional Model with num_outputs linear heads named
      'n_outputs0' .. 'n_outputs<num_outputs-1>'.
    """
    input_shape = adjust_input_shape(input_shape, roi_crop)
    img_in = Input(shape=input_shape, name='img_in')
    x = img_in

    # Dropout regularization, with a 90% keep probability.
    rate = 0.1

    # Convolutional layers.
    # NOTE(review): the first conv has no activation (linear), unlike the
    # others — confirm this is intentional. (The original also passed
    # input_shape= to this layer; in the functional API the shape comes from
    # the Input tensor, so that kwarg was a no-op and has been dropped.)
    x = Convolution2D(filters=24,
                      kernel_size=5,
                      strides=(2, 2))(x)
    x = Dropout(rate)(x)
    x = Convolution2D(filters=36,
                      kernel_size=5,
                      strides=(2, 2),
                      activation='relu')(x)
    x = Dropout(rate)(x)
    #x = MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='valid')(x)
    x = Convolution2D(filters=48,
                      kernel_size=5,
                      strides=(2, 2),
                      activation='relu')(x)
    x = Dropout(rate)(x)
    x = Convolution2D(filters=64,
                      kernel_size=3,
                      strides=(1, 1),
                      activation='relu')(x)
    x = Dropout(rate)(x)
    #x = MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='valid')(x)
    x = Convolution2D(filters=64,
                      kernel_size=3,
                      strides=(1, 1),
                      activation='relu')(x)
    x = Dropout(rate)(x)

    # Flatten Layers [Flatten to 1D (Fully connected)]
    x = Flatten()(x)

    # Fully Connected Layers
    x = Dense(1164, activation='relu')(x)
    x = Dense(100, activation='relu')(x)
    x = Dense(50, activation='relu')(x)
    x = Dense(10, activation='relu')(x)

    # One linear output head per requested output.
    outputs = [Dense(1, activation='linear', name='n_outputs' + str(i))(x)
               for i in range(num_outputs)]

    model = Model(inputs=[img_in], outputs=outputs)

    return model