def build(self):
    # Assumes module-level imports:
    #   from keras.layers import Input, Dense, Dropout
    #   from keras.models import Model
    #   from keras.optimizers import Adam
    d_input = Input(shape=(2,))

    # Four fully connected blocks with dropout for regularization.
    x = Dense(64, activation='relu')(d_input)
    x = Dropout(0.5)(x)
    x = Dense(64, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(64, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(64, activation='relu')(x)
    x = Dropout(0.5)(x)

    # Softmax output so the 3-class predictions form a probability
    # distribution, as required by categorical cross-entropy.
    d_output = Dense(3, activation='softmax')(x)

    model = Model(inputs=[d_input], outputs=[d_output])
    optimizer = Adam(lr=0.00005)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
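A minimal sketch of how this builder might be used. The class name `NetBuilder` and the random data are illustrative assumptions, not part of the source; the only requirements are 2-feature input rows and one-hot labels over 3 classes.

# Hypothetical usage sketch (class name and data are illustrative, not from the source).
import numpy as np

model = NetBuilder().build()          # `NetBuilder` stands in for whatever class defines build()
features = np.random.rand(256, 2)     # 256 samples, 2 input features
labels = np.eye(3)[np.random.randint(0, 3, size=256)]  # one-hot labels over 3 classes
model.fit(features, labels, batch_size=32, epochs=10, validation_split=0.2)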
def run(model):
    # Assumes module-level imports and constants:
    #   from keras.layers import Input, Conv2D, MaxPooling2D, Conv2DTranspose, Add, Dense
    #   from keras.models import Model
    #   from keras.optimizers import Adam
    #   import time
    # plus DATA_DIRECTORY, TRAINING_DATA_DIRECTORY, IMAGE_SHAPE,
    # LEARNING_RATE, BATCH_SIZE, EPOCHS and the `helper` module.

    # Download the KITTI dataset if needed, then load images and labels.
    helper.maybe_download_training_img(DATA_DIRECTORY)
    x, y = helper.get_data(TRAINING_DATA_DIRECTORY, IMAGE_SHAPE)

    if model is None:
        inputs = Input(shape=(IMAGE_SHAPE[0], IMAGE_SHAPE[1], 3))

        # VGG16-style encoder.
        # Block 1
        block1_conv1 = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(inputs)
        block1_conv2 = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(block1_conv1)
        block1_pool = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(block1_conv2)

        # Block 2
        block2_conv1 = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(block1_pool)
        block2_conv2 = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(block2_conv1)
        block2_pool = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(block2_conv2)

        # Block 3
        block3_conv1 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(block2_pool)
        block3_conv2 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(block3_conv1)
        block3_conv3 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(block3_conv2)
        block3_pool = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(block3_conv3)

        # Block 4
        block4_conv1 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(block3_pool)
        block4_conv2 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(block4_conv1)
        block4_conv3 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(block4_conv2)
        block4_pool = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(block4_conv3)

        # Block 5
        block5_conv1 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(block4_pool)
        block5_conv2 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(block5_conv1)
        block5_conv3 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(block5_conv2)
        block5_pool = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(block5_conv3)

        # FCN-8-style decoder: 1x1 convolutions reduce each pooled feature map
        # to the number of classes, transposed convolutions upsample, and skip
        # connections from pool4 and pool3 are added back in.
        pool5_conv1x1 = Conv2D(2, (1, 1), activation='relu', padding='same')(block5_pool)
        upsample_1 = Conv2DTranspose(2, kernel_size=(4, 4), strides=(2, 2), padding='same')(pool5_conv1x1)

        pool4_conv1x1 = Conv2D(2, (1, 1), activation='relu', padding='same')(block4_pool)
        add_1 = Add()([upsample_1, pool4_conv1x1])

        upsample_2 = Conv2DTranspose(2, kernel_size=(4, 4), strides=(2, 2), padding='same')(add_1)
        pool3_conv1x1 = Conv2D(2, (1, 1), activation='relu', padding='same')(block3_pool)
        add_2 = Add()([upsample_2, pool3_conv1x1])

        # Final 8x upsampling back to the input resolution, followed by a
        # per-pixel softmax over the two segmentation classes.
        upsample_3 = Conv2DTranspose(2, kernel_size=(16, 16), strides=(8, 8), padding='same')(add_2)
        output = Dense(2, activation='softmax')(upsample_3)

        model = Model(inputs, output, name='multinet_seg')

        adam = Adam(lr=LEARNING_RATE)
        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])

    model.fit(x, y, batch_size=BATCH_SIZE, epochs=EPOCHS)
    model.save('trained_model' + str(time.time()) + '.h5')
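The `model` parameter exists so training can resume from a saved checkpoint rather than always rebuilding the network. A short sketch of that usage, assuming a checkpoint file produced by a previous call (the file name below is illustrative, since `run()` names files with a timestamp):

# Hypothetical usage sketch: resume training from a previously saved checkpoint.
from keras.models import load_model

run(None)                                           # train a fresh FCN from scratch
saved = load_model('trained_model1234567890.0.h5')  # illustrative checkpoint name
run(saved)                                          # continue training the loaded model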
def add(self, layer):
    """Adds a layer instance on top of the layer stack.

    Arguments:
        layer: layer instance.

    Raises:
        TypeError: If `layer` is not a layer instance.
        ValueError: In case the `layer` argument does not know its
            input shape.
        ValueError: In case the `layer` argument has multiple output
            tensors, or is already connected somewhere else
            (forbidden in `Sequential` models).
    """
    if not isinstance(layer, Layer):
        raise TypeError('The added layer must be '
                        'an instance of class Layer. '
                        'Found: ' + str(layer))
    if not self.outputs:
        # First layer in model: check that it is an input layer.
        if not layer.inbound_nodes:
            # Create an input layer.
            if not hasattr(layer, 'batch_input_shape'):
                raise ValueError('The first layer in a '
                                 'Sequential model must '
                                 'get an `input_shape` or '
                                 '`batch_input_shape` argument.')
            # Instantiate the input layer.
            x = Input(batch_shape=layer.batch_input_shape,
                      dtype=layer.dtype,
                      name=layer.name + '_input')
            # This will build the current layer
            # and create the node connecting the current layer
            # to the input layer we just created.
            layer(x)

        if len(layer.inbound_nodes) != 1:
            raise ValueError('A layer added to a Sequential model must '
                             'not already be connected somewhere else. '
                             'Model received layer ' + layer.name +
                             ' which has ' +
                             str(len(layer.inbound_nodes)) +
                             ' pre-existing inbound connections.')
        if len(layer.inbound_nodes[0].output_tensors) != 1:
            raise ValueError('All layers in a Sequential model '
                             'should have a single output tensor. '
                             'For multi-output layers, '
                             'use the functional API.')

        self.outputs = [layer.inbound_nodes[0].output_tensors[0]]
        self.inputs = topology.get_source_inputs(self.outputs[0])

        # We create an input node, which we will keep updated
        # as we add more layers.
        topology.Node(outbound_layer=self,
                      inbound_layers=[],
                      node_indices=[],
                      tensor_indices=[],
                      input_tensors=self.inputs,
                      output_tensors=self.outputs,
                      # no model-level masking for now
                      input_masks=[None for _ in self.inputs],
                      output_masks=[None])
    else:
        output_tensor = layer(self.outputs[0])
        if isinstance(output_tensor, list):
            raise TypeError('All layers in a Sequential model '
                            'should have a single output tensor. '
                            'For multi-output layers, '
                            'use the functional API.')
        self.outputs = [output_tensor]
        # Update self.inbound_nodes.
        self.inbound_nodes[0].output_tensors = self.outputs
        self.inbound_nodes[0].output_shapes = [K.int_shape(self.outputs[0])]
    self.layers.append(layer)
    self.built = False
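A brief sketch of the behaviour these checks enforce, using the public `Sequential` API: the first layer must declare its input shape, and every added layer must have exactly one output tensor.

from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(32, input_shape=(16,)))     # first layer must declare its input shape
model.add(Dense(10, activation='softmax'))  # later layers infer their input shape

model_bad = Sequential()
# model_bad.add(Dense(32))  # would raise ValueError: no `input_shape` on the first layer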