def lstm_reshape(self, inputs, name_prefix, index, reshaped_inputs=None, initial=False):
    """Run one controller LSTM step and reshape its output for chaining.

    On the initial step the RNN consumes ``inputs`` directly; afterwards it
    consumes ``reshaped_inputs`` and is seeded with the state tensors carried
    in ``inputs[1:]``.  Returns the raw RNN output (with states, because
    ``return_state=True``) and a ``(-1, lstm_cell_units)`` reshape of its
    first element.
    """
    prefix = "{0}_{1}_{2}".format(self.controller_network_name, name_prefix, index)
    # Same initializer for input and recurrent kernels of the cell.
    cell = LSTMCell(
        self.lstm_cell_units,
        kernel_initializer=get_weight_initializer(initializer="lstm"),
        recurrent_initializer=get_weight_initializer(initializer="lstm"))
    rnn_layer = RNN(cell, return_state=True, name="{0}_{1}".format(prefix, "lstm"))
    if initial:
        outputs = rnn_layer(inputs)
    else:
        # inputs[1:] holds the hidden/cell state from the previous step.
        outputs = rnn_layer(reshaped_inputs, initial_state=inputs[1:])
    reshaped = Reshape(
        (-1, self.lstm_cell_units),
        name="{0}_{1}".format(prefix, "reshape"))(outputs[0])
    return outputs, reshaped
def reduce_output_size(self, inputs, name_prefix, rep, half_current_filters):
    """Halve the spatial size of ``inputs`` via two offset pooling paths.

    Path "a" applies a stride-2 average pool to the input as-is; path "b"
    first shifts the feature map by one pixel (zero-pad bottom/right, then
    crop top/left) so the two paths sample offset grids.  Each path is
    projected to ``half_current_filters`` channels with a 1x1 convolution;
    the results are concatenated on the channel axis and batch-normalized.
    """
    path_a = AveragePooling2D(
        pool_size=(1, 1), strides=(2, 2), padding="valid",
        name="{0}_avepool2d_{1}a_".format(name_prefix, rep))(inputs)
    path_a = Conv2D(
        filters=half_current_filters, kernel_size=(1, 1), strides=(1, 1),
        padding="valid",
        kernel_initializer=get_weight_initializer(),
        kernel_regularizer=get_weight_regularizer(),
        name="{0}_conv2d_{1}a_".format(name_prefix, rep))(path_a)

    # Shift by one pixel before pooling so path b covers the other grid.
    path_b = ZeroPadding2D(
        padding=((0, 1), (0, 1)),
        name="{0}_zeropad2d_{1}b_".format(name_prefix, rep))(inputs)
    path_b = Cropping2D(
        cropping=((1, 0), (1, 0)),
        name="{0}_crop2d_{1}b_".format(name_prefix, rep))(path_b)
    path_b = AveragePooling2D(
        pool_size=(1, 1), strides=(2, 2), padding="valid",
        name="{0}_avepool2d_{1}b_".format(name_prefix, rep))(path_b)
    path_b = Conv2D(
        filters=half_current_filters, kernel_size=(1, 1), strides=(1, 1),
        padding="valid",
        kernel_initializer=get_weight_initializer(),
        kernel_regularizer=get_weight_regularizer(),
        name="{0}_conv2d_{1}b_".format(name_prefix, rep))(path_b)

    merged = Concatenate(
        name="{0}_concat_{1}_".format(name_prefix, rep))([path_a, path_b])
    return BatchNormalization(
        name="{0}_bn_{1}_".format(name_prefix, rep))(merged)
def classification_layer(self, inputs, name_prefix, classes):
    """Classification head: global average pool -> dense -> softmax.

    Returns a tensor of ``classes`` softmax probabilities.
    """
    pooled = GlobalAveragePooling2D(
        name="{0}_gap2d_".format(name_prefix))(inputs)
    logits = Dense(
        classes,
        kernel_initializer=get_weight_initializer(),
        kernel_regularizer=get_weight_regularizer(),
        name="{0}_dense_".format(name_prefix))(pooled)
    return Activation(
        "softmax", name="{0}_softmax_".format(name_prefix))(logits)
def relu_sepconv2d_bn(self, inputs, name_prefix, rep, kernel_size, current_filters, strides=(1, 1)):
    """Apply the ReLU -> separable conv -> batch-norm building block.

    The separable convolution uses "same" padding and the shared project
    initializer/regularizer for both its depthwise and pointwise kernels.
    """
    activated = Activation(
        "relu", name="{0}_relu_{1}_".format(name_prefix, rep))(inputs)
    convolved = SeparableConv2D(
        filters=current_filters,
        kernel_size=kernel_size,
        strides=strides,
        padding="same",
        depthwise_initializer=get_weight_initializer(),
        pointwise_initializer=get_weight_initializer(),
        depthwise_regularizer=get_weight_regularizer(),
        pointwise_regularizer=get_weight_regularizer(),
        name="{0}_sepconv2d_{1}_".format(name_prefix, rep))(activated)
    return BatchNormalization(
        name="{0}_bn_{1}_".format(name_prefix, rep))(convolved)
def adjust_output_depth(self, inputs, name_prefix, current_filters):
    """Project ``inputs`` to ``current_filters`` channels.

    Uses a ReLU -> 1x1 convolution -> batch-norm sequence; spatial size is
    unchanged ("same" padding, stride 1).
    """
    activated = Activation(
        "relu", name="{0}_relu_".format(name_prefix))(inputs)
    projected = Conv2D(
        filters=current_filters,
        kernel_size=(1, 1),
        strides=(1, 1),
        padding="same",
        kernel_initializer=get_weight_initializer(),
        kernel_regularizer=get_weight_regularizer(),
        name="{0}_conv2d_".format(name_prefix))(activated)
    return BatchNormalization(
        name="{0}_bn_".format(name_prefix))(projected)