def up_projection(lt_, nf, s, block):
    # Back-projection up-sampling unit: upsample, project back down, and
    # correct the first estimate with the residual (Subtract/Add pair).
    with tf.name_scope('up_' + str(block)):
        if s == 2:
            ht = Conv2DTranspose(nf, 2, strides=2)(lt_)
            ht = PReLU()(ht)
            lt = ZeroPadding2D(2)(ht)
            lt = Conv2D(nf, 6, strides=2)(lt)
            lt = PReLU()(lt)
            et = Subtract()([lt, lt_])
            ht1 = Conv2DTranspose(nf, 2, strides=2)(et)
            ht1 = PReLU()(ht1)
            ht1 = Add()([ht, ht1])
            return ht1
        if s == 4:
            ht = Conv2DTranspose(nf, 4, strides=4)(lt_)
            ht = PReLU()(ht)
            lt = ZeroPadding2D(2)(ht)
            lt = Conv2D(nf, 8, strides=4)(lt)
            lt = PReLU()(lt)
            et = Subtract()([lt, lt_])
            ht1 = Conv2DTranspose(nf, 4, strides=4)(et)
            ht1 = PReLU()(ht1)
            ht1 = Add()([ht, ht1])
            return ht1
        if s == 8:
            ht = Conv2DTranspose(nf, 8, strides=8)(lt_)
            ht = PReLU()(ht)
            lt = ZeroPadding2D(2)(ht)
            lt = Conv2D(nf, 12, strides=8)(lt)
            lt = PReLU()(lt)
            et = Subtract()([lt, lt_])
            ht1 = Conv2DTranspose(nf, 8, strides=8)(et)
            ht1 = PReLU()(ht1)
            ht1 = Add()([ht, ht1])
            return ht1
def define_model(IMAGE_DIMS, VEC_LEN, weight_dir):
    i10 = Input(shape=IMAGE_DIMS)
    i20 = Input(shape=IMAGE_DIMS)
    i30 = Input(shape=IMAGE_DIMS)
    t1 = Input(shape=(1, ))
    t2 = Input(shape=(1, ))

    print("[INFO] Weights restored from pre-trained InceptionV3!")
    encoder = InceptionV3(weights=weight_dir, include_top=False)
    pooling = GlobalAveragePooling2D()

    def l2_norm_(x):
        return K.sqrt(K.sum(K.square(x), 1))

    def l2_normalize(x):
        return K.l2_normalize(x, 1)

    output = Dense(VEC_LEN, activation='sigmoid', name='encoder_output')

    o1 = encoder(i10)
    o2 = encoder(i20)
    o3 = encoder(i30)

    o1 = pooling(o1)  # global average pooling layer
    o1 = output(o1)   # fully connected embedding layer
    o2 = pooling(o2)  # global average pooling layer
    o2 = output(o2)   # fully connected embedding layer
    o3 = pooling(o3)  # global average pooling layer
    o3 = output(o3)   # fully connected embedding layer

    def distance(inputs):
        ap, an, margin, gthr = inputs
        ap_l2n = K.sqrt(K.sum(K.square(ap), axis=1, keepdims=True))
        an_l2n = K.sqrt(K.sum(K.square(an), axis=1, keepdims=True))
        d = K.minimum((an_l2n - ap_l2n), margin)
        g = K.maximum(ap_l2n, gthr)
        y = K.concatenate([d, g], axis=1)
        return y

    ap = Subtract()([o1, o2])
    an = Subtract()([o1, o3])
    val = Lambda(distance, name='margin')([ap, an, t1, t2])

    model = Model(inputs=[i10, i20, i30, t1, t2], outputs=val)
    return model
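A brief usage sketch for the triplet builder above; the image size, embedding length, and weights argument are assumptions (any local InceptionV3 weights file, or 'imagenet', should work with the keras application):

# Illustrative construction of the triplet model (values are assumptions).
IMAGE_DIMS = (299, 299, 3)   # InceptionV3's native input size (assumed)
VEC_LEN = 128                # embedding dimension (assumed)
model = define_model(IMAGE_DIMS, VEC_LEN, weight_dir='imagenet')
model.summary()
# Inputs: anchor, positive, negative images plus per-sample margin and norm-threshold scalars.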
def compile_dueling_deep_q_network():
    he = variance_scaling_initializer()

    model_input = Input(shape=(STATE_SHAPE, ))
    x = Dense(DENSE_LAYER_DIMS,
              input_shape=(STATE_SHAPE, ),
              kernel_initializer=he,
              activation=ACTIVATION_FUNCTION)(model_input)
    # x = Dense(DENSE_LAYER_DIMS, kernel_initializer=he, activation=ACTIVATION_FUNCTION)(x)

    # Split the hidden layer into separate value and advantage streams
    # val_stream, adv_stream = Lambda(lambda w: tf.split(w, 2, 3))(x)
    val_stream, adv_stream = Lambda(lambda w: tf.split(w, 2, 1))(x)

    val_stream = Flatten()(val_stream)
    val = Dense(1, kernel_initializer=he)(val_stream)

    adv_stream = Flatten()(adv_stream)
    adv = Dense(NUMBER_OF_ACTIONS, kernel_initializer=he)(adv_stream)

    # Combine streams into Q-values: Q = V + (A - mean(A))
    reduce_mean = Lambda(
        lambda w: tf.reduce_mean(w, axis=1, keepdims=True))  # custom layer for reduce mean
    q_vals = Add()([val, Subtract()([adv, reduce_mean(adv)])])

    # Build model
    model = Model(model_input, q_vals)
    model.compile(Adam(LEARNING_RATE), loss=LOSS_FUNCTION)

    return model
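The builder above relies on module-level constants and a variance_scaling_initializer helper that are not shown. A minimal sketch of plausible definitions (every name and value below is an assumption, not taken from the original source); note that DENSE_LAYER_DIMS must be even so that tf.split(w, 2, 1) can divide the hidden layer into equal value and advantage halves:

# Hypothetical configuration for compile_dueling_deep_q_network (illustrative only).
import tensorflow as tf
from tensorflow.keras.initializers import VarianceScaling

STATE_SHAPE = 8                           # length of the flattened observation vector (assumed)
DENSE_LAYER_DIMS = 128                    # must be even: split into value/advantage halves
NUMBER_OF_ACTIONS = 4                     # size of the discrete action space (assumed)
ACTIVATION_FUNCTION = 'relu'
LEARNING_RATE = 1e-4
LOSS_FUNCTION = tf.keras.losses.Huber()   # Huber loss is a common choice for DQN variants


def variance_scaling_initializer():
    # Stand-in for the He-style initializer used above (assumption).
    return VarianceScaling(scale=2.0)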
def build_model(self, f_sizes):
    """
    :param f_sizes: number of unique values (nunique) of each sparse feature
    :return:
    """
    dim_input = len(f_sizes)  # +1

    input_x = [Input(shape=(1, )) for i in range(dim_input)]  # one input per sparse feature column

    biases = [
        self.get_embed(x, size, 1) for (x, size) in zip(input_x, f_sizes)
    ]
    factors = [
        self.get_embed(x, size) for (x, size) in zip(input_x, f_sizes)
    ]

    s = Add()(factors)
    diffs = [Subtract()([s, x]) for x in factors]
    dots = [Dot(axes=1)([d, x]) for d, x in zip(diffs, factors)]

    x = Concatenate()(biases + dots)
    x = BatchNormalization()(x)
    output = Dense(1,
                   activation='relu',
                   kernel_regularizer=l2(self.kernel_l2))(x)

    model = Model(inputs=input_x, outputs=[output])
    model.compile(optimizer=Adam(clipnorm=0.5),
                  loss='mean_squared_error')  # TODO: radam

    output_f = factors + biases
    model_features = Model(inputs=input_x, outputs=output_f)
    return model, model_features
def demo_create_encoder(latent_dim, cat_dim, window_size, input_dim):
    input_layer = Input(shape=(window_size, input_dim))

    code = TimeDistributed(Dense(64, activation='linear'))(input_layer)
    code = Bidirectional(LSTM(128, return_sequences=True))(code)
    code = BatchNormalization()(code)
    code = ELU()(code)
    code = Bidirectional(LSTM(64))(code)
    code = BatchNormalization()(code)
    code = ELU()(code)

    cat = Dense(64)(code)
    cat = BatchNormalization()(cat)
    cat = PReLU()(cat)
    cat = Dense(cat_dim, activation='softmax')(cat)

    latent_repr = Dense(64)(code)
    latent_repr = BatchNormalization()(latent_repr)
    latent_repr = PReLU()(latent_repr)
    latent_repr = Dense(latent_dim, activation='linear')(latent_repr)

    decode = Concatenate()([latent_repr, cat])
    decode = RepeatVector(window_size)(decode)
    decode = Bidirectional(LSTM(64, return_sequences=True))(decode)
    decode = ELU()(decode)
    decode = Bidirectional(LSTM(128, return_sequences=True))(decode)
    decode = ELU()(decode)
    decode = TimeDistributed(Dense(64))(decode)
    decode = ELU()(decode)
    decode = TimeDistributed(Dense(input_dim, activation='linear'))(decode)

    error = Subtract()([input_layer, decode])

    return Model(input_layer, [decode, latent_repr, cat, error])
def denoise_model(image):
    """Model that denoises the noisy image."""
    initializer = normal(mean=0, stddev=0.01, seed=13)

    x = Conv2D(64, (3, 3), padding='same', kernel_initializer=initializer)(image)
    bn1 = BatchNormalization()(x)
    act1 = Activation(activation='selu')(bn1)

    x = Conv2D(64, (3, 3), padding='same', kernel_initializer=initializer)(act1)
    bn1 = BatchNormalization()(x)
    act1 = Activation(activation='selu')(bn1)

    encoded = Conv2D(32, (3, 3), padding='same', kernel_initializer=initializer)(act1)
    bn1 = BatchNormalization()(encoded)
    act1 = Activation(activation='selu')(bn1)

    x = Conv2D(32, (3, 3), padding='same', kernel_initializer=initializer)(act1)
    bn1 = BatchNormalization()(x)
    act1 = Activation(activation='selu')(bn1)

    x = Conv2D(64, (3, 3), padding='same', kernel_initializer=initializer)(act1)
    bn1 = BatchNormalization()(x)
    act1 = Activation(activation='selu')(bn1)

    x = Conv2D(64, (3, 3), padding='same', kernel_initializer=initializer)(act1)
    bn1 = BatchNormalization()(x)
    act1 = Activation(activation='selu')(bn1)

    decoded = Conv2D(1, (3, 3), padding='same', kernel_initializer=initializer)(act1)

    # Residual learning: the network predicts the noise, which is subtracted
    # from the noisy input to produce the denoised image.
    decoded = Subtract()([image, decoded])
    return decoded
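Because denoise_model takes a Keras tensor and returns the denoised tensor, it still has to be wrapped in a Model before training. A minimal usage sketch, assuming single-channel inputs of arbitrary spatial size (the shape and optimizer are assumptions):

# Illustrative wrapper around denoise_model (input shape and optimizer are assumptions).
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

noisy = Input(shape=(None, None, 1), name='noisy_image')   # grayscale input
denoised = denoise_model(noisy)                            # noisy input minus predicted noise
denoiser = Model(noisy, denoised, name='denoiser')
denoiser.compile(optimizer='adam', loss='mse')
# denoiser.fit(noisy_patches, clean_patches, batch_size=32, epochs=10)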
def down_projection(ht_, nf, s, block, act='prelu'):
    # Back-projection down-sampling unit: downsample, project back up, and
    # correct the first estimate with the residual (Subtract/Add pair).
    with tf.name_scope('down_' + str(block)):
        if s == 2:
            ht = ZeroPadding2D(2)(ht_)
            lt = Conv2D(nf, 6, strides=2)(ht)
            lt = PReLU()(lt)
            ht = Conv2DTranspose(nf, 2, strides=2)(lt)
            ht = PReLU()(ht)
            et = Subtract()([ht, ht_])
            lt1 = ZeroPadding2D(2)(et)
            lt1 = Conv2D(nf, 6, strides=2)(lt1)
            lt1 = PReLU()(lt1)
            lt1 = Add()([lt1, lt])
            return lt1
        if s == 4:
            ht = ZeroPadding2D(2)(ht_)
            lt = Conv2D(nf, 8, strides=4)(ht)
            lt = PReLU()(lt)
            ht = Conv2DTranspose(nf, 4, strides=4)(lt)
            ht = PReLU()(ht)
            et = Subtract()([ht, ht_])
            lt1 = ZeroPadding2D(2)(et)
            lt1 = Conv2D(nf, 8, strides=4)(lt1)
            lt1 = PReLU()(lt1)
            lt1 = Add()([lt1, lt])
            return lt1
        if s == 8:
            ht = ZeroPadding2D(2)(ht_)
            lt = Conv2D(nf, 12, strides=8)(ht)
            lt = PReLU()(lt)
            ht = Conv2DTranspose(nf, 8, strides=8)(lt)
            ht = PReLU()(ht)
            et = Subtract()([ht, ht_])
            lt1 = ZeroPadding2D(2)(et)
            lt1 = Conv2D(nf, 12, strides=8)(lt1)
            lt1 = PReLU()(lt1)
            lt1 = Add()([lt1, lt])
            return lt1
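The up_projection and down_projection units above mirror each other and can be chained into a back-projection style super-resolution trunk. A minimal composition sketch, assuming 64 feature maps, a 2x scale, and tf.keras imports (all of these are assumptions):

# Illustrative stacking of the projection units (feature count and scale are assumptions).
import tensorflow as tf
from tensorflow.keras.layers import Input, Concatenate
from tensorflow.keras.models import Model

lr_feat = Input(shape=(32, 32, 64))              # low-resolution feature maps
h1 = up_projection(lr_feat, 64, 2, block=1)      # 64x64 high-resolution features
l1 = down_projection(h1, 64, 2, block=1)         # projected back to 32x32
h2 = up_projection(l1, 64, 2, block=2)           # refined 64x64 features
trunk = Model(lr_feat, Concatenate()([h1, h2]))  # dense concatenation of HR features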
def duel_dqn(flatten, dense_initialization, num_actions):
    # Implementation of the dueling network architecture
    # (see http://proceedings.mlr.press/v48/wangf16.pdf)

    # value stream
    dense_value = Dense(512, activation=relu,
                        kernel_initializer=dense_initialization)(flatten)
    out_value = Dense(1, kernel_initializer=dense_initialization)(dense_value)

    # advantage stream
    dense_advantage = Dense(512, activation=relu,
                            kernel_initializer=dense_initialization)(flatten)
    out_std_advantage = Dense(
        num_actions, kernel_initializer=dense_initialization)(dense_advantage)
    average_tensor = getattr(tRexUtils, 'average_tensor')
    out_avg_advantage = Lambda(average_tensor)(out_std_advantage)
    out_advantage = Subtract()([out_std_advantage, out_avg_advantage])

    # combine
    out = Add()([out_value, out_advantage])
    return out
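The advantage-centering step pulls average_tensor out of a tRexUtils module that is not shown here. A plausible stand-in (an assumption, not the original implementation) takes the mean over the action axis and keeps the dimension so the Subtract layer can broadcast it against the per-action advantages:

# Hypothetical stand-in for tRexUtils.average_tensor (assumption).
from tensorflow.keras import backend as K

def average_tensor(x):
    # Mean advantage per sample, shape (batch, 1); broadcast inside Subtract.
    return K.mean(x, axis=1, keepdims=True)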
def _create_meta_network(input_dim, base_network):
    input_a = Input(shape=(input_dim, ))
    input_b = Input(shape=(input_dim, ))

    rel_score = base_network(input_a)
    irr_score = base_network(input_b)

    # Subtract scores.
    diff = Subtract()([rel_score, irr_score])

    # Pass difference through sigmoid function.
    prob = Activation("sigmoid")(diff)

    # Build model.
    model = Model(inputs=[input_a, input_b], outputs=prob)
    model.compile(optimizer="adadelta", loss="binary_crossentropy")

    return model
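_create_meta_network expects a shared scoring base_network; the pairwise difference of its scores is squashed with a sigmoid, RankNet style. A minimal sketch of how it might be instantiated (the base network architecture and input dimension are assumptions):

# Illustrative base network and meta-network construction (architecture is an assumption).
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model

def _create_base_network(input_dim):
    inp = Input(shape=(input_dim, ))
    h = Dense(64, activation='relu')(inp)
    score = Dense(1)(h)                      # single relevance score per item
    return Model(inp, score)

INPUT_DIM = 100                               # feature dimension (assumed)
base = _create_base_network(INPUT_DIM)
ranker = _create_meta_network(INPUT_DIM, base)
# Train on (relevant, irrelevant) feature pairs with target 1.0 for each pair.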
def build_model_1(f_size):
    dim_input = len(f_size)

    input_x = [Input(shape=(1, )) for i in range(dim_input)]

    biases = [get_embed(x, size, 1) for (x, size) in zip(input_x, f_size)]
    factors = [
        get_embed(x, size, k_latent) for (x, size) in zip(input_x, f_size)
    ]

    s = Add()(factors)
    diffs = [Subtract()([s, x]) for x in factors]
    dots = [Dot(axes=1)([d, x]) for d, x in zip(diffs, factors)]

    x = Concatenate()(biases + dots)
    x = BatchNormalization()(x)
    output = Dense(1, activation='relu', kernel_regularizer=l2(kernel_reg))(x)

    model = Model(inputs=input_x, outputs=[output])
    model.compile(optimizer=Adam(clipnorm=0.5), loss='mean_squared_error')

    output_f = factors + biases
    model_features = Model(inputs=input_x, outputs=output_f)
    return model, model_features
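Both factorization-machine builders above call a get_embed helper that is not part of the snippet. A plausible sketch of it (an assumption about its behavior, not the original code) maps an integer feature id to a flattened embedding of dimension k (or 1 for the bias terms), falling back to a Dense projection for already-numeric columns:

# Hypothetical get_embed helper (assumption), in the spirit of the FM builders above.
from tensorflow.keras.layers import Dense, Embedding, Flatten
from tensorflow.keras.regularizers import l2

k_latent = 8           # latent factor dimension (assumed)
embedding_reg = 1e-4   # embedding regularization strength (assumed)

def get_embed(x_input, x_size, k=k_latent):
    if x_size > 0:  # categorical feature: look up and flatten an embedding vector
        emb = Embedding(x_size, k, embeddings_regularizer=l2(embedding_reg))(x_input)
        emb = Flatten()(emb)
    else:           # numeric feature: project it to the same latent dimension
        emb = Dense(k, kernel_regularizer=l2(embedding_reg))(x_input)
    return emb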
def dncnn(input_shape: tuple = (10, 320, 320, 2),
          depth: int = 10,
          output_channel: int = 2,
          filters=64,
          kernel_size=3):
    inpt = Input(shape=input_shape)

    x = Conv3D(filters=filters, kernel_size=kernel_size, strides=1,
               padding='same')(inpt)
    x = Activation('relu')(x)

    for i in range(depth - 2):
        x = Conv3D(filters=filters, kernel_size=kernel_size, strides=1,
                   padding='same')(x)
        x = Activation('relu')(x)

    x = Conv3D(filters=output_channel, kernel_size=kernel_size, strides=1,
               padding='same')(x)

    # Residual learning: the stack predicts the noise, and the model output
    # is the noisy input minus that prediction.
    x = Subtract()([inpt, x])

    model = Model(inputs=inpt, outputs=x)
    return model
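A brief usage sketch for the 3D DnCNN-style builder above; because the last layer subtracts the predicted noise from the input, the training target is the clean volume (the shapes and optimizer below are assumptions):

# Illustrative instantiation of the 3D DnCNN-style denoiser (values are assumptions).
model = dncnn(input_shape=(10, 320, 320, 2), depth=10, output_channel=2)
model.compile(optimizer='adam', loss='mse')
model.summary()
# model.fit(noisy_volumes, clean_volumes, batch_size=1, epochs=50)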
def _build_net(self):
    inputs = Input(shape=(self.n_features, ))
    x = Dense(64, activation='relu', kernel_regularizer=l2(self.l2))(inputs)
    x = Dense(32, activation='relu', kernel_regularizer=l2(self.l2))(x)

    if not self.dueling:
        output = Dense(self.n_actions, kernel_regularizer=l2(self.l2))(x)
    else:
        v = Dense(1, kernel_regularizer=l2(self.l2))(x)
        a = Dense(self.n_actions, kernel_regularizer=l2(self.l2))(x)
        mean = Lambda(lambda x: K.mean(x, axis=1, keepdims=True))(a)
        # advantage = Lambda(lambda x, y: x - y)([a, mean])
        # output = Lambda(lambda x, y: x + y)([v, advantage])
        advantage = Subtract()([a, mean])
        output = Add()([v, advantage])

    model = Model(inputs=inputs, outputs=output)
    model.compile(optimizer=Adam(learning_rate=self.lr),
                  loss='mean_squared_error',
                  metrics=['accuracy'])
    return model
def __init__(self, anodec, **kwargs):
    super(postTreat, self).__init__(**kwargs)
    self.anodec = anodec
    self.batchNorm = BatchNormalization()
    self.subtract = Subtract()
    self.reshape = Reshape((32, 32, 1))
def define_model(IMAGE_DIMS, VEC_LEN, weight_dir):
    i10 = Input(shape=IMAGE_DIMS)
    i20 = Input(shape=IMAGE_DIMS)
    i30 = Input(shape=IMAGE_DIMS)
    t1 = Input(shape=(1, ))
    t2 = Input(shape=(1, ))

    # Scale raw pixel values to [-1, 1] before feeding the encoder.
    i1 = Lambda(lambda x: tf.div(tf.subtract(tf.cast(x, tf.float32), 127.5), 127.5))(i10)
    i2 = Lambda(lambda x: tf.div(tf.subtract(tf.cast(x, tf.float32), 127.5), 127.5))(i20)
    i3 = Lambda(lambda x: tf.div(tf.subtract(tf.cast(x, tf.float32), 127.5), 127.5))(i30)

    print("[INFO] Weights restored from pre-trained InceptionV3!")
    encoder = InceptionV3(weights=weight_dir, include_top=False)
    pooling = GlobalAveragePooling2D()
    output = Dense(VEC_LEN, activation='tanh', name='encoder_output')

    o1 = encoder(i1)
    o2 = encoder(i2)
    o3 = encoder(i3)

    o1 = pooling(o1)  # global average pooling layer
    o1 = output(o1)   # fully connected embedding layer
    # o1 = Dropout(0.1)(o1)
    o2 = pooling(o2)  # global average pooling layer
    o2 = output(o2)   # fully connected embedding layer
    # o2 = Dropout(0.1)(o2)
    o3 = pooling(o3)  # global average pooling layer
    o3 = output(o3)   # fully connected embedding layer
    # o3 = Dropout(0.1)(o3)

    # print('[INFO] base_model_layers', len(encoder.layers))

    def l2_normalize(x):
        return K.expand_dims(K.l2_normalize(x, 1))

    def l2_norm(x):
        return K.sqrt(K.sum(K.square(x), 1))

    def distance(inputs):
        ap, an, margin, gthr = inputs
        ap_l2n = K.sqrt(K.sum(K.square(ap), axis=1, keepdims=True))
        an_l2n = K.sqrt(K.sum(K.square(an), axis=1, keepdims=True))
        d = K.minimum((an_l2n - ap_l2n), margin)
        g = K.maximum(ap_l2n, gthr)
        y = K.concatenate([d, g], axis=1)
        return y

    ap = Subtract()([o1, o2])
    an = Subtract()([o1, o3])
    val = Lambda(distance, name='margin')([ap, an, t1, t2])
    # val = Concatenate()([d, g])

    model = Model(inputs=[i10, i20, i30, t1, t2], outputs=val)
    # K.clear_session()
    return model
def light_featex():
    base = 32
    img_input = Input(shape=(32, 32, 3), name='image_in')

    # High-pass residual features: SRM responses of the raw image minus SRM
    # responses of a Gaussian-blurred copy.
    blur = Conv2D(filters=3,
                  kernel_size=[5, 5],
                  kernel_initializer=_gaussian_kernel(2, 0, 11),
                  padding='same',
                  name='gaussian_blur',
                  trainable=False)(img_input)
    blur = Conv2D(filters=3,
                  kernel_size=[5, 5],
                  kernel_initializer=_build_SRM_kernel(),
                  padding='same',
                  name='srm_blur',
                  trainable=False)(blur)
    x = Conv2D(filters=3,
               kernel_size=[5, 5],
               kernel_initializer=_build_SRM_kernel(),
               padding='same',
               name='srm',
               trainable=False)(img_input)
    x = Subtract()([x, blur])

    # block 1
    bname = 'b1'
    nb_filters = base
    x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
               name=bname + 'c1')(x)
    x = BatchNormalization()(x)
    x = Dropout(0.25)(x)
    x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
               name=bname + 'c2')(x)
    x = BatchNormalization()(x)
    x = Dropout(0.25)(x)

    # block 2
    bname = 'b2'
    nb_filters = 2 * base
    x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
               name=bname + 'c1')(x)
    x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
               name=bname + 'c2')(x)
    x = BatchNormalization()(x)
    x = Dropout(0.25)(x)

    # block 3
    bname = 'b3'
    nb_filters = 4 * base
    x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
               name=bname + 'c1')(x)
    x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
               name=bname + 'c2')(x)
    x = BatchNormalization()(x)
    x = Dropout(0.25)(x)

    x = Flatten(name='classifier_flatten')(x)
    x = Dense(1024, activation='relu', name='classifier_dense')(x)
    x = Dropout(0.5)(x)
    sf = Dense(1, activation='sigmoid')(x)

    return Model(img_input, sf, name='Featex')