import numpy as np
from keras import layers, models

# PrimaryCap, CapsuleLayer, Length and Mask are custom capsule layers; the
# module name below is an assumption (capsulelayers.py, as in CapsNet-Keras).
from capsulelayers import PrimaryCap, CapsuleLayer, Length, Mask


def capsule_net(input_shape, n_class, routings):
    x = layers.Input(shape=input_shape)

    # Layer 1: a standard convolution, followed by the primary and digit capsules.
    conv1 = layers.Conv2D(filters=256, kernel_size=9, strides=1, padding='valid',
                          activation='relu', name='conv1')(x)
    primary_capsule = PrimaryCap(conv1, dim_capsule=8, n_channels=32,
                                 kernel_size=9, strides=2, padding='valid')
    digit_capsule = CapsuleLayer(output_num_capsules=n_class, output_dim_capsules=16,
                                 routings=routings, name='digit_capsule')(primary_capsule)

    # Replace each capsule vector by its length, i.e. the class probability.
    output_capsules = Length(name='capsule_net')(digit_capsule)

    # During training the capsules are masked by the true label y; at test time
    # Mask() keeps the capsule with the longest output vector.
    y = layers.Input(shape=(n_class,))
    masked_with_y = Mask()([digit_capsule, y])
    masked = Mask()(digit_capsule)

    decoder = models.Sequential(name='decoder')
    decoder.add(layers.Dense(units=512, activation='relu', input_dim=16 * n_class))
    decoder.add(layers.Dense(units=1024, activation='relu'))
    # Sigmoid, not softmax: each reconstructed pixel is predicted independently.
    decoder.add(layers.Dense(units=np.prod(input_shape), activation='sigmoid'))
    decoder.add(layers.Reshape(target_shape=input_shape, name='output_reconstruction'))

    train_model = models.Model([x, y], [output_capsules, decoder(masked_with_y)])
    eval_model = models.Model(x, [output_capsules, decoder(masked)])

    # Manipulate model: add external noise to the digit capsules to probe what
    # each capsule dimension encodes.
    noise = layers.Input(shape=(n_class, 16))
    noised_digitcaps = layers.Add()([digit_capsule, noise])
    masked_noised_y = Mask()([noised_digitcaps, y])
    manipulate_model = models.Model([x, y, noise], decoder(masked_noised_y))
    return train_model, eval_model, manipulate_model
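
# --- Usage sketch (not part of the original example) -------------------------
# The margin loss below follows Sabour et al. (2017); the MNIST-style shapes,
# the optimizer, and the 0.392 reconstruction weight (0.0005 * 784, as in
# CapsNet-Keras) are assumptions for illustration.
from keras import backend as K

def margin_loss(y_true, y_pred):
    # L_c = T_c * max(0, 0.9 - ||v_c||)^2 + 0.5 * (1 - T_c) * max(0, ||v_c|| - 0.1)^2
    L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) \
        + 0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))
    return K.mean(K.sum(L, axis=1))

train_model, eval_model, manipulate_model = capsule_net(
    input_shape=(28, 28, 1), n_class=10, routings=3)
train_model.compile(optimizer='adam',
                    loss=[margin_loss, 'mse'],  # capsule loss + reconstruction loss
                    loss_weights=[1.0, 0.392])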
# Layer 2: primary capsules. The Dropout line must stay active, since
# primary_caps consumes its output below.
dropout = Dropout(0.25)(output)
primary_caps = PrimaryCap(dropout, dim_vector=dim_capsule1, n_channels=3, kernel_size=9, strides=2, padding='same',
                          name="primary_caps")
primary_caps = BatchNormalization()(primary_caps)
primary_caps = Dropout(0.3)(primary_caps)
# Layer 3: Capsule layer. Routing algorithm works here.
category_caps = CategoryCap(num_capsule=num_capsule1, dim_vector=dim_capsule1,
                            num_routing=num_routing, name='category_caps')(primary_caps)

category_caps = BatchNormalization()(category_caps)
category_caps = Dropout(0.3)(category_caps)

print('category_caps', category_caps)

category_caps = Length(name='out_caps')(category_caps)

output = Dense(1, activation='sigmoid')(category_caps)
model = Model(inputs=inputs, outputs=[output])
model.summary()
model.compile(loss='binary_crossentropy', optimizer=Adam(0.0001), metrics=['accuracy'])
checkpoint_filepath = 'E:/DeepLearning/bully_code/diyu/indrnn.h5'
checkpoint = ModelCheckpoint(checkpoint_filepath,
                             monitor='acc',
                             verbose=0,
                             save_best_only=True,
                             mode='max')
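
# Metrics is not defined in this example. A minimal sketch of such a callback,
# assuming it logs validation precision/recall/F1 after every epoch (only the
# class name is taken from the original):
from keras.callbacks import Callback
from sklearn.metrics import f1_score, precision_score, recall_score

class Metrics(Callback):
    def on_epoch_end(self, epoch, logs=None):
        # validation_data is populated when fit() receives validation data (Keras 2.x).
        x_val, y_val = self.validation_data[0], self.validation_data[1]
        y_pred = (self.model.predict(x_val) > 0.5).astype(int).ravel()
        print(' - val_f1: %.4f - val_precision: %.4f - val_recall: %.4f'
              % (f1_score(y_val, y_pred), precision_score(y_val, y_pred),
                 recall_score(y_val, y_pred)))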


metrics = Metrics()
history = model.fit(x_train, y_train,
                    batch_size=32, epochs=30,        # assumed hyperparameters
                    validation_data=(x_val, y_val),  # x_val/y_val assumed to exist
                    callbacks=[checkpoint, metrics])
Example #3
k = 12

output = concatenate([texture, wo_ind])
print("output:", output)
"""
自注意力机制的加载
"""
from keras_self_attention import SeqSelfAttention
output = SeqSelfAttention(attention_activation='relu',
                          name='self_attention')(output)

output = Bidirectional(
    LSTM(lstm_output_size, dropout=0.5, return_sequences=True))(output)

category_caps = Length(name='out_caps')(output)

preds = Dense(1, activation='sigmoid')(category_caps)
print("training>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
model = Model(inputs=[sequence_input2, sequence_input4], outputs=[preds])
model.summary()

model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=['acc'])

checkpoint = ModelCheckpoint(
    filepath="best_model.h5",  # where the best model will be stored
    monitor='val_acc',         # or any other logged quantity, e.g. acc, loss, val_loss
    verbose=1,                 # 1 prints a message whenever a checkpoint is saved, 0 stays silent
    save_best_only=True,       # keep only the best model instead of one per epoch
    save_weights_only=True,
    mode='max')                # 'max', since the monitored value is an accuracy
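
# Usage sketch rounding off this example (not in the original): the two input
# arrays match [sequence_input2, sequence_input4]; their names and the
# hyperparameters are assumptions for illustration.
history = model.fit([x_train2, x_train4], y_train,
                    batch_size=64, epochs=20,
                    validation_split=0.1,
                    callbacks=[checkpoint])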