func_c3d.trainable = False model = BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None)(func_c3d) modelOut = Dense(nb_classes, init='normal', activation='softmax')(model) model = models.Model(inputs=x, outputs=modelOut) model.summary() model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['acc']) # Train the model nama_filenya = "weights_" + vartuning + "_.hdf5" checkpointer = ModelCheckpoint(filepath=nama_filenya, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=True) hist = model.fit(train_set_R1, Y_train,
def distortion_model_functional(X_train, Y_train, X_val, Y_val, params):
    """Build, compile, and train a small Conv2D regression network.

    A stack of `nb_layer` Conv2D/BatchNorm/ReLU/Dropout/MaxPool blocks is
    followed by a flattened Dense head with a linear output, trained with
    mean-squared error.

    Parameters
    ----------
    X_train, Y_train : array-like
        Training inputs (expected shape (n, 8, 8, 1) — matches the fixed
        `input_shape` below) and targets; `Y_train.shape[1]` defines the
        output width.
    X_val, Y_val : array-like
        Validation split passed to `fit`.
    params : dict
        Hyperparameters: 'nb_filters', 'nb_conv', 'nb_pool', 'nb_layer',
        'dropout', 'nb_hidden'.

    Returns
    -------
    (history, model) : (keras History, keras Model)
    """
    print("In", len(X_train))

    batch_size = 64
    nb_epoch = 1000
    opt = keras.optimizers.Adam()

    nb_filters = params['nb_filters']
    nb_conv = params['nb_conv']
    nb_pool = params['nb_pool']
    nb_layer = params['nb_layer']
    dropout = params['dropout']
    hidden = params['nb_hidden']
    nb_classes = Y_train.shape[1]

    # NOTE(review): input shape is hard-coded; presumably all callers feed
    # 8x8 single-channel patterns — confirm against the data pipeline.
    input_shape = (8, 8, 1)
    input_pattern = Input(shape=input_shape, name='input1')

    # First convolutional block.
    model = Conv2D(nb_filters, (nb_conv, nb_conv), padding='valid',
                   input_shape=input_shape, name='conv2d_0')(input_pattern)
    model = BatchNormalization()(model)
    model = Activation('relu')(model)
    model = Dropout(dropout)(model)
    model = MaxPooling2D(pool_size=(nb_pool, nb_pool))(model)
    model = BatchNormalization()(model)

    # Additional convolutional blocks.
    for i in range(1, nb_layer):
        # BUG FIX: kernel size must be a single tuple in Keras 2. The old
        # positional form Conv2D(filters, nb_conv, nb_conv) made the second
        # nb_conv the `strides` argument, silently changing the architecture.
        model = Conv2D(nb_filters, (nb_conv, nb_conv),
                       name='conv2d_' + str(i))(model)
        model = BatchNormalization()(model)
        model = Activation('relu')(model)
        model = Dropout(dropout)(model)
        model = MaxPooling2D(pool_size=(nb_pool, nb_pool))(model)
        model = BatchNormalization()(model)

    # BUG FIX: flatten the spatial dimensions so the Dense head produces a
    # (batch, nb_classes) output that matches Y_train under MSE. Without it
    # the output stayed 4-D (batch, h, w, nb_classes).
    model = keras.layers.Flatten()(model)
    model = Dense(hidden, activation='relu')(model)
    # (Removed a redundant Activation('relu') here — the Dense layer above
    # already applies ReLU, and relu(relu(x)) == relu(x).)
    model = Dropout(dropout)(model)

    out = Dense(nb_classes, name='dense_output', activation='linear')(model)

    model = Model(inputs=[input_pattern], outputs=out)
    model.summary()
    model.compile(loss='mean_squared_error', optimizer=opt)

    # BUG FIX: Model.fit() has no `metric` keyword — the original call raised
    # TypeError. Metrics belong in compile(); dropped here. validation_data
    # is passed as a tuple per the fit() API.
    history = model.fit(X_train, Y_train,
                        batch_size=batch_size,
                        epochs=nb_epoch,
                        verbose=0,
                        validation_data=(X_val, Y_val),
                        shuffle=True)
    return history, model