# Example #1
    model.compile(loss='categorical_crossentropy',
                  optimizer='Adam',
                  metrics=['acc'])

    # Train the model
    nama_filenya = "weights_" + vartuning + "_.hdf5"

    checkpointer = ModelCheckpoint(filepath=nama_filenya,
                                   monitor='val_acc',
                                   verbose=1,
                                   save_best_only=True,
                                   save_weights_only=True)
    hist = model.fit(train_set_R1,
                     Y_train,
                     validation_data=(test_set_R1, Y_test),
                     batch_size=16,
                     nb_epoch=jumEpoch,
                     shuffle=True,
                     verbose=1,
                     callbacks=[checkpointer])

    # Evaluate the model
    # load best model
    model.load_weights(nama_filenya)

    Y_pred = model.predict(test_set_R1, batch_size=8)

    #print(Y_pred)
    k_val = 1
    Y_pred_label = []
    for idt in range(len(Y_pred)):
        Y_pred_label.append(np.argmax(Y_pred[idt]))
# Example #2
def distortion_model_functional(X_train, Y_train, X_val, Y_val, params):
    print("In", len(X_train))
    # nb_conv = 4
    # nb_pool = 2
    batch_size = 64
    nb_epoch = 1000
    opt = keras.optimizers.Adam()

    nb_filters = params['nb_filters']
    nb_conv = params['nb_conv']
    nb_pool = params['nb_pool']
    nb_layer = params['nb_layer']
    dropout = params['dropout']
    hidden = params['nb_hidden']

    nb_classes = Y_train.shape[1]
    input_shape = (8, 8, 1)

    input_pattern = Input(shape=input_shape, name='input1')

    # build the rest of the network
    model = Conv2D(nb_filters, (nb_conv, nb_conv),
                   padding='valid',
                   input_shape=input_shape,
                   name='conv2d_0')(input_pattern)
    model = BatchNormalization()(model)
    model = Activation('relu')(model)
    model = Dropout(dropout)(model)
    model = MaxPooling2D(pool_size=(nb_pool, nb_pool))(model)
    model = BatchNormalization()(model)
    # model = Activation('relu')(model)

    for i in range(1, nb_layer):
        model = Conv2D(nb_filters, nb_conv, nb_conv,
                       name='conv2d_' + str(i))(model)
        model = BatchNormalization()(model)
        model = Activation('relu')(model)
        model = Dropout(dropout)(model)
        model = MaxPooling2D(pool_size=(nb_pool, nb_pool))(model)
        model = BatchNormalization()(model)
    # model = Activation('relu')(model)

    model = Dense(hidden, activation='relu')(model)
    model = Activation('relu')(model)
    # model.add(Activation('sigmoid'))
    model = Dropout(dropout)(model)
    out = Dense(nb_classes, name='dense_output', activation='linear')(model)

    model = Model(inputs=[input_pattern], outputs=out)
    model.summary()

    model.compile(loss='mean_squared_error', optimizer=opt)
    history = model.fit(X_train,
                        Y_train,
                        batch_size=batch_size,
                        epochs=nb_epoch,
                        verbose=0,
                        validation_data=[X_val, Y_val],
                        shuffle=True,
                        metric="mse")

    return history, model