Пример #1
0
def alexNet(x_train):
    """Build and compile a 1-D AlexNet-style CNN for binary classification.

    Adapted from https://gist.github.com/JBed/c2fb3ce8ed299f197eff

    Parameters
    ----------
    x_train : array-like
        Training data; only ``x_train.shape[1]`` (the sequence length) is
        read, to size the input layer.

    Returns
    -------
    tuple
        ``(model, 'functional')`` — the compiled Keras model and a tag
        naming the API style used to build it.
    """
    model_input = Input(shape=(x_train.shape[1], 1))

    # Four conv -> BN -> ReLU -> max-pool stages of increasing width.
    x = model_input
    for filters, kernel_size in ((64, 3), (128, 7), (192, 3), (256, 3)):
        x = Convolution1D(filters=filters, kernel_size=kernel_size)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = MaxPooling1D(pool_size=3)(x)

    # Fully-connected head.
    # BUG FIX: the original passed ``init='normal'`` (Keras 1 keyword).
    # This file already uses ``optimizers.Adam(learning_rate=...)``, which
    # only exists in Keras >= 2.3 / TF2 — where ``init`` raises a
    # TypeError. The Keras 2 name is ``kernel_initializer``.
    x = Flatten()(x)
    for _ in range(2):
        x = Dense(512, kernel_initializer='normal')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

    # Single sigmoid unit: binary-classification output.
    x = Dense(1, kernel_initializer='normal')(x)
    x = BatchNormalization()(x)
    x = Activation('sigmoid')(x)

    model = Model(inputs=model_input, outputs=x)

    # NOTE(review): 10e-5 == 1e-4; kept as written — confirm the intended
    # learning rate (1e-5 vs 1e-4).
    opt = optimizers.Adam(learning_rate=10e-5, beta_1=0.9,
                          beta_2=0.999, amsgrad=False)
    # customLoss and auc_roc are defined elsewhere in the original project.
    model.compile(loss=customLoss, optimizer=opt,
                  metrics=['accuracy', auc_roc])
    return model, 'functional'
Пример #2
0
                               # (fragment) Continuation of a layer call — the opening
                               # `BatchNormalization(` line lies outside this excerpt.
                               # NOTE(review): beta_init/gamma_init are Keras 1 keywords
                               # (renamed *_initializer in Keras 2) — confirm Keras version.
                               axis=-1,
                               momentum=0.99,
                               weights=None,
                               beta_init='zero',
                               gamma_init='one',
                               gamma_regularizer=None,
                               beta_regularizer=None)(func_c3d)

    # Classification head: softmax over nb_classes (Keras 1 `init` keyword).
    modelOut = Dense(nb_classes, init='normal', activation='softmax')(model)

    # Wire the functional graph from input tensor `x` to the softmax output.
    model = models.Model(inputs=x, outputs=modelOut)

    model.summary()

    model.compile(loss='categorical_crossentropy',
                  optimizer='Adam',
                  metrics=['acc'])

    # Train the model
    # Checkpoint filename embeds the tuning-variant tag `vartuning`.
    nama_filenya = "weights_" + vartuning + "_.hdf5"

    # Persist only the best weights, ranked by validation accuracy.
    checkpointer = ModelCheckpoint(filepath=nama_filenya,
                                   monitor='val_acc',
                                   verbose=1,
                                   save_best_only=True,
                                   save_weights_only=True)
    # NOTE(review): this fit(...) call is truncated — its argument list
    # (and closing parenthesis) continues beyond this excerpt.
    hist = model.fit(train_set_R1,
                     Y_train,
                     validation_data=(test_set_R1, Y_test),
                     batch_size=16,
                     nb_epoch=jumEpoch,
        # (fragment) Interior of a loop over image files; the enclosing
        # `for` header (binding i and a) lies outside this excerpt.
        file = cv2.imread(val_Path + f'word{i}/{a}', cv2.IMREAD_GRAYSCALE)
        word.append(file)
        # DataFrame rebuilt from the growing `word` list on every iteration.
        df_pred = pd.DataFrame(list(zip(word)), columns=['Name'])
        pred_img = df_pred['Name']
        pred_img = np.array(list(pred_img))
        #Normalize the data
        pred_img = pred_img / 255
        img_shape = pred_img.shape[0]
        #reshape data to fit model
        pred_img = pred_img.reshape(img_shape, 64, 64, 1)
        model_path = Path + 'fm_cnn_BN16.h5'
        # load the saved best model weights
        # NOTE(review): reloading and recompiling the model on every
        # iteration is expensive — consider hoisting it out of the loop.
        model = load_model(model_path)

        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.RMSprop(),
                      metrics=['accuracy'])

        # predict outputs on validation images
        prediction = model.predict(pred_img)
        prediction = np.argmax(prediction, axis=1)
        # Map predicted class indices back to letters via the pickled encoder.
        with open(Path + 'encoder4.pickle', 'rb') as handle:
            label_encoder = pickle.load(handle)
        letter = label_encoder.inverse_transform(prediction)

    # After the loop: join the last batch of predicted letters into a word
    # and append it (plus a separating space) to the sentence accumulator.
    words = ''.join(letter)
    print(words)
    space = ' '
    sent.append(words)
    sent.append(space)
Пример #4
0
def distortion_model_functional(X_train, Y_train, X_val, Y_val, params):
    """Build and train a small CNN mapping 8x8 single-channel patterns
    to a linear multi-output regression target (mean-squared-error loss).

    Parameters
    ----------
    X_train, Y_train : arrays
        Training inputs, assumed (N, 8, 8, 1), and targets (N, nb_classes).
    X_val, Y_val : arrays
        Validation split, same layout as the training data.
    params : dict
        Hyper-parameters: 'nb_filters', 'nb_conv', 'nb_pool',
        'nb_layer', 'dropout', 'nb_hidden'.

    Returns
    -------
    tuple
        ``(history, model)`` — the Keras training History and the fitted
        functional Model.
    """
    print("In", len(X_train))
    batch_size = 64
    nb_epoch = 1000
    opt = keras.optimizers.Adam()

    nb_filters = params['nb_filters']
    nb_conv = params['nb_conv']
    nb_pool = params['nb_pool']
    nb_layer = params['nb_layer']
    dropout = params['dropout']
    hidden = params['nb_hidden']

    nb_classes = Y_train.shape[1]
    input_shape = (8, 8, 1)

    input_pattern = Input(shape=input_shape, name='input1')

    # First conv stage: conv -> BN -> ReLU -> dropout -> pool -> BN.
    model = Conv2D(nb_filters, (nb_conv, nb_conv),
                   padding='valid',
                   input_shape=input_shape,
                   name='conv2d_0')(input_pattern)
    model = BatchNormalization()(model)
    model = Activation('relu')(model)
    model = Dropout(dropout)(model)
    model = MaxPooling2D(pool_size=(nb_pool, nb_pool))(model)
    model = BatchNormalization()(model)

    # Additional conv stages.
    # BUG FIX: the original called Conv2D(nb_filters, nb_conv, nb_conv) —
    # in Keras 2 the third positional argument is `strides`, so the stride
    # was silently set to nb_conv instead of the default 1. Pass the kernel
    # size as a tuple, matching 'conv2d_0' above.
    for i in range(1, nb_layer):
        model = Conv2D(nb_filters, (nb_conv, nb_conv),
                       name='conv2d_' + str(i))(model)
        model = BatchNormalization()(model)
        model = Activation('relu')(model)
        model = Dropout(dropout)(model)
        model = MaxPooling2D(pool_size=(nb_pool, nb_pool))(model)
        model = BatchNormalization()(model)

    # BUG FIX: flatten before the dense head. Without it Dense acts on the
    # last axis of a 4-D feature map, the output stays 4-D, and the
    # (N, nb_classes) targets cannot match it at fit time.
    model = Flatten()(model)
    # The hidden Dense already applies ReLU; the original's extra
    # Activation('relu') was redundant (relu is idempotent) and is dropped.
    model = Dense(hidden, activation='relu')(model)
    model = Dropout(dropout)(model)
    out = Dense(nb_classes, name='dense_output', activation='linear')(model)

    model = Model(inputs=[input_pattern], outputs=out)
    model.summary()

    # BUG FIX: metrics belong to compile(); the original passed
    # metric="mse" to fit(), which is not a valid keyword and raises a
    # TypeError before training starts.
    model.compile(loss='mean_squared_error', optimizer=opt, metrics=['mse'])
    history = model.fit(X_train,
                        Y_train,
                        batch_size=batch_size,
                        epochs=nb_epoch,
                        verbose=0,
                        validation_data=(X_val, Y_val),
                        shuffle=True)

    return history, model
Пример #5
0
if __name__ == '__main__':
    # output predicted labels in separate folder for easy viewing
    # (one directory per digit class 0-9; requires a POSIX shell).
    for i in range(10):
        os.system("mkdir -p predicted_images/" + str(i))

    # select which data you want to evaluate on (validation or testing)
    # NOTE(review): X_*/Y_*/y_* are defined elsewhere — presumably Y is
    # one-hot targets and y the integer labels; confirm against the caller.
    X_evaluation = X_test
    Y_evaluation = Y_test
    y_evaluation = y_test

    # Pin work to the CPU — presumably to avoid GPU use during this
    # sample-by-sample evaluation; confirm intent.
    with tf.device('/cpu:0'):
        # `model`, `sgd` and MODEL_PATH are built/defined elsewhere in the file.
        model.load_weights(
            os.path.join(MODEL_PATH, 'WRN-16-2-own-81accuracy.h5'))
        model.compile(optimizer=sgd,
                      loss="categorical_crossentropy",
                      metrics=['accuracy'])

        # Normalisation/ZCA statistics are fitted on the *training* set,
        # then applied to the evaluation set via the generator.
        validation_datagen = ImageDataGenerator(
            featurewise_center=True,
            featurewise_std_normalization=True,
            zca_whitening=True)
        validation_datagen.fit(X_train)
        # batch_size=1 + shuffle=False: deterministic one-sample-at-a-time flow.
        generator = validation_datagen.flow(X_evaluation,
                                            Y_evaluation,
                                            batch_size=1,
                                            shuffle=False)

        total_correct = 0
        # NOTE(review): the loop body continues beyond this excerpt.
        for sample_idx in range(X_evaluation.shape[0]):
            (X, y) = generator.next()