Example #1
(trainAttrX, testAttrX, trainImagesX, testImagesX) = split

# find the largest house price in the training set and use it to
# scale our house prices to the range [0, 1] (will lead to better
# training and convergence)

maxPrice = trainAttrX["price"].max()
trainY = trainAttrX["price"] / maxPrice
testY = testAttrX["price"] / maxPrice
(trainAttrX, testAttrX) = datasets.process_house_attributes(df, trainAttrX, testAttrX)
# create the MLP and CNN models, then concatenate their outputs and
# compile the combined model using mean absolute percentage error as
# our loss, implying that we seek to minimize the absolute percentage
# difference between our price *predictions* and the *actual prices*
mlp = models.create_mlp(trainAttrX.shape[1], regress=True)
cnn = models.create_cnn(64, 64, 3, regress=True)
combinedInput = concatenate([mlp.output, cnn.output])

x = Dense(4, activation="relu")(combinedInput)
x = Dense(1, activation="linear")(x)
model = Model(inputs=[mlp.input, cnn.input], outputs=x)
opt = Adam(lr=1e-3, decay=1e-3 / 200)
model.compile(loss="mean_absolute_percentage_error", optimizer=opt)

# train the model
print("[INFO] training model...")
model.fit(
	[trainAttrX, trainImagesX], trainY,
	validation_data=([testAttrX, testImagesX], testY),
	epochs=200, batch_size=8)
model.save('my_mixed_model.h5')  # creates an HDF5 file 'my_mixed_model.h5'
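
Since the example saves the trained network to HDF5, it can be reloaded later in one call; a minimal sketch (the file name is taken from the save above, and predictions are rescaled by maxPrice to recover dollar amounts):

from keras.models import load_model

# reload the trained multi-input model saved above
model = load_model('my_mixed_model.h5')

# predictions come back in the scaled [0, 1] space; multiply by
# maxPrice to recover actual house prices
preds = model.predict([testAttrX, testImagesX]).flatten() * maxPrice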
Example #2
# find the largest house price in the training set and use it to
# scale our house prices to the range [0, 1] (will lead to better
# training and convergence)
maxPrice = trainAttrX["price"].max()
trainY = trainAttrX["price"] / maxPrice
testY = testAttrX["price"] / maxPrice

# process the house attributes data by performing min-max scaling
# on continuous features, one-hot encoding on categorical features,
# and then finally concatenating them together
(trainAttrX,
 testAttrX) = datasets.process_house_attributes(df, trainAttrX, testAttrX)

# create the MLP and CNN models
mlp = models.create_mlp(trainAttrX.shape[1], regress=False)
cnn = models.create_cnn(64, 64, 3, regress=False)

# create the input to our final set of layers as the *output* of both
# the MLP and CNN
combinedInput = concatenate([mlp.output, cnn.output])

# our final FC layer head will have two dense layers, the final one
# being our regression head
x = Dense(4, activation="relu")(combinedInput)
x = Dense(1, activation="linear")(x)

# our final model will accept categorical/numerical data on the MLP
# input and images on the CNN input, outputting a single value (the
# predicted price of the house)
model = Model(inputs=[mlp.input, cnn.input], outputs=x)
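
Example #2 stops at model construction; a minimal sketch of the compile/train step that would typically follow, mirroring the hyperparameters of Example #1 (all values assumed):

# compile with mean absolute percentage error, as in Example #1
opt = Adam(lr=1e-3, decay=1e-3 / 200)
model.compile(loss="mean_absolute_percentage_error", optimizer=opt)

# train on the attribute and image inputs simultaneously
model.fit(
    [trainAttrX, trainImagesX], trainY,
    validation_data=([testAttrX, testImagesX], testY),
    epochs=200, batch_size=8)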
Example #3
# find the largest target value (a distance here, not a house price)
# in the training set and use it to scale the targets to the range
# [0, 1] (will lead to better training and convergence)
maxDist = np.max(y_train)
print(maxDist)
trainY = y_train / maxDist
valY = y_val / maxDist
testY = y_test / maxDist

# create our Convolutional Neural Network and then compile the model
# using mean absolute percentage error as our loss, implying that we
# seek to minimize the absolute percentage difference between our
# *predictions* and the *actual target values*

_, width, height, depth = X_train_cut.shape

model = models.create_cnn(height, width, depth, regress=True)
opt = Adam(lr=1e-3, decay=1e-3 / N_EPOCHS)
model.compile(loss="mean_absolute_percentage_error", optimizer=opt)
model.summary()

# train the model
print("[INFO] training model...")
training_history = model.fit(X_train_cut,
                             trainY,
                             validation_data=(X_val_cut, valY),
                             epochs=N_EPOCHS,
                             batch_size=8,
                             verbose=1)

print('Saving Model (JSON), Training History & Weights...', end='')
model_json_str = model.to_json()
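
The fragment ends right after serializing the architecture; a minimal sketch of completing the save (the file names here are assumptions):

# write the architecture to JSON and the weights to HDF5
with open('model.json', 'w') as f:  # assumed file name
    f.write(model_json_str)
model.save_weights('model_weights.h5')  # assumed file name
print(' done.')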
Example #4
# partition the data into training and testing splits using 80% of
# the data for training and the remaining 20% for testing
split = train_test_split(df,
                         images,
                         test_size=0.2,
                         random_state=42,
                         stratify=df['Gender&Side'])
(trainAttrX, testAttrX, trainImagesX, testImagesX) = split

testY = testAttrX["Grade"]

# create our Convolutional Neural Network and then compile the model
# using sparse categorical cross-entropy as our loss, since here we
# are *classifying* the grade rather than regressing a price
model = models.create_cnn(1024, 1024, 1, regress=False)
opt = Adam(lr=1e-3, decay=1e-3 / 200)
model.compile(loss="sparse_categorical_crossentropy",
              optimizer=opt,
              metrics=['accuracy'])

# train the model
print("[INFO] training model...")
# carve out 10% of the training data as a validation set
splitval = train_test_split(trainAttrX,
                            trainImagesX,
                            test_size=0.1,
                            random_state=42)
(trainAttrX, valAttrX, trainImagesX, valImagesX) = splitval

trainY = trainAttrX["Grade"]
Example #5
# partition the data into training and testing splits using 75% of
# the data for training and the remaining 25% for testing
split = train_test_split(df, images, test_size=0.25, random_state=42)
(trainAttrX, testAttrX, trainImagesX, testImagesX) = split

# find the largest house price in the training set and use it to
# scale our house prices to the range [0, 1] (will lead to better
# training and convergence)
maxPrice = trainAttrX["price"].max()
trainY = trainAttrX["price"] / maxPrice
testY = testAttrX["price"] / maxPrice

# create our Convolutional Neural Network and then compile the model
# using mean absolute percentage error as our loss, implying that we
# seek to minimize the absolute percentage difference between our
# price *predictions* and the *actual prices*
model = models.create_cnn(256, 256, 3, regress=True)
opt = Adam(lr=1e-3, decay=1e-3 / 200)
model.compile(loss="mean_absolute_percentage_error", optimizer=opt)

# train the model
print("[INFO] training model...")
model.fit(trainImagesX,
          trainY,
          validation_data=(testImagesX, testY),
          epochs=200,
          batch_size=8)

# make predictions on the testing data
print("[INFO] predicting house prices...")
preds = model.predict(testImagesX)
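
A common follow-up is to report the absolute percentage error of the predictions against the scaled test prices; a minimal sketch, assuming numpy is imported as np:

# compute the percentage difference between predicted and actual
# prices, then report its mean and standard deviation
diff = preds.flatten() - testY
percentDiff = (diff / testY) * 100
absPercentDiff = np.abs(percentDiff)
print("[INFO] mean: {:.2f}%, std: {:.2f}%".format(
    absPercentDiff.mean(), absPercentDiff.std()))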
Example #6
File: cnn.py  Project: gungor2/earthquake
# partition the data into training and testing splits using 20% of
# the data for training and the remaining 80% for testing
split = train_test_split(y_tr, images, test_size=0.8, random_state=42)
(trainAttrX, testAttrX, trainImagesX, testImagesX) = split

# find the largest time-to-failure in the training set and use it to
# scale the targets to the range [0, 1] (will lead to better
# training and convergence)
maxPrice = trainAttrX['time_to_failure'].max()
trainY = trainAttrX['time_to_failure'] / maxPrice
testY = testAttrX['time_to_failure'] / maxPrice

# create our Convolutional Neural Network and then compile the model
# using mean absolute percentage error as our loss, implying that we
# seek to minimize the absolute percentage difference between our
# time-to-failure *predictions* and the *actual values*
model = models.create_cnn(669, 129, 1, regress=True)
opt = Adam(lr=1e-3, decay=1e-3 / 200)
model.compile(loss="mean_absolute_percentage_error", optimizer=opt)

# train the model
print("[INFO] training model...")
model.fit(trainImagesX,
          trainY,
          validation_data=(testImagesX, testY),
          epochs=1,
          batch_size=5)

# make predictions on the testing data
print("[INFO] predicting house prices...")
preds = model.predict(testImagesX)
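
The predictions live in the scaled [0, 1] space; a minimal sketch of mapping them back to the original time-to-failure units:

# undo the [0, 1] scaling to recover time-to-failure in original units
predTimes = preds.flatten() * maxPrice
print("[INFO] first predicted times to failure:", predTimes[:5])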
Example #7
# build a flat feature matrix by passing each feature column through
# a DenseFeatures layer and concatenating the results column-wise
# (the `first` flag and the loop over `feature_columns` are assumed;
# the original fragment starts mid-loop)
first = True
for feature in feature_columns:
    feature_layer = tf.keras.layers.DenseFeatures(feature)
    feature_array = feature_layer(dict(df)).numpy()
    if first:
        test_data = feature_array
        first = False
        continue
    test_data = np.concatenate((test_data, feature_array), axis=1)
    print(feature_layer(dict(df)).numpy())

#%%
import keras
from keras.layers import LeakyReLU
from keras.callbacks import EarlyStopping, ReduceLROnPlateau

mlp = models.create_mlp(np.asarray(test_data).shape[1], regress=True)
cnn_left = models.create_cnn(256, 128, 6, regress=False)
# cnn_right = models.create_cnn(256, 128, 6, regress=False)

# create the input to our final set of layers as the *output* of both
# the MLP and CNN
# combinedInput = concatenate([mlp.output, cnn_left.output, cnn_right.output])
combinedInput = concatenate([mlp.output, cnn_left.output])

# our final FC layer head: two LeakyReLU dense layers followed by the
# linear regression head
x = Dense(8)(combinedInput)
x = LeakyReLU(alpha=0.2)(x)
x = Dense(4)(x)
x = LeakyReLU(alpha=0.2)(x)
x = Dense(1, activation="linear")(x)

# our final model will accept numerical data on the MLP input and
# images on the CNN input, outputting a single value; the two-CNN
# variant is kept commented out to match the disabled cnn_right above
# model = Model(inputs=[mlp.input, cnn_left.input, cnn_right.input], outputs=x)
model = Model(inputs=[mlp.input, cnn_left.input], outputs=x)
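
The EarlyStopping and ReduceLROnPlateau callbacks are imported above but never used in the fragment; a minimal sketch of how they could be wired into training (the optimizer, loss, and the trainImagesX/trainY names are assumptions, not shown in the original):

callbacks = [
    # stop early and restore the best weights if validation loss stalls
    EarlyStopping(monitor="val_loss", patience=10, restore_best_weights=True),
    # halve the learning rate when validation loss plateaus
    ReduceLROnPlateau(monitor="val_loss", factor=0.5, patience=5),
]

model.compile(loss="mean_absolute_percentage_error", optimizer="adam")
# trainImagesX / trainY are assumed names for the image input and targets
model.fit([test_data, trainImagesX], trainY,
          validation_split=0.1, epochs=200, batch_size=8,
          callbacks=callbacks)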