model = tf.keras.models.Sequential([
  tf.keras.layers.Input(shape=(5,)),              # 5 input features (shape must be a tuple)
  tf.keras.layers.Dense(12800, activation="relu"),
  tf.keras.layers.Dropout(0.2),
  tf.keras.layers.Dense(1600, activation="relu"),
  tf.keras.layers.Dropout(0.2),
  tf.keras.layers.Dense(80, activation="relu"),
  tf.keras.layers.Dense(80, activation="relu"),
  tf.keras.layers.Dense(80, activation="relu"),
  tf.keras.layers.Dense(80, activation="relu"),
  tf.keras.layers.Dense(80, activation="relu"),
  tf.keras.layers.Dense(80, activation="relu"),
  tf.keras.layers.Dense(80, activation="relu"),
  tf.keras.layers.Dense(4)                        # linear output for 4 regression targets
])

model.compile(optimizer="adam",
              loss="mse",
              metrics=['mae', 'mse'])

hist = model.fit(x_train, y_train, epochs=50, validation_split=0.2)  # keep the history; hold out 20% so the val_* keys used below exist


loss, mae, mse = model.evaluate(x_test, y_test, verbose=2)  # returns [loss, mae, mse] for the metrics compiled above



print('\nTest mean absolute error:', mae)

print(hist.history.keys())
print('train loss:', hist.history['loss'][-1])
print('train mae: ', hist.history['mae'][-1])
print('val loss:  ', hist.history['val_loss'][-1])
print('val mae:   ', hist.history['val_mae'][-1])

# %matplotlib inline
import matplotlib.pyplot as plt

plt.plot(hist.history['loss'], label='train')
plt.plot(hist.history['val_loss'], label='validation')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()
"""- 모델 평가(Test Model)"""

test_loss = model.evaluate([test_df.userId, test_df.movieId], test_df.rating)

print('test loss: ', test_loss)
"""- ## 학습된 머신을 활용한 예측"""

pd.options.display.float_format = '{:.2f}'.format  # display formatting for numeric output
ratings_df[(ratings_df['userId'] == 249) & (ratings_df['movieId'] == 70)]  # look up a single user/movie rating
movies_df['movieId'].head(575)  # preview movie IDs
ratings_df.loc[7000]  # inspect one ratings row

userId = 31  # 1 ~ 610
movieId = 165  # 1 ~ 193609; sparse, and not every movieId has a matching row in ratings_df
movie_title = list(movies_df[movies_df['movieId'] == movieId].title)[0]

user_v = np.expand_dims(userid2idx[userId], 0)
movie_v = np.expand_dims(movieid2idx[movieId], 0)
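
# A minimal sketch of how a prediction could be run from here, assuming the
# trained recommendation model takes the index-encoded user/movie pair as its
# two inputs and returns a single predicted rating (names follow the snippet above):
pred_rating = model.predict([user_v, movie_v])
print('predicted rating for "{}" by user {}: {:.2f}'.format(
    movie_title, userId, float(np.squeeze(pred_rating))))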
# Example no. 3
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers

input_dim = X_train.shape[1]  # number of input features

model = Sequential()
model.add(layers.Dense(10, input_dim=input_dim, activation='relu'))  # single hidden layer
model.add(layers.Dense(1, activation='sigmoid'))  # binary classification output
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()
history = model.fit(X_train,
                    y_train,
                    epochs=100,
                    verbose=False,
                    validation_data=(X_test, y_test),
                    batch_size=10)
loss, accuracy = model.evaluate(X_train, y_train, verbose=False)
print("Training Accuracy: {:.4f}".format(accuracy))
loss, accuracy = model.evaluate(X_test, y_test, verbose=False)
print("Testing Accuracy:  {:.4f}".format(accuracy))

# In[ ]:

data = [' '.join(text) for text in base]  # join token lists back into whitespace-separated strings
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(data)  # sparse TF-IDF feature matrix
temp = hstack((vec_features, X))  # column-stack the extra features with the TF-IDF matrix (scipy.sparse.hstack for sparse X)

X = temp
y = df['Rating']
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
X_train, X_test, y_train, y_test = train_test_split(X,