    # Pull one transformed batch out of each ImageDataGenerator and flatten the images
    # to 2-D arrays, since XGBClassifier cannot consume Keras generators directly.
    x_train, y_train = next(idg.flow(x_train, y_train, batch_size=len(x_train)))
    x_test, y_test = next(idg2.flow(x_test, y_test, batch_size=len(x_test)))
    x_pred = next(idg2.flow(x_pred, batch_size=len(x_pred), shuffle=False))
    x_train, x_test, x_pred = (a.reshape(len(a), -1) for a in (x_train, x_test, x_pred))

    # Modeling
    model = XGBClassifier(learning_rate=0.01, n_jobs=-1)

    # Training -- pass the test split as eval_set so a validation curve can be plotted later
    # (XGBClassifier.fit returns the fitted model itself, not a Keras-style History object)
    model.fit(x_train, y_train, eval_set=[(x_test, y_test)], verbose=False)

    # predict
    # model.save('C:/data/h5/vision_model2.h5') # save model 2
    # model.save_weights('C:/data/h5/vision_model2_weight.h5') # save weights
    # model.load_model('C:/data/h5/vision_model2.h5') # load model
    # model.load_weights('C:/data/h5/vision_model2_weight.h5') # load weights
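    # Note: the save/load calls above are Keras Model methods, which XGBClassifier does not
    # provide. A minimal sketch using xgboost's own persistence API (file path is a placeholder):
    # model.save_model('C:/data/h5/vision_model2.json')
    # restored = XGBClassifier()
    # restored.load_model('C:/data/h5/vision_model2.json')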
    # XGBClassifier has no predict_generator; predict class probabilities on the flattened test set
    result = model.predict_proba(x_pred) / 8

    # save the validation curve recorded by eval_set during fit
    hist = pd.DataFrame(model.evals_result()['validation_0'])

print(model)  # XGBClassifier has no Keras-style summary(); printing shows its hyper-parameters

# 3.1 Visualization
hist = pd.DataFrame(model.evals_result()['validation_0'])
print(hist.min())  # lowest validation metric value reached during training

print(hist.columns)
plt.title('Validation loss')
plt.xlabel('boosting rounds')

plt.plot(hist.iloc[:, 0], label=hist.columns[0])
plt.legend()
plt.show()
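# To also get the training curve implied by the original title, the training split can be added
# to eval_set as well, e.g. fit(..., eval_set=[(x_train, y_train), (x_test, y_test)]).
# A sketch, guarded so it only runs if such a two-entry eval_set was used:
if len(model.evals_result()) > 1:
    curves = model.evals_result()
    plt.plot(pd.DataFrame(curves['validation_0']).iloc[:, 0], label='train')
    plt.plot(pd.DataFrame(curves['validation_1']).iloc[:, 0], label='val')
    plt.legend()
    plt.show()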
########################### MODEL 3 ###########################
model3 = XGBClassifier()

# fit: XGBClassifier has no fit_generator, so draw the arrays out of the Keras
# generators, flatten the images, and fit with an eval_set for validation tracking
X_train2, y_train2 = next(train_generator)
X_val2, y_val2 = next(validation_generator)
X_train2 = X_train2.reshape(len(X_train2), -1)
X_val2 = X_val2.reshape(len(X_val2), -1)
if y_train2.ndim > 1:  # one-hot labels (class_mode='categorical') back to class indices
    y_train2, y_val2 = y_train2.argmax(axis=1), y_val2.argmax(axis=1)
model3.fit(X_train2, y_train2, eval_set=[(X_val2, y_val2)], verbose=False)
print(model3.evals_result())  # per-round validation metric, available because eval_set was passed to fit

# 5-fold cross-validation on the flattened training arrays
from sklearn.model_selection import cross_val_score
score = cross_val_score(model3, X_train2, y_train2, cv=5)
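# cross_val_score returns one score per fold; summarize them
print('CV scores for model 3 :', score, '| mean :', score.mean())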

# predict: XGBClassifier has no predict_generator; predict on the validation arrays drawn above
y_pred3 = model3.predict(X_val2)

# Compare each prediction with the actual validation label
count = 0
for i in range(y_pred3.shape[0]):
    if y_pred3[i] == y_val2[i]:
        count += 1

print('Accuracy for model 3 : ' + str((count / y_pred3.shape[0]) * 100))
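# Equivalent check with scikit-learn, assuming y_val2 holds the integer labels used above
from sklearn.metrics import accuracy_score
print('Accuracy for model 3 (sklearn) : ' + str(accuracy_score(y_val2, y_pred3) * 100))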

# ########################### MODEL 4 ###########################
# img_width = 256
# img_height = 256
# img_channel = 3