from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from keras.optimizers import RMSprop


def depth_3_cnn(self, X_train, Y_train, X_test, Y_test, num_classes,
                nb_epoch, verbose, validation_split, batch_size,
                filterNum, dim1, dim2, img_row, img_col, img_channel):
    # Three stacked Conv2D blocks, doubling the filter count at each stage.
    model = Sequential()
    model.add(Conv2D(filterNum, (dim1, dim2), padding='same',
                     input_shape=(img_row, img_col, img_channel),
                     activation='relu'))
    model.add(MaxPooling2D(pool_size=(4, 4)))
    model.add(Conv2D(filterNum * 2, (dim1, dim2), padding='same',
                     activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(filterNum * 4, (dim1, dim2), padding='same',
                     activation='relu'))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=RMSprop(),
                  metrics=['accuracy'])
    model.summary()
    model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch,
              validation_split=validation_split, verbose=verbose)
    score = model.evaluate(X_test, Y_test, batch_size=batch_size,
                           verbose=verbose)
    # Save model architecture as JSON
    model_json = model.to_json()
    with open("CNN01_model.json", "w") as json_file:
        json_file.write(model_json)
    # Save model weights as HDF5
    model.save_weights("CNN01_model.h5")
    # score[1] is the test accuracy; return it as a percentage
    accuracy = score[1] * 100
    return accuracy
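# Minimal usage sketch (not from the original source): assumes MNIST-like
# 28x28 single-channel images with one-hot labels. The container class name
# `CNNModels` and the data-loading details are illustrative assumptions only.
import numpy as np
from keras.datasets import mnist
from keras.utils import to_categorical

(x_tr, y_tr), (x_te, y_te) = mnist.load_data()
x_tr = x_tr.reshape(-1, 28, 28, 1).astype('float32') / 255.0
x_te = x_te.reshape(-1, 28, 28, 1).astype('float32') / 255.0
y_tr, y_te = to_categorical(y_tr, 10), to_categorical(y_te, 10)

acc = CNNModels().depth_3_cnn(x_tr, y_tr, x_te, y_te, num_classes=10,
                              nb_epoch=5, verbose=1, validation_split=0.1,
                              batch_size=128, filterNum=32, dim1=3, dim2=3,
                              img_row=28, img_col=28, img_channel=1)
print("test accuracy: %.2f%%" % acc)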
# Keras 1 / Theano-era snippet: `init=` and the symbolic call below rely on
# the old Keras 1 API. `_model_evaluation`, `n_q_regressors_weights`,
# ACTIVATION, LQG_Q, Algorithm, increment_base_termination, evaluation, mdp,
# state_dim, action_dim and discrete_actions come from the surrounding module.
Sequential.model = _model_evaluation

rho_regressor = Sequential()
rho_regressor.add(Dense(4, input_dim=n_q_regressors_weights,
                        init='uniform', activation=ACTIVATION))
rho_regressor.add(Dense(n_q_regressors_weights,
                        init='uniform', activation='linear'))
rho_regressor.compile(loss='mse', optimizer='rmsprop')

import theano
import theano.tensor as T

# Build the symbolic forward pass through the monkey-patched `model` method.
theta = T.matrix()
res = rho_regressor.model(theta)
# rho_regressor.fit(None, None)


##########################################
def terminal_evaluation(old_theta, new_theta, tol_theta=1e-2):
    # If the weight increment has converged, evaluate the induced policy on
    # the LQG problem from a fixed set of initial states.
    if increment_base_termination(old_theta, new_theta, 2, tol_theta):
        estimator = LQG_Q()
        estimator.omega = new_theta[0]
        agent = Algorithm(estimator, state_dim, action_dim, discrete_actions,
                          mdp.gamma, mdp.horizon)
        agent._iteration = 1
        initial_states = np.array([[1, 2, 5, 7, 10]]).T
        values = evaluation.evaluate_policy(mdp, agent,
                                            initial_states=initial_states)
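# Hedged sketch (assumption, not part of the original source): with the
# Theano backend, the symbolic output `res` can be compiled into a callable
# function; `rho_f` is an illustrative name.
rho_f = theano.function([theta], res, allow_input_downcast=True)
# Example call on a zero weight vector of the expected input width:
rho_f(np.zeros((1, n_q_regressors_weights), dtype=theano.config.floatX))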