def testFromSavedModelWithTags(self):
   """Loading via from_saved_model succeeds when the 'serve' tag is given."""
   export_dir = self._export_dir
   predictor_factories.from_saved_model(export_dir, tags='serve')
 def testFromSavedModelWithBadTags(self):
   """from_saved_model raises RuntimeError when the tag is absent from the SavedModel."""
   expected_message = '.*? could not be found in SavedModel'
   with self.assertRaisesRegexp(RuntimeError, expected_message):
     predictor_factories.from_saved_model(self._export_dir, tags='bad_tag')
 def testFromSavedModel(self):
   """Smoke test: from_saved_model loads with default arguments."""
   saved_model_path = self._export_dir
   predictor_factories.from_saved_model(saved_model_path)
def main(argv):
    """Training driver: iteratively train the Estimator and refresh its Predictor.

    Each loop collects new game data with the current Predictor, trains the
    Estimator on it, exports the Estimator, and rebuilds the Predictor from
    the fresh export so the next collection round uses the new weights.

    Args:
        argv: command-line args; argv[1:] is parsed by the module-level
            ``parser`` (expects ``saved_estimator_model_dir``,
            ``predictor_save_model_dir`` and ``predictor_load_model_dir``).
    """
    args = parser.parse_args(argv[1:])
    print("Running model to be trained in :{}".format(
        args.saved_estimator_model_dir))
    print("Running predictors in :{}".format(args.predictor_save_model_dir))

    # Hide warnings that spam the console (silence both the C++ and Python loggers).
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # or any {'0', '1', '2'}
    tf.logging.set_verbosity(tf.logging.ERROR)

    # Prepare a clean work folder for the Predictor exports.
    # (Original comment: "lagrinsplass for predictor" = storage place for predictor.)
    if tf.gfile.Exists(args.predictor_save_model_dir):
        tf.gfile.DeleteRecursively(args.predictor_save_model_dir)
    tf.gfile.MakeDirs(args.predictor_save_model_dir)

    # Load the saved model. Assumes a model was already made using "create new model".
    my_feature_columns = [
        tf.feature_column.numeric_column(key='x'),
        tf.feature_column.numeric_column(key='x_dot'),
        tf.feature_column.numeric_column(key='theta'),
        tf.feature_column.numeric_column(key='theta_dot')
    ]
    classifier = tf.estimator.Estimator(
        model_fn=my_model,
        params={
            'feature_columns': my_feature_columns,
            'hidden_units': [10, 10],
            'n_classes': 2
        },
        model_dir=args.saved_estimator_model_dir)

    print('now using predictor in file path: {}'.format(
        args.predictor_load_model_dir))
    # print('estimators dense2 kernel: {}'.format(classifier.get_variable_value('dense_2/kernel')))  # watch changes in the weights. Only works after training has been done, therefore disabled.

    new_score_requirement = score_requirement
    # The first Predictor is built from the estimator's own model dir; inside
    # the loop it is rebuilt from predictor_load_model_dir after each export.
    classifier_predictor = from_saved_model(args.saved_estimator_model_dir)

    best_training_data = []
    training_data = []
    for i in range(number_of_training_loops):
        print('Run number {}'.format(i))
        # Collect new games with the current predictor; the returned score
        # requirement ratchets the bar for what counts as "good" data.
        training_data, new_score_requirement = get_new_training_data(
            classifier_predictor, new_score_requirement, new_game_runs,
            goal_steps, best_training_data)
        # print('best training data len {}'.format(len(best_training_data)))
        train_mode_with_training_data(classifier, training_data, args)
        export_estimator_to_file(classifier, args)
        # Reload the predictor so it picks up the freshly exported weights.
        classifier_predictor = from_saved_model(args.predictor_load_model_dir)
        print(
            '/////////////7////////////////7 \n score_requierment: {}'.format(
                new_score_requirement))
        # print('estimators dense2 kernel: {}'.format(classifier.get_variable_value('dense_2/kernel')))   #watch changes in the weights
        # classifier_predictor=from_saved_model(args.saved_estimator_model_dir)

    print('len best data {}'.format(len(best_training_data)))
    best_training_data = np.array(best_training_data)

    # Train one last time on only the best training data.
    train_mode_with_training_data(classifier, best_training_data, args)
    export_estimator_to_file(classifier, args)
    classifier_predictor = from_saved_model(args.predictor_load_model_dir)
    # classifier_predictor = from_saved_model(args.saved_estimator_model_dir)

    # Final data-collection pass, just to see the results.
    best_training_data2 = []
    training_data, new_score_requirement = get_new_training_data(
        classifier_predictor, new_score_requirement, new_game_runs, goal_steps,
        best_training_data2)
# --- Beispiel #5 (separator left over from the code-scraping source; the
# stray "0" below it was a vote count, not code) ---
def _observation_to_dict(observation):
    """Map a raw 4-element CartPole observation to the model's feature dict.

    Args:
        observation: indexable with [cart position, cart velocity, pole angle,
            pole angular velocity] — the CartPole-v0 observation layout
            (assumed; confirm against the environment in use).

    Returns:
        dict mapping each feature name to a single-element list, as the
        feature columns / tf.Example builder expect.
    """
    return {
        'x': [observation[0]],
        'x_dot': [observation[1]],
        'theta': [observation[2]],
        'theta_dot': [observation[3]]
    }


def _serialized_example(observation):
    """Serialize an observation feature dict into tf.Example bytes.

    The Predictor's 'inputs' tensor wants its input as serialized
    tf.train.Example protos, one byte string per example.

    Args:
        observation: dict of feature name -> list of floats, as produced by
            ``_observation_to_dict``.

    Returns:
        bytes: the serialized tf.train.Example.
    """
    feature = {
        name: tf.train.Feature(
            float_list=tf.train.FloatList(value=values))
        for name, values in observation.items()
    }
    example = tf.train.Example(features=tf.train.Features(feature=feature))
    return example.SerializeToString()


def main(argv):
    """Load a saved Estimator as a Predictor and let it play CartPole.

    Plays a fixed number of rendered episodes, choosing each action as the
    argmax of the Predictor's class scores, and prints the score per episode.

    Args:
        argv: command-line args; argv[1:] is parsed by the module-level
            ``parser`` (expects ``saved_model_dir``).
    """
    args = parser.parse_args(argv[1:])
    print("Running model in :{}".format(args.saved_model_dir))

    # Hide warnings that spam the console (both C++ and Python loggers).
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # or any {'0', '1', '2'}
    tf.logging.set_verbosity(tf.logging.ERROR)

    # NOTE: there are multiple factory functions for creating a Predictor,
    # but only from_saved_model has been verified to work here.
    classifier_predictor = from_saved_model(args.saved_model_dir)

    # Let the model play a handful of episodes.
    modell_games = 5
    for _ in range(modell_games):
        env.reset()
        score = 0
        # Take one random first action so we have an observation to feed the model.
        action = random.randrange(0, 2)
        observation, reward, done, info = env.step(action)
        observation = _observation_to_dict(observation)
        for _ in range(goal_steps):
            env.render()
            # Choose an action (0 or 1): argmax over the score vector
            # (one-hot style scores -> index of the highest value).
            model_input_bytes = _serialized_example(observation)
            prediction = classifier_predictor({'inputs': [model_input_bytes]})
            action = np.argmax(prediction['scores'], axis=-1)[0]

            observation, reward, done, info = env.step(action)
            observation = _observation_to_dict(observation)
            score += reward
            # print('score:{}  action:{}  observation:{}'.format(score, action, observation))

            if done:
                break
        print(score)
# --- Beispiel #6 (separator left over from the code-scraping source; the
# stray "0" below it was a vote count, not code) ---
 def testFromSavedModelWithSessionConfig(self):
     """A custom session ConfigProto is accepted by from_saved_model."""
     session_config = config_pb2.ConfigProto()
     predictor_factories.from_saved_model(self._export_dir,
                                          config=session_config)
 def testFromSavedModelWithSessionConfig(self):
   """from_saved_model accepts an explicit session ConfigProto."""
   proto = config_pb2.ConfigProto()
   predictor_factories.from_saved_model(self._export_dir, config=proto)