예제 #1
0
파일: model.py 프로젝트: cactusgame/awesome
    def make_serving_input_fn(self, tf_transform_output):
        """Build an Estimator input_fn for model serving.

        :param tf_transform_output: tf.Transform graph output wrapper.
        :return: input_fn producing a ServingInputReceiver for prediction.
        """
        data_formatter = DataFormatter()

        def serving_input_fn():
            """Create raw-feature placeholders and run them through tf.Transform.

            raw_inputs     : supported features fed into the transform graph
            receiver_inputs: every placeholder exposed to the caller
            """
            # One 1-D placeholder per supported feature.
            raw_inputs = {
                key: tf.placeholder(
                    shape=[None], dtype=data_formatter.get_tf_dtype(key))
                for key in data_formatter.FEATURES
            }
            # Both dicts reference the very same placeholder objects.
            receiver_inputs = dict(raw_inputs)

            transformed = tf_transform_output.transform_raw_features(raw_inputs)

            return tf.estimator.export.ServingInputReceiver(
                transformed, receiver_inputs)

        return serving_input_fn
예제 #2
0
 def get_dict_tweets_list(self, limit, query, url):
     """Scrape up to `limit` tweets matching `query` from a paged search endpoint.

     :param limit: maximum number of tweet dicts to collect.
     :param query: search query substituted into `url` as `q`.
     :param url: format string with `{q}` and `{pos}` placeholders.
     :return: list of per-tweet dicts built by DataFormatter; may be shorter
              than `limit` if the server errors out or runs out of results.
     """
     data_formatter = DataFormatter()
     dict_tweets_list = []
     next_position = ""  # pagination cursor; empty string requests the first page
     count = 0
     while count < limit:
         page_url = url.format(q=query, pos=next_position)
         response = requests.get(page_url, headers=self.HEADER)
         if response.status_code != 200:
             break  # best-effort: return what we have on any HTTP failure
         content = json.loads(response.text)
         next_position = content['min_position']
         # Explicit parser makes results independent of which bs4 backends
         # happen to be installed and silences bs4's "no parser" warning.
         soup = BeautifulSoup(content['items_html'], "html.parser")
         for tweet_block in soup.select('.tweet'):
             if count >= limit:
                 break
             dict_tweets_list.append(
                 data_formatter.get_dict_one_tweet_data(tweet_block))
             count += 1
         if not content['has_more_items']:
             break  # server says there are no further pages
     return dict_tweets_list
예제 #3
0
    def make_serving_input_fn(self, tf_transform_output):
        """ Estimator input function generator for model serving.
        :param tf_transform_output: tf.Transform graph output wrapper.
        :return: Estimator input function for serving (prediction).
        """

        data_formatter = DataFormatter()

        def serving_input_fn():
            """
            inputs : supported features
            inputs_ext: all features
            """
            inputs, inputs_ext = {}, {}

            # Used input features: one 1-D placeholder per supported feature,
            # shared between both dicts.
            for key in data_formatter.FEATURES:
                placeholder = tf.placeholder(
                    shape=[None], dtype=data_formatter.get_tf_dtype(key))
                inputs[key] = placeholder
                inputs_ext[key] = placeholder

            transformed_features = tf_transform_output.transform_raw_features(inputs)

            # todo: try RNN List features
            # Collect the historical close prices oldest-to-newest
            # (close_b20 ... close_b0) instead of 21 copy-pasted appends.
            tensors = [transformed_features["close_b%d" % step]
                       for step in range(20, -1, -1)]

            # Shape [batch, 21]: one column per historical close price.
            tensors_concat = tf.stack(tensors, axis=1)
            return tf.estimator.export.ServingInputReceiver(
                {"closes": tensors_concat}, inputs_ext)

        return serving_input_fn
예제 #4
0
파일: model.py 프로젝트: cactusgame/awesome
 def __init__(self):
     """Set up the shared DataFormatter and expose its classification targets."""
     formatter = DataFormatter()
     self.data_formatter = formatter
     # Classification and regression target definition
     self.CLASSIF_TARGETS = formatter.TARGETS
예제 #5
0
 def __init__(self):
     """Initialize with a DataFormatter and pull the classification targets from it."""
     formatter = DataFormatter()
     self.data_formatter = formatter
     self.CLASSIF_TARGETS = formatter.TARGETS
예제 #6
0
 def __init__(self):
     """Create this object's DataFormatter instance."""
     self.data_formatter = DataFormatter()
예제 #7
0
# from trackml.score import score_event
# import pdb
# import pandas as pd
# import csv

from data_formatter import DataFormatter

## Load Data ##
# Location of the TrackML-style training events and the single event to analyse.
path_to_dataset = "../../Data/train_100_events/"
event_path = "event000001052"
model_name = "identity.keras"

# load_event is defined/imported elsewhere; it is expected to return the four
# standard event tables (hits, cells, particles, truth) — TODO confirm.
hits, cells, particles, truth = load_event(path_to_dataset + event_path)

# Get the sorted tracks
# NOTE(review): presumably ground-truth tracks and their ordered hit
# sequences — verify against DataFormatter.getSortedTracks.
formatter = DataFormatter()
true_tracks, hit_tracks = formatter.getSortedTracks(particles, truth, hits)

## Load Predicted Seeds ##
# One predicted seed hit id per line; ids are written as floats in the file,
# hence the float() before int().
seed_file = open("SeedCandidates.txt", "r")
our_tracks = []
seed_hits = []
np_hits = np.asarray(hits)
for seed_id in seed_file:
    seed_id = int(float(seed_id.strip()))
    # Assumes column 0 of the hits table is the hit id — TODO confirm.
    seed_hit = np_hits[np_hits[:, 0] == seed_id][0]
    # Each predicted track starts as a single-element list: its seed hit id.
    our_tracks.append([int(seed_hit[0])])
    seed_hits.append(seed_hit)

print("\nStarting with " + str(len(seed_hits)) + " seed hits")
예제 #8
0
 def __init__(self):
     """Use a DataFormatter instance as this object's schema definition."""
     self.schema = DataFormatter()