import os
import shutil

import yaml

# FileParser and DataFormatter are assumed to come from this plugin's own
# modules; their import paths are not shown in this snippet.


class CallbackModule(object):

    ## constructor
    def __init__(self):
        ## yaml output directory
        self.__output_dir = os.path.join(os.getcwd(), "output")
        # initialize output directory
        if os.path.exists(self.__output_dir):
            shutil.rmtree(self.__output_dir)
        os.makedirs(self.__output_dir)
        ## module parser
        self.__parser = {}
        self.__parser["file"] = FileParser()
        ## data formatter
        self.__formatter = DataFormatter()

    ## output design information
    # @param self The object pointer
    # @param host The host name
    # @param data The ansible callback data
    def __output(self, host, data):
        # convert data to design info
        data_type = data["invocation"]["module_name"]
        if data_type in self.__parser:
            parsed_data = self.__parser[data_type].parse(data)
            self.__formatter.add(host, parsed_data)

    ## ansible callback function when the result is ok
    # @param self The object pointer
    # @param host The host name
    # @param res The ansible response
    def runner_on_ok(self, host, res):
        self.__output(host, res)

    ## ansible callback function at the end
    # @param self The object pointer
    # @param stats The ansible statistics
    def playbook_on_stats(self, stats):
        data = self.__formatter.get()
        for k, v in data.items():
            file_name = os.path.join(self.__output_dir, k + ".yml")
            with open(file_name, "w") as f:
                f.write(yaml.safe_dump(v))

    # default methods
    def on_any(self, *args, **kwargs):
        pass

    def runner_on_failed(self, host, res, ignore_errors=False):
        pass

    def runner_on_skipped(self, host, item=None):
        pass

    def runner_on_unreachable(self, host, res):
        pass

    def runner_on_no_hosts(self):
        pass

    def runner_on_async_poll(self, host, res, jid, clock):
        pass

    def runner_on_async_ok(self, host, res, jid):
        pass

    def runner_on_async_failed(self, host, res, jid):
        pass

    def playbook_on_start(self):
        pass

    def playbook_on_notify(self, host, handler):
        pass

    def playbook_on_no_hosts_matched(self):
        pass

    def playbook_on_no_hosts_remaining(self):
        pass

    def playbook_on_task_start(self, name, is_conditional):
        pass

    def playbook_on_vars_prompt(self, varname, private=True, prompt=None,
                                encrypt=None, confirm=False, salt_size=None,
                                salt=None, default=None):
        pass

    def playbook_on_setup(self):
        pass

    def playbook_on_import_for_host(self, host, imported_file):
        pass

    def playbook_on_not_import_for_host(self, host, missing_file):
        pass

    def playbook_on_play_start(self, name):
        pass
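# Hedged sketch of the result, inferred from playbook_on_stats above and the
# DataFormatter tests below: one YAML file per resource type is written under
# ./output/, so a "file" task that ensured a directory would yield an
# output/directory.yml along the lines of:
#
#   /var/lib/jenkins/updates:
#     attirbutes:
#       group: jenkins
#       mode: '0755'
#       owner: jenkins
#       path: /var/lib/jenkins/updates
#     values:
#       node1: true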
import unittest

from data_formatter import DataFormatter  # import path assumed, not shown in the snippet


class TestDataFormatter(unittest.TestCase):

    ## init test case
    # @param self The object pointer
    def setUp(self):
        ## data formatter
        self.__formatter = DataFormatter()

    ## test DataFormatter.add(host, data), data is one set
    # @param self The object pointer
    def testAddOne(self):
        parsed_data = {
            'resource': 'directory',
            'key': '/var/lib/jenkins/updates',
            'path': '/var/lib/jenkins/updates',
            'owner': 'jenkins',
            'group': 'jenkins',
            'mode': '0755'
        }
        # note: the 'attirbutes' key is kept as-is to match the DataFormatter
        # implementation under test
        cdata = {
            'directory': {
                '/var/lib/jenkins/updates': {
                    'attirbutes': {
                        'path': '/var/lib/jenkins/updates',
                        'owner': 'jenkins',
                        'group': 'jenkins',
                        'mode': '0755'
                    },
                    'values': {
                        'node1': True
                    }
                }
            }
        }
        self.__formatter.add("node1", parsed_data)
        self.assertEqual(self.__formatter.get(), cdata)

    ## test DataFormatter.add(host, data), data has two directories
    # @param self The object pointer
    def testAddTwoData(self):
        parsed_data1 = {
            'resource': 'directory',
            'key': '/data1',
            'path': '/data1',
            'owner': 'sysadm',
            'group': 'wheel',
            'mode': '0755'
        }
        parsed_data2 = {
            'resource': 'directory',
            'key': '/data2',
            'path': '/data2',
            'owner': 'sysadm',
            'group': 'wheel',
            'mode': '0700'
        }
        cdata = {
            'directory': {
                '/data1': {
                    'attirbutes': {
                        'path': '/data1',
                        'owner': 'sysadm',
                        'group': 'wheel',
                        'mode': '0755'
                    },
                    'values': {
                        'node1': True
                    }
                },
                '/data2': {
                    'attirbutes': {
                        'path': '/data2',
                        'owner': 'sysadm',
                        'group': 'wheel',
                        'mode': '0700'
                    },
                    'values': {
                        'node1': True
                    }
                }
            }
        }
        self.__formatter.add("node1", parsed_data1)
        self.__formatter.add("node1", parsed_data2)
        self.assertEqual(self.__formatter.get(), cdata)

    ## test DataFormatter.add(host, data), data for two hosts
    # @param self The object pointer
    def testAddHosts(self):
        parsed_data = {
            'resource': 'directory',
            'key': '/var/lib/jenkins/updates',
            'path': '/var/lib/jenkins/updates',
            'owner': 'jenkins',
            'group': 'jenkins',
            'mode': '0755'
        }
        cdata = {
            'directory': {
                '/var/lib/jenkins/updates': {
                    'attirbutes': {
                        'path': '/var/lib/jenkins/updates',
                        'owner': 'jenkins',
                        'group': 'jenkins',
                        'mode': '0755'
                    },
                    'values': {
                        'node1': True,
                        'node2': True
                    }
                }
            }
        }
        self.__formatter.add("node1", parsed_data)
        self.__formatter.add("node2", parsed_data)
        self.assertEqual(self.__formatter.get(), cdata)
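# Hedged runner stub so the suite above can be executed directly as a script;
# the surrounding module layout is an assumption.
if __name__ == "__main__":
    unittest.main()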
import json

import requests
from bs4 import BeautifulSoup


# Method of a scraper class (not shown here) that provides self.HEADER.
def get_dict_tweets_list(self, limit, query, url):
    data_formatter = DataFormatter()
    dict_tweets_list = []
    i = 0
    while i < limit:
        # the first request starts from an empty cursor; later ones resume
        # from the min_position returned by the previous page
        if i == 0:
            base_url = url.format(q=query, pos="")
        else:
            base_url = url.format(q=query, pos=next_position)
        url_data = requests.get(base_url, headers=self.HEADER)
        if url_data.status_code != 200:
            break
        content = json.loads(url_data.text)
        next_position = content['min_position']
        html_data = content['items_html']
        soup = BeautifulSoup(html_data, "html.parser")
        tweet_blocks = soup.select('.tweet')
        for tweet_block in tweet_blocks:
            if i < limit:
                dict_tweets_list.append(
                    data_formatter.get_dict_one_tweet_data(tweet_block))
                i += 1
            else:
                break
        if not content['has_more_items']:
            break
    return dict_tweets_list
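# Hedged usage sketch: the enclosing scraper class is not shown, so
# TweetScraper and the url template are hypothetical stand-ins; the template
# only needs the {q} and {pos} slots that url.format() fills above, and the
# endpoint itself is an assumption about the legacy Twitter search timeline.
scraper = TweetScraper()  # hypothetical wrapper class
tweets = scraper.get_dict_tweets_list(
    limit=50,
    query="python",
    url="https://twitter.com/i/search/timeline?q={q}&max_position={pos}")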
def make_serving_input_fn(self, tf_transform_output):
    """ Estimator input function generator for model serving.
    :param tf_transform_output: tf.Transform graph output wrapper.
    :return: Estimator input function for serving (prediction).
    """
    data_formatter = DataFormatter()

    def serving_input_fn():
        """
        inputs    : supported features
        inputs_ext: all features
        """
        inputs, inputs_ext = {}, {}
        # Used input features
        for key in data_formatter.FEATURES:
            placeholder = tf.placeholder(
                shape=[None], dtype=data_formatter.get_tf_dtype(key))
            inputs[key] = placeholder
            inputs_ext[key] = placeholder
        transformed_features = tf_transform_output.transform_raw_features(
            inputs)
        return tf.estimator.export.ServingInputReceiver(
            transformed_features, inputs_ext)

    return serving_input_fn
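# Hedged usage sketch: how a serving input fn like the one above is typically
# wired into a TF 1.x Estimator export; `model`, `estimator`, and both paths
# are assumptions, not taken from this code.
import tensorflow_transform as tft

tf_transform_output = tft.TFTransformOutput("/path/to/transform_output")
serving_input_fn = model.make_serving_input_fn(tf_transform_output)
estimator.export_savedmodel("/path/to/export", serving_input_fn)  # TF 1.x API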
def make_serving_input_fn(self, tf_transform_output):
    """ Estimator input function generator for model serving.
    :param tf_transform_output: tf.Transform graph output wrapper.
    :return: Estimator input function for serving (prediction).
    """
    data_formatter = DataFormatter()

    def serving_input_fn():
        """
        inputs    : supported features
        inputs_ext: all features
        """
        inputs, inputs_ext = {}, {}
        # Used input features
        for key in data_formatter.FEATURES:
            placeholder = tf.placeholder(
                shape=[None], dtype=data_formatter.get_tf_dtype(key))
            inputs[key] = placeholder
            inputs_ext[key] = placeholder
        transformed_features = tf_transform_output.transform_raw_features(inputs)
        # todo: try RNN List features
        # Collect the close_b20 .. close_b0 window, oldest first; stacking 21
        # [batch] tensors on axis=1 yields one [batch, 21] "closes" input.
        tensors = [transformed_features["close_b%d" % i]
                   for i in range(20, -1, -1)]
        tensors_concat = tf.stack(tensors, axis=1)
        return tf.estimator.export.ServingInputReceiver(
            {"closes": tensors_concat}, inputs_ext)

    return serving_input_fn
def __init__(self):
    self.data_formatter = DataFormatter()
    # Classification and regression target definition
    self.CLASSIF_TARGETS = self.data_formatter.TARGETS
# from trackml.score import score_event
# import pdb
# import pandas as pd
# import csv
import numpy as np
from trackml.dataset import load_event

from data_formatter import DataFormatter

## Load Data ##
path_to_dataset = "../../Data/train_100_events/"
event_path = "event000001052"
model_name = "identity.keras"

hits, cells, particles, truth = load_event(path_to_dataset + event_path)

# Get the sorted tracks
formatter = DataFormatter()
true_tracks, hit_tracks = formatter.getSortedTracks(particles, truth, hits)

## Load Predicted Seeds ##
seed_file = open("SeedCandidates.txt", "r")
our_tracks = []
seed_hits = []
np_hits = np.asarray(hits)
# column 0 of the hits array is the hit id; each seed id selects its hit row
for seed_id in seed_file:
    seed_id = int(float(seed_id.strip()))
    seed_hit = np_hits[np_hits[:, 0] == seed_id][0]
    our_tracks.append([int(seed_hit[0])])
    seed_hits.append(seed_hit)

print("\nStarting with " + str(len(seed_hits)) + " seed hits")
]

transformation_list = "\n" + "\n".join(
    str(idx + 1) + ". " + t for idx, t in enumerate(transformations))
transformation_idx = input(transformation_list +
                           "\nChoose which transform to train: ")
transformation_idx = int(transformation_idx) - 1
num_epochs = input("Input how many epochs you want to train: ")
num_epochs = int(num_epochs)
seq_len = input("Input the sequence length you want to train: ")
seq_len = int(seq_len)
batch_size = input("Input the batch size you want: ")
batch_size = int(batch_size)

formatter = DataFormatter()

## Create the LSTM model ##
# start_end_indices = list(transformation_indices.values())
# tuple_len = 1 + start_end_indices[transformation_idx][1] - start_end_indices[transformation_idx][0]
# X = transformed_hits_X[transformation_idx]
# Y = transformed_hits_Y[transformation_idx]
tuple_len = 3
in_neurons = tuple_len
out_neurons = tuple_len
hidden_neurons = 500

# load the model if it exists
if os.path.exists(transformations[transformation_idx] + str(seq_len) + ".keras"):
    model = load_model(transformations[transformation_idx] + str(seq_len) +
                       ".keras")
def __init__(self):
    self.schema = DataFormatter()