def __init__(self, config, val_config=None):
    """Build the training graph (and, optionally, a variable-sharing validation graph).

    Args:
        config: training configuration object. Mutated in place: the input
            placeholders (`batch_size_ph`, `emb_mat_ph`) are attached to it.
            Read attributes: is_train, init_lr, hidden_size, num_devices,
            device_type, supervised.
        val_config: optional validation configuration; when given, a second
            pipeline reusing the training variables is built on the CPU.
    """
    self.config = config
    self.val_config = val_config
    # Step counter; incremented by apply_gradients on every training step.
    self.global_step = tf.get_variable(
        'global_step', shape=[], dtype='int32', trainable=False,
        initializer=tf.constant_initializer(0))
    self.opt = tf.train.AdadeltaOptimizer(config.init_lr) if config.is_train else None

    # NOTE(review): placeholders are stored on the config objects; the data /
    # queue classes below apparently read them from there — side effect on config.
    config.batch_size_ph = tf.placeholder("int32", shape=[], name='batch_size')
    config.emb_mat_ph = tf.placeholder("float", shape=[None, config.hidden_size])
    self.data = SquadData(config)
    self.iq = InputQueue(config, self.data)
    self.inputs = self.iq.inputs
    if val_config is not None:
        val_config.batch_size_ph = tf.placeholder("int32", shape=[])
        val_config.emb_mat_ph = tf.placeholder("float", shape=[None, val_config.hidden_size])
        self.val_data = SquadData(val_config, train_data=self.data)
        self.val_iq = InputQueue(val_config, self.val_data)
        self.val_inputs = self.val_iq.inputs

    outputs_list = []
    loss_list = []
    grads_list = []
    with tf.variable_scope("model"):
        with tf.name_scope("train") as train_ns:
            # One model "tower" per device, all sharing the same variables.
            for device_idx in range(config.num_devices):
                inputs = self.iq.inputs_list[device_idx]
                with tf.device("/{}:{}".format(config.device_type, device_idx)), \
                        tf.name_scope("{}_{}".format(config.device_type, device_idx)):
                    each_outputs, each_loss, each_grads = self._pipeline(config, inputs)
                    outputs_list.append(each_outputs)
                    loss_list.append(each_loss)
                    grads_list.append(each_grads)
                # Enable variable reuse for every tower after the first.
                if device_idx < config.num_devices - 1:
                    tf.get_variable_scope().reuse_variables()
            self.outputs = self._merge_outputs_list(outputs_list)
            if config.supervised:
                # Mean loss across towers.
                self.loss = tf.add_n(loss_list) / len(loss_list)
                if config.is_train:
                    # NOTE(review): only the first tower's gradients are applied;
                    # multi-device gradients are not averaged — confirm intended.
                    self.grads = grads_list[0]
                    # BUG FIX: the original passed tf.train.get_global_step(),
                    # which returns None unless a variable was registered in the
                    # GLOBAL_STEP collection (tf.get_variable above does not do
                    # that), so the step counter never advanced. Use the step
                    # variable this constructor created.
                    self.train_op = self.opt.apply_gradients(
                        self.grads, global_step=self.global_step)
            self.summary_op = tf.summary.merge(
                tf.get_collection(tf.GraphKeys.SUMMARIES, scope=train_ns))
        if val_config is not None:
            # Validation pipeline on CPU, reusing the training variables.
            tf.get_variable_scope().reuse_variables()
            with tf.device("/cpu:0"), tf.name_scope("val"):
                self.val_outputs, self.val_loss, _ = \
                    self._pipeline(val_config, self.val_inputs)
class Router:
    """Routes messages between named nodes, the condition engine and the I/O queues."""

    def __init__(self):
        self.nodes = {}
        self.input_queue = InputQueue(self)
        self.output_queue = OutputQueue(self)

    def init(self, state, condition_engine):
        """Late-bind collaborators, load the node table and start both queues."""
        self.state = state
        self.condition_engine = condition_engine
        self.load_nodes()
        self.state.prune()
        self.input_queue.start()   # start input queue
        self.output_queue.start()  # start output queue

    def load_nodes(self):
        """Instantiate node objects from nodes.json and register them by label.

        A missing or malformed nodes.json is treated as "no nodes" rather than
        a fatal error (preserving the original best-effort behavior), but the
        failure is now logged instead of being silently swallowed by a bare
        ``except: pass``.
        """
        node_json = []
        try:
            # FIX: the original used open('nodes.json', 'r+') without closing
            # the handle; read-only access is sufficient and the context
            # manager guarantees the file is closed.
            with open('nodes.json', 'r') as f:
                node_json = json.load(f)
        except (OSError, ValueError) as e:
            # ValueError also covers json.JSONDecodeError.
            logger.warning('Could not load nodes.json: %s', e)
        node_list = {}
        for node in node_json:  # for each node in the configuration file...
            node_module = node['module']
            node_class = node['class']
            node_label = node['label']
            # Resolve the class from an already-imported module and create
            # the node object...
            new_node = getattr(globals()[node_module], node_class)(node_label, self.state, self)
            # ...and append it to the list of nodes.
            node_list[node_label] = new_node
            logger.info('Created new node: Module: %s Class: %s Label: %s',
                        node_module, node_class, node_label)
        self.nodes = node_list

    def push(self, node_label, message):
        """Queue a message pushed up from a node."""
        logger.info('Received push from: %s. Message: %s', node_label, message)
        self.input_queue.add_message((node_label, message))

    def send_to_node(self, node_label, message):
        """Deliver *message* to the node registered under *node_label*."""
        logger.info('Sending to node: %s. Message: %s', node_label, message)
        self.nodes[node_label].send(message)

    def get_from_node(self, node_label, arguments):
        """Query the node registered under *node_label*.

        FIX: the original discarded the node's response and always returned
        None; return it so callers can actually use the result (callers that
        ignored the old None return are unaffected).
        """
        logger.info('Getting from node: %s. Arguments: %s', node_label, arguments)
        return self.nodes[node_label].get(arguments)

    def send_to_condition_engine(self, message):
        """Forward *message* to the condition engine's queue."""
        logger.info('Sending message to condition engine: %s', message)
        self.condition_engine.add_message(message)

    def stop(self):
        """Stop both worker queues."""
        self.input_queue.stop()
        self.output_queue.stop()
import time

from input_queue import InputQueue

# How often (in seconds) to sample the queue depth.
POLL_INTERVAL_SECONDS = 5


def main():
    """Poll the input queue forever, printing its message count every interval."""
    input_queue = InputQueue()
    while True:
        count = input_queue.get_count()
        print('Get count:', count)
        time.sleep(POLL_INTERVAL_SECONDS)


if __name__ == '__main__':
    main()
def __init__(self):
    """Create an empty node registry and the router's two message queues."""
    # Nodes are registered later (the mapping starts out empty).
    self.nodes = dict()
    # Each queue keeps a back-reference to this router for dispatch.
    self.input_queue, self.output_queue = InputQueue(self), OutputQueue(self)