def close_all_sockets(self):
    """Close every socket tracked in this device's handle map."""
    for sock in self.get_sockets(self.get_handle_map()):
        if sock is None:
            continue
        DTN._cleanup_socket(sock)
def close_all_sockets(self):
    """Tear down all sockets known to the handle map."""
    handle_map = self.get_handle_map()
    open_socks = [s for s in self.get_sockets(handle_map) if s is not None]
    for open_sock in open_socks:
        DTN._cleanup_socket(open_sock)
def clean(self):
    """Release both DTN sockets and deregister this connection from the Site Manager."""
    for sock in (self.conn_send, self.conn_recv):
        if sock is not None:
            DTN._cleanup_socket(sock)
    # The Site Manager no longer tracks a live DTNConnection for this SH.
    self.sm.dtn[self.sh] = None
    if self.server_conn:
        self.sm.server_connected = False
def clean(self):
    """Shut down this connection's send/recv sockets and unregister it."""
    send_sock = self.conn_send
    recv_sock = self.conn_recv
    if send_sock is not None:
        DTN._cleanup_socket(send_sock)
    if recv_sock is not None:
        DTN._cleanup_socket(recv_sock)
    # Set DTNConnection of this SH in Site Manager to None
    self.sm.dtn[self.sh] = None
    if self.server_conn:
        self.sm.server_connected = False
def __init__(self):
    """Initialize thread machinery and DTN connection state for this device."""
    threading.Thread.__init__(self)
    # Registers empty_cb as the DTNConnection callback.
    DTN.DTNConnection.__init__(self, self.empty_cb)
    # NOTE(review): '_broadcat_server' looks like a typo of '_broadcast_server' —
    # confirm the helper's actual name elsewhere in the file before renaming.
    self.udp_listen = self._broadcat_server(8888)  # UDP broadcast listener on port 8888
    self.conn = None      # no active connection yet
    self.killed = False   # presumably flips True to stop the thread loop — confirm in run()
    self.cmd = DTN.DTNQueue()   # queue for command traffic
    self.data = DTN.DTNQueue()  # queue for data traffic
def connect_to_sm(self, ip, port, server_conn=False):
    """Establish a two-socket DTN connection to the Site Manager at ip:port.

    Handshake: connect, send "<sh> <target>\\n", read the peer's
    "<sh> <port_recv> <target...>" reply, confirm, then open a reverse
    connection on port_recv.  On success a DTNConnection is registered
    in self.dtn and started.

    Fix over the original: conn_send is now closed on every failure
    path (EXIST_ERR reply, duplicate SH, failed reverse connect) —
    previously those paths leaked the socket.

    Returns True on success, False otherwise.
    """
    logger.debug('Try to connect to Site Manager and establish DTN connection')
    conn_send = DTN._tcp_connect(ip, port)
    if conn_send is None:
        return False
    # Introduce ourselves to the peer.
    conn_send.send('%s %s\n' % (self.sh, self.target))
    data = conn_send.recv(1024)
    if data == EXIST_ERR:
        logger.debug('DTN connection already exists between these two Site Managers')
        DTN._cleanup_socket(conn_send)  # don't leak the socket on refusal
        return False
    fields = data.split()
    sh = fields[0]
    port_recv = int(fields[1])
    tar = ' '.join(fields[2:])
    logger.debug('SH: %s, Port: %d, Target: %s' % (sh, port_recv, tar))
    # Refuse a second connection to an SH we already track.
    if self.dtn.has_key(sh) and self.dtn[sh] is not None:
        conn_send.send(EXIST_ERR)
        logger.debug('DTN connection already exists between these two Site Managers')
        DTN._cleanup_socket(conn_send)
        return False
    conn_send.send(SUCCESS_INFO)
    # Open the reverse (receive) channel on the advertised port.
    conn_recv = DTN._tcp_connect(ip, port_recv)
    if conn_recv is None:
        DTN._cleanup_socket(conn_send)  # half-open handshake: clean up
        return False
    dtn_conn = DTNConnection(conn_send, conn_recv, self.sh, sh, tar, self,
                             server_conn=server_conn)
    self.dtn[sh] = dtn_conn
    if not self.last_hash.has_key(sh):
        self.last_hash[sh] = ''
    logger.info('New DTN connection established')
    dtn_conn.start()
    return True
def pre_train(source_domain_data):
    """Pre-train the DTN network on source-domain data only.

    Builds the TF1 graph (inference + softmax cross-entropy + L2
    regularization), runs PRE_TRAINING_STEPS Adam steps, and saves a
    checkpoint to MODEL_SAVE_PATH/PRE_MODEL_NAME.
    """
    # Fixed-size source batch, NHWC layout.
    xs = tf.placeholder(
        tf.float32,
        [PRE_BATCH_SIZE, DTN.IMAGE_SIZE, DTN.IMAGE_SIZE, DTN.NUM_CHANNELS],
        name='source-x-input')
    # One-hot labels; batch dimension left dynamic.
    ys_ = tf.placeholder(tf.float32, [None, DTN.OUTPUT_NODE],
                         name='source-y-input')
    regularizer = tf.contrib.layers.l2_regularizer(RAGULARZTION_RATE)
    # Build fresh trainable variables (reuse=False); ys are the logits.
    _, ys = DTN.inference(xs, True, regularizer, reuse=False, trainable=True)
    # sparse_* takes class indices, hence argmax over the one-hot ys_.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=ys, labels=tf.argmax(ys_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # Total loss = data term + L2 terms gathered in the 'losses' collection.
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    train = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(PRE_TRAINING_STEPS):
            x, y = source_domain_data.next_batch(PRE_BATCH_SIZE)
            # next_batch returns flat images; restore the NHWC shape.
            reshaped_xs = np.reshape(x, (PRE_BATCH_SIZE, DTN.IMAGE_SIZE,
                                         DTN.IMAGE_SIZE, DTN.NUM_CHANNELS))
            loss_value, _ = sess.run([loss, train],
                                     feed_dict={xs: reshaped_xs, ys_: y})
            print("After %d training steps, loss on training batch is %f" %
                  (i, loss_value))
        saver.save(sess, os.path.join(MODEL_SAVE_PATH, PRE_MODEL_NAME))
def evaluate(test_data, model_file):
    """Restore the checkpoint at model_file and print test-set accuracy."""
    with tf.Graph().as_default() as g:
        with tf.device('cpu:0'):  # evaluation runs on CPU
            # The whole test set is fed as a single batch.
            x = tf.placeholder(tf.float32, [
                test_data.num_examples, DTN.IMAGE_SIZE, DTN.IMAGE_SIZE,
                DTN.NUM_CHANNELS
            ], name='x-input')
            y_ = tf.placeholder(tf.float32, [None, DTN.OUTPUT_NODE],
                                name='y-input')
            # Inference without dropout, no regularizer, frozen weights.
            _, y = DTN.inference(x, False, None, reuse=False, trainable=False)
            correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            # accuracy_sum = tf.reduce_sum(tf.cast(correct_prediction, tf.float32))
            # load saved model
            saver = tf.train.Saver()
            with tf.Session() as sess:
                saver.restore(sess, model_file)
                accuracy_score = sess.run(
                    accuracy,
                    feed_dict={
                        x: np.reshape(test_data.images,
                                      (test_data.num_examples, DTN.IMAGE_SIZE,
                                       DTN.IMAGE_SIZE, DTN.NUM_CHANNELS)),
                        y_: test_data.labels
                    })
                print(model_file, "test accuracy = %f" % accuracy_score)
def setUp(self):
    """Create and start a DTN client pointed at the local test server."""
    server_ip, server_port = "127.0.0.1", 7000
    self.client = DTN.DTNClient(server_ip, server_port)
    self.client.start()
def open_listener(self):
    """Open the base listeners plus this device's monitor and vclient sockets."""
    BaseDTNDevice.open_listener(self)
    ip = self.my_ip
    # Monitor traffic arrives over TCP; vclient traffic over UDP.
    self.monitor_listen = DTN._tcp_listen(ip, self.monitor_port)
    self.vclient_listen = DTN._udp_open(ip, self.vclient_port)
def handle_dtn_listen(self, s):
    """Accept an inbound DTN handshake on listener socket s.

    Protocol: peer sends "<sh> <target>"; we reply
    "<sh> <port> <target>" where <port> is a freshly opened one-shot
    listener; the peer confirms, then connects back on <port>.

    Fixes over the original: the port-probe loop is bounded (a bare
    ``except`` with ``while True`` could spin forever on a persistent
    bind error); the accepted ``conn_recv`` socket is closed on every
    failure path; the one-shot ``listener`` is closed once its single
    accept is done (previously it leaked even on success).

    Returns True on success, False otherwise.
    """
    logger.info('DTN connection request')
    conn_recv, remote = s.accept()
    data = conn_recv.recv(1024)
    fields = data.split()
    sh = fields[0]
    tar = ' '.join(fields[1:])
    logger.debug('SH: %s, Target: %s' % (sh, tar))
    if self.dtn.has_key(sh):
        logger.debug(self.dtn[sh])
    if self.dtn.has_key(sh) and self.dtn[sh] is not None:
        conn_recv.send(EXIST_ERR)
        logger.debug('DTN connection already exists between these two Site Managers')
        DTN._cleanup_socket(conn_recv)
        return False
    # Probe upward from dtn_port+1 for a free port, bounded to 1000 tries.
    listener = None
    random_n = 1
    while random_n <= 1000:
        try:
            listener = DTN._tcp_listen(self.my_ip, self.dtn_port + random_n)
            break
        except:
            random_n += 1
    if listener is None:
        logger.debug('no free port found for the reverse DTN channel')
        DTN._cleanup_socket(conn_recv)
        return False
    conn_recv.send('%s %d %s' % (self.sh, self.dtn_port + random_n,
                                 self.target))
    data = conn_recv.recv(1024)
    if data == EXIST_ERR:
        logger.debug('DTN connection already exists between these two Site Managers')
        DTN._cleanup_socket(conn_recv)
        listener.close()
        return False
    # Wait briefly for the peer to connect back on the advertised port.
    listener.settimeout(5)
    try:
        conn_send, remote = listener.accept()
    except socket.timeout:
        logger.debug('timeout')
        DTN._cleanup_socket(conn_recv)
        listener.close()
        return False
    # One-shot listener has served its purpose.
    listener.close()
    if conn_send is None:
        DTN._cleanup_socket(conn_recv)
        return False
    dtn_conn = DTNConnection(conn_send, conn_recv, self.sh, sh, tar, self)
    self.dtn[sh] = dtn_conn
    if not self.last_hash.has_key(sh):
        self.last_hash[sh] = ''
    logger.info('New DTN connection established')
    dtn_conn.start()
    return True
def open_listener(self):
    """Open the listeners shared by all DTN devices.

    @rewrite if implementing subclass
    """
    my_ip = self.my_ip
    self.dtn_listen = DTN._tcp_listen(my_ip, self.dtn_port)
    self.bcast_listen = DTN._broadcast_listen(self.bcast_port)
DTN.IMAGE_SIZE, DTN.NUM_CHANNELS], name='source-x-input') xt = tf.placeholder(tf.float32, [target_domain_data.num_examples, DTN.IMAGE_SIZE, DTN.IMAGE_SIZE, DTN.NUM_CHANNELS], name='target-x-input') ys_ = tf.placeholder(tf.float32, [None, DTN.OUTPUT_NODE], name='source-y-input') yt_ = tf.placeholder(tf.float32, [None, DTN.OUTPUT_NODE], name='source-y-input') # trainable=False fix lower layer param s_pool, s_fc1, s_fc2, ys = DTN.inference(xs, False, None, reuse=False, trainable=False) # False: without dropout t_pool, t_fc1, t_fc2, yt = DTN.inference(xt, False, None, reuse=True, trainable=False) # pesudo logit: yt # TODO ys_: ground truth label, yt: predicted label pool_mmd_mar, pool_mmd_con = con_MMD(s_pool, ys_, t_pool, yt) fc1_mmd_mar, fc1_mmd_con = con_MMD(s_fc1, ys_, t_fc1, yt) fc2_mmd_mar, fc2_mmd_con = con_MMD(s_fc2, ys_, t_fc2, yt) # TODO learning rate decay variables = slim.get_variables_to_restore() variables_to_restore = [v for v in variables if v.name.split('_')[0] != 'step'] saver = tf.train.Saver(variables_to_restore) with tf.Session() as sess: # print(variables_to_restore)
def open_listener(self):
    """Open all listeners for this device: the base set, then monitor and vclient."""
    # Base class sets up the shared DTN/broadcast listeners first.
    BaseDTNDevice.open_listener(self)
    monitor_sock = DTN._tcp_listen(self.my_ip, self.monitor_port)
    vclient_sock = DTN._udp_open(self.my_ip, self.vclient_port)
    self.monitor_listen = monitor_sock
    self.vclient_listen = vclient_sock
def connect_to_sm(self, ip, port, server_conn=False):
    """Connect to the Site Manager at ip:port and set up a DTN connection.

    We send "<sh> <target>\\n", read back "<sh> <port_recv> <target...>",
    confirm with SUCCESS_INFO, then open the reverse channel on
    port_recv and register/start a DTNConnection.

    Fix over the original: ``conn_send`` is now closed on every failure
    path (EXIST_ERR reply, duplicate SH, failed reverse connect) —
    previously those paths leaked the socket.

    Returns True on success, False otherwise.
    """
    logger.debug(
        'Try to connect to Site Manager and establish DTN connection')
    conn_send = DTN._tcp_connect(ip, port)
    if conn_send is None:
        return False
    # send information
    conn_send.send('%s %s\n' % (self.sh, self.target))
    data = conn_send.recv(1024)
    if data == EXIST_ERR:
        logger.debug(
            'DTN connection already exists between these two Site Managers'
        )
        DTN._cleanup_socket(conn_send)  # avoid leaking on refusal
        return False
    parts = data.split()
    sh = parts[0]
    port_recv = int(parts[1])
    tar = ' '.join(parts[2:])
    logger.debug('SH: %s, Port: %d, Target: %s' % (sh, port_recv, tar))
    # check if site manager is connected
    if self.dtn.has_key(sh) and self.dtn[sh] is not None:
        conn_send.send(EXIST_ERR)
        logger.debug(
            'DTN connection already exists between these two Site Managers'
        )
        DTN._cleanup_socket(conn_send)
        return False
    conn_send.send(SUCCESS_INFO)
    # connect to ip using another port
    conn_recv = DTN._tcp_connect(ip, port_recv)
    if conn_recv is None:
        DTN._cleanup_socket(conn_send)  # half-open handshake: clean up
        return False
    dtn_conn = DTNConnection(conn_send, conn_recv, self.sh, sh, tar, self,
                             server_conn=server_conn)
    self.dtn[sh] = dtn_conn
    if not self.last_hash.has_key(sh):
        self.last_hash[sh] = ''
    logger.info('New DTN connection established')
    dtn_conn.start()
    return True
def handle_dtn_listen(self, s):
    """Handle an inbound DTN connection request on listener socket s.

    The peer sends "<sh> <target>"; we answer with a freshly opened
    reverse-channel port, wait for confirmation, then accept the
    peer's connect-back on that port.

    Fixes over the original: bounded port-probe loop (was an
    unbounded ``while True`` with a bare ``except``), ``conn_recv``
    closed on all failure paths, and the one-shot ``listener`` closed
    after its single accept (previously leaked even on success).

    Returns True on success, False otherwise.
    """
    logger.info('DTN connection request')
    conn_recv, remote = s.accept()
    data = conn_recv.recv(1024)
    tokens = data.split()
    sh = tokens[0]
    tar = ' '.join(tokens[1:])
    logger.debug('SH: %s, Target: %s' % (sh, tar))
    if self.dtn.has_key(sh):
        logger.debug(self.dtn[sh])
    if self.dtn.has_key(sh) and self.dtn[sh] is not None:
        conn_recv.send(EXIST_ERR)
        logger.debug(
            'DTN connection already exists between these two Site Managers'
        )
        DTN._cleanup_socket(conn_recv)
        return False
    # Generate a port for listening: probe upward, bounded to 1000 tries.
    listener = None
    random_n = 1
    while random_n <= 1000:
        try:
            listener = DTN._tcp_listen(self.my_ip,
                                       self.dtn_port + random_n)
            break
        except:
            random_n += 1
    if listener is None:
        logger.debug('no free port found for the reverse DTN channel')
        DTN._cleanup_socket(conn_recv)
        return False
    conn_recv.send('%s %d %s' % (self.sh, self.dtn_port + random_n,
                                 self.target))
    data = conn_recv.recv(1024)
    if data == EXIST_ERR:
        logger.debug(
            'DTN connection already exists between these two Site Managers'
        )
        DTN._cleanup_socket(conn_recv)
        listener.close()
        return False
    # wait for connection back from the peer
    listener.settimeout(5)
    try:
        conn_send, remote = listener.accept()
    except socket.timeout:
        logger.debug('timeout')
        DTN._cleanup_socket(conn_recv)
        listener.close()
        return False
    listener.close()  # one-shot listener is done
    if conn_send is None:
        DTN._cleanup_socket(conn_recv)
        return False
    dtn_conn = DTNConnection(conn_send, conn_recv, self.sh, sh, tar, self)
    self.dtn[sh] = dtn_conn
    if not self.last_hash.has_key(sh):
        self.last_hash[sh] = ''
    logger.info('New DTN connection established')
    dtn_conn.start()
    return True
# Source- and target-domain input batches, fixed batch size, NHWC layout.
xs = tf.placeholder(
    tf.float32,
    [BATCH_SIZE, DTN.IMAGE_SIZE, DTN.IMAGE_SIZE, DTN.NUM_CHANNELS],
    name='source-x-input')
xt = tf.placeholder(
    tf.float32,
    [BATCH_SIZE, DTN.IMAGE_SIZE, DTN.IMAGE_SIZE, DTN.NUM_CHANNELS],
    name='target-x-input')
# One-hot source labels; batch dimension left dynamic.
ys_ = tf.placeholder(tf.float32, [None, DTN.OUTPUT_NODE],
                     name='source-y-input')
# trainable=False fix lower layer param
_, _, source_fc, ys = DTN.inference(
    xs, False, new_regularizer, reuse=False,
    trainable=False)  # False: without dropout
_, _, target_fc, yt = DTN.inference(
    xt, False, new_regularizer, reuse=True,
    trainable=False)  # pesudo logit: yt
# NOTE(review): labels here are argmax(ys, 1) — the *predicted* logits,
# not the ground-truth placeholder ys_.  That makes this term trivially
# self-consistent; confirm whether tf.argmax(ys_, 1) was intended.
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=ys, labels=tf.argmax(ys, 1))  # plus the labeled part of target y
cross_entropy_mean = tf.reduce_mean(cross_entropy)
# Marginal and conditional MMD between source and target fc features.
mmd_mar, mmd_con = con_MMD(source_fc, ys_, target_fc, yt)
# Total loss = data term + weighted MMD terms + collected L2 losses.
loss = cross_entropy_mean + theta1 * mmd_mar + theta2 * mmd_con + tf.add_n(
    tf.get_collection('losses'))
def open_listener(self):
    """Bind the DTN TCP listener and the broadcast listener.

    @rewrite if implementing subclass
    """
    # TCP for peer DTN handshakes, UDP broadcast for discovery.
    self.dtn_listen = DTN._tcp_listen(self.my_ip, self.dtn_port)
    self.bcast_listen = DTN._broadcast_listen(self.bcast_port)