def __restart():
    """
    Restart logstash.

    Runs ``service logstash restart`` either locally or over ssh,
    depending on the configured location.

    @return: (str) command output, or None if the command failed
    """
    global ssh_host, ssh_port
    location = __get_location()
    cmd_list = []
    if location == DEF_REMOTE:
        # Prefix with the ssh invocation so the restart runs remotely.
        cmd_list.append(__get_ssh_command())
    cmd_list.append('service logstash restart')
    cmd = ' '.join(cmd_list)
    logger.d('CMD: %s' % cmd)
    # BUG FIX: o was previously unbound when the command raised, making
    # the final `return o` itself raise UnboundLocalError.
    o = None
    try:
        if sys.version_info < (3,):
            o = sp.check_output(cmd, shell=True)
        else:
            o = sp.getoutput(cmd)
    except Exception:  # was a bare except; keep best-effort but explicit
        logger.w(sys.exc_info()[0])
    return o
def _send_segment(self, data=b'', syn=False, ack=False, fin=False,
                  events=None) -> Segment:
    '''
    Build and send a segment, updating the sequence number; segments
    that require an acknowledgement are tracked for retransmission and
    (when no other sample is in flight) RTT measurement.

    :param data: payload bytes to send
    :param syn: set the SYN flag
    :param ack: set the ACK flag
    :param fin: set the FIN flag
    :param events: extra log-event tags forwarded to the send function
    :return: the Segment that was sent
    '''
    # BUG FIX: `events` previously defaulted to a shared mutable list
    # ([]); normalise None to a fresh list per call instead.
    if events is None:
        events = []
    seg = Segment(self._seq_num, self._ack_num, data,
                  syn=syn, ack=ack, fin=fin)
    logger.i('sending {}'.format(seg))
    # Update the sequence number: SYN and FIN each consume one sequence
    # number, data consumes len(data).
    self._seq_num += len(data) + syn + fin
    self._send_fn(seg, self._addr, events=events)
    # Pure ACKs (no data and no SYN/FIN) are not acknowledged themselves.
    needs_ack = not seg.ack or seg.data or seg.syn or seg.fin
    if needs_ack:
        # Track the segment and make sure a retransmission timer runs.
        self._unacked.append(seg)
        if not self._timer:
            self._set_timer()  # Set the timer if there isn't one
        if not self._rtt_seg:
            logger.w('Tracking RTT for segment {}'.format(seg.seq_num))
            self._rtt_seg = seg
    # BUG FIX: the declared return type is Segment but nothing was
    # returned; return the sent segment (backward compatible).
    return seg
def __restart():
    """
    Restart logstash.

    Runs ``service logstash restart`` either locally or over ssh,
    depending on the configured location.

    @return: (str) command output, or None if the command failed
    """
    global ssh_host, ssh_port
    location = __get_location()
    cmd_list = []
    if location == DEF_REMOTE:
        # Prefix with the ssh invocation so the restart runs remotely.
        cmd_list.append(__get_ssh_command())
    cmd_list.append('service logstash restart')
    cmd = ' '.join(cmd_list)
    logger.d('CMD: %s' % cmd)
    # BUG FIX: o was previously unbound when the command raised, making
    # the final `return o` itself raise UnboundLocalError.
    o = None
    try:
        if sys.version_info < (3,):
            o = sp.check_output(cmd, shell=True)
        else:
            o = sp.getoutput(cmd)
    except Exception:  # was a bare except; keep best-effort but explicit
        logger.w(sys.exc_info()[0])
    return o
def _fast_retransmit(self, seq_num): ''' Resends the segment with the corresponding sequence number as a result of three duplicate acks ''' if self._rtt_seg: logger.w('Stopping RTT calculation on FAST RXT') self._rtt_seg = None for seg in self._unacked: if seg.seq_num == seq_num: self._send_fn(seg, self._addr, events=['fast_rxt']) break
def timeout(self):
    '''
    Segment response timeout function.

    Runs when the retransmission timer fires: retransmits the oldest
    unacked segment and re-arms the timer.
    '''
    with self._lock:
        # The timer that fired is spent; a replacement is set below.
        self._timer = None
        if self._unacked:
            # Oldest outstanding segment is retransmitted first.
            seg = self._unacked[0]
            if self._rtt_seg:
                # A retransmission makes the pending RTT sample ambiguous.
                logger.w('Stopping RTT calculation on TIMEOUT RXT')
                self._rtt_seg = None
            logger.w('Timeout RXT: {}'.format(seg))
            self._send_fn(seg, self._addr, events=['timeout_rxt'])
            # Re-arm without reporting RTT, since this send is a
            # retransmission. NOTE(review): assumed to be inside the
            # `if self._unacked` branch — confirm against upstream source.
            self._set_timer(reportRTT=False)
def _do_upgrade():
    """
    Upgrade the database if necessary.

    See CHANGELOG for changes made in each version.
    """
    dao = Dao()
    current = dao.get_db_version()
    if current == DB_VERSION:
        # Already up to date; nothing to do.
        return
    if current > DB_VERSION:
        # The running code is older than the schema on disk; only log.
        logger.e('DB version ({}) is older than the existing version ({})!'
                 .format(DB_VERSION, current))
        return
    logger.w('DB needs to upgrade from version {} to {}'
             .format(current, DB_VERSION))
    # Pause for operator confirmation before upgrading.
    input('Press enter to continue...')
    dao.upgrade_db(current, DB_VERSION)
    dao.set_db_version(DB_VERSION)
    dao.commit()
    logger.i('DB upgrade complete!')
def init(cls):
    """
    Lazily create the controller singleton.

    Searches for the controller's serial port on first use and opens
    it; subsequent calls return the cached instance.

    @return: the controller instance, or None if unavailable
    """
    try:
        if cls._port is None:
            logger.i("searching controller....")
            cls._port = cls.__find_controller_port()
        if cls._instance is None and cls._port is not None:
            logger.i("find port: " + str(cls._port))
            try:
                cls._instance = cls(cls._port)
            except serial.SerialException:
                # Port was found but could not be opened; leave None.
                pass
        else:
            # NOTE(review): this branch also fires when an instance
            # already exists — the message may then be misleading.
            logger.w("controller is not connected.")
    except Exception:  # was a bare except; keep best-effort, log and go on
        logger.e('HwControllerManager.init() failed.' + str(sys.exc_info()[0]))
    return cls._instance
def _do_upgrade():
    """
    Upgrade the database if necessary.

    See CHANGELOG for changes made in each version.
    """
    dao = Dao()
    existing_ver = dao.get_db_version()
    if existing_ver > DB_VERSION:
        # Schema on disk is newer than this code understands; only log.
        logger.e(
            'DB version ({}) is older than the existing version ({})!'.format(
                DB_VERSION, existing_ver))
    elif existing_ver < DB_VERSION:
        logger.w('DB needs to upgrade from version {} to {}'.format(
            existing_ver, DB_VERSION))
        # Pause for operator confirmation before upgrading.
        # NOTE(review): blocks on interactive input — confirm this is only
        # ever run from a terminal.
        input('Press enter to continue...')
        dao.upgrade_db(existing_ver, DB_VERSION)
        dao.set_db_version(DB_VERSION)
        dao.commit()
        logger.i('DB upgrade complete!')
def run(self):
    """
    Worker main loop: keep reading JSON messages from the subscriber
    socket and invoke the event callback until `is_stop` is set.
    """
    try:
        while not self.is_stop:
            try:
                if self.line is None:
                    # (Re)fetch the initial state if none is held yet.
                    self.line = self.get_initial_data()
                self.event()
                self.line = self.socket.recv_json()
            except ValueError:
                # Malformed JSON from the peer; skip this message.
                logger.w("HwController recive unexpected data format.")
                continue
            except urllib2.URLError:
                # Initial-data fetch failed; back off and retry.
                sleep(3)
                continue
    finally:
        # Always release the ZeroMQ socket and context on exit.
        if self.socket is not None:
            self.socket.close()
        if self.context is not None:
            self.context.term()
def __init__(self, event, host):
    """
    Subscribe to the gamepad server's ZeroMQ publisher.

    @param event: callback invoked for each received line
    @param host: hostname of the gamepad server
    """
    super(ReadLineWorker, self).__init__()
    self.host = host
    self.context = zmq.Context()
    self.socket = self.context.socket(zmq.SUB)
    logger.i("Collecting updates from gamepad server...:{0}".format(host))
    self.socket.connect("tcp://{0}:5602".format(self.host))
    # Empty subscription filter: receive every published message.
    self.socket.setsockopt(zmq.SUBSCRIBE, '')
    self.is_stop = False
    self.event = event
    self.line = None
    try:
        self.line = self.get_initial_data()
        logger.i("initialize gamepad is successfull.")
    except Exception:  # was a bare except; keep best-effort but explicit
        logger.w("initialize gamepad failed. trying to connect..:" +
                 str(sys.exc_info()[0]))
def __set_configuration(filename):
    """
    Set (install) the logstash configuration file at conf_path.

    @param filename: unused here; kept for interface compatibility
    """
    global conf_path, src_path
    location = __get_location()
    # BUG FIX: cmd was unbound (NameError) when location was neither
    # DEF_REMOTE nor DEF_LOCAL; initialise and bail out instead.
    cmd = None
    if location == DEF_REMOTE:
        cmd = __get_scp_command()
    elif location == DEF_LOCAL:
        cmd = 'cp -f {0} {1}'.format(src_path, conf_path)
    if cmd is None:
        logger.w('unknown location: {0}'.format(location))
        return
    logger.d('CMD: %s' % cmd)
    try:
        if sys.version_info < (3,):
            sp.check_output(cmd, shell=True)
        else:
            sp.getoutput(cmd)
    except Exception:  # was a bare except; keep best-effort but explicit
        logger.w(sys.exc_info()[0])
def __set_configuration(filename):
    """
    Set (install) the logstash configuration file at conf_path.

    @param filename: unused here; kept for interface compatibility
    """
    global conf_path, src_path
    location = __get_location()
    # BUG FIX: cmd was unbound (NameError) when location was neither
    # DEF_REMOTE nor DEF_LOCAL; initialise and bail out instead.
    cmd = None
    if location == DEF_REMOTE:
        cmd = __get_scp_command()
    elif location == DEF_LOCAL:
        cmd = 'cp -f {0} {1}'.format(src_path, conf_path)
    if cmd is None:
        logger.w('unknown location: {0}'.format(location))
        return
    logger.d('CMD: %s' % cmd)
    try:
        if sys.version_info < (3,):
            sp.check_output(cmd, shell=True)
        else:
            sp.getoutput(cmd)
    except Exception:  # was a bare except; keep best-effort but explicit
        logger.w(sys.exc_info()[0])
def main(debug_mode, repeat):
    """
    Main process: poll a sentinel for the current redis master and
    rewrite the logstash configuration when it changes.

    @param debug_mode: enable debug logging
    @param repeat: loop forever when True, run a single pass when False
    """
    # BUG FIX: DEF_MASTER_ID was assigned locally below; when
    # sentinel_master was not configured, the later read raised
    # UnboundLocalError. Declare it global so the module-level default
    # applies when no override is configured.
    global CONF, DEF_DEBUG_MODE, DEF_MASTER_ID
    DEF_DEBUG_MODE = debug_mode
    logger.DEBUG_MODE = debug_mode
    load_configuration()
    logstash = CONF.logstash
    sentinel_num = len(logstash.sentinel)
    if logstash.sentinel_master is not None:
        DEF_MASTER_ID = logstash.sentinel_master
    while True:
        logger.d("")
        logger.d("=" * 80)
        logger.d("[Loop Watch] Start")
        logger.d("-" * 80)
        # Pick a random sentinel to spread queries across instances.
        i = logstash.sentinel[random.randint(0, sentinel_num - 1)]
        logger.d("sentinel info: {0}".format(i))
        s = sentinel.get_session(i.host, i.port)
        logger.d("sentinel session: {0}".format(s))
        m = sentinel.get_master(s, DEF_MASTER_ID)
        logger.d("master info: {0}".format(m))
        # ### Change logstash configuration
        if m is not None:
            chagne_logstash_conf(m[0], m[1])
        else:
            logger.w("Check Master Name or Slave Failed")
        if repeat:
            time.sleep(float(CONF.period))
        else:
            break
    logger.d("-" * 80)
    logger.d("[Loop Watch] End")
def __get_configuration():
    """
    Get the configuration of logstash.

    @return: (str) configuration contents, or None on failure
    """
    global ssh_host, ssh_port, ssh_id_file, conf_path
    location = __get_location()
    logger.d("logstash:__get_configuration:location: {0}".format(location))
    cmd_list = []
    if location == DEF_REMOTE:
        # Prefix with the ssh invocation so `cat` runs remotely.
        cmd_list.append(__get_ssh_command())
    cmd_list.append('cat {0}'.format(conf_path))
    cmd = " ".join(cmd_list)
    logger.d('CMD: %s' % cmd)
    o = None
    if cmd != '':
        try:
            logger.d("logstash:__get_configuration:try")
            if sys.version_info < (3,):
                # BUG FIX: previously passed cmd_list; with shell=True a
                # list makes only its first element the shell command.
                o = sp.check_output(cmd, shell=True)
            else:
                o = sp.getoutput(cmd)
        except Exception:  # was a bare except; keep best-effort but explicit
            logger.w("logstash:__get_configuration:except")
            logger.w(sys.exc_info()[0])
    return o
def __get_host_info():
    """
    Extract the redis input endpoints from the logstash configuration.

    @return: (list) [(host, port), ...] for redis
    """
    global conf
    host_pat = r'input.+?[{].+?redis.+?[{].+?host.+?=>.+?"(.+?)".+?\n'
    hosts = re.findall(host_pat, conf, re.S)
    logger.d("current redis hosts: " + str(hosts))
    port_pat = r'input.+?[{].+?redis.+?[{].+?port.+?=>.+?(\d{1,5}).+?\n'
    ports = re.findall(port_pat, conf, re.S)
    logger.d("current redis ports: " + str(ports))
    # Host and port lists must pair up one-to-one; otherwise give up.
    if len(hosts) != len(ports):
        logger.w('different count hosts and ports')
        return []
    return [(host, int(port)) for host, port in zip(hosts, ports)]
def __get_configuration():
    """
    Get the configuration of logstash.

    @return: (str) configuration contents, or None on failure
    """
    global ssh_host, ssh_port, ssh_id_file, conf_path
    location = __get_location()
    logger.d("logstash:__get_configuration:location: {0}".format(location))
    cmd_list = []
    if location == DEF_REMOTE:
        # Prefix with the ssh invocation so `cat` runs remotely.
        cmd_list.append(__get_ssh_command())
    cmd_list.append('cat {0}'.format(conf_path))
    cmd = " ".join(cmd_list)
    logger.d('CMD: %s' % cmd)
    o = None
    if cmd != '':
        try:
            logger.d("logstash:__get_configuration:try")
            if sys.version_info < (3,):
                # BUG FIX: previously passed cmd_list; with shell=True a
                # list makes only its first element the shell command.
                o = sp.check_output(cmd, shell=True)
            else:
                o = sp.getoutput(cmd)
        except Exception:  # was a bare except; keep best-effort but explicit
            logger.w("logstash:__get_configuration:except")
            logger.w(sys.exc_info()[0])
    return o
import logger


class RealsenseManager:
    # Placeholder; rebound below to the real or dummy implementation.
    pass


# Prefer the real librealsense-backed manager; fall back to the dummy
# implementation when the library (or the impl module) is unavailable.
try:
    import pyrealsense as pyrs
    import realsense_manager_impl
    RealsenseManager = realsense_manager_impl.RealsenseManager
except Exception:  # was a bare except; ImportError is the expected case
    logger.w("librealsense is not installed.")
    import realsense_manager_dummy
    RealsenseManager = realsense_manager_dummy.RealsenseManager
# Configure logging and bind the UDP socket used by the PLD module.
logger.set_log_file(log_file)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('127.0.0.1', args.port))
print('Binding to {}'.format(('127.0.0.1', args.port)))
PLD.init(args, sock=sock)

# When a receiver port is given this side initiates the connection;
# otherwise it waits for the peer to connect to us.
addr = None
if args.receiver_port:
    addr = ('127.0.0.1', args.receiver_port)
stp = STP(PLD.send, PLD._sock, data_proc, MSS=args.MSS, MWS=args.MWS,
          gamma=args.gamma, addr=addr)
with open(args.data_src, 'rb') as f:
    data = f.read()
if args.receiver_port:
    logger.w('Initiating Connection')
    stp.connect()
else:
    logger.w('Awaiting Connection')
    stp.await_connection()
# Send the whole payload, wait until every byte is acked, then tear down.
stp.send(data)
stp.wait_for_all_ack()
stp.close_connection()
logger.write(sender=True, receiver=True)
# Command-line interface for the receiver.
parser.add_argument('port', type=int, help='The port number to listen on')
parser.add_argument('filename', type=str,
                    help='The file to store the received data in')
parser.add_argument('--logfile', type=str, help='The log file',
                    default='receiver_log.txt')
args = parser.parse_args()

# Start from a clean output file; a missing file is fine.
try:
    os.remove(args.filename)
except OSError:
    pass


def data_proc(d: bytes):
    # Accumulate received payload into the module-level buffer.
    global data
    data += d


logger.set_log_file(args.logfile)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('localhost', args.port))
stp = STP(send, sock, data_proc)
# Block until the sender has transferred everything and closed.
stp.listen()
logger.write(receiver=True)
with open(args.filename, 'ab') as f:
    f.write(data)
logger.w('done receiving')
# Remaining command-line arguments for the sender / PLD module.
parser.add_argument('maxDelay', type=int, help='Maximum delay in ms ' +
                                               'for delayed packets',
                    default=1000)
parser.add_argument('seed', type=int, help='RNG seed', default=0)
parser.add_argument('--logfile', type=str, help='Log file',
                    default='sender_log.txt')
args = parser.parse_args()

# Initialize the PLD with the parsed args
PLD.init(args)


def data_proc(data):
    # The sender does not expect payload; just echo anything received.
    print('received data {}'.format(data.decode('utf-8')))


with open(args.filename, 'rb') as f:
    data = f.read()
logger.set_log_file(args.logfile)
stp = STP(PLD.send, PLD._sock, data_proc, MSS=args.MSS, MWS=args.MWS,
          gamma=args.gamma, addr=(args.receiver_ip, args.receiver_port))
# Connect, push the whole file, wait for all acks, then tear down.
stp.connect()
stp.send(data)
stp.wait_for_all_ack()
stp.close_connection()
logger.write(sender=True)
# NOTE(review): reaches into a private counter of the logger module.
logger.w('Number of RTT calculations: {}'.format(logger._rtts_calculated))
def _receive(self, seg: Segment, addr: (str, int)) -> None:
    '''
    Process a segment that was received.

    Runs the full receive state machine under the connection lock:
    validates the source address, discards corrupted segments, handles
    ACK bookkeeping (duplicate-ack counting, RTT sampling, unacked-list
    pruning), advances the connection state, delivers in-order data and
    acknowledges data segments.

    :param seg: the received Segment
    :param addr: (host, port) the segment arrived from
    '''
    with self._lock:
        logger.i('received {}'.format(seg))
        events = ['rcv']
        # Make sure the packet comes from the correct address
        if not self._addr:
            # First contact: lock onto this peer.
            self._addr = addr
        elif self._addr != addr:
            logger.e('[{}] Unknown source! {} {}'.format(
                self._state, self._addr, addr))
            return
        # If the segment has a bit error, log it and do nothing
        if seg.bit_error:
            logger.add_bit_error_received()
            events.append('corr')
            logger.log(events, seg)
            return
        # If this segment has been acked or buffered, log it as duplicate
        if seg.seq_num < self._ack_num or seg.seq_num in self._buffer:
            logger.add_duplicate_data_segment()
        if self._state == States.CLOSED:
            logger.w('[{}] Received message in closed state'.format(
                self._state))
            return
        elif self._state == States.LISTEN:
            if seg.syn:
                # Syn received, respond with syn_ack and update state
                self._ack_num = seg.seq_num + 1
                self._set_state(States.SYN_RECEIVED)
                logger.log(events, seg)
                self._send_segment(syn=True, ack=True)
            else:
                logger.w(
                    '[{}] Received non-syn segment in listen state'.format(
                        self._state))
            # NOTE(review): return assumed shared by both LISTEN paths
            # (the collapsed source is ambiguous); the syn path has
            # already logged and replied above.
            return
        # If this segment is an ACK
        if seg.ack:
            if seg.ack_num == self._last_ack:
                # We have a duplicate ack
                self._dup_ack_count += 1
                events.append('DA')
            elif seg.ack_num > self._last_ack:
                # This acks at least the oldest unacked segment
                self._dup_ack_count = 0
                # Stop the timer
                if self._timer:
                    self._timer.cancel()
                    self._timer = None
                if self._rtt_seg:
                    # Check whether this ack covers the RTT-tracked segment.
                    r = self._rtt_seg
                    expected_ack = r.seq_num + len(r.data) + r.syn + r.fin
                    if expected_ack == seg.ack_num:
                        # Calculate RTT (milliseconds)
                        dt = seg.created_at - self._rtt_seg.created_at
                        rtt = dt.total_seconds() * 1000
                        self.addRTT(rtt)
                        logger.w('Received RTT for segment {}, {:0.4f}'.format(
                            self._rtt_seg.seq_num, rtt))
                        self._rtt_seg = None
                    elif expected_ack < seg.ack_num:
                        # Cumulative ack skipped past it; sample is invalid.
                        logger.w('Received ack beyond RTT seg, clearing')
                        self._rtt_seg = None
                # Update the last ack received
                self._last_ack = seg.ack_num
                # Clear the unacked list: drop everything cumulatively acked.
                for i in range(len(self._unacked)):
                    if self._unacked[i].seq_num >= seg.ack_num:
                        self._unacked = self._unacked[i:]
                        break
                else:
                    self._unacked.clear()
        # Track the ack number when this segment arrived
        previous_ack_num = self._ack_num
        # Update the ack num if this segment arrived in order.
        if self._ack_num == seg.seq_num:
            self._ack_num += len(seg.data) + seg.syn + seg.fin
        # Log the segment before we send other segments
        logger.log(events, seg)
        # Resend on three duplicate acks
        if self._dup_ack_count == 3:
            self._fast_retransmit(seg.ack_num)
            self._dup_ack_count = 0

        # Process the segment's data. Define as a function, since this is
        # done in many states
        def process_data():
            if seg.seq_num == previous_ack_num:
                # In-order: deliver it plus any buffered continuation.
                logger.add_file_size(len(seg.data))
                self._data_proc(seg.data + self._clear_buffer())
            elif seg.seq_num > previous_ack_num:
                # Out-of-order: hold until the gap is filled.
                self._buffer[seg.seq_num] = seg

        if self._state == States.SYN_SENT:
            if seg.ack and seg.syn:
                self._set_state(States.ESTABLISHED)
                self._send_segment(ack=True)
            elif seg.data:
                # If the segment has data, buffer it.
                self._buffer[seg.seq_num] = seg
        elif self._state == States.SYN_RECEIVED:
            if seg.ack:
                # Acking the syn/ack we sent
                self._set_state(States.ESTABLISHED)
            elif seg.data:
                # If the segment has data, buffer it.
                self._buffer[seg.seq_num] = seg
        elif self._state == States.ESTABLISHED:
            if seg.data:
                process_data()
            if seg.fin:
                self._send_segment(ack=True)
                self._set_state(States.CLOSE_WAIT)
        elif self._state == States.FIN_WAIT_1:
            if seg.data:
                process_data()
            if seg.ack:
                self._set_state(States.FIN_WAIT_2)
            elif seg.fin:
                # Assume two separate segments for FIN / ACK
                self._set_state(States.CLOSING)
        elif self._state == States.FIN_WAIT_2:
            if seg.data:
                process_data()
            if seg.fin:
                self._send_segment(ack=True)
                self._set_state(States.FINISHED)
        elif self._state == States.CLOSE_WAIT:
            # Wait for application to close connection
            if seg.data:
                process_data()
        elif self._state == States.LAST_ACK:
            if seg.ack:
                self._set_state(States.FINISHED)
        elif self._state == States.CLOSING:
            if seg.ack:
                self._set_state(States.FINISHED)
        # If this is a data segment, send an ack
        if seg.data:
            events = []
            if self._ack_num == previous_ack_num:
                # Ack number did not advance: this ack is a duplicate.
                events.append('DA')
                logger.add_duplicate_ack_sent()
            self._send_segment(ack=True, events=events)
        # Notify threads waiting on receipt of a segment
        self._receive_condition.notify_all()