def _printheader(self, log):
        """Prints header to log file; inspired by that in GPAW."""
        log(logo)
        log('Amp: Atomistic Machine-learning Package')
        log('Developed by Andrew Peterson, Alireza Khorshidi, and others,')
        log('Brown University.')
        log(' PI Website: http://brown.edu/go/catalyst')
        log(' Official repository: http://bitbucket.org/andrewpeterson/amp')
        log(' Official documentation: http://amp.readthedocs.org/')
        log(' Citation:')
        log('  Khorshidi & Peterson, Computer Physics Communications')
        log('  doi:10.1016/j.cpc.2016.05.010 (2016)')
        log('=' * 70)
        log('User: %s' % getuser())
        log('Hostname: %s' % gethostname())
        log('Date: %s' % now(with_utc=True))
        uname = platform.uname()
        log('Architecture: %s' % uname[4])
        log('PID: %s' % os.getpid())
        log('Amp version: %s' % 'NOT NUMBERED YET.')  # FIXME/ap. Look at GPAW
        ampdirectory = os.path.dirname(os.path.abspath(__file__))
        log('Amp directory: %s' % ampdirectory)
        commithash, commitdate = get_git_commit(ampdirectory)
        log(' Last commit: %s' % commithash)
        log(' Last commit date: %s' % commitdate)
        log('Python: v{0}.{1}.{2}: %s'.format(*sys.version_info[:3]) %
            sys.executable)
        log('ASE v%s: %s' % (aseversion, os.path.dirname(ase.__file__)))
        log('NumPy v%s: %s' %
            (np.version.version, os.path.dirname(np.__file__)))
        # SciPy is not a strict dependency.
        try:
            import scipy
            log('SciPy v%s: %s' %
                (scipy.version.version, os.path.dirname(scipy.__file__)))
        except ImportError:
            log('SciPy: not available')
        # ZMQ and pxssh are only necessary for parallel calculations.
        try:
            import zmq
            log('ZMQ/PyZMQ v%s/v%s: %s' %
                (zmq.zmq_version(), zmq.pyzmq_version(),
                 os.path.dirname(zmq.__file__)))
        except ImportError:
            log('ZMQ: not available')
        try:
            import pxssh
            log('pxssh: %s' % os.path.dirname(pxssh.__file__))
        except ImportError:
            log('pxssh: Not available from pxssh.')
            try:
                from pexpect import pxssh
            except ImportError:
                log('pxssh: Not available from pexpect.')
            else:
                import pexpect
                log('pxssh (via pexpect v%s): %s' %
                    (pexpect.__version__, pxssh.__file__))

        log('=' * 70)
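All of the snippets collected on this page query the same small set of pyzmq version helpers. A minimal standalone sketch of that API surface, shown here for reference (the printed values and the version threshold are only illustrative):

import zmq

# String forms, convenient for logging; this is what most examples below print.
print("libzmq: %s" % zmq.zmq_version())       # e.g. '4.3.4'
print("pyzmq:  %s" % zmq.pyzmq_version())     # e.g. '25.1.0'

# Tuple forms, safer for comparisons than parsing the strings by hand.
if zmq.pyzmq_version_info() >= (14, 4) and zmq.zmq_version_info() >= (4,):
    print("socket monitoring API available")  # illustrative feature gate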
Example #2
    def __init__(self, args):
        super().__init__()
        sys.path.append(args.run_script_dir)

        self.logger = set_logger(colored('VENTILATOR', 'red'),
                                 verbose=args.verbose,
                                 logfile=args.logger)
        self.logger.info('run_script_dir: %s' % args.run_script_dir)
        self.num_worker = args.num_worker
        self.max_batch_size = args.max_batch_size
        self.num_concurrent_socket = max(
            8, args.num_worker * 2)  # optimize concurrency for multi-clients
        self.port = args.port
        self.args = args
        self.args.device_map = self.__check_arg_device_map()
        self.status_args = {
            k: (v if k != 'pooling_strategy' else v.value)
            for k, v in sorted(vars(args).items())
        }
        self.status_static = {
            'tensorflow_version': _tf_ver_,
            'python_version': sys.version,
            'server_version': __version__,
            'pyzmq_version': zmq.pyzmq_version(),
            'zmq_version': zmq.zmq_version(),
            'server_start_time': str(datetime.now()),
        }
        self.processes = []
        self.is_ready = threading.Event()
        self.__set_zmq_socket_tmp_dir()
Example #3
    def __init__(self, args):
        super().__init__()
        self.logger = set_logger(colored('VENTILATOR', 'magenta'))
        self.args = args

        # ZeroMQ server configuration
        self.num_worker = args.num_worker  # number of Workers

        # restrict number of workers temporarily
        self.num_concurrent_socket = max(16, args.num_worker * 2)  # optimize concurrency for multi-clients
        self.port = args.port

        # project configuration
        self.model_dir = args.model_dir  # alpaca_model per project
        self.model = None # pass this model to every sink and worker!!!!
        # learning initial configuration
        self.batch_size = args.batch_size
        self.epoch = args.epoch

        self.status_args = {k: v for k, v in sorted(vars(args).items())}
        self.status_static = {
            'python_version': sys.version,
            'server_version': __version__,
            'pyzmq_version': zmq.pyzmq_version(),
            'zmq_version': zmq.zmq_version(),
            'server_start_time': str(datetime.now()),
        }

        self.processes = []
        self.logger.info('Initialize the alpaca_model... could take a while...')
        self.is_ready = threading.Event()
Example #4
    def enable_monitor(self, events=None):

        # The standard approach of binding and then connecting does not
        # work in this specific case. The event loop does not properly
        # detect messages on the inproc transport which means that event
        # messages get missed.
        # pyzmq's 'get_monitor_socket' method can't be used because this
        # performs the actions in the wrong order for use with an event
        # loop.
        # For more information on this issue see:
        # http://lists.zeromq.org/pipermail/zeromq-dev/2015-July/029181.html

        if (zmq.zmq_version_info() < (4,) or
                zmq.pyzmq_version_info() < (14, 4)):
            raise NotImplementedError(
                "Socket monitor requires libzmq >= 4 and pyzmq >= 14.4, "
                "have libzmq:{}, pyzmq:{}".format(zmq.zmq_version(),
                                                  zmq.pyzmq_version()))

        if self._monitor is None:
            addr = "inproc://monitor.s-{}".format(self._zmq_sock.FD)
            events = events or zmq.EVENT_ALL
            _, self._monitor = yield from create_zmq_connection(
                lambda: _ZmqEventProtocol(self._loop, self._protocol),
                zmq.PAIR,
                connect=addr,
                loop=self._loop)
            # bind must come after connect
            self._zmq_sock.monitor(addr, events)
            yield from self._monitor.wait_ready
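For contrast with the comment above: outside an event loop, pyzmq's built-in monitor helper is normally sufficient. A minimal blocking sketch, assuming a plain zmq.Socket with an illustrative socket type and endpoint (not the transport setup used in this example):

import zmq
from zmq.utils.monitor import recv_monitor_message

ctx = zmq.Context.instance()
sock = ctx.socket(zmq.DEALER)                  # illustrative socket type
monitor = sock.get_monitor_socket()            # attach the monitor before connecting
sock.connect("tcp://127.0.0.1:5555")           # illustrative endpoint

while monitor.poll(1000):                      # stop after one second with no events
    evt = recv_monitor_message(monitor)        # dict with 'event', 'value', 'endpoint'
    print(evt)
    if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
        break
monitor.close()
sock.close()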
Example #5
def print_info():
    log.info("Python version: %s, %s", '.'.join(
        (str(e) for e in sys.version_info)), sys.executable)

    log.info("zeromq version: %s", zmq.zmq_version())
    log.info("pyzmq version:  %s", zmq.pyzmq_version())
    log.info("track version:  %s", str(track_base.version_info))
Example #6
 def __init__(self, address='*', port1='5566', port2='5567'):
     print("Current libzmq version is %s" % zmq.zmq_version())
     print("Current  pyzmq version is %s" % zmq.pyzmq_version())
     self.context = Context.instance()
     self.url1 = "tcp://{}:{}".format(address, port1)
     self.url2 = "tcp://{}:{}".format(address, port2)
     self.xpub_xsub_proxy()
Example #7
    def __init__(self,args):
        super().__init__()
        self.logger = set_logger(colored('VENTILATOR','magenta'),args.verbose)
        self.max_seq_len = args.max_seq_len
        self.num_worker = args.num_worker
        self.max_batch_size = args.max_batch_size
        self.num_concurrent_socket = max(8,args.num_worker * 2)
        self.port = args.port
        self.args = args
        self.status_args = {k:(v if k != 'pooling_strategy' else v.value) for k,v in sorted(vars(args).items())}
        self.status_static = {
            'tensorflow_version':_tf_ver_,
            'python_version':sys.version,
            'server_version':__version__,
            'pyzmq_version':zmq.pyzmq_version(),
            'zmq_version':zmq.zmq_version(),
            'server_start_time':str(datetime.now())
        }
        self.processes = []
        # If the BERT model path is not empty, start the BERT model
        if args.mode == 'BERT':
            self.logger.info('freeze, optimize and export graph, could take a while...')
            with Pool(processes=1) as pool:
                # optimize the graph, must be done in another process
                from .graph import  optimize_bert_graph
                self.graph_path = pool.apply(optimize_bert_graph,(self.args,))

            if self.graph_path:
                self.logger.info('optimized graph is stored at:%s' % self.graph_path)
            else:
                raise FileNotFoundError('graph optimization fails and returns empty result')
        elif args.mode == "NER":
            self.logger.info('loading ner model,could take a while..')
            with Pool(processes=1) as pool:
                # optimize the graph, must be done in another process
                from .graph import optimize_ner_model
                num_labels,label2id,id2label = init_predict_var(self.args.model_dir)
                self.num_labels = num_labels + 1
                self.id2label =id2label
                self.graph_path = pool.apply(optimize_ner_model,(self.args,num_labels))
            if self.graph_path:
                self.logger.info('optimized graph is stored at: %s' % self.graph_path)
            else:
                raise FileNotFoundError("graph optimization fails and returns empty result")
        elif args.mode == 'CLASS':
            self.logger.info('loading classification model, could take a while...')
            with Pool(processes=1) as pool:
                # optimize the graph, must be done in another process
                from .graph import optimize_class_model
                num_labels,label2id,id2label=init_predict_var(self.args.model_dir)
                self.num_labels = num_labels
                self.id2label = id2label
                self.logger.info('contain %d labels:%s' % (num_labels,str(id2label.values())))
                self.graph_path = pool.apply(optimize_class_model,(self.args,num_labels))
            if self.graph_path:
                self.logger.info('optimized graph is stored at: %s' % self.graph_path)
            else:
                raise FileNotFoundError("graph optimization fails and returns empty result")
        else:
            raise ValueError("args model not special")
Example #8
    def __init__(self, args):
        super().__init__()
        self.logger = set_logger(colored('VENTILATOR', 'magenta'),
                                 args.verbose)

        self.model_dir = args.model_dir
        self.max_seq_len = args.max_seq_len
        self.num_worker = args.num_worker
        self.max_batch_size = args.max_batch_size
        self.port = args.port
        self.args = args
        self.status_args = {
            k: (v if k != 'pooling_strategy' else v.value)
            for k, v in sorted(vars(args).items())
        }
        self.status_static = {
            'tensorflow_version': _tf_ver_,
            'python_version': sys.version,
            'server_version': __version__,
            'pyzmq_version': zmq.pyzmq_version(),
            'zmq_version': zmq.zmq_version(),
            'server_start_time': str(datetime.now()),
        }
        self.processes = []
        self.logger.info(
            'freeze, optimize and export graph, could take a while...')
        with Pool(processes=1) as pool:
            # optimize the graph, must be done in another process
            from .graph import optimize_graph
            self.graph_path = pool.apply(optimize_graph, (self.args, ))
        self.logger.info('optimized graph is stored at: %s' % self.graph_path)
Example #9
    def run(self):
        """Run the test suite, with nose, or unittest if nose is unavailable"""
        # crude check for inplace build:
        try:
            import zmq
        except ImportError:
            print_exc()
            fatal(
                "\n       ".join(
                    [
                        "Could not import zmq!",
                        "You must build pyzmq with 'python setup.py build_ext --inplace' for 'python setup.py test' to work.",
                        "If you did build pyzmq in-place, then this is a real error.",
                    ]
                )
            )
            sys.exit(1)

        info("Testing pyzmq-%s with libzmq-%s" % (zmq.pyzmq_version(), zmq.zmq_version()))

        if nose is None:
            warn("nose unavailable, falling back on unittest. Skipped tests will appear as ERRORs.")
            return self.run_unittest()
        else:
            return self.run_nose()
Example #10
	def __init__(self, args):
		super().__init__()
		self.logger = set_logger(colored('VENTILATOR', 'magenta'), args.verbose)

		self.model_path			= args.model_path 		# Location where BERT model is stored
		self.max_seq_len		= args.max_seq_len 		# Model related argument
		self.num_worker			= args.num_worker 		# Number of server workers
		self.max_batch_size		= args.max_batch_size 		# Maximum batch size
		self.num_concurrent_socket	= max(8, args.num_worker * 2)  	# optimize concurrency for multi-clients
		self.port			= args.port 			# Port of ventilator PULLING users queries
		self.args			= args
		self.status_args		= {k: (v if k != 'pooling_strategy' else v.value) for k, v in sorted(vars(args).items())}
		self.status_static = {
			'python_version'	: sys.version,
			'server_version'	: __version__,
			'pyzmq_version'		: zmq.pyzmq_version(),
			'zmq_version'		: zmq.zmq_version(),
			'server_start_time'	: str(datetime.now()),
		}
		self.processes = []
		self.logger.info('freeze, optimize and export graph, could take a while...')

		self.model_params	= self.load_model(args.model_path)
		self.type_remap		= json.load(open(args.type_remap_json))
		self.type2id		= json.load(open(args.type2id_json))
		self.umls2type		= pickle.load(open(args.umls2type_file, 'rb'))

		self.is_ready = threading.Event()
Example #11
    def run(self):
        """Run the test suite, with nose, or unittest if nose is unavailable"""
        # crude check for inplace build:
        try:
            import zmq
        except ImportError:
            print_exc()
            fatal('\n       '.join([
                "Could not import zmq!",
                "You must build pyzmq with 'python setup.py build_ext --inplace' for 'python setup.py test' to work.",
                "If you did build pyzmq in-place, then this is a real error."
            ]))
            sys.exit(1)

        info("Testing pyzmq-%s with libzmq-%s" %
             (zmq.pyzmq_version(), zmq.zmq_version()))

        try:
            import nose
        except ImportError:
            warn(
                "nose unavailable, falling back on unittest. Skipped tests will appear as ERRORs."
            )
            return self.run_unittest()
        else:
            return self.run_nose()
Example #12
def main():
    # Parse command line args.
    parser = argparse.ArgumentParser(description='Calico ACL Manager')
    parser.add_argument('-c', '--config-file', dest='config_file')
    args = parser.parse_args()

    # Read config file.
    config = ConfigParser.ConfigParser()
    config.read(args.config_file or 'acl_manager.cfg')
    plugin_address = config.get('global', 'PluginAddress')
    log_file_path = config.get('log', 'LogFilePath')

    # Configure logging.
    common.mkdir_p(os.path.dirname(log_file_path))
    logging.basicConfig(filename=log_file_path, level=logging.DEBUG)
    
    # Create ZeroMQ context.
    context = zmq.Context()
    log.info("pyzmq version is %s" % zmq.pyzmq_version())
    
    # Create and start components.
    acl_store = ACLStore()
    network_store = NetworkStore()
    
    publisher = ACLPublisher(context, acl_store)
    acl_store.start(publisher)

    processor = RuleProcessor(acl_store, network_store)
    network_store.add_processor(processor)
    
    subscriber = NetworkSubscriber(context, network_store, plugin_address)
Example #13
    def enable_monitor(self, events=None):

        # The standard approach of binding and then connecting does not
        # work in this specific case. The event loop does not properly
        # detect messages on the inproc transport which means that event
        # messages get missed.
        # pyzmq's 'get_monitor_socket' method can't be used because this
        # performs the actions in the wrong order for use with an event
        # loop.
        # For more information on this issue see:
        # http://lists.zeromq.org/pipermail/zeromq-dev/2015-July/029181.html

        if (zmq.zmq_version_info() < (4,) or
                zmq.pyzmq_version_info() < (14, 4,)):
            raise NotImplementedError(
                "Socket monitor requires libzmq >= 4 and pyzmq >= 14.4, "
                "have libzmq:{}, pyzmq:{}".format(
                    zmq.zmq_version(), zmq.pyzmq_version()))

        if self._monitor is None:
            addr = "inproc://monitor.s-{}".format(self._zmq_sock.FD)
            events = events or zmq.EVENT_ALL
            _, self._monitor = yield from create_zmq_connection(
                lambda: _ZmqEventProtocol(self._loop, self._protocol),
                zmq.PAIR, connect=addr, loop=self._loop)
            # bind must come after connect
            self._zmq_sock.monitor(addr, events)
            yield from self._monitor.wait_ready
Example #14
    def __init__(self, port, require_settings=False):
        """ Constructor
        - port      : the 0mq communication port
        """
        # Setup the 0mq channel.
        self.__context = zmq.Context()
        self.__socket = self.__context.socket(zmq.PAIR)
        print "{}: (pyzmq version: {}) started on: tcp://127.0.0.1:{}"\
            .format(self.__class__.__name__, zmq.pyzmq_version(), port)
        self.__socket.connect("{}:{}".format(
            BaseService.__HOST_ADDRESS, port))

        # Initialize state.
        self.__enabled = False
        self.__shutting_down = False
        self._state = None
        self._update_state()

        # Setup service if possible.
        if not require_settings:
            self._setup()
            self.__initialized = True
        else:
            self.__initialized = False

        # Setup setting handling.
        self.__setting_store = SettingsStore()
Example #15
    def __init__(self, args):
        super().__init__()
        self.logger = set_logger(colored('VENTILATOR', 'magenta'), args.verbose)

        self.model_dir = args.model_dir
        self.max_seq_len = args.max_seq_len
        self.num_worker = args.num_worker
        self.max_batch_size = args.max_batch_size
        self.num_concurrent_socket = max(8, args.num_worker * 2)  # optimize concurrency for multi-clients
        self.port = args.port
        self.args = args
        self.status_args = {k: (v if k != 'pooling_strategy' else v.value) for k, v in sorted(vars(args).items())}
        self.status_static = {
            'tensorflow_version': _tf_ver_,
            'python_version': sys.version,
            'server_version': __version__,
            'pyzmq_version': zmq.pyzmq_version(),
            'zmq_version': zmq.zmq_version(),
            'server_start_time': str(datetime.now()),
        }
        self.processes = []
        self.logger.info('freeze, optimize and export graph, could take a while...')
        with Pool(processes=1) as pool:
            # optimize the graph, must be done in another process
            from .graph import optimize_graph
            self.graph_path, self.bert_config = pool.apply(optimize_graph, (self.args,))
        # from .graph import optimize_graph
        # self.graph_path = optimize_graph(self.args, self.logger)
        if self.graph_path:
            self.logger.info('optimized graph is stored at: %s' % self.graph_path)
        else:
            raise FileNotFoundError('graph optimization fails and returns empty result')
        self.is_ready = threading.Event()
Example #16
 def __init__(self):
   Tool.__init__(self)
   
   # * Initialize ZMQ context and open subscriber, publisher sockets
   self.logger.debug("ZMQ version: {}, PyZMQ version: {}".format(zmq.zmq_version(), zmq.pyzmq_version()))
   # ** Context
   self.zmqContext = zmq.Context()
   # ** Subscriber
   self.subSocket = self.zmqContext.socket(zmq.SUB)
   self.subSocket.connect(self.sub_address)
   time.sleep(0.005)  # mandatory sleep for ZMQ backend
   self.logger.debug("[sub] Connected to {}".format(self.sub_address))
   # ** Subscriber topics for input messages
   self.subSocket.setsockopt(zmq.SUBSCRIBE, self.sub_topic)
   self.subSocket.setsockopt(zmq.LINGER, self.sub_socket_linger)
   self.logger.debug("[sub]Subscribed to topic \"{}\"".format(self.sub_topic))
   time.sleep(0.005)  # mandatory sleep for ZMQ backend
   # ** Publisher
   self.pubSocket = self.zmqContext.socket(zmq.PUB)
   self.pubSocket.bind(self.pub_address)
   time.sleep(0.005)  # mandatory sleep for ZMQ backend
   self.logger.debug("[pub] Bound to {}".format(self.pub_address))
   
   # * Initialize other members
   self.valid = False
   self.buttons = [0, 0]  # primary, secondary
   self.transform = hm.translation(hm.identity(), self.position_offset)
   #self.position = self.position_offset
   self.loop = True  # TODO ensure this is properly shared across threads
   
   # * Start sensing loop
   self.senseThread = Thread(target=self.senseLoop)
   self.senseThread.daemon = True  # to prevent indefinite wait on recv()
   self.senseThread.start()
   time.sleep(0.005)  # sleep to allow child thread to run
Example #17
    def __init__(self, args):
        super().__init__()
        self.logger = set_logger(colored('VENTILATOR', 'magenta'),
                                 args.verbose)

        self.max_seq_len = args.max_seq_len
        self.num_worker = args.num_worker
        self.max_batch_size = args.max_batch_size
        self.num_concurrent_socket = max(
            8, args.num_worker * 2)  # optimize concurrency for multi-clients
        self.port = args.port
        self.args = args
        self.status_args = {
            k: (v if k != 'pooling_strategy' else v.value)
            for k, v in sorted(vars(args).items())
        }
        self.status_static = {
            'tensorflow_version': _tf_ver_,
            'python_version': sys.version,
            'server_version': __version__,
            'pyzmq_version': zmq.pyzmq_version(),
            'zmq_version': zmq.zmq_version(),
            'server_start_time': str(datetime.now()),
        }
        self.processes = []
Example #18
    def __init__(self, args):
        super().__init__()
        self.logger = set_logger(colored('VENTILATOR', 'magenta'), args.verbose)

        self.num_worker = args.num_worker
        self.max_batch_size = args.max_batch_size
        self.num_concurrent_socket = max(8, args.num_worker * 2)  # optimize concurrency for multi-clients
        self.port = args.port
        self.args = args
        self.status_args = {k: (v if k != 'pooling_strategy' else v.value) for k, v in sorted(vars(args).items())}
        self.status_static = {
            'tensorflow_version': _tf_ver_,
            'python_version': sys.version,
            'server_version': __version__,
            'pyzmq_version': zmq.pyzmq_version(),
            'zmq_version': zmq.zmq_version(),
            'server_start_time': str(datetime.now()),
        }
        self.processes = []
        self.graph_path, self.config = self.load_graph_config()
        if self.graph_path:
            self.logger.info('optimized graph is stored at: %s' % self.graph_path)
        else:
            raise FileNotFoundError('graph optimization fails and returns empty result')
        self.is_ready = threading.Event()
Example #19
    def __init__(self, args):
        threading.Thread.__init__(self)
        self.logger = get_logger()

        self.num_worker = args.num_worker
        self.max_batch_size = args.max_batch_size
        self.num_concurrent_socket = max(
            8, args.num_worker * 2)  # optimize concurrency for multi-clients
        self.port = args.port
        self.args = args
        self.status_args = {
            k: (v if k != 'pooling_strategy' else v.value)
            for k, v in sorted(vars(args).items())
        }
        self.status_static = {
            'tensorflow_version': _tf_ver_,
            'python_version': sys.version,
            'server_version': __version__,
            'pyzmq_version': zmq.pyzmq_version(),
            'zmq_version': zmq.zmq_version(),
            'server_start_time': str(datetime.now()),
        }
        self.processes = []

        self.num_labels = 0
        self.id2theme = dict()
        self.graph_path = None
        self.load_theme_model()
Example #20
 def test_pyzmq_version(self):
     vs = zmq.pyzmq_version()
     vs2 = zmq.__version__
     self.assertTrue(isinstance(vs, str))
     if zmq.__revision__:
         self.assertEquals(vs, '@'.join((vs2, zmq.__revision__)))
     else:
         self.assertEquals(vs, vs2)
Example #21
def print_info():
    log.info("Python version: %s, %s",
             '.'.join((str(e) for e in sys.version_info)),
             sys.executable)

    log.info("zeromq version: %s", zmq.zmq_version())
    log.info("pyzmq version:  %s", zmq.pyzmq_version())
    log.info("track version:  %s", str(track_base.version_info))
Example #22
 def test_pyzmq_version(self):
     vs = zmq.pyzmq_version()
     vs2 = zmq.__version__
     self.assertTrue(isinstance(vs, str))
     if zmq.__revision__:
         self.assertEquals(vs, '@'.join((vs2, zmq.__revision__)))
     else:
         self.assertEquals(vs, vs2)
Example #23
 def test_pyzmq_version(self):
     vs = zmq.pyzmq_version()
     vs2 = zmq.__version__
     self.assertTrue(isinstance(vs, str))
     if zmq.__revision__:
         self.assertEqual(vs, '@'.join((vs2, zmq.__revision__)))
     else:
         self.assertEqual(vs, vs2)
     if version.VERSION_EXTRA:
         self.assertTrue(version.VERSION_EXTRA in vs)
         self.assertTrue(version.VERSION_EXTRA in vs2)
Example #24
 def test_pyzmq_version(self):
     vs = zmq.pyzmq_version()
     vs2 = zmq.__version__
     self.assertTrue(isinstance(vs, str))
     if zmq.__revision__:
         self.assertEqual(vs, '@'.join((vs2, zmq.__revision__)))
     else:
         self.assertEqual(vs, vs2)
     if version.VERSION_EXTRA:
         self.assertTrue(version.VERSION_EXTRA in vs)
         self.assertTrue(version.VERSION_EXTRA in vs2)
Example #25
 def __init__(self, address='127.0.0.1', port='5566'):
     print("Current libzmq version is %s" % zmq.zmq_version())
     print("Current  pyzmq version is %s" % zmq.pyzmq_version())
     if len(sys.argv) > 1:
         address = sys.argv[1]
     if len(sys.argv) > 2:
         port = sys.argv[2]
     self.context = Context.instance()
     self.url = "tcp://{}:{}".format(address, port)
     self.hostname = socket.gethostname()
     self.pub_ep_time()
Example #26
def main():
    print("worker-dl")
    print("pyzmq version:", zmq.pyzmq_version())

    if len(sys.argv) != 4:
        print('The correct syntax is %s address port_pull port_push' %
              sys.argv[0])
        sys.exit(1)

    _, address, port_pull, port_push = sys.argv
    start(address, port_pull, port_push)
Example #27
def log_versions(logger):
    logger.info(
        'Versions:\n'
        '  PyZMQ: %s\n'
        '  libzmq: %s\n'
        '  Tornado: %s\n'
        '  SockJS-Tornado: Not Available\n'  # TODO find the version
        '  SockJS-ZMQProxy: %s\n',
        zmq.pyzmq_version(),
        zmq.zmq_version(),
        tornado_version,
        version
    )
Example #28
def zmq_init():
    global PORT
    logger.info("zmq: " + zmq.pyzmq_version())

    if len(sys.argv) > 1:
        PORT = sys.argv[1]
        int(PORT)

    context = zmq.Context()
    socket = context.socket(zmq.PUB)
    socket.bind("tcp://*:%s" % PORT)

    logger.info("bind: " + PORT)
    return socket
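A matching subscriber for the PUB socket returned by zmq_init() might look like the minimal sketch below. The port and the text message format are assumptions; the snippet above takes its port from sys.argv and does not show what it publishes.

import zmq

context = zmq.Context()
sub = context.socket(zmq.SUB)
sub.connect("tcp://127.0.0.1:5556")        # assumed port
sub.setsockopt_string(zmq.SUBSCRIBE, "")   # subscribe to everything

while True:
    print(sub.recv_string())               # assumes the publisher sends text frames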
Example #29
    def run(self):
        """Run the test suite with py.test"""
        # crude check for inplace build:
        try:
            import zmq
        except ImportError:
            print_exc()
            fatal('\n       '.join(["Could not import zmq!",
            "You must build pyzmq with 'python setup.py build_ext --inplace' for 'python setup.py test' to work.",
            "If you did build pyzmq in-place, then this is a real error."]))
            sys.exit(1)

        info("Testing pyzmq-%s with libzmq-%s" % (zmq.pyzmq_version(), zmq.zmq_version()))
        p = Popen([sys.executable, '-m', 'pytest', '-v', os.path.join('zmq', 'tests')])
        p.wait()
        sys.exit(p.returncode)
Example #30
 def run(self):
     """Run the test suite with py.test"""
     # crude check for inplace build:
     try:
         import zmq
     except ImportError:
         print_exc()
         fatal('\n       '.join(["Could not import zmq!",
         "You must build pyzmq with 'python setup.py build_ext --inplace' for 'python setup.py test' to work.",
         "If you did build pyzmq in-place, then this is a real error."]))
         sys.exit(1)
     
     info("Testing pyzmq-%s with libzmq-%s" % (zmq.pyzmq_version(), zmq.zmq_version()))
     p = Popen([sys.executable, '-m', 'pytest', '-v', os.path.join('zmq', 'tests')])
     p.wait()
     sys.exit(p.returncode)
Example #31
 def level_1(info):
     # print header (some general info)
     info += ["name: %s" % self.job_name]
     info += ["version: %s" % getattr(self, '__version__', '')]
     info += ["service: %s" % self.service_name]
     info += ["pid: %i" % os.getpid()]
     info += ["started: %s" % strftime(getattr(self, 'started', ''))]
     info += ["status: %s" % getattr(self, 'status', '')]
     info += ["time-consumed: %s" % ' '.join(map(str, os.times()[:4]))]
     info += ["info-period: %s" % self.stats_period]
     info += ["info-written: %s" % strftime(time.time())]
     info += ["platform: %s" % platform.platform()]
     info += ["python: %s" % platform.python_version()]
     info += ["skytools: %s" % skytools.__version__]
     info += ["libzmq: %s" % zmq.zmq_version()]
     info += ["pyzmq: %s" % zmq.pyzmq_version()]
     info += [""]
Example #32
File: server.py Project: markokr/cc
 def level_1(info):
     # print header (some general info)
     info += ["name: %s" % self.job_name]
     info += ["version: %s" % getattr(self, "__version__", "")]
     info += ["service: %s" % self.service_name]
     info += ["pid: %i" % os.getpid()]
     info += ["started: %s" % strftime(getattr(self, "started", ""))]
     info += ["status: %s" % getattr(self, "status", "")]
     info += ["time-consumed: %s" % " ".join(map(str, os.times()[:4]))]
     info += ["info-period: %s" % self.stats_period]
     info += ["info-written: %s" % strftime(time.time())]
     info += ["platform: %s" % platform.platform()]
     info += ["python: %s" % platform.python_version()]
     info += ["skytools: %s" % skytools.__version__]
     info += ["libzmq: %s" % zmq.zmq_version()]
     info += ["pyzmq: %s" % zmq.pyzmq_version()]
     info += [""]
Example #33
 def __init__(self, providerLocation):
     self._providerLocation = providerLocation
     self._context = zmq.Context()
     self._socket = self._context.socket(zmq.REQ)
     self._socket.connect(providerLocation)
     self._socket.setsockopt(zmq.LINGER, 0)
     self._lock = threading.Lock()
     self._closed = False
     self._activeAllocations = []
     self.call("handshake", versionInfo=dict(
         ASSET_VERSION=api.VERSION,
         ZERO_MQ=dict(
             PYZMQ_VERSION=zmq.pyzmq_version(),
             VERSION=zmq.VERSION,
             VERSION_MAJOR=zmq.VERSION_MAJOR,
             VERSION_MINOR=zmq.VERSION_MINOR,
             VERSION_PATCH=zmq.VERSION_PATCH)))
     self._connectionToProviderInterrupted = suicide.killSelf
     self._heartbeat = heartbeat.HeartBeat(self)
Example #34
 def __init__(self, port=5558, port_out=5559, n_workers=1, verbose=False,
              max_batch_size=32, task='coref'):
     super().__init__()
     self.logger = set_logger(colored('VENTILATOR', 'magenta'), verbose)
     self.port = port
     self.port_out = port_out
     self.processes = []
     self.is_ready = threading.Event()
     self.n_workers = n_workers
     self.n_concurrent_sockets = max(8, n_workers * 2)
     self.max_batch_size = max_batch_size
     self.status_static = {
         'python_version': sys.version,
         'server_version': __version__,
         'pyzmq_version': zmq.pyzmq_version(),
         'zmq_version': zmq.zmq_version(),
         'server_start_time': str(datetime.now()),
     }
     self.Worker = WorkerRegistry[task]
Example #35
def log_sysinfo(app: Flask, config: Config):
    app.logger.info("ZMQ:")
    app.logger.info("  zmq version: %s", zmq.zmq_version())
    app.logger.info("  pyzmq version: %s", zmq.pyzmq_version())
    app.logger.info("  zmq includes: %s", zmq.get_includes())
    app.logger.info("  zmq library dirs: %s", zmq.get_library_dirs())
    app.logger.info("  has: %s", [c for c in ZMQ_CAPABILITIES if zmq.has(c)])
    app.logger.info("socket:")
    app.logger.info("  fqdn: %s", socket.getfqdn())
    app.logger.info("  has_ipv6: %s", socket.has_ipv6)
    app.logger.info("  hostname: %s", socket.gethostname())
    app.logger.info("  interfaces: %s", [i[1] for i in socket.if_nameindex()])
    app.logger.info("os:")
    app.logger.info("  ctermid: %s", os.ctermid())
    app.logger.info("  cwd: %s", os.getcwd())
    app.logger.info("  groups: %s", os.getgroups())
    app.logger.info("  pgid: %d", os.getpgid(0))
    app.logger.info("  pgrp: %d", os.getpgrp())
    app.logger.info("  pid: %d", os.getpid())
    app.logger.info("  ppid: %d", os.getppid())
    app.logger.info("  priority_process: %d",
                    os.getpriority(os.PRIO_PROCESS, 0))
    app.logger.info("  priority_pgrp: %d", os.getpriority(os.PRIO_PGRP, 0))
    app.logger.info("  priority_user: %d", os.getpriority(os.PRIO_USER, 0))
    app.logger.info("  resuid: ruid=%d, euid=%d, suid=%d", *os.getresuid())
    app.logger.info("  resgid: rgid=%d, egid=%d, sgid=%d", *os.getresgid())
    app.logger.info("  sid: %d", os.getsid(0))
    app.logger.info("  supports_bytes_environ: %s", os.supports_bytes_environ)
    app.logger.info("  uname: %s", os.uname())
    app.logger.info("  cpu_count: %d", os.cpu_count())
    app.logger.info("platform:")
    app.logger.info("  %s", platform.platform())
    app.logger.info("  python_build: %s", platform.python_build())
    app.logger.info("  python_compiler: %s", platform.python_compiler())
    app.logger.info("  python_branch: %s", platform.python_branch())
    app.logger.info("  python_implementation: %s",
                    platform.python_implementation())
    app.logger.info("  python_revision: %s", platform.python_revision())
    app.logger.info("  python_version: %s", platform.python_version())
    app.logger.info("getpass:"******"  user: %s", getpass.getuser())
Example #36
    def __init__(self, args, hardprocesser=WKRHardWorker):
        super().__init__()

        self.hardprocessor_skeleton = hardprocesser
        if not issubclass(self.hardprocessor_skeleton, WKRHardWorker):
            raise AssertionError(
                'hardprocesser must inherit from class WKRHardWorker')

        self.model_dir = args.model_dir

        self.num_worker = args.num_worker
        self.device_map = args.device_map
        self.gpu_memory_fraction = args.gpu_memory_fraction
        self.all_cpu = args.cpu

        self.num_concurrent_postsocket = max(8, args.num_worker * 2)
        self.batch_size = args.batch_size

        self.total_concurrent_socket = self.num_concurrent_postsocket

        self.port = args.port
        self.args = args
        self.transfer_protocol = args.protocol

        self.status_args = {k: v for k, v in sorted(vars(args).items())}
        self.status_static = {
            'python_version': sys.version,
            'server_version': __version__,
            'pyzmq_version': zmq.pyzmq_version(),
            'zmq_version': zmq.zmq_version(),
            'server_start_time': str(datetime.now()),
        }
        self.processes = []
        self.logdir = args.log_dir
        self.logger = set_logger(colored('NAVIGATOR', 'red'),
                                 logger_dir=self.logdir,
                                 verbose=args.verbose)
        self.logger.info(
            'freeze, optimize and export graph, could take a while...')

        self.is_ready = threading.Event()
Example #37
def _get_master_uri(master_ip, master_port, source_ip=None, source_port=None):
    '''
    Return the ZeroMQ URI to connect the Minion to the Master.
    It supports different source IP / port, given the ZeroMQ syntax:

    // Connecting using a IP address and bind to an IP address
    rc = zmq_connect(socket, "tcp://192.168.1.17:5555;192.168.1.1:5555"); assert (rc == 0);

    Source: http://api.zeromq.org/4-1:zmq-tcp
    '''
    libzmq_version_tup = tuple(map(int, zmq.zmq_version().split('.')))
    pyzmq_version_tup = tuple(map(int, zmq.pyzmq_version().split('.')))
    if libzmq_version_tup >= (4, 1, 6) and pyzmq_version_tup >= (16, 0, 1):
        # The source:port syntax for ZeroMQ has been added in libzmq 4.1.6
        # which is included in the pyzmq wheels starting with 16.0.1.
        if source_ip or source_port:
            if source_ip and source_port:
                return 'tcp://{source_ip}:{source_port};{master_ip}:{master_port}'.format(
                    source_ip=source_ip,
                    source_port=source_port,
                    master_ip=master_ip,
                    master_port=master_port)
            elif source_ip and not source_port:
                return 'tcp://{source_ip}:0;{master_ip}:{master_port}'.format(
                    source_ip=source_ip,
                    master_ip=master_ip,
                    master_port=master_port)
            elif not source_ip and source_port:
                return 'tcp://0.0.0.0:{source_port};{master_ip}:{master_port}'.format(
                    source_port=source_port,
                    master_ip=master_ip,
                    master_port=master_port)
    if source_ip or source_port:
        log.warning(
            'Unable to connect to the Master using a specific source IP / port'
        )
        log.warning(
            'Consider upgrading to pyzmq >= 16.0.1 and libzmq >= 4.1.6')
    return 'tcp://{master_ip}:{master_port}'.format(master_ip=master_ip,
                                                    master_port=master_port)
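A short usage sketch for the helper above; the addresses are made up, and the expected return values follow directly from the format strings in the function (the first two cases also require libzmq >= 4.1.6 and pyzmq >= 16.0.1):

# Source IP and port both pinned:
_get_master_uri('10.0.0.1', 4506, source_ip='10.0.0.2', source_port=5506)
# -> 'tcp://10.0.0.2:5506;10.0.0.1:4506'

# Only a source port: bind the source side to all interfaces.
_get_master_uri('10.0.0.1', 4506, source_port=5506)
# -> 'tcp://0.0.0.0:5506;10.0.0.1:4506'

# No source given (or libzmq/pyzmq too old): plain destination URI.
_get_master_uri('10.0.0.1', 4506)
# -> 'tcp://10.0.0.1:4506'

Note that zmq.zmq_version_info() and zmq.pyzmq_version_info() already return comparable tuples, which would avoid the manual string parsing at the top of the function.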
Example #38
    def __init__(self, args):
        super().__init__()
        self.logger = set_logger(colored('VENTILATOR', 'magenta'), args.verbose)

        # self.model_dir = args.model_dir
        # self.max_seq_len = args.max_seq_len
        self.num_worker = args.num_worker
        self.max_batch_size = args.max_batch_size
        self.num_concurrent_socket = max(8, args.num_worker * 2)  # optimize concurrency for multi-clients
        self.port = args.port
        self.args = args
        self.status_args = {k: (v if k != 'pooling_strategy' else v.value) for k, v in sorted(vars(args).items())}
        self.status_static = {
            'python_version': sys.version,
            'pyzmq_version': zmq.pyzmq_version(),
            'zmq_version': zmq.zmq_version(),
            'server_start_time': str(datetime.now()),
        }
        self.processes = []
        self.logger.info('freeze, optimize and export graph, could take a while...')

        self.is_ready = threading.Event()
Example #39
def directory_server(store: DirectoryServerStore, zmq_context: Context):
    # pylint: disable=no-member  # zmq.ROUTER and zmq.PUB do exist
    print("Starting on libzmq {} with PyZMQ {}".format(zmq.zmq_version(),
                                                       zmq.pyzmq_version()))
    entrypoint: Socket = zmq_context.socket(zmq.ROUTER)
    entrypoint.bind("tcp://127.0.0.1:5350")  # This is just a PROTOTYPE!
    pub_file_changes: Socket = zmq_context.socket(zmq.PUB)
    pub_file_changes.bind("tcp://127.0.0.1:5351")
    poller = Poller()
    poller.register(entrypoint, flags=zmq.POLLIN)
    print(
        "Directory server is started on 127.0.0.1:5350 (commands) and 127.0.0.1:5351 (file_changes_push)"
    )
    while True:
        events: List[Tuple[Socket, int]] = poller.poll()
        for socket, _ in events:
            frames: List[Frame] = socket.recv_multipart(copy=False)
            id_frame: Frame = frames.pop(0)
            empty_frame: Frame = frames.pop(0)
            assert (len(empty_frame.bytes) == 0)
            command_frame: Frame = frames.pop(0)
            command = str(command_frame.bytes, encoding='utf8')
            if command == 'ping':
                ping_handler(store, socket, frames, id_frame)
            elif command == 'device.cast_address':
                casting_address_handler(store, socket, frames, id_frame)
            elif command == 'device.get_addresses':
                get_addresses_handler(store, socket, frames, id_frame)
            elif command == 'fs.list':
                file_list_handler(store, socket, id_frame)
            elif command == 'fs.declare':
                file_declare_handler(store, socket, frames, id_frame,
                                     pub_file_changes)
            elif command == 'fs.disown':
                file_disown_handler(store, socket, frames, id_frame,
                                    pub_file_changes)
            elif command == 'fs.get':
                file_get_handler(store, socket, frames, id_frame)
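A hypothetical client for the prototype server above: a REQ socket automatically adds the empty delimiter frame, and the ROUTER side adds the identity frame, so only the command frame (plus any argument frames) needs to be sent. The reply format depends on the individual handlers, which are not shown here.

import zmq

ctx = zmq.Context.instance()
req = ctx.socket(zmq.REQ)
req.connect("tcp://127.0.0.1:5350")    # entrypoint address used by the server above

req.send_multipart([b'ping'])          # command frame only
reply = req.recv_multipart()           # whatever ping_handler sends back (assumed)
print(reply)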
Example #40
    def __init__(self):
        Tool.__init__(self)

        # * Initialize ZMQ context and open subscriber, publisher sockets
        self.logger.debug("ZMQ version: {}, PyZMQ version: {}".format(
            zmq.zmq_version(), zmq.pyzmq_version()))
        # ** Context
        self.zmqContext = zmq.Context()
        # ** Subscriber
        self.subSocket = self.zmqContext.socket(zmq.SUB)
        self.subSocket.connect(self.sub_address)
        time.sleep(0.005)  # mandatory sleep for ZMQ backend
        self.logger.debug("[sub] Connected to {}".format(self.sub_address))
        # ** Subscriber topics for input messages
        self.subSocket.setsockopt(zmq.SUBSCRIBE, self.sub_topic)
        self.subSocket.setsockopt(zmq.LINGER, self.sub_socket_linger)
        self.logger.debug("[sub]Subscribed to topic \"{}\"".format(
            self.sub_topic))
        time.sleep(0.005)  # mandatory sleep for ZMQ backend
        # ** Publisher
        self.pubSocket = self.zmqContext.socket(zmq.PUB)
        self.pubSocket.bind(self.pub_address)
        time.sleep(0.005)  # mandatory sleep for ZMQ backend
        self.logger.debug("[pub] Bound to {}".format(self.pub_address))

        # * Initialize other members
        self.valid = False
        self.buttons = [0, 0]  # primary, secondary
        self.transform = hm.translation(hm.identity(), self.position_offset)
        #self.position = self.position_offset
        self.loop = True  # TODO ensure this is properly shared across threads

        # * Start sensing loop
        self.senseThread = Thread(target=self.senseLoop)
        self.senseThread.daemon = True  # to prevent indefinite wait on recv()
        self.senseThread.start()
        time.sleep(0.005)  # sleep to allow child thread to run
Example #41
import zmq

print(zmq.pyzmq_version())
Example #42
import zmq

STATE_PRIMARY = 1
STATE_BACKUP = 2
STATE_ACTIVE = 3
STATE_PASSIVE = 4

PEER_PRIMARY = 1
PEER_BACKUP = 2
PEER_ACTIVE = 3
PEER_PASSIVE = 4
CLIENT_REQUEST = 5

HEARTBEAT = 1000

pyzmq_version = tuple(map(int, zmq.pyzmq_version().split('.')))
if pyzmq_version <= (2, 1, 7):
    zmq.ROUTER = zmq.XREP


class BStarState(object):
    def __init__(self, state, event, peer_expiry):
        self.state = state
        self.event = event
        self.peer_expiry = peer_expiry


class BStarException(Exception):
    pass

fsm_states = {
Example #43
def print_info():
    log.info("zeromq version: %s" % zmq.zmq_version())
    log.info("pyzmq version:  %s" % zmq.pyzmq_version())
    log.info("track version:  %s" % str(track_base.version_info))
Example #44
    def __init__(self, handler):
        usage = "usage: %prog [options] <start|stop|restart|quit>"
        parser = OptionParser(usage=usage)

        parser.add_option("-u", "--username", dest="username",
                          metavar="USERNAME",
                          help="name of the login to be used")
        parser.add_option("-s", "--server", dest="servername",
                          metavar="SERVER-IP",
                          help="meddle server domain or address")
        parser.add_option("-p", "--port", dest="serverport",
                          metavar="PORT-NR",
                          help="meddle server tcp port")

        (options, args) = parser.parse_args()

        _meddle_default_config_filename = os.path.join(
            pymeddle_common.meddle_directory(), '.meddle-default')

        self._meddle_config_filename = os.path.join(
            pymeddle_common.system_user_directory(), '.meddle')

        logging.info("using environment:")
        logging.info("    Python version       %s", tuple(sys.version_info))
        logging.info("    ZeroMQ version       %s", zmq.zmq_version())
        logging.info("    pyzmq version        %s", zmq.pyzmq_version())
        logging.info("    home directory:      %s", pymeddle_common.system_user_directory())
        logging.info("    meddle directory:    %s", pymeddle_common.meddle_directory())
        logging.info("    default config file: %s", _meddle_default_config_filename)
        logging.info("    user config file:    %s", self._meddle_config_filename)

        self._perstitent_settings = {}
        self._perstitent_settings['tags'] = []
        self._channel_friendly_names = {}

        try:
            self._perstitent_settings.update(
                ast.literal_eval(open(_meddle_default_config_filename).read()))
        except Exception as e:
            print(e)

        try:
            self._perstitent_settings.update(
                ast.literal_eval(open(self._meddle_config_filename).read()))
        except Exception as e:
            print(e)

        # print(self._perstitent_settings)

        self.context = zmq.Context()
        self._handler = handler
        self._my_id = 0
        self._subscriptions = []
        self._preliminary_username = False

        if options.username:
            self._username = options.username
        else:
            if 'username' in self._perstitent_settings:
                self._username = self._perstitent_settings['username']
            else:
                self._username = system_username()
                self._preliminary_username = True

        if options.servername:
            self._servername = options.servername
        else:
            self._servername = find_first_available_server(self._perstitent_settings)
        self._serverport = options.serverport if options.serverport else 32100
        self._mutex_rpc_socket = Lock()
        self._connection_status = None
        self._version = pymeddle_common.get_version()
Example #45
    def __init__(self, pgm_group, screens=0, name=None, context=None,
                 error_handler=None):
        LOGGER.debug("ZMQ version: {0} PYZMQ version: {1}".\
                     format(zmq.zmq_version(), zmq.pyzmq_version()))
        # Check pgm_group
        if not pgm_group:
            pgm_group = 1
        pgm_ip = '239.128.128.{0}:5555'.format(pgm_group)
        LOGGER.debug('PGM IP {0}'.format(pgm_ip))

        #Create context
        self.context = context if context else zmq.Context()
        self.context.setsockopt(zmq.LINGER, 0)  # Set default linger value.

        #Create publisher
        self.publisher = self.context.socket(zmq.PUB)
        self.publisher_loopback = self.context.socket(zmq.PUB)
        self.ip = utils.GetLocalIPAddress(diwavars.STORAGE)
        LOGGER.info('Own IP: %s', self.ip)
        if self.ip:
            self.id = self.ip.split('.')[3]
        else:
            self.id = random.randint(1, 154)
        self.node = Node(int(self.id), int(screens), name)
        self.online = True

        # Prevent overflowing slow subscribers
        self.publisher.setsockopt(zmq.LINGER, 0)
        self.publisher.setsockopt(zmq.RATE, 1000000)
        self.publisher.set_hwm(5)
        self.publisher_loopback.setsockopt(zmq.LINGER, 0)
        self.publisher_loopback.set_hwm(50)

        # Bind publisher
        self.tladdr = 'epgm://' + self.ip + ';' + pgm_ip
        self.ipraddr = 'inproc://mcast_loopback'
        self.publisher.bind(self.tladdr)
        self.publisher_loopback.bind(self.ipraddr)
        # Subscriber threads
        targs = ([self.tladdr, self.ipraddr], )
        self.sub_thread = SWNP.start_sub_routine(None, self.sub_routine,
                                                 'Sub thread', targs)
        self.sub_thread_sys = SWNP.start_sub_routine(None,
                                                     self.sub_routine_sys,
                                                     'Sub sys thread', targs)
        LOGGER.debug('Bound listeners on: %s', str(self.tladdr))

        join_str = '{id}_SCREENS_{screens}_NAME_{name}_DATA_{_data}'
        join_str = join_str.format(**self.node.__dict__)
        self.send('SYS', PREFIX_CHOICES[0], join_str)
        self.last_joined = self.id
        self.NODE_LIST.add(self.node)
        self.do_ping()

        #heartbeat
        self.ping_stop = threading.Event()
        self.ping_thread = threading.Thread(
            target=self.ping_routine,
            name='Ping thread',
            args=(error_handler,)
        )
        self.ping_thread.daemon = True
        self.ping_thread.start()
        self.timeout_stop = threading.Event()
        self.timeout_thread = threading.Thread(target=self.timeout_routine,
                                               name='timeout thread')
        self.timeout_thread.daemon = True
        self.timeout_thread.start()
Example #46
def main():
    """Main here."""

    args = parse_cmdline_args()

    # decide on logging severity level
    severity_level = logging.ERROR if args.silent else logging.DEBUG if args.verbose else logging.INFO

    # set up logging
    logging.basicConfig(stream=sys.stderr, level=severity_level, format="%(message)s")

    # debug-print the parsed arguments
    logging.debug(args)

    # debug-print zmq version info
    logging.info("zmq version=%s", zmq.zmq_version())
    logging.info("pyzmq version=%s", zmq.pyzmq_version())

    # set up zmq machinery
    zctx = zmq.Context()
    zsck_ctrl = zctx.socket(zmq.PULL)

    ctrl_addr = args.ctrl_address or "tcp://*:17267"
    zsck_ctrl.bind(ctrl_addr)

    zsck_status = zctx.socket(zmq.PUB)
    status_addr = args.status_address or "tcp://*:17268"
    zsck_status.bind(status_addr)

    recorder = FfmpegRecorder()
    recorder.debug_show_video = args.show_video

    # main loop frequency
    frequency = args.frequency or 30.0

    # last status, to send messages on change only
    recording = recorder.running
    paused = recorder.paused
    has_crashed = recorder.has_crashed

    # whether full status has been requested, initially true
    status_requested = True

    should_stop = False
    while not should_stop:

        # check if an incoming message is available
        events = zsck_ctrl.poll(1000.0 / frequency)

        if events & zmq.POLLIN:
            zmsg = zsck_ctrl.recv()
            msg = FfmpegControl()
            msg.ParseFromString(zmsg)
            logging.debug("recved ctrl:\n%s", msg)

            if msg.opcode == FfmpegControl.RECORD:
                recorder.run()

            elif msg.opcode == FfmpegControl.IDLE:
                recorder.stop()

            elif msg.opcode == FfmpegControl.PAUSE:
                recorder.pause()

            elif msg.opcode == FfmpegControl.UNPAUSE:
                recorder.unpause()

            elif msg.opcode == FfmpegControl.SHUTDOWN:
                recorder.stop()
                should_stop = True

            elif msg.opcode == FfmpegControl.PING:
                status_requested = True

            # update recording parameters
            if msg.HasField("capture_x"):
                recorder.capture_x = msg.capture_x

            if msg.HasField("capture_y"):
                recorder.capture_y = msg.capture_y

            if msg.HasField("capture_width"):
                recorder.capture_width = msg.capture_width

            if msg.HasField("capture_height"):
                recorder.capture_height = msg.capture_height

            if msg.HasField("capture_fps"):
                recorder.capture_fps = msg.capture_fps

            if msg.HasField("audio_device"):
                recorder.audio_device = msg.audio_device

            if msg.HasField("video_device"):
                recorder.video_device = msg.video_device

            if msg.HasField("scale"):
                recorder.scale = msg.scale

            if msg.HasField("output_file"):
                recorder.output_file = msg.output_file

            if msg.HasField("debug_show_video"):
                recorder.debug_show_video = msg.debug_show_video

        # dispatch kbhit commands
        if not args.nostdin:
            cmd = msvcrt.getch() if msvcrt.kbhit() else 0

            if cmd == "r":
                recorder.run()

            elif cmd == "t":
                recorder.stop()

            elif cmd == "p":
                recorder.pause()

            elif cmd == "P":
                recorder.unpause()

            elif cmd == "i":
                status_requested = True

            elif cmd == "X":
                recorder.stop()
                should_stop = True

            elif cmd == "/":
                print "Usage:"
                print "r -- start recording"
                print "t -- stop"
                print "p -- pause"
                print "P -- resume"
                print "i -- request explicit status"
                print "X -- shutdown"

        # send full status on shutdown
        if should_stop:
            status_requested = True

        # publish status
        status = FfmpegStatus()
        dirty = False

        if recorder.running != recording:
            recording = recorder.running
            status.is_recording = recorder.running
            dirty = True

        if recorder.paused != paused:
            paused = recorder.paused
            status.is_paused = recorder.paused
            dirty = True

        if recorder.has_crashed != has_crashed:
            has_crashed = recorder.has_crashed
            status.has_crashed = recorder.has_crashed
            dirty = True

        # whether status message has been requested explicitly
        if status_requested:
            status.is_recording = recorder.running
            status.is_paused = recorder.paused
            status.has_crashed = recorder.has_crashed
            status_requested = False
            dirty = True

        if dirty:
            logging.debug("sending status:\n%s", status)
            events = zsck_status.poll(0, zmq.POLLOUT)

            # TODO what if we can't send? the message will be dropped!
            if events & zmq.POLLOUT:
                zsck_status.send(status.SerializeToString())
Example #47
def main():
    print zmq.zmq_version()
    print zmq.pyzmq_version()
Example #48
PUB_IP = "127.0.0.1"
PUB_PORT = 5000

SUB_IP = "127.0.0.1"
SUB_PORT = 5001

TCP_IP = "127.0.0.1"
TCP_PORT = 5000

UDP_IP = "127.0.0.1"
UDP_PORT = 5005

BUFFER_SIZE = 1024
print ("ZMQ Subsystem")
print ("  Init ZMQ system with version: %s" % zmq.pyzmq_version())
context = zmq.Context()
print ("  ZMQ context created ")

print ("  Init ZMQ PUB")
socketPub = context.socket(zmq.PUB)
socketPub.bind("tcp://%s:%s" % (PUB_IP, PUB_PORT))
print ("  Created ZMQ PUB on tcp://%s:%s" % (PUB_IP, PUB_PORT))

print ("  Init ZMQ SUB")
socketSub = context.socket(zmq.SUB)
socketSub.bind("tcp://%s:%s" % (SUB_IP, SUB_PORT))
print ("  Created ZMQ SUB on tcp://%s:%s" % (SUB_IP, SUB_PORT))


MESSAGE = '{ "controller_name" : "Python_rulez", "controller_type": "PC", "d2c_port": 54321 }'
Example #49
    # The socket monitor can be explicitly disabled if necessary.
    # yield from ct.disable_monitor()

    # If a socket monitor is left enabled on a socket being closed,
    # the socket monitor will be closed automatically.
    ct.close()
    yield from cp.wait_closed

    st.close()
    yield from sp.wait_closed


def main():
    asyncio.get_event_loop().run_until_complete(go())
    print("DONE")


if __name__ == '__main__':
    # import logging
    # logging.basicConfig(level=logging.DEBUG)

    if (zmq.zmq_version_info() < (4,) or
            zmq.pyzmq_version_info() < (14, 4,)):
        raise NotImplementedError(
            "Socket monitor requires libzmq >= 4 and pyzmq >= 14.4, "
            "have libzmq:{}, pyzmq:{}".format(
                zmq.zmq_version(), zmq.pyzmq_version()))

    main()
Example #50
 def test_version_available(self):
     self.assertIsNotNone(zmq.pyzmq_version())
Example #51
import zmq
import sys

if len(sys.argv) < 2:
    print "usage: python %s <target ip address>" % sys.argv[0]
    sys.exit(1)

print "ZMQ=%s pyzmq=%s" % (zmq.zmq_version(), zmq.pyzmq_version())

dest = sys.argv[1]
port = 4711
topics = ["demo", "foo"]


context = zmq.Context()
socket = context.socket(zmq.XSUB)
socket.connect("tcp://%s:%d" % (dest, port))

# subscribe XSUB-style to all topics of interest
for t in topics:
    socket.send("\001%s" % t)

while True:
    (topic, msg) = socket.recv_multipart()
    print "topic='%s' content='%s'" % (topic, msg)
Example #52
#initialize any zmq libs
import zmq
print zmq.pyzmq_version()
import time
import sys

port = "5556"

#we want to run the server on multiple ports(so request can be distributed)
if len(sys.argv) >1:
	port = sys.argv[1]
	print port
	int(port)
#multiple contexts can be created in an app.
#contexts are thread-safe unlike sockets
context = zmq.Context()

#create REP socket from the context
socket = context.socket(zmq.REP)
#bind socket to port
socket.bind("tcp://*:%s" % port)

while True:
	#wait for request
	message = socket.recv()
	print "Received client request" , message

	time.sleep(1)
	socket.send("Server got your request, here's your response from %s" %port)
Example #53
def print_info():
    log.info("zeromq version: %s" % zmq.zmq_version())
    log.info("pyzmq version:  %s" % zmq.pyzmq_version())
Example #54
import sys
#sys.path.insert(0, "C:\\Users\\berg.ZALF-AD\\GitHub\\monica\\project-files\\Win32\\Release")
#sys.path.insert(0, "C:\\Users\\berg.ZALF-AD\\GitHub\\monica\\src\\python")
#sys.path.insert(0, "C:\\Program Files (x86)\\MONICA")
print sys.path

import gc
import csv
import types
import os
import json
from datetime import datetime
from collections import defaultdict, OrderedDict

import zmq
print "pyzmq version: ", zmq.pyzmq_version(
), " zmq version: ", zmq.zmq_version()

import monica_io
#print "path to monica_io: ", monica_io.__file__

LOCAL_RUN = False


def create_output(cl_res, cl_row, cl_col, s_res, s_row, s_col, crop_id, period,
                  gcm, result):
    "create crop output lines"

    out = []
    if len(result.get("data", [])) > 0 and len(result["data"][0].get(
            "results", [])) > 0:
        year_to_vals = defaultdict(dict)
Example #55
'''
blink_server.py

A server that talks zmq on one end, and pyserial on the other.
Controls an LED on a local RPi via commands over zmq.

By Keenan Fejeran
1/20/2015
'''

import zmq
import serial
import time

#Print the current version of zmq
print "pyzmq version: " + zmq.pyzmq_version()

#Open a serial port (note could be a command line arg)
ser = serial.Serial('/dev/ttyACM0', 9600, timeout = 0)
print ser.name

#Create a ZMQ context
context = zmq.Context()

#Create ZMQ socket, bind it to localhost (for testing)
socket = context.socket(zmq.PAIR)
socket.bind("tcp://*:5556")


#listen to the client, passing a trivial message to arduino
while True:
Example #56
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import zmq
print("zeromq version: %s" % zmq.zmq_version())
print("pyzmq version: %s" % zmq.pyzmq_version())

# how-to-implement-proxy-broker-for-xpub-xsub-messaging-in-zmq
# http://stackoverflow.com/questions/14590122

def main():
    context = zmq.Context()

    xsub_socket = context.socket(zmq.XSUB)
    xsub_socket.bind('tcp://*:6000')
    xpub_socket = context.socket(zmq.XPUB)
    xpub_socket.bind('tcp://*:6001')

    poller = zmq.Poller()
    poller.register(xpub_socket, zmq.POLLIN)
    poller.register(xsub_socket, zmq.POLLIN)
    if True:
        while True:
            #print('listen..')

            try:
                events = dict(poller.poll(1000))
            except KeyboardInterrupt:
                print('KeyboardInterrupt - send quit message')
                #xpub_socket.send_multipart([b'quit'])
                break