def main():
    """Record, replay, or monitor notification messages on the RPC bus.

    Registers config options, parses command-line flags and arguments,
    configures DEBUG logging to stderr, then dispatches to the
    record/replay/monitor helper selected by the ``mode`` argument.

    :returns: 0, the process exit status
    """
    rpc.register_opts(FLAGS)
    FLAGS.register_opts([
        cfg.StrOpt('datafile',
                   default=None,
                   help='Data file to read or write'),
        cfg.BoolOpt('record',
                    help='Record events'),
        cfg.BoolOpt('replay',
                    help='Replay events'),
    ])
    remaining_args = FLAGS(sys.argv)
    utils.monkey_patch()

    parser = argparse.ArgumentParser(
        description='record or play back notification events')
    parser.add_argument('mode',
                        choices=('record', 'replay', 'monitor'),
                        help='operating mode')
    parser.add_argument('data_file',
                        default='msgs.dat',
                        nargs='?',
                        help='the data file to read or write')
    parser.add_argument('--topic',
                        default='notifications.info',
                        help='the exchange topic to listen for')
    # remaining_args[0] is the program name; argparse wants only the rest.
    args = parser.parse_args(remaining_args[1:])

    # Route all DEBUG-and-up records to stderr with bare messages so the
    # captured payloads are readable on the console.
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.DEBUG)
    console.setFormatter(logging.Formatter('%(message)s'))
    root_logger = logging.getLogger('')
    root_logger.addHandler(console)
    root_logger.setLevel(logging.DEBUG)

    connection = rpc.create_connection()
    try:
        if args.mode == 'replay':
            # Handles renamed from 'input'/'output' so they no longer
            # shadow the builtins.
            with open(args.data_file, 'rb') as data_in:
                send_messages(connection, args.topic, data_in)
        elif args.mode == 'record':
            with open(args.data_file, 'wb') as data_out:
                record_messages(connection, args.topic, data_out)
        elif args.mode == 'monitor':
            monitor_messages(connection, args.topic)
    finally:
        # Always release the RPC connection, even on error or Ctrl-C.
        connection.close()
    return 0
def setup(): import mox # Fail fast if you don't have mox. Workaround for bug 810424 from nova import rpc # Register rpc_backend before fake_flags sets it FLAGS.register_opts(rpc.rpc_opts) from nova import context from nova import db from nova.db import migration from nova.network import manager as network_manager from nova.tests import fake_flags fake_flags.set_defaults(FLAGS) rpc.register_opts(FLAGS) if FLAGS.sql_connection == "sqlite://": if migration.db_version() > migration.INIT_VERSION: return else: testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db) if os.path.exists(testdb): return migration.db_sync() ctxt = context.get_admin_context() network = network_manager.VlanManager() bridge_interface = FLAGS.flat_interface or FLAGS.vlan_interface network.create_networks( ctxt, label="test", cidr=FLAGS.fixed_range, multi_host=FLAGS.multi_host, num_networks=FLAGS.num_networks, network_size=FLAGS.network_size, cidr_v6=FLAGS.fixed_range_v6, gateway=FLAGS.gateway, gateway_v6=FLAGS.gateway_v6, bridge=FLAGS.flat_network_bridge, bridge_interface=bridge_interface, vpn_start=FLAGS.vpn_start, vlan_start=FLAGS.vlan_start, dns1=FLAGS.flat_network_dns, ) for net in db.network_get_all(ctxt): network.set_network_host(ctxt, net) if FLAGS.sql_connection == "sqlite://": global _DB engine = get_engine() conn = engine.connect() _DB = "".join(line for line in conn.connection.iterdump()) else: cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db) shutil.copyfile(testdb, cleandb)
def start(self):
    """Bring the service online.

    Initializes the manager, ensures a service record exists in the DB,
    wires up the three RPC consumers on one shared connection, and kicks
    off the report/periodic timers.
    """
    ver = version.version_string_with_vcs()
    LOG.audit(_('Starting %(topic)s node (version %(vcs_string)s)'),
              {'topic': self.topic, 'vcs_string': ver})
    utils.cleanup_file_locks()
    rpc.register_opts(FLAGS)
    self.manager.init_host()
    self.model_disconnected = False

    admin_ctxt = context.get_admin_context()
    try:
        record = db.service_get_by_args(admin_ctxt, self.host, self.binary)
        self.service_id = record['id']
    except exception.NotFound:
        # First run on this host: create the service record ourselves.
        self._create_service_ref(admin_ctxt)

    if self.binary == 'nova-compute':
        self.manager.update_available_resource(admin_ctxt)

    self.conn = rpc.create_connection(new=True)
    LOG.debug(_("Creating Consumer connection for Service %s") % self.topic)

    dispatcher = self.manager.create_rpc_dispatcher()

    # All three consumers share this single connection.
    self.conn.create_consumer(self.topic, dispatcher, fanout=False)
    self.conn.create_consumer('%s.%s' % (self.topic, self.host),
                              dispatcher, fanout=False)
    self.conn.create_consumer(self.topic, dispatcher, fanout=True)

    # Drain every consumer from one background thread.
    self.conn.consume_in_thread()

    if self.report_interval:
        reporter = utils.LoopingCall(self.report_state)
        reporter.start(interval=self.report_interval,
                       initial_delay=self.report_interval)
        self.timers.append(reporter)

    if self.periodic_interval:
        delay = (random.randint(0, self.periodic_fuzzy_delay)
                 if self.periodic_fuzzy_delay else None)
        ticker = utils.LoopingCall(self.periodic_tasks)
        ticker.start(interval=self.periodic_interval, initial_delay=delay)
        self.timers.append(ticker)
def start(self):
    """Start the service: init the manager, register the service record,
    create the RPC consumers, and schedule the reporting/periodic timers.
    """
    vcs_string = version.version_string_with_vcs()
    LOG.audit(_('Starting %(topic)s node (version %(vcs_string)s)'),
              {'topic': self.topic, 'vcs_string': vcs_string})
    utils.cleanup_file_locks()
    rpc.register_opts(FLAGS)
    self.manager.init_host()
    self.model_disconnected = False
    ctxt = context.get_admin_context()
    try:
        # Look up an existing service record for this host/binary pair.
        service_ref = db.service_get_by_args(ctxt,
                                             self.host,
                                             self.binary)
        self.service_id = service_ref['id']
    except exception.NotFound:
        # First start on this host: create the record ourselves.
        self._create_service_ref(ctxt)

    if 'nova-compute' == self.binary:
        self.manager.update_available_resource(ctxt)

    self.conn = rpc.create_connection(new=True)
    LOG.debug(_("Creating Consumer connection for Service %s") %
              self.topic)

    rpc_dispatcher = self.manager.create_rpc_dispatcher()

    # Share this same connection for these Consumers
    self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=False)

    # Host-specific topic so messages can target this node directly.
    node_topic = '%s.%s' % (self.topic, self.host)
    self.conn.create_consumer(node_topic, rpc_dispatcher, fanout=False)

    # Fanout consumer on the shared topic for broadcast messages.
    self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=True)

    # Consume from all consumers in a thread
    self.conn.consume_in_thread()

    if self.report_interval:
        # Periodically report service liveness to the database.
        pulse = utils.LoopingCall(self.report_state)
        pulse.start(interval=self.report_interval,
                    initial_delay=self.report_interval)
        self.timers.append(pulse)

    if self.periodic_interval:
        # Optional random initial delay spreads periodic-task load
        # across services started at the same moment.
        if self.periodic_fuzzy_delay:
            initial_delay = random.randint(0, self.periodic_fuzzy_delay)
        else:
            initial_delay = None
        periodic = utils.LoopingCall(self.periodic_tasks)
        periodic.start(interval=self.periodic_interval,
                       initial_delay=initial_delay)
        self.timers.append(periodic)
def setup(): import mox # Fail fast if you don't have mox. Workaround for bug 810424 from nova import rpc # Register rpc_backend before fake_flags sets it FLAGS.register_opts(rpc.rpc_opts) from nova import context from nova import db from nova.db import migration from nova.network import manager as network_manager from nova.tests import fake_flags rpc.register_opts(FLAGS) if FLAGS.sql_connection == "sqlite://": if migration.db_version() > migration.INIT_VERSION: return else: testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db) if os.path.exists(testdb): return migration.db_sync() ctxt = context.get_admin_context() network = network_manager.VlanManager() bridge_interface = FLAGS.flat_interface or FLAGS.vlan_interface network.create_networks(ctxt, label='test', cidr=FLAGS.fixed_range, multi_host=FLAGS.multi_host, num_networks=FLAGS.num_networks, network_size=FLAGS.network_size, cidr_v6=FLAGS.fixed_range_v6, gateway=FLAGS.gateway, gateway_v6=FLAGS.gateway_v6, bridge=FLAGS.flat_network_bridge, bridge_interface=bridge_interface, vpn_start=FLAGS.vpn_start, vlan_start=FLAGS.vlan_start, dns1=FLAGS.flat_network_dns) for net in db.network_get_all(ctxt): network.set_network_host(ctxt, net) if FLAGS.sql_connection == "sqlite://": global _DB engine = get_engine() conn = engine.connect() _DB = "".join(line for line in conn.connection.iterdump()) else: cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db) shutil.copyfile(testdb, cleandb)
def start(self):
    """Launch the WSGI server for this service.

    If a manager is configured, it is initialized before serving.  After
    the server starts, ``self.port`` is refreshed from the server so
    callers see the actual bound port when '0' (choose a random free
    port) was requested.

    :returns: None
    """
    utils.cleanup_file_locks()
    rpc.register_opts(FLAGS)
    manager = self.manager
    if manager:
        manager.init_host()
    self.server.start()
    # The server may have bound an ephemeral port; record the real one.
    self.port = self.server.port
def wait():
    """Log the full flag set (masking secret-looking values) and block
    until the service launcher exits.

    Ctrl-C stops the launcher cleanly; RPC resources are cleaned up on
    the way out in either case.
    """
    LOG.debug(_("Full set of FLAGS:"))
    rpc.register_opts(FLAGS)
    for flag in FLAGS:
        flag_get = FLAGS.get(flag, None)
        # hide flag contents from log if contains a password
        # should use secret flag when switch over to openstack-common
        # NOTE: flag_get may be None, so guard it before the substring
        # test — otherwise an unset sql_connection raises TypeError.
        if ("_password" in flag or "_key" in flag or
                (flag == "sql_connection" and
                 flag_get and "mysql:" in flag_get)):
            LOG.debug(_("%(flag)s : FLAG SET ") % locals())
        else:
            LOG.debug("%(flag)s : %(flag_get)s" % locals())
    try:
        _launcher.wait()
    except KeyboardInterrupt:
        _launcher.stop()
    rpc.cleanup()
def wait():
    """Dump every registered flag to the debug log, masking values that
    look like secrets, then block on the global launcher until it
    finishes (or Ctrl-C stops it) and clean up RPC state."""
    LOG.debug(_('Full set of FLAGS:'))
    rpc.register_opts(FLAGS)
    for flag in FLAGS:
        flag_get = FLAGS.get(flag, None)
        # hide flag contents from log if contains a password
        # should use secret flag when switch over to openstack-common
        secret = ("_password" in flag or "_key" in flag or
                  (flag == "sql_connection" and "mysql:" in flag_get))
        if secret:
            LOG.debug(_('%(flag)s : FLAG SET ') % locals())
        else:
            LOG.debug('%(flag)s : %(flag_get)s' % locals())
    try:
        _launcher.wait()
    except KeyboardInterrupt:
        _launcher.stop()
    rpc.cleanup()
def main():
    """Command-line entry point: record, replay, or monitor RPC
    notification messages; returns 0 as the exit status."""
    rpc.register_opts(FLAGS)
    FLAGS.register_opts([
        cfg.StrOpt("datafile", default=None,
                   help="Data file to read or write"),
        cfg.BoolOpt("record", help="Record events"),
        cfg.BoolOpt("replay", help="Replay events"),
    ])
    remaining_args = FLAGS(sys.argv)
    utils.monkey_patch()

    parser = argparse.ArgumentParser(
        description="record or play back notification events")
    parser.add_argument("mode",
                        choices=("record", "replay", "monitor"),
                        help="operating mode")
    parser.add_argument("data_file", default="msgs.dat", nargs="?",
                        help="the data file to read or write")
    parser.add_argument("--topic", default="notifications.info",
                        help="the exchange topic to listen for")
    args = parser.parse_args(remaining_args[1:])

    # Send all DEBUG output straight to stderr as bare messages.
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.DEBUG)
    console.setFormatter(logging.Formatter("%(message)s"))
    root_logger = logging.getLogger("")
    root_logger.addHandler(console)
    root_logger.setLevel(logging.DEBUG)

    connection = rpc.create_connection()
    try:
        mode = args.mode
        if mode == "replay":
            with open(args.data_file, "rb") as src:
                send_messages(connection, args.topic, src)
        elif mode == "record":
            with open(args.data_file, "wb") as dst:
                record_messages(connection, args.topic, dst)
        elif mode == "monitor":
            monitor_messages(connection, args.topic)
    finally:
        connection.close()
    return 0
def main():
    """Record, replay, or monitor notification messages on the RPC bus.

    Registers config options, parses flags and arguments, sets up DEBUG
    logging to stderr, and dispatches on the ``mode`` argument.

    :returns: 0, the process exit status
    """
    rpc.register_opts(FLAGS)
    FLAGS.register_opts([
        cfg.StrOpt(
            'datafile',
            default=None,
            help='Data file to read or write',
        ),
        cfg.BoolOpt(
            'record',
            help='Record events',
        ),
        cfg.BoolOpt(
            'replay',
            help='Replay events',
        ),
    ])
    # FLAGS(...) consumes known flags; what is left goes to argparse.
    remaining_args = FLAGS(sys.argv)
    utils.monkey_patch()
    parser = argparse.ArgumentParser(
        description='record or play back notification events',
    )
    parser.add_argument(
        'mode',
        choices=('record', 'replay', 'monitor'),
        help='operating mode',
    )
    parser.add_argument(
        'data_file',
        default='msgs.dat',
        nargs='?',
        help='the data file to read or write',
    )
    parser.add_argument(
        '--topic',
        default='notifications.info',
        help='the exchange topic to listen for',
    )
    # remaining_args[0] is the program name; argparse wants the rest.
    args = parser.parse_args(remaining_args[1:])
    # Route DEBUG-and-up records to stderr as bare messages.
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    root_logger = logging.getLogger('')
    root_logger.addHandler(console)
    root_logger.setLevel(logging.DEBUG)
    connection = rpc.create_connection()
    try:
        if args.mode == 'replay':
            # NOTE(review): 'input' and 'output' below shadow the
            # builtins of the same names.
            with open(args.data_file, 'rb') as input:
                send_messages(connection, args.topic, input)
        elif args.mode == 'record':
            with open(args.data_file, 'wb') as output:
                record_messages(connection, args.topic, output)
        elif args.mode == 'monitor':
            monitor_messages(connection, args.topic)
    finally:
        # Always release the RPC connection.
        connection.close()
    return 0