def find_workbooks(manager, args):
    # note: we can just ignore the all flag.
    wbs = manager.list(workbook_name=args.workbook_name, workbook_id=args.workbook_id)
    # only raise an error if the option '--all' is not used
    if len(wbs) == 0:
        if args.workbook_id is not None:
            raise lb_exception.LBCommandError("workbook '%s' not found" % args.workbook_id)
        if args.workbook_name is not None:
            raise lb_exception.LBCommandError("workbook '%s' not found" % args.workbook_name)
    return wbs
def export_workspace(args, interactive=None):
    command_args = [
        os.path.expandvars('$LOGICBLOX_HOME/bin/dlbatch'),
        '-command', 'hotcopy',
        '--workspace', args.name,
        '--dest', args.dest
    ]
    if args.hostname is not None:
        command_args += ['--hostname', args.hostname]
    if args.port is not None:
        command_args += ['--port', args.port]
    else:
        conn = _get_conn(args)
        if hasattr(conn, 'port'):
            command_args += ['--port', str(conn.port)]
    if args.overwrite:
        command_args += ['--overwrite']
    if args.unixDomainSocket is not None:
        command_args += ['--uxdomain', args.unixDomainSocket]

    # Currently ignores exit code, matching behavior for lb services.
    # Once a better solution arises, we should set it properly.
    try:
        subprocess.check_call(command_args)
        print "exported workspace '%s' to directory '%s'" % (args.name, args.dest)
    except subprocess.CalledProcessError:
        raise lb_exception.LBCommandError(
            "export-workspace '%s' to directory '%s' failed" % (args.name, args.dest))
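
# Illustrative sketch (not called anywhere): one way export_workspace might be
# driven programmatically. The attribute names mirror the fields read above;
# the values are hypothetical examples, not defaults shipped with lb.
def _example_export_workspace_call():
    import argparse
    args = argparse.Namespace(
        name='my_workspace',            # hypothetical workspace name
        dest='/tmp/my_workspace_copy',  # hypothetical destination directory
        hostname=None,                  # use the default connection
        port=None,                      # fall back to the port of _get_conn(args)
        overwrite=True,                 # pass --overwrite to dlbatch
        unixDomainSocket=None)
    export_workspace(args)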
def cmd_delete_template(args):
    manager = WorkbookManager(args.workspace)
    # note: we can just ignore the all flag.
    templates = manager.list_templates(template_name=args.template_name, template_id=args.template_id)
    # only raise an error if the option '--all' is not used
    if len(templates) == 0:
        if args.template_id is not None:
            raise lb_exception.LBCommandError("template '%s' not found" % args.template_id)
        if args.template_name is not None:
            raise lb_exception.LBCommandError("template '%s' not found" % args.template_name)
    for wbt in templates:
        print("deleting template '%s'" % wbt.id(), file=sys.stderr)
        manager.delete_template(wbt.id())
def close(args, interactive):
    """Remove the saved workspace"""
    conn = _get_conn(args).open()
    # if no workspace is open
    if not interactive.current_workspace:
        raise lb_exception.LBCommandError("No workspace open.")
    if args.destroy:
        args.name = [str(interactive.current_workspace)]
        delete_workspace(args, interactive)
    interactive.current_workspace = ''
    interactive.prompt = interactive.default_prompt
def open_workspace(args, interactive):
    """Saves a workspace to the session. Required for commands using a workspace"""
    conn = _get_conn(args).open()
    workspace = args.workspace
    list_workspace = _get_workspaces(interactive)
    if workspace in list_workspace:
        interactive.current_workspace = workspace
        interactive.prompt = 'lbi %s> ' % workspace
    else:
        raise lb_exception.LBCommandError(
            "workspace %s does not exist. Use command 'workspaces' for a list of existing workspaces" % workspace)
def run(self):
    """ takes a Test object from the queue and runs it """
    ## importing this here so that we don't have to depend on the readline module
    ## for build, only for testing
    from interactive import lb_interactive_console
    while True:
        try:
            test_object = self.queue.get()
            # initialize interactive object
            interactive_ = lb_interactive_console.LbInteractive(
                stdout=StringIO(), use_rawinput=False, dev=True)
            interactive_.set_current_directory(
                os.path.abspath(os.path.dirname(test_object.test_file)))
            # run setup
            if test_object.set_up:
                lb_unit_common.run_file(test_object.set_up, interactive_, test_object,
                                        comment=test_object.test_file)
            # If setup does not generate errors, run the test file
            if test_object.state == State.SUCCESS:
                interactive_.clear_variables()
                if test_object.concurrent:
                    raise lb_exception.LBCommandError("concurrent tests are no longer supported")
                else:
                    lb_unit_common.run_file(test_object.test_file, interactive_, test_object)
        finally:
            # run teardown
            if test_object.tear_down and not self.no_clean_up:
                interactive_.clear_variables()
                lb_unit_common.run_file(test_object.tear_down, interactive_, test_object,
                                        comment=test_object.test_file)
            if len(test_object.messages) != 0:
                self.error_list.append(test_object.messages)
            self.count_dict[test_object.state] += 1
            test_object.test_report()
            self.queue.task_done()
            self.queue.count -= 1
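
# Minimal standalone sketch of the worker pattern run() implements: daemon
# threads pull items off a shared Queue and always mark them done, even when
# the work raises. The names (_example_queue_pool, _example_worker) are
# illustrative only; run() above additionally tracks a custom 'count'
# attribute on the queue instead of relying on q.join().
def _example_queue_pool(items, num_threads=2):
    import threading
    import Queue  # Python 2 stdlib module, as used elsewhere in this code

    q = Queue.Queue()
    for item in items:
        q.put(item)

    def _example_worker():
        while True:
            item = q.get()
            try:
                pass  # a real worker would run the test here
            finally:
                q.task_done()  # mark the item done even on error

    for _ in range(num_threads):
        t = threading.Thread(target=_example_worker)
        t.setDaemon(True)
        t.start()

    q.join()  # block until every queued item has been processed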
def transaction(args, interactive):
    """Starts a REPL that handles transaction commands. Commands are all run at commit"""
    conn = _get_conn(args).open()
    # check that a workspace is open
    if interactive.current_workspace in interactive.list_workspaces:
        transaction_loop = interactive.transaction
        # give this transaction the variables it needs
        transaction_loop.set_variables(
            cmdqueue=interactive.cmdqueue,
            stdout=interactive.stdout,
            stdin=interactive.stdin,
            current_directory=interactive.current_directory)
        interactive.in_transaction = True
        try:
            transaction_loop.cmdloop()  # starts the REPL
        finally:
            # add in line count for errors
            interactive.line_count += transaction_loop.line_count
            interactive.in_transaction = False
        cmds = transaction_loop.transaction_commands_parsed
        cmds_after_fixpoint = transaction_loop.transaction_commands_after_fixpoint_parsed
        try:
            if cmds or cmds_after_fixpoint:
                args.workspace = interactive.current_workspace
                args.commands = cmds
                args.commands_after_fixpoint = cmds_after_fixpoint
                cmd = lb_command.TransactionCommand.from_args(conn, args, interactive)
                result = cmd.run()
                _print_transaction_result(result)
        finally:
            # even if an exception is raised, clear all commands, line_count, cmdqueue
            transaction_loop.clear_transaction_variables()
    else:
        raise lb_exception.LBCommandError('No workspace open.')
def main(args=None, supp_int_stdout=True, supp_int_stderr=True, supp_test_report=False):
    """
    Runs a set of tests given the optional 'args' argument, or takes them from
    the command line. The optional 'supp_*' arguments suppress output from the
    interactive REPL.
    """
    # supp_int_stdout and supp_int_stderr suppress any output from running
    # tests so that only test results are printed; by default they are True.
    # supp_test_report suppresses output printed between tests, such as
    # progress and time, but the summary is still printed; by default it is False.

    # Check that lb services is started
    conn = blox.connect.io.Connection(False)
    try:
        conn.open()
    except Exception:
        raise lb_exception.LBServerOff()

    start_time = time.time()
    thread_count = 1
    test_queue = Queue.Queue()
    test_queue.count = 0
    error_list = []
    count_dict = {State.SUCCESS: 0, State.FAILURE: 0, State.ERROR: 0}
    stdout_orig = sys.stdout
    stderr_orig = sys.stderr
    Test.print_function = sys.stdout

    if args.defaultFixtures:
        # create default setup and teardown
        setup = tempfile.NamedTemporaryFile(delete=False)
        setup.write('create --unique')
        setup.close()
        teardown = tempfile.NamedTemporaryFile(delete=False)
        teardown.write('close --destroy')
        teardown.close()
        Test.default_set_up = setup.name
        Test.default_tear_down = teardown.name

    # find suites and tests from command line arguments
    add_all_tests(args, test_queue)

    # if the list option is given, print tests and then return
    if args.list:
        print_list(test_queue)
        return

    # suppress stdout and stderr from interactive
    if supp_int_stdout:
        sys.stdout = StringIO()
    if supp_int_stderr:
        sys.stderr = StringIO()
    if supp_test_report:
        Test.print_function = StringIO()

    # set thread count
    if args.sequential:
        thread_count = 1
    elif args.threads is not None:
        thread_count = args.threads

    # spawn a pool of threads, and pass them the queue instance
    for i in range(int(thread_count)):
        thread = ThreadTest(test_queue, error_list, count_dict, args.noCleanup)
        thread.setDaemon(True)
        thread.start()

    Test.print_progress = args.progress
    Test.print_time = args.time

    # wait on the queue until everything has been processed
    while test_queue.count != 0:
        try:
            conn.open()
            time.sleep(0.1)
        except socket.error:
            raise lb_exception.LBServerOff()

    Test.default_set_up = None
    Test.default_tear_down = None

    # restore stdout and stderr
    sys.stdout = stdout_orig
    sys.stderr = stderr_orig

    print_summary(count_dict, error_list)
    print 'Script Elapsed Time: %.5fs' % (time.time() - start_time)
    if error_list:
        raise lb_exception.LBCommandError("%s errors detected." % len(error_list))
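
# Illustrative sketch (hypothetical values): the attributes main() reads from
# 'args'. In the real tool these presumably come from the command-line
# argument parser; this Namespace only documents the expected shape.
def _example_run_tests():
    import argparse
    args = argparse.Namespace(
        defaultFixtures=True,   # generate the default setup/teardown fixtures
        list=False,             # set True to only print the discovered tests
        sequential=False,
        threads=4,              # ignored when sequential is True
        noCleanup=False,
        progress=True,
        time=True)
    # add_all_tests() also reads suite/test selection fields from args,
    # which are omitted here.
    main(args)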
def execute_server(args):
    lb_home = os.getenv('LOGICBLOX_HOME')
    command_line = [lb_home + '/bin/lb-server']
    if args.unixDomainSocket:
        command_line.extend(['--unix_domain_socket', args.unixDomainSocket])
    if args.port:
        command_line.extend(['--port', args.port])
    if args.workspace_folder:
        command_line.extend(['--workspaceFolder', args.workspace_folder])
    if args.systemd:
        command_line.extend(['--systemd', args.systemd])
    if args.logfile:
        command_line.extend(['--logfile', args.logfile])

    if args.daemonize == 'false':
        # Popen does not work with systemd; this works
        os.execvp(command_line[0], command_line)
    else:
        # This branch is only used when services are not managed by
        # systemd. The lb-server systemd unit sets daemonize to false.
        # Unfortunately we need to redirect the output of nohup
        # immediately, and cannot rely on lb-server to redirect its
        # output to the log file, so we need to find the logfile
        # location from lb-server.config.
        logfile = args.logfile
        if logfile == "-":
            raise lb_exception.LBCommandError("lb-server daemon cannot log to stdout when daemonize is true")
        if logfile is None:
            config = load_default_config('lb-server')
            logfile = config.get('logging', 'file')
            logfile = logfile.replace('$LB_DEPLOYMENT_HOME', get_lb_deployment_home())

        # To avoid setting up file descriptors, forking etc, we use
        # the existing systemd notify mechanism to let the daemon
        # process tell us when it's ready. This is necessary because
        # otherwise the caller might start using lb-server before it
        # is ready.

        # We cannot use mkstemp because the file should not exist. We
        # use mkdtemp to create a secure directory that cannot be
        # hijacked.
        tmpdir = tempfile.mkdtemp()
        # Unix domain socket paths have a length limit of 103. If the
        # path is too long, try /tmp instead.
        if len(tmpdir) > 99:
            os.rmdir(tmpdir)
            tmpdir = tempfile.mkdtemp(dir='/tmp')
        sockname = os.path.join(tmpdir, 'sd')
        notify = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        notify.bind(sockname)
        try:
            os.environ["NOTIFY_SOCKET"] = sockname
            command_line[:0] = ["nohup"]
            with open(logfile, 'a') as f:
                p = subprocess.Popen(command_line, stdout=f.fileno(), stderr=f.fileno(), close_fds=True)

            # We don't bother writing the pidfile only if starting
            # lb-server succeeded, because a pidfile for a
            # non-existing pid is fine.
            with open(args.pidfile, 'w') as f:
                f.write('%d' % p.pid)

            # To avoid blocking unnecessarily long if there was a
            # failure, we loop 10 times and attempt to receive a
            # notification. In every iteration, we check if the
            # process terminated (in which case it will surely never
            # notify us).
            # This terminates very quickly if we do receive a
            # notification, which is the case we want to optimize for.
            notify.settimeout(1.0)
            timeout = 10
            for i in range(0, timeout):
                try:
                    msg = notify.recvfrom(1024)
                    if msg[0] == 'READY=1':
                        break
                    else:
                        raise lb_exception.LBCommandError("lb-server daemon sent unexpected notification '%s'" % msg[0])
                except socket.timeout:
                    # Check if the process terminated, and if so report that.
                    p.poll()
                    if p.returncode is not None:
                        raise lb_exception.LBCommandError("lb-server daemon terminated with exit code %d. Check '%s'." % (p.returncode, logfile))
                    # Total timeout exceeded and still no notification.
                    # We don't know what is wrong with the process we
                    # launched, so return a generic failure.
                    if i == timeout - 1:
                        raise lb_exception.LBCommandError("lb-server daemon did not confirm readiness within %d seconds. Check '%s'." % (timeout, logfile))
        finally:
            # Ignore failures here to avoid obscuring any other
            # exceptions in flight.
            try:
                notify.close()
            except:
                pass
            try:
                os.remove(sockname)
                os.rmdir(tmpdir)
            except:
                pass
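
# Minimal self-contained sketch of the sd_notify-style handshake that
# execute_server() relies on: the parent binds a unix datagram socket, exports
# its path as NOTIFY_SOCKET, and waits for the daemon to send the datagram
# 'READY=1'. Both ends live in one process here purely for illustration;
# _example_notify_handshake is a hypothetical name.
def _example_notify_handshake():
    import os
    import socket
    import tempfile

    tmpdir = tempfile.mkdtemp()
    sockname = os.path.join(tmpdir, 'sd')
    receiver = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    receiver.bind(sockname)
    receiver.settimeout(1.0)
    try:
        # What a systemd-aware daemon does once it is ready to serve:
        sender = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        sender.sendto('READY=1', sockname)
        sender.close()

        # What execute_server() does while waiting for the daemon:
        msg, _ = receiver.recvfrom(1024)
        assert msg == 'READY=1'
    finally:
        receiver.close()
        os.remove(sockname)
        os.rmdir(tmpdir)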
def transaction_fixpoint(arg, interactive):
    if interactive.after_fixpoint:
        raise lb_exception.LBCommandError(
            'Commands are already after fixpoint!')
    else:
        interactive.after_fixpoint = True