Example #1
def connect_multiprocess(service = VoidService, config = {}, remote_service = VoidService, remote_config = {}, args={}):
    """starts an rpyc server on a new process, bound to an arbitrary port,
    and connects to it over a socket. Basically a copy of connect_thread().
    However, if ``args`` contains shared-memory objects, changes to them are
    bi-directional; that is, both processes now have access to the same shared memory.

    :param service: the local service to expose (defaults to Void)
    :param config: configuration dict
    :param remote_service: the remote service to expose (of the server; defaults to Void)
    :param remote_config: remote configuration dict (of the server)
    :param args: dict of local vars to pass to new connection, form {'name':var}

    Contributed by *@tvanzyl*
    """
    from multiprocessing import Process

    listener = socket.socket()
    listener.bind(("localhost", 0))
    listener.listen(1)

    def server(listener=listener, args=args):
        client = listener.accept()[0]
        listener.close()
        conn = connect_stream(SocketStream(client), service = remote_service, config = remote_config)
        try:
            for k in args:
                conn._local_root.exposed_namespace[k] = args[k]
            conn.serve_all()
        except KeyboardInterrupt:
            interrupt_main()

    t = Process(target = server)
    t.start()
    host, port = listener.getsockname()
    return connect(host, port, service = service, config = config)
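The shared-memory behaviour described in the docstring is easiest to see with plain multiprocessing, independent of rpyc. A minimal sketch, assuming nothing beyond the standard library; the names `child` and `counter` are illustrative only:

from multiprocessing import Process, Value

def child(counter):
    # The child mutates the shared value; the parent observes the change.
    with counter.get_lock():
        counter.value += 1

if __name__ == "__main__":
    counter = Value("i", 0)            # an int backed by shared memory
    p = Process(target=child, args=(counter,))
    p.start()
    p.join()
    print(counter.value)               # prints 1: the update crossed the process boundary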
Example #2
    def _find_active_serial_ports_from(self, wait_duration, device_files):
        """
        Finds active USB serial ports and returns them through a queue.

        This spawns a process that actually does the work.

        Args:
            device_files (list of strings):
                List of device files that will be checked for serial ports.
                Note that any device file other than ttyUSBx will be ignored.

        Returns:
            Queue on which the spawned process puts the list of device files
            that have an active serial port.
            Example: ["ttyUSB2", "ttyUSB4", "ttyUSB7"]

        """
        serial_results = Queue()

        serial_finder = Process(
            target=TopologyBuilder._get_active_serial_device_files,
            args=(self, serial_results, wait_duration, device_files))
        if self._verbose:
            print "Serial thread - Finding active serial ports"

        logging.info("Finding active serial ports")
        serial_finder.start()

        return serial_results
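Because the method returns the Queue rather than the finished list, the caller still has to collect the result from it. A hedged sketch of that consumer side; the timeout value and the helper name are assumptions, not part of the original:

import logging
from Queue import Empty   # Python 2; on Python 3 this is "from queue import Empty"

def read_serial_results(serial_results, timeout=30):
    # Block until the finder process publishes its list, or give up.
    try:
        return serial_results.get(timeout=timeout)
    except Empty:
        logging.warning("no serial port results within %s seconds", timeout)
        return []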
Example #3
    def benchmark(self, request, pk):
        queryset = Attempt.objects.all()
        attempt = get_object_or_404(queryset, id=pk)
        serializer = AttemptSerializer(attempt)

        # check payload
        payload = dict(request.data)
        if 'database' not in payload or 'benchmark' not in payload:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        # run benchmark
        process = Process(target = utils.run_benchmark, args = (pk, payload['database'], payload['benchmark']))
        process.start()
        # utils.run_benchmark(pk, payload['database'], payload['benchmark'])
        # should know the deployer id
        deployer_id = 1
        log_file_path = os.path.join(os.path.dirname(__file__), os.pardir, 'vagrant', str(deployer_id) + '.log')
        
        def stream_response_generator():
            last_line_no = 0
            while process.is_alive():
                time.sleep(1)
                with open(log_file_path, 'r') as log_file:
                    content = log_file.readlines()
                    line_no = len(content)
                    if line_no > last_line_no:
                        yield ''.join(content[last_line_no:])
                        last_line_no = line_no
            time.sleep(1)
            with open(log_file_path, 'r') as log_file:
                content = log_file.readlines()
                line_no = len(content)
                if line_no > last_line_no:
                    yield ''.join(content[last_line_no:])

        return StreamingHttpResponse(stream_response_generator())
Example #4
 def __init__(self, func):
     Process.__init__(self)
     self.in_buffer = None
     self.out_buffer = None
     self.func = func
     # number of tokens popped from the input buffer each time
     self.n_args = len(inspect.getargspec(func).args)
Example #5
def start_echo_server_process():
    # XXX DO NOT FORGET TO KILL THE PROCESS IF THE TEST DOES NOT SUCCEED
    sleep()
    p = Process(target=start_echo_server)
    p.start()
    sleep(1.5)
    return p
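The XXX comment is the contract here: the caller owns the child process. One way a test can honor that, sketched with a plain try/finally (the body of the test is a placeholder):

def test_echo_server_round_trip():
    p = start_echo_server_process()
    try:
        pass   # exercise the echo server here
    finally:
        # Kill the helper even when the assertions above fail.
        p.terminate()
        p.join()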
Example #6
def main():

    warnings.filterwarnings("ignore", "Degrees of freedom <= 0 for slice", RuntimeWarning)

    options = getoptions()

    setuplogger(options['log'], options['logfile'], logging.INFO)

    total_procs = options['nprocs'] * options['total_instances']
    start_offset = options['instance_id'] * options['nprocs']

    exit_code = 0

    if options['nprocs'] == 1:
        createsummary(options, None, None)
    else:
        proclist = []
        for procid in xrange(options['nprocs']):
            p = Process( target=createsummary, args=(options, total_procs, start_offset + procid) )
            p.start()
            proclist.append(p)

        for proc in proclist:
            proc.join()
            exit_code += proc.exitcode

    sys.exit(exit_code)
Example #7
def processFiles(patch_dir):
    root = os.getcwd()
    glbl.data_dirs = {}
    if root != patch_dir: working_path = root+"/"+patch_dir
    else: working_path = root

    for path, dirs, files in os.walk(working_path):
        if len(dirs) == 0: glbl.data_dirs[path] = ''
    

    # Multiprocessing Section
    #########################################
    Qids = glbl.data_dirs.keys()
    manager = Manager()                                      # creates shared memory manager object
    results = manager.dict()                                 # Add dictionary to manager, so it can be accessed across processes
    nextid = Queue()                                         # Create Queue object to serve as shared id generator across processes
    for qid in Qids: nextid.put(qid)                         # Load the ids to be tested into the Queue
    for x in range(0,multiprocessing.cpu_count()):           # Create one process per logical CPU
        p = Process(target=processData, args=(nextid,results)) # Assign process to processCBR function, passing in the Queue and shared dictionary
        glbl.jobs.append(p)                                   # Add the process to a list of running processes
        p.start()                                             # Start process running
    for j in glbl.jobs:
        j.join()                                              # For each process, join them back to main, blocking on each one until finished
    
    # write out results
    c = 1
    sets = results.keys()
    sets.sort()
    for x in sets:
        if results[x] != 'None':
            FINAL = open('result'+str(c)+'.txt','w')
            n = "\n************************************************************************************************\n"
            FINAL.write(n+"* "+x+'    *\n'+n+results[x]+"\n")
            FINAL.close()     
            c += 1
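The worker handed to each Process above typically drains the shared id Queue and records one entry per id in the managed dict. The real processData is not shown in this example, so the following is only a sketch of its likely shape:

import Queue as queue_module   # Python 2 name; on Python 3 the module is "queue"

def processData(nextid, results):
    while True:
        try:
            qid = nextid.get_nowait()          # pull the next directory id, if any remain
        except queue_module.Empty:
            return                             # queue drained: this worker exits
        results[qid] = "processed %s" % qid    # placeholder for the real per-directory work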
Example #8
    def __init__(self, response_queue, backup_name, host_port, user, password, authdb, base_dir, binary,
                 dump_gzip=False, verbose=False):
        Process.__init__(self)
        self.host, port     = host_port.split(":")
        self.host_port      = host_port
        self.port           = int(port)
        self.response_queue = response_queue
        self.backup_name    = backup_name
        self.user           = user
        self.password       = password
        self.authdb         = authdb
        self.base_dir       = base_dir
        self.binary         = binary
        self.dump_gzip      = dump_gzip
        self.verbose        = verbose

        self._command   = None
        self.completed  = False 
        self.backup_dir = "%s/%s" % (self.base_dir, self.backup_name)
        self.dump_dir   = "%s/dump" % self.backup_dir
        self.oplog_file = "%s/oplog.bson" % self.dump_dir
        self.start_time = time()

        signal(SIGINT, self.close)
        signal(SIGTERM, self.close)
Example #9
class FakeProcess:
    '''
    Runs an instance of multiprocessing.Process, which displays fake results based on PySystemMock.fakeCommandResult{},
    or based on a generic countdown using the command string, in the event that fakeCommandResult{} doesn't match.
    This class functions as an adapter from multiprocessing.Process() to subprocess.Popen(), which the caller will expect.
    '''
    stdout = FakeStdout()  # can be read by callers as if it's a Process.stdout object
    process = None

    MOCK_STEPS_ITERATIONS = 5

    def start(self, command, fakeCommandResults):
        fakeCommandResult = self.getFakeResultForCommand(command, fakeCommandResults)
        self.process = Process(target=writeFakeCommandResultsToPipe, args=(self.stdout.writer, fakeCommandResult))
        self.process.start()

    def getFakeResultForCommand(self, command, fakeCommandResults):
        for regex in fakeCommandResults:
            match = re.search(regex, command.__str__())
            if match:
                return fakeCommandResults[regex].split('\n')
        return ["processing %s [%d]..." % (command, i) for i in range(self.MOCK_STEPS_ITERATIONS, 0, -1)]

    def poll(self):
        return self.process.exitcode

    def wait(self):
        # multiprocessing.Process has no wait(); join() and return the exit code instead.
        self.process.join()
        return self.process.exitcode

    def terminate(self):
        self.process.terminate()
Example #10
    def serve(self):
        """Start a fixed number of worker threads and put client into a queue"""

        #this is a shared state that can tell the workers to exit when set as false
        self.isRunning.value = True

        #first bind and listen to the port
        self.serverTransport.listen()

        #fork the children
        for i in range(self.numWorkers):
            try:
                w = Process(target=self.workerProcess)
                w.daemon = True
                w.start()
                self.workers.append(w)
            except (Exception) as x:
                logging.exception(x)

        #wait until the condition is set by stop()

        while True:

            self.stopCondition.acquire()
            try:
                self.stopCondition.wait()
                break
            except (SystemExit, KeyboardInterrupt):
                break
            except (Exception) as x:
                logging.exception(x)

        self.isRunning.value = False
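The loop at the end blocks on stopCondition, so a matching stop() has to notify it from another thread or signal handler. That method is not shown in this example; a sketch of what it plausibly looks like, assuming stopCondition is a multiprocessing.Condition:

    def stop(self):
        # Wake the serve() loop above; it clears isRunning and returns.
        self.isRunning.value = False
        self.stopCondition.acquire()
        self.stopCondition.notify()
        self.stopCondition.release()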
Example #11
def run_stock_parser():
    symbol_q = Queue()
    price_q = Queue()

    stock_symbols = []
    with open('symbols.txt', 'r') as symfile:
        for n, line in enumerate(symfile):
            sym = line.strip()
            if sym:
                stock_symbols.append(sym)

    ncpu = len([x for x in open('/proc/cpuinfo').read().split('\n')\
                if x.find('processor') == 0])

    pool = [Process(target=read_stock_worker, args=(symbol_q, price_q, )) for _ in range(ncpu * 4)]

    for p in pool:
        p.start()
    output = Process(target=write_output_file, args=(price_q, ))
    output.start()

    for symbol in stock_symbols:
        symbol_q.put(symbol)
    symbol_q.put(_sentinel)
    for p in pool:
        p.join()
    price_q.put(_sentinel)
    output.join()
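Only one _sentinel is put on symbol_q even though ncpu * 4 workers read from it; that only shuts everything down cleanly if each worker passes the sentinel on before exiting. The real read_stock_worker is not shown here, so this is an assumption about its shape:

_sentinel = None   # module-level stop marker; the original defines _sentinel elsewhere

def read_stock_worker(symbol_q, price_q):
    while True:
        symbol = symbol_q.get()
        if symbol is _sentinel:
            symbol_q.put(_sentinel)        # hand the stop marker to the next worker
            break
        price_q.put((symbol, 0.0))         # placeholder for the real price lookup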
Example #12
class MultiProcessPlot(object):
	## Initialization
	def __init__(self):
		self.plotpipe, PlotterPipe = Pipe()
		## Called process for plotting
		self.plotter = ProcessPlotter()
		## Process holder
		self.plotprocess = Process(target = self.plotter, args = (PlotterPipe, ))
		self.plotprocess.daemon = True
		self.plotprocess.start()

	## Plot function
	def plot(self, finished=False):
		send = self.plotpipe.send

		if finished:
			send(None)
		else:
			if not LoopCounter % plotRefreshPeriod:
				reset = 1
			else:
				reset = 0

			## Compose data for pipe
			data = [reset,
					MessageMeasurement.pose2d.x, MessageMeasurement.pose2d.y, MessageMeasurement.pose2d.theta,
					MessageEKF.odompose2d.x, MessageEKF.odompose2d.y, MessageEKF.odompose2d.theta,
					MessageEKF.ekfpose2d.x, MessageEKF.ekfpose2d.y, MessageEKF.ekfpose2d.theta]
			# print(MessageEKF.ekfpose2d.x, MessageEKF.ekfpose2d.y, MessageEKF.ekfpose2d.theta) # //VB
			# print(MessageEKF.odompose2d.x, MessageEKF.odompose2d.y, MessageEKF.odompose2d.theta) # //VB
			## Send data through pipe
			send(data)
			## Reset global flags to receive new input
			global flagSubscriber1, flagSubscriber2
			flagSubscriber1 = False
			flagSubscriber2 = False
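The receiving end of the pipe, ProcessPlotter, is not shown in this example. A minimal sketch of what its __call__ might do with the list built in plot(), assuming matplotlib is available; the field indices follow the data layout above:

class ProcessPlotter(object):
    def __call__(self, pipe):
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots()
        while True:
            data = pipe.recv()
            if data is None:                  # plot() sends None when finished
                break
            if data[0]:                       # reset flag set every plotRefreshPeriod ticks
                ax.cla()
            ax.plot(data[7], data[8], 'r.')   # EKF pose x, y from the layout above
            plt.pause(0.01)
        plt.show()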
Example #13
 def start_parser_process(self):
     if self.mp_mode:
         from multiprocessing import Process, Event
     else:
         from multiprocessing.dummy import Process, Event
     waiting_shutdown_event = Event()
     if self.mp_mode:
         bot = self.bot.__class__(
             network_result_queue=self.network_result_queue,
             parser_result_queue=self.parser_result_queue,
             waiting_shutdown_event=waiting_shutdown_event,
             shutdown_event=self.shutdown_event,
             parser_mode=True,
             meta=self.bot.meta)
     else:
         # In non-multiprocess mode we start the `run_parser`
         # method in a new semi-process (actually a thread).
         # Because we use `run_parser` of the main spider instance,
         # all changes made in handlers are applied to the main
         # spider instance, which allows us to support deprecated
         # spiders that do not know about multiprocessing mode
         bot = self.bot
         bot.network_result_queue = self.network_result_queue
         bot.parser_result_queue = self.parser_result_queue
         bot.waiting_shutdown_event = waiting_shutdown_event
         bot.shutdown_event = self.shutdown_event
         bot.meta = self.bot.meta
     proc = Process(target=bot.run_parser)
     if not self.mp_mode:
         proc.daemon = True
     proc.start()
     return waiting_shutdown_event, proc
Example #14
def apply_update(fname, status):
    # As soon as python-apt closes its opened files on object deletion,
    # we can drop this fork workaround. As long as it keeps its files
    # open, we run the code in a separate fork, so the files are closed on
    # process termination and we can remount the filesystem read-only
    # without errors.
    p = Process(target=_apply_update, args=(fname, status))
    with rw_access("/", status):
        try:
            t_ver = get_target_version(fname)
        except BaseException:
            status.log('Reading xml-file failed!')
            return

        try:
            c_ver = get_current_version()
        except IOError as e:
            status.log('get current version failed: ' + str(e))
            c_ver = ""

        pre_sh(c_ver, t_ver, status)
        p.start()
        p.join()
        status.log("cleanup /var/cache/apt/archives")
        # don't use execute() here, it results in an error that the apt-cache
        # is locked. We currently don't understand this behaviour :(
        os.system("apt-get clean")
        if p.exitcode != 0:
            raise Exception(
                "Applying update failed. See logfile for more information")
        post_sh(c_ver, t_ver, status)
Example #15
 def __init__(self, dt=1):
     import psutil
     Process.__init__(self)
     self.daemon = True
     self.dt = dt
     self.parent = psutil.Process(current_process().pid)
     self.parent_conn, self.child_conn = Pipe()
Example #16
 def __init__(self, worker, outlist, index):
     Process.__init__(self)
     self.worker = worker
     if worker is None or worker.element is None:
         raise MultiProjectException("Bug: Invalid Worker")
     self.outlist = outlist
     self.index = index
Example #17
def start_workers(config):
    '''
    Picks up all the external system configuration from the config file and starts up as many processes as there are non-default sections in the config.
    The following elements are required from the default configuration section:
    - solr_url : base url of the solr server.
    - nova_db_server : IP or hostname of the nova controller.
    - nova_db_port : Port of the nova db to which the workers should connect. For nova+mysql this would be 3306.
    - nova_db_creds : credentials in the format user:password
    - amqp_server : IP or hostname of the amqp server. Usually, this is the same as the nova controller.
    - amqp_port : Port of the AMQP server. If using RMQ this should be 5672.
    - amqp_creds : credentials in the format user:password
    
    Each non-default section of the config should represent a resource type that this system monitors. Each individual worker corresponds to
    a resource type and is run in a separate python process.
    '''
 
    logUtils.setup_logging(config)
    global _LOGGER
    _LOGGER = logUtils.get_logger(__name__)
    for section in config.sections():
        process = Process(target=worker.run, args=(config, section,))
        process.daemon = True
        process.start()
        _LOGGER.info('Started worker process - ' + str(process.pid))
        _PROCESSES.append(process)
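The docstring spells out which keys the DEFAULT section must provide and that every other section becomes one worker process. A hedged sketch of a config matching that description; every value and the section name below are placeholders, not taken from the original project:

SAMPLE_CONFIG = """
[DEFAULT]
solr_url = http://localhost:8983/solr
nova_db_server = 192.0.2.10
nova_db_port = 3306
nova_db_creds = nova:secret
amqp_server = 192.0.2.10
amqp_port = 5672
amqp_creds = guest:guest

[compute_nodes]
poll_interval = 30
"""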
Example #18
def run_parkinglot_expt(net, n):
    "Run experiment"

    seconds = args.time

    # Start the bandwidth and cwnd monitors in the background
    monitor = Process(target=monitor_devs_ng,
            args=('%s/bwm.txt' % args.dir, 1.0))
    monitor.start()
    start_tcpprobe()

    # Get receiver and clients
    recvr = net.getNodeByName('receiver')
    sender1 = net.getNodeByName('h1')

    # Start the receiver
    port = 5001
    recvr.cmd('iperf -s -p', port,
              '> %s/iperf_server.txt' % args.dir, '&')

    waitListening(sender1, recvr, port)

    # TODO: start the sender iperf processes and wait for the flows to finish
    # Hint: Use getNodeByName() to get a handle on each sender.
    # Hint: Use sendCmd() and waitOutput() to start iperf and wait for them to finish
    # Hint: waitOutput waits for the command to finish allowing you to wait on a particular process on the host
    # iperf command to start flow: 'iperf -c %s -p %s -t %d -i 1 -yc > %s/iperf_%s.txt' % (recvr.IP(), 5001, seconds, args.dir, node_name)
    # Hint (not important): You may use progress(t) to track your experiment progress

    recvr.cmd('kill %iperf')

    # Shut down monitors
    monitor.terminate()
    stop_tcpprobe()
Example #19
def send_probe_requests(interface=None, ssid=None):

    # initialize shared memory
    results = Queue()

    # start sniffer before sending out probe requests
    p = Process(target=sniffer, args=(interface, results,))
    p.start()

    # give sniffer a chance to initialize so that we don't miss
    # probe responses
    time.sleep(3)

    # send out probe requests... sniffer will catch any responses
    ProbeReq(ssid=ssid, interface=interface)

    # make sure to get results from shared memory before allowing 
    # sniffer to join with parent process 
    probe_responses = results.get()

    # join sniffer with its parent process
    p.join()

    # return results
    return probe_responses
Example #20
def webgui(args):
    os.environ["FWDB_CONFIG"] = json.dumps(get_lp(args).to_dict())
    from fireworks.flask_site.app import app
    if args.wflowquery:
        app.BASE_Q_WF = json.loads(args.wflowquery)
    if args.fwquery:
        app.BASE_Q = json.loads(args.fwquery)
        if "state" in app.BASE_Q:
            app.BASE_Q_WF["state"] = app.BASE_Q["state"]

    if not args.server_mode:
        from multiprocessing import Process
        p1 = Process(
            target=app.run,
            kwargs={"host": args.host, "port": args.port, "debug": args.debug})
        p1.start()
        import webbrowser
        time.sleep(2)
        webbrowser.open("http://{}:{}".format(args.host, args.port))
        p1.join()
    else:
        from fireworks.flask_site.app import bootstrap_app
        try:
            from fireworks.flask_site.gunicorn import (
                StandaloneApplication, number_of_workers)
        except ImportError:
            import sys
            sys.exit("Gunicorn is required for server mode. "
                     "Install using `pip install gunicorn`.")
        options = {
            'bind': '%s:%s' % (args.host, args.port),
            'workers': number_of_workers(),
        }
        StandaloneApplication(bootstrap_app, options).run()
Example #21
def nct_tagging(index_name, host, port_no, process_ids,
                stopwords, umls, pos, nprocs=1):

    # open the clinical trail ids file to process
    nct_ids = []
    for line in open(process_ids, 'rb'):
        nct_ids.append(line.strip())

    # Check if index exists
    index = es_index.ElasticSearch_Index(index_name, host=host, port=port_no)
    index.add_field('ec_tags_umls', term_vector=True)

    # Get clinical
    # process each clinical trial and store to XML file
    log.info('processing clinical trials')
    procs = []
    chunksize = int(math.ceil(len(nct_ids) / float(nprocs)))
    for i in xrange(nprocs):
        p = Process(target=_worker, args=(nct_ids[chunksize * i:chunksize * (i + 1)],
                                          index_name, host, port_no,
                                          stopwords, umls, pos, (i + 1)))
        procs.append(p)
        p.start()

    for p in procs:
        p.join()
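The slicing above splits nct_ids into nprocs chunks of nearly equal size, with the last chunk absorbing the remainder. A quick illustration of the arithmetic with made-up numbers:

import math

ids = list(range(10))                                  # 10 ids
nprocs = 3
chunksize = int(math.ceil(len(ids) / float(nprocs)))   # ceil(10 / 3) = 4
chunks = [ids[chunksize * i:chunksize * (i + 1)] for i in range(nprocs)]
# -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]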
Example #22
class ClockInfoUpdater(object):
    def __init__(self):
        self.weather_parent_pipe, weather_child_pipe = Pipe()
        weather = WeatherAPIClient(weather_child_pipe)
        self.weather_api_client = Process(target=weather.run_forever)
        self.weather_api_client.start()

        self.traffic_parent_pipe, traffic_child_pipe = Pipe()
        traffic = TrafficAPIClient(traffic_child_pipe)
        self.traffic_api_client = Process(target=traffic.run_forever)
        self.traffic_api_client.start()

    def run(self, clock_info, update_freq):
        now = datetime.now()
        last_update = clock_info.get('last_update_time')
        if last_update:
            update_time_delta = now - last_update
            if update_time_delta.total_seconds() < update_freq:
                return False
        clock_info['last_update_time'] = now
        update_time(clock_info, now)
        update_weather(clock_info, now, self.weather_parent_pipe)
        update_color(clock_info, now)
        update_traffic(clock_info, now, self.traffic_parent_pipe)
        return True
Example #23
def start_short_timeout_app_process():
    # XXX DO NOT FORGET TO KILL THE PROCESS IF THE TEST DOES NOT SUCCEED
    p = Process(target=start_short_timeout_example_server)
    p.start()
    sleep()
    check_connection()
    return p
Example #24
 def __init__(self, callback=None, log=True, log_level="DEBUG"):
     """ Initialize the data parser, connect to the can bus. """
     Process.__init__(self)
     self.callback = callback
     self.log = log
     self.log_level = log_level
     self.data = {}
Example #25
 def __init__(self, _output_file,  header, _input_pipe, _disk_rows):
     Process.__init__(self)
 
     self.file_name = _output_file 
     self.input_pipe = _input_pipe
     self.write_rows = _disk_rows
     self.header = header
Example #26
class TCPServer(object):
    def __init__(self, port):
        self.port = int(port)

    def start(self):
        def go(port):
            from httpretty import HTTPretty
            HTTPretty.disable()
            import socket
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.bind(('localhost', port))
            s.listen(True)
            conn, addr = s.accept()

            while True:
                data = conn.recv(1024)
                conn.send(b"RECEIVED: " + bytes(data))

            conn.close()

        args = [self.port]
        self.process = Process(target=go, args=args)
        self.process.start()
        time.sleep(0.4)

    def stop(self):
        try:
            os.kill(self.process.pid, 9)
        except OSError:
            self.process.terminate()
        finally:
            self.is_running = False
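A short usage sketch for the helper above, written the way a test might use it; the port number is arbitrary:

import socket

server = TCPServer(8765)
server.start()
try:
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(("localhost", 8765))
    s.send(b"hello")
    print(s.recv(1024))   # b"RECEIVED: hello"
    s.close()
finally:
    server.stop()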
Example #27
 def scanner_network(self,gateway):
     scan = ''
     config_gateway = gateway.split('.')
     del config_gateway[-1]
     for i in config_gateway:
         scan += str(i) + '.'
     gateway = scan
     ranger = str(self.ip_range.text()).split('-')
     jobs = []
     manager = Manager()
     on_ips = manager.dict()
     for n in xrange(int(ranger[0]),int(ranger[1])):
         ip='%s{0}'.format(n)%(gateway)
         p = Process(target=self.working,args=(ip,on_ips))
         jobs.append(p)
         p.start()
     for i in jobs: i.join()
     for i in on_ips.values():
         Headers = []
         n = i.split('|')
         self.data['IPaddress'].append(n[0])
         self.data['MacAddress'].append(n[1])
         self.data['Hostname'].append('<unknown>')
         for n, key in enumerate(reversed(self.data.keys())):
             Headers.append(key)
             for m, item in enumerate(self.data[key]):
                 item = QTableWidgetItem(item)
                 item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
                 self.tables.setItem(m, n, item)
     Headers = []
     for key in reversed(self.data.keys()):
         Headers.append(key)
     self.tables.setHorizontalHeaderLabels(Headers)
Example #28
def main():
	# Worker processes we will use; don't change this because each one derives keys for exactly 100 games
	# (You can change this if you know how, I'm too euphoric now to do more flexibility)
	start = time();
	threads = 10;
	for line in sys.stdin:
		# Parsing the stdin
		encryptedMessage,encryptedGames = line.strip().split(':');
		encryptedGames = encryptedGames.split('~');
		# Queue with keys
		q = Queue();
		# Threads
		for i in range(10):
			p = Process(target=keysFinder, args=(encryptedGames[i*100:(i+1)*100],q));
			p.start();
		# Number of threads already finished
		finished = 0;
		keys = [];
		while finished < threads:
			keys += q.get();
			finished+=1;

		# From all keys, try which one decrypts a valid message
		em = binascii.unhexlify(encryptedMessage);
		found = False;
		for key in keys:
			x = AES.new(key);
			dec = x.decrypt(em);
			if (isCorrect(dec)):
				found = True;
				# Make unpadding and print. Voila!
				print removePadding(dec.strip());
	if (sys.argv[1] == 'benchmark'):
		print "Time elapsed: ",time()-start;
Example #29
 def __init__(self, **kwargs):
     self.addressManager = OSC.CallbackManager()
     self.queue = Queue()
     Process.__init__(self, args=(self.queue,))
     self.daemon     = True
     self._isRunning = Value('b', True)
     self._haveSocket= Value('b', False)
Example #30
class KeepAliveClientTest(TestCase):

    server_address = ("127.0.0.1", 65535)

    def __init__(self, *args, **kwargs):
        super(KeepAliveClientTest, self).__init__(*args, **kwargs)
        self.server_process = Process(target=self._run_server)

    def setUp(self):
        super(KeepAliveClientTest, self).setUp()
        self.client = Client(["%s:%d" % self.server_address])
        self.server_process.start()
        time.sleep(.10)

    def tearDown(self):
        self.server_process.terminate()
        super(KeepAliveClientTest, self).tearDown()

    def _run_server(self):
        self.server = BaseHTTPServer.HTTPServer(self.server_address, ClientAddressRequestHandler)
        self.server.handle_request()

    def test_client_keepalive(self):
        for x in range(10):
            result = self.client.sql("select * from fake")

            another_result = self.client.sql("select again from fake")
            self.assertEqual(result, another_result)