Example #1
def main():
    server = Server(host, port)

    # For each accepted device create a thread to execute the "server" function
    for dev in accepted_devices:
        thread = Thread(target=threaded_func, args=(server, str(dev)))
        thread.start()
        threads.append(thread)
        print(":main info: thread finished...exiting")

    # Wait for all threads to complete
    for t in threads:
        t.join()

    print(":main info: exiting Main Thread")
    server.close()
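
threaded_func is not shown in this example. A minimal, hypothetical sketch of a per-device handler, assuming the shared jsonsocket Server exposes accept()/recv()/send() as in the other examples on this page, might look like:

# Hypothetical per-device handler; neither the name threaded_func nor its body
# comes from the example above, this is only an illustrative sketch.
def threaded_func(server, device_id):
    server.accept()                              # wait for the device to connect
    data = server.recv()                         # receive one JSON message
    print(":worker info: {} sent {}".format(device_id, data))
    server.send({"device": device_id, "status": "ok"})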
Example #2
def start_json_server(queue_object, port=9999, host="0.0.0.0"):
    server = Server(host, port)
    while True:
        data = server.accept().recv()
        print("data: ", data)
        if not data:
            server.send({"status": "error", "message": "not valid json"})
        else:
            server.send({"status": "ok"})
            queue_object.put(data)
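
A minimal usage sketch, assuming only the standard library queue and threading modules, showing how start_json_server above could be launched and how another thread might drain the received messages:

# Hypothetical launcher for start_json_server; the consumer loop below is illustrative only.
import queue
import threading

incoming = queue.Queue()
listener = threading.Thread(target=start_json_server, args=(incoming,), daemon=True)
listener.start()

while True:
    message = incoming.get()      # blocks until the listener puts a received JSON object
    print("consumed:", message)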
Example #3
File: worker.py Project: shavvn/dispytcher
    def __init__(self, host_name, port, key, num_slots):
        """Initialize a server process

        Arguments:
            host_name {string} -- host ip
            port {int} -- socket port
            key {str} -- shared key used to validate incoming messages
            num_slots {int} -- total number of slots for this worker

        Raises:
            OSError -- happens when resolving host or creating socket
        """
        host_ip = socket.gethostbyname(host_name)

        # docker daemon
        self.docker = docker.from_env()

        # socket server
        self.server = Server(host_ip, port)

        self._key = key

        self._total_slots = num_slots
        self._avail_slots = num_slots
        self._job_queue = []

        # Because it's highly likely to receive multiple jobs at once and each
        # job can take very long, we don't want to block subsequent jobs, so we
        # use one thread per received job and keep track of the threads in these
        # data structures, indexed by job name. We use {} instead of [] because
        # dicts are more robust for multi-threaded access.
        self._threads = {}
        self._thread_stops = {}
        self._running_jobs = {}

        # signal handler to kill the process
        signal.signal(signal.SIGINT, self._gracefully_exit)
        signal.signal(signal.SIGTERM, self._gracefully_exit)

        # image maintenance: image&tag as key, last updated time as value
        self._last_checked = {}
Example #4
def iradio_req_process():

    # Create jsonserver object
    server = Server(host, port)

    # Accept client requests indefinitely
    print("ready to listen to requests")

    # Keeping track of no. of clients
    count_clients = 0

    while True:
        server.accept()
        data = server.recv()

        _process(data)

        # testing with dummy data
        from file_parser import hard_coded
        server.send(hard_coded().__dict__)

        print("Radio list sent to client #{}".format(count_clients))
        count_clients += 1
        print(count_clients)
Example #5
            help="IP for host")
    ap.add_argument("-p", "--port", type=str, default=8000,
            help="port for host")

    print_mode = True

    args = vars(ap.parse_args())

    GLOBAL_CLOUD_RECOGNIZER = pickle.loads(open(args["cloud_recognizer"], "rb").read())
    GLOBAL_CLOUD_LE = pickle.loads(open(args["cloud_le"], "rb").read())

    host = args['host']
    port = args['port']

    # Server code:
    server = Server(host, port)

    # Read until video is completed
    print('ready to accept connections at server')
    while True:

        # Capture frame-by-frame
        server.accept()
        received_query = server.recv()

        frame = received_query['frame']
        embedding_vec = np.array([float(x) for x in received_query['emb']]).reshape(1,-1)

        if print_mode:
            print(' ')
            print('server query at frame: ', frame)
Example #6
#!/usr/bin/python

from jsonsocket import Client, Server

host = 'localhost'
port = 10000

server = Server(host, port)
while True:
	print 'Server listening...'
	server.accept()
	data = server.recv()
	print data
	if data:
		break
server.send({'data': data}).close()
Example #7
# file:server.py
from jsonsocket import Server

host = 'localhost'
port = 32556

server = Server(host, port)

while True:
    server.accept()
    data = server.recv()
    server.send({"response": data})

server.close()
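
For reference, a hypothetical client-side sketch for the echo server above, assuming jsonsocket.Client (imported alongside Server in Examples #6 and #11) provides chainable connect/send/recv/close calls, mirroring how Server is chained in these examples:

# Hypothetical client for the echo server above; the exact Client API is an assumption.
from jsonsocket import Client

client = Client()
client.connect('localhost', 32556).send({"message": "hello"})
response = client.recv()          # expected shape: {"response": {"message": "hello"}}
print(response)
client.close()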
Example #8
File: master.py Project: d3QUone/multipro
def collect():
    global storage
    for item in data:
        storage[item["item"]] = {
            "script": importlib.import_module("apps." + item["item"]),
            "name": item["item"],
            "description": item["description"]
        }
        storage[item["item"]]["script"].test()  # YO, test OK


if __name__ == "__main__":
    dashboard = Popen(["python", "admin.py"])
    print "YO, I'm on!"
    # collect()

    server = Server("localhost", ADMIN_PORT)
    while True:
        try:
            server.accept()
            data = server.recv()

            print data

            if data["endpoint"] is "info":
                server.send({
                    "info": "YO!"
                })
        except KeyboardInterrupt:
            dashboard.kill()
            break
Example #9
        "id": 1,
        "ip": 'localhost',
        "port": 8888
    },
    "ufla": {
        "id": 2,
        "ip": 'localhost',
        "port": 8889
    }
}

serverDomain = input('University:')
host = srvlist[serverDomain]['ip']
port = srvlist[serverDomain]['port']

server = Server(host, port)

msgList = []

while True:
    os.system("clear")
    #print('\tNAME: ' + str(server.socket.gethostname()))
    #print('\tIP: ' + str(server.socket.gethostbyname(server.socket.gethostname())))
    #print('\tPORT:' + str(port))
    #print('\tSERVER: ' + str(emitterDomain))
    #server.accept()
    #data = server.recv()
    data = server.accept().recv()
    print('Msg Memory: ' + str(msgList))
    print('Socket Receive Data: ' + str(data))
    if data['act'] == 'send':
Example #10
File: worker.py Project: shavvn/dispytcher
class Worker(object):

    def __init__(self, host_name, port, key, num_slots):
        """Initialize a server process

        Arguments:
            host_name {string} -- host ip
            port {int} -- socket port
            key {str} -- shared key used to validate incoming messages
            num_slots {int} -- total number of slots for this worker

        Raises:
            OSError -- happens when resolving host or creating socket
        """
        host_ip = socket.gethostbyname(host_name)

        # docker daemon
        self.docker = docker.from_env()

        # socket server
        self.server = Server(host_ip, port)

        self._key = key

        self._total_slots = num_slots
        self._avail_slots = num_slots
        self._job_queue = []

        # Because it's highly likely to receive multiple jobs at once and each
        # job can take very long, we don't want to block subsequent jobs, so we
        # use one thread per received job and keep track of the threads in these
        # data structures, indexed by job name. We use {} instead of [] because
        # dicts are more robust for multi-threaded access.
        self._threads = {}
        self._thread_stops = {}
        self._running_jobs = {}

        # signal handler to kill the process
        signal.signal(signal.SIGINT, self._gracefully_exit)
        signal.signal(signal.SIGTERM, self._gracefully_exit)

        # image maintenance: image&tag as key, last updated time as value
        self._last_checked = {}

    def start(self):
        while True:
            self.server.accept()  # blocking
            try:
                data = self._recv()
            except (ValueError, OSError) as e:
                logging.error('Cannot recv data! Closing socket...')
                logging.error(str(e))
                # forcing client to close to free up resource
                continue
            except Exception as e:
                logging.error('Unexpected error!')
                logging.error(str(e))
                continue

            action = data.get('action')
            if action == 'stop':
                logging.info('stop all jobs')
                self.stop()
            elif action == 'run':
                try:
                    job = Job(data)
                except ValueError as err:
                    logging.error('received ill-formatted job info, ignoring: {}'.format(err))
                    continue

                logging.info('receiving job {}'.format(job.name))
                if job.slots > self._avail_slots:
                    logging.info('not enough slots, {} queued'.format(job.name))
                    self._job_queue.append(job)
                else:
                    self.run(job)
            elif action == 'report':
                self.report()
            elif action == 'retire':
                self._gracefully_exit(None, None)
            elif action == 'restart':
                self.restart()
            elif action == 'debug':
                logging.debug(data)
            else:
                continue

        return True

    def run(self, job):
        """Create a thread to run a job

        We need a thread because we don't want to block following jobs
        execute() is the thread target function

        Arguments:
            job {Job} -- Job object
        """
        # check if there is already a running container with same name
        if job.name in self._running_jobs:
            logging.error('name conflict for job {}'.format(job.name))
            return

        # check if image up to date before running...
        self.check_update_image(job.image)

        self._avail_slots -= job.slots
        logging.info(
            'start running {}, avail slots {}/{}'.format(
                job.name, self._avail_slots, self._total_slots))
        stop_event = threading.Event()
        thread = threading.Thread(
            target=self.execute,
            args=(job, stop_event,)
        )
        self._thread_stops[job.name] = stop_event
        self._threads[job.name] = thread
        thread.start()
        return

    def execute(self, job, stop_event):
        """Given the job data run the command

        Note this is completely running within one thread
        Arguments:
            job {Job} -- job object
            stop_event {threading.Event} -- an event/flag attached to each job
        """
        self._running_jobs[job.name] = job
        try:
            job.run(self.docker)
        except docker.errors.APIError as err:
            logging.error(err)
        else:
            job.wait()
            job.dump_logs()
            job.remove()

        if stop_event.is_set():
            return

        # normal finishing procedures
        self._avail_slots += job.slots
        self._running_jobs.pop(job.name)
        self._thread_stops.pop(job.name)
        self._threads.pop(job.name)
        logging.info('job {} done'.format(job.name))

        # check if there's queued job, strictly FIFO?
        if len(self._job_queue) > 0:
            if self._job_queue[0].slots <= self._avail_slots:
                logging.info('pop queued job {} from queue'.format(self._job_queue[0].name))
                self.run(self._job_queue.pop(0))

    def check_update_image(self, repo, interval=60):
        """Check if image is up to date, pull the latest from registry

        By default we pull an image from the registry if it hasn't been updated
        for a minute. The registry seems to return a different SHA for the
        same image than the local daemon does, so it's unclear whether an image
        can be checked for freshness quickly. Just brute-force it...

        Arguments:
            repo {str} -- repository, should include registry and tag!

        Keyword Arguments:
            interval {int} -- update interval in seconds (default: {60})
        """

        curr_time = int(time.time())
        last_time = self._last_checked.get(repo, 0)
        if curr_time - last_time > interval:
            try:
                self.docker.images.pull(repo)
            except Exception:
                logging.error('failed to pull image {} from registry! '
                              'Using old image if available!'.format(repo))
            else:
                logging.info('image {} pulled at {}'.format(
                    repo, curr_time))
                self._last_checked[repo] = curr_time
        else:
            logging.info('image {} updated within {}s, not pulling'.format(
                repo, interval))

    def stop(self):
        """Stop all jobs that are running and clear the ones are queued

        Eventually we want something that can stop one specific job
        but for now we just shut down everything that's running on this worker
        """
        # clean job queue
        self._job_queue.clear()

        # set stop flags for each active thread so that subsequent commands
        # will not be executed
        for event in self._thread_stops.values():
            event.set()
        # terminate all currently running processes
        for job in self._running_jobs.values():
            job.dump_logs()
            # no need to remove, already in detached mode
            job.stop()

        # this shouldn't do anything
        for thread in self._threads.values():
            thread.join()
        logging.info("all threads & processes stopped!")
        self._avail_slots = self._total_slots
        self._running_jobs.clear()
        self._thread_stops.clear()
        self._threads.clear()

    def restart(self):
        """Restart this process by starting a timed background process

        Does the following:
        - stop all running jobs on this worker
        - schedule a new worker process in 30s
        - retire this worker

        Primarily used for updating worker code, kinda hacky but works
        """
        logging.warning('restarting worker process...')
        self.stop()

        # hard code this
        config_path = os.path.join(os.environ['HOME'], '.worker.json')
        cmd = 'sleep 30 && ./worker.py {}'.format(config_path)
        subprocess.Popen(cmd, shell=True)

        self._gracefully_exit(None, None)

    def report(self):
        running_jobs = [j.name for j in self._running_jobs.values()]
        stat = {"running_jobs": running_jobs}
        stat['queued_jobs'] = [j.name for j in self._job_queue]

        if psutil:
            mem = psutil.virtual_memory()
            mega = 1024 * 1024
            stat['mem_total(MB)'] = round(mem.total / mega)
            stat['mem_available(MB)'] = round(mem.available / mega)
            stat['cpu_usage(%)'] = psutil.cpu_percent()
            stat['mem_usage(%)'] = mem.percent
        try:
            self.server.settimeout(3.0)
            self.server.send(stat)
            self.server.settimeout(None)
        except (ValueError, OSError) as e:
            logging.warn("cannot send report, continue operation")
            logging.warn(e.message)
        logging.info("stats sent out")
        return

    def _recv(self):
        """Customized receive function

        Returns:
            dict -- empty if key not match
        """
        data = self.server.recv()
        key = data.get('key')
        if key != self._key:
            logging.warn("key does not match!, ignore message")
            return {}
        else:
            return data

    def _gracefully_exit(self, signum, frame):
        logging.warn("gracefully shutting down...")
        self.stop()
        self.server.close()
        exit(0)
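
For context, a hypothetical dispatcher-side sketch of the message shape this Worker expects (a JSON object carrying 'key' and 'action'), again assuming the chainable jsonsocket Client API used elsewhere on this page; the host, port and key below are placeholders:

# Hypothetical dispatcher-side request; host, port and key are placeholders.
from jsonsocket import Client

client = Client()
client.connect('worker-host.example', 9999)
# 'key' must match the worker's shared key, otherwise _recv() ignores the message.
client.send({'key': 'shared-secret', 'action': 'report'})
print(client.recv())              # e.g. {"running_jobs": [...], "queued_jobs": [...], ...}
client.close()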
Example #11
#!/usr/bin/python

from jsonsocket import Client, Server

host = 'localhost'
port = 10000

server = Server(host, port)
while True:
    print 'Server listening...'
    server.accept()
    data = server.recv()
    print data
    if data:
        break
server.send({'data': data}).close()
Example #12
    for thread in threads:
        thread.start()
    while len(threads) > 0:
        # join basically waits on the threads so they don't block
        # one another. Threads that have already been joined or that
        # are None are filtered out
        threads = [t.join(3) for t in threads if t is not None and t.isAlive()]


if __name__ == '__main__':
    settings.init()

    # host = '10.13.100.83'
    host = 'localhost'
    port = 8001
    server = Server(host, port)
    threads = []

    conn = driver.Quanser("10.13.99.69", 20081)
    # conn = driver.Quanser("localhost", 20081)
    if conn == -1:
        print 'Could not establish communication.\nReturned -1'
    else:
        print 'Connected to the plant.'
    try:
        while True:
            main()
    except KeyboardInterrupt:
        print "Ctrl-c recebido! Enviando SIGINT para todas as threads..."
        for t in threads:
            t.kill_received = True
Example #13
#!/usr/bin/python

# Import socket
from jsonsocket import Server
from ClientThread import ClientThread

host = ''
port = 7878 
server = Server(host, port)


server.accept()
data = server.recv()
print data
server.send({'data': [123, 456]})
server.close()

'''
# threaded
while True:
	# Establish connection with client
	server.accept()   
	print 'Got connection from', server.client_addr
	thread=ClientThread(server.client)
	thread.run()
'''



Example #14
#!/usr/bin/python

# Import socket
from jsonsocket import Server
from ClientThread import ClientThread

host = ''
port = 7878
server = Server(host, port)

server.accept()
data = server.recv()
print data
server.send({'data': [123, 456]})
server.close()
'''
# threaded
while True:
	# Establish connection with client
	server.accept()   
	print 'Got connection from', server.client_addr
	thread=ClientThread(server.client)
	thread.run()
'''
Example #15
volt=0
servoAG=0
servoAK=0
servoBG=0
servoBK=0
servoU=0

gripA=0
gripB=0

lockA=0
lockB=0

timeard=0

server = Server(host, port)

logging.info('server open')
os.system("sudo python /home/pi/EuW/comtoard.py &")
print "test"
while True:
    
    server.accept()
    data = server.recv()
   
    if data["command"] == 'shutdown':
        print "Das Wesen schaltet seine Sicht ab"
        logging.info("Das Wesen schaltet seine Sicht ab")
        if statusrpi==0:
            statusrpi=1
        else: