Example #1
 def __init__(self, againAndAgainIterator, batchSize, toNumpyArray=True, logger=None, verbose=True):
     assert isinstance(againAndAgainIterator, AgainAndAgain)
     self.logger = logger
     self.verbose = verbose
     self.againAndAgainIterator = againAndAgainIterator
     self.toNumpyArray = toNumpyArray
     self.batchSize = batchSize
     self.currentGenerator = None
     self.tlock = TLock()
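Each example on this page creates its lock with the same alias, `from threading import Lock as TLock` (the import is visible verbatim in Example #10). For reference, here is a minimal self-contained sketch of the pattern these constructors set up, with a purely illustrative `BatchCounter` class: the lock is created in `__init__` and later guards shared mutable state.

from threading import Lock as TLock

class BatchCounter:
    """Illustrative only: stores a TLock the way the constructors above do."""

    def __init__(self):
        self.count = 0
        self.tlock = TLock()

    def increment(self):
        # Guard the shared counter so concurrent increments don't interleave.
        with self.tlock:
            self.count += 1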
Example #2
 def init(self):
     if self.seed is not None:
         random.seed(self.seed)
     log(str(len(self.containers)) + " containers to process.", self)
     self.pbar = ProgressBar(len(self.containers), logger=self.logger, verbose=self.verbose, printRatio=self.printRatio)
     self.processes = [None] * self.parallelProcesses
     self.queues = [None] * self.parallelProcesses
     self.currentIndex = 0
     self.containersQueue = queue.Queue() # WARNING: don't use the multiprocessing Queue here but queue.Queue, because the multiprocessing queue's `put` method is async...
     self.tlock = TLock()
     # self.mplock = MPLock()
     for c in self.containers:
         self.containersQueue.put(c)
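The warning in the comment above points at a real difference: `multiprocessing.Queue.put` hands items to a background feeder thread, so the queue can still look empty right after `put` returns, while `queue.Queue.put` takes effect synchronously in-process. A small illustration of the in-process behaviour this init relies on (the names are hypothetical, not part of the class):

import queue

containers = ['c1', 'c2', 'c3']
containersQueue = queue.Queue()
for c in containers:
    containersQueue.put(c)

# With queue.Queue the items are visible as soon as put() returns.
assert containersQueue.qsize() == len(containers)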
Example #3
    def __init__(self, zk_servers, queue_name):
        self._queue_name = queue_name
        self.kz_ses = KazooClient(zk_servers)
        self.kz_ses.start()

        self.servers = zk_servers

        self._kz_queue = self.kz_ses.LockingQueue(
            str(self.path_factory.queue.kz_queue()))

        self._tlock = TLock()
        self._rlock_cache = WeakValueDictionary()  # {<rlock_name>: RLock}

        self._make_paths()
Example #4
def add_class_lock(cls, lock_name='lock', isasync=False, lock_object=None):
    """Adds additional lock property *lock_name* to class *cls*.

    If *isasync* is ``True``, the lock property is of `asyncio.Lock` type.
    Otherwise (the default), it's of `threading.Lock` type.

    It can be used as follows:

    from junky import add_class_lock
    from pkg import Cls
    add_class_lock(Cls)

    o = Cls()
    #async with o.lock:  # if isasync is True
    with o.lock:         # if isasync is False (default)
        # some thread safe operations here
        pass

    Also, you can add the lock to a particular object directly:

    o = add_class_lock(Cls())

    If you need to, you can use your own lock object. Use the *lock_object* param
    for that. In that case, the *isasync* param is ignored.
    """
    if not lock_object:
        lock_object = ALock() if isasync else TLock()
    '''
    _code = cls.__init__.__code__
    co_varnames, co_argcount = _code.co_varnames, _code.co_argcount
    #co_kwonlyargcount = _code.co_kwonlyargcount
    _defaults = cls.__init__.__defaults__
    #_kwdefaults = cls.__init__.__kwdefaults__
    Cls = type('Cls', (cls,),
               {x: _defaults[i] for i, x in \
                    enumerate(co_varnames[co_argcount - len(_defaults) \
                             :co_argcount])})
    setattr(Cls, lock_name, property(lambda self: lock))
    return Cls
    '''
    setattr(cls, lock_name,
            property(lambda self: lock_object) if isinstance(cls, type) else
                     lock_object)
    return cls
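A hedged usage sketch of `add_class_lock` as described by its docstring, exercising the default `threading.Lock` variant from several threads; the `Account` class and the thread count are illustrative assumptions, not part of the original:

from threading import Thread

class Account:
    def __init__(self):
        self.balance = 0

add_class_lock(Account)  # every Account instance now exposes a .lock property

acct = Account()

def deposit():
    for _ in range(10000):
        with acct.lock:  # threading.Lock by default (isasync=False)
            acct.balance += 1

threads = [Thread(target=deposit) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert acct.balance == 40000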
Example #5
    def __init__(self,
                 persist_mode=False,
                 key_prefix='',
                 min_cache_time=5,
                 force_cache_time=False,
                 base='store',
                 path='cache',
                 redis_host='localhost',
                 redis_port=6379,
                 redis_db=1,
                 redis_file=None):
        self.__key_prefix = key_prefix
        self.__cache_key = '{}:cache'.format(key_prefix)
        self.__persist_mode = persist_mode
        self.__min_cache_time = min_cache_time
        self.__force_cache_time = force_cache_time
        self.__base_path = base
        self.__resource_cache = get_triple_store()
        self._r = get_kv(persist_mode,
                         redis_host,
                         redis_port,
                         redis_db,
                         redis_file,
                         base=base,
                         path=path)
        self.__lock = Lock(self._r, key_prefix)
        self.__mlock = TLock()
        self.__memory_graphs = {}
        self.__memory_order = []

        self.__resources_ts = {}

        # Clean temporal folders under 'base' (others than 'path' subfolder)
        for sub in filter(lambda x: x != path,
                          get_immediate_subdirectories(base)):
            shutil.rmtree('{}/{}'.format(self.__base_path, sub))

        for lock_key in self._r.keys('{}:l*'.format(self.__key_prefix)):
            self._r.delete(lock_key)

        self.__enabled = True
        self.__purge_th = Thread(target=self.__purge)
        self.__purge_th.daemon = True
        self.__purge_th.start()
Example #6
    def __init__(self,
                 persist_mode=None,
                 key_prefix='',
                 min_cache_time=5,
                 force_cache_time=False,
                 base='store',
                 path='cache',
                 redis_host='localhost',
                 redis_port=6379,
                 redis_db=1,
                 redis_file=None,
                 graph_memory_limit=5000):
        self.__key_prefix = key_prefix
        self.__cache_key = '{}:cache'.format(key_prefix)
        self.__persist_mode = persist_mode
        self.__min_cache_time = min_cache_time
        self.__force_cache_time = force_cache_time
        self.__base_path = base
        self._r = get_kv(persist_mode,
                         redis_host,
                         redis_port,
                         redis_db,
                         redis_file,
                         base=base,
                         path=path)
        self.__lock = Lock(self._r, key_prefix)
        self.__mlock = TLock()
        self.__graph_memory_limit = graph_memory_limit
        self.__memory_graphs = {}
        self.__memory_order = []

        self.__resources_ts = {}

        for lock_key in self._r.keys('{}:l*'.format(self.__key_prefix)):
            self._r.delete(lock_key)

        self._r.delete(key_prefix)

        self.__enabled = True
        self.__purge_th = Thread(target=self.__purge)
        self.__purge_th.daemon = True
        self.__purge_th.start()
Example #7
    def __init__(self, max_num_of_process, name='process'):
        self.max_num_of_process = max(1, max_num_of_process)
        self._spawned = []
        self._name = name
        self._total_spawned = 0
        self._stop = False

        # _objects is a dictionary that links some global objects to the list of
        # processes that depend on each object. This ensures that the garbage
        # collector does not remove such an object until the processes that are
        # using it have finished their execution
        self._objects = {}
        self._objects_lock = TLock()

        self._process_cleaner = ProcessCleaner(
            self._spawned,
            self._objects,
            self._objects_lock,
        )
        self._process_cleaner.start()
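The `_objects` registry described in the comment is only initialised here; a sketch of how such a registry might be updated under `_objects_lock` (these helper methods are hypothetical and assume the registered objects are hashable):

    def _register(self, obj, process):
        # Keep a strong reference to `obj` for as long as `process` needs it.
        with self._objects_lock:
            self._objects.setdefault(obj, []).append(process)

    def _unregister(self, obj, process):
        # Drop the reference once no spawned process depends on `obj` any more.
        with self._objects_lock:
            processes = self._objects.get(obj, [])
            if process in processes:
                processes.remove(process)
            if not processes:
                self._objects.pop(obj, None)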
Example #8
 def __init__(
     self,
     againAndAgainIterator,
     batchSize=128,
     skip=0,
     shuffle=0,
     seed=0,
     toNumpyArray=True,
     queueSize=1,
     logger=None,
     verbose=True,
 ):
     # assert isinstance(againAndAgainIterator, AgainAndAgain) or isinstance(againAndAgainIterator, list)
     # if isinstance(againAndAgainIterator, list):
     #     againAndAgainIterator = iter(againAndAgainIterator)
     assert isinstance(againAndAgainIterator, AgainAndAgain)
     self.logger = logger
     self.verbose = verbose
     self.skip = skip
     if self.skip is None:
         self.skip = 0
     self.shuffle = shuffle
     if self.shuffle is None or self.shuffle < 0:
         self.shuffle = 0
     self.againAndAgainIterator = againAndAgainIterator
     self.toNumpyArray = toNumpyArray
     self.batchSize = batchSize
     self.currentGenerator = None
     self.tlock = TLock()
     self.queueSize = queueSize
     if self.queueSize < 1:
         self.queueSize = 1
     self.seed = seed
     self.rd = random.Random(self.seed)
     self.queue = queue.Queue()
Example #9
    def _handle_req(self, path, args):
        if path[0] == 'status':
            data = job_tracking.get(args.get('subjob_cookie') or None)
            if not data:
                self.do_response(500, 'text/plain', 'bad subjob_cookie!\n')
                return
            timeout = min(float(args.get('timeout', 0)), 128)
            status = DotDict(idle=data.lock.acquire(False))
            deadline = time.time() + timeout
            while not status.idle and time.time() < deadline:
                time.sleep(0.1)
                status.idle = data.lock.acquire(False)
            if status.idle:
                if data.last_error:
                    status.last_error = data.last_error
                    data.last_error = None
                else:
                    status.last_time = data.last_time
                data.lock.release()
            elif path == ['status', 'full']:
                status.status_stacks, status.current = status_stacks_export()
            self.do_response(200, "text/json", status)
            return

        elif path == ['list_workspaces']:
            ws = {k: v.path for k, v in self.ctrl.list_workspaces().items()}
            self.do_response(200, "text/json", ws)

        elif path == ['config']:
            self.do_response(200, "text/json", self.ctrl.config)

        elif path == ['update_methods']:
            self.do_response(200, "text/json", self.ctrl.update_methods())

        elif path == ['methods']:
            """ return a json with everything the Method object knows about the methods """
            self.do_response(200, "text/json", self.ctrl.get_methods())

        elif path[0] == 'method_info':
            method = path[1]
            self.do_response(200, "text/json", self.ctrl.method_info(method))

        elif path[0] == 'workspace_info':
            self.do_response(200, 'text/json',
                             self.ctrl.get_workspace_details())

        elif path[0] == 'abort':
            tokill = list(children)
            print('Force abort', tokill)
            for child in tokill:
                os.killpg(child, signal.SIGKILL)
            self.do_response(200, 'text/json', {'killed': len(tokill)})

        elif path == ['submit']:
            if self.ctrl.broken:
                self.do_response(
                    500, "text/json", {
                        'broken':
                        self.ctrl.broken,
                        'error':
                        'Broken methods: ' + ', '.join(
                            sorted(
                                m.split('.')[-1][2:]
                                for m in self.ctrl.broken))
                    })
            elif 'xml' in args:
                self.do_response(500, 'text/plain', 'JSON > XML!\n')
            elif 'json' in args:
                if DEBUG_WRITE_JSON:
                    with open('DEBUG_WRITE.json', 'wb') as fh:
                        fh.write(args['json'])
                setup = json_decode(args['json'])
                data = job_tracking.get(setup.get('subjob_cookie') or None)
                if not data:
                    self.do_response(500, 'text/plain', 'bad subjob_cookie!\n')
                    return
                if len(job_tracking) - 1 > 5:  # max five levels
                    print('Too deep subjob nesting!')
                    self.do_response(500, 'text/plain',
                                     'Too deep subjob nesting')
                    return
                if data.lock.acquire(False):
                    respond_after = True
                    try:
                        if self.DEBUG:
                            print('@daemon.py:  Got the lock!',
                                  file=sys.stderr)
                        jobidv, job_res = self.ctrl.initialise_jobs(setup)
                        job_res['done'] = False
                        if jobidv:
                            error = []
                            tlock = TLock()
                            link2job = {
                                j['link']: j
                                for j in job_res['jobs'].values()
                            }

                            def run(jobidv, tlock):
                                for jobid in jobidv:
                                    passed_cookie = None
                                    # This is not a race - all higher locks are locked too.
                                    while passed_cookie in job_tracking:
                                        passed_cookie = gen_cookie()
                                    job_tracking[passed_cookie] = DotDict(
                                        lock=JLock(),
                                        last_error=None,
                                        last_time=0)
                                    try:
                                        self.ctrl.run_job(
                                            jobid,
                                            subjob_cookie=passed_cookie,
                                            parent_pid=setup.get(
                                                'parent_pid', 0))
                                        # update database since a new jobid was just created
                                        job = self.ctrl.add_single_jobid(jobid)
                                        with tlock:
                                            link2job[jobid]['make'] = 'DONE'
                                            link2job[jobid][
                                                'total_time'] = job.total
                                    except JobError as e:
                                        error.append(
                                            [e.jobid, e.method, e.status])
                                        with tlock:
                                            link2job[jobid]['make'] = 'FAIL'
                                        return
                                    finally:
                                        del job_tracking[passed_cookie]
                                # everything was built ok, update symlink
                                try:
                                    wn = self.ctrl.target_workdir
                                    dn = self.ctrl.workspaces[wn].path
                                    ln = os.path.join(dn, wn + "-LATEST_")
                                    try:
                                        os.unlink(ln)
                                    except OSError:
                                        pass
                                    os.symlink(jobid, ln)
                                    os.rename(ln,
                                              os.path.join(dn, wn + "-LATEST"))
                                except OSError:
                                    traceback.print_exc()

                            t = Thread(target=run,
                                       name="job runner",
                                       args=(
                                           jobidv,
                                           tlock,
                                       ))
                            t.daemon = True
                            t.start()
                            t.join(2)  # give job two seconds to complete
                            with tlock:
                                for j in link2job.values():
                                    if j['make'] in (
                                            True,
                                            'FAIL',
                                    ):
                                        respond_after = False
                                        job_res_json = json_encode(job_res)
                                        break
                            if not respond_after:  # not all jobs are done yet, give partial response
                                self.do_response(200, "text/json",
                                                 job_res_json)
                            t.join()  # wait until actually complete
                            del tlock
                            del t
                            # verify that all jobs got built.
                            total_time = 0
                            for j in link2job.values():
                                jobid = j['link']
                                if j['make'] == True:
                                    # Well, crap.
                                    error.append([
                                        jobid, "unknown", {
                                            "INTERNAL": "Not built"
                                        }
                                    ])
                                    print("INTERNAL ERROR IN JOB BUILDING!",
                                          file=sys.stderr)
                                total_time += j.get('total_time', 0)
                            data.last_error = error
                            data.last_time = total_time
                    except Exception as e:
                        if respond_after:
                            self.do_response(500, "text/json",
                                             {'error': str(e)})
                        raise
                    finally:
                        data.lock.release()
                    if respond_after:
                        job_res['done'] = True
                        self.do_response(200, "text/json", job_res)
                    if self.DEBUG:
                        print("@daemon.py:  Process releases lock!",
                              file=sys.stderr
                              )  # note: has already done http response
                else:
                    self.do_response(200, 'text/plain',
                                     'Busy doing work for you...\n')
            else:
                self.do_response(500, 'text/plain', 'Missing json input!\n')
        else:
            self.do_response(500, 'text/plain', 'Unknown path\n')
            return
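Stripped of the job-server specifics, the submit branch above follows a reusable pattern: run the work in a daemon thread, share its status under `tlock`, join with a short timeout, and answer early if it is still running. A minimal sketch of just that pattern (all names are illustrative):

import time
from threading import Lock as TLock, Thread

status = {'done': False}
tlock = TLock()

def run():
    time.sleep(5)  # stands in for the real job
    with tlock:
        status['done'] = True

t = Thread(target=run, name="job runner")
t.daemon = True
t.start()
t.join(2)  # give the job two seconds to complete

with tlock:
    finished = status['done']
if not finished:
    print('still running, sending a partial response')
t.join()  # wait until actually complete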
Example #10
#  akangatu is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with akangatu.  If not, see <http://www.gnu.org/licenses/>.
#

import json
from contextlib import contextmanager
from multiprocessing import Lock as PLock
from threading import Lock as TLock

# Some global state, so we need to make it both thread- and process-safe, via a context manager.
threadLock = TLock()
processLock = PLock()


@contextmanager
def safety():
    with threadLock, processLock:
        yield


try:
    with open("config.json", "r") as f:
        CONFIG = json.load(f)
except FileNotFoundError:
    CONFIG = {}
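A short usage sketch for the `safety()` helper above; the `set_option` function is hypothetical and only shows the lock being taken around an update of the shared `CONFIG` dict:

def set_option(key, value):
    # Hold both the thread lock and the process lock while mutating shared state.
    with safety():
        CONFIG[key] = value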
Example #11
    def _handle_req(self, path, args):
        if path[0] == 'status':
            data = job_tracking.get(args.get('subjob_cookie') or None)
            if not data:
                self.do_response(400, 'text/plain', 'bad subjob_cookie!\n')
                return
            timeout = min(float(args.get('timeout', 0)), 128)
            status = DotDict(idle=data.lock.acquire(False))
            deadline = monotonic() + timeout
            while not status.idle and monotonic() < deadline:
                time.sleep(0.1)
                status.idle = data.lock.acquire(False)
            if status.idle:
                if data.last_error:
                    status.last_error_time = data.last_error[0]
                status.last_time = data.last_time
                data.lock.release()
            elif path == ['status', 'full']:
                status.status_stacks, status.current = status_stacks_export()
            status.report_t = monotonic()
            self.do_response(200, "text/json", status)
            return

        elif path == ['last_error']:
            data = job_tracking.get(args.get('subjob_cookie') or None)
            if not data:
                self.do_response(400, 'text/plain', 'bad subjob_cookie!\n')
                return
            status = DotDict()
            if data.last_error:
                status.time = data.last_error[0]
                status.last_error = data.last_error[1]
            self.do_response(200, "text/json", status)
            return

        elif path == ['list_workdirs']:
            ws = {k: v.path for k, v in self.ctrl.list_workdirs().items()}
            self.do_response(200, "text/json", ws)

        elif path[0] == 'workdir':
            self.do_response(200, "text/json",
                             self.ctrl.DataBase.db_by_workdir[path[1]])

        elif path == ['config']:
            self.do_response(200, "text/json", self.ctrl.config)

        elif path == ['update_methods']:
            self.do_response(200, "text/json", self.ctrl.update_methods())

        elif path == ['methods']:
            """ return a json with everything the Method object knows about the methods """
            self.do_response(200, "text/json", self.ctrl.get_methods())

        elif path[0] == 'method_info':
            method = path[1]
            self.do_response(200, "text/json", self.ctrl.method_info(method))

        elif path[0] == 'workspace_info':
            self.do_response(200, 'text/json',
                             self.ctrl.get_workspace_details())

        elif path[0] == 'abort':
            tokill = list(children)
            print('Force abort', tokill)
            for child in tokill:
                os.killpg(child, signal.SIGKILL)
            self.do_response(200, 'text/json', {'killed': len(tokill)})

        elif path[0] == 'method2job':
            method, num = path[1:]
            jobs = self.ctrl.DataBase.db_by_method.get(method, ())
            start_ix = 0
            start_from = args.get('start_from')
            if start_from:
                for start_ix, job in enumerate(jobs):
                    if job.id == start_from:
                        break
                else:
                    start_ix = None
            if start_ix is None:
                res = {
                    'error': '%s is not a current %s job' % (
                        start_from,
                        method,
                    )
                }
            else:
                num = int(num)
                if not jobs:
                    res = {
                        'error':
                        'no current jobs with method %s available' % (method, )
                    }
                elif num + start_ix >= len(jobs):
                    res = {
                        'error':
                        'tried to go %d jobs back from %s, but only %d earlier (current) jobs available'
                        % (
                            num,
                            jobs[start_ix].id,
                            len(jobs) - start_ix - 1,
                        )
                    }
                else:
                    res = {'id': jobs[num + start_ix].id}
            self.do_response(200, 'text/json', res)

        elif path[0] == 'job_is_current':
            job = Job(path[1])
            job = self.ctrl.DataBase.db_by_workdir[job.workdir].get(job)
            self.do_response(200, 'text/json', bool(job and job['current']))

        elif path == ['submit']:
            if self.ctrl.broken:
                self.do_response(
                    500, "text/json", {
                        'broken':
                        self.ctrl.broken,
                        'error':
                        'Broken methods: ' + ', '.join(
                            sorted(
                                m.split('.')[-1][2:]
                                for m in self.ctrl.broken))
                    })
            elif 'json' in args:
                if DEBUG_WRITE_JSON:
                    with open('DEBUG_WRITE.json', 'wb') as fh:
                        fh.write(args['json'])
                setup = json_decode(args['json'])
                data = job_tracking.get(setup.get('subjob_cookie') or None)
                if not data:
                    self.do_response(403, 'text/plain', 'bad subjob_cookie!\n')
                    return
                if len(job_tracking) - 1 > 5:  # max five levels
                    print('Too deep subjob nesting!')
                    self.do_response(403, 'text/plain',
                                     'Too deep subjob nesting')
                    return
                if data.lock.acquire(False):
                    still_locked = True
                    respond_after = True
                    try:
                        if self.DEBUG:
                            print('@server.py:  Got the lock!',
                                  file=sys.stderr)
                        workdir = setup.get('workdir', data.workdir)
                        jobidv, job_res = self.ctrl.initialise_jobs(
                            setup, workdir)
                        job_res['done'] = False
                        if jobidv:
                            error = []
                            tlock = TLock()
                            link2job = {
                                j['link']: j
                                for j in job_res['jobs'].values()
                            }

                            def run(jobidv, tlock):
                                for jobid in jobidv:
                                    passed_cookie = None
                                    # This is not a race - all higher locks are locked too.
                                    while passed_cookie in job_tracking:
                                        passed_cookie = gen_cookie()
                                    concurrency_map = dict(
                                        data.concurrency_map)
                                    concurrency_map.update(
                                        setup.get('concurrency_map', ()))
                                    job_tracking[passed_cookie] = DotDict(
                                        lock=JLock(),
                                        last_error=None,
                                        last_time=0,
                                        workdir=workdir,
                                        concurrency_map=concurrency_map,
                                    )
                                    try:
                                        explicit_concurrency = setup.get(
                                            'concurrency'
                                        ) or concurrency_map.get(setup.method)
                                        concurrency = explicit_concurrency or concurrency_map.get(
                                            '-default-')
                                        if concurrency and setup.method == 'csvimport':
                                            # just to be safe, check the package too
                                            if load_setup(
                                                    jobid
                                            ).package == 'accelerator.standard_methods':
                                                # ignore default concurrency, error on explicit.
                                                if explicit_concurrency:
                                                    raise JobError(
                                                        jobid, 'csvimport', {
                                                            'server':
                                                            'csvimport can not run with reduced concurrency'
                                                        })
                                                concurrency = None
                                        self.ctrl.run_job(
                                            jobid,
                                            subjob_cookie=passed_cookie,
                                            parent_pid=setup.get(
                                                'parent_pid', 0),
                                            concurrency=concurrency)
                                        # update database since a new jobid was just created
                                        job = self.ctrl.add_single_jobid(jobid)
                                        with tlock:
                                            link2job[jobid]['make'] = 'DONE'
                                            link2job[jobid][
                                                'total_time'] = job.total
                                    except JobError as e:
                                        error.append(
                                            [e.job, e.method, e.status])
                                        with tlock:
                                            link2job[jobid]['make'] = 'FAIL'
                                        return
                                    finally:
                                        del job_tracking[passed_cookie]
                                # everything was built ok, update symlink
                                try:
                                    dn = self.ctrl.workspaces[workdir].path
                                    ln = os.path.join(dn, workdir + "-LATEST_")
                                    try:
                                        os.unlink(ln)
                                    except OSError:
                                        pass
                                    os.symlink(jobid, ln)
                                    os.rename(
                                        ln,
                                        os.path.join(dn, workdir + "-LATEST"))
                                except OSError:
                                    traceback.print_exc(file=sys.stderr)

                            t = Thread(target=run,
                                       name="job runner",
                                       args=(
                                           jobidv,
                                           tlock,
                                       ))
                            t.daemon = True
                            t.start()
                            t.join(2)  # give job two seconds to complete
                            with tlock:
                                for j in link2job.values():
                                    if j['make'] in (
                                            True,
                                            'FAIL',
                                    ):
                                        respond_after = False
                                        job_res_json = json_encode(job_res)
                                        break
                            if not respond_after:  # not all jobs are done yet, give partial response
                                self.do_response(200, "text/json",
                                                 job_res_json)
                            t.join()  # wait until actually complete
                            del tlock
                            del t
                            # verify that all jobs got built.
                            total_time = 0
                            for j in link2job.values():
                                jobid = j['link']
                                if j['make'] == True:
                                    # Well, crap.
                                    error.append([
                                        jobid, "unknown", {
                                            "INTERNAL": "Not built"
                                        }
                                    ])
                                    print("INTERNAL ERROR IN JOB BUILDING!",
                                          file=sys.stderr)
                                total_time += j.get('total_time', 0)
                            if error:
                                data.last_error = (time.time(), error)
                            data.last_time = total_time
                    except Exception as e:
                        if respond_after:
                            data.lock.release()
                            still_locked = False
                            self.do_response(500, "text/json",
                                             {'error': str(e)})
                        raise
                    finally:
                        if still_locked:
                            data.lock.release()
                    if respond_after:
                        job_res['done'] = True
                        self.do_response(200, "text/json", job_res)
                    if self.DEBUG:
                        print("@server.py:  Process releases lock!",
                              file=sys.stderr
                              )  # note: has already done http response
                else:
                    self.do_response(503, 'text/plain',
                                     'Busy doing work for you...\n')
            else:
                self.do_response(400, 'text/plain', 'Missing json input!\n')
        else:
            self.do_response(404, 'text/plain', 'Unknown path\n')
            return
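Compared with Example #9, this version adds a `still_locked` flag so the job-tracking lock is released exactly once even when an error response is sent before the `finally` clause runs. A condensed sketch of that release discipline, with illustrative names:

from threading import Lock as TLock

lock = TLock()

def handle(fail=False):
    if not lock.acquire(False):
        return 'busy'
    still_locked = True
    try:
        if fail:
            lock.release()       # respond to the client first...
            still_locked = False
            raise RuntimeError('job failed')  # ...then propagate the error
        return 'ok'
    finally:
        if still_locked:
            lock.release()       # normal path: release exactly once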
Example #12
    def __init__(self, teamFilename, redTeam, blueTeam):
        """Initializing with csv file to read for team information, and which of
        those teams are playing

        Args:
            teamFilename (str): Path to the CSV file with team information
            redTeam (int): Team number 1-12
            blueTeam (int): Team number 1-12
        """

        self.params = RoBAParams()

        self.allTeams = self.load_team_csv(teamFilename)
        self.redTeam = self.allTeams[redTeam - 1]
        self.blueTeam = self.allTeams[blueTeam - 1]

        # Give the active teams hitqs
        self.redTeam.hitQ = HitQueue()
        self.blueTeam.hitQ = HitQueue()

        # Set the active teams' colors
        self.redTeam.set_color('Red')
        self.blueTeam.set_color('Blue')

        self.teams = [self.redTeam, self.blueTeam]

        # Use this shallow copy of robots for syncing
        self.robots = self.redTeam.robots + self.blueTeam.robots
        self.nexuses = [self.redTeam.nexus, self.blueTeam.nexus]

        self.redTeam.nexus.ID = 80
        self.blueTeam.nexus.ID = 81

        self.redTeam.nexus.IP = self.params.ipSubnet + '80'  # (Added - 12 Nov 2019 - Aslamah)
        self.blueTeam.nexus.IP = self.params.ipSubnet + '81'  # (Added - 12 Nov 2019 - Aslamah)

        self.towers = [
            Tower(self.params.towerDPS, ID=98),
            Tower(self.params.towerDPS, ID=99)
        ]

        self.autonomousMode = self.params.autonomousStartEnabled
        self.isGameOn = False
        self.isGameStarted = False

        self.lock = TLock()

        self.lastHeartbeat = time.time()
        self.hbDelay = 1
        #self.sync = 0 (Removed - 5 Nov 2019 - Aslamah)
        self.sync = 1  # (Added - 11 Nov 2019 - Aslamah)
        #self.forceSync = 0 (Removed - 5 Nov 2019 - Aslamah)
        self.demandReset = 0

        self.gameStartTime = datetime.now()

        self.debugFolder = "./debuglogs/" + time.strftime(
            "%Y-%m-%d %H.%M.%S") + "mteams_%d_%d" % (
                self.redTeam.number, self.blueTeam.number) + "/"
        os.mkdir(self.debugFolder)
        self.reset()
        self.logL = LogLoop('arenaLog.txt')
Example #13
 def __init__(self, client, path, identifier=None):
     super(RLock, self).__init__(client, path, identifier)
     self._reference_lock = TLock()
     self._lock_holding_thread = None
     self._reference_count = 0
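The three attributes initialised in Example #13 are the usual ingredients of a re-entrant wrapper around a non-re-entrant lock: the owning thread, a reference count, and a plain `TLock` protecting both. A standalone sketch of that idea follows; it is not the actual class above, whose acquire and release go through the ZooKeeper `client` and `path`:

from threading import Lock as TLock, current_thread

class ReentrantWrapper:
    def __init__(self, inner_lock):
        self._inner = inner_lock            # stands in for the distributed lock
        self._reference_lock = TLock()
        self._lock_holding_thread = None
        self._reference_count = 0

    def acquire(self):
        with self._reference_lock:
            if self._lock_holding_thread is current_thread():
                self._reference_count += 1  # re-entrant acquisition
                return True
        self._inner.acquire()               # first acquisition by this thread
        with self._reference_lock:
            self._lock_holding_thread = current_thread()
            self._reference_count = 1
        return True

    def release(self):
        with self._reference_lock:
            self._reference_count -= 1
            if self._reference_count == 0:
                self._lock_holding_thread = None
                self._inner.release()       # last release frees the inner lock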