Example #1
    def __init__(self):

        # set private values
        self.config = Config(workpath)
        self.pid = os.getpid()
        self.pname = 'CrontabService.py'

        # logger initial
        self.logger_init()

        # lock initial
        self.lockObj = Lock(self.pname, self.pid, self.config.LOCK_DIR,
                            self.config.LOCK_FILE, self.logger)

        # debug output
        self.logger.debug('Crontab Initial')
        self.logger.debug('[SERVICE_INTERVAL][%s]' %
                          (self.config.SERVICE_INTERVAL))
        self.logger.debug('[CRONTAB_CFG_DIR][%s]' %
                          (self.config.CRONTAB_CFG_DIR))
        self.logger.debug('[CRONTAB_CFG_FILE][%s]' %
                          (self.config.CRONTAB_CFG_FILE))
        self.logger.debug('[MAX_THREADS][%s]' % (self.config.MAX_THREADS))
        self.logger.debug('[THREAD_TIMEOUT][%s]' %
                          (self.config.THREAD_TIMEOUT))
        self.logger.debug('[LOCK_DIR][%s]' % (self.config.LOCK_DIR))
        self.logger.debug('[LOCK_FILE][%s]' % (self.config.LOCK_FILE))
        self.logger.debug('[LOG_DIR][%s]' % (self.config.LOG_DIR))
        self.logger.debug('[LOG_FILE][%s]' % (self.config.LOG_FILE))
        self.logger.debug('[LOG_LEVEL][%s]' % (self.config.LOG_LEVEL))
        self.logger.debug('[LOG_MAX_SIZE][%s]' % (self.config.LOG_MAX_SIZE))
        self.logger.debug('[LOG_BACKUP_COUNT][%s]' %
                          (self.config.LOG_BACKUP_COUNT))

        return (None)
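The Lock class that Examples #1, #2, #3, #16, #23, and #29 instantiate with (pname, pid, LOCK_DIR, LOCK_FILE, logger) is not included in this listing. Below is a minimal pid-file sketch consistent with that constructor call; the acquire/release method names and the exit-on-conflict behaviour are assumptions (later examples call either release(), in Example #23, or lock_release(), in Example #29).

# Hypothetical pid-file Lock matching the call
# Lock(pname, pid, LOCK_DIR, LOCK_FILE, logger); not the original module.
import os


class Lock(object):
    def __init__(self, pname, pid, lock_dir, lock_file, logger):
        self.pname = pname
        self.pid = pid
        self.lock_path = os.path.join(lock_dir, lock_file)
        self.logger = logger
        self.acquire()

    def acquire(self):
        # Refuse to start a second instance if the pid file already exists.
        if os.path.exists(self.lock_path):
            self.logger.error('%s already running, lock file %s exists',
                              self.pname, self.lock_path)
            raise SystemExit(1)
        with open(self.lock_path, 'w') as fh:
            fh.write(str(self.pid))

    def release(self):
        # Remove the pid file so the next run can acquire the lock again.
        if os.path.exists(self.lock_path):
            os.remove(self.lock_path)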
Example #2
    def __init__(self):
        ## set private values
        self.config = Config(workpath)
        self.pid = os.getpid()
        self.pname = 'Asset.py'

        ## logger initial
        self.loggerInit()

        ## lock initial
        self.lockObj = Lock(self.pname, self.pid, self.config.LOCK_DIR,
                            self.config.LOCK_FILE, self.logger)

        ## debug output
        self.logger.debug('Asset Initial Start')
        self.logger.debug('[SYS_CIS][%s]' % (self.config.SYS_CIS))
        self.logger.debug('[SYS_SAVE_CSV][%s]' % (self.config.SYS_SAVE_CSV))
        self.logger.debug('[SYS_CSV_DIR][%s]' % (self.config.SYS_CSV_DIR))
        self.logger.debug('[MQ_SERVERS][%s]' % (self.config.MQ_SERVERS))
        self.logger.debug('[MQ_PORT][%s]' % (self.config.MQ_PORT))
        self.logger.debug('[MQ_QUEUE][%s]' % (self.config.MQ_QUEUE))
        self.logger.debug('[SUBPROC_SCRIPTSDIR][%s]' %
                          (self.config.SUBPROC_SCRIPTSDIR))
        self.logger.debug('[SUBPROC_TIMEOUT][%s]' %
                          (self.config.SUBPROC_TIMEOUT))
        self.logger.debug('[LOCK_DIR][%s]' % (self.config.LOCK_DIR))
        self.logger.debug('[LOCK_FILE][%s]' % (self.config.LOCK_FILE))
        self.logger.debug('[LOG_DIR][%s]' % (self.config.LOG_DIR))
        self.logger.debug('[LOG_FILE][%s]' % (self.config.LOG_FILE))
        self.logger.debug('[LOG_LEVEL][%s]' % (self.config.LOG_LEVEL))
        self.logger.debug('[LOG_MAX_SIZE][%s]' % (self.config.LOG_MAX_SIZE))
        self.logger.debug('[LOG_BACKUP_COUNT][%s]' %
                          (self.config.LOG_BACKUP_COUNT))
        self.logger.debug('Asset Initial Done')
Example #3
    def __init__(self):
        ## set private values
        self.config = Config(workpath)
        self.pid = os.getpid()
        self.pname = 'Scheduler.py'

        ## logger initial
        self.loggerInit()

        ## lock initial
        self.lockObj = Lock(self.pname, self.pid, self.config.LOCK_DIR,
                            self.config.LOCK_FILE, self.logger)

        ## debug output
        self.logger.debug('Scheduler Initial Start')
        self.logger.debug('[SYS_CFG_DIR][%s]' % (self.config.SYS_CFG_DIR))
        self.logger.debug('[LOCK_DIR][%s]' % (self.config.LOCK_DIR))
        self.logger.debug('[LOCK_FILE][%s]' % (self.config.LOCK_FILE))
        self.logger.debug('[LOG_DIR][%s]' % (self.config.LOG_DIR))
        self.logger.debug('[LOG_FILE][%s]' % (self.config.LOG_FILE))
        self.logger.debug('[LOG_LEVEL][%s]' % (self.config.LOG_LEVEL))
        self.logger.debug('[LOG_MAX_SIZE][%s]' % (self.config.LOG_MAX_SIZE))
        self.logger.debug('[LOG_BACKUP_COUNT][%s]' %
                          (self.config.LOG_BACKUP_COUNT))
        self.logger.debug('Scheduler Initial Done')
Example #4
 def init(self):
     self.lock = Lock()
     self.taskManager = []
     self.tmpTaskManger = []
     self.delayTaskManager = []
     self.delayTmpTaskManager = []
     self.taskLock = Lock()
     self.thread = None
     self.stop = False
     self.Start()
Example #5
    def registerNewLock(self, bankNum, name, type, number, last):
        from Lock import Lock

        bankNum = "bank" + str(bankNum)

        if self.locks[bankNum] == None:
            if last == "LAST":
                self.locks[bankNum] = Lock(name + "," + type + "," +
                                           str(number) + ",LAST")
            else:
                self.locks[bankNum] = Lock(name + "," + type + "," +
                                           str(number))
            return (True)
        else:
            return (False)
Example #6
class Microprocessor(Receiver):
    in_0 = KeyboardPanel()
    out_0 = LCD()
    out_1 = Bell()
    out_2 = Lock()

    def __init__(self):
        self.olh = OpenLockHandler(self)
        self.clh = ClosedLockHandler(self)

        self.pkh = PassKeyHandler(self)
        self.ckh = CtrlKeyHandler(self)

        self.pkch = PassKeyChangeHandler(self)

        self.cbh = CallButtonHandler(self)
        self.pkah = PassKeyAcceptHandler(self.pkh, self)
        self.ckah = PassKeyAcceptHandler(self.ckh, self)

        self.cbh.set_next(self.clh).set_next(self.pkh).set_next(
            self.olh).set_next(self.ckh).set_next(self.ckh).set_next(self.pkch)

    def receive_signal(self, signal):
        signal = self.in_0.invoke()
        self.cbh.handle(signal)
        self.out_0.receive_signal(signal)

    def procedure(self):
        self.out_2.receive_signal(None)
        print(f"Lock: {self.out_2.state.info}")
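Example #6 wires its handlers into a chain of responsibility; for the chained set_next(...).set_next(...) calls to build a linear chain, each set_next must return the handler that was just attached. The handler classes themselves are not listed; a hypothetical base class showing that contract:

# Hypothetical chain-of-responsibility base class consistent with the
# set_next(...).set_next(...) chaining and handle(signal) calls in Example #6.
class Handler(object):
    def __init__(self, receiver):
        self.receiver = receiver
        self.next = None

    def set_next(self, handler):
        # Return the handler that was attached so calls can be chained.
        self.next = handler
        return handler

    def handle(self, signal):
        # Default behaviour: pass the signal further along the chain.
        if self.next is not None:
            self.next.handle(signal)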
Example #7
        def __write(self, op):

                if (self.TransactionTable[op.tid].state == "BLOCKED"):
                        self.LockTable[op.itemName].waitingOperations.append(op)
                        print "Transaction %s already %s"%(self.TransactionTable[op.tid], self.TransactionTable[op.tid].state)
                elif (self.TransactionTable[op.tid].state == "ABORTED"):
                        print "Transaction %s already %s"%(self.TransactionTable[op.tid], self.TransactionTable[op.tid].state)

                else:
                        if self.LockTable.has_key(op.itemName):
                                if self.LockTable[op.itemName].readlockedTIDS:
                                        for _tid in self.LockTable[op.itemName].readlockedTIDS:
                                                self.handleWaitDie(op, _tid)

                                elif self.LockTable[op.itemName].writeLockTID is not None:
                                        self.handleWaitDie(op, self.LockTable[op.itemName].writeLockTID)
                                else:
                                   self.writeLock(op)
                        else:
                                # print "Creating new WRITE LockTable Record for %s"%op.itemName
                                new_lock = Lock(itemName=op.itemName, lockState="WRITE",
                                                                readLockedTIDS=[], \
                                                                writeLockTID=op.tid, waitingOperations=[])
                                self.LockTable.update({
                                        op.itemName:new_lock
                                })
                                self.writeLock(op)
Example #8
        def __read(self, op):
                if (self.TransactionTable[op.tid].state == "BLOCKED"):
                        self.LockTable[op.itemName].waitingOperations.append(op)
                        print "Transaction %s already %s"%(self.TransactionTable[op.tid], self.TransactionTable[op.tid].state)
                elif (self.TransactionTable[op.tid].state == "ABORTED"):
                        print "Transaction %s already %s"%(self.TransactionTable[op.tid], self.TransactionTable[op.tid].state)
                else:
                        if self.LockTable.has_key(op.itemName):
                                # print "%s already %s locked by T%s"%(op.itemName, self.LockTable[op.itemName].lockState, self.LockTable[op.itemName].readlockedTIDS)
                                if self.LockTable[op.itemName].lockState == "WRITE":
                                        # print "Handle Wait Dies"
                                        self.handleWaitDie(op, self.LockTable[op.itemName].writeLockTID)
                                        print "Transaction T%s  %s"%(self.TransactionTable[op.tid], self.TransactionTable[op.tid].state)
                                else:
                                        print "T%s readlocks %s"%(op.tid,op.itemName)
                                        self.readLock(op)

                        else:
                                print "T%s readlocks %s"%(op.tid,op.itemName)
                                new_lock = Lock(itemName=op.itemName, lockState="READ",
                                                                readLockedTIDS=[op.tid],
                                                                writeLockTID=None, waitingOperations=[])
                                self.LockTable.update({
                                        op.itemName:new_lock
                                })
                                self.readLock(op)
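Examples #7 and #8 resolve lock conflicts through a handleWaitDie method that is not shown. The rule it implements compares transaction timestamps: an older requester waits for the current lock holder, while a younger requester is aborted ("dies") and restarts later with its original timestamp. A self-contained sketch of just that decision (the numeric timestamps and string return values are illustrative assumptions):

# Hypothetical wait-die decision: the requester waits only if it is older
# (smaller timestamp) than the transaction holding the conflicting lock.
def wait_die(requester_ts, holder_ts):
    if requester_ts < holder_ts:
        return "WAIT"   # older requester blocks and queues its operation
    return "DIE"        # younger requester aborts and restarts later


# T1 (started first) waits for a lock held by T2; T2 dies if blocked by T1.
assert wait_die(1, 2) == "WAIT"
assert wait_die(2, 1) == "DIE"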
Example #9
 def create_lock(self, cur_dir, ami_path, exclusive):
     if ami_path == '':
         if cur_dir == None:
             ami_path = "SYS:"
         else:
             ami_path = cur_dir.ami_path
     else:
         ami_path = self.path_mgr.ami_abs_path(cur_dir, ami_path)
     sys_path = self.path_mgr.ami_to_sys_path(cur_dir,
                                              ami_path,
                                              searchMulti=True)
     name = self.path_mgr.ami_name_of_path(cur_dir, ami_path)
     if sys_path == None:
         log_lock.info("lock '%s' invalid: no sys path found: '%s'", name,
                       ami_path)
         return None
     exists = os.path.exists(sys_path)
     if not exists:
         log_lock.info(
             "lock '%s' invalid: sys path does not exist: '%s' -> '%s'",
             name, ami_path, sys_path)
         return None
     lock = Lock(name, ami_path, sys_path, exclusive)
     self._register_lock(lock)
     return lock
Example #10
 def addJob(self, jobType, jobWeight=1):
     host, xWeight = self.getNextHost(jobWeight)
     xLock = Lock(os.path.join(self.JobDir, "lock-" + host))
     xLock.getLock()
     try:
         hostDir = os.path.join(self.JobDir, 'Host.' + host)
         if not os.path.exists(hostDir):
             try:
                 os.mkdir(hostDir)
             except:
                 pass
         jFile = open(os.path.join(hostDir, 'JOB.' + jobType), "w")
         jFile.write(str(jobWeight))
         jFile.close()
         self.updateHost(host)
         time.sleep(1)
     except:
         pass
     print 'ADDED JOB:', jobType, ':', jobWeight, '===>', host
     return host, xWeight
Example #11
 def addJob(self, jobType, jobWeight=1):
     host, xWeight = self.getNextHost(jobWeight)
     xLock = Lock(os.path.join(self.JobDir, "lock-" + host))
     xLock.getLock()
     try:
         hostDir = os.path.join(self.JobDir, "Host." + host)
         if not os.path.exists(hostDir):
             try:
                 os.mkdir(hostDir)
             except:
                 pass
         jFile = open(os.path.join(hostDir, "JOB." + jobType), "w")
         jFile.write(str(jobWeight))
         jFile.close()
         self.updateHost(host)
         time.sleep(1)
     except:
         pass
     print "ADDED JOB:", jobType, ":", jobWeight, "===>", host
     return host, xWeight
Example #12
    def getMachineId(self):

        WINDOW = xbmcgui.Window(10000)

        clientId = WINDOW.getProperty("client_id")
        if clientId != None and clientId != "":
            return clientId

        # we need to load and or generate a client machine id
        __addon__ = self.addon
        __addondir__ = xbmc.translatePath(__addon__.getAddonInfo('path'))
        machine_guid_lock_path = os.path.join(__addondir__,
                                              "machine_guid.lock")
        machine_guid_path = os.path.join(__addondir__, "machine_guid")
        clientId = ""

        try:
            lock = Lock(machine_guid_lock_path)
            locked = lock.acquire()

            if locked:

                fd = os.open(machine_guid_path, os.O_CREAT | os.O_RDWR)
                clientId = os.read(fd, 256)

                if len(clientId) == 0:
                    uuid = uuid4()
                    clientId = str("%012X" % uuid)
                    self.logMsg("ClientId saved to FILE: %s" % clientId, 2)
                    os.write(fd, clientId)
                    os.fsync(fd)

                os.close(fd)

                self.logMsg("ClientId saved to WINDOW: %s" % clientId, 1)
                WINDOW.setProperty("client_id", clientId)

        finally:
            lock.release()

        return clientId
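The Lock(machine_guid_lock_path) used by getMachineId above exposes acquire() returning a boolean and release(); its implementation is not part of this listing. A minimal cross-process sketch with the same interface, built on POSIX advisory locks (the real add-on module may work differently):

# Hypothetical POSIX file lock with the acquire()/release() interface used
# in the getMachineId examples; not the add-on's actual Lock module.
import fcntl
import os


class Lock(object):
    def __init__(self, path):
        self.path = path
        self.fd = None

    def acquire(self):
        # Open (creating if needed) the lock file and take an exclusive,
        # non-blocking advisory lock; return False if another process holds it.
        self.fd = os.open(self.path, os.O_CREAT | os.O_RDWR)
        try:
            fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            return True
        except IOError:
            os.close(self.fd)
            self.fd = None
            return False

    def release(self):
        # Safe to call even when acquire() failed.
        if self.fd is not None:
            fcntl.flock(self.fd, fcntl.LOCK_UN)
            os.close(self.fd)
            self.fd = None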
Example #13
 def get_lock_list(self, oplocks, locktypes, opname, params):
     locks = []
     requiredLocks = self.getlocks(opname, oplocks)
     for r in requiredLocks:
         for t in locktypes:
             if r["name"] == t["name"]:
                 paramValues = [params[t["param"]]]
                 lockname = "_".join([t["name"]] + paramValues)
                 locktype = LockType(t["name"], t["param"], t["placement"])
                 newlock = Lock(lockname, locktype, r["mode"])
                 locks += [newlock]
     return locks
Example #14
 def getMachineId(self):
 
     WINDOW = xbmcgui.Window( 10000 )
     
     clientId = WINDOW.getProperty("client_id")
     if(clientId != None and clientId != ""):
         return clientId
         
     # we need to load and or generate a client machine id    
     __addon__ = xbmcaddon.Addon(id='plugin.video.xbmb3c')
     __addondir__ = xbmc.translatePath( __addon__.getAddonInfo('path'))
     machine_guid_lock_path = os.path.join(__addondir__, "machine_guid.lock")
     machine_guid_path = os.path.join(__addondir__, "machine_guid")
     clientId = ""
     
     try:
         lock = Lock(machine_guid_lock_path)
         locked = lock.acquire()
         
         if(locked == True):
         
             fd = os.open(machine_guid_path, os.O_CREAT|os.O_RDWR)
             clientId = os.read(fd, 256)
             
             if(len(clientId) == 0):
                 uuid = uuid4()
                 clientId = str("%012X" % uuid)
                 xbmc.log("CLIENT_ID - > Client ID saved to FILE : " + clientId)                    
                 os.write(fd, clientId)
                 os.fsync(fd)
                 
             os.close(fd)
             
             xbmc.log("CLIENT_ID - > Client ID saved to WINDOW : " + clientId)
             WINDOW.setProperty("client_id", clientId)
              
     finally: 
         lock.release()
             
     return clientId
Example #15
    def getMachineId(self):

        WINDOW = xbmcgui.Window(10000)

        clientId = WINDOW.getProperty("client_id")
        if clientId != None and clientId != "":
            return clientId

        # we need to load and or generate a client machine id    
        __addon__ = self.addon
        __addondir__ = xbmc.translatePath(__addon__.getAddonInfo('path'))
        machine_guid_lock_path = os.path.join(__addondir__, "machine_guid.lock")
        machine_guid_path = os.path.join(__addondir__, "machine_guid")
        clientId = ""

        try:
            lock = Lock(machine_guid_lock_path)
            locked = lock.acquire()

            if locked:

                fd = os.open(machine_guid_path, os.O_CREAT | os.O_RDWR)
                clientId = os.read(fd, 256)

                if len(clientId) == 0:
                    uuid = uuid4()
                    clientId = str("%012X" % uuid)
                    self.logMsg("ClientId saved to FILE: %s" % clientId, 2)
                    os.write(fd, clientId)
                    os.fsync(fd)

                os.close(fd)

                self.logMsg("ClientId saved to WINDOW: %s" % clientId, 1)
                WINDOW.setProperty("client_id", clientId)

        finally:
            lock.release()

        return clientId
Example #16
    def __init__(self):
        ## set private values
        self.config = Config(workpath)
        self.pid = os.getpid()
        self.pname = 'ETL.py'

        ## logger initial
        self.loggerInit()

        ## lock initial
        self.lockObj = Lock(
            self.pname,
            self.pid,
            self.config.LOCK_DIR,
            self.config.LOCK_FILE,
            self.logger)

        ## debug output
        self.logger.debug('ETL Initial Start')
        self.logger.debug('[SYS_BUFFER_SIZE][%s]' % (self.config.SYS_BUFFER_SIZE))
        self.logger.debug('[SYS_BUFFER_WAIT][%s]' % (self.config.SYS_BUFFER_WAIT))
        self.logger.debug('[MQ_SERVER][%s]' % (self.config.MQ_SERVER))
        self.logger.debug('[MQ_PORT][%s]' % (self.config.MQ_PORT))
        self.logger.debug('[MQ_QUEUE][%s]' % (self.config.MQ_QUEUE))
        self.logger.debug('[MARIADB_HOST][%s]' % (self.config.MARIADB_HOST))
        self.logger.debug('[MARIADB_PORT][%s]' % (self.config.MARIADB_PORT))
        self.logger.debug('[MARIADB_USER][%s]' % (self.config.MARIADB_USER))
        self.logger.debug('[MARIADB_PASSWORD][%s]' % (self.config.MARIADB_PASSWORD))
        self.logger.debug('[MARIADB_DATABASE][%s]' % (self.config.MARIADB_DATABASE))
        self.logger.debug('[LOCK_DIR][%s]' % (self.config.LOCK_DIR))
        self.logger.debug('[LOCK_FILE][%s]' % (self.config.LOCK_FILE))
        self.logger.debug('[LOG_DIR][%s]' % (self.config.LOG_DIR))
        self.logger.debug('[LOG_FILE][%s]' % (self.config.LOG_FILE))
        self.logger.debug('[LOG_LEVEL][%s]' % (self.config.LOG_LEVEL))
        self.logger.debug('[LOG_MAX_SIZE][%s]' % (self.config.LOG_MAX_SIZE))
        self.logger.debug(
            '[LOG_BACKUP_COUNT][%s]' %
            (self.config.LOG_BACKUP_COUNT))
        self.logger.debug('ETL Initial Done')
Example #17
 def setLocksAfterLoading(self):
     lines = self.codes["bank1"]["bank_configurations"].code.split("\n")
     for line in lines:
         if line.startswith("*"):
             continue
         else:
             if line.split("=")[0] == "bank1":
                 self.kernel = line.split("=")[1].replace("\n", "").replace(
                     "\r", "")
             else:
                 from Lock import Lock
                 self.locks[line.split("=")[0]] = Lock(
                     line.split("=")[1].replace("\n", "").replace("\r", ""))
Example #18
    def init(self, addr, delegate):
        self.__addr = addr
        self.__delegate = delegate
        if delegate.SetSockServer:
            delegate.SetSockServer(self)

        self.__clientObj = []
        self.__listenSock = None
        self.__eventSendSock = None

        self.__eventClient = None
        self.__threadEvent = threading.Event()
        self.__threadState = ThreeState.INIT
        self.__tmpClientObj = []  # protected by self.__lock
        self.__lock = Lock()
Example #19
 def get_by_b_addr(self, b_addr, none_if_missing=False):
     # current dir lock
     if b_addr == 0:
         (cur_dev, cur_path) = self.path_mgr.get_cur_path()
         ami_path = cur_dev + ":" + cur_path
         sys_path = self.path_mgr.ami_to_sys_path(ami_path)
         return Lock("local", ami_path, sys_path)
     elif self.locks_by_b_addr.has_key(b_addr):
         return self.locks_by_b_addr[b_addr]
     else:
         if none_if_missing:
             return None
         else:
             raise VamosInternalError("Invalid File Lock at b@%06x" %
                                      b_addr)
Example #20
def accepting_connections():
    close_connections()

    while True:
        try:
            conn, address = s.accept()
            s.setblocking(1)  # prevents timeout

            try:
                device_type = conn.recv(201480)

                if (str(device_type) == "b'keypad_code'"):
                    my_uid = conn.recv(201480)
                    lock_uid = conn.recv(201480)

                    new_Keypad = Keypad(address, conn, lock_uid, my_uid)
                    keypad_list.append(new_Keypad)
                    queue.put(3)
                    create_workers(ADD_WORKER)

                    print("Keypad has been established :" + address[0])

                elif (str(device_type) == "b'lock_code'"):
                    device_code = conn.recv(201480)
                    my_uid = conn.recv(201480)
                    keypad_uid = conn.recv(201480)
                    new_Lock = Lock(address, conn, device_code, keypad_uid,
                                    my_uid)
                    lock_list.append(new_Lock)

                    print("Lock has been established :" + address[0])

                elif (str(device_type) == "b'web_interface'"):
                    global web_interface
                    web_interface = Web_Interface(address, conn)
                    queue.put(5)
                    create_workers(ADD_WORKER)

                    print("Web Interface has been established :" + address[0])

                else:
                    print("Unable to recognize incoming device")
            except:
                print("Something went wrong in obtaining a device code")

        except:
            print("Error accepting connections")
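In Example #20 the device type arrives as bytes and is matched against the string repr ("b'keypad_code'"). A hypothetical helper that decodes the payload once and compares plain strings would make the same branching clearer:

# Hypothetical replacement for the repr-based comparison in Example #20:
# decode the received bytes once, then compare ordinary strings.
def classify_device(raw):
    device_type = raw.decode("utf-8", errors="replace").strip()
    if device_type == "keypad_code":
        return "keypad"
    elif device_type == "lock_code":
        return "lock"
    elif device_type == "web_interface":
        return "web"
    return None


# classify_device(b"lock_code") -> "lock"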
Example #21
 def get_devices(self):
     '''Get the list of registered locks'''
     api_url_lock = "https://api.tedee.com/api/v1.15/my/lock"
     r = requests.get(api_url_lock,
                      headers=self._api_header,
                      timeout=self._timeout)
     _LOGGER.debug("Locks %s", r.json())
     result = r.json()["result"]
     for x in result:
         id = x["id"]
         name = x["name"]
         self._lock_id = id
         '''store the found lock in _sensor_list and get the battery_level'''
         self._sensor_list.append(Lock(name, id))
         self.get_battery(id)
     if self._lock_id == None:
         raise TedeeClientException("No lock found")
Example #22
def process():
  # Get the first task from the list
  # Check if we know what to do
  # Mark it as started
  # Start doing it
  parser = OptionParser(usage="%prog process [options]")
  parser.add_option("--match-arch", metavar="REGEX", dest="matchArch", help="Limit architectures to those matching REGEX", default=".*")
  parser.add_option("--match-release", metavar="REGEX", dest="matchRelease", help="Limit releases to those matching REGEX", default=".*")
  parser.add_option("--work-dir", "--top-dir", metavar="PATH", dest="workdir", help="Work dir where processing happens", default=None)
  parser.add_option("--jobs", "-j", type="int", metavar="N", dest="jobs", help="Number of parallel building threads", default=1)
  parser.add_option("--builders", type="int", metavar="N", dest="builders", help="Number of packages built in parallel", default=1)
  parser.add_option("--debug", metavar="PATH", dest="debug", help="Print out what's happening", action="store_true", default=False)
  parser.add_option("--dry-run", "-n", metavar="BOOL", dest="dryRun", help="Do not execute", action="store_true", default=False)
  parser.add_option("--api-url", metavar="URL", dest="apiUrl", help="Specify API endpoint URL", default=DEFAULT_API_URL)
  parser.add_option("--max-load", type="int", metavar="LOAD", dest="maxLoad", help="Do not execute if average last 15 minutes load > LOAD", default=8)
  opts, args = parser.parse_args()
  setTCUrl(opts.apiUrl)
  if not opts.workdir:
    print "Please specify a workdir"
    sys.exit(1)

  if exists("/etc/iss.nologin"):
    print "/etc/iss.nologin found. Not doing anything and waiting for machine out of maintenance mode."
    sys.exit(1)
  opts.workdir = abspath(opts.workdir)
  thisPath=dirname(__file__)
  getstatusoutput(format(
    "%(here)s/syncLogs.py %(workdir)s",
    here=thisPath, 
    workdir=opts.workdir))
  lockPath = join(opts.workdir, "cms", ".cmsLock")
  lock = Lock(lockPath, True, 60*60*12)
  if not lock:
    if opts.debug:
      print "Lock found in %s" % lockPath
    sys.exit(1)
  lock.__del__()
   
  if overloaded(opts.maxLoad):
    print "Current load exceeds maximum allowed of %s." % opts.maxLoad
    sys.exit(1)
  tasks = call("/", "GET", 
               release_match=opts.matchRelease,
               architecture_match=opts.matchArch,
               state="Pending")
  print tasks
  if not len(tasks):
    if opts.debug:
      print "Nothing to be done which matches release %s and architecture %s" % (opts.matchArch, opts.matchRelease)
    sys.exit(1)
  # Look up for a hostname-filter option in the payload and if it is there,
  # make sure we match it.
  runnableTask = None
  for task in tasks:
    if not "payload" in task:
      continue
    if re.match(task["payload"].get("hostnameFilter", ".*"), socket.gethostname()):
      runnableTask = task
      break
  if not runnableTask:
    print "Nothing to be done on this machine."
    sys.exit(1)
  # Default payload options.
  payload = {"debug": False}
  payload.update(runnableTask["payload"])

  # We can now specify tags in the format repository:tag to pick up branches
  # from different people.
  payload["pkgtools_remote"] = "cms-sw"
  payload["cmsdist_remote"] = "cms-sw"
  if ":" in payload["PKGTOOLS"]:
    payload["pkgtools_remote"], payload["PKGTOOLS"] = payload["PKGTOOLS"].split(":", 1)
  if ":" in payload["CMSDIST"]:
    payload["cmsdist_remote"], payload["CMSDIST"] = payload["CMSDIST"].split(":", 1)
  
  if opts.dryRun:
    print "Dry run. Not building"
    sys.exit(1)

  ok = call(runnableTask["id"], "PATCH", 
            url="http://cmssdt.cern.ch/SDT/tc-ib-logs/%s/log.%s.html" % (socket.gethostname(), runnableTask["id"]),
            machine=socket.gethostname(),
            pid=os.getpid(),
            state="Running")
  if not ok:
    print "Could not change request %s state to building" % runnableTask["id"] 
    sys.exit(1)
  
  # Build the package.
  # We gracefully handle any exception (broken pipe, ctrl-c, SIGKILL)
  # by failing the request if they happen. We also always cat 
  # the log for this build in a global log file.
  log = ""
  getstatusoutput(format(
    "echo 'Log not sync-ed yet' > %(workdir)s/log.%(task_id)s;\n"
    "%(here)s/syncLogs.py %(workdir)s",
    task_id=runnableTask["id"],
    here=thisPath, 
    workdir=opts.workdir))
  try:
    print "Building..."
    error, log = getstatusoutput(format("set -e ;\n"
       "mkdir -p %(workdir)s/%(task_id)s ;\n"
       "export CMS_PATH=%(workdir)s/cms ;\n"
       "cd %(workdir)s ;\n"
       "( echo 'Building %(package)s using %(cmsdistRemote)s:%(cmsdistTag)s';\n"
       "  rm -rf %(task_id)s;\n"
       "  git clone git://github.com/%(cmsdistRemote)s/cmsdist.git %(task_id)s/CMSDIST || git clone https://:@git.cern.ch/kerberos/CMSDIST.git %(task_id)s/CMSDIST;\n"
       "  pushd %(task_id)s/CMSDIST; git checkout %(cmsdistTag)s; popd;\n"
       "  PKGTOOLS_TAG=\"`echo %(pkgtoolsTag)s | sed -e's/\\(V[0-9]*-[0-9]*\\).*/\\1-XX/'`\";\n"
       "  git clone git://github.com/%(pkgtoolsRemote)s/pkgtools.git %(task_id)s/PKGTOOLS || git clone https://:@git.cern.ch/kerberos/PKGTOOLS.git %(task_id)s/PKGTOOLS;\n"
       "  pushd %(task_id)s/PKGTOOLS; git checkout $PKGTOOLS_TAG; popd;\n"
       "  echo \"### RPM cms dummy `date +%%s`\n%%prep\n%%build\n%%install\n\" > %(task_id)s/CMSDIST/dummy.spec ;\n"
       "  set -x ;\n"
       "  rm -rf %(workdir)s/cms %(workdir)s/b ;\n"
       "  perl -p -i -e 's/### RPM cms cmssw.*/### RPM cms cmssw %(base_release_name)s/' %(task_id)s/CMSDIST/cmssw.spec ;\n"
       "  perl -p -i -e 's/### RPM cms cmssw-ib .*/### RPM cms cmssw-ib %(base_release_name)s/' %(task_id)s/CMSDIST/cmssw-ib.spec ;\n"
       "  perl -p -i -e 's/### RPM cms cmssw-qa .*/### RPM cms cmssw-qa %(base_release_name)s/' %(task_id)s/CMSDIST/cmssw-qa.spec ;\n"
       "  perl -p -i -e 's/### RPM cms cmssw-validation .*/### RPM cms cmssw-validation %(base_release_name)s/' %(task_id)s/CMSDIST/cmssw-validation.spec ;\n"
       "  perl -p -i -e 's/### RPM cms cmssw-patch.*/### RPM cms cmssw-patch %(real_release_name)s/' %(task_id)s/CMSDIST/cmssw-patch.spec ;\n"
       "  %(workdir)s/%(task_id)s/PKGTOOLS/cmsBuild %(debug)s --new-scheduler --cmsdist %(workdir)s/%(task_id)s/CMSDIST %(ignoreErrors)s --builders %(builders)s -j %(jobs)s --repository %(repository)s --architecture %(architecture)s --work-dir %(workdir)s/cms build %(package)s ;\n"
       "  %(workdir)s/%(task_id)s/PKGTOOLS/cmsBuild %(debug)s --new-scheduler --cmsdist %(workdir)s/%(task_id)s/CMSDIST --repository %(repository)s --upload-tmp-repository %(tmpRepository)s %(syncBack)s --architecture %(architecture)s --work-dir %(workdir)s/cms upload %(package)s ;\n"
       "  PKG_BUILD=`find %(workdir)s/cms/RPMS/%(architecture)s -name \"*%(package)s*\"| sed -e's|.*/||g;s|-1-1.*||g'`;\n"
       "  set +x ;\n"
       "  echo Build completed. you can now install the package built by doing: ;\n"
       "  echo \"wget http://cmsrep.cern.ch/cmssw/cms/bootstrap.sh\" ;\n"
       "  echo \"sh -x ./bootstrap.sh setup -path w -arch %(architecture)s -r %(repository)s >& bootstrap_%(architecture)s.log \";\n"
       "  echo \"(source w/%(architecture)s/external/apt/*/etc/profile.d/init.sh ; apt-get install $PKG_BUILD )\" ;\n"
       "  echo AUTOIB SUCCESS) 2>&1 | tee %(workdir)s/log.%(task_id)s",
       workdir=opts.workdir,
       debug=payload["debug"] == True and "--debug" or "",
       cmsdistTag=sanitize(payload["CMSDIST"]),
       pkgtoolsTag=sanitize(payload["PKGTOOLS"]),
       cmsdistRemote=sanitize(payload["cmsdist_remote"]),
       pkgtoolsRemote=sanitize(payload["pkgtools_remote"]),
       architecture=sanitize(runnableTask["architecture"]),
       release_name=sanitize(re.sub("_[A-Z]+_X", "_X", runnableTask["release"])),
       base_release_name=re.sub("_[^_]*patch[0-9]*$", "", sanitize(payload["release"])),
       real_release_name=sanitize(payload["release"]),
       package=sanitize(payload["package"]),
       repository=sanitize(payload["repository"]),
       syncBack=payload["syncBack"] == True and "--sync-back" or "",
       ignoreErrors=payload["ignoreErrors"] == True and "-k" or "",
       tmpRepository=sanitize(payload["tmpRepository"]),
       task_id=runnableTask["id"],
       jobs=opts.jobs,
       builders=opts.builders))
    getstatusoutput(format("echo 'Task %(task_id)s completed successfully.' >> %(workdir)s/log.%(task_id)s",
                           workdir=opts.workdir,
                           task_id=runnableTask["id"]))
  except Exception, e:
    log = open(format("%(workdir)s/log.%(task_id)s", workdir=opts.workdir, task_id=runnableTask["id"])).read()
    log += "\nInterrupted externally."
    log += str(e)
    getstatusoutput(format("echo 'Interrupted externally' >> %(workdir)s/log.%(task_id)s",
                           workdir=opts.workdir,
                           task_id=runnableTask["id"]))
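Examples #22 and #32 rely on a format() helper (not the builtin str.format) and on getstatusoutput, neither of which is defined in the snippets. Judging from the call sites, format substitutes %(name)s placeholders from keyword arguments; a hypothetical stand-in:

# Hypothetical stand-in for the format() helper used in Examples #22 and #32:
# old-style %(name)s substitution driven by keyword arguments.
def format(template, **kwargs):
    return template % kwargs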
Example #23
class Scheduler(object):
    ## initial function
    def __init__(self):
        ## set private values
        self.config = Config(workpath)
        self.pid = os.getpid()
        self.pname = 'Scheduler.py'

        ## logger initial
        self.loggerInit()

        ## lock initial
        self.lockObj = Lock(self.pname, self.pid, self.config.LOCK_DIR,
                            self.config.LOCK_FILE, self.logger)

        ## debug output
        self.logger.debug('Scheduler Initial Start')
        self.logger.debug('[SYS_CFG_DIR][%s]' % (self.config.SYS_CFG_DIR))
        self.logger.debug('[LOCK_DIR][%s]' % (self.config.LOCK_DIR))
        self.logger.debug('[LOCK_FILE][%s]' % (self.config.LOCK_FILE))
        self.logger.debug('[LOG_DIR][%s]' % (self.config.LOG_DIR))
        self.logger.debug('[LOG_FILE][%s]' % (self.config.LOG_FILE))
        self.logger.debug('[LOG_LEVEL][%s]' % (self.config.LOG_LEVEL))
        self.logger.debug('[LOG_MAX_SIZE][%s]' % (self.config.LOG_MAX_SIZE))
        self.logger.debug('[LOG_BACKUP_COUNT][%s]' %
                          (self.config.LOG_BACKUP_COUNT))
        self.logger.debug('Scheduler Initial Done')

    ## initial logger
    def loggerInit(self):
        self.logger = logging.getLogger("Scheduler")

        try:
            log_level = getattr(logging, self.config.LOG_LEVEL)

        except BaseException:
            log_level = logging.NOTSET

        self.logger.setLevel(log_level)

        fh = RotatingFileHandler(self.config.LOG_FILE,
                                 mode='a',
                                 maxBytes=self.config.LOG_MAX_SIZE,
                                 backupCount=self.config.LOG_BACKUP_COUNT)
        fh.setLevel(log_level)

        ch = logging.StreamHandler()
        ch.setLevel(log_level)

        formatter = logging.Formatter(
            '[%(asctime)s][%(name)s][%(levelname)s] %(message)s')
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)

        self.logger.addHandler(fh)
        self.logger.addHandler(ch)

        return (True)

    ## run asset function
    def run(self):
        self.logger.debug('Scheduler Start')

        ## load and run tasks
        taskObj = Task(self.logger, self.config)
        taskObj.run()

        ## release lock
        self.lockObj.release()

        return (True)
Example #24
    def run(self):
        ret = 0
        try:
            ret = os.system('rfcp '+self.castorFile+' '+self.localFile)
        except:
            ret = 1
        if ret != 0:
            print "ERROR copying rawRefFile ", self.castorFile, ' to ', self.localFile
            print "      rfcp returned: ", ret
        return

def getRawRefs():
    import PerfSuiteRawRef
    threadList = []
    if not os.path.exists(PerfSuiteRawRef.rawRefDir):
        os.makedirs(PerfSuiteRawRef.rawRefDir)
    for castorDir, refList in PerfSuiteRawRef.referenceFiles.items():
        for rem, ref in refList.items():
            if os.path.exists(PerfSuiteRawRef.rawRefDir+'/'+ref) :
                print "Ignoring existing rawRefFile ", ref
                continue
            else:
                t = GetRawRefFile(castorDir+rem,PerfSuiteRawRef.rawRefDir+ref)
                t.start()
                threadList.append(t)
    for t in threadList: t.join()

xLock = Lock('prefSuit.rawRef.lock')
xLock.getLock(10,1000)
getRawRefs()
Example #25
#!/usr/bin/env python3

from Lock import Lock
from time import sleep

# Usage
try:
    lock = Lock("Worker")
    lock.acquire()
    print("fun1 starting")
    for loop in range(1, 5):
        print("Fun1 Working {}".format(loop))
        sleep(1)
    print("fun1 finished")
finally:
    print("Releasing Lock")
    lock.release()
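Example #25 pairs acquire() with a finally-guarded release(). The same pattern can be packaged once as a context manager, assuming only the acquire()/release() interface shown above:

# Hypothetical context-manager wrapper around the acquire()/release()
# interface used in Example #25; the Lock class itself is unchanged.
from contextlib import contextmanager


@contextmanager
def locked(lock):
    lock.acquire()
    try:
        yield lock
    finally:
        lock.release()


# Usage equivalent to the try/finally in Example #25:
# with locked(Lock("Worker")):
#     do_work()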
Example #26
            buildDir = a

    plat = None
    try:
        plat = os.environ["SCRAM_ARCH"]
    except KeyError:
        plat = "slc5_ia32_gcc434"  # assume this for now ...
        os.environ[
            "SCRAM_ARCH"] = plat  # make sure we have it for the others :)

    if not rel:
        usage()
        sys.exit(-1)

    bmgr = BuildManager(rel, ibdate, ibstamp)
    bmgr.setDryRun(dryRun)

    if not buildDir:
        from Lock import Lock
        lock = Lock(bmgr.topBuildDir + '/buildLock')
        if not lock:
            print 'WARNING: Another build is still running on ' + config.getHostName(
            ) + ' for release cycle ' + rel
        else:
            bmgr.checkout()
            bmgr.startBuild(buildDir)
    else:
        bmgr.startBuild(buildDir)

    sys.exit(0)
Example #27
import cv2
import base64
import datetime
from tkinter import *
from PIL import Image, ImageTk
from FaceRecognition import recognize
import os
from Lock import Lock
import time

device = cv2.VideoCapture(0)
lock = Lock(18)
root = Tk()
root.geometry("500x350")
lmain = Label(root)
lmain.pack()

lstatus = Label(root)
lstatus.pack(side="top")


def show_frame():
    _, frame = device.read()
    frame = cv2.flip(frame, 1)
    cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
    cv2image = cv2.resize(cv2image, (300, 250))
    img = Image.fromarray(cv2image)  # use the converted, resized image rather than the raw BGR frame
    imgtk = ImageTk.PhotoImage(image=img)
    lmain.imgtk = imgtk
    lmain.configure(image=imgtk)
    lmain.after(10, show_frame)
Example #28
 def __init__(self):
     """Class constructor"""
     self.lock = Lock()
     self.riches = random.randint(100, 1000)
     self.is_cracked = False
Example #29
class CrontabService:

    # initial function
    def __init__(self):

        # set private values
        self.config = Config(workpath)
        self.pid = os.getpid()
        self.pname = 'CrontabService.py'

        # logger initial
        self.logger_init()

        # lock initial
        self.lockObj = Lock(self.pname, self.pid, self.config.LOCK_DIR,
                            self.config.LOCK_FILE, self.logger)

        # debug output
        self.logger.debug('Crontab Initial')
        self.logger.debug('[SERVICE_INTERVAL][%s]' %
                          (self.config.SERVICE_INTERVAL))
        self.logger.debug('[CRONTAB_CFG_DIR][%s]' %
                          (self.config.CRONTAB_CFG_DIR))
        self.logger.debug('[CRONTAB_CFG_FILE][%s]' %
                          (self.config.CRONTAB_CFG_FILE))
        self.logger.debug('[MAX_THREADS][%s]' % (self.config.MAX_THREADS))
        self.logger.debug('[THREAD_TIMEOUT][%s]' %
                          (self.config.THREAD_TIMEOUT))
        self.logger.debug('[LOCK_DIR][%s]' % (self.config.LOCK_DIR))
        self.logger.debug('[LOCK_FILE][%s]' % (self.config.LOCK_FILE))
        self.logger.debug('[LOG_DIR][%s]' % (self.config.LOG_DIR))
        self.logger.debug('[LOG_FILE][%s]' % (self.config.LOG_FILE))
        self.logger.debug('[LOG_LEVEL][%s]' % (self.config.LOG_LEVEL))
        self.logger.debug('[LOG_MAX_SIZE][%s]' % (self.config.LOG_MAX_SIZE))
        self.logger.debug('[LOG_BACKUP_COUNT][%s]' %
                          (self.config.LOG_BACKUP_COUNT))

        return (None)

    # initial logger
    def logger_init(self):

        self.logger = logging.getLogger("Crontab")

        try:
            log_level = getattr(logging, self.config.LOG_LEVEL)
        except BaseException:
            log_level = logging.NOTSET

        self.logger.setLevel(log_level)

        fh = RotatingFileHandler(self.config.LOG_FILE,
                                 mode='a',
                                 maxBytes=self.config.LOG_MAX_SIZE,
                                 backupCount=self.config.LOG_BACKUP_COUNT)
        fh.setLevel(log_level)

        ch = logging.StreamHandler()
        ch.setLevel(log_level)

        formatter = logging.Formatter(
            '[%(asctime)s][%(name)s][%(levelname)s] %(message)s')
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)

        self.logger.addHandler(fh)
        self.logger.addHandler(ch)

        return (True)

    # run crontab function
    def run(self):

        while True:

            # crontab initial
            self.crontabObj = Crontab(self.config.CRONTAB_CFG_FILE,
                                      self.logger, self.config.MAX_THREADS,
                                      self.config.THREAD_TIMEOUT,
                                      self.config.SUBPROC_LIMITS,
                                      self.config.MAX_RETRY,
                                      self.config.THREAD_DELAY)
            self.crontabObj.run()
            time.sleep(self.config.SERVICE_INTERVAL)

        return (True)

    # destructor function
    def __del__(self):

        # lock release
        try:
            self.lockObj.lock_release(self.config.LOCK_FILE)
        except Exception as e:
            pass

        return (None)
Example #30
from CheckFaceService import CheckFaceService
from Lock import Lock
from ButtonController import ButtonController 
from TaskManager import taskManager
from PasswordController import  PasswordController
#from PCButtonController import PCButtonController 
from HistoryController import HistoryController
from BulbController import bulbController
import config

historyController = HistoryController(config.USER_EMAIL, config.USER_PASSWORD, config.EQUIPMENT_NAME)
lock = Lock()
buttonController = ButtonController()

PasswordController = PasswordController()
checkFaceService = CheckFaceService()
checkFaceService.check_success_task = lock.open_door
checkFaceService.record_task = historyController.AddRecord

taskManager.add_task(checkFaceService.model.update,2)
taskManager.add_task(buttonController.password_controller.update,3)
taskManager.add_task(checkFaceService.camera.CatchImage,0.1)

buttonController.star_task = checkFaceService.start_check 
buttonController.password_correct_task = lock.open_door
buttonController.add_record_task = historyController.AddRecord
buttonController.camera = checkFaceService.camera
buttonController.enable()


Example #31
import time
from urlparse import urlparse
from itchat.itchat import itchat
from constdata import const
import os
#from tlrobot import TlRobot
from protocal import protocal
import json
import copy
from datetime import datetime

import requests
import threadpool
from Lock import Lock, AutoLock

l = Lock()


def M():
    AutoLock(l)


M()


def x(hh):
    #AutoLock(l)
    print "hello " + hh


pool = threadpool.ThreadPool(10)
Example #32
def process():
  # Get the first task from the list
  # Check if we know what to do
  # Mark it as started
  # Start doing it
  parser = OptionParser(usage="%prog process [options]")
  parser.add_option("--match-arch", metavar="REGEX", dest="matchArch", help="Limit architectures to those matching REGEX", default=".*")
  parser.add_option("--match-release", metavar="REGEX", dest="matchRelease", help="Limit releases to those matching REGEX", default=".*")
  parser.add_option("--work-dir", "--top-dir", metavar="PATH", dest="workdir", help="Work dir where processing happens", default=None)
  parser.add_option("--jobs", "-j", type="int", metavar="N", dest="jobs", help="Number of parallel building threads", default=1)
  parser.add_option("--builders", type="int", metavar="N", dest="builders", help="Number of packages built in parallel", default=1)
  parser.add_option("--debug", metavar="PATH", dest="debug", help="Print out what's happening", action="store_true", default=False)
  parser.add_option("--dry-run", "-n", metavar="BOOL", dest="dryRun", help="Do not execute", action="store_true", default=False)
  parser.add_option("--testbed", metavar="BOOL", dest="useTestBed", help="Use the testbed tag collector to ", action="store_true", default=False)
  parser.add_option("--max-load", type="int", metavar="LOAD", dest="maxLoad", help="Do not execute if average last 15 minutes load > LOAD", default=8)
  opts, args = parser.parse_args()
  if not opts.workdir:
    print "Please specify a workdir"
    sys.exit(1)

  if exists("/etc/iss.nologin"):
    print "/etc/iss.nologin found. Not doing anything and waiting for machine out of maintenance mode."
    sys.exit(1)
  opts.workdir = abspath(opts.workdir)
  thisPath=dirname(__file__)
  getstatusoutput(format(
    "%(here)s/syncLogs.py %(workdir)s",
    here=thisPath, 
    workdir=opts.workdir))
  lockPath = join(opts.workdir, "cms", ".cmsLock")
  lock = Lock(lockPath, True, 60*60*12)
  if not lock:
    if opts.debug:
      print "Lock found in %s" % lockPath
    sys.exit(1)
  lock.__del__()
   
  if overloaded(opts.maxLoad):
    print "Current load exceeds maximum allowed of %s." % opts.maxLoad
    sys.exit(1)
  options = {"release_pattern": opts.matchRelease,
             "architecture_pattern": opts.matchArch}
  if opts.useTestBed:
    options["tcBaseURL"] = TESTBED_URL
  tasks = tagCollectorAPI.listPendingTasks(**options)
  print tasks
  if not len(tasks):
    if opts.debug:
      print "Nothing to be done which matches release %s and architecture %s" % (opts.matchArch, opts.matchRelease)
    sys.exit(1)
  # Default payload options.
  payload = {"debug": False}
  task_id, architecture_name, release_name, payloadNew = tasks[0]
  payload.update(payloadNew)
  
  if not payload.has_key("build-task"):
    print "Request task %s is not a valid build task" % task_id
    sys.exit(1)

  buildTask = payload["build-task"]
  if not buildTask in ["build-package"]:
    print "Unknown task for request %s: %s" % (task_id, buildTask)
    sys.exit(1)

  if opts.dryRun:
    print "Dry run. Not building"
    sys.exit(1)

  options = {"request_id": task_id,
             "release_name": release_name,
             "machine": socket.gethostname(),
             "pid": os.getpid()}
  if opts.useTestBed:
    options["tcBaseURL"] = TESTBED_URL
  options["results_url"] = "http://cmssdt.cern.ch/SDT/tc-ib-logs/%s/log.%s.html" % (socket.gethostname(), task_id)
  ok = tagCollectorAPI.setRequestBuilding(**options)
  if not ok:
    print "Could not change request %s state to building" % task_id
    sys.exit(1)
  
  # Build the package.
  # We gracefully handle any exception (broken pipe, ctrl-c, SIGKILL)
  # by failing the request if they happen. We also always cat 
  # the log for this build in a global log file.
  log = ""
  getstatusoutput(format(
    "echo 'Log not sync-ed yet' > %(workdir)s/log.%(task_id)s;\n"
    "%(here)s/syncLogs.py %(workdir)s",
    task_id=task_id,
    here=thisPath, 
    workdir=opts.workdir))
  try:
    print "Building..."
    error, log = getstatusoutput(format("set -e ;\n"
       "mkdir -p %(workdir)s/%(task_id)s ;\n"
       "export CMS_PATH=%(workdir)s/cms ;\n"
       "cd %(workdir)s ;\n"
       "( export CVSROOT=:pserver:[email protected]/local/reps/CMSSW ;\n"
       "  export CVS_PASSFILE=%(workdir)s/.cvspass ;\n"
       "  echo '/1 :pserver:[email protected]:/cvs_server/repositories/CMSSW %(cvspass)s' > $CVS_PASSFILE ;\n"
       "  echo '/1 :pserver:[email protected]:2401/cvs/CMSSW %(cvspass)s' >> $CVS_PASSFILE ;\n"
       "  echo '/1 :pserver:[email protected]:2401/cvs_server/repositories/CMSSW %(cvspass)s' >> $CVS_PASSFILE ;\n"
       "  echo '/1 :pserver:[email protected]/local/reps/CMSSW %(cvspass)s' >> $CVS_PASSFILE ;\n"
       "  echo '/1 :pserver:[email protected]/local/reps/CMSSW %(cvspass)s' >> $CVS_PASSFILE ;\n"
       "  echo '/1 :pserver:[email protected]:2401/local/reps/CMSSW %(cvspass)s' >> $CVS_PASSFILE ;\n"
       "  echo '/1 :pserver:[email protected]:2401/local/reps/CMSSW %(cvspass)s' >> $CVS_PASSFILE ;\n"
       "  echo '/1 :pserver:[email protected]:2401/local/reps/CMSSW %(cvspass)s' >> $CVS_PASSFILE;\n"
       "  git clone https://github.com/cms-sw/cmsdist.git %(task_id)s/CMSDIST;\n"
       "  pushd %(task_id)s/CMSDIST; git checkout %(cmsdistTag)s; popd;\n"
       "  PKGTOOLS_TAG=\"`echo %(pkgtoolsTag)s | sed -e's/\\(V[0-9]*-[0-9]*\\).*/\\1-XX/'`\";\n"
       "  git clone https://github.com/cms-sw/pkgtools.git %(task_id)s/PKGTOOLS\n"
       "  pushd %(task_id)s/PKGTOOLS; git checkout $PKGTOOLS_TAG; popd;\n"
       "  echo \"### RPM cms dummy `date +%%s`\n%%prep\n%%build\n%%install\n\" > %(task_id)s/CMSDIST/dummy.spec ;\n"
       "  set -x ;\n"
       "  rm -rf %(workdir)s/cms %(workdir)s/b ;\n"
       "  perl -p -i -e 's/### RPM cms cmssw.*/### RPM cms cmssw %(base_release_name)s/' %(task_id)s/CMSDIST/cmssw.spec ;\n"
       "  perl -p -i -e 's/### RPM cms cmssw-patch.*/### RPM cms cmssw-patch %(real_release_name)s/' %(task_id)s/CMSDIST/cmssw-patch.spec ;\n"
       "  %(workdir)s/%(task_id)s/PKGTOOLS/cmsBuild %(debug)s --new-scheduler --cmsdist %(workdir)s/%(task_id)s/CMSDIST %(ignoreErrors)s --builders %(builders)s -j %(jobs)s --repository %(repository)s --architecture %(architecture)s --work-dir %(workdir)s/cms build %(package)s ;\n"
       "  %(workdir)s/%(task_id)s/PKGTOOLS/cmsBuild %(debug)s --new-scheduler --cmsdist %(workdir)s/%(task_id)s/CMSDIST --repository %(repository)s --upload-tmp-repository %(tmpRepository)s %(syncBack)s --architecture %(architecture)s --work-dir %(workdir)s/cms upload %(package)s ;\n"
       "  set +x ;\n"
       "  echo AUTOIB SUCCESS) 2>&1 | tee %(workdir)s/log.%(task_id)s",
       workdir=opts.workdir,
       cvspass=CMSSW_CVSPASS,
       debug=payload["debug"] == True and "--debug" or "",
       cmsdistTag=sanitize(payload["CMSDIST"]),
       pkgtoolsTag=sanitize(payload["PKGTOOLS"]),
       architecture=sanitize(architecture_name),
       release_name=sanitize(release_name),
       base_release_name=re.sub("_[^_]*patch[0-9]*$", "", sanitize(payload["real_release_name"])),
       real_release_name=sanitize(payload["real_release_name"]),
       package=sanitize(payload["package"]),
       repository=sanitize(payload["repository"]),
       syncBack=payload["syncBack"] == True and "--sync-back" or "",
       ignoreErrors=payload["ignoreErrors"] == True and "-k" or "",
       tmpRepository=sanitize(payload["tmpRepository"]),
       task_id=task_id,
       jobs=opts.jobs,
       builders=opts.builders))
    getstatusoutput(format("echo 'Task %(task_id)s completed successfully.' >> %(workdir)s/log.%(task_id)s",
                           workdir=opts.workdir,
                           task_id=task_id))
  except Exception, e:
    log = open(format("%(workdir)s/log.%(task_id)s", workdir=opts.workdir, task_id=task_id)).read()
    log += "\nInterrupted externally."
    log += str(e)
    getstatusoutput(format("echo 'Interrupted externally' >> %(workdir)s/log.%(task_id)s",
                           workdir=opts.workdir,
                           task_id=task_id))
Example #33
 def __init__(self, id):
     self.sid = id
     self.variables = {}
     self.status = 0 # active = 0, failed = 1
     self.init_variables()
     self.locks = Lock(self.variables)
Example #34
    def get_devices(self):
        '''Get the list of registered locks'''
        api_url_lock = api_url_base + "my/lock"
        r = requests.get(api_url_lock,
                         headers=self._api_header,
                         timeout=self._timeout)
        _LOGGER.debug("Locks %s", r.json())
        result = r.json()["result"]

        for x in result:
            id = x["id"]
            name = x["name"]
            isConnected = x["isConnected"]
            state = self.assign_null_or_lock_state(x["lockProperties"])
            batteryLevel = self.assign_null_or_lock_batteryLevel(
                x["lockProperties"])
            isCharging = self.assign_null_or_lock_isCharging(
                x["lockProperties"])
            isEnabledPullSpring = x["deviceSettings"]["pullSpringEnabled"]
            durationPullSpring = x["deviceSettings"]["pullSpringDuration"]

            lock = Lock(name, id)
            lock.set_connected(isConnected)
            lock.set_state(state)
            lock.set_battery_level(batteryLevel)
            lock.set_is_charging(isCharging)
            lock.set_is_enabled_pullspring(isEnabledPullSpring)
            lock.set_duration_pullspring(durationPullSpring)

            self._lock_id = id
            '''store the found lock in _sensor_list and get the battery_level'''

            self._sensor_list.append(lock)

        if self._lock_id == None:
            raise TedeeClientException("No lock found")
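The Lock(name, id) objects built up in Examples #21 and #34 behave as plain data containers that the client appends to _sensor_list. A hypothetical minimal version exposing just the setters called above (the real tedee class may hold more state):

# Hypothetical data container matching the setter calls in Example #34;
# attribute names are assumptions.
class Lock(object):
    def __init__(self, name, id):
        self.name = name
        self.id = id
        self.is_connected = None
        self.state = None
        self.battery_level = None
        self.is_charging = None
        self.is_enabled_pullspring = None
        self.duration_pullspring = None

    def set_connected(self, value):
        self.is_connected = value

    def set_state(self, value):
        self.state = value

    def set_battery_level(self, value):
        self.battery_level = value

    def set_is_charging(self, value):
        self.is_charging = value

    def set_is_enabled_pullspring(self, value):
        self.is_enabled_pullspring = value

    def set_duration_pullspring(self, value):
        self.duration_pullspring = value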
Example #35
class Site:
    def __init__(self, id):
        self.sid = id
        self.variables = {}
        self.status = 0 # active = 0, failed = 1
        self.init_variables()
        self.locks = Lock(self.variables)


    def init_variables(self):
        for x in range(1, 21):
            vid = 'x' + str(x)
            if x % 2 == 1:
                if self.sid == x % 10 + 1:
                    self.variables[vid] = Variable(x)
            else:
                self.variables[vid] = Variable(x)

    def is_active(self):
        return self.status == 0

    def abort(self, transaction):
        self.locks.release_read_lock(transaction)
        self.locks.release_write_lock(transaction)

    def fail(self):
        if self.is_active():
            self.status = 1
            self.locks.release_all_locks()

    def read(self, transaction, vid):
        if self.variables[vid].status == 0:
            if self.locks.hold_read_lock(transaction, vid):
                print('Read', vid, '=', self.variables[vid].read_uncommited(transaction), 'at Site', self.sid, 'in', transaction.get_id())
                return
            else:
                ret = None
                if not transaction.is_readonly():
                    ret = self.locks.acquire_read_lock(transaction, vid)
                if ret != None:
                    return ret
                print('Read', vid, '=', self.variables[vid].read_commited(transaction), 'at Site', self.sid, 'in', transaction.get_id())
                return

    def write(self, transaction, vid, val):
        try:
            ret = None
            ret = self.locks.acquire_write_lock(transaction, vid)
            if ret != None:
                return ret
            self.variables[vid].write(transaction, val)
            return
        except WaitFor:
            return [WaitFor.args]

    def commit(self, transaction, timestamp):
        for vid in self.variables.keys():
            if self.locks.hold_write_lock(transaction, vid):
                self.variables[vid].commit(timestamp)
        self.locks.release_write_lock(transaction)
        self.locks.release_read_lock(transaction)

    def dump(self):
        ret = {}
        for vid in self.variables.keys():
            ret[vid] = self.variables[vid].read_commitvalue()
        return ret

    def dumpv(self, vid):
        ret = {}
        if vid in self.variables:
            ret[vid] = self.variables[vid].read_commitvalue()
        return ret

    def recover(self):
        if not self.is_active():
            for vid in self.variables.keys():
                if self.variables[vid].is_replicated():
                    self.variables[vid].recover()
            self.status = 0
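The Lock(self.variables) object that Site delegates to in Examples #33 and #35 is not listed. Below is a minimal sketch of per-variable read/write lock bookkeeping that supports the calls made above; the internal layout and the convention of returning the blocking transactions on conflict (rather than raising) are assumptions.

# Hypothetical per-variable read/write lock table behind Site in Examples
# #33 and #35. On conflict the acquire_* methods return the list of blocking
# transactions (so the caller can wait); on success they return None.
class Lock(object):
    def __init__(self, variables):
        # vid -> set of reading transactions / single writing transaction
        self.read_locks = {vid: set() for vid in variables}
        self.write_locks = {vid: None for vid in variables}

    def hold_read_lock(self, transaction, vid):
        return transaction in self.read_locks.get(vid, set())

    def hold_write_lock(self, transaction, vid):
        return self.write_locks.get(vid) is transaction

    def acquire_read_lock(self, transaction, vid):
        writer = self.write_locks.get(vid)
        if writer is not None and writer is not transaction:
            return [writer]          # blocked by the current writer
        self.read_locks.setdefault(vid, set()).add(transaction)
        return None

    def acquire_write_lock(self, transaction, vid):
        writer = self.write_locks.get(vid)
        readers = self.read_locks.get(vid, set()) - {transaction}
        if writer is not None and writer is not transaction:
            return [writer]          # blocked by another writer
        if readers:
            return list(readers)     # blocked by other readers
        self.write_locks[vid] = transaction
        return None

    def release_read_lock(self, transaction):
        for readers in self.read_locks.values():
            readers.discard(transaction)

    def release_write_lock(self, transaction):
        for vid, writer in self.write_locks.items():
            if writer is transaction:
                self.write_locks[vid] = None

    def release_all_locks(self):
        for readers in self.read_locks.values():
            readers.clear()
        for vid in self.write_locks:
            self.write_locks[vid] = None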