Beispiel #1
0
def login():
	"""Interactive BANK account login (Python 2).

	Refuses to run when 'person.info' is flagged as locked; otherwise
	prompts for account/password and locks the account after 3 failed
	attempts.

	NOTE(review): this block is corrupted -- the password-reading line
	was mangled during extraction, and the credential comparison plus
	the counter increment are missing. Do not reuse as-is.
	"""
	# hard stop when the account file is already locked
	if(lock.islock('person.info') == True):
		print "Your account is locked,please contact BANK to unlock"
		sys.exit()
	else:
		pass
	# read the stored credentials (one record per line)
	f_sec = open("person.info")
	informations=f_sec.readlines()
	f_sec.close()
	counter=0
	while True:
		user_input=raw_input("Please input your account: ").strip()
		pass_input = getpass.getpass("Please input your password: "******"Wrong account or password,please input again"
			
		# three strikes: lock the account file and bail out
		if(counter>=3):
			lock.lock('person.info')
			print "You use wrong account or password for 3 times,I will lock your account,please contact with BANK"
			sys.exit()
def main(arguments=None):
    """CLI entry point: pin the requirements file named in the arguments.

    When no argument mapping is supplied, parse sys.argv with docopt
    using the module docstring. Prints a success message, or an error
    message when the requirements file does not exist.
    """
    if not arguments:
        arguments = docopt(__doc__)
    reqs_path = arguments['<file>']
    endpoint = arguments.get('<index_url>', DEFAULT_PYPI_SIMPLE_ENDPOINT)
    try:
        lock(requirements_path=reqs_path, endpoint=endpoint)
    except FileNotFoundError:
        # the <file> argument pointed at a missing requirements file
        print(f'Could not find file to update: {reqs_path}')
    else:
        # only report success when lock() did not raise
        print(f'Done updating {reqs_path} using index {endpoint}')
Beispiel #3
0
def main():
    """Flush the notify, buildlogs and ftp queues (single-instance).

    Returns immediately when another "sending-files" run already holds
    the lock: the non-blocking acquisition yields None in that case.
    """
    # identity comparison: lock.lock() returns a lock object or None,
    # and "is None" is the correct/idiomatic test (PEP 8)
    if lock.lock("sending-files", non_block=1) is None:
        return
    init_conf()
    maybe_flush_queue(path.notify_queue_dir)
    maybe_flush_queue(path.buildlogs_queue_dir)
    maybe_flush_queue(path.ftp_queue_dir)
Beispiel #4
0
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock

    destlock is the lock taken on the destination store (None when no
    "data" entry was copied); the caller is responsible for releasing
    it. On any failure the lock is released here before re-raising.
    """
    destlock = None
    try:
        # hardlink=None lets util.copyfiles decide on the first call
        # whether hardlinking works, then reuses that answer
        hardlink = None
        num = 0
        srcpublishing = srcrepo.ui.configbool("phases", "publish", True)
        srcvfs = scmutil.vfs(srcrepo.sharedpath)
        dstvfs = scmutil.vfs(destpath)
        for f in srcrepo.store.copylist():
            # a publishing repo carries no interesting phase data
            if srcpublishing and f.endswith("phaseroots"):
                continue
            dstbase = os.path.dirname(f)
            if dstbase and not dstvfs.exists(dstbase):
                dstvfs.mkdir(dstbase)
            if srcvfs.exists(f):
                if f.endswith("data"):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, "lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f), hardlink)
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
        else:
            ui.debug("copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # never leak the destination lock on failure
        release(destlock)
        raise
Beispiel #5
0
 def lock(self, timeout=30, delay=1):
     """Serialize vgimport actions through a host-wide lock file.

     Acquires <pathlock>/vgimport (waiting up to *timeout* seconds,
     polling every *delay* seconds) and stores the lock fd in
     self.lockfd. Each known lock failure is logged with a specific
     message and converted to ex.excError.
     """
     import lock
     lockfile = os.path.join(rcEnv.paths.pathlock, 'vgimport')
     lockfd = None
     try:
         lockfd = lock.lock(timeout=timeout, delay=delay, lockfile=lockfile)
     except lock.LockTimeout:
         self.log.error("timed out waiting for lock (%s)" % lockfile)
         raise ex.excError
     except lock.LockNoLockFile:
         self.log.error("lock_nowait: set the 'lockfile' param")
         raise ex.excError
     except lock.LockCreateError:
         self.log.error("can not create lock file %s" % lockfile)
         raise ex.excError
     except lock.LockAcquire as e:
         self.log.warning("another action is currently running (pid=%s)" %
                          e.pid)
         raise ex.excError
     except ex.excSignal:
         self.log.error("interrupted by signal")
         raise ex.excError
     except:
         # unexpected failure: capture a traceback for support
         self.save_exc()
         raise ex.excError("unexpected locking error")
     self.lockfd = lockfd
def builders_order():
    """Return the binary builders sorted by their order in got-lock.

    Reads path.got_lock_file while holding the "got-lock" lock, orders
    the configured builders by the line number where each appears
    (builders absent from the file keep 0 and sort first), then
    rewrites the file in the computed order.

    NOTE(review): Python 2 only (xreadlines, has_key, cmp, string.strip).
    """
    bs = {}  # builder name -> line number in got-lock (0 = absent)
    bl = []  # all configured binary builders
    for b in config.binary_builders:
        bs[b] = 0
        bl.append(b)

    # hold the lock while reading and rewriting the shared file
    lck = lock.lock("got-lock")
    f = open(path.got_lock_file, "r+")
    line_no = 0

    for l in f.xreadlines():
        line_no += 1
        b = string.strip(l)
        if bs.has_key(b):
            bs[b] = line_no
        else:
            # an entry that is not one of the configured builders
            log.alert("found strange lock in got-lock: %s" % b)

    def mycmp(b1, b2):
        # sort key: line number recorded above
        return cmp(bs[b1], bs[b2])

    bl.sort(mycmp)

    # rewrite the file so it matches the computed order
    f.seek(0)
    f.truncate(0)
    for l in bl: f.write(l + "\n")
    f.close()
    lck.close()

    return bl
Beispiel #7
0
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock

    destlock is the lock taken on the destination store (None when no
    "data" entry was copied); the caller must release it. On failure
    the lock is released here before re-raising.
    '''
    destlock = None
    try:
        # hardlink=None lets util.copyfiles probe once whether
        # hardlinking works, then reuses that answer for later files
        hardlink = None
        num = 0
        srcpublishing = srcrepo.ui.configbool('phases', 'publish', True)
        for f in srcrepo.store.copylist():
            # publishing repos carry no interesting phase data
            if srcpublishing and f.endswith('phaseroots'):
                continue
            src = os.path.join(srcrepo.sharedpath, f)
            dst = os.path.join(destpath, f)
            dstbase = os.path.dirname(dst)
            if dstbase and not os.path.exists(dstbase):
                os.mkdir(dstbase)
            if os.path.exists(src):
                if dst.endswith('data'):
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(os.path.join(dstbase, "lock"))
                hardlink, n = util.copyfiles(src, dst, hardlink)
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
        else:
            ui.debug("copied %d files\n" % num)
        return destlock
    except:  # re-raises below
        # never leak the destination lock on failure
        release(destlock)
        raise
Beispiel #8
0
def main():
    """Flush the notify, buildlogs and ftp queues (single-instance).

    Returns immediately when another "sending-files" run already holds
    the lock: the non-blocking acquisition yields None in that case.
    """
    # identity comparison: lock.lock() returns a lock object or None,
    # and "is None" is the correct/idiomatic test (PEP 8)
    if lock.lock("sending-files", non_block=1) is None:
        return
    init_conf()
    maybe_flush_queue(path.notify_queue_dir)
    maybe_flush_queue(path.buildlogs_queue_dir)
    maybe_flush_queue(path.ftp_queue_dir)
def process(command):
    """Dispatch one text command and return the reply string.

    Mutates the module-global data_is_encrypted for 'encrypt'/'plain'.
    'exit' sends a farewell on the connection, closes everything, and
    returns None; every other command returns a reply string.
    """
    global data_is_encrypted
    if command == 'exit':
        conn.send("Exiting...\n")
        close_all()
        return None
    if command == 'encrypt':
        data_is_encrypted = True
        return "Using Encrypted Commands now.\n"
    if command == 'plain':
        data_is_encrypted = False
        return "Using Plain-Text Commands now.\n"
    if command == 'reboot please':
        # politeness required -- see the plain 'reboot' branch below
        reboot()
        return "You asked for it.\n"
    if command == 'reboot':
        return "If you ask nicely.\n"
    if command == 'lock':
        return lock.lock() + "\n"
    if command == 'unlock':
        return lock.unlock() + "\n"
    if command == 'attempt':
        return "Attempt " + str(attempt_num) + "\n"
    if command == 'hello':
        return "Hi.\n"
    return command + ' is not a recognized command\n'
Beispiel #10
0
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock

    destlock is the lock taken on the destination store (None when no
    "data" entry was copied); the caller must release it. On failure
    the lock is released here before re-raising.
    '''
    destlock = None
    try:
        # hardlink=None lets util.copyfiles probe once whether
        # hardlinking works, then reuses that answer for later files
        hardlink = None
        num = 0
        srcpublishing = srcrepo.ui.configbool('phases', 'publish', True)
        for f in srcrepo.store.copylist():
            # publishing repos carry no interesting phase data
            if srcpublishing and f.endswith('phaseroots'):
                continue
            src = os.path.join(srcrepo.sharedpath, f)
            dst = os.path.join(destpath, f)
            dstbase = os.path.dirname(dst)
            if dstbase and not os.path.exists(dstbase):
                os.mkdir(dstbase)
            if os.path.exists(src):
                if dst.endswith('data'):
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(os.path.join(dstbase, "lock"))
                hardlink, n = util.copyfiles(src, dst, hardlink)
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
        else:
            ui.debug("copied %d files\n" % num)
        return destlock
    except:  # re-raises below
        # never leak the destination lock on failure
        release(destlock)
        raise
Beispiel #11
0
        def decorator(*args, **kwargs):
            # Caching wrapper: results of fn(*args, **kwargs) are stored
            # in a file derived from the signature 'sig' (closure var).
            # When the first positional argument exposes a 'log' or
            # 'cache_sig_prefix' attribute (typically 'self'), they are
            # used for logging and signature namespacing respectively.
            if len(args) > 0 and hasattr(args[0], "log"):
                log = args[0].log
            else:
                log = None

            if len(args) > 0 and hasattr(args[0], "cache_sig_prefix"):
                _sig = args[0].cache_sig_prefix + sig
            else:
                # no prefix: interpolate the call arguments into sig
                _sig = sig.format(args=args, kwargs=kwargs)

            fpath = cache_fpath(_sig)

            try:
                lfd = lock.lock(timeout=30, delay=0.1, lockfile=fpath + '.lock', intent="cache")
            except Exception as e:
                # locking failed: degrade gracefully to an uncached call
                if log:
                    log.warning("cache locking error: %s. run command uncached." % str(e))
                return fn(*args, **kwargs)
            try:
                data = cache_get(fpath, log=log)
            except Exception as e:
                # cache miss or unreadable entry: compute and store
                if log:
                    log.debug(str(e))
                data = fn(*args, **kwargs)
                cache_put(fpath, data, log=log)
            lock.unlock(lfd)
            return data
Beispiel #12
0
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock

    destlock is the lock taken on the destination store (None when no
    "data" entry was copied); the caller is responsible for releasing
    it. On any failure the lock is released here before re-raising.
    '''
    destlock = None
    try:
        # hardlink=None lets util.copyfiles decide on the first call
        # whether hardlinking works, then reuses that answer
        hardlink = None
        num = 0
        srcpublishing = srcrepo.ui.configbool('phases', 'publish', True)
        srcvfs = scmutil.vfs(srcrepo.sharedpath)
        dstvfs = scmutil.vfs(destpath)
        for f in srcrepo.store.copylist():
            # a publishing repo carries no interesting phase data
            if srcpublishing and f.endswith('phaseroots'):
                continue
            dstbase = os.path.dirname(f)
            if dstbase and not dstvfs.exists(dstbase):
                dstvfs.mkdir(dstbase)
            if srcvfs.exists(f):
                if f.endswith('data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, "lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                             hardlink)
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
        else:
            ui.debug("copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # never leak the destination lock on failure
        release(destlock)
        raise
Beispiel #13
0
    def provisioner(self, need_boot=True):
        """provision zone
        - configure zone
        - if snapof and zone brand is native
           then create zonepath from snapshot of snapof
           then attach zone
        - if snapof and zone brand is ipkg
           then try to detect zone associated with snapof
           then define container_origin
        - if container_origin
           then clone  container_origin
        - create sysidcfg
        - if need_boot boot and wait multiuser
        """
        self.osver = get_solaris_version()
        self.zone_configure()

        # Solaris 11+ writes sysidcfg up front; older releases first
        # build the zonepath / detect the clone origin
        if self.osver >= 11:
            self.create_sysidcfg(self.r)
        else:
            if self.snapof is not None and self.r.brand == 'native':
                self.create_zonepath()
                # -F: force attach, the zonepath was built from a snapshot
                self.r.zoneadm("attach", ["-F"])
            elif self.snapof is not None and self.r.brand == 'ipkg':
                zones = rcZone.Zones()
                src_dataset = Dataset(self.snapof)
                zonepath = src_dataset.getprop('mountpoint')
                self.container_origin = zones.zonename_from_zonepath(
                    zonepath).zonename
                self.log.info("source zone is %s (detected from snapof %s)" %
                              (self.container_origin, self.snapof))

        if self.container_origin is not None:
            # serialize concurrent clones of the same origin zone
            lockname = 'create_zone2clone-' + self.container_origin
            lockfile = os.path.join(rcEnv.paths.pathlock, lockname)
            self.log.info("wait get lock %s" % (lockname))
            try:
                lockfd = lock.lock(timeout=1200, delay=5, lockfile=lockfile)
            except:
                raise (ex.excError("failure in get lock %s" % (lockname)))
            try:
                self.create_zone2clone()
            except:
                # release the lock before propagating the failure
                lock.unlock(lockfd)
                raise
            lock.unlock(lockfd)
            self.create_cloned_zone()

        # pre-11 releases write sysidcfg after the zone exists
        if self.osver < 11:
            self.create_sysidcfg(self.r)

        if need_boot is True:
            self.r.zone_boot()
            self.r.wait_multi_user()

        self.r.log.info("provisioned")
        return True
Beispiel #14
0
def daemon_test_lock():
    """Return True when a daemon already holds the daemon lock file.

    Probes with a non-blocking acquisition (timeout=0, delay=0): if it
    succeeds no daemon is running and False is returned; any failure is
    interpreted as "lock held" and True is returned. The probe lock is
    always released in the finally clause (lock.unlock is presumably
    tolerant of a None fd -- verify in the lock module).
    """
    lockfd = None
    try:
        lockfd = lock.lock(lockfile=rcEnv.paths.daemon_lock, timeout=0, delay=0)
    except:
        # any acquisition error counts as "daemon running"
        return True
    finally:
        lock.unlock(lockfd)
    return False
Beispiel #15
0
 def lock(self):
     """Take the daemon lock, exiting the process when already held.

     Stores the lock fd in self.lockfd. Any acquisition failure means
     another daemon instance owns rcEnv.paths.daemon_lock, so the
     process exits with status 1 after logging the error.
     """
     try:
         self.lockfd = lock(lockfile=rcEnv.paths.daemon_lock,
                            timeout=0,
                            delay=0)
     except Exception:
         self.log.error("a daemon is already running, and holding the "
                        "daemon lock")
         sys.exit(1)
Beispiel #16
0
 def worker():
     """Child-process body: try the lock and exit with the result.

     Exits with 255 when the lock is already held (lock.LockTimeout),
     otherwise exits with lock.lock()'s return value so the parent can
     assert on the exit code. tmp_file and timeout come from the
     enclosing scope.
     """
     import sys
     try:
         sys.exit(
             lock.lock(lockfile=tmp_file,
                       timeout=timeout,
                       intent="test"))
     except lock.LockTimeout:
         sys.exit(255)
def checkArgs(options, args):
	"""Given a list of options from the arguments (through an Optionparser),
	check them and act appropriately: set up logging, take the global
	lock, mount the backup media, then run either the GUI or the CLI.

	NOTE(review): Python 2 only (print statements). The lock is released
	only after the chosen front-end returns.
	"""
	# Handle varying log levels
	setupLogging(options.loglevel)
	
	# Create a lock object
	lk = lock.lock()
	# and acquire the lock
	lk.acquire()
	
	# Automount stuff
	mounter.autoMount()
	
	#If we're running the GUI
	if options.useGUI:
		logging.debug("Attempting to setup GUI.")
		try: #importing the gtk libraries
			import pygtk
			pygtk.require("2.0")
			import gobject
		except:
			# pygtk may be absent; the plain gtk import below decides
			pass
		try:
			import gtk
			import gtk.glade
			import gobject
		except:
			logging.error("Unable to start the GUI!")
			sys.exit("Cannot start GUI because Gtk is not available! Please make sure you are running in a graphical environment and that Gtk is installed.")
		# Now add a console logging handler
		addConsoleHandler()
		#Now that the libraries are imported, start the GUI
		logging.debug("Importing BackupGui for gui.")
		from backupgui import BackupGui
		hwg = BackupGui()
		logging.debug("Initializing gobject threading.")
		gobject.threads_init()
		logging.debug("Starting gtk.main().")
		gtk.main()
	#Otherwise, we're running the command line interface (CLI)
	else:
		#Import important methods for the CLI
		logging.debug("Importing BackupCLI for command-line environment.")
		from backupcli import BackupCLI
		logging.debug("Parsing input for CLI.")
		hwg = BackupCLI(options, args)
		logging.debug("Starting cli.")
		hwg.start()
		print "\n\n**********\nLog will be located at: %s \n**********\n\n" % global_log_file
		print "**********\nPLEASE SAVE THE LOG\n**********\n\n"
	
	# Release the lock file
	lk.release()
Beispiel #18
0
def web_unlock():
    """HTTP endpoint: drive the lock from 'token' and 'state' query args.

    Requires both args, checks the token against the one stored in
    Settings.conf, then applies the requested state (open/close/switch).
    Returns a short status string describing the outcome.
    """
    token_arg = request.args.get("token")
    state_arg = request.args.get("state")
    if not token_arg or not state_arg:
        return "Error"
    with shelve.open("Settings.conf") as settings:
        if "token" not in settings:
            return "System not setup !"
        token = settings["token"]
    if token_arg != token:
        return "Invalid Token"
    if state_arg == "open":
        lock.unlock()
    elif state_arg == "close":
        lock.lock()
    elif state_arg == "switch":
        lock.switch()
    else:
        return "Invalid State"
    return "Done"
Beispiel #19
0
def clear_cache(sig, o=None):
    """Remove the cached entry stored under signature *sig*.

    When *o* exposes a cache_sig_prefix attribute, the signature is
    prefixed the same way cached results were stored; an optional
    o.log gets a debug line. The per-entry lock is held around the
    unlink to avoid racing a concurrent cache writer.
    """
    if o and hasattr(o, "cache_sig_prefix"):
        sig = o.cache_sig_prefix + sig
    fpath = cache_fpath(sig)
    if not os.path.exists(fpath):
        # nothing cached under this signature
        return
    if o and hasattr(o, "log"):
        o.log.debug("cache CLEAR: %s" % fpath)
    lfd = lock.lock(timeout=30, delay=0.1, lockfile=fpath + '.lock')
    try:
        os.unlink(fpath)
    except OSError:
        # best-effort: the entry may have vanished since the exists()
        # check; only filesystem errors are expected here, so don't
        # swallow unrelated exceptions with a bare except
        pass
    finally:
        # always release the lock, even on an unexpected error
        lock.unlock(lfd)
Beispiel #20
0
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock

    destlock is the lock taken on the destination store (None when no
    "data" entry was copied); the caller must release it. Progress is
    reported through ui.progress via the prog callback.
    """
    destlock = None
    try:
        # hardlink=None lets util.copyfiles decide on the first call
        # whether hardlinking works, then reuses that answer
        hardlink = None
        num = 0
        # remembers the topic to close once copying is done
        closetopic = [None]

        def prog(topic, pos):
            # pos=None signals the end of a topic; record it so the
            # progress bar can be closed after the loop
            if pos is None:
                closetopic[0] = topic
            else:
                ui.progress(topic, pos + num)

        srcpublishing = srcrepo.publishing()
        srcvfs = scmutil.vfs(srcrepo.sharedpath)
        dstvfs = scmutil.vfs(destpath)
        for f in srcrepo.store.copylist():
            # a publishing repo carries no interesting phase data
            if srcpublishing and f.endswith("phaseroots"):
                continue
            dstbase = os.path.dirname(f)
            if dstbase and not dstvfs.exists(dstbase):
                dstvfs.mkdir(dstbase)
            if srcvfs.exists(f):
                if f.endswith("data"):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, "lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f), hardlink, progress=prog)
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        else:
            ui.debug("copied %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        return destlock
    except:  # re-raises
        # never leak the destination lock on failure
        release(destlock)
        raise
Beispiel #21
0
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock

    destlock is the lock taken on the destination store (None when no
    "data" entry was copied); the caller must release it. Progress is
    reported through ui.progress via the prog callback.
    '''
    destlock = None
    try:
        # hardlink=None lets util.copyfiles decide on the first call
        # whether hardlinking works, then reuses that answer
        hardlink = None
        num = 0
        # remembers the topic to close once copying is done
        closetopic = [None]
        def prog(topic, pos):
            # pos=None signals the end of a topic; record it so the
            # progress bar can be closed after the loop
            if pos is None:
                closetopic[0] = topic
            else:
                ui.progress(topic, pos + num)
        srcpublishing = srcrepo.publishing()
        srcvfs = scmutil.vfs(srcrepo.sharedpath)
        dstvfs = scmutil.vfs(destpath)
        for f in srcrepo.store.copylist():
            # a publishing repo carries no interesting phase data
            if srcpublishing and f.endswith('phaseroots'):
                continue
            dstbase = os.path.dirname(f)
            if dstbase and not dstvfs.exists(dstbase):
                dstvfs.mkdir(dstbase)
            if srcvfs.exists(f):
                if f.endswith('data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, "lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                             hardlink, progress=prog)
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        else:
            ui.debug("copied %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        return destlock
    except: # re-raises
        # never leak the destination lock on failure
        release(destlock)
        raise
Beispiel #22
0
    def test_lock_raise_lock_timeout_if_held_by_another_pid_real_multiprocess(
            tmp_file, timeout):
        """A second process must hit LockTimeout while we hold the lock.

        The worker child tries the same lockfile and exits 255 only on
        lock.LockTimeout; any other exit code fails the assertion.

        NOTE(review): defined at method indentation but takes no
        'self' -- tmp_file/timeout look like pytest fixtures; confirm
        the enclosing context.
        """
        def worker():
            import sys
            try:
                # on success, exit with the fd; on timeout, exit 255
                sys.exit(
                    lock.lock(lockfile=tmp_file,
                              timeout=timeout,
                              intent="test"))
            except lock.LockTimeout:
                sys.exit(255)

        # the parent takes the lock first; lock.lock returns a positive fd
        assert lock.lock(lockfile=tmp_file, timeout=timeout, intent="test") > 0
        from multiprocessing import Process
        proc = Process(target=worker)
        proc.start()
        proc.join()
        assert proc.exitcode == 255
Beispiel #23
0
 def lock(self):
     """Acquire the daemon lock non-blocking, storing the fd in self.lockfd.

     Each known failure mode of lock.lock() is reported with a specific
     message and converted to lock.lockError; unexpected errors get a
     traceback before the same conversion.
     """
     try:
         self.lockfd = lock.lock(timeout=0, delay=0, lockfile=self.lockfile)
     except lock.lockTimeout:
         print("timed out waiting for lock")
         raise lock.lockError
     except lock.lockNoLockFile:
         print("lock_nowait: set the 'lockfile' param")
         raise lock.lockError
     except lock.lockCreateError:
         # BUG FIX: 'lockfile' was an undefined local here (NameError
         # when this branch fired); the path lives on self.lockfile
         print("can not create lock file %s"%self.lockfile)
         raise lock.lockError
     except lock.lockAcquire as e:
         print("another daemon is currently running (pid=%s)"%e.pid)
         raise lock.lockError
     except:
         print("unexpected locking error")
         import traceback
         traceback.print_exc()
         raise lock.lockError
Beispiel #24
0
    def lock(self):
        """
        Acquire the startip lock, protecting against allocation of the same
        ipdev stacked device to multiple resources or multiple services.
        """
        # waitlock may be a duration string; a negative result means
        # "not set", so fall back to the 120s default
        timeout = convert_duration(self.svc.options.waitlock)
        if timeout < 0:
            timeout = 120
        delay = 1
        lockfd = None
        action = "startip"
        lockfile = os.path.join(rcEnv.paths.pathlock, action)
        # pre-built context string reused in every error message
        details = "(timeout %d, delay %d, action %s, lockfile %s)" % \
                  (timeout, delay, action, lockfile)
        self.log.debug("acquire startip lock %s", details)

        try:
            lockfd = lock.lock(timeout=timeout,
                               delay=delay,
                               lockfile=lockfile,
                               intent="startip")
        except lock.LockTimeout as exc:
            raise ex.excError("timed out waiting for lock %s: %s" %
                              (details, str(exc)))
        except lock.LockNoLockFile:
            raise ex.excError("lock_nowait: set the 'lockfile' param %s" %
                              details)
        except lock.LockCreateError:
            raise ex.excError("can not create lock file %s" % details)
        except lock.LockAcquire as exc:
            raise ex.excError("another action is currently running %s: %s" %
                              (details, str(exc)))
        except ex.excSignal:
            raise ex.excError("interrupted by signal %s" % details)
        except Exception as exc:
            # unexpected failure: capture a traceback for support
            self.save_exc()
            raise ex.excError("unexpected locking error %s: %s" %
                              (details, str(exc)))

        # only overwrite self.lockfd on a successful acquisition
        if lockfd is not None:
            self.lockfd = lockfd
Beispiel #25
0
    def reslock(self, action=None, timeout=30, delay=1, suffix=None):
        """
        Acquire the resource action lock.

        The lock file lives under the resource var dir, optionally
        suffixed; the fd is stored in self.lockfd. Every known lock
        failure is converted to ex.excError with full context.
        """
        if self.lockfd is not None:
            # already acquired
            return

        lockfile = os.path.join(self.var_d, "lock")
        if suffix is not None:
            lockfile = ".".join((lockfile, suffix))

        # pre-built context string reused in every error message
        details = "(timeout %d, delay %d, action %s, lockfile %s)" % \
                  (timeout, delay, action, lockfile)
        self.log.debug("acquire resource lock %s", details)

        try:
            lockfd = lock.lock(timeout=timeout,
                               delay=delay,
                               lockfile=lockfile,
                               intent=action)
        except lock.LockTimeout as exc:
            raise ex.excError("timed out waiting for lock %s: %s" %
                              (details, str(exc)))
        except lock.LockNoLockFile:
            raise ex.excError("lock_nowait: set the 'lockfile' param %s" %
                              details)
        except lock.LockCreateError:
            raise ex.excError("can not create lock file %s" % details)
        except lock.LockAcquire as exc:
            raise ex.excError("another action is currently running %s: %s" %
                              (details, str(exc)))
        except ex.excSignal:
            raise ex.excError("interrupted by signal %s" % details)
        except Exception as exc:
            # unexpected failure: capture a traceback for support
            self.save_exc()
            raise ex.excError("unexpected locking error %s: %s" %
                              (details, str(exc)))

        # only overwrite self.lockfd on a successful acquisition
        if lockfd is not None:
            self.lockfd = lockfd
Beispiel #26
0
def main():
    """Drain the src-rpm build queue, one request per iteration.

    Single-instance guarded by the "building-srpm" lock: a non-blocking
    acquisition returning None means another builder already runs. Each
    pass locks the queue, picks one request, rewrites the queue, then
    handles the request outside the queue lock.
    """
    init_conf("src")
    # identity comparison: the non-blocking lock yields an object or
    # None, and "is None" is the correct/idiomatic test (PEP 8)
    if lock("building-srpm", non_block=1) is None:
        return
    while True:
        status.push("srpm: processing queue")
        q = B_Queue(path.queue_file)
        # someone else owns the queue; retry on the next run
        if not q.lock(1):
            status.pop()
            return
        q.read()
        if q.requests == []:
            q.unlock()
            status.pop()
            return
        r = pick_request(q)
        q.write()
        q.unlock()
        status.pop()
        status.push("srpm: handling request from %s" % r.requester)
        handle_request(r)
        status.pop()
Beispiel #27
0
def main():
    """Drain the src-rpm build queue, one request per iteration.

    Single-instance guarded by the "building-srpm" lock: a non-blocking
    acquisition returning None means another builder already runs. Each
    pass locks the queue, picks one request, rewrites the queue, then
    handles the request outside the queue lock.
    """
    init_conf("src")
    # identity comparison: the non-blocking lock yields an object or
    # None, and "is None" is the correct/idiomatic test (PEP 8)
    if lock("building-srpm", non_block=1) is None:
        return
    while True:
        status.push("srpm: processing queue")
        q = B_Queue(path.queue_file)
        # someone else owns the queue; retry on the next run
        if not q.lock(1):
            status.pop()
            return
        q.read()
        if q.requests == []:
            q.unlock()
            status.pop()
            return
        r = pick_request(q)
        q.write()
        q.unlock()
        status.pop()
        status.push("srpm: handling request from %s" % r.requester)
        handle_request(r)
        status.pop()
def main():
    """Fetch new build requests and dispatch them to binary builders.

    Single instance enforced via the "request_fetcher" lock; exits with
    status 1 when another fetcher runs. Requests newer than last_count
    are handed to every configured binary builder, and the highest
    request number seen is persisted in path.last_req_no_file.
    """
    lck = lock.lock("request_fetcher", non_block=True)
    # identity comparison: the non-blocking lock yields an object or
    # None, and "is None" is the correct/idiomatic test (PEP 8)
    if lck is None:
        sys.exit(1)
    init_conf()
    acl.try_reload()

    status.push("fetching requests")
    if has_new(config.control_url):
        q = fetch_queue(config.control_url)
        max_no = 0
        q_new = []
        for r in q:
            if r.no > max_no:
                max_no = r.no
            # only forward requests newer than the last processed one
            if r.no > last_count:
                q_new.append(r)
        for b in config.binary_builders:
            handle_reqs(b, q_new)
        f = open(path.last_req_no_file, "w")
        f.write("%d\n" % max_no)
        f.close()
    status.pop()
    lck.close()
Beispiel #29
0
    def lock(self, action=None, timeout=0, delay=1):
        """
        Acquire the app action lock.

        The fd is stored in self.lockfd; known lock failures become
        ex.excError with full context.
        """
        if self.lockfd is not None:
            # already acquired
            return

        # pre-built context string reused in every error message
        details = "(timeout %d, delay %d, action %s, lockfile %s)" % \
                  (timeout, delay, action, self.lockfile)
        self.log.debug("acquire app lock %s", details)
        lockfd = None
        try:
            lockfd = lock.lock(timeout=timeout,
                               delay=delay,
                               lockfile=self.lockfile,
                               intent=action)
        except lock.LockTimeout as exc:
            raise ex.excError("timed out waiting for lock %s: %s" %
                              (details, str(exc)))
        except lock.LockNoLockFile:
            raise ex.excError("lock_nowait: set the 'lockfile' param %s" %
                              details)
        except lock.LockCreateError:
            raise ex.excError("can not create lock file %s" % details)
        except lock.LockAcquire as exc:
            raise ex.excError("another action is currently running %s: %s" %
                              (details, str(exc)))
        except ex.excSignal:
            # NOTE(review): unlike the other handlers this one only logs
            # and falls through -- self.lockfd stays None; confirm this
            # is intentional
            self.log.info("interrupted by signal %s" % details)
        except Exception as exc:
            # unexpected failure: capture a traceback for support
            self.save_exc()
            raise ex.excError("unexpected locking error %s: %s" %
                              (details, str(exc)))
        finally:
            # only overwrite self.lockfd on a successful acquisition
            if lockfd is not None:
                self.lockfd = lockfd
def main():
    """Fetch new build requests and dispatch them to binary builders.

    Single instance enforced via the "request_fetcher" lock; exits with
    status 1 when another fetcher runs. Requests newer than last_count
    are handed to every configured binary builder, and the highest
    request number seen is persisted in path.last_req_no_file.
    """
    lck = lock.lock("request_fetcher", non_block=True)
    # identity comparison: the non-blocking lock yields an object or
    # None, and "is None" is the correct/idiomatic test (PEP 8)
    if lck is None:
        sys.exit(1)
    init_conf()
    acl.try_reload()

    status.push("fetching requests")
    if has_new(config.control_url):
        q = fetch_queue(config.control_url)
        max_no = 0
        q_new = []
        for r in q:
            if r.no > max_no:
                max_no = r.no
            # only forward requests newer than the last processed one
            if r.no > last_count:
                q_new.append(r)
        for b in config.binary_builders:
            handle_reqs(b, q_new)
        f = open(path.last_req_no_file, "w")
        f.write("%d\n" % max_no)
        f.close()
    status.pop()
    lck.close()
Beispiel #31
0
 def test_lock_acquire_release_context_manager(self):
     """The lock reports locked inside the with-block and leaves no
     lock documents in the collection after release."""
     with lock(self.collection, 'test1') as l:
         self.assertTrue(l.locked)
     # release on __exit__ must remove the backing document
     self.assertEqual(self.collection.find().count(), 0)
Beispiel #32
0
def clone(ui,
          source,
          dest=None,
          pull=False,
          rev=None,
          update=True,
          stream=False):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository objects, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    rev: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)
    """

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, rev, checkout = parseurl(origsource, rev)
        src_repo = repository(ui, source)
    else:
        src_repo = source
        origsource = source = src_repo.url()
        checkout = rev and rev[-1] or None

    if dest is None:
        dest = defaultdest(source)
        ui.status(_("destination directory: %s\n") % dest)

    dest = localpath(dest)
    source = localpath(source)

    if os.path.exists(dest):
        if not os.path.isdir(dest):
            raise util.Abort(_("destination '%s' already exists") % dest)
        elif os.listdir(dest):
            raise util.Abort(_("destination '%s' is not empty") % dest)

    class DirCleanup(object):
        def __init__(self, dir_):
            self.rmtree = shutil.rmtree
            self.dir_ = dir_

        def close(self):
            self.dir_ = None

        def cleanup(self):
            if self.dir_:
                self.rmtree(self.dir_, True)

    src_lock = dest_lock = dir_cleanup = None
    try:
        if islocal(dest):
            dir_cleanup = DirCleanup(dest)

        abspath = origsource
        copy = False
        if src_repo.cancopy() and islocal(dest):
            abspath = os.path.abspath(util.drop_scheme('file', origsource))
            copy = not pull and not rev

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                src_lock = src_repo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            src_repo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                dir_cleanup.dir_ = hgdir
            try:
                dest_path = hgdir
                os.mkdir(dest_path)
            except OSError, inst:
                if inst.errno == errno.EEXIST:
                    dir_cleanup.close()
                    raise util.Abort(
                        _("destination '%s' already exists") % dest)
                raise

            for f in src_repo.store.copylist():
                src = os.path.join(src_repo.path, f)
                dst = os.path.join(dest_path, f)
                dstbase = os.path.dirname(dst)
                if dstbase and not os.path.exists(dstbase):
                    os.mkdir(dstbase)
                if os.path.exists(src):
                    if dst.endswith('data'):
                        # lock to avoid premature writing to the target
                        dest_lock = lock.lock(os.path.join(dstbase, "lock"))
                    util.copyfiles(src, dst)

            # we need to re-init the repo after manually copying the data
            # into it
            dest_repo = repository(ui, dest)
            src_repo.hook('outgoing', source='clone', node='0' * 40)
        else:
Beispiel #33
0
def clone(ui, source, dest=None, pull=False, rev=None, update=True,
          stream=False, branch=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository objects, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    rev: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone
    """

    # Accept either a URL string or an already-open repository object.
    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branch = parseurl(origsource, branch)
        src_repo = repository(ui, source)
    else:
        src_repo = source
        origsource = source = src_repo.url()
    rev, checkout = addbranchrevs(src_repo, src_repo, branch, rev)

    if dest is None:
        dest = defaultdest(source)
        ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = localpath(dest)
    source = localpath(source)

    # An existing destination is only acceptable if it is an empty directory.
    if os.path.exists(dest):
        if not os.path.isdir(dest):
            raise util.Abort(_("destination '%s' already exists") % dest)
        elif os.listdir(dest):
            raise util.Abort(_("destination '%s' is not empty") % dest)

    # Removes a partially created clone on failure; close() disarms it
    # once the clone is known to be good.
    class DirCleanup(object):
        def __init__(self, dir_):
            self.rmtree = shutil.rmtree
            self.dir_ = dir_
        def close(self):
            self.dir_ = None
        def cleanup(self):
            if self.dir_:
                self.rmtree(self.dir_, True)

    src_lock = dest_lock = dir_cleanup = None
    try:
        if islocal(dest):
            dir_cleanup = DirCleanup(dest)

        abspath = origsource
        copy = False
        # Raw store copying is only possible for local source and dest.
        if src_repo.cancopy() and islocal(dest):
            abspath = os.path.abspath(util.drop_scheme('file', origsource))
            copy = not pull and not rev

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                src_lock = src_repo.lock(wait=False)
            except error.LockError:
                # Source is busy; fall back to the non-copy clone path.
                copy = False

        if copy:
            src_repo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                dir_cleanup.dir_ = hgdir
            try:
                dest_path = hgdir
                os.mkdir(dest_path)
            except OSError, inst:
                if inst.errno == errno.EEXIST:
                    dir_cleanup.close()
                    raise util.Abort(_("destination '%s' already exists")
                                     % dest)
                raise

            # Copy the store files straight from the source repository.
            for f in src_repo.store.copylist():
                src = os.path.join(src_repo.sharedpath, f)
                dst = os.path.join(dest_path, f)
                dstbase = os.path.dirname(dst)
                if dstbase and not os.path.exists(dstbase):
                    os.mkdir(dstbase)
                if os.path.exists(src):
                    if dst.endswith('data'):
                        # lock to avoid premature writing to the target
                        dest_lock = lock.lock(os.path.join(dstbase, "lock"))
                    util.copyfiles(src, dst)

            # we need to re-init the repo after manually copying the data
            # into it
            dest_repo = repository(ui, dest)
            src_repo.hook('outgoing', source='clone', node='0'*40)
        else:  # NOTE(review): snippet truncated here — non-copy branch body missing
Beispiel #34
0
 def ManageLock(self):
     """Generator context: hold the backup file lock around the yield.

     Acquires the lock on ``self.lockfile`` (labelled 'dupinanny backup'),
     yields control to the caller, and releases the lock afterwards.

     The release is wrapped in try/finally so the lock is freed even if
     the caller raises while this generator is suspended at the yield
     (including GeneratorExit when the generator is closed early) —
     without it, an exception in the guarded region would leak the lock.
     """
     filelock = lock.lock(self.lockfile, 'dupinanny backup')
     filelock.acquire(wait=None, expire=None)
     try:
         yield
     finally:
         # Always release, even on error in the suspended region.
         filelock.release()
Beispiel #35
0
def clone(ui,
          source,
          dest=None,
          pull=False,
          rev=None,
          update=True,
          stream=False):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository objects, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    rev: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository
    """

    # Accept either a URL string or an already-open repository object.
    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, rev, checkout = parseurl(origsource, rev)
        src_repo = repository(ui, source)
    else:
        src_repo = source
        origsource = source = src_repo.url()
        checkout = None

    if dest is None:
        dest = defaultdest(source)
        ui.status(_("destination directory: %s\n") % dest)

    # Strip file: scheme prefixes to get a plain filesystem path.
    def localpath(path):
        if path.startswith('file://localhost/'):
            return path[16:]
        if path.startswith('file://'):
            return path[7:]
        if path.startswith('file:'):
            return path[5:]
        return path

    dest = localpath(dest)
    source = localpath(source)

    if os.path.exists(dest):
        raise util.Abort(_("destination '%s' already exists") % dest)

    # Removes a partially created clone on failure (via __del__);
    # close() disarms it once the clone is known to be good.
    class DirCleanup(object):
        def __init__(self, dir_):
            self.rmtree = shutil.rmtree
            self.dir_ = dir_

        def close(self):
            self.dir_ = None

        def __del__(self):
            if self.dir_:
                self.rmtree(self.dir_, True)

    src_lock = dest_lock = dir_cleanup = None
    try:
        if islocal(dest):
            dir_cleanup = DirCleanup(dest)

        abspath = origsource
        copy = False
        # Raw store copying is only possible for local source and dest.
        if src_repo.cancopy() and islocal(dest):
            abspath = os.path.abspath(util.drop_scheme('file', origsource))
            copy = not pull and not rev

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                src_lock = src_repo.lock()
            except lock.LockException:
                copy = False

        if copy:

            def force_copy(src, dst):
                if not os.path.exists(src):
                    # Tolerate empty source repository and optional files
                    return
                util.copyfiles(src, dst)

            src_store = os.path.realpath(src_repo.spath)
            if not os.path.exists(dest):
                os.mkdir(dest)
            try:
                dest_path = os.path.realpath(os.path.join(dest, ".hg"))
                os.mkdir(dest_path)
            except OSError, inst:
                if inst.errno == errno.EEXIST:
                    dir_cleanup.close()
                    raise util.Abort(
                        _("destination '%s' already exists") % dest)
                raise
            if src_repo.spath != src_repo.path:
                # XXX racy
                dummy_changelog = os.path.join(dest_path, "00changelog.i")
                # copy the dummy changelog
                force_copy(src_repo.join("00changelog.i"), dummy_changelog)
                dest_store = os.path.join(dest_path, "store")
                os.mkdir(dest_store)
            else:
                dest_store = dest_path
            # copy the requires file
            force_copy(src_repo.join("requires"),
                       os.path.join(dest_path, "requires"))
            # we lock here to avoid premature writing to the target
            dest_lock = lock.lock(os.path.join(dest_store, "lock"))

            files = ("data", "00manifest.d", "00manifest.i", "00changelog.d",
                     "00changelog.i")
            for f in files:
                src = os.path.join(src_store, f)
                dst = os.path.join(dest_store, f)
                force_copy(src, dst)

            # we need to re-init the repo after manually copying the data
            # into it
            dest_repo = repository(ui, dest)

        else:  # NOTE(review): snippet truncated here — non-copy branch body missing
Beispiel #36
0
def f_lock():
    # Delegate to the lock module's lock() helper; the return value is
    # discarded, so presumably only the side effect matters — confirm
    # against the lock module's semantics.
    lock.lock()
Beispiel #37
0
""""""

from ExperienceSampling.App import App
import sys, os, appdirs
from PyQt5.QtWidgets import QApplication, QErrorMessage
from lock import lock

lockfolder = appdirs.user_data_dir('ExperienceSampling', 'UniBA')
lockfile = os.path.join(lockfolder,'lockfile')

if not os.path.exists(lockfolder):
    os.makedirs(lockfolder)

# display an error if other instances are running
if lock(lockfile):
    app = QApplication([])
    error_dialog = QErrorMessage()
    error_dialog.showMessage("Application already running!")
    sys.exit(app.exec_())

try:
    # the content of "timer.txt" defines the default timer
    with open("timer.txt", 'r') as file:
        timer = file.read()
    timer = int(timer)
    app = App(pollTime=timer)
except (ValueError, FileNotFoundError):
    # if "timer.txt" is not present, the default timer is set to 60'
    app = App()

sys.exit(app.exec_())
Beispiel #38
0
def main_for(builder):
    """Pick one queued request for *builder*, build it, then dequeue it.

    Reads the per-builder on-disk queue, selects a request, enforces the
    per-builder and per-slot build locks for normal-priority requests
    (priority >= 0), hands the request to handle_request(), and finally
    removes the handled request from the queue.  High-priority requests
    (priority < 0) skip the lock/slot limits entirely.
    """
    msg = ""

    init_conf(builder)

    q = B_Queue(path.queue_file + "-" + config.builder)
    q.lock(0)
    q.read()
    if q.requests == []:
        q.unlock()
        return
    req = pick_request(q)
    q.unlock()

    # high priority tasks have priority < 0, normal tasks >= 0
    if req.priority >= 0:

        # allow only one build in given builder at once
        if not lock.lock("building-rpm-for-%s" % config.builder, non_block = 1):
            return
        # don't kill server
        check_load()
        # not more then job_slots builds at once
        locked = 0
        for slot in range(config.job_slots):
            if lock.lock("building-rpm-slot-%d" % slot, non_block = 1):
                locked = 1
                break
        if not locked:
            return

        # record fact that we got lock for this builder, load balancer
        # will use it for fair-queuing
        l = lock.lock("got-lock")
        f = open(path.got_lock_file, "a")
        f.write(config.builder + "\n")
        f.close()
        l.close()
    else:
        msg = "HIGH PRIORITY: "

    msg += "handling request %s (%d) for %s from %s, priority %s" \
            % (req.id, req.no, config.builder, req.requester, req.priority)
    log.notice(msg)
    status.push(msg)
    handle_request(req)
    status.pop()

    # Keep every request except the one just handled.
    def otherreqs(r):
        if r.no==req.no:
            return False
        else:
            return True

    # Re-read the queue under its lock and drop the handled request.
    # NOTE: filter() returns a list here — this is Python 2 code.
    q = B_Queue(path.queue_file + "-" + config.builder)
    q.lock(0)
    q.read()
    previouslen=len(q.requests)
    q.requests=filter(otherreqs, q.requests)
    if len(q.requests)<previouslen:
        q.write()
    q.unlock()
Beispiel #39
0
 def test_create_lock_dir_if_absent(tmp_path):
     """Locking must succeed even when the lock file's directory is missing."""
     target = os.path.join(str(tmp_path), 'lockdir', 'lockfile')
     assert lock.lock(lockfile=target) > 0
Beispiel #40
0
def main_for(builder):
    """Pick one queued request for *builder*, build it, then dequeue it.

    Reads the per-builder on-disk queue, selects a request, enforces the
    per-builder and per-slot build locks for normal-priority requests
    (priority >= 0), hands the request to handle_request(), and finally
    removes the handled request from the queue.  High-priority requests
    (priority < 0) skip the lock/slot limits entirely.
    """
    msg = ""

    init_conf(builder)

    queue = B_Queue(path.queue_file + "-" + config.builder)
    queue.lock(0)
    queue.read()
    if queue.requests == []:
        queue.unlock()
        return
    req = pick_request(queue)
    queue.unlock()

    # Requests with priority < 0 are high priority; everything else is
    # subject to the builder/slot locking below.
    if req.priority >= 0:

        # Only one build per builder at a time.
        if not lock.lock("building-rpm-for-%s" % config.builder, non_block=1):
            return
        # Refuse to start when the machine is already overloaded.
        check_load()
        # At most config.job_slots concurrent builds on this host.
        got_slot = 0
        for slot in range(config.job_slots):
            if lock.lock("building-rpm-slot-%d" % slot, non_block=1):
                got_slot = 1
                break
        if not got_slot:
            return

        # Record that this builder got the lock; the load balancer uses
        # the file for fair queuing.
        got_lock = lock.lock("got-lock")
        record = open(path.got_lock_file, "a")
        record.write(config.builder + "\n")
        record.close()
        got_lock.close()
    else:
        msg = "HIGH PRIORITY: "

    msg += "handling request %s (%d) for %s from %s, priority %s" \
            % (req.id, req.no, config.builder, req.requester, req.priority)
    log.notice(msg)
    status.push(msg)
    handle_request(req)
    status.pop()

    # Re-read the queue under its lock and drop the handled request.
    queue = B_Queue(path.queue_file + "-" + config.builder)
    queue.lock(0)
    queue.read()
    before = len(queue.requests)
    queue.requests = [other for other in queue.requests if other.no != req.no]
    if len(queue.requests) < before:
        queue.write()
    queue.unlock()
def handle_group(r, user):
    """Validate a group request *r* on behalf of *user* and queue it.

    Checks ACLs for every batch in the request (src, upgrade, and
    per-builder binary/command/ready permissions) and, if everything
    passes, appends the request to the on-disk queue.  On any failure
    the requester is notified by mail and the request is dropped.  The
    whole operation runs under the "request" lock.
    """
    lockf = None
    # Notify the requester (cc the builder list) that queuing failed.
    def fail_mail(msg):
        if len(r.batches) >= 1:
            spec = r.batches[0].spec
        else:
            spec = "None.spec"
        log.error("%s: %s" % (spec, msg))
        m = Message()
        m.set_headers(to = r.requester_email, cc = config.builder_list)
        m.set_headers(subject = "building %s failed" % spec)
        m.write_line(msg)
        m.send()

    lockf = lock("request")
    if check_double_id(r.id):
        lockf.close()
        return

    # Resolve the effective requester; an unknown login keeps the
    # original requester string, tagged with the submitting user's login.
    try:
        if (user.change_requester and r.requester):
            user = acl.user_by_login(r.requester)
    except KeyError:
            r.requester += '/' + user.get_login()
    else:
        r.requester = user.get_login()
        r.requester_email = user.mail_to()

    for batch in r.batches:

        if not user.can_do("src", config.builder, batch.branch):
            fail_mail("user %s is not allowed to src:%s:%s" \
                        % (user.get_login(), config.builder, batch.branch))
            lockf.close()
            return

        if 'test-build' in r.flags and 'upgrade' in r.flags:
            fail_mail("it's forbidden to upgrade from a test build")
            lockf.close()
            return

        if "upgrade" in r.flags and not user.can_do("upgrade", config.builder, batch.branch):
            fail_mail("user %s is not allowed to upgrade:%s:%s" \
                        % (user.get_login(), config.builder, batch.branch))
            lockf.close()
            return

        # src builder handles only special commands
        if batch.is_command() and (batch.command in ["git pull"] or batch.command[:5] == "skip:"  or config.builder in batch.builders):
            batch.expand_builders(config.binary_builders + [config.src_builder])
        else:
            batch.expand_builders(config.binary_builders)

        if not batch.is_command() and config.builder in batch.builders:
            batch.builders.remove(config.builder)

        # Per-builder permission checks; '?' marks "status unknown yet".
        for bld in batch.builders:
            batch.builders_status[bld] = '?'
            batch.builders_status_time[bld] = time.time()
            if bld not in config.binary_builders and bld != config.builder:
                fail_mail("I (src rpm builder '%s') do not handle binary builder '%s', only '%s'" % \
                        (config.builder, bld, string.join(config.binary_builders)))
                lockf.close()
                return
            if batch.is_command():
                if "no-chroot" in batch.command_flags:
                    if not user.can_do("command-no-chroot", bld):
                        fail_mail("user %s is not allowed to command-no-chroot:%s" \
                                % (user.get_login(), bld))
                        lockf.close()
                        return
                if not user.can_do("command", bld):
                    fail_mail("user %s is not allowed to command:%s" \
                                % (user.get_login(), bld))
                    lockf.close()
                    return
            elif not user.can_do("binary", bld, batch.branch):
                pkg = batch.spec
                if pkg.endswith(".spec"):
                    pkg = pkg[:-5]
                if not user.can_do("binary-" + pkg, bld, batch.branch):
                    fail_mail("user %s is not allowed to binary-%s:%s:%s" \
                                % (user.get_login(), pkg, bld, batch.branch))
                    lockf.close()
                    return
            if not "test-build" in r.flags and not user.can_do("ready", bld, batch.branch):
                   fail_mail("user %s is not allowed to send ready builds (ready:%s:%s)" \
                        % (user.get_login(), bld, batch.branch))
                   lockf.close()
                   return

    # All checks passed: stamp and append the request to the queue.
    r.priority = user.check_priority(r.priority,config.builder)
    r.time = time.time()
    log.notice("queued %s from %s" % (r.id, user.get_login()))
    q = B_Queue(path.queue_file)
    q.lock(0)
    q.read()
    q.add(r)
    q.write()
    q.unlock()
    lockf.close()
Beispiel #42
0
 def test_lock_acquire_release_context_manager(self):
     """The lock is held inside the with-block and cleaned up afterwards."""
     with lock(self.collection, 'test1') as held:
         self.assertTrue(held.locked)
     self.assertEqual(self.collection.find().count(), 0)
Beispiel #43
0
def handle_group(r, user):
    """Validate a group request *r* on behalf of *user* and queue it.

    Checks ACLs for every batch in the request (src, upgrade, per-builder
    binary/command/ready permissions, and the package blacklist) and, if
    everything passes, appends the request to the on-disk queue.  On any
    failure the requester is notified by mail and the request is dropped.
    The whole operation runs under the "request" lock.
    """
    lockf = None

    # Notify the requester (cc the builder list) that queuing failed.
    def fail_mail(msg):
        if len(r.batches) >= 1:
            spec = r.batches[0].spec
        else:
            spec = "None.spec"
        log.error("%s: %s" % (spec, msg))
        m = Message()
        m.set_headers(to=r.requester_email, cc=config.builder_list)
        m.set_headers(subject="building %s failed" % spec)
        m.write_line(msg)
        m.send()

    lockf = lock("request")
    if check_double_id(r.id):
        lockf.close()
        return

    # Resolve the effective requester; an unknown login keeps the
    # original requester string, tagged with the submitting user's login.
    try:
        if (user.change_requester and r.requester):
            user = acl.user_by_login(r.requester)
    except KeyError:
        r.requester += '/' + user.get_login()
    else:
        r.requester = user.get_login()
        r.requester_email = user.mail_to()

    for batch in r.batches:

        if not user.can_do("src", config.builder, batch.branch):
            fail_mail("user %s is not allowed to src:%s:%s" \
                        % (user.get_login(), config.builder, batch.branch))
            lockf.close()
            return

        if 'test-build' in r.flags and 'upgrade' in r.flags:
            fail_mail("it's forbidden to upgrade from a test build")
            lockf.close()
            return

        if "upgrade" in r.flags and not user.can_do("upgrade", config.builder,
                                                    batch.branch):
            fail_mail("user %s is not allowed to upgrade:%s:%s" \
                        % (user.get_login(), config.builder, batch.branch))
            lockf.close()
            return

        # src builder handles only special commands
        if batch.is_command() and (batch.command in ["git pull"]
                                   or batch.command[:5] == "skip:"
                                   or config.builder in batch.builders):
            batch.expand_builders(config.binary_builders +
                                  [config.src_builder])
        else:
            batch.expand_builders(config.binary_builders)

        if not batch.is_command() and config.builder in batch.builders:
            batch.builders.remove(config.builder)

        # Per-builder permission checks; '?' marks "status unknown yet".
        for bld in batch.builders:
            batch.builders_status[bld] = '?'
            batch.builders_status_time[bld] = time.time()
            if bld not in config.binary_builders and bld != config.builder:
                fail_mail("I (src rpm builder '%s') do not handle binary builder '%s', only '%s'" % \
                        (config.builder, bld, string.join(config.binary_builders)))
                lockf.close()
                return
            if batch.is_command():
                if "no-chroot" in batch.command_flags:
                    if not user.can_do("command-no-chroot", bld):
                        fail_mail("user %s is not allowed to command-no-chroot:%s" \
                                % (user.get_login(), bld))
                        lockf.close()
                        return
                if not user.can_do("command", bld):
                    fail_mail("user %s is not allowed to command:%s" \
                                % (user.get_login(), bld))
                    lockf.close()
                    return
            elif not user.can_do("binary", bld, batch.branch):
                pkg = batch.spec
                if pkg.endswith(".spec"):
                    pkg = pkg[:-5]
                if not user.can_do("binary-" + pkg, bld, batch.branch):
                    fail_mail("user %s is not allowed to binary-%s:%s:%s" \
                                % (user.get_login(), pkg, bld, batch.branch))
                    lockf.close()
                    return
            if not "test-build" in r.flags and not user.can_do(
                    "ready", bld, batch.branch):
                fail_mail("user %s is not allowed to send ready builds (ready:%s:%s)" \
                     % (user.get_login(), bld, batch.branch))
                lockf.close()
                return

            # Blacklisted packages may only be test-built.
            pkg = batch.spec
            if pkg.endswith(".spec"):
                pkg = pkg[:-5]
            if not "test-build" in r.flags and blacklist.package(pkg):
                fail_mail(
                    "package '%s' is blacklisted, only test-builds allowed" %
                    pkg)
                lockf.close()
                return

    # All checks passed: stamp and append the request to the queue.
    r.priority = user.check_priority(r.priority, config.builder)
    r.time = time.time()
    log.notice("queued %s from %s" % (r.id, user.get_login()))
    q = B_Queue(path.queue_file)
    q.lock(0)
    q.read()
    q.add(r)
    q.write()
    q.unlock()
    lockf.close()