Example #1
def __log_into_file(filename, mode, data, language, final=False):
    if version() == 2:

        if _builder(_paths(), default_paths())["tmp_path"] in filename:
            if not final:
                flock = lockfile.FileLock(filename)
                flock.acquire()
            with open(filename, mode) as save:
                save.write(data + '\n')
            if not final:
                flock.release()
        else:
            if final:
                with open(filename, mode) as save:
                    save.write(data + '\n')
            else:
                submit_logs_to_db(language, data)

    else:

        if _builder(_paths(), default_paths())["tmp_path"] in filename:
            if not final:
                flock = lockfile.FileLock(filename)
                flock.acquire()
            with open(filename, mode, encoding='utf-8') as save:
                save.write(data + '\n')
            if not final:
                flock.release()
        else:
            if final:
                with open(filename, mode, encoding='utf-8') as save:
                    save.write(data + '\n')
            else:
                submit_logs_to_db(language, data)
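
Note: the version() branch above exists only because Python 2's built-in open lacks an encoding argument; io.open behaves like Python 3's open on both interpreters. A minimal consolidated sketch of the same lock-then-write pattern follows (the helper name and the try/finally are additions, so the lock is released even if the write raises; text mode and UTF-8 are assumed):

import io
import lockfile

def locked_append(filename, data):
    # FileLock(filename) locks "<filename>.lock", so the data file itself
    # can be opened normally while the lock is held
    flock = lockfile.FileLock(filename)
    flock.acquire()
    try:
        with io.open(filename, 'a', encoding='utf-8') as save:
            save.write(data + u'\n')
    finally:
        flock.release()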
Example #2
def __log_into_file(filename, mode, data, language, final=False):
    """
    write a content into a file (support unicode) and submit logs in database. if final=False its writing log in
    the database.

    Args:
        filename: the filename
        mode: writing mode (a, ab, w, wb, etc.)
        data: content
        language: language
        final: True if it's final report otherwise False (default False)

    Returns:
        True if success otherwise None
    """
    log = ''
    if version() == 2:
        if isinstance(data, str):
            try:
                log = json.loads(data)
            except ValueError:
                log = ''

        if isinstance(log, dict):
            if final:
                with open(filename, mode) as save:
                    save.write(data + '\n')
            else:
                submit_logs_to_db(language, data)
        else:
            if not final:
                flock = lockfile.FileLock(filename)
                flock.acquire()
            with open(filename, mode) as save:
                save.write(data + '\n')
            if not final:
                flock.release()
    else:

        if isinstance(data, str):
            try:
                log = json.loads(data)
            except ValueError:
                log = ''

        if isinstance(log, dict):
            if final:
                with open(filename, mode, encoding='utf-8') as save:
                    save.write(data + '\n')
            else:
                submit_logs_to_db(language, data)
        else:
            if not final:
                flock = lockfile.FileLock(filename)
                flock.acquire()
            with open(filename, mode, encoding='utf-8') as save:
                save.write(data + '\n')
            if not final:
                flock.release()
    return True
Example #3
def other_op(queue):
    """Lock the lockfile, then wait for poison pill
    """
    lockfile.FileLock(temp.name).acquire()
    while queue.empty():
        sleep(0.01)
    lockfile.FileLock(temp.name).release()
Example #4
def __log_into_file(filename, mode, data, language, final=False):
    """
    write a content into a file (support unicode) and submit logs in database. if final=False its writing log in
    the database.

    Args:
        filename: the filename
        mode: writing mode (a, ab, w, wb, etc.)
        data: content
        language: language
        final: True if it's final report otherwise False (default False)

    Returns:
        True if success otherwise None
    """
    if version() == 2:

        if _builder(_paths(), default_paths())["tmp_path"] in filename:
            if not final:
                flock = lockfile.FileLock(filename)
                flock.acquire()
            with open(filename, mode) as save:
                save.write(data + '\n')
            if not final:
                flock.release()
        else:
            if final:
                with open(filename, mode) as save:
                    save.write(data + '\n')
            else:
                submit_logs_to_db(language, data)

    else:

        if _builder(_paths(), default_paths())["tmp_path"] in filename:
            if not final:
                flock = lockfile.FileLock(filename)
                flock.acquire()
            with open(filename, mode, encoding='utf-8') as save:
                save.write(data + '\n')
            if not final:
                flock.release()
        else:
            if final:
                with open(filename, mode, encoding='utf-8') as save:
                    save.write(data + '\n')
            else:
                submit_logs_to_db(language, data)
    return True
Example #5
def main():
    global mainloop

    parser = argparse.ArgumentParser(description="KNoT DBUS-Control Daemon")
    parser.add_argument("-w", "--working-dir", metavar="<path>",
                        default="/usr/local/bin",
                        type=str,
                        help="Daemon working directory")
    parser.add_argument("-p", "--pid-filepath", metavar="<path/control>",
                        default="/tmp/control", type=str,
                        help="PID file path and name")
    parser.add_argument("-n", "--detach-process", action="store_false",
                        help="Detached process")
    args = parser.parse_args()

    context = daemon.DaemonContext(
        working_directory=args.working_dir,
        umask=0o002,
        detach_process=args.detach_process,
        pidfile=lockfile.FileLock(args.pid_filepath),
        signal_map={signal.SIGTERM: quit_cb, signal.SIGINT: quit_cb},
        stdout=sys.stdout,
        stderr=sys.stderr,
    )

    with context:
        dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)

        control = Control()
        control.start()

        mainloop = GObject.MainLoop()

        mainloop.run()
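
python-daemon only requires that pidfile be a context manager: DaemonContext enters it after detaching and exits it on shutdown, which is why a bare lockfile.FileLock can be passed directly. Note that FileLock does not record the daemon's PID; lockfile.pidlockfile.PIDLockFile does both. A minimal sketch of that contract, with placeholder paths:

import daemon
import lockfile

# any object with __enter__/__exit__ works as the pidfile;
# FileLock('/tmp/example-daemon') actually locks /tmp/example-daemon.lock
context = daemon.DaemonContext(
    working_directory='/tmp',
    pidfile=lockfile.FileLock('/tmp/example-daemon'),
)

with context:
    pass  # daemon body runs here, holding the lock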
Example #6
    def unTagRepo(self, static_reponame, tag):
        initialTags = self.getTags(static_reponame)
        tag_encoded = self.utf8Encode(tag)

        if tag_encoded not in initialTags:
            raise NoSuchTagException()

        initialTags.remove(tag_encoded)

        repo = self.config.getStaticRepoDir(static_reponame)
        if not os.path.exists(repo):
            raise IsNotAStaticRepoException()
        tagpath = self.config.getTagsFileForStaticRepo(static_reponame)

        lock = lockfile.FileLock(tagpath)
        while not lock.i_am_locking():
            try:
                lock.acquire(timeout=15)  #wait 15sec max
            except LockTimeout:
                raise CouldNotLogTagsException()
        try:
            fileHandle = open(tagpath, 'w')  #replace instead of appending
            for tag in initialTags:
                tag_encoded = self.utf8Encode(tag)
                fileHandle.write(tag_encoded)
                fileHandle.write('\n')
            fileHandle.close()
        finally:
            lock.release()
        return "Untagged OK"
Example #7
    def __init__(self, name, maxattempts, filename, logger):
        self.name = name
        self.filename = filename
        self.maxattempts = maxattempts
        self.filelock = lockfile.FileLock(self.filename)
        self.counter = 0
        self.logger = logger
Example #8
    def fetch(self):
        """Download and extract the dataset."""

        home = self.home()

        lock = lockfile.FileLock(home)
        if lock.is_locked():
            log.warn('%s is locked, waiting for release' % home)

        with lock:
            # -- download pair labels
            for fname, sha1 in PAIRS_FILENAMES:
                url = path.join(PAIRS_BASE_URL, fname)
                basename = path.basename(url)
                filename = path.join(home, basename)
                if not path.exists(filename):
                    if not path.exists(home):
                        os.makedirs(home)
                    download(url, filename, sha1=sha1)

            # -- download and extract images
            url = self.URL
            sha1 = self.SHA1
            output_dirname = self.home('images')
            if not path.exists(output_dirname):
                os.makedirs(output_dirname)

            # -- various disruptions might cause this to fail
            #    but if any process gets as far as writing the completion
            #    marker, then it should be all good.
            done_marker = os.path.join(output_dirname, 'completion_marker')
            if not path.exists(done_marker):
                download_and_extract(url, output_dirname, sha1=sha1)
                open(done_marker, 'w').close()
Example #9
def append_line(fname, line):
    # lock for writing; the lock is released when the with-block exits
    with lockfile.FileLock(fname):
        with open(fname, 'a+') as fp:
            fp.write(line)
            fp.write('\n')
Example #10
def writeToFile(data):
    lock = lockfile.FileLock(outputFile)
    lock.acquire()
    with open(outputFile, 'a') as f:
        save = csv.writer(f, lineterminator='\n')
        save.writerows(data)
    lock.release()
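
Because the acquire()/release() pair above is not wrapped in try/finally, an exception inside the csv write would leave the lock held. A minimal sketch of the safer context-manager form, identical on the happy path:

import csv
import lockfile

def write_to_file(output_file, data):
    # the with-statement releases the lock even if writerows() raises
    with lockfile.FileLock(output_file):
        with open(output_file, 'a') as f:
            csv.writer(f, lineterminator='\n').writerows(data)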
Example #11
        def inner(*args, **kwargs):
            # NOTE(soren): If we ever go natively threaded, this will be racy.
            #              See http://stackoverflow.com/questions/5390569/dyn\
            #              amically-allocating-and-destroying-mutexes
            if name not in _semaphores:
                _semaphores[name] = semaphore.Semaphore()
            sem = _semaphores[name]
            LOG.debug(_('Attempting to grab semaphore "%(lock)s" for method '
                        '"%(method)s"...' % {'lock': name,
                                             'method': f.__name__}))
            with sem:
                if external:
                    LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
                                'method "%(method)s"...' %
                                {'lock': name, 'method': f.__name__}))
                    lock_file_path = os.path.join(FLAGS.lock_path,
                                                  'nova-%s.lock' % name)
                    lock = lockfile.FileLock(lock_file_path)
                else:
                    lock = _NoopContextManager()

                with lock:
                    retval = f(*args, **kwargs)

            # If no-one else is waiting for it, delete it.
            # See note about possible raciness above.
            if not sem.balance < 1:
                del _semaphores[name]

            return retval
Example #12
def main():
    ''' start the code '''
    global CONFIG, loglevel, level, logger

    #Option parser
    usage = "usage: %prog \n    configuration paramaters should be in dnsproxy.conf"
    parser = OptionParser(usage)
    parser.add_option("-f",
                      "--foreground",
                      action="store_true",
                      help="Does not start as a Daemon")
    parser.add_option("-c",
                      "--configfile",
                      dest="configfile",
                      help="Load corresponding configuration file")

    (options, args) = parser.parse_args()

    conf = 'dnsproxy.conf'
    if options.configfile:
        conf = options.configfile

    try:
        CONFIG.readfp(open(conf))
    except:
        print "Could not open configuration file \"{}\"".format(conf)
        exit(1)

    host = '127.0.0.1'
    port = 53
    if CONFIG.has_option('dnsproxy', 'host'):
        host = CONFIG.get('dnsproxy', 'host')
    if CONFIG.has_option('dnsproxy', 'port'):
        port = int(CONFIG.get('dnsproxy', 'port'))

    pid = './dnsproxy.pid'
    if CONFIG.has_option('dnsproxy', 'pidfile'):
        pid = CONFIG.get('dnsproxy', 'pidfile')

    work = '/var/tmp'
    if CONFIG.has_option('dnsproxy', 'workdir'):
        work = CONFIG.get('dnsproxy', 'workdir')

    context = daemon.DaemonContext(
        working_directory=work,
        umask=0o002,
        pidfile=lockfile.FileLock(pid),
    )

    blacklist = CONFIG.get('dnsproxy', 'blacklist') if CONFIG.has_option(
        'dnsproxy', 'blacklist') else None
    if blacklist and getblacklist(blacklist) == 1:
        exit(1)

    #Start daemon
    if options.foreground:
        thread_main(host, port)
    else:
        with context:
            thread_main(host, port)
Example #13
def releaseLock():
    global processLockFile
    lock = lockfile.FileLock(processLockFile)
    try:
        lock.release()
    except (lockfile.NotLocked, lockfile.NotMyLock):
        print('lock release failure')
Example #14
    def start(self):
        options = self.options
        # set path
        sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(options.settings_file)), ".."))
        sys.path.insert(0, os.path.dirname(os.path.abspath(options.settings_file)))

        # set setting
        exec("import %s as settings" % os.path.splitext(os.path.basename(options.settings_file))[0])
        torneira.settings = settings

        from torneira.core.server import TorneiraServer
        server = TorneiraServer(
            port=options.port,
            media_dir=os.path.abspath(options.media_dir),
            xheaders=options.xheaders
        )

        if options.daemon:
            pidfile = '%s.%s' % (options.pidfile, options.port)
            lock = lockfile.FileLock(pidfile)
            if lock.is_locked():
                sys.stderr.write("torneira already running on port %s\n" % options.port)
                return

            context = daemon.DaemonContext(pidfile=lock)
            with context:
                server.run()
        else:
            server.run()
Example #15
        def wrapper(*args, **kwargs):
            # If function has history -> do not log
            if 'history' in kwargs:
                return func(*args, **kwargs)

            # Modify positional and keyword arguments to match function signature, if needed
            args, kwargs = alignWithFuncDef(func, args, kwargs)

            # Translate args and kwargs into string signature
            fsignature = funcSig(func, args, kwargs)

            # If entry present in log, return corresponding output
            if os.path.isfile(fpath):
                with open(fpath, 'r', newline='') as f:
                    reader = csv.reader(f, delimiter=delimiter)
                    for row in reader:
                        if row[0] == fsignature:
                            logger.debug(
                                f'entry found in "{os.path.basename(fpath)}"')
                            return out_type(row[1])

            # Otherwise, compute output and log it into file before returning
            out = func(*args, **kwargs)
            lock = lockfile.FileLock(fpath)
            lock.acquire()
            with open(fpath, 'a', newline='') as csvfile:
                writer = csv.writer(csvfile, delimiter=delimiter)
                writer.writerow([fsignature, str(out)])
            lock.release()

            return out
Example #16
def main():
    args = parse_args()

    # Setup logger
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter("%(asctime)s - %(name)s "
                                  "- %(levelname)s - %(message)s")
    if args.daemonize:
        log_handler = logging.FileHandler(args.log_file)
    else:
        log_handler = logging.StreamHandler(sys.stdout)
    log_handler.setFormatter(formatter)
    logger.addHandler(log_handler)

    if args.daemonize:
        logger.info('Starting')
        daemon_context = daemon.DaemonContext(
            pidfile=lockfile.FileLock(args.pid_file),
            files_preserve=[log_handler.stream.fileno()],
        )
        with daemon_context:
            run_subscriber(args)
    else:
        run_subscriber(args)

    logger.info('Stopped')
Example #17
def main(args):

    settings.init_env()
    logbee_config = settings.get_logbee_config()

    context = daemon.DaemonContext(
        working_directory=logbee_config.workdir,
        umask=0,
        pidfile=lockfile.FileLock(args["pidname"]),
    )

    context.signal_map = {
        #     signal.SIGTERM: end_of_daemon,
        #     signal.SIGHUP: 'terminate',
        signal.SIGUSR1: end_of_daemon,
    }

    mail_gid = grp.getgrnam('root').gr_gid
    context.gid = mail_gid

    logger_io = [handler.stream for handler in logger.handlers]
    context.files_preserve = logger_io

    with context:
        logbee_daemon(args["pidname"], logbee_config)
Example #18
    def get_product_options(self):
        """Return a list of per-product_option dictionaries."""

        if self._product_options is None:
            filename = os.path.join(self._cache_dir, "product_options.csv")

            with lockfile.FileLock(self._download_lock_filename):
                # Download products file if it is out of date.
                if self._is_file_expired(filename):
                    self._download_product_options_csv(filename + ".tmp")
                    repair_product_options_csv(filename, filename + ".tmp")

                # Read product_options file.
                # The header has 47 fields, but each data record has
                # 48 fields.  By setting restkey to "Extra", we
                # prevent the extra field from having a key of None.
                self._product_options = list(
                    csv.DictReader(open(filename), restkey="Extra")
                )

                # Cleanup suspect data.
                if self._clean:
                    self._clean_product_options()

        return self._product_options
Example #19
def lock(filename):
    """Prevents other autotest/tradefed instances from accessing cache.

    @param filename: The file to be locked.
    """
    filelock = lockfile.FileLock(filename)
    # It is tempting just to call filelock.acquire(3600). But the implementation
    # has very poor temporal granularity (timeout/10), which is unsuitable for
    # our needs. See /usr/lib64/python2.7/site-packages/lockfile/
    attempts = 0
    while not filelock.i_am_locking():
        try:
            attempts += 1
            logging.info('Waiting for cache lock...')
            filelock.acquire(random.randint(1, 5))
        except (lockfile.AlreadyLocked, lockfile.LockTimeout):
            if attempts > 1000:
                # Normally we should acquire the lock in a few seconds. Once we
                # wait on the order of hours either the dev server IO is
                # overloaded or a lock didn't get cleaned up. Take one for the
                # team, break the lock and report a failure. This should fix
                # the lock for following tests. If the failure affects more than
                # one job look for a deadlock or dev server overload.
                logging.error('Permanent lock failure. Trying to break lock.')
                filelock.break_lock()
                raise error.TestFail('Error: permanent cache lock failure.')
        else:
            logging.info('Acquired cache lock after %d attempts.', attempts)
    try:
        yield
    finally:
        filelock.release()
        logging.info('Released cache lock.')
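
The yield in the middle of lock() only makes sense if the function is used as a context manager; the contextlib.contextmanager decorator is presumably present in the original source but cut from this excerpt. A minimal sketch of the same shape under that assumption (cache_lock is a hypothetical name):

import contextlib
import lockfile

@contextlib.contextmanager
def cache_lock(filename):
    filelock = lockfile.FileLock(filename)
    filelock.acquire()  # blocks until the lock is free
    try:
        yield
    finally:
        filelock.release()

# the caller holds the lock only inside the with-block
with cache_lock('/tmp/example-cache'):
    pass  # access the cache here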
Example #20
def run_backend(opts):
    """
    Start main backend daemon

    :param opts: Bunch object with command line options

    Expected **opts** fields:
        - `config_file` - path to the backend config file
        - `daemonize` - boolean flag to enable daemon mode
        - `pidfile` - path to the backend pidfile

    """
    cbe = None
    try:
        context = DaemonContext(
            pidfile=lockfile.FileLock(opts.pidfile),
            gid=grp.getgrnam("copr").gr_gid,
            uid=pwd.getpwnam("copr").pw_uid,
            detach_process=opts.daemonize,
            umask=0o22,
            stderr=sys.stderr,
            signal_map={
                signal.SIGTERM: "terminate",
                signal.SIGHUP: "terminate",
            },
        )
        with context:
            cbe = CoprBackend(opts.config_file, ext_opts=opts)
            cbe.run()
    except (Exception, KeyboardInterrupt):
        sys.stderr.write("Killing/Dying\n")
        if cbe is not None:
            cbe.terminate()
        raise
Example #21
def write_to_csv(fname, array, header, subject):
    head, tail = os.path.split(fname)
    if not os.path.isdir(head):
        os.mkdir(head)

    array = list(array)
    array.insert(0, subject)
    header = list(header)
    header.insert(0, 'Image')

    # file-locking mechanism: a process/thread writes to fname only if it can acquire the lock.
    lock = lockfile.FileLock(fname)
    lock.timeout = 200
    try:
        with lock:
            isfile = os.path.isfile(fname)
            with open(fname, 'a+') as f:
                writer = csv.writer(f, delimiter=',')
                if not isfile:
                    writer.writerow(header)
                writer.writerow(array)
    except lockfile.LockTimeout:
        # lock.unique_name: hostname-tname.pid-somedigits
        unique_name = os.path.split(lock.unique_name)[1]
        fname_tmp = fname + '_' + unique_name
        logger.warning('Lock timeout. Log the entry to ' + fname_tmp)
        with open(fname_tmp, 'a+') as f:
            writer = csv.writer(f, delimiter=',')
            writer.writerow(header)
            writer.writerow(array)
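
Setting lock.timeout works because, in recent versions of lockfile, acquire() falls back to the instance's timeout attribute when called with no argument, and the with-statement calls acquire() with no argument; on expiry it raises lockfile.LockTimeout, which the fallback branch above catches. A minimal sketch of the same pattern (the path is a placeholder):

import lockfile

lock = lockfile.FileLock('/tmp/example.csv')
lock.timeout = 5  # seconds; used by acquire() when no timeout is passed
try:
    with lock:
        pass  # write the row while holding the lock
except lockfile.LockTimeout:
    pass  # fall back, e.g. to a per-process side file as above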
Example #22
def log_tofile(filename, subject_id, sequence, **kwargs):
    ''' Log a custom string to the subject_id row and sequence column, update the
        timestamp in the Time_Last_Update column of the dataframe, and finally save
        the dataframe to file. The timestamp format is '%Y/%m/%d-%H:%M:%S'.

        :param filename: Path to a CSV log created by :func:`dicom2nifti.logger.create_log`
                   that has 'ID' as index name and Time_Last_Update and sequences as column names.
        :type filename: str
        :param subject_id: A subject identifier to be added to rows.
        :type subject_id: str
        :param sequence: A sequence to be added to columns.
        :type sequence: str

    '''

    _module_logger.debug('received a call to log_tofile')

    import lockfile
    lock = lockfile.FileLock(filename)
    lock.timeout = 3600
    try:
        with lock:  ## argument poll_intervall for acquire() not available in this version of lockfile
            if _os.path.exists(filename):
                df_nifti_log = pd.read_csv(filename, index_col=0, dtype=str)
            else:
                df_nifti_log = create_log()

            log_conversion(df_nifti_log,
                           subject_id,
                           sequence,
                           inplace=True,
                           **kwargs)
            df_nifti_log.to_csv(filename, index=True)

    except lockfile.LockTimeout:
        # lock.unique_name: hostname-tname.pid-somedigits
        unique_name = _os.path.split(lock.unique_name)[1]
        filename_tmp = filename + '_' + unique_name
        _module_logger.warning('Lock timeout. Log the entry to ' +
                               filename_tmp)

        if _os.path.exists(filename_tmp):
            df_nifti_log = pd.read_csv(filename_tmp, index_col=0, dtype=str)
        else:
            df_nifti_log = create_log()

        log_conversion(df_nifti_log,
                       subject_id,
                       sequence,
                       inplace=True,
                       **kwargs)
        df_nifti_log.to_csv(filename_tmp, index=True)


#    except:
#        _tb.print_exception(_sys.exc_info()[0], _sys.exc_info()[1], _sys.exc_info()[2])

    return 0
Example #23
def status():

    fl = lockfile.FileLock('take_shot')

    if fl.is_locked():

        requests = glob.glob('*.request')

        if len(requests) == 1:

            return """<html><body>

            Processing %s request
            
            </body></html>
            

            """ % requests[0].split('.request')[0]
        else:

            return "Something wrong, \n requests list is: \n %s " % (
                str(requests), )

    else:

        return "<html><body>Idle</body></html>"
Example #24
    def __init__(self, fname, thread_mutex, event, barrier):
        super().__init__()
        self.fname = fname
        self.thread_mutex = thread_mutex
        self.event = event
        self.barrier = barrier
        self.ipc_lock = lockfile.FileLock(fname)
Example #25
def open(path,
         readonly=False,
         progress=None,
         nworkers=None,
         use_threads=False,
         always_rerandomize=True):
    """ Loads a safe from the filesystem.

        Contrary to `Safe.load_from_stream', this function also takes care
        of locking. """
    # TODO Allow multiple readers.
    locked = False
    try:
        lock = lockfile.FileLock(path)
        lock.acquire(0)
        locked = True
        if not os.path.exists(path):
            raise SafeNotFoundError
        with _builtin_open(path) as f:
            safe = Safe.load_from_stream(f, nworkers, use_threads)
        yield safe
        if not readonly:
            safe.autosave_containers()
            if safe.touched or always_rerandomize:
                safe.rerandomize(progress=progress,
                                 nworkers=nworkers,
                                 use_threads=use_threads)
                with tempfile.NamedTemporaryFile(delete=False) as f:
                    safe.store_to_stream(f)
                    shutil.move(f.name, path)
    except lockfile.AlreadyLocked:
        raise SafeLocked
    finally:
        if locked:
            lock.release()
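
lock.acquire(0) is the non-blocking form: lockfile raises AlreadyLocked immediately when the timeout is zero or negative (a positive timeout raises LockTimeout on expiry), which is why the function above maps AlreadyLocked to SafeLocked instead of blocking a second opener. A minimal sketch of the distinction, with a placeholder path:

import lockfile

lock = lockfile.FileLock('/tmp/example-safe')
try:
    lock.acquire(timeout=0)  # fail immediately if another process holds it
except lockfile.AlreadyLocked:
    print('held by another process')
else:
    try:
        pass  # exclusive work goes here
    finally:
        lock.release()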
Example #26
    def __init__(self):
        '''
        Wrapper around the python-vagrant module for use with ansible.
        Note that Vagrant itself is not thread-safe, and neither is the python-vagrant lib, so we need to lock on basically all operations ...
        '''
        # Get a lock
        self.lock = None

        try:
            self.lock = lockfile.FileLock(VAGRANT_LOCKFILE)
            self.lock.acquire()
        except Exception:
            # fall back to using flock instead ...
            try:
                import fcntl
                self.lock = open(VAGRANT_LOCKFILE, 'w')
                fcntl.flock(self.lock, fcntl.LOCK_EX)
            except Exception:
                print(
                    "failed=True msg='Could not get a lock for using vagrant. Install python module \"lockfile\" to use vagrant on non-POSIX filesytems.'"
                )
                sys.exit(1)

        # Initialize vagrant and state files
        log_cm = vagrant.make_file_cm('vagrant.log')
        self.vg = vagrant.Vagrant(out_cm=log_cm, err_cm=log_cm)

        # operation will create a default data structure if none present
        self.vg_data = {"instances": {}, "num_inst": 0}
Example #27
def IncRunID(project_name, db_dir):
    """Increment the RunID and append new value with project name to the file"""
    database_file = db_dir + '/runID_database.txt'

    # lock the file
    lock = lf.FileLock(database_file)
    while not lock.i_am_locking():
        try:
            # wait up to 5 seconds
            lock.acquire(timeout=5)
        except lf.LockTimeout:
            raise Exception('ERROR: Timed out waiting for file lock at ' +
                            lock.path)

    # get the last run_id from the db file
    rundb = open(database_file, 'r')
    for line in rundb:
        (old_id, old_project) = line.split()

    rundb.close()
    global run_id
    run_id = int(old_id) + 1

    # write the incremented run_id with project name to the db file
    with open(database_file, 'a') as rundb:
        rundb.write(str(run_id) + '\t' + project_name + '\n')

    lock.release()

    return
Example #28
def run_backend(opts):
    """
    Start main backend daemon

    :param opts: Munch object with command line options

    Expected **opts** fields:
        - `config_file` - path to the backend config file
        - `daemonize` - boolean flag to enable daemon mode
        - `pidfile` - path to the backend pidfile

        - `daemon_user`
        - `daemon_group`
    """
    cbe = None
    try:
        context = DaemonContext(
            pidfile=lockfile.FileLock(opts.pidfile),
            # gid=grp.getgrnam("copr").gr_gid,
            # uid=pwd.getpwnam("copr").pw_uid,
            gid=grp.getgrnam(opts.daemon_group).gr_gid,
            uid=pwd.getpwnam(opts.daemon_user).pw_uid,
            detach_process=opts.daemonize,
            umask=0o22,
            stderr=sys.stderr)
        with context:
            cbe = CoprBackend(opts.config_file, ext_opts=opts)
            cbe.run()
    except (Exception, KeyboardInterrupt):
        sys.stderr.write("Killing/Dying\n")
        raise
Example #29
def ReleaseLock(static_dir, tag, destroy=False):
    """Releases the lock for a given tag.

  Optionally, removes the locked directory entirely.

  Args:
    static_dir: Directory where builds are served from.
    tag:        Unique resource/task identifier. Use '/' for nested tags.
    destroy:    Determines whether the locked directory should be removed
                entirely.

  Raises:
    CommonUtilError: If lock can't be released.
  """
    build_dir = os.path.join(static_dir, tag)
    if not SafeSandboxAccess(static_dir, build_dir):
        raise CommonUtilError('Invalid tag "%s".' % tag)

    lock = lockfile.FileLock(os.path.join(build_dir, DEVSERVER_LOCK_FILE))
    try:
        lock.break_lock()
        if destroy:
            shutil.rmtree(build_dir)
    except Exception as e:
        raise CommonUtilError(str(e))
Example #30
def main():
    global typeRunning
    lock = None
    try:
        # set appropriate global vars for run type (i.e. continual or sensor)
        SetRunType()

        # first make sure all the necessary paths are in place
        if (checkPaths() != 0):
            sys.exit(2)

        lock = lockfile.FileLock("/tmp/procdownloadjob" + typeRunning)
        while not lock.i_am_locking():
            try:
                lock.acquire(timeout=0)
            except lockfile.AlreadyLocked:
                raise Exception("FileLock")

        delFilesPath(DOWNLOAD_WEB_DIR)

        dbconn = MySQLdb.connect(host=DBHOST,
                                 user=DBUSER,
                                 passwd=DBPASSWD,
                                 db=DBNAME)

        totalmb = processContinualJobs(dbconn)

        print str(totalmb) + " MB of zip files processed"

        dbconn.close()
        lock.release()

    except Exception:
        traceback.print_exc()
        if lock is not None and lock.i_am_locking():
            lock.release()
        sys.exit(1)