Example #1
    def run_capsule(self, run_info, gpu_idx):
        lock_path = os.path.join(run_info['trial_dir'], 'lock')
        lock = Lock(lock_path, datetime.timedelta(days=365))
        try:
            lock.lock(timeout=datetime.timedelta(seconds=1))
        except TimeOutError:
            # flufl.lock raises TimeOutError on timeout rather than returning
            # with the lock unacquired (assumes TimeOutError is imported
            # alongside Lock).
            self.print_log('locking failed for', run_info['trial_id'])
            return None

        # run capsule

        env = {'CUDA_VISIBLE_DEVICES': str(gpu_idx),
               'INFR_TRIAL': run_info['trial_id'],
               'INFR_EXP_PATH': self.experiment_dir,
               'INFR_MODE': self.mode,
               'INFR_REDIRECT_IO': '1',
               'INFR_START_STATE': os.path.join(run_info['trial_dir'], 'start_state.json')}
        if self.cuda_sync:
            env['CUDA_LAUNCH_BLOCKING'] = '1'

        proc = subprocess.Popen([sys.executable, '-m', run_info['start_state']['module_name']],
                                env=env)

        self.print_log('started worker', proc.pid, 'for', run_info['trial_id'], self.mode)

        return {'trial_dir': run_info['trial_dir'],
                'trial_id': run_info['trial_id'],
                'start_at': time.time(),
                'lock': lock,
                'gpu_idx': gpu_idx,
                'proc': proc,
                'pid': proc.pid,
                'ret_code': None}
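The caller is expected to poll the returned record and release the trial lock once the worker exits. A minimal supervision sketch, assuming `records` is a list of dicts returned by run_capsule (the helper name is illustrative):

def reap_finished(records):
    # Return the records of still-running workers; unlock finished ones.
    still_running = []
    for rec in records:
        ret = rec['proc'].poll()  # None while the worker is alive
        if ret is None:
            still_running.append(rec)
            continue
        rec['ret_code'] = ret
        rec['lock'].unlock()  # release the trial's flufl lock
    return still_running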
Example #2
def acquire_lock_1(force, lock_file=None):
    """Try to acquire the master lock.

    :param force: Flag that controls whether to force acquisition of the lock.
    :type force: bool
    :param lock_file: Path to the lock file, otherwise `config.LOCK_FILE`.
    :type lock_file: str
    :return: The master lock.
    :raises: `TimeOutError` if the lock could not be acquired.
    """
    if lock_file is None:
        lock_file = config.LOCK_FILE
    lock = Lock(lock_file, LOCK_LIFETIME)
    try:
        lock.lock(timedelta(seconds=0.1))
        return lock
    except TimeOutError:
        if not force:
            raise
        # Force removal of lock first.
        hostname, pid, tempfile = lock.details
        os.unlink(lock_file)
        # Also remove any stale claim files.
        dname = os.path.dirname(lock_file)
        for fname in os.listdir(dname):
            fpath = os.path.join(dname, fname)
            if fpath.startswith(lock_file):
                os.unlink(fpath)
        return acquire_lock_1(force=False)
Example #3
    def test_acquire_lock_1_force(self):
        # Create the lock and lock it.
        my_lock = Lock(self.lock_file)
        my_lock.lock(timedelta(seconds=60))
        # Try to acquire it again with force.
        lock = master.acquire_lock_1(True, self.lock_file)
        self.assertTrue(lock.is_locked)
        lock.unlock()
Example #4
    def __init__(self, lifetime=LOCK_LIFETIME, timeout=LOCK_TIMEOUT):
        """
        Create a SafeUpdater with the given lock lifetime and timeout
        (see flufl.lock documentation). The defaults are the lifetime
        and timeout found in the config.
        """
        self.lock = Lock(LOCK_FILE, lifetime=lifetime)
        self.timeout = timeout
Example #5
    def test_master_state(self):
        my_lock = Lock(self.lock_file)
        # Mailman is not running.
        state, lock = master.master_state(self.lock_file)
        self.assertEqual(state, master.WatcherState.none)
        # Acquire the lock as if another process had already started the
        # master.
        my_lock.lock()
        try:
            state, lock = master.master_state(self.lock_file)
        finally:
            my_lock.unlock()
        self.assertEqual(state, master.WatcherState.conflict)
Example #6
    def append_to_csv(self, dataframe, file_name):
        '''Append a pandas dataframe to a csv file. This function is thread safe.'''

        # Make a lock

        lock_name = file_name + '.lock'
        lock = Lock(lock_name)
        lock.lifetime = timedelta(minutes=10)

        # Write to the result file with a lock

        with lock:
            with open(file_name, 'a+') as f:
                dataframe.to_csv(f, header=False, index=False)
Example #7
    def __init__(self, path):
        # Specify the path to a file that will be used to synchronize the lock.
        # Per the flufl.lock documentation, use a file that does not exist.
        self._lock = Lock(path)

        # Locks have a lifetime (default 15 seconds) which is the period of time that the process expects
        # to keep the lock once it has been acquired. We set the lifetime to be 5 minutes as we expect
        # all operations that require locks to be completed within that time.
        self._lock.lifetime = timedelta(minutes=5)

        # Ensure multiple threads within a process run NFSLock operations one at a time.
        # We must acquire the reentrant lock before acquiring the flufl lock and only release after
        # the flufl lock is released.
        self._r_lock = threading.RLock()
Example #8
def append_to_csv(data_list, file_name):
    '''Append a list of tuples to a csv file. This function is thread safe.'''

    # Make a lock

    lock_name = file_name + '.lock'
    lock = Lock(lock_name)
    lock.lifetime = timedelta(minutes=10)

    # Write to the result file with a lock

    with lock:
        with open(file_name, 'a+') as f:
            for t in data_list:
                f.write(','.join([str(x) for x in t]) + '\n')
Example #9
def safe_append(file_name, string):
    '''Append a string to a file. This function
    is thread safe.
    '''
    # Make a lock

    lock_name = file_name + '.lock'
    lock = Lock(lock_name)
    lock.lifetime = timedelta(minutes=10)

    # Write to the result file with a lock

    with lock:
        with open(file_name, 'a+') as f:
            f.write(string)
Example #10
    def archive_message(mlist, message):
        """See `IArchiver`.

        This archiver saves messages into a maildir.
        """
        archive_dir = os.path.join(config.ARCHIVE_DIR, 'prototype')
        try:
            os.makedirs(archive_dir, 0o775)
        except OSError as error:
            # If this already exists, then we're fine
            if error.errno != errno.EEXIST:
                raise

        # Maildir will throw an error if the directories are partially created
        # (for instance the toplevel exists but cur, new, or tmp do not)
        # therefore we don't create the toplevel as we did above.
        list_dir = os.path.join(archive_dir, mlist.fqdn_listname)
        mailbox = Maildir(list_dir, create=True, factory=None)
        lock_file = os.path.join(
            config.LOCK_DIR, '{0}-maildir.lock'.format(mlist.fqdn_listname))
        # Lock the maildir as Maildir.add() is not threadsafe.  Don't use the
        # context manager because it's not an error if we can't acquire the
        # archiver lock.  We'll just log the problem and continue.
        #
        # XXX 2012-03-14 BAW: When we extend the chain/pipeline architecture
        # to other runners, e.g. the archive runner, it would be better to let
        # any TimeOutError propagate up.  That would cause the message to be
        # re-queued and tried again later, rather than being discarded as
        # happens now below.
        lock = Lock(lock_file)
        try:
            lock.lock(timeout=timedelta(seconds=1))
            # Add the message to the maildir.  The return value could be used
            # to construct the file path if necessary.  E.g.
            #
            # os.path.join(archive_dir, mlist.fqdn_listname, 'new',
            #              message_key)
            mailbox.add(message)
        except TimeOutError:
            # Log the error and go on.
            log.error('Unable to acquire prototype archiver lock for {0}, '
                      'discarding: {1}'.format(
                          mlist.fqdn_listname,
                          message.get('message-id', 'n/a')))
        finally:
            lock.unlock(unconditionally=True)
        # Can we return the URL of the archived message?
        return None
Example #11
    def test_archive_lock_used(self):
        # Test that locking the maildir when adding works as a failure here
        # could mean we lose mail.
        lock_file = os.path.join(
            config.LOCK_DIR, '{0}-maildir.lock'.format(
                self._mlist.fqdn_listname))
        with Lock(lock_file):
            # Acquire the archiver lock, then make sure the archiver logs the
            # fact that it could not acquire the lock.
            archive_thread = threading.Thread(
                target=Prototype.archive_message,
                args=(self._mlist, self._msg))
            mark = LogFileMark('mailman.error')
            archive_thread.run()
            # Test that the archiver output the correct error.
            line = mark.readline()
            # XXX 2012-03-15 BAW: we really should remove timestamp prefixes
            # from the loggers when under test.
            self.assertTrue(line.endswith(
                'Unable to acquire prototype archiver lock for {0}, '
                'discarding: {1}\n'.format(
                    self._mlist.fqdn_listname,
                    self._msg.get('message-id'))))
        # Check that the message didn't get archived.
        created_files = self._find(config.ARCHIVE_DIR)
        self.assertEqual(self._expected_dir_structure, created_files)
Example #12
    def load_store_matrix(self, metadata, as_of_dates, return_matrix=True):
        """
        Calls get_dataset to return a pandas DataFrame for the as_of_dates selected
        Args:
           dict metadata: metadata used to generate the matrix uuid
           list as_of_dates: as_of_dates to use
           bool return_matrix: whether to return the loaded or stored matrix
        Returns:
           matrix: dataframe with the features and the last column as the label (called: outcome)
        """
        uuid = metta.metta_io.generate_uuid(metadata)
        matrix_filename = self.matrices_path + '/' + uuid

        with Lock(matrix_filename + '.lock',
                  lifetime=datetime.timedelta(minutes=20)):
            if os.path.isfile(matrix_filename + '.h5'):
                log.debug(' Matrix {} already stored'.format(uuid))
                if return_matrix:
                    df = metta.metta_io.recover_matrix(metadata,
                                                       self.matrices_path)
                    return df, uuid

            else:

                df = self.feature_loader.get_dataset(as_of_dates)
                log.debug('storing matrix {}'.format(uuid))
                metta.metta_io.archive_matrix(matrix_config=metadata,
                                              df_matrix=df,
                                              directory=self.matrices_path,
                                              format='hd5')
                if return_matrix:
                    return df, uuid
Example #13
    def __init__(self, config_file):
        ''' initialise the task queue, from the config file '''
        self.config = Config(config_file)
        self.lock = Lock(pathjoin(self.config.get('DIRS', 'db'),
                                  'TaskQueue.lock'))
        self.db = DictLiteStore(pathjoin(self.config.get('DIRS', 'db'),
                                         'TaskQueue.db'), 'Tasks')
Example #14
def master_state(lock_file=None):
    """Get the state of the master watcher.

    :param lock_file: Path to the lock file, otherwise `config.LOCK_FILE`.
    :type lock_file: str
    :return: 2-tuple of the WatcherState describing the state of the lock
        file, and the lock object.
    """
    if lock_file is None:
        lock_file = config.LOCK_FILE
    # We'll never acquire the lock, so the lifetime doesn't matter.
    lock = Lock(lock_file)
    try:
        hostname, pid, tempfile = lock.details
    except NotLockedError:
        return WatcherState.none, lock
    if hostname != socket.getfqdn():
        return WatcherState.host_mismatch, lock
    # Find out if the process exists by calling kill with a signal 0.
    try:
        os.kill(pid, 0)
        return WatcherState.conflict, lock
    except ProcessLookupError:
        # No matching process id.
        return WatcherState.stale_lock, lock
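A caller can branch on the returned state; a minimal sketch, assuming the reporting policy shown here (the messages are illustrative, not from the source):

state, lock = master_state()
if state is WatcherState.none:
    print('no master watcher is running')
elif state is WatcherState.stale_lock:
    # details are (hostname, pid, tempfile) read from the lock file
    print('stale lock left behind by pid', lock.details[1])
else:
    print('cannot start a new master:', state)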
Example #15
    def regenerate(self, directory=None):
        """See `IMailTransportAgentLifecycle`."""
        # Acquire a lock file to prevent other processes from racing us here.
        if directory is None:
            directory = config.DATA_DIR
        lock_file = os.path.join(config.LOCK_DIR, 'mta')
        with Lock(lock_file):
            lmtp_path = os.path.join(directory, 'postfix_lmtp')
            lmtp_path_new = lmtp_path + '.new'
            with open(lmtp_path_new, 'w') as fp:
                self._generate_lmtp_file(fp)
            # Atomically rename to the intended path.
            os.rename(lmtp_path_new, lmtp_path)
            domains_path = os.path.join(directory, 'postfix_domains')
            domains_path_new = domains_path + '.new'
            with open(domains_path_new, 'w') as fp:
                self._generate_domains_file(fp)
            # Atomically rename to the intended path.
            os.rename(domains_path_new, domains_path)
            # If the transport_file_type is 'hash', run the postmap command
            # on the newly generated files to convert them into the hash
            # tables Postfix wants.
            if self.transport_file_type == 'hash':
                errors = []
                for path in (lmtp_path, domains_path):
                    command = self.postmap_command + ' ' + path
                    status = (os.system(command) >> 8) & 0xff
                    if status:
                        msg = 'command failure: %s, %s, %s'
                        errstr = os.strerror(status)
                        log.error(msg, command, status, errstr)
                        errors.append(msg % (command, status, errstr))
                if errors:
                    raise RuntimeError(NL.join(errors))
Example #16
def write_json(path, data, lock_path=None, merge_func=None):
    """
    Shortcut for writing a structure as json to the file system.

    merge_func is a callable that takes two dict and merges them
    together.

    :param path: The full path to the file to write
    :type path: str
    :param data:  structure to write out as json
    :type data: dict or list
    :param lock_path: path for the lock file to use
    :type lock_path: string
    :raises: ValueError, OSError
    """
    # lock before moving
    if not lock_path:
        lock_path = get_lock_path(path)

    with Lock(lock_path):
        if callable(merge_func):
            try:
                disk_data = load_json(path, require_exclusive=False)
            except FileNotFoundError:
                disk_data = {}
            mem_data = data.copy()
            data = merge_func(disk_data, mem_data)

        # Write to a temporary file in the same directory, then atomically
        # move it into place with the expected permissions.
        dn = os.path.dirname(path)
        f = tempfile.NamedTemporaryFile(mode='w', dir=dn, delete=False)
        json.dump(data, f, indent=4)
        os.fchmod(f.file.fileno(), 0o644)
        shutil.move(f.name, path)
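The merge_func hook lets the on-disk and in-memory structures be combined under the same lock before writing. A minimal sketch, assuming a shallow merge where in-memory keys win (the policy and path are illustrative):

def shallow_merge(disk_data, mem_data):
    # Keys present in memory override what is on disk.
    merged = dict(disk_data)
    merged.update(mem_data)
    return merged

write_json('/tmp/state.json', {'counter': 1}, merge_func=shallow_merge)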
Example #17
def child_locker(filename, lifetime, queue):
    # First, acquire the file lock.
    with Lock(filename, lifetime):
        # Now inform the parent that we've acquired the lock.
        queue.put(True)
        # Keep the file lock for a while.
        time.sleep(lifetime.seconds - 1)
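The queue handshake is only half of the picture; a sketch of the parent side, assuming multiprocessing drives the child (paths and timings are illustrative):

import multiprocessing
from datetime import timedelta
from flufl.lock import Lock, TimeOutError

queue = multiprocessing.Queue()
child = multiprocessing.Process(
    target=child_locker,
    args=('/tmp/example.lck', timedelta(seconds=5), queue))
child.start()
queue.get()  # blocks until the child reports that it holds the lock
try:
    # A second acquisition attempt must time out while the child holds it.
    Lock('/tmp/example.lck').lock(timeout=timedelta(milliseconds=100))
    raise AssertionError('unexpectedly acquired the lock')
except TimeOutError:
    pass
child.join()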
Example #18
def get_holes_score(pose):
    '''Get the holes score for a pose.'''
    dalphaball = os.path.join(
        os.getcwd(), 'dependencies/dependencies/DAlpahBall/DAlphaBall.gcc')
    rosetta.basic.options.set_file_option('holes:dalphaball', dalphaball)

    # Make a lock

    lock_name = 'hole_score.lock'
    lock = Lock(lock_name)
    lock.lifetime = timedelta(minutes=3)

    # Compute the holes score while holding the lock

    with lock:
        hf = rosetta.protocols.simple_filters.HolesFilter()
        return hf.compute(pose)
Example #19
def write_job(job_fullpath, job_spec):
    """Write the job specification to `job_fullpath` as JSON.

    Assumes a module-level `lock_file` shared by all job-file helpers.
    """

    # Write the contents to a job
    with Lock(lock_file):
        with open(job_fullpath, "w") as ofp:
            json.dump(job_spec, ofp, sort_keys=True)
Example #20
    def lock(self, timeout=None):
        while True:
            try:
                result = Lock.lock(self, timeout)
            except AlreadyLockedError:
                self._sleep()  # defined elsewhere in this class
            else:
                return result
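The snippet relies on a _sleep helper defined elsewhere in its class. A self-contained variant with an assumed fixed back-off, mirroring the snippet's retry-on-AlreadyLockedError behaviour, might look like:

import time
from flufl.lock import Lock, AlreadyLockedError

class RetryingLock(Lock):
    def __init__(self, lockfile, interval=1.0, **kws):
        super().__init__(lockfile, **kws)
        self._interval = interval  # assumed back-off, in seconds

    def lock(self, timeout=None):
        while True:
            try:
                return super().lock(timeout)
            except AlreadyLockedError:
                time.sleep(self._interval)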
Example #21
    def __init__(self, filename, header="report('", footer="');"):
        self.filename = Path(filename)
        self.filename.parent.mkdir(parents=True, exist_ok=True)

        lockfilename = f"{filename}.lock"
        self.lock = Lock(str(lockfilename))

        if isinstance(header, str):
            header = header.encode()
        self.header = header

        if isinstance(footer, str):
            footer = footer.encode()
        self.footer = footer

        self.dictlist = None
        self.is_dirty = None
Example #22
def write_env(job_fullpath, env):
    """Write the job's environment to the matching .env file as JSON.

    Assumes a module-level `lock_file` shared by all job-file helpers.
    """

    env_fullpath = job_fullpath.replace(".job", ".env")

    # write env to env_fullpath
    with Lock(lock_file):
        with open(env_fullpath, "w") as ofp:
            json.dump(env, ofp, sort_keys=True)
Example #23
def remove_job(job_fullpath):
    """Remove the job file and its matching .env file, if present.

    Assumes a module-level `lock_file` shared by all job-file helpers.
    """

    with Lock(lock_file):
        if os.path.exists(job_fullpath):
            os.remove(job_fullpath)
        if os.path.exists(job_fullpath.replace(".job", ".env")):
            os.remove(job_fullpath.replace(".job", ".env"))
Example #24
    def _lock(self):
        if self._lockobj is None:
            # These will get automatically cleaned up by the test
            # infrastructure.
            self._uid_file = os.path.join(config.VAR_DIR, '.uid')
            if self._context is not None:
                self._uid_file += '.' + self._context
            self._lock_file = self._uid_file + '.lock'
            self._lockobj = Lock(self._lock_file)
        return self._lockobj
Example #25
def read_job(job_fullpath):
    """Read and return the job specification stored at `job_fullpath`.

    Assumes a module-level `lock_file` shared by all job-file helpers.
    """

    # Parse the contents of the job
    with Lock(lock_file):
        with open(job_fullpath, "r") as ifp:
            job_spec = json.load(ifp)

    return job_spec
Example #26
    def create():
        """See `IDatabaseFactory`."""
        with Lock(os.path.join(config.LOCK_DIR, 'dbcreate.lck')):
            database_class = config.database['class']
            database = call_name(database_class)
            verifyObject(IDatabase, database)
            database.initialize()
            SchemaManager(database).setup_database()
            database.commit()
            return database
Example #27
def read_env(job_fullpath):
    """Read and return the environment stored in the matching .env file.

    Assumes a module-level `lock_file` shared by all job-file helpers.
    """

    env_fullpath = job_fullpath.replace(".job", ".env")

    # read env from env_fullpath
    with Lock(lock_file):
        with open(env_fullpath, "r") as ifp:
            env = json.load(ifp)

    return env
Example #28
def write_result(output_file, pdb_id, input_type, model_id, score, rmsd):
    '''Write the result in a thread safe manner.'''
    type_map = {'native': 0, 'lowest_rmsd': 1, 'lowest_score': 2}
    # Make a lock

    lock = Lock('lock_filename')
    lock.lifetime = timedelta(minutes=10)

    # Write to the result file with a lock

    with lock:
        open_mode = 'a' if os.path.exists(output_file) else 'w'

        with open(output_file, open_mode) as f:
            if open_mode == 'w':
                os.mkdir(output_file + '.structures')
                f.write('#PDB\tModel\tLoop_rmsd\tTotal_energy\tInput_type\n')

            f.write('%s\t%d\t%f\t%f\t%d\n' %
                    (pdb_id, model_id, rmsd, score, type_map[input_type]))
Example #29
class NFSLock:
    def __init__(self, path):
        # Specify the path to a file that will be used to synchronize the lock.
        # Per the flufl.lock documentation, use a file that does not exist.
        self._lock = Lock(path)

        # Locks have a lifetime (default 15 seconds) which is the period of time that the process expects
        # to keep the lock once it has been acquired. We set the lifetime to be 5 minutes as we expect
        # all operations that require locks to be completed within that time.
        self._lock.lifetime = timedelta(minutes=5)

        # Ensure multiple threads within a process run NFSLock operations one at a time.
        # We must acquire the reentrant lock before acquiring the flufl lock and only release after
        # the flufl lock is released.
        self._r_lock = threading.RLock()

    def acquire(self):
        self._r_lock.acquire()
        try:
            self._lock.lock()
        except AlreadyLockedError:
            # Safe to re-attempt to acquire a lock
            pass

    def release(self):
        try:
            self._lock.unlock()
        except NotLockedError:
            # Safe to re-attempt to release a lock
            pass
        self._r_lock.release()

    def __enter__(self):
        self.acquire()

    def __exit__(self, t, v, tb):
        self.release()

    @property
    def is_locked(self):
        return self._lock.is_locked
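A minimal usage sketch for NFSLock, assuming a shared NFS mount (the paths are illustrative):

nfs_lock = NFSLock('/mnt/shared/.counter.lock')
with nfs_lock:
    # Serialized across threads (RLock) and across hosts (flufl lock).
    with open('/mnt/shared/counter.txt', 'a') as f:
        f.write('tick\n')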
Example #30
    def ensure_directories_exist(self):
        """Create all the paths if the directories do not exist."""
        if self.create_paths:
            for path_name, path in self.paths.items():
                makedirs(path)
            # Create a paper-git.cfg file if it doesn't already exist.
            lock_file = os.path.join(self.VAR_DIR, 'paper-git-cfg.lck')
            paper_git_cfg = os.path.join(self.ETC_DIR, 'paper-git.cfg')
            with Lock(lock_file):
                if not os.path.exists(paper_git_cfg):
                    with open(paper_git_cfg, 'w') as fp:
                        print(CFG_TEMPLATE, file=fp)
Example #31
def get_next_apk(apks_dir):
    """
    Gets the first available (not-locked) apk files and locks it

    :param apks_dir: directory to scan
    :return: a tuple containing the apk's path and the locked lock
    :rtype: (str, flufl.lock.Lock)
    """
    try:
        files = os.listdir(apks_dir)
    except FileNotFoundError:
        # folder doesn't exist
        return None, None
    for f in files:
        if not f.endswith(".apk"):
            continue
        f = os.path.join(apks_dir, f)
        try:
            # lock file should not exist
            filename = f + LOCK_PREFIX

            lock = Lock(filename, lifetime=datetime.timedelta(
                seconds=6000))  # lifetime of 6000 seconds (100 minutes)
            if not lock.is_locked:
                lock.lock(timeout=datetime.timedelta(milliseconds=350))
                if os.path.isfile(
                        f
                ):  # the original file could be deleted in the meantime
                    return f, lock
                if lock.is_locked:
                    lock.unlock()
        except (AlreadyLockedError, TimeOutError):
            # some other process is analyzing the file; go ahead and look for another file
            pass
    return None, None
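A consumer would claim files in a loop and unlock after processing; a minimal sketch, assuming a hypothetical analyze_apk step and an illustrative directory:

apk_path, lock = get_next_apk('/data/apks')
while apk_path is not None:
    try:
        analyze_apk(apk_path)  # hypothetical analysis step
    finally:
        lock.unlock()
    apk_path, lock = get_next_apk('/data/apks')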
Example #32
class NFSFileLock(object):

    def __init__(self, filename):
        self.lock_obj = Lock(filename + '.lock', timedelta(days=999))

    def lock(self):
        while True:
            try:
                self.lock_obj.lock()
                break
            except OSError as error:
                if errno.ESTALE != error.errno:
                    raise

        # Disowning the lock allows us to delete the flufl.lock.Lock object
        # without it calling the object destructor, which releases the lock. We
        # want to be able to delete the object but keep the lock in order to
        # store the lock inside a file.
        self.lock_obj.disown()

    def unlock(self):
        self.lock_obj.unlock()

    def is_locked(self):
        return self.lock_obj.is_locked
Example #33
def dump_to_file(entries, package, version_code, version_name):
    lock_acquired = False
    while not lock_acquired:
        try:
            # `dest` and `LOCK_PREFIX` are module-level names in this codebase.
            filename = dest + LOCK_PREFIX
            lock = Lock(filename, lifetime=datetime.timedelta(seconds=6000))  # lifetime of 6000 seconds (100 minutes)
            if not lock.is_locked:
                lock.lock(timeout=datetime.timedelta(milliseconds=350))
                lock_acquired = True
                with open(dest, 'a') as f:
                    first = True
                    if os.path.exists(dest) and os.path.getsize(dest) > 0:
                        first = False
                    for entry in entries:
                        entry_dict = entry.__dict__
                        entry_dict['package'] = package
                        entry_dict['versionCode'] = version_code
                        entry_dict['versionName'] = version_name
                        if first:
                            first = False
                        else:
                            f.write(",\n")
                        json.dump(entry_dict, f, indent=4)
                if lock.is_locked:
                    lock.unlock()
        except (AlreadyLockedError, TimeOutError):
            # some other process is writing the file; retry until the lock is free
            pass
Example #34
def load_json(path, require_exclusive=True, lock_path=None):
    """
    Shortcut for loading json from a file path.

    :param path: The full path to the file
    :type path: str
    :param require_exclusive: lock file for exclusive read
    :type require_exclusive: bool
    :param lock_path: path for the lock file to use
    :type lock_path: string
    :returns: loaded json
    :rtype: dict
    :raises: IOError, ValueError
    """
    lock = None
    if require_exclusive:
        if not lock_path:
            lock_path = get_lock_path(path)
        lock = Lock(lock_path)
        lock.lock()
    try:
        with open(path) as f:
            return json.load(f)
    finally:
        if lock:
            lock.unlock(unconditionally=True)
Example #35
def acquire_lock_1(force, lock_file=None):
    """Try to acquire the master lock.

    :param force: Flag that controls whether to force acquisition of the lock.
    :type force: bool
    :param lock_file: Path to the lock file, otherwise `config.LOCK_FILE`.
    :type lock_file: str
    :return: The master lock.
    :raises: `TimeOutError` if the lock could not be acquired.
    """
    if lock_file is None:
        lock_file = config.LOCK_FILE
    lock = Lock(lock_file, LOCK_LIFETIME)
    try:
        lock.lock(timedelta(seconds=0.1))
        return lock
    except TimeOutError:
        if not force:
            raise
        # Force removal of lock first.
        lock.disown()
        hostname, pid, tempfile = lock.details
        os.unlink(lock_file)
        return acquire_lock_1(force=False)
Example #36
class TaskQueue(object):
    ''' The actual Task Queue object. See Module docs '''

    def __init__(self, config_file):
        ''' initialise the task queue, from the config file '''
        self.config = Config(config_file)
        self.lock = Lock(pathjoin(self.config.get('DIRS', 'db'),
                         'TaskQueue.lock'))
        self.db = DictLiteStore(pathjoin(self.config.get('DIRS', 'db'),
                                'TaskQueue.db'), 'Tasks')

    def __enter__(self):
        ''' start of with TaskQueue(...) as t: block '''
        self.lock.lock()
        self.db.open()
        return self

    def __exit__(self, exptype, value, tb):
        ''' end of with ... block '''
        self.db.close()
        self.lock.unlock()

    def tasks(self, group=None, state=None):

        q = []
        if group:
            q.append(('group', 'LIKE', NoJSON('%"' + group + '"%')))
        if state:
            q.append(('state', '==', state))

        return self.db.get(*q) # pylint: disable=W0142


    def active_groups(self):
        ''' return a list of all groups currently in the task list, and how
            many tasks they each are running '''

        # grouplist looks like:
        #
        # dict[groupname] -> dict[state] -> count
        # so you can do awesome things.

        grouplist = defaultdict(lambda:defaultdict(lambda:0))

        sql = u'SELECT Tasks."group", Tasks."state" From Tasks'

        try:
            rows = self.db.cur.execute(sql).fetchall()
        except OperationalError as err:
            # usually no such column, which means usually no rows.
            rowcount = self.db.cur.execute(u'SELECT Count(id) FROM Tasks')
            if rowcount.fetchone()[0] == 0:
                return grouplist
            else:
                raise err

        for rawgroups, rawstate in rows:

            groups = json.loads(rawgroups)
            state = json.loads(rawstate)

            if isinstance(groups, list):
                for g in groups:
                    grouplist[g][state] += 1
            else:
                grouplist[groups][state] += 1

        return grouplist

        ######################################
        # If for some reason it would be better to return dicts
        # rather than defaultdicts, then this is the code:
        #
        #to_return = {}
        #for groupname in grouplist:
        #    to_return[groupname] = dict(grouplist[groupname])

        #return to_return

    def grouplimit(self, groupname):
        ''' how many tasks can be run at the same time in this group? '''

        return int(self.config.get(groupname, 'limit', 1))


    def _getnexttask(self, group, new_state='running'):
        ''' get the next 'ready' task of this group. This should ONLY be called
        by self.getnexttask, not by end users. getnexttask checks that limits
        haven't been reached, etc. '''
        try:
            task = self.tasks(group, 'ready')[0]
            if new_state:
                task['state'] = new_state
                self.db.update(task, False, ('uid', '==', task['uid']))

            # Now we are going to start the task, import the defaults from
            # the group config:
            if self.config.config.has_section(group):
                for k, v in self.config.config.items(group):
                    if not k in task:
                        task[k] = v

            # and finally load defaults:
            if self.config.config.has_section('task_defaults'):
                for k, v in self.config.config.items('task_defaults'):
                    if not k in task:
                        task[k] = v

            return task
        except IndexError:
            raise NoAvailableTasks()


    def getnexttask(self, group=None, new_state='running'):
        ''' Get one available next task, as long as 'group' isn't overloaded.
            When the task is 'got', sets the state to new_state in the database.
            So this can be used as an atomic action on tasks. '''

        if group:
            running_tasks = self.active_groups()[group]['running']
            group_limit = self.grouplimit(group)

            if running_tasks < group_limit:
                return self._getnexttask(group, new_state)
            else:
                raise TooBusy()

        else: #no group specified.

            all_groups = self.active_groups()

            for groupname, grouptasks in all_groups.items():

                # already at limit:
                if grouptasks['running'] >= self.grouplimit(groupname):
                    continue

                # no ready tasks:
                if grouptasks['ready'] == 0:
                    continue

                # we have a winner! (a group with available tasks)
                return self._getnexttask(groupname, new_state)

            # if there are no ready tasks at all, then raise that exception

            if all((g['ready'] == 0 for g in all_groups.values())):
                raise NoAvailableTasks()

            # otherwise, there are available tasks, but we're too busy.

            raise TooBusy()


    def save(self, data):
        ''' add needed fields if they're not there, and then save to the
            database.  If the same uuid is already there, then update it. '''

        if 'state' not in data:
            data['state'] = 'ready'

        if not 'uid' in data:
            data['uid'] = uuid1().hex

        if not 'group' in data:
            data['group'] = 'none'

        # If output files are not absolute paths, then place them in the config
        # file specified logfile directory.

        if 'stdout' in data and not data['stdout'] == abspath(data['stdout']):
            data['stdout'] = abspath(pathjoin(self.config.get('DIRS', 'log'),
                                              data['stdout']))

        if 'stderr' in data and not data['stderr'] == abspath(data['stderr']):
            data['stderr'] = abspath(pathjoin(self.config.get('DIRS', 'log'),
                                              data['stderr']))

        # And save it to the database.

        self.db.update(data, True, ('uid', '==', data['uid']))

        return data

    def get(self, uid):
        ''' get a task based of its uuid '''

        return self.db.get(('uid', '==', uid))
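A minimal usage sketch for TaskQueue, assuming a config file with the DIRS entries it reads (the path and task fields are illustrative):

with TaskQueue('taskqueue.ini') as tq:
    tq.save({'cmd': 'echo hello', 'group': 'demo'})
    try:
        task = tq.getnexttask(group='demo')
    except (TooBusy, NoAvailableTasks):
        task = None  # nothing runnable right now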
Example #37
def dmg_signfile(filename, keychain, signing_identity, code_resources, identifier, subject_ou, lockfile, fake=False, passphrase=None):
    """ Sign a mac .app folder
    """
    from flufl.lock import Lock, TimeOutError, NotLockedError
    from datetime import timedelta
    import pexpect

    basename = os.path.basename(filename)
    dirname = os.path.dirname(filename)
    stdout = tempfile.TemporaryFile()

    sign_command = ['codesign',
                    '-s', signing_identity, '-fv',
                    '--keychain', keychain,
                    '--resource-rules', code_resources,
                    '--requirement', MAC_DESIGNATED_REQUIREMENTS % locals(),
                    basename]

    # pexpect requires a string as input
    unlock_command = 'security unlock-keychain ' + keychain
    lock_command = ['security', 'lock-keychain', keychain]
    try:
        sign_lock = None
        try:
            # Acquire a lock for the signing command, to ensure we don't have a
            # race condition where one process locks the keychain immediately after another
            # unlocks it.
            log.debug("Try to acquire %s", lockfile)
            sign_lock = Lock(lockfile)
            # Put a 30 second timeout on waiting for the lock.
            sign_lock.lock(timedelta(0, 30))

            # Unlock the keychain so that we do not get a user-interaction prompt to use
            # the keychain for signing. This operation requires a password.
            child = pexpect.spawn(unlock_command)
            child.expect('password to unlock .*')
            child.sendline(passphrase)
            # read output until child exits
            child.read()
            child.close()
            if child.exitstatus != 0:
                raise ValueError("keychain unlock failed")

            # Execute the signing command
            check_call(sign_command, cwd=dirname, stdout=stdout, stderr=STDOUT)

        except TimeOutError:
            # timed out acquiring lock, give an error
            log.exception("Timeout acquiring lock %s for codesign, is something broken?", lockfile)
            raise
        except:
            # catch any other locking error
            log.exception("Error acquiring  %s for codesign, is something broken?", lockfile)
            raise
        finally:
            # Lock the keychain again, no matter what happens
            # This command does not require a password
            check_call(lock_command)

            # Release the lock, if it was acquired
            if sign_lock:
                try:
                    sign_lock.unlock()
                    log.debug("Release %s", lockfile)
                except NotLockedError:
                    log.debug("%s was already unlocked", lockfile)
Example #38
    def __init__(self, filename):
        self.lock_obj = Lock(filename + '.lock', timedelta(days=999))
Example #39
def dmg_signpackage(pkgfile, dstfile, keychain, mac_id, subject_ou, fake=False, passphrase=None):
    """ Sign a mac build, putting results into `dstfile`.
        pkgfile must be a tar, which gets unpacked, signed, and repacked.
    """
    # Keep track of our output in a list here, and we can output everything
    # when we're done This is to avoid interleaving the output from
    # multiple processes.
    from flufl.lock import Lock, TimeOutError, NotLockedError
    from datetime import timedelta
    import pexpect

    # TODO: Is it even possible to do 'fake' signing?
    logs = []
    logs.append("Repacking %s to %s" % (pkgfile, dstfile))

    # pexpect requires a string as input
    unlock_command = 'security unlock-keychain ' + keychain
    lock_command = ['security', 'lock-keychain', keychain]
    lockfile = os.path.join(os.path.dirname(keychain), '.lock')

    tmpdir = tempfile.mkdtemp()
    try:
        # Unpack it
        logs.append("Unpacking %s to %s" % (pkgfile, tmpdir))
        unpacktar(pkgfile, tmpdir)


        for macdir in os.listdir(tmpdir):
            macdir = os.path.join(tmpdir, macdir)
            log.debug('Checking if we should sign %s', macdir)
            if shouldSign(macdir, 'mac'):
                log.debug('Need to sign %s', macdir)

                try:
                    sign_lock = None
                    # Acquire a lock for the signing command, to ensure we don't have a
                    # race condition where one process locks the keychain immediately after another
                    # unlocks it.
                    log.debug("Try to acquire %s", lockfile)
                    sign_lock = Lock(lockfile)
                    # Put a 30 second timeout on waiting for the lock.
                    sign_lock.lock(timedelta(0, 30))

                    # Unlock the keychain so that we do not get a user-interaction prompt to use
                    # the keychain for signing. This operation requires a password.
                    child = pexpect.spawn(unlock_command)
                    child.expect('password to unlock .*')
                    child.sendline(passphrase)
                    # read output until child exits
                    child.read()
                    child.close()
                    if child.exitstatus != 0:
                        raise ValueError("keychain unlock failed")

                    # Sign the thing!
                    dmg_signfile(macdir, keychain, mac_id, subject_ou, fake)

                except TimeOutError:
                    # timed out acquiring lock, give an error
                    log.exception("Timeout acquiring lock  %s for codesign, is something broken? ", lockfile)
                    raise
                except:
                    # catch any other locking error
                    log.exception("Error acquiring  %s for codesign, is something broken?", lockfile)
                    raise
                finally:
                    # Lock the keychain again, no matter what happens
                    # This command does not require a password
                    check_call(lock_command)

                    # Release the lock, if it was acquired
                    if sign_lock:
                        try:
                            sign_lock.unlock()
                            log.debug("Release %s", lockfile)
                        except NotLockedError:
                            log.debug("%s was already unlocked", lockfile)


        # Repack it
        logs.append("Packing %s" % dstfile)
        tar_dir(dstfile, tmpdir)
    except:
        log.exception("Error signing %s", pkgfile)
        raise
    finally:
        # Clean up after ourselves, and output our logs
        shutil.rmtree(tmpdir)
        log.info("\n  ".join(logs))