Example 1
    def _do_alloc_ids(self, count, id_alloc_state):
        """Helper to allocate unique ids using the key/value store"""
        batch = Config().batch

        if not id_alloc_state:
            id_alloc_state = []
        else:
            id_alloc_state = yaml.safe_load(id_alloc_state)

        jsonschema.validate(id_alloc_state,
                            yaml.safe_load(id_allocation_schema))

        num_ids_preclean = len(id_alloc_state)
        # Cleanup completed jobs
        try:
            joblist = batch.list_all_jobs()
            id_alloc_state = [
                pk for pk in id_alloc_state if int(pk['batchid']) in joblist
            ]
        except PcoccError:
            pass

        num_ids = len(id_alloc_state)
        stray_ids = num_ids_preclean - num_ids
        if stray_ids > 0:
            logging.warning('Found %s leftover Ids, will try to cleanup',
                            stray_ids)

        if num_ids + count > self.num_ids:
            raise PcoccError('Not enough free ids in %s' % self.key_path)

        id_indexes = []
        i = 0
        # Fill gaps between existing allocations first; the for/else below
        # extends past the highest allocated index if ids are still needed
        for allocated_id in sorted(id_alloc_state,
                                   key=lambda x: x['pkey_index']):

            while i < allocated_id['pkey_index'] and count > 0:
                id_indexes.append(i)
                i += 1
                count -= 1

            if count == 0:
                break

            i += 1
        else:
            # All existing allocations scanned: take the remaining ids as
            # consecutive indexes past the highest allocated one
            id_indexes.extend(range(i, i + count))

        for i in id_indexes:
            id_alloc_state.append({'pkey_index': i, 'batchid': batch.batchid})

        return yaml.dump(id_alloc_state), id_indexes
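For context, a rough sketch of the allocation state this helper reads and writes, assuming (as the code above suggests) a YAML list of records with 'pkey_index' and 'batchid' fields; the concrete id_allocation_schema ships with pcocc and is not reproduced here, so the values below are illustrative only.

# Illustrative only: assumed shape of the YAML allocation state, a list of
# {'pkey_index': int, 'batchid': int} records keyed to running batch jobs
import yaml

example_state = yaml.dump([
    {'pkey_index': 0, 'batchid': 1001},
    {'pkey_index': 2, 'batchid': 1002},
])
# With this state, allocating 2 ids fills the gap at index 1 first and
# then takes index 3, one past the highest allocated index
print(yaml.safe_load(example_state))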
Example 2
    def coll_alloc(self, count, master, key):
        # Master allocates the ids and broadcasts to the others
        coll_path = os.path.join('coll_alloc', self.key_path, key)

        if Config().batch.node_rank == master:
            ids = Config().batch.atom_update_key('global', self.key_path,
                                                 self._do_alloc_ids, count)

            Config().batch.write_key('cluster', coll_path, yaml.dump(ids))
        else:
            ids = Config().batch.read_key('cluster',
                                          coll_path,
                                          blocking=True,
                                          timeout=30)

            ids = yaml.safe_load(ids)

        return ids
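To make the write-then-read broadcast pattern of coll_alloc visible without a pcocc batch environment, here is a hedged sketch against a hypothetical in-memory key/value store; FakeKV and its methods are stand-ins for illustration, not the real batch API.

# Hypothetical in-memory key/value store standing in for the pcocc batch
# store, to show the master-writes / others-read broadcast in coll_alloc
import yaml

class FakeKV(object):
    def __init__(self):
        self._store = {}

    def write_key(self, scope, path, value):
        self._store[(scope, path)] = value

    def read_key(self, scope, path, blocking=True, timeout=30):
        # A real store would block until the key appears or the timeout expires
        return self._store[(scope, path)]

kv = FakeKV()
master = 0
for node_rank in (0, 1):          # simulate the master and one other node
    if node_rank == master:
        ids = [3, 4]              # pretend result of the atomic allocation
        kv.write_key('cluster', 'coll_alloc/path/key', yaml.dump(ids))
    else:
        ids = yaml.safe_load(kv.read_key('cluster', 'coll_alloc/path/key'))
print(ids)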
Example 3
    def _do_free_ids(self, id_indexes, id_alloc_state):
        """Helper to free unique ids using the key/value store"""
        id_alloc_state = yaml.safe_load(id_alloc_state)
        jsonschema.validate(id_alloc_state,
                            yaml.safe_load(id_allocation_schema))

        batchid = Config().batch.batchid
        id_alloc_state[:] = [
            allocated_id for allocated_id in id_alloc_state
            if allocated_id['batchid'] != batchid
            or allocated_id['pkey_index'] not in id_indexes
        ]

        return yaml.dump(id_alloc_state), None
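A small worked example of the ownership filter above, under the same assumed state shape as before; the batchid values and freed indexes are made up.

# Worked example of the free filter: only entries that belong to the
# current batch job AND appear in the freed index list are removed
import yaml

state = [
    {'pkey_index': 0, 'batchid': 1001},
    {'pkey_index': 1, 'batchid': 1001},
    {'pkey_index': 2, 'batchid': 1002},
]
batchid, freed = 1001, [1]
state[:] = [e for e in state
            if e['batchid'] != batchid or e['pkey_index'] not in freed]
# Only the entry at index 1 owned by batch 1001 is dropped; entries owned
# by other jobs are kept even if their index were listed
print(yaml.dump(state))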
Example 4
    def free(self, ids):
        """Atomically free previously allocated ids in the key/value store"""
        return Config().batch.atom_update_key('global', self.key_path,
                                              self._do_free_ids, ids)
Example 5
    def alloc(self, count):
        """Atomically allocate count unique ids in the key/value store"""
        return Config().batch.atom_update_key('global', self.key_path,
                                              self._do_alloc_ids, count)
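Taken together, the last two examples suggest an allocate/use/free lifecycle. The sketch below stubs the allocator in plain Python so the pattern runs standalone; StubAllocator is hypothetical, and real allocations go through Config().batch.atom_update_key as shown above.

# Hypothetical stand-in for the allocator class, only to show the intended
# alloc/free pairing; it does not talk to any key/value store
class StubAllocator(object):
    def __init__(self, num_ids):
        self.free_ids = list(range(num_ids))

    def alloc(self, count):
        ids, self.free_ids = self.free_ids[:count], self.free_ids[count:]
        return ids

    def free(self, ids):
        self.free_ids.extend(ids)

allocator = StubAllocator(num_ids=16)
ids = allocator.alloc(4)
try:
    pass  # use the ids, e.g. to configure per-job network resources
finally:
    allocator.free(ids)  # return them so other jobs can reuse them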