Code example #1
File: blob.py Project: mandoonandy/shakenfist
    def get(self, node=None):
        retval = []

        with etcd.ThreadLocalReadOnlyCache():
            for b in Blobs(filters=[baseobject.active_states_filter]):
                # Only include blobs stored on the requested node; with no node
                # specified, return all active blobs.
                if node:
                    if node in b.locations:
                        retval.append(b.external_view())
                else:
                    retval.append(b.external_view())

        return retval
Code example #2
    def get(self, node=None):
        retval = []
        with etcd.ThreadLocalReadOnlyCache():
            for a in Artifacts(filters=[baseobject.active_states_filter]):
                if node:
                    # Only include artifacts whose most recent version is backed
                    # by a blob stored on the requested node.
                    idx = a.most_recent_index
                    if 'blob_uuid' in idx:
                        b = Blob.from_db(idx['blob_uuid'])
                        if b and node in b.locations:
                            retval.append(a.external_view())
                else:
                    retval.append(a.external_view())
        return retval
Code example #3
File: cluster.py Project: mandoonandy/shakenfist
    def _cluster_wide_cleanup(self, last_loop_run):
        LOG.info('Running cluster maintenance')

        # Cleanup soft deleted objects
        for objtype in OBJECT_NAMES:
            for _, objdata in etcd.get_all(objtype, None):
                try:
                    obj = OBJECT_NAMES_TO_CLASSES[objtype].from_db(
                        objdata['uuid'])
                    if (obj.state.value == dbo.STATE_DELETED and
                            time.time() - obj.state.update_time > config.CLEANER_DELAY):
                        LOG.with_object(obj).info('Hard deleting')
                        obj.hard_delete()
                except exceptions.BadObjectVersion:
                    LOG.with_fields({
                        objtype: objdata['uuid']
                    }).warning('Could not load object for hard delete, bad version')

        # Prune artifacts which might have too many versions
        for a in artifact.Artifacts([]):
            a.delete_old_versions()

        # Inspect the current state of blobs. The actual changes are made below,
        # outside the read only cache. We define being low on disk as having less
        # than three times the minimum amount of free disk. This is so we start to
        # rearrange blobs before scheduling starts to fail.
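        # overreplicated maps a blob uuid to the nodes to drop that blob from;
        # underreplicated collects (blob uuid, extra copies) tuples to replicate.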
        overreplicated = {}
        underreplicated = []
        low_disk_nodes = nodes_by_free_disk_descending(
            minimum=0, maximum=(config.MINIMUM_FREE_DISK * 3),
            intention='blobs')

        absent_nodes = []
        for n in Nodes([node_inactive_states_filter]):
            LOG.with_fields({
                'node': n.fqdn}).info('Node is absent for blob replication')
            absent_nodes.append(n.fqdn)
        LOG.info('Found %d inactive nodes' % len(absent_nodes))

        current_fetches = defaultdict(list)
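        # Build a map of blob uuid -> nodes currently fetching that blob, skipping
        # the network node and any nodes that are currently absent.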
        for workname, workitem in etcd.get_outstanding_jobs():
            # A workname looks like: /sf/queue/sf-3/jobname
            _, _, phase, node, _ = workname.split('/')
            if node == 'networknode':
                continue

            for task in workitem:
                if isinstance(task, FetchBlobTask):
                    if node in absent_nodes:
                        LOG.with_fields({
                            'blob': task.blob_uuid,
                            'node': node,
                            'phase': phase
                        }).warning('Node is absent, ignoring fetch')
                    else:
                        LOG.with_fields({
                            'blob': task.blob_uuid,
                            'node': node,
                            'phase': phase
                        }).info('Node is fetching blob')
                        current_fetches[task.blob_uuid].append(node)

        with etcd.ThreadLocalReadOnlyCache():
            for b in blob.Blobs([active_states_filter]):
                # If there is current work for a blob, we ignore it until that
                # work completes
                if b.uuid in current_fetches:
                    LOG.with_fields({
                        'blob': b.uuid
                    }).info('Blob has current fetches, ignoring')
                    continue

                locations = b.locations
                ignored_locations = []
                for n in absent_nodes:
                    if n in locations:
                        locations.remove(n)
                        ignored_locations.append(n)

                if ignored_locations:
                    LOG.with_fields({
                        'blob': b,
                        'ignored_locations': ignored_locations
                    }).info('Ignored some blob locations as nodes are absent')

                delta = len(locations) - config.BLOB_REPLICATION_FACTOR
                if delta > 0:
                    # So... The blob replication factor is a target, not a limit.
                    # Specifically, if there are more locations than the target
                    # but we aren't low on disk, we don't clean them up. That's
                    # because it's hard for us to predict which machine will run
                    # out of disk first, and copying a blob back to a machine if
                    # it's needed there is slow and annoying.

                    # Work out where the blob is in active use.
                    excess_locations = b.locations
                    in_use_locations = []

                    for instance_uuid in b.instances:
                        i = instance.Instance.from_db(instance_uuid)
                        node = i.placement.get('node')
                        if node in excess_locations:
                            excess_locations.remove(node)
                            in_use_locations.append(node)

                    # Only remove excess copies from nodes which are running
                    # low on disk. Do not end up with too few replicas.
                    overreplicated[b.uuid] = []
                    target = (config.BLOB_REPLICATION_FACTOR -
                              len(in_use_locations))
                    for n in low_disk_nodes:
                        if n in excess_locations:
                            overreplicated[b.uuid].append(n)
                        if len(overreplicated[b.uuid]) == target:
                            break

                elif delta < 0:
                    # The tuple is blob UUID, and how much to over replicate by.
                    underreplicated.append((b.uuid, 0))

                else:
                    # We have exactly the right number of copies, but what if
                    # the blob is on a really full node?
                    for n in low_disk_nodes:
                        if n in b.locations:
                            # We have at least one space constrained node with
                            # this blob. Request an extra temporary copy of the
                            # blob elsewhere so we can hopefully clean up one of
                            # these next pass. The tuple is blob UUID, and how
                            # much to over replicate by.
                            underreplicated.append((b.uuid, 1))
                            break

        # Prune over replicated blobs
        for blob_uuid in overreplicated:
            b = blob.Blob.from_db(blob_uuid)
            for node in overreplicated[blob_uuid]:
                LOG.with_fields({
                    'blob': b,
                    'node': node
                }).info('Blob over replicated, removing from node with no users')
                b.drop_node_location(node)

        # Replicate under replicated blobs, but only if we don't have heaps of
        # queued replications already
        for blob_uuid, excess in underreplicated:
            LOG.with_fields({
                'current': len(current_fetches),
                'maximum': config.MAX_CONCURRENT_BLOB_TRANSFERS
            }).info('Concurrent blob transfers')
            if len(current_fetches) > config.MAX_CONCURRENT_BLOB_TRANSFERS:
                LOG.info(
                    'Too many concurrent blob transfers queued, not queueing more')
                break

            b = blob.Blob.from_db(blob_uuid)
            LOG.with_fields({
                'blob': b
            }).info('Blob under replicated, attempting to correct')
            b.request_replication(allow_excess=excess)
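            # Count this request against the concurrency limit even though we
            # don't yet know which node will perform the fetch.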
            current_fetches[blob_uuid].append('unknown')

        # Node management
        for n in Nodes([node_inactive_states_filter]):
            age = time.time() - n.last_seen

            # Find nodes which have returned from being missing
            if age < config.NODE_CHECKIN_MAXIMUM:
                n.state = Node.STATE_CREATED
                LOG.with_object(n).info('Node returned from being missing')

            # Find nodes which have been offline for a long time, unless
            # this machine has been asleep for a long time (think developer
            # laptop).
            if (time.time() - last_loop_run < config.NODE_CHECKIN_MAXIMUM
                    and age > config.NODE_CHECKIN_MAXIMUM * 10):
                n.state = Node.STATE_ERROR
                for i in instance.healthy_instances_on_node(n):
                    LOG.with_object(i).with_object(n).info(
                        'Node in error state, erroring instance')
                    # Note, this queue job is just in case the node comes
                    # back.
                    i.enqueue_delete_due_error('Node in error state')

        # Find nodes which haven't checked in recently
        for n in Nodes([node_active_states_filter]):
            age = time.time() - n.last_seen
            if age > config.NODE_CHECKIN_MAXIMUM:
                n.state = Node.STATE_MISSING

        # And we're done
        LOG.info('Cluster maintenance loop complete')
Code example #4
File: cleaner.py Project: mandoonandy/shakenfist
    def _maintain_blobs(self):
        # Find orphaned and deleted blobs still on disk
        blob_path = os.path.join(config.STORAGE_PATH, 'blobs')
        os.makedirs(blob_path, exist_ok=True)
        cache_path = os.path.join(config.STORAGE_PATH, 'image_cache')
        os.makedirs(cache_path, exist_ok=True)

        for ent in os.listdir(blob_path):
            entpath = os.path.join(blob_path, ent)
            st = os.stat(entpath)

            # If we've had this file for more than two cleaner delays...
            if time.time() - st.st_mtime > config.CLEANER_DELAY * 2:
                if ent.endswith('.partial'):
                    # ... and it's a stale partial transfer
                    LOG.with_fields({
                        'blob': ent
                    }).warning('Deleting stale partial transfer')
                    os.unlink(entpath)

                else:
                    b = Blob.from_db(ent)
                    if (not b or b.state.value == Blob.STATE_DELETED
                            or config.NODE_NAME not in b.locations):
                        LOG.with_fields({
                            'blob': ent
                        }).warning('Deleting orphaned blob')
                        os.unlink(entpath)
                        cached = util_general.file_permutation_exists(
                            os.path.join(cache_path, ent), ['iso', 'qcow2'])
                        if cached:
                            os.unlink(cached)

        # Find transcoded blobs in the image cache which are no longer in use
        for ent in os.listdir(cache_path):
            entpath = os.path.join(cache_path, ent)

            # Broken symlinks will report an error here that we have to catch
            try:
                st = os.stat(entpath)
            except OSError as e:
                if e.errno == errno.ENOENT:
                    LOG.with_fields({
                        'blob': ent
                    }).warning('Deleting broken symlinked image cache entry')
                    os.unlink(entpath)
                    continue
                else:
                    raise e

            # If we haven't seen this file in use for more than two cleaner delays...
            if time.time() - st.st_mtime > config.CLEANER_DELAY * 2:
                blob_uuid = ent.split('.')[0]
                b = Blob.from_db(blob_uuid)
                if not b:
                    LOG.with_fields({
                        'blob': ent
                    }).warning('Deleting orphaned image cache entry')
                    os.unlink(entpath)
                    continue

                if b.ref_count == 0:
                    LOG.with_fields({
                        'blob': ent
                    }).warning('Deleting globally unused image cache entry')
                    os.unlink(entpath)
                    continue

                # Count how many instances using this blob are placed on this node.
                this_node = 0
                for instance_uuid in b.instances:
                    i = instance.Instance.from_db(instance_uuid)
                    if i:
                        if i.placement.get('node') == config.NODE_NAME:
                            this_node += 1

                LOG.with_fields({
                    'blob': blob_uuid,
                    'this_node': this_node
                }).info('Blob users on this node')
                if this_node == 0:
                    LOG.with_fields({
                        'blob': blob_uuid
                    }).warning('Deleting unused image cache entry')
                    os.unlink(entpath)
                else:
                    # Record that this file is in use for the benefit of
                    # the above time check.
                    pathlib.Path(entpath).touch(exist_ok=True)

        # Find blobs which should be on this node but are not.
        missing = []
        with etcd.ThreadLocalReadOnlyCache():
            for b in Blobs([active_states_filter]):
                if config.NODE_NAME in b.locations:
                    if not os.path.exists(
                            os.path.join(config.STORAGE_PATH, 'blobs',
                                         b.uuid)):
                        missing.append(b.uuid)

        for blob_uuid in missing:
            b = Blob.from_db(blob_uuid)
            if b:
                LOG.with_fields({
                    'blob': blob_uuid
                }).warning('Blob missing from node')
                b.drop_node_location(config.NODE_NAME)
Code example #5
    def get(self, artifact_uuid=None, artifact_from_db=None):
        with etcd.ThreadLocalReadOnlyCache():
            return artifact_from_db.external_view()
Code example #6
    def run(self):
        LOG.info('Starting')
        observers = {}
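        # observers maps an instance uuid to the multiprocessing.Process watching
        # that instance's console log for triggers.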

        while not self.exit.is_set():
            # Cleanup terminated observers
            all_observers = list(observers.keys())
            for instance_uuid in all_observers:
                if not observers[instance_uuid].is_alive():
                    # Reap process
                    observers[instance_uuid].join(1)
                    LOG.with_instance(instance_uuid).info(
                        'Trigger observer has terminated')
                    db.add_event('instance', instance_uuid, 'trigger monitor',
                                 'crashed', None, None)
                    del observers[instance_uuid]

            # Audit desired observers
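            # extra_instances starts as every current observer and is pared down
            # to those whose instance no longer needs one; instances without an
            # observer are collected in missing_instances.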
            extra_instances = list(observers.keys())
            missing_instances = []

            with etcd.ThreadLocalReadOnlyCache():
                for inst in instance.Instances([
                        instance.this_node_filter,
                        partial(baseobject.state_filter,
                                [instance.Instance.STATE_CREATED])
                ]):
                    if inst.uuid in extra_instances:
                        extra_instances.remove(inst.uuid)

                    if inst.uuid not in observers:
                        missing_instances.append(inst.uuid)

            # Start missing observers
            for instance_uuid in missing_instances:
                console_path = os.path.join(config.STORAGE_PATH, 'instances',
                                            instance_uuid, 'console.log')
                p = multiprocessing.Process(
                    target=observe,
                    args=(console_path, instance_uuid),
                    name='%s-%s' %
                    (daemon.process_name('triggers'), instance_uuid))
                p.start()

                observers[instance_uuid] = p
                LOG.with_instance(instance_uuid).info(
                    'Started trigger observer')
                db.add_event('instance', instance_uuid, 'trigger monitor',
                             'started', None, None)

            # Cleanup extra observers
            for instance_uuid in extra_instances:
                p = observers[instance_uuid]
                try:
                    os.kill(p.pid, signal.SIGKILL)
                    observers[instance_uuid].join(1)
                except Exception:
                    pass

                del observers[instance_uuid]
                LOG.with_instance(instance_uuid).info(
                    'Finished trigger observer')
                db.add_event('instance', instance_uuid, 'trigger monitor',
                             'finished', None, None)

            self.exit.wait(1)

        # No longer running, clean up all trigger daemons
        for instance_uuid in observers:
            os.kill(observers[instance_uuid].pid, signal.SIGKILL)