def handle(self, *args, **options):
        """
        Remove all proxied resources whose source node is no longer listed
        in `settings.PROXIED_NODES`.
        """
        # Acquire the storage lock *before* entering the try block: if
        # Lock() or acquire() raised inside the try, the finally clause
        # would hit a NameError or release a lock that was never acquired.
        # The lock keeps other processes with heavy/frequent operations on
        # the storage out of our way.
        lock = Lock('storage')
        lock.acquire()
        try:
            # collect current proxied node ids
            proxied_ids = list(settings.PROXIED_NODES)

            # iterate over proxy resources and check for each if its source
            # node id is still listed in the proxied node id list
            remove_count = 0
            for proxy_res in StorageObject.objects.filter(copy_status=PROXY):
                if proxy_res.source_node not in proxied_ids:
                    # delete the associated resource
                    sys.stdout.write("\nremoving proxied resource {}\n" \
                        .format(proxy_res.identifier))
                    LOGGER.info("removing from proxied node {} resource {}" \
                        .format(proxy_res.source_node, proxy_res.identifier))
                    remove_count += 1
                    remove_resource(proxy_res)
            sys.stdout.write("\n{} proxied resources removed\n" \
                .format(remove_count))
            LOGGER.info("A total of {} resources have been removed" \
                .format(remove_count))
        finally:
            lock.release()
Example #2
0
 def sync_with_nodes(nodes, is_proxy, id_file=None):
     """
     Synchronizes this META-SHARE node with the given other META-SHARE nodes.
     
     `nodes` is a dict of dicts with synchronization settings for the nodes
         to synchronize with
     `is_proxy` must be True if this node is a proxy for the given nodes;
         it must be False if the given nodes are not proxied by this node
     `id_file` is an optional argument passed through to
         `Command.sync_with_single_node`
     """
     for node_id, node in nodes.items():
         LOGGER.info("syncing with node {} at {} ...".format(
           node_id, node['URL']))
         # Acquire the storage lock *before* entering the try block: if
         # Lock() or acquire() raised inside the try, the finally clause
         # would hit a NameError or release an unacquired lock. The lock
         # keeps other processes with heavy/frequent storage operations
         # out of our way during the actual synchronization.
         lock = Lock('storage')
         lock.acquire()
         try:
             Command.sync_with_single_node(
               node_id, node, is_proxy, id_file=id_file)
         except Exception:
             # catch Exception rather than a bare `except:` so that
             # KeyboardInterrupt and SystemExit still propagate
             LOGGER.error('There was an error while trying to sync with '
                 'node "%s":', node_id, exc_info=True)
         finally:
             lock.release()
 def handle(self, *args, **options):
     """
     Repair the storage objects of this node.
     """
     # Acquire the storage lock *before* entering the try block: if Lock()
     # or acquire() raised inside the try, the finally clause would hit a
     # NameError or release a lock that was never acquired. The lock keeps
     # other processes with heavy/frequent storage operations out of our
     # way while the storage objects are being repaired.
     lock = Lock('storage')
     lock.acquire()
     try:
         repair_storage_objects()
     finally:
         lock.release()
 def handle(self, *args, **options):
     """
     Update digests.
     """
     # Acquire the storage lock *before* entering the try block: if Lock()
     # or acquire() raised inside the try, the finally clause would hit a
     # NameError or release a lock that was never acquired. The lock keeps
     # other processes with heavy/frequent storage operations out of our
     # way during the digest updating.
     lock = Lock('storage')
     lock.acquire()
     try:
         update_digests()
     finally:
         lock.release()
Example #5
0
    def handle(self, *args, **options):
        """
        Remove all resources whose source node is one of the node names
        given in `args`.
        """
        # Acquire the storage lock *before* entering the try block: if
        # Lock() or acquire() raised inside the try, the finally clause
        # would hit a NameError or release a lock that was never acquired.
        # The lock keeps other processes with heavy/frequent storage
        # operations out of our way.
        lock = Lock('storage')
        lock.acquire()
        try:
            for node_name in args:
                LOGGER.info("checking node {}".format(node_name))
                remove_count = 0
                for res in StorageObject.objects.filter(source_node=node_name):
                    remove_count += 1
                    LOGGER.info("removing resource {}".format(res.identifier))
                    remove_resource(res)
                LOGGER.info("removed {} resources of node {}" \
                        .format(remove_count, node_name))
        finally:
            lock.release()
Example #6
0
    def handle(self, *args, **options):
        """
        Remove all proxied resources whose source node is no longer listed
        in `settings.PROXIED_NODES`.
        """
        # Acquire the storage lock *before* entering the try block: if
        # Lock() or acquire() raised inside the try, the finally clause
        # would hit a NameError or release a lock that was never acquired.
        # The lock keeps other processes with heavy/frequent operations on
        # the storage out of our way.
        lock = Lock('storage')
        lock.acquire()
        try:
            # collect current proxied node ids
            proxied_ids = list(settings.PROXIED_NODES)

            # iterate over proxy resources and check for each if its source
            # node id is still listed in the proxied node id list
            remove_count = 0
            for proxy_res in StorageObject.objects.filter(copy_status=PROXY):
                if proxy_res.source_node not in proxied_ids:
                    # delete the associated resource
                    sys.stdout.write("\nremoving proxied resource {}\n" \
                        .format(proxy_res.identifier))
                    LOGGER.info("removing from proxied node {} resource {}" \
                        .format(proxy_res.source_node, proxy_res.identifier))
                    remove_count += 1
                    remove_resource(proxy_res)
            sys.stdout.write("\n{} proxied resources removed\n" \
                .format(remove_count))
            LOGGER.info("A total of {} resources have been removed" \
                .format(remove_count))
        finally:
            lock.release()
Example #7
0
 def sync_with_nodes(nodes, is_proxy, id_file=None):
     """
     Synchronizes this META-SHARE node with the given other META-SHARE nodes.
     
     `nodes` is a dict of dicts with synchronization settings for the nodes
         to synchronize with
     `is_proxy` must be True if this node is a proxy for the given nodes;
         it must be False if the given nodes are not proxied by this node
     `id_file` is an optional argument passed through to
         `Command.sync_with_single_node`
     """
     for node_id, node in nodes.items():
         LOGGER.info("syncing with node {} at {} ...".format(
             node_id, node['URL']))
         # Acquire the storage lock *before* entering the try block: if
         # Lock() or acquire() raised inside the try, the finally clause
         # would hit a NameError or release an unacquired lock. The lock
         # keeps other processes with heavy/frequent storage operations
         # out of our way during the actual synchronization.
         lock = Lock('storage')
         lock.acquire()
         try:
             Command.sync_with_single_node(node_id,
                                           node,
                                           is_proxy,
                                           id_file=id_file)
         except Exception:
             # catch Exception rather than a bare `except:` so that
             # KeyboardInterrupt and SystemExit still propagate
             LOGGER.error(
                 'There was an error while trying to sync with '
                 'node "%s":',
                 node_id,
                 exc_info=True)
         finally:
             lock.release()
 def handle(self, *args, **options):
     """
     Repair the storage objects of this node.
     """
     # Acquire the storage lock *before* entering the try block: if Lock()
     # or acquire() raised inside the try, the finally clause would hit a
     # NameError or release a lock that was never acquired. The lock keeps
     # other processes with heavy/frequent storage operations out of our
     # way while the storage objects are being repaired.
     lock = Lock('storage')
     lock.acquire()
     try:
         repair_storage_objects()
     finally:
         lock.release()
 def handle(self, *args, **options):
     """
     Update digests.
     """
     # Acquire the storage lock *before* entering the try block: if Lock()
     # or acquire() raised inside the try, the finally clause would hit a
     # NameError or release a lock that was never acquired. The lock keeps
     # other processes with heavy/frequent storage operations out of our
     # way during the digest updating.
     lock = Lock('storage')
     lock.acquire()
     try:
         update_digests()
     finally:
         lock.release()
Example #10
0
    def handle(self, *args, **options):
        """
        Repair recommendations.
        """
        # Acquire the storage lock *before* entering the try block: if
        # Lock() or acquire() raised inside the try, the finally clause
        # would hit a NameError or release a lock that was never acquired.
        # The lock keeps other processes with heavy/frequent storage
        # operations out of our way.
        lock = Lock('storage')
        lock.acquire()
        try:
            repair_recommendations()
        finally:
            lock.release()
Example #11
0
    def handle(self, *args, **options):
        """
        Remove all resources whose source node is one of the node names
        given in `args`.
        """
        # Acquire the storage lock *before* entering the try block: if
        # Lock() or acquire() raised inside the try, the finally clause
        # would hit a NameError or release a lock that was never acquired.
        # The lock keeps other processes with heavy/frequent storage
        # operations out of our way.
        lock = Lock('storage')
        lock.acquire()
        try:
            for node_name in args:
                LOGGER.info("checking node {}".format(node_name))
                remove_count = 0
                for res in StorageObject.objects.filter(source_node=node_name):
                    remove_count += 1
                    LOGGER.info("removing resource {}".format(res.identifier))
                    remove_resource(res)
                LOGGER.info("removed {} resources of node {}" \
                        .format(remove_count, node_name))
        finally:
            lock.release()