def get_completed_deletes(backend_object):
    """Get all the completed deletes for the ObjectStore"""
    # avoiding a circular dependency
    from jdma_control.models import MigrationRequest, Migration, StorageQuota
    # get the storage id
    storage_id = StorageQuota.get_storage_index("objectstore")
    # get the decrypt key
    key = AES_tools.AES_read_key(settings.ENCRYPT_KEY_FILE)

    # list of completed DELETEs to return
    completed_DELETEs = []
    # now loop over the DELETE requests
    del_reqs = MigrationRequest.objects.filter(
        (Q(request_type=MigrationRequest.DELETE))
        & Q(stage=MigrationRequest.DELETING)
        & Q(migration__storage__storage=storage_id))
    for dr in del_reqs:
        # decrypt the credentials
        credentials = AES_tools.AES_decrypt_dict(key, dr.credentials)
        try:
            # create a connection to the object store
            s3c = boto3.client(
                "s3",
                endpoint_url=backend_object.OS_Settings["S3_ENDPOINT"],
                aws_access_key_id=credentials['access_key'],
                aws_secret_access_key=credentials['secret_key'])
            # if the bucket has been deleted then the deletion has completed
            buckets = s3c.list_buckets()
            # list_buckets returns a list of dicts under 'Buckets', so
            # compare the external_id against the bucket names
            bucket_names = [b['Name'] for b in buckets.get('Buckets', [])]
            if dr.migration.external_id not in bucket_names:
                completed_DELETEs.append(dr.migration.external_id)
        except Exception as e:
            raise Exception(e)
    return completed_DELETEs
def get_completed_puts(backend_object):
    """Get all the completed puts for the FTP backend"""
    # avoiding a circular dependency
    from jdma_control.models import MigrationRequest, Migration, StorageQuota
    # get the storage id
    storage_id = StorageQuota.get_storage_index("ftp")
    # get the decrypt key
    key = AES_tools.AES_read_key(settings.ENCRYPT_KEY_FILE)

    # list of completed PUTs to return
    completed_PUTs = []
    # now loop over the PUT requests
    put_reqs = MigrationRequest.objects.filter(
        (Q(request_type=MigrationRequest.PUT)
         | Q(request_type=MigrationRequest.MIGRATE))
        & Q(stage=MigrationRequest.PUTTING)
        & Q(migration__stage=Migration.PUTTING)
        & Q(migration__storage__storage=storage_id))
    for pr in put_reqs:
        # decrypt the credentials
        credentials = AES_tools.AES_decrypt_dict(key, pr.credentials)
        try:
            ftp = ftplib.FTP(host=backend_object.FTP_Settings["FTP_ENDPOINT"],
                             user=credentials['username'],
                             passwd=credentials['password'])
            # loop over each archive in the migration
            archive_set = pr.migration.migrationarchive_set.order_by('pk')
            # counter for number of uploaded archives
            n_up_arch = 0
            for archive in archive_set:
                # get the list of files for this archive
                file_list = archive.get_file_names()['FILE']
                n_files = 0
                for file_path in file_list:
                    # object name is the file_path, without the gws prefix
                    object_name = (pr.migration.external_id + "/" + file_path)
                    # enforce switch to binary (images here, but that doesn't
                    # matter)
                    ftp.voidcmd('TYPE I')
                    try:
                        fsize = ftp.size(object_name)
                        if fsize is not None:
                            n_files += 1
                    except ftplib.error_perm:
                        # SIZE raises error_perm if the file is not on the
                        # FTP server yet - treat it as not uploaded
                        pass
                # check if all files uploaded and then inc archive
                if n_files == len(file_list):
                    n_up_arch += 1

            if n_up_arch == pr.migration.migrationarchive_set.count():
                completed_PUTs.append(pr.migration.external_id)

            ftp.quit()
        except Exception as e:
            raise Exception(e)

    return completed_PUTs
def get_completed_puts(backend_object):
    """Get all the completed puts for the ObjectStore"""
    # avoiding a circular dependency
    from jdma_control.models import MigrationRequest, Migration, StorageQuota
    # get the storage id
    storage_id = StorageQuota.get_storage_index("objectstore")
    # get the decrypt key
    key = AES_tools.AES_read_key(settings.ENCRYPT_KEY_FILE)

    # list of completed PUTs to return
    completed_PUTs = []
    # now loop over the PUT requests
    put_reqs = MigrationRequest.objects.filter(
        (Q(request_type=MigrationRequest.PUT)
         | Q(request_type=MigrationRequest.MIGRATE))
        & Q(stage=MigrationRequest.PUTTING)
        & Q(migration__stage=Migration.PUTTING)
        & Q(migration__storage__storage=storage_id))
    for pr in put_reqs:
        # decrypt the credentials
        credentials = AES_tools.AES_decrypt_dict(key, pr.credentials)
        try:
            # create a connection to the object store
            s3c = boto3.client(
                "s3",
                endpoint_url=backend_object.OS_Settings["S3_ENDPOINT"],
                aws_access_key_id=credentials['access_key'],
                aws_secret_access_key=credentials['secret_key'])
            # loop over each archive in the migration
            archive_set = pr.migration.migrationarchive_set.order_by('pk')
            # counter for number of uploaded archives
            n_up_arch = 0
            for archive in archive_set:
                # get the list of files for this archive
                if archive.packed:
                    file_list = [archive.get_archive_name()]
                else:
                    file_list = archive.get_file_names()['FILE']
                n_files = 0
                for file_path in file_list:
                    # object name is the file_path, without any prefix
                    try:
                        if s3c.head_object(Bucket=pr.migration.external_id,
                                           Key=file_path):
                            n_files += 1
                    except s3c.exceptions.ClientError:
                        # head_object raises a ClientError (404) if the
                        # object has not been uploaded yet
                        pass
                # check if all files uploaded and then inc archive
                if n_files == len(file_list):
                    n_up_arch += 1
            if n_up_arch == pr.migration.migrationarchive_set.count():
                completed_PUTs.append(pr.migration.external_id)

        except Exception as e:
            raise Exception(e)

    return completed_PUTs
def get_completed_deletes(backend_object):
    """Get all the completed deletes for the ObjectStore"""
    # avoiding a circular dependency
    from jdma_control.models import MigrationRequest, Migration, StorageQuota
    # get the storage id
    storage_id = StorageQuota.get_storage_index("ftp")
    # get the decrypt key
    key = AES_tools.AES_read_key(settings.ENCRYPT_KEY_FILE)

    # list of completed DELETEs to return
    completed_DELETEs = []
    # now loop over the DELETE requests
    del_reqs = MigrationRequest.objects.filter(
        (Q(request_type=MigrationRequest.DELETE))
        & Q(stage=MigrationRequest.DELETING)
        & Q(migration__storage__storage=storage_id))
    for dr in del_reqs:
        # decrypt the credentials
        credentials = AES_tools.AES_decrypt_dict(key, dr.credentials)
        try:
            # create a connection to the FTP server
            ftp = ftplib.FTP(host=backend_object.FTP_Settings["FTP_ENDPOINT"],
                             user=credentials['username'],
                             passwd=credentials['password'])
            # if the external_id directory has been deleted then the
            # deletion has completed
            dir_list = ftp.mlsd("/")
            found = False
            for d in dir_list:
                # check the entry is a directory and the external id is in its name
                if d[1]['type'] == 'dir' and dr.migration.external_id in d[0]:
                    found = True
                    break
            if not found:
                completed_DELETEs.append(dr.migration.external_id)

        except Exception as e:
            raise Exception(e)
    return completed_DELETEs
def get_transfers(backend_object, key):
    """Work through the state machine to download batches from the external
    storage"""
    # get the storage id for the backend object
    storage_id = StorageQuota.get_storage_index(backend_object.get_id())

    # get the GET requests which are pending or in progress for this backend
    gr = MigrationRequest.objects.filter(
        Q(request_type=MigrationRequest.GET)
        & Q(locked=False)
        & Q(migration__storage__storage=storage_id)
        #& ~Q(user__name="n1280run")    # NRM 21/04/2022 - blocking this user name for now until disk cleared
        & Q(stage__in=[
            MigrationRequest.GET_PENDING,
            MigrationRequest.GETTING,
            MigrationRequest.GET_RESTORE,
        ])).first()

    # .first() returns None when no requests that match the filter are found
    if not gr:
        return
    # lock the Migration to prevent other processes acting upon it
    if not gr.lock():
        return
    # determine the credentials for the user - decrypt if necessary
    if gr.credentials != {}:
        credentials = AES_tools.AES_decrypt_dict(key, gr.credentials)
    else:
        credentials = {}

    if gr.stage == MigrationRequest.GET_PENDING:
        # we might have to do something here, like create a download batch
        # for elastic tape.  Also create the directory and transition the
        # state
        try:
            download(backend_object, credentials, gr)
        except Exception as e:
            # Something went wrong, set FAILED and failure_reason
            mark_migration_failed(gr, str(e), e, upload_mig=False)

    elif gr.stage == MigrationRequest.GETTING:
        pass

    elif gr.stage == MigrationRequest.GET_RESTORE:
        # restore the file permissions
        try:
            restore_owner_and_group_on_get(backend_object, gr)
        except Exception as e:
            mark_migration_failed(gr, str(e), e, upload_mig=False)
    gr.unlock()
def put_transfers(backend_object, key):
    """Work through the state machine to upload batches to the external
    storage"""
    # get the storage id for the backend object
    storage_id = StorageQuota.get_storage_index(backend_object.get_id())
    # Get the first non-locked PUT request for this backend.
    # This involves resolving two foreign keys
    pr = MigrationRequest.objects.filter(
        (Q(request_type=MigrationRequest.PUT)
         | Q(request_type=MigrationRequest.MIGRATE))
        & Q(locked=False)
        & Q(migration__storage__storage=storage_id)
        & Q(stage__in=[
            MigrationRequest.PUT_PENDING,
            MigrationRequest.VERIFY_PENDING,
        ])).first()

    # .first() returns None when no requests that match the filter are found
    if not pr:
        return
    # lock the Migration to prevent other processes acting upon it
    if not pr.lock():
        return
    # determine the credentials for the user - decrypt if necessary
    if pr.credentials != {}:
        credentials = AES_tools.AES_decrypt_dict(key, pr.credentials)
    else:
        credentials = {}

    # Check whether data is being put to external storage
    if pr.stage == MigrationRequest.PUT_PENDING:
        # create the batch on this instance, next time the script is run
        # the archives will be created as tarfiles
        try:
            upload(backend_object, credentials, pr)
        except Exception as e:
            # Something went wrong, set FAILED and failure_reason
            mark_migration_failed(pr, str(e), e)
    # check if data is now on external storage and should be pulled
    # back for verification
    elif pr.stage == MigrationRequest.VERIFY_PENDING:
        # pull back the data from the external storage
        try:
            verify(backend_object, credentials, pr)
        except Exception as e:
            # Something went wrong, set FAILED and failure_reason
            mark_migration_failed(pr, str(e), e)
    # unlock
    pr.unlock()
def run_loop(backend_objects):
    # Run one pass of the main loop over all the backends
    # (intended to be called over and over by the daemon)
    try:
        # read the decrypt key
        key = AES_tools.AES_read_key(settings.ENCRYPT_KEY_FILE)
        for backend_object in backend_objects:
            process(backend_object, key)
    except SystemExit:
        for backend_object in backend_objects:
            backend_object.exit()
        sys.exit(0)
    except Exception as e:
        # catch all exceptions as we want this to run in a loop for all
        # backends and transfers - we don't want one transfer to crash out
        # the transfer daemon with a single bad transfer!
        # output the exception to the log so we can see what went wrong
        logging.error(str(e))
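
# A minimal sketch, under stated assumptions, of how run_loop() is driven.
# run_loop() makes a single pass over the backends and calls a process()
# dispatcher that is not shown in this listing, so the names example_process,
# example_daemon and the polling interval below are hypothetical - they only
# illustrate how the state-machine functions defined here (put_transfers,
# get_transfers, delete_transfers) could be wired together.
import time

def example_process(backend_object, key):
    # drive each state machine once for this backend
    put_transfers(backend_object, key)
    get_transfers(backend_object, key)
    delete_transfers(backend_object, key)

def example_daemon(backend_objects, interval=30):
    # call run_loop over and over, as its comment describes
    while True:
        run_loop(backend_objects)
        time.sleep(interval)
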
def delete_transfers(backend_object, key):
    """Work through the state machine to delete batches from the external
    storage"""
    # get the storage id for the backend object
    storage_id = StorageQuota.get_storage_index(backend_object.get_id())

    # get the DELETE requests which are pending or in progress for this backend
    dr = MigrationRequest.objects.filter(
        Q(request_type=MigrationRequest.DELETE)
        & Q(locked=False)
        & Q(migration__storage__storage=storage_id)
        & Q(stage__in=[
            MigrationRequest.DELETE_PENDING, MigrationRequest.DELETING
        ])).first()

    # .first() returns None when no requests that match the filter are found
    if not dr:
        return
    # lock the Migration to prevent other processes acting upon it
    if not dr.lock():
        return
    # find the associated PUT or MIGRATE migration request
    # if there is one - if not, set put_req to None
    # there will not be a migration request if the migration has completed
    # as the migration request is deleted when a PUT or MIGRATE completes
    try:
        put_req = MigrationRequest.objects.get(
            (Q(request_type=MigrationRequest.PUT)
             | Q(request_type=MigrationRequest.MIGRATE))
            & Q(migration=dr.migration)
            & Q(migration__storage__storage=storage_id))
    except:
        put_req = None

    # determine the credentials for the user - decrypt if necessary
    if dr.credentials != {}:
        credentials = AES_tools.AES_decrypt_dict(key, dr.credentials)
    else:
        credentials = {}

    # switch on the state machine status
    if dr.stage == MigrationRequest.DELETE_PENDING:
        try:
            # only try to do the delete if some files have been uploaded!
            # and the external id is not None
            if ((put_req and put_req.stage > MigrationRequest.PUT_PACKING
                 and dr.migration.external_id is not None)
                    or (dr.migration.stage == Migration.ON_STORAGE)):
                delete(backend_object, credentials, dr)
            else:
                # transition to DELETE_TIDY if there are no files to delete
                dr.stage = MigrationRequest.DELETE_TIDY
                logging.info((
                    "Transition: request ID: {} external_id {}: DELETING->DELETE_TIDY"
                ).format(dr.pk, dr.migration.external_id))
                dr.save()
        except Exception as e:
            # Something went wrong, set FAILED and failure_reason
            mark_migration_failed(dr, str(e), e, upload_mig=False)

    elif dr.stage == MigrationRequest.DELETING:
        # in the process of deleting
        pass
    # unlock
    dr.unlock()