class Backup:
    def __init__(self, is_ipfs=False):
        self.client = BackupClient(is_ipfs)
        self.auth = Auth()
        self.is_ipfs = is_ipfs

    @hive_restful_response
    def get_state(self):
        user_did, _ = check_auth_and_vault(VAULT_ACCESS_R)
        return self.client.get_state(user_did)

    @hive_restful_response
    def backup(self, credential):
        user_did, app_did = check_auth_and_vault(VAULT_ACCESS_R)
        credential_info = self.auth.get_backup_credential_info(credential)
        self.client.check_backup_status(user_did)
        self.client.execute_backup(
            user_did, credential_info,
            self.client.get_access_token(credential, credential_info))

    @hive_restful_response
    def restore(self, credential):
        user_did, app_did = check_auth_and_vault(VAULT_ACCESS_WR)
        credential_info = self.auth.get_backup_credential_info(credential)
        self.client.check_backup_status(user_did, True)
        self.client.execute_restore(
            user_did, credential_info,
            self.client.get_access_token(credential, credential_info))

    @hive_restful_response
    def promotion(self):
        raise NotImplementedException()
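
The Backup class above is a thin façade: each handler authenticates the caller, verifies the backup credential, and delegates to BackupClient (shown further down this page). A minimal sketch of how a route layer might drive it, assuming Flask; the blueprint name and URL are illustrative assumptions, not taken from the source:

from flask import Blueprint, request

# Hypothetical route wiring for the Backup facade; blueprint name and URL are assumptions.
backup_blueprint = Blueprint('backup', __name__)
backup_mgr = Backup(is_ipfs=False)

@backup_blueprint.route('/api/v2/vault/content', methods=['POST'])
def vault_content():
    credential = (request.get_json(silent=True) or {}).get('credential')
    if request.args.get('to') == 'hive_node':
        return backup_mgr.backup(credential)    # push vault data to the backup node
    if request.args.get('from') == 'hive_node':
        return backup_mgr.restore(credential)   # pull vault data back
    return backup_mgr.get_state()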
class Payment(metaclass=Singleton):
    def __init__(self):
        self.ela_address = hive_setting.HIVE_PAYMENT_ADDRESS
        PaymentConfig.init_config()
        self.auth = Auth()
        self.vault_subscription = None
        self.ela_resolver = ElaResolver(hive_setting.ELA_RESOLVER)

    def _get_vault_subscription(self):
        if not self.vault_subscription:
            from src.modules.subscription.subscription import VaultSubscription
            self.vault_subscription = VaultSubscription()
        return self.vault_subscription

    @hive_restful_response
    def get_version(self):
        _, _ = check_auth()
        return {'version': self._get_vault_subscription().get_price_plans_version()}

    @hive_restful_response
    def place_order(self, json_body):
        user_did, app_did = check_auth_and_vault()
        subscription, plan = self._check_place_order_params(json_body)
        return self._get_order_vo(self._create_order(user_did, subscription, plan))

    def _check_place_order_params(self, json_body):
        if not json_body:
            raise InvalidParameterException(msg='Request body should not be empty.')
        validate_exists(json_body, '', ('subscription', 'pricing_name'))

        subscription, pricing_name = json_body.get('subscription', None), json_body.get('pricing_name', None)
        if subscription not in ('vault', 'backup'):
            raise InvalidParameterException(msg=f'Invalid subscription: {subscription}.')

        plan = self._get_vault_subscription().get_price_plan(subscription, pricing_name)
        if not plan:
            raise InvalidParameterException(msg=f'Invalid pricing_name: {pricing_name}.')

        if plan['amount'] <= 0:
            raise InvalidParameterException(msg=f'Invalid pricing_name: {pricing_name} is a free plan.')

        return subscription, plan

    def _create_order(self, user_did, subscription, plan):
        doc = {
            USR_DID: user_did,
            COL_ORDERS_SUBSCRIPTION: subscription,
            COL_ORDERS_PRICING_NAME: plan['name'],
            COL_ORDERS_ELA_AMOUNT: plan['amount'],
            COL_ORDERS_ELA_ADDRESS: self.ela_address,
            COL_ORDERS_PROOF: '',
            COL_ORDERS_STATUS: COL_ORDERS_STATUS_NORMAL
        }

        res = cli.insert_one_origin(DID_INFO_DB_NAME, COL_ORDERS, doc, create_on_absence=True)

        doc['_id'] = res['inserted_id']
        doc[COL_ORDERS_PROOF] = self.auth.create_order_proof(user_did, doc['_id'])
        cli.update_one_origin(DID_INFO_DB_NAME, COL_ORDERS, {'_id': ObjectId(doc['_id'])},
                              {'$set': {COL_ORDERS_PROOF: doc[COL_ORDERS_PROOF]}}, is_extra=True)

        return doc

    def _get_order_vo(self, order):
        return {
            'order_id': str(order['_id']),
            COL_ORDERS_SUBSCRIPTION: order[COL_ORDERS_SUBSCRIPTION],
            COL_ORDERS_PRICING_NAME: order[COL_ORDERS_PRICING_NAME],
            COL_ORDERS_ELA_AMOUNT: order[COL_ORDERS_ELA_AMOUNT],
            COL_ORDERS_ELA_ADDRESS: order[COL_ORDERS_ELA_ADDRESS],
            COL_ORDERS_PROOF: order[COL_ORDERS_PROOF],
            CREATE_TIME: int(order[CREATE_TIME]),
        }
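
    # For illustration (hypothetical values), _get_order_vo returns a document shaped like:
    #   {'order_id': '60ab...', 'subscription': 'vault', 'pricing_name': 'Rookie',
    #    'ela_amount': 2.5, 'ela_address': '<ELA payment address>', 'proof': '<order proof JWT>',
    #    'created': 1630000000}
    # assuming the COL_ORDERS_* / CREATE_TIME constants map to the key names shown.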

    @hive_restful_response
    def pay_order(self, order_id, json_body):
        user_did, app_did = check_auth()
        vault = self._get_vault_subscription().get_checked_vault(user_did)

        order, transaction_id, paid_did = self._check_pay_order_params(user_did, order_id, json_body)

        receipt = self._create_receipt(user_did, order, transaction_id, paid_did)
        self._update_order_status(str(order['_id']), COL_ORDERS_STATUS_PAID)
        self._get_vault_subscription().upgrade_vault_plan(user_did, vault, order[COL_ORDERS_PRICING_NAME])
        return self._get_receipt_vo(order, receipt)

    def _update_order_status(self, order_id, status):
        update = {
            COL_ORDERS_STATUS: status,
            MODIFY_TIME: datetime.utcnow().timestamp(),
        }
        cli.update_one_origin(DID_INFO_DB_NAME, COL_ORDERS, {'_id': ObjectId(order_id)}, {'$set': update},
                              is_extra=True)

    def _get_receipt_vo(self, order, receipt):
        return {
            COL_RECEIPTS_ID: str(receipt['_id']),
            COL_RECEIPTS_ORDER_ID: str(order['_id']),
            COL_RECEIPTS_TRANSACTION_ID: receipt[COL_RECEIPTS_TRANSACTION_ID],
            COL_ORDERS_PRICING_NAME: order[COL_ORDERS_PRICING_NAME],
            COL_RECEIPTS_PAID_DID: receipt[COL_RECEIPTS_PAID_DID],
            COL_ORDERS_ELA_AMOUNT: order[COL_ORDERS_ELA_AMOUNT],
            COL_ORDERS_PROOF: order[COL_ORDERS_PROOF]
        }

    def _check_pay_order_params(self, user_did, order_id, json_body):
        order = self._check_param_order_id(user_did, order_id, is_pay_order=True)
        if not json_body:
            raise InvalidParameterException(msg='Request body should not be empty.')
        validate_exists(json_body, '', ['transaction_id', ])

        transaction_id = json_body.get('transaction_id', None)
        paid_did = self._check_transaction_id(user_did, order, transaction_id)
        return order, transaction_id, paid_did

    def _check_param_order_id(self, user_did, order_id, is_pay_order=False):
        if not order_id:
            raise InvalidParameterException(msg='Order id MUST be provided.')

        col_filter = {'_id': ObjectId(order_id), USR_DID: user_did}
        if is_pay_order:
            col_filter[COL_ORDERS_STATUS] = COL_ORDERS_STATUS_NORMAL
        order = cli.find_one_origin(DID_INFO_DB_NAME, COL_ORDERS, col_filter, throw_exception=False)
        if not order:
            raise InvalidParameterException(msg='Order id is invalid: no matching order found.')

        if is_pay_order:
            receipt = cli.find_one_origin(DID_INFO_DB_NAME, COL_RECEIPTS,
                                          {COL_RECEIPTS_ORDER_ID: order_id}, throw_exception=False)
            if receipt:
                raise InvalidParameterException(msg='Order id is invalid: a receipt already exists for this order.')

        return order

    def _check_transaction_id(self, user_did, order, transaction_id):
        # INFO: do not need local check because of binding order_id
        # self._check_transaction_id_local(transaction_id)
        self._check_transaction_id_remote(user_did, order, transaction_id)
        return user_did

    def _check_transaction_id_local(self, transaction_id):
        receipt = cli.find_one_origin(DID_INFO_DB_NAME, COL_RECEIPTS,
                                      {COL_RECEIPTS_TRANSACTION_ID: transaction_id}, throw_exception=False)
        if receipt:
            raise InvalidParameterException(msg=f'Transaction id {transaction_id} has already been used.')

    def _check_transaction_id_remote(self, user_did, order, transaction_id):
        result = self.ela_resolver.get_transaction_info(transaction_id)
        # INFO: this is used to check whether the transaction is already on the blockchain.
        if result['time'] < 1:
            raise BadRequestException(msg='Invalid transaction id: the transaction has not been confirmed on chain.')
        proof = self._get_proof_by_result(result)
        self.auth.verify_order_proof(proof, user_did, str(order['_id']))
        amount, address = float(result['vout'][0]['value']), result['vout'][0]['address']
        if amount - order[COL_ORDERS_ELA_AMOUNT] < -0.01 or order[COL_ORDERS_ELA_ADDRESS] != address:
            raise BadRequestException(msg='Invalid transaction id: insufficient amount or mismatched address.')

    def _get_proof_by_result(self, result):
        try:
            memo = result['attributes'][0]['data']
            json_memo = json.loads(self.ela_resolver.hexstring_to_bytes(memo, reverse=False).decode('utf-8'))
            if not isinstance(json_memo, dict) or json_memo.get('source') != 'hive node':
                raise BadRequestException(msg='Invalid transaction id: unexpected memo type.')
            return json_memo.get('proof')
        except Exception as e:
            raise BadRequestException(msg=f'Invalid transaction id: invalid memo ({str(e)}).')
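
    # Based on the checks above, the transaction memo is expected to hex-decode to a
    # JSON object like the following (field values here are hypothetical):
    #   {"source": "hive node", "proof": "<order proof JWT issued by create_order_proof>"}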

    def _create_receipt(self, user_did, order, transaction_id, paid_did):
        now = datetime.utcnow().timestamp()
        receipt = {
            USR_DID: user_did,
            COL_RECEIPTS_ORDER_ID: str(order['_id']),
            COL_RECEIPTS_TRANSACTION_ID: transaction_id,
            COL_RECEIPTS_PAID_DID: paid_did,
            COL_ORDERS_PROOF: '',
            COL_ORDERS_STATUS: COL_ORDERS_STATUS_NORMAL
        }
        res = cli.insert_one_origin(DID_INFO_DB_NAME, COL_RECEIPTS, receipt, create_on_absence=True)

        receipt['_id'] = res['inserted_id']
        receipt[COL_ORDERS_PROOF] = self.auth.create_order_proof(
            user_did, receipt['_id'], amount=order[COL_ORDERS_ELA_AMOUNT], is_receipt=True)
        cli.update_one_origin(DID_INFO_DB_NAME, COL_RECEIPTS, {'_id': ObjectId(receipt['_id'])},
                              {'$set': {COL_ORDERS_PROOF: receipt[COL_ORDERS_PROOF]}}, is_extra=True)
        return receipt

    @hive_restful_response
    def get_orders(self, subscription, order_id):
        _, _ = check_auth()
        if subscription and subscription not in ('vault', 'backup'):
            raise InvalidParameterException(msg=f'Invalid subscription: {subscription}.')

        col_filter = {}
        if subscription:
            col_filter[COL_ORDERS_SUBSCRIPTION] = subscription
        if order_id:
            col_filter[COL_RECEIPTS_ORDER_ID] = order_id
        orders = cli.find_many_origin(DID_INFO_DB_NAME, COL_ORDERS, col_filter, throw_exception=False)
        if not orders:
            raise OrderNotFoundException(msg='Can not get the matched orders.')
        return {'orders': list(map(lambda o: {'order_id': str(o['_id']),
                                              COL_ORDERS_SUBSCRIPTION: o[COL_ORDERS_SUBSCRIPTION],
                                              COL_ORDERS_PRICING_NAME: o[COL_ORDERS_PRICING_NAME],
                                              COL_ORDERS_ELA_AMOUNT: o[COL_ORDERS_ELA_AMOUNT],
                                              COL_ORDERS_ELA_ADDRESS: o[COL_ORDERS_ELA_ADDRESS],
                                              COL_ORDERS_PROOF: o[COL_ORDERS_PROOF],
                                              COL_ORDERS_STATUS: o[COL_ORDERS_STATUS],
                                              CREATE_TIME: int(o[CREATE_TIME])}, orders))}

    @hive_restful_response
    def get_receipt_info(self, order_id):
        user_did, app_did = check_auth()
        order = self._check_param_order_id(user_did, order_id)
        receipt = cli.find_one_origin(DID_INFO_DB_NAME, COL_RECEIPTS,
                                      {COL_RECEIPTS_ORDER_ID: order_id}, throw_exception=False)
        if not receipt:
            raise ReceiptNotFoundException(msg='Receipt can not be found by order_id.')
        return self._get_receipt_vo(order, receipt)

    def archive_orders(self, user_did):
        """ for unsubscribe the vault """
        update = {
            COL_ORDERS_STATUS: COL_ORDERS_STATUS_ARCHIVE,
            MODIFY_TIME: datetime.utcnow().timestamp(),
        }
        if cli.is_col_exists(DID_INFO_DB_NAME, COL_ORDERS):
            cli.update_one_origin(DID_INFO_DB_NAME, COL_ORDERS, {USR_DID: user_did}, {'$set': update},
                                  is_many=True, is_extra=True)
        if cli.is_col_exists(DID_INFO_DB_NAME, COL_RECEIPTS):
            cli.update_one_origin(DID_INFO_DB_NAME, COL_RECEIPTS, {USR_DID: user_did}, {'$set': update},
                                  is_many=True, is_extra=True)
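
Taken together, the handlers above implement a two-step order/pay flow. A condensed client-side sketch follows; `call` is an assumed helper that performs an authenticated HTTP request and returns the JSON body, and the URL paths are illustrative assumptions:

# Hypothetical client-side flow against the Payment handlers above.
order = call('PUT', '/api/v2/payment/order',
             body={'subscription': 'vault', 'pricing_name': 'Rookie'})
# Pay order['ela_amount'] ELA to order['ela_address'], embedding order['proof']
# in the transaction memo, then report the on-chain transaction id:
receipt = call('POST', f"/api/v2/payment/order/{order['order_id']}",
               body={'transaction_id': '<on-chain transaction id>'})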
Example #6
def get_auth():
    global auth
    if auth is None:
        auth = Auth()
    return auth
Example #7
def init_app(app):
    """ This will be called by application initializer. """
    global auth
    auth = Auth()
    app.register_blueprint(blueprint)
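
A minimal sketch of where init_app would be invoked, assuming a standard Flask application factory (the factory function itself is an assumption for illustration):

from flask import Flask

def create_app():
    # Hypothetical factory: init_app creates the module-level Auth
    # instance and registers the blueprint on the app.
    app = Flask(__name__)
    init_app(app)
    return app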
class BackupClient:
    def __init__(self, is_ipfs=False):
        self.http = HttpClient()
        self.backup_thread = None
        self.mongo_host, self.mongo_port = None, None
        if hive_setting:
            self.mongo_host, self.mongo_port = hive_setting.MONGO_HOST, hive_setting.MONGO_PORT
        self.auth = Auth()
        self.is_ipfs = is_ipfs

    def check_backup_status(self, user_did, is_restore=False):
        doc = cli.find_one_origin(DID_INFO_DB_NAME,
                                  VAULT_BACKUP_INFO_COL, {USER_DID: user_did},
                                  create_on_absence=True)
        if doc and doc[VAULT_BACKUP_INFO_STATE] != VAULT_BACKUP_STATE_STOP \
                and doc[VAULT_BACKUP_INFO_TIME] < (datetime.utcnow().timestamp() - 60 * 60 * 24):
            raise BackupIsInProcessingException(
                'A backup/restore is already in progress.')

        if is_restore and not (doc and (
                doc[VAULT_BACKUP_INFO_STATE] == VAULT_BACKUP_STATE_STOP
                or doc[VAULT_BACKUP_INFO_MSG] == VAULT_BACKUP_MSG_SUCCESS)):
            raise BadRequestException(
                msg='No successful backup found for restore.')

    def get_access_token(self, credential, credential_info):
        target_host = credential_info['targetHost']
        challenge_response, backup_service_instance_did = \
            self.auth.backup_client_sign_in(target_host, credential, 'DIDBackupAuthResponse')
        return self.auth.backup_client_auth(target_host, challenge_response,
                                            backup_service_instance_did)

    def execute_backup(self, user_did, credential_info, access_token):
        cli.update_one_origin(
            DID_INFO_DB_NAME,
            VAULT_BACKUP_INFO_COL, {USER_DID: user_did}, {
                "$set": {
                    USER_DID: user_did,
                    VAULT_BACKUP_INFO_STATE: VAULT_BACKUP_STATE_STOP,
                    VAULT_BACKUP_INFO_TYPE: VAULT_BACKUP_INFO_TYPE_HIVE_NODE,
                    VAULT_BACKUP_INFO_MSG: VAULT_BACKUP_MSG_SUCCESS,
                    VAULT_BACKUP_INFO_TIME: datetime.utcnow().timestamp(),
                    VAULT_BACKUP_INFO_DRIVE: credential_info['targetHost'],
                    VAULT_BACKUP_INFO_TOKEN: access_token
                }
            },
            options={'upsert': True},
            create_on_absence=True)

        clog().debug('start new thread for backup processing.')

        if hive_setting.BACKUP_IS_SYNC:
            self.__class__.backup_main(user_did, self)
        else:
            _thread.start_new_thread(self.__class__.backup_main,
                                     (user_did, self))

    def update_backup_state(self, user_did, state, msg):
        cli.update_one_origin(
            DID_INFO_DB_NAME, VAULT_BACKUP_INFO_COL, {USER_DID: user_did}, {
                "$set": {
                    VAULT_BACKUP_INFO_STATE: state,
                    VAULT_BACKUP_INFO_MSG: msg,
                    VAULT_BACKUP_INFO_TIME: datetime.utcnow().timestamp()
                }
            })

    @staticmethod
    def backup_main(user_did, client):
        try:
            clog().info(
                f'[backup_main] enter backup thread, {user_did}, {client}.')
            client.backup(user_did)
        except Exception as e:
            clog().error(f'Failed to backup really: {traceback.format_exc()}')
            client.delete_mongodb_data(user_did)
            client.update_backup_state(user_did, VAULT_BACKUP_STATE_STOP,
                                       VAULT_BACKUP_MSG_FAILED)
        clog().info('[backup_main] leave backup thread.')

    @staticmethod
    def restore_main(user_did, client):
        try:
            clog().info(
                f'[restore_main] enter restore thread, {user_did}, {client}.')
            client.restore(user_did)
        except Exception as e:
            clog().error(
                f'[restore_main] Failed to restore really: {traceback.format_exc()}'
            )
            client.delete_mongodb_data(user_did)
            client.update_backup_state(user_did, VAULT_BACKUP_STATE_STOP,
                                       VAULT_BACKUP_MSG_FAILED)

    def backup(self, user_did):
        clog().info('[backup_main] enter backup().')
        cli.export_mongodb(user_did)
        clog().info('[backup_main] success to export mongodb data.')

        doc = cli.find_one_origin(DID_INFO_DB_NAME, VAULT_BACKUP_INFO_COL,
                                  {USER_DID: user_did})
        clog().info('[backup_main] success to get backup info.')
        if self.is_ipfs:
            vault_size = fm.get_vault_storage_size(user_did)
            self.update_server_state_to(doc[VAULT_BACKUP_INFO_DRIVE],
                                        doc[VAULT_BACKUP_INFO_TOKEN],
                                        STATE_RUNNING, vault_size)
            clog().info('[backup_main: ipfs] success to start the backup.')
            self.backup_ipfs_upload_dbfiles(user_did,
                                            doc[VAULT_BACKUP_INFO_DRIVE],
                                            doc[VAULT_BACKUP_INFO_TOKEN])
            clog().info(
                '[backup_main: ipfs] success to upload database files.')
            self.backup_ipfs_cids(user_did, doc[VAULT_BACKUP_INFO_DRIVE],
                                  doc[VAULT_BACKUP_INFO_TOKEN])
            clog().info('[backup_main: ipfs] success to backup ipfs cids.')
            self.update_server_state_to(doc[VAULT_BACKUP_INFO_DRIVE],
                                        doc[VAULT_BACKUP_INFO_TOKEN],
                                        STATE_FINISH)
            clog().info(
                '[backup_main: ipfs] success to finish the backup process.')
        else:
            vault_root = get_vault_path(user_did)
            self.backup_files_really(vault_root, doc[VAULT_BACKUP_INFO_DRIVE],
                                     doc[VAULT_BACKUP_INFO_TOKEN])
            clog().info('[backup_main] success to execute backup.')
            checksum_list = get_file_checksum_list(vault_root)
            self.backup_finish(doc[VAULT_BACKUP_INFO_DRIVE],
                               doc[VAULT_BACKUP_INFO_TOKEN], checksum_list)
            clog().info('[backup_main] success to finish backup.')

        self.delete_mongodb_data(user_did)
        self.update_backup_state(user_did, VAULT_BACKUP_STATE_STOP,
                                 VAULT_BACKUP_MSG_SUCCESS)
        clog().info('[backup_main] success to backup really.')

    def restore(self, user_did):
        clog().info('[restore_main] enter restore().')

        doc = cli.find_one_origin(DID_INFO_DB_NAME, VAULT_BACKUP_INFO_COL,
                                  {USER_DID: user_did})
        if self.is_ipfs:
            self.restore_ipfs_download_dbfiles(user_did,
                                               doc[VAULT_BACKUP_INFO_DRIVE],
                                               doc[VAULT_BACKUP_INFO_TOKEN])
            clog().info(
                '[restore_main: ipfs] success to download database files.')
            cli.import_mongodb(user_did)
            clog().info(
                '[restore_main: ipfs] success to import mongodb database.')
            self.restore_ipfs_pin_cids(user_did)
            clog().info('[restore_main: ipfs] success to pin ipfs cids.')
        else:
            vault_root = get_vault_path(user_did)
            if not vault_root.exists():
                create_full_path_dir(vault_root)
            clog().info(f'[restore_main] success to get vault root path.')
            self.restore_really(vault_root, doc[VAULT_BACKUP_INFO_DRIVE],
                                doc[VAULT_BACKUP_INFO_TOKEN])
            clog().info(f'[restore_main] success to execute restore.')
            self.restore_finish(user_did, doc[VAULT_BACKUP_INFO_DRIVE],
                                doc[VAULT_BACKUP_INFO_TOKEN])
            clog().info(f'[restore_main] success to restore finish.')
            cli.import_mongodb(user_did)

        self.delete_mongodb_data(user_did)
        self.update_backup_state(user_did, VAULT_BACKUP_STATE_STOP,
                                 VAULT_BACKUP_MSG_SUCCESS)
        clog().info('[restore_main] success to restore really.')

    def backup_files_really(self, vault_root, host_url, access_token):
        remote_files = self.http.get(host_url + URL_BACKUP_FILES,
                                     access_token)['backup_files']
        local_files = fm.get_file_checksum_list(vault_root)
        new_files, patch_files, delete_files = self.diff_files(
            local_files, remote_files)
        self.backup_new_files(host_url, access_token, vault_root, new_files)
        self.backup_patch_files(host_url, access_token, vault_root,
                                patch_files)
        self.backup_delete_files(host_url, access_token, vault_root,
                                 delete_files)

    def restore_really(self, vault_root, host_url, access_token):
        remote_files = self.http.get(host_url + URL_BACKUP_FILES,
                                     access_token)['backup_files']
        local_files = fm.get_file_checksum_list(vault_root)
        new_files, patch_files, delete_files = self.diff_files(
            remote_files, local_files)
        self.restore_new_files(host_url, access_token, vault_root, new_files)
        self.restore_patch_files(host_url, access_token, vault_root,
                                 patch_files)
        self.restore_delete_files(host_url, access_token, vault_root,
                                  delete_files)

    def backup_finish(self, host_url, access_token, checksum_list):
        self.http.post(host_url + URL_BACKUP_FINISH,
                       access_token, {'checksum_list': checksum_list},
                       is_body=False)

    def diff_files(self, base_files, target_files):
        """
        Diff two file list from base to target. Every files list contains item (name, checksum).
        """
        b_files = {n: c for c, n in base_files}
        t_files = {n: c for c, n in target_files}
        new_files = [n for n, c in b_files.items() if n not in t_files]
        patch_files = [
            n for n, c in b_files.items() if n in t_files and c != t_files[n]
        ]
        delete_files = [n for n, c in t_files.items() if n not in b_files]
        return new_files, patch_files, delete_files
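
    # Illustration with hypothetical data, items being (checksum, name) pairs:
    #   base   = [('aaa', 'a.txt'), ('bbb', 'b.txt')]
    #   target = [('aaa', 'a.txt'), ('xxx', 'b.txt'), ('ccc', 'c.txt')]
    # diff_files(base, target) -> new: [], patch: ['b.txt'], delete: ['c.txt']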

    def backup_new_files(self, host_url, access_token, vault_root: Path,
                         new_files):
        for name in new_files:
            self.http.put_file(host_url + URL_BACKUP_FILE + f'?file={name}',
                               access_token, (vault_root / name).resolve())

    def restore_new_files(self, host_url, access_token, vault_root: Path,
                          new_files):
        for name in new_files:
            self.http.get_to_file(host_url + URL_BACKUP_FILE + f'?file={name}',
                                  access_token, (vault_root / name).resolve())

    def backup_patch_files(self, host_url, access_token, vault_root: Path,
                           patch_files):
        for name in patch_files:
            hashes = self.get_remote_file_hashes(host_url, access_token, name)
            pickle_data = fm.get_rsync_data((vault_root / name).resolve(),
                                            hashes)
            self.http.post(host_url + URL_BACKUP_PATCH_FILE + f'?file={name}',
                           access_token,
                           pickle_data,
                           is_json=False,
                           is_body=False)

    def restore_patch_files(self, host_url, access_token, vault_root: Path,
                            patch_files):
        for name in patch_files:
            full_name = (vault_root / name).resolve()
            hashes = fm.get_hashes_by_file(full_name)
            pickle_data = self.http.post_to_pickle_data(
                host_url + URL_BACKUP_PATCH_DELTA + f'?file={name}',
                access_token, hashes)
            fm.apply_rsync_data(full_name, pickle_data)

    def backup_delete_files(self, host_url, access_token, vault_root: Path,
                            delete_files):
        for name in delete_files:
            self.http.delete(host_url + URL_BACKUP_FILE + f'?file={name}',
                             access_token)

    def restore_delete_files(self, host_url, access_token, vault_root: Path,
                             delete_files):
        for name in delete_files:
            fm.delete_file((vault_root / name).resolve())

    def get_remote_file_hashes(self, host_url, access_token, name):
        r = self.http.get(host_url + URL_BACKUP_PATCH_HASH + f'?file={name}',
                          access_token,
                          is_body=False)
        return fm.get_hashes_by_lines(r.iter_lines(chunk_size=CHUNK_SIZE))

    def delete_mongodb_data(self, user_did):
        mongodb_root = get_save_mongo_db_path(user_did)
        if mongodb_root.exists():
            shutil.rmtree(mongodb_root)

    def execute_restore(self, user_did, credential_info, access_token):
        cli.update_one_origin(
            DID_INFO_DB_NAME,
            VAULT_BACKUP_INFO_COL, {USER_DID: user_did}, {
                "$set": {
                    USER_DID: user_did,
                    VAULT_BACKUP_INFO_STATE: VAULT_BACKUP_STATE_STOP,
                    VAULT_BACKUP_INFO_TYPE: VAULT_BACKUP_INFO_TYPE_HIVE_NODE,
                    VAULT_BACKUP_INFO_MSG: VAULT_BACKUP_MSG_SUCCESS,
                    VAULT_BACKUP_INFO_TIME: datetime.utcnow().timestamp(),
                    VAULT_BACKUP_INFO_DRIVE: credential_info['targetHost'],
                    VAULT_BACKUP_INFO_TOKEN: access_token
                }
            },
            options={'upsert': True},
            create_on_absence=True)

        if hive_setting.BACKUP_IS_SYNC:
            self.__class__.restore_main(user_did, self)
        else:
            _thread.start_new_thread(self.__class__.restore_main,
                                     (user_did, self))

    def restore_finish(self, user_did, host_url, access_token):
        body = self.http.get(host_url + URL_RESTORE_FINISH, access_token)
        checksum_list = body["checksum_list"]
        vault_root = get_vault_path(user_did)
        if not vault_root.exists():
            create_full_path_dir(vault_root)

        local_checksum_list = get_file_checksum_list(vault_root)
        for checksum in checksum_list:
            if checksum not in local_checksum_list:
                raise BadRequestException(msg='Failed to finish restore.')

    def get_state(self, user_did):
        doc = cli.find_one_origin(DID_INFO_DB_NAME,
                                  VAULT_BACKUP_INFO_COL, {USER_DID: user_did},
                                  create_on_absence=True)
        state, result = 'stop', 'success'
        if doc:
            state, result = doc[VAULT_BACKUP_INFO_STATE], doc[
                VAULT_BACKUP_INFO_MSG]
        return {'state': state, 'result': result}

    def backup_ipfs_cids(self, user_did, host_url, access_token):
        total_size, cids = fm.get_file_cids(user_did)
        if not cids:
            return
        self.http.post(host_url + URL_IPFS_BACKUP_PIN_CIDS,
                       access_token, {
                           'total_size': total_size,
                           'cids': cids
                       },
                       is_body=False)

    def backup_ipfs_upload_dbfiles(self, user_did, host_url, access_token):
        database_dir = get_save_mongo_db_path(user_did)
        if not database_dir.exists():
            # this means no user databases
            return
        for dir_root, dir_names, filenames in os.walk(database_dir.as_posix()):
            for name in filenames:
                if not name.endswith(BACKUP_FILE_SUFFIX):
                    # skip none backup files.
                    continue
                self.http.put_file(
                    host_url + URL_BACKUP_FILE + f'?file={name}', access_token,
                    Path(dir_root) / name)
            # no need recursive
            break

    def restore_ipfs_download_dbfiles(self, user_did, host_url, access_token):
        body = self.http.get(host_url + URL_IPFS_BACKUP_GET_DBFILES,
                             access_token)
        if not body['files']:
            return
        if body['origin_size'] > fm.get_vault_max_size(user_did):
            raise InsufficientStorageException('Not enough space for restore.')
        database_dir = get_save_mongo_db_path(user_did)
        for name in body['files']:
            self.http.get_to_file(f'{host_url}{URL_BACKUP_FILE}?file={name}',
                                  access_token, database_dir / name)

    def restore_ipfs_pin_cids(self, user_did):
        _, cids = fm.get_file_cids(user_did)
        for cid in cids:
            fm.ipfs_pin_cid(cid)

    def update_server_state_to(self,
                               host_url,
                               access_token,
                               state,
                               vault_size=0):
        self.http.post(
            f'{host_url}{URL_IPFS_BACKUP_STATE}?to={state}&vault_size={vault_size}',
            access_token,
            None,
            is_body=False)
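
To make the control flow above easier to follow, this outline traces the happy path of a non-IPFS backup through the class (the method names are the real ones; the outline is a reading aid, not additional behavior):

# execute_backup(user_did, credential_info, access_token)
#   -> upsert the VAULT_BACKUP_INFO_COL document (state, target drive, access token)
#   -> backup_main(user_did, client)       # runs inline when BACKUP_IS_SYNC is set
#        -> backup(user_did)
#             -> cli.export_mongodb(user_did)     # dump the user databases
#             -> backup_files_really(...)         # diff, then new/patch/delete transfers
#             -> backup_finish(...)               # remote side verifies the checksum list
#        -> delete_mongodb_data(user_did)         # drop the local database dump
#        -> update_backup_state(VAULT_BACKUP_STATE_STOP, VAULT_BACKUP_MSG_SUCCESS)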
Example #10
class IpfsBackupClient:
    def __init__(self):
        self.auth = Auth()
        self.http = HttpClient()

    @hive_restful_response
    def get_state(self):
        user_did, _ = check_auth_and_vault(VAULT_ACCESS_R)
        return self.get_remote_backup_state(user_did)

    """
    The client application request to backup vault data to target backup node.
     - Check a backup/restore proess already is inprogress; if not, then
     - Record the backup request in case to restart the backup/restore process
     - Create a dedeicated thread to:
        --- store all data on vault to local IPFS node to get the root CID;
        --- send this CID value to remote backup hive node;
        --- remote backup hive node will synchronize valut data from IPFS network to
            its local IPFS node via the root CID.
    """

    @hive_restful_response
    def backup(self, credential, is_force):
        user_did, _ = check_auth_and_vault(VAULT_ACCESS_R)
        credential_info = self.auth.get_backup_credential_info(credential)
        if not is_force:
            self.check_remote_backup_in_progress(user_did)
        req = self.save_request(user_did, credential, credential_info)
        BackupExecutor(user_did, self, req, is_force=is_force).start()

    """
    The client application request to store vault data from the backup node.
     - Check a backup/restore proess already is inprogress; if not, then
     - Record the backup request in case to restart the backup/restore process
     - Create a dedeicated thread to:
        --- Get a root CID from the backup node;
        --- Synhorize the vault data from local IPFS node (but currently from Gatway node)
            via root CID
    """

    @hive_restful_response
    def restore(self, credential, is_force):
        user_did, _ = check_auth_and_vault(VAULT_ACCESS_R)
        credential_info = self.auth.get_backup_credential_info(credential)
        if not is_force:
            self.check_remote_backup_in_progress(user_did)
        self.save_request(user_did,
                          credential,
                          credential_info,
                          is_restore=True)
        RestoreExecutor(user_did, self).start()

    def check_remote_backup_in_progress(self, user_did):
        result = self.get_remote_backup_state(user_did)
        if result['result'] == BACKUP_REQUEST_STATE_INPROGRESS:
            raise BadRequestException(
                msg='The remote backup is in progress. Please wait until it finishes.')

    def get_remote_backup_state(self, user_did):
        state, result, msg = BACKUP_REQUEST_STATE_STOP, BACKUP_REQUEST_STATE_SUCCESS, ''
        req = self.get_request(user_did)
        if req:
            state = req.get(BACKUP_REQUEST_ACTION)
            result = req.get(BACKUP_REQUEST_STATE)
            msg = req.get(BACKUP_REQUEST_STATE_MSG)

            # request the remote backup node for the current backup progress state
            # if the data is being backed up.
            if state == BACKUP_REQUEST_ACTION_BACKUP and result == BACKUP_REQUEST_STATE_SUCCESS:
                body = self.http.get(
                    req.get(BACKUP_REQUEST_TARGET_HOST) +
                    URL_VAULT_BACKUP_SERVICE_STATE,
                    req.get(BACKUP_REQUEST_TARGET_TOKEN))
                result, msg = body['result'], body['message']
        return {
            'state': state if state else BACKUP_REQUEST_STATE_STOP,
            'result': result if result else BACKUP_REQUEST_STATE_SUCCESS,
            'message': msg if msg else '',
        }
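
    # Note on the returned dict: 'state' carries the request *action* (backup or
    # restore, defaulting to stop), while 'result' carries the request *state*
    # (defaulting to success); for a successful backup action the result is
    # refreshed from the remote backup node above.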

    def get_request(self, user_did):
        col_filter = {
            USR_DID: user_did,
            BACKUP_TARGET_TYPE: BACKUP_TARGET_TYPE_HIVE_NODE
        }
        return cli.find_one_origin(DID_INFO_DB_NAME,
                                   COL_IPFS_BACKUP_CLIENT,
                                   col_filter,
                                   create_on_absence=True,
                                   throw_exception=False)

    def save_request(self,
                     user_did,
                     credential,
                     credential_info,
                     is_restore=False):
        # verify the credential
        target_host = credential_info['targetHost']
        challenge_response, backup_service_instance_did = \
            self.auth.backup_client_sign_in(target_host, credential, 'DIDBackupAuthResponse')

        access_token = self.auth.backup_client_auth(
            target_host, challenge_response, backup_service_instance_did)
        target_host, target_did = credential_info['targetHost'], credential_info['targetDID']
        req = self.get_request(user_did)
        if not req:
            self.insert_request(user_did,
                                target_host,
                                target_did,
                                access_token,
                                is_restore=is_restore)
        else:
            self.update_request(user_did,
                                target_host,
                                target_did,
                                access_token,
                                req,
                                is_restore=is_restore)
        return self.get_request(user_did)

    def insert_request(self,
                       user_did,
                       target_host,
                       target_did,
                       access_token,
                       is_restore=False):
        new_doc = {
            USR_DID: user_did,
            BACKUP_TARGET_TYPE: BACKUP_TARGET_TYPE_HIVE_NODE,
            BACKUP_REQUEST_ACTION: BACKUP_REQUEST_ACTION_RESTORE if is_restore else BACKUP_REQUEST_ACTION_BACKUP,
            BACKUP_REQUEST_STATE: BACKUP_REQUEST_STATE_INPROGRESS,
            BACKUP_REQUEST_STATE_MSG: None,
            BACKUP_REQUEST_TARGET_HOST: target_host,
            BACKUP_REQUEST_TARGET_DID: target_did,
            BACKUP_REQUEST_TARGET_TOKEN: access_token
        }
        cli.insert_one_origin(DID_INFO_DB_NAME,
                              COL_IPFS_BACKUP_CLIENT,
                              new_doc,
                              create_on_absence=True)

    def update_request(self,
                       user_did,
                       target_host,
                       target_did,
                       access_token,
                       req,
                       is_restore=False):
        # if request.args.get('is_multi') != 'True':
        #     # INFO: Use url parameter 'is_multi' to skip this check.
        #     cur_target_host = req.get(BACKUP_REQUEST_TARGET_HOST)
        #     cur_target_did = req.get(BACKUP_REQUEST_TARGET_DID)
        #     if cur_target_host and cur_target_did \
        #             and (cur_target_host != target_host or cur_target_did != target_did):
        #         raise InvalidParameterException(msg='Do not support backup to multi hive node.')

        updated_doc = {
            BACKUP_REQUEST_ACTION: BACKUP_REQUEST_ACTION_RESTORE if is_restore else BACKUP_REQUEST_ACTION_BACKUP,
            BACKUP_REQUEST_STATE: BACKUP_REQUEST_STATE_INPROGRESS,
            BACKUP_REQUEST_STATE_MSG: None,
            BACKUP_REQUEST_TARGET_HOST: target_host,
            BACKUP_REQUEST_TARGET_DID: target_did,
            BACKUP_REQUEST_TARGET_TOKEN: access_token
        }

        _filter = {
            USR_DID: user_did,
            BACKUP_TARGET_TYPE: BACKUP_TARGET_TYPE_HIVE_NODE
        }
        cli.update_one_origin(DID_INFO_DB_NAME,
                              COL_IPFS_BACKUP_CLIENT,
                              _filter, {'$set': updated_doc},
                              is_extra=True)

    # the following is for the executors.

    def update_request_state(self, user_did, state, msg=None):
        updated_doc = {
            BACKUP_REQUEST_STATE: state,
            BACKUP_REQUEST_STATE_MSG: msg
        }

        _filter = {
            USR_DID: user_did,
            BACKUP_TARGET_TYPE: BACKUP_TARGET_TYPE_HIVE_NODE
        }
        cli.update_one_origin(DID_INFO_DB_NAME,
                              COL_IPFS_BACKUP_CLIENT,
                              _filter, {'$set': updated_doc},
                              is_extra=True)

    """
    Each application holds its database under same user did.
    The steps to dump each database data to each application under the specific did:
    - dump the specific database to a snapshot file;
    - upload this snapshot file into IPFS node
    """

    def dump_database_data_to_backup_cids(self, user_did):
        names = cli.get_all_user_database_names(user_did)
        metadata_list = list()
        for name in names:
            d = {'path': gene_temp_file_name(), 'name': name}
            # dump the database data to a snapshot file.
            succeeded = export_mongo_db_to_full_path(d['name'], d['path'])
            if not succeeded:
                raise BadRequestException(
                    f'Failed to dump {d["name"]} for {user_did}')

            # upload this snapshot file to the IPFS node.
            d['cid'] = fm.ipfs_upload_file_from_path(d['path'])
            d['sha256'] = fm.get_file_content_sha256(d['path'])
            d['size'] = d['path'].stat().st_size
            d['path'].unlink()

            metadata_list.append(d)
        return metadata_list
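
    # Each metadata entry produced above looks like (values hypothetical):
    #   {'name': '<database name>', 'path': <temp Path, already unlinked>,
    #    'cid': 'Qm...', 'sha256': '<hex digest>', 'size': 18432}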

    """
    All files data have been uploaded to IPFS node and save with array of cids.
    The method here is to get array of cids to save it as json document then.
    """

    def get_files_data_as_backup_cids(self, user_did):
        return fm.get_file_cid_metadatas(user_did)

    """
    All vault data would be uploaded onto IPFS node and identified by CID.
    then this CID would be sent to backup node along with certain other meta information.
    """

    def send_root_backup_cid_to_backup_node(self, user_did, cid, sha256, size,
                                            is_force):
        body = {
            'cid': cid,
            'sha256': sha256,
            'size': size,
            'is_force': is_force
        }

        req = self.get_request(user_did)
        self.http.post(req[BACKUP_REQUEST_TARGET_HOST] +
                       URL_VAULT_BACKUP_SERVICE_BACKUP,
                       req[BACKUP_REQUEST_TARGET_TOKEN],
                       body,
                       is_json=True,
                       is_body=False)

    """
    When restoring vault data from a sepcific backup node, it will condcut the following steps:
    - get the root cid to recover vault data;
    - get a json document by the root cid, where the json document contains a list of CIDs
      to the files and database data on IPFS network.
    """

    def get_vault_data_cid_from_backup_node(self, user_did):
        req = self.get_request(user_did)
        data = self.http.get(
            req[BACKUP_REQUEST_TARGET_HOST] + URL_VAULT_BACKUP_SERVICE_RESTORE,
            req[BACKUP_REQUEST_TARGET_TOKEN])
        vault_metadata = fm.ipfs_download_file_content(data['cid'],
                                                       is_proxy=True,
                                                       sha256=data['sha256'],
                                                       size=data['size'])

        if vault_metadata['vault_size'] > fm.get_vault_max_size(user_did):
            raise InsufficientStorageException(
                msg='Not enough allowed space to restore vault data from the backup node.')
        return vault_metadata

    def restore_database_by_dump_files(self, request_metadata):
        databases = request_metadata['databases']
        if not databases:
            logging.info(
                '[IpfsBackupClient] No user databases dump files, skip.')
            return
        for d in databases:
            temp_file = gene_temp_file_name()
            msg = fm.ipfs_download_file_to_path(d['cid'],
                                                temp_file,
                                                is_proxy=True,
                                                sha256=d['sha256'],
                                                size=d['size'])
            if msg:
                logging.error(
                    f'[IpfsBackupClient] Failed to download dump file for database {d["name"]}.'
                )
                temp_file.unlink()
                raise BadRequestException(msg=msg)
            import_mongo_db_by_full_path(temp_file)
            temp_file.unlink()
            logging.info(
                f'[IpfsBackupClient] Success to restore the dump file for database {d["name"]}.'
            )

    def retry_backup_request(self, user_did):
        req = self.get_request(user_did)
        if not req or req.get(BACKUP_REQUEST_STATE) != BACKUP_REQUEST_STATE_INPROGRESS:
            return
        logging.info(
            f"[IpfsBackupClient] Found uncompleted request({req.get(USR_DID)}), retry."
        )
        if req.get(BACKUP_REQUEST_ACTION) == BACKUP_REQUEST_ACTION_BACKUP:
            BackupExecutor(user_did, self, req, start_delay=30).start()
        elif req.get(BACKUP_REQUEST_ACTION) == BACKUP_REQUEST_ACTION_RESTORE:
            RestoreExecutor(user_did, self, start_delay=30).start()
        else:
            logging.error(
                f'[IpfsBackupClient] Unknown action({req.get(BACKUP_REQUEST_ACTION)}), skip.'
            )
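
retry_backup_request above is designed to be called once per user when the node starts, so that an interrupted backup or restore resumes. A hedged sketch of such a startup hook follows; the function name and the way user DIDs are enumerated are assumptions:

def retry_pending_backup_requests(client: IpfsBackupClient, user_dids):
    # Hypothetical startup hook: re-kick any request that was still marked
    # in-progress when the node went down.
    for user_did in user_dids:
        client.retry_backup_request(user_did)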