def _update_order_status(self, order_id, status):
    """Set the status of one order document and refresh its modify time."""
    # NOTE(review): datetime.utcnow().timestamp() treats a naive UTC datetime as
    # local time on non-UTC hosts; the whole file uses this pattern consistently,
    # so confirm project-wide before changing it anywhere.
    now = datetime.utcnow().timestamp()
    cli.update_one_origin(DID_INFO_DB_NAME, COL_ORDERS,
                          {'_id': ObjectId(order_id)},
                          {'$set': {COL_ORDERS_STATUS: status, MODIFY_TIME: now}},
                          is_extra=True)
def __update_vault_state(self, status):
    """Persist a new state on the authenticated user's vault document."""
    user_did, _ = check_auth()
    # Raises if the user has no vault to update.
    self.get_checked_vault(user_did)
    changes = {
        VAULT_SERVICE_DID: user_did,
        VAULT_SERVICE_MODIFY_TIME: datetime.utcnow().timestamp(),
        VAULT_SERVICE_STATE: status,
    }
    cli.update_one_origin(DID_INFO_DB_NAME, VAULT_SERVICE_COL,
                          {VAULT_SERVICE_DID: user_did}, {"$set": changes})
def update_backup_state(self, user_did, state, msg):
    """Record the backup state and message for a user, stamping the time."""
    changes = {
        VAULT_BACKUP_INFO_STATE: state,
        VAULT_BACKUP_INFO_MSG: msg,
        VAULT_BACKUP_INFO_TIME: datetime.utcnow().timestamp(),
    }
    cli.update_one_origin(DID_INFO_DB_NAME, VAULT_BACKUP_INFO_COL,
                          {USER_DID: user_did}, {"$set": changes})
def ipfs_increase_used_size(self, backup, size, is_reset=False):
    """Grow the backup's used-storage counter by `size`, or reset it to zero."""
    used = 0 if is_reset else backup[VAULT_BACKUP_SERVICE_USE_STORAGE] + size
    cli.update_one_origin(DID_INFO_DB_NAME, VAULT_BACKUP_SERVICE_COL,
                          {DID: backup[DID]},
                          {'$set': {VAULT_BACKUP_SERVICE_USE_STORAGE: used}})
def archive_orders(self, user_did):
    """ for unsubscribe the vault """
    changes = {
        COL_ORDERS_STATUS: COL_ORDERS_STATUS_ARCHIVE,
        MODIFY_TIME: datetime.utcnow().timestamp(),
    }
    # Archive the user's orders and receipts alike; either collection may not
    # exist yet, so guard each update with an existence check.
    for collection in (COL_ORDERS, COL_RECEIPTS):
        if cli.is_col_exists(DID_INFO_DB_NAME, collection):
            cli.update_one_origin(DID_INFO_DB_NAME, collection,
                                  {USR_DID: user_did}, {'$set': changes},
                                  is_many=True, is_extra=True)
def ipfs_update_state_really(self, user_did, to, vault_size=0):
    """Move the user's backup service to state `to`, stamping the modify time.

    When entering STATE_RUNNING the counters are initialized: used storage
    starts at zero and the vault's original size is recorded.
    """
    changes = {
        STATE: to,
        VAULT_BACKUP_SERVICE_MODIFY_TIME: datetime.utcnow().timestamp(),
    }
    if to == STATE_RUNNING:
        # This is the start of the backup processing.
        changes[VAULT_BACKUP_SERVICE_USE_STORAGE] = 0
        changes[ORIGINAL_SIZE] = vault_size
    cli.update_one_origin(DID_INFO_DB_NAME, VAULT_BACKUP_SERVICE_COL,
                          {DID: user_did}, {'$set': changes})
def update_request_state(self, user_did, state, msg=None):
    """Store the state (and optional message) of the user's hive-node backup request."""
    query = {USR_DID: user_did, BACKUP_TARGET_TYPE: BACKUP_TARGET_TYPE_HIVE_NODE}
    changes = {BACKUP_REQUEST_STATE: state, BACKUP_REQUEST_STATE_MSG: msg}
    cli.update_one_origin(DID_INFO_DB_NAME, COL_IPFS_BACKUP_CLIENT,
                          query, {'$set': changes}, is_extra=True)
def _create_receipt(self, user_did, order, transaction_id, paid_did):
    """Insert a receipt for a paid order and attach its signed proof.

    The proof can only be generated after the insert, because it signs the
    receipt's '_id'; the document is therefore inserted with an empty proof
    and patched afterwards.

    :param user_did: DID of the paying user.
    :param order: the order document being paid (must carry '_id' and amount).
    :param transaction_id: on-chain transaction id of the payment.
    :param paid_did: DID that made the payment.
    :return: the receipt document including '_id' and the generated proof.
    """
    # Fix: removed dead local `now = datetime.utcnow().timestamp()` — it was
    # never used anywhere in this method.
    receipt = {
        USR_DID: user_did,
        COL_RECEIPTS_ORDER_ID: str(order['_id']),
        COL_RECEIPTS_TRANSACTION_ID: transaction_id,
        COL_RECEIPTS_PAID_DID: paid_did,
        COL_ORDERS_PROOF: '',
        COL_ORDERS_STATUS: COL_ORDERS_STATUS_NORMAL
    }
    res = cli.insert_one_origin(DID_INFO_DB_NAME, COL_RECEIPTS, receipt, create_on_absence=True)
    receipt['_id'] = res['inserted_id']
    receipt[COL_ORDERS_PROOF] = self.auth.create_order_proof(
        user_did, receipt['_id'], amount=order[COL_ORDERS_ELA_AMOUNT], is_receipt=True)
    # Patch the freshly inserted document with the proof it could not carry at insert time.
    cli.update_one_origin(DID_INFO_DB_NAME, COL_RECEIPTS,
                          {'_id': ObjectId(receipt['_id'])},
                          {'$set': {COL_ORDERS_PROOF: receipt[COL_ORDERS_PROOF]}},
                          is_extra=True)
    return receipt
def upgrade_vault_plan(self, user_did, vault, pricing_name):
    """Switch the user's vault to the named pricing plan.

    Any unexpired days of the current plan are converted and added to the new
    plan's duration; a 'serviceDays' of -1 means the plan never expires.
    """
    now = datetime.utcnow().timestamp()  # seconds in UTC
    plan = self.get_price_plan('vault', pricing_name)

    remain_days = 0
    if vault[VAULT_SERVICE_END_TIME] != -1:
        cur_plan = self.get_price_plan('vault', vault[VAULT_SERVICE_PRICING_USING])
        remain_days = self._get_remain_days(cur_plan, vault[VAULT_SERVICE_END_TIME], now, plan)

    if plan['serviceDays'] == -1:
        end_time = -1
    else:
        end_time = now + (plan['serviceDays'] + remain_days) * 24 * 60 * 60

    changes = {
        VAULT_SERVICE_PRICING_USING: pricing_name,
        VAULT_SERVICE_MAX_STORAGE: int(plan["maxStorage"]) * 1024 * 1024,
        VAULT_SERVICE_START_TIME: now,
        VAULT_SERVICE_END_TIME: end_time,
        VAULT_SERVICE_MODIFY_TIME: now,
        VAULT_SERVICE_STATE: VAULT_SERVICE_STATE_RUNNING,
    }
    cli.update_one_origin(DID_INFO_DB_NAME, VAULT_SERVICE_COL,
                          {VAULT_SERVICE_DID: user_did}, {"$set": changes})
def _create_order(self, user_did, subscription, plan):
    """Insert a new payment order for `plan` and attach its signed proof.

    The proof signs the order's '_id', so the document is first inserted with
    an empty proof and then patched once the id is known.
    """
    order = {
        USR_DID: user_did,
        COL_ORDERS_SUBSCRIPTION: subscription,
        COL_ORDERS_PRICING_NAME: plan['name'],
        COL_ORDERS_ELA_AMOUNT: plan['amount'],
        COL_ORDERS_ELA_ADDRESS: self.ela_address,
        COL_ORDERS_PROOF: '',
        COL_ORDERS_STATUS: COL_ORDERS_STATUS_NORMAL
    }
    inserted = cli.insert_one_origin(DID_INFO_DB_NAME, COL_ORDERS, order, create_on_absence=True)
    order['_id'] = inserted['inserted_id']
    order[COL_ORDERS_PROOF] = self.auth.create_order_proof(user_did, order['_id'])
    cli.update_one_origin(DID_INFO_DB_NAME, COL_ORDERS,
                          {'_id': ObjectId(order['_id'])},
                          {'$set': {COL_ORDERS_PROOF: order[COL_ORDERS_PROOF]}},
                          is_extra=True)
    return order
def upload_ipfs_files_by_db(db_name):
    """Upload up to 10 not-yet-pinned files of one user database to IPFS.

    Picks the oldest documents whose CID field exists but is still None,
    uploads each to the IPFS node, bumps the CID reference count, and writes
    the CID back. Failures are logged and skipped so one bad file does not
    block the rest of the batch.
    """
    # find 10 docs and ordered by ascending.
    query = {COL_IPFS_FILES_IPFS_CID: {'$exists': True, '$eq': None}}
    opts = {'limit': 10, 'sort': [('modified', pymongo.ASCENDING), ]}
    file_docs = cli.find_many_origin(db_name, COL_IPFS_FILES, query,
                                     throw_exception=False, options=opts)
    logging.info(f'[task_upload_ipfs_files] get {len(file_docs) if file_docs else 0} '
                 f'{db_name} files for uploading to ipfs node')
    if not file_docs:
        return

    ipfs_files = IpfsFiles()
    for file_doc in file_docs:
        try:
            cid = fm.ipfs_uploading_file(file_doc[DID], file_doc[APP_DID],
                                         file_doc[COL_IPFS_FILES_PATH])
            ipfs_files.increase_cid_ref(cid)
            query = {DID: file_doc[DID],
                     APP_DID: file_doc[APP_DID],
                     COL_IPFS_FILES_PATH: file_doc[COL_IPFS_FILES_PATH]}
            cli.update_one_origin(db_name, COL_IPFS_FILES, query,
                                  {'$set': {COL_IPFS_FILES_IPFS_CID: cid}},
                                  is_extra=True)
        except Exception as e:
            # Best-effort batch: log and continue with the next file.
            logging.error(f'[task_upload_ipfs_files] failed upload file to ipfs with exception: {str(e)}')
def backup_finish(self, checksum_list):
    """Verify the uploaded backup files and record the backup's disk usage.

    Every checksum the client reports must match a file under the user's
    backup root; otherwise the backup is considered incomplete and rejected.
    """
    user_did, _, doc = self._check_auth_backup()
    backup_root = get_vault_backup_path(user_did)
    # TODO: remove this check.
    if not backup_root.exists():
        create_full_path_dir(backup_root)

    local_checksum_list = get_file_checksum_list(backup_root)
    if any(c not in local_checksum_list for c in checksum_list):
        raise BadRequestException(msg='Failed to finish backup process.')

    total_size = get_dir_size(backup_root.as_posix(), 0)
    cli.update_one_origin(DID_INFO_DB_NAME, VAULT_BACKUP_SERVICE_COL,
                          {VAULT_BACKUP_SERVICE_DID: user_did},
                          {"$set": {VAULT_BACKUP_SERVICE_USE_STORAGE: total_size}})
def execute_restore(self, user_did, credential_info, access_token):
    """Prime the backup-info document for a restore and kick off the restore.

    The document is upserted in the STOP/SUCCESS state with the target host
    and token, then the restore runs synchronously or on a background thread
    depending on the node's BACKUP_IS_SYNC setting.
    """
    info = {
        USER_DID: user_did,
        VAULT_BACKUP_INFO_STATE: VAULT_BACKUP_STATE_STOP,
        VAULT_BACKUP_INFO_TYPE: VAULT_BACKUP_INFO_TYPE_HIVE_NODE,
        VAULT_BACKUP_INFO_MSG: VAULT_BACKUP_MSG_SUCCESS,
        VAULT_BACKUP_INFO_TIME: datetime.utcnow().timestamp(),
        VAULT_BACKUP_INFO_DRIVE: credential_info['targetHost'],
        VAULT_BACKUP_INFO_TOKEN: access_token,
    }
    cli.update_one_origin(DID_INFO_DB_NAME, VAULT_BACKUP_INFO_COL,
                          {USER_DID: user_did}, {"$set": info},
                          options={'upsert': True}, create_on_absence=True)
    if hive_setting.BACKUP_IS_SYNC:
        self.__class__.restore_main(user_did, self)
    else:
        _thread.start_new_thread(self.__class__.restore_main, (user_did, self))
def update_request(self, user_did, target_host, target_did, access_token, req, is_restore=False):
    """Mark the user's hive-node request as in-progress for a backup or restore.

    Stores the action (backup vs. restore), clears any previous state message,
    and records the target node's host, DID and access token.
    """
    # NOTE: an earlier guard rejected switching target hive nodes
    # (skippable via the 'is_multi' url parameter); it is intentionally
    # disabled, so multi-node targets are currently allowed.
    query = {
        USR_DID: user_did,
        BACKUP_TARGET_TYPE: BACKUP_TARGET_TYPE_HIVE_NODE
    }
    action = BACKUP_REQUEST_ACTION_RESTORE if is_restore else BACKUP_REQUEST_ACTION_BACKUP
    changes = {
        BACKUP_REQUEST_ACTION: action,
        BACKUP_REQUEST_STATE: BACKUP_REQUEST_STATE_INPROGRESS,
        BACKUP_REQUEST_STATE_MSG: None,
        BACKUP_REQUEST_TARGET_HOST: target_host,
        BACKUP_REQUEST_TARGET_DID: target_did,
        BACKUP_REQUEST_TARGET_TOKEN: access_token
    }
    cli.update_one_origin(DID_INFO_DB_NAME, COL_IPFS_BACKUP_CLIENT,
                          query, {'$set': changes}, is_extra=True)
def update_backup_request(self, user_did, update):
    """Apply the given field updates to the user's server-side backup request."""
    cli.update_one_origin(DID_INFO_DB_NAME, COL_IPFS_BACKUP_SERVER,
                          {USR_DID: user_did}, {'$set': update}, is_extra=True)