def download_file(self, request, django_field, ledger_field=None):
    """Stream the asset file identified by the URL key.

    Serves the file from local storage when this node owns the asset,
    otherwise proxies the download from the owning node.
    """
    url_kwarg = self.lookup_url_kwarg or self.lookup_field
    asset_key = self.kwargs[url_kwarg]
    channel = get_channel_name(request)
    validate_key(asset_key)

    # Resolve the asset metadata from the ledger.
    try:
        asset = get_object_from_ledger(channel, asset_key, self.ledger_query_call)
    except LedgerError as e:
        return Response({'message': str(e.msg)}, status=e.status)

    # Enforce permissions before serving anything.
    try:
        self.check_access(channel, request.user, asset, is_proxied_request(request))
    except PermissionError as e:
        return Response({'message': str(e)}, status=status.HTTP_403_FORBIDDEN)

    if get_owner() == asset['owner']:
        # Asset lives on this node: serve straight from local storage.
        return self._download_local_file(django_field)

    # Remote asset: fetch it from the owner node.
    remote_field = ledger_field or django_field
    address = self.get_storage_address(asset, remote_field)
    return self._download_remote_file(channel, address, asset)
def authenticate_outgoing_request(outgoing_node_id):
    """Build HTTP basic-auth credentials for calling a remote node.

    :raises NodeError: if no outgoing secret is registered for the node.
    """
    try:
        outgoing = OutgoingNode.objects.get(node_id=outgoing_node_id)
    except OutgoingNode.DoesNotExist:
        raise NodeError(f'Unauthorized to call remote node with node_id: {outgoing_node_id}')

    # to authenticate to remote node we use the current node id
    # with the associated outgoing secret.
    return HTTPBasicAuth(get_owner(), outgoing.secret)
def list(self, request, *args, **kwargs):
    """Return every node registered on the ledger, flagging the current one."""
    try:
        nodes = query_ledger(fcn=self.ledger_query_call)
    except LedgerError as e:
        return Response({'message': str(e.msg)}, status=e.status)

    me = get_owner()
    for node in nodes:
        # Mark the entry corresponding to this very server.
        node['isCurrent'] = node['id'] == me

    return Response(nodes, status=status.HTTP_200_OK)
def authenticate_worker(node_id):
    """Build HTTP basic-auth credentials for calling the given node.

    Credentials are the current node id paired with the outgoing secret
    registered for ``node_id``.

    :raises NodeError: if no outgoing secret is registered for the node,
        or if the DB returned a record for a different node.
    """
    from node.models import OutgoingNode
    owner = get_owner()
    try:
        outgoing = OutgoingNode.objects.get(node_id=node_id)
    except OutgoingNode.DoesNotExist:
        raise NodeError(f'Unauthorized to call node_id: {node_id}')
    if node_id != outgoing.node_id:
        # Ensure the response is valid. This is a safety net for the case when the DB connection is shared
        # across processes running in parallel (same safeguard as the other authenticate_worker variant).
        raise NodeError(f'Wrong response: Request {node_id} - Get {outgoing.node_id}')
    auth = HTTPBasicAuth(owner, outgoing.secret)
    return auth
def has_access(self, user, asset):
    """Returns true if API consumer can access asset data."""
    # Anonymous consumers are always rejected (safeguard, should never happen).
    if user.is_anonymous:
        return False

    # Node consumers are identified by their username; classic users are
    # tested against the current msp id.
    node_id = user.username if type(user) is NodeUser else get_owner()

    permission = asset['permissions']['process']
    if permission['public']:
        return True
    return node_id in permission['authorized_ids']
def prepare_task(tuple_type):
    """Queue a celery task for every tuple of the given type owned by this node."""
    data_owner = get_owner()
    worker_queue = f"{settings.LEDGER['name']}.worker"

    for subtuple in query_tuples(tuple_type, data_owner):
        tkey = subtuple['key']
        # Verify that tuple task does not already exist
        if AsyncResult(tkey).state != 'PENDING':
            print(f'[Scheduler] Tuple task ({tkey}) already exists')
            continue
        prepare_tuple.apply_async(
            (subtuple, tuple_type),
            task_id=tkey,
            queue=worker_queue,
        )
def authenticate_worker(node_id):
    """Return HTTP basic-auth credentials for calling ``node_id``.

    :raises NodeError: when no outgoing secret is registered for the node,
        or when the fetched record does not match the requested node id.
    """
    from node.models import OutgoingNode

    owner = get_owner()

    try:
        outgoing = OutgoingNode.objects.get(node_id=node_id)
    except OutgoingNode.DoesNotExist:
        raise NodeError(f'Unauthorized to call node_id: {node_id}')

    # Ensure the response is valid. This is a safety net for the case when the DB connection is shared
    # across processes running in parallel.
    if outgoing.node_id != node_id:
        raise NodeError(f'Wrong response: Request {node_id} - Get {outgoing.node_id}')

    return HTTPBasicAuth(owner, outgoing.secret)
def download_file(self, request, django_field, ledger_field=None):
    """Serve the asset file from local storage or proxy it from its owner node."""
    url_kwarg = self.lookup_url_kwarg or self.lookup_field
    pk = self.kwargs[url_kwarg]

    try:
        asset = get_object_from_ledger(pk, self.ledger_query_call)
    except LedgerError as e:
        return Response({'message': str(e.msg)}, status=e.status)

    if not self._has_access(request.user, asset):
        return Response({'message': 'Unauthorized'}, status=status.HTTP_403_FORBIDDEN)

    if get_owner() == asset['owner']:
        # Local asset: stream the file straight from disk.
        instance = self.get_object()
        file_field = getattr(instance, django_field)
        return CustomFileResponse(
            open(file_field.path, 'rb'),
            as_attachment=True,
            filename=os.path.basename(file_field.path)
        )

    # Remote asset: proxy the download from the owning node.
    auth = authenticate_outgoing_request(asset['owner'])
    remote_field = ledger_field or django_field
    r = get_remote_file(asset[remote_field]['storageAddress'], auth, stream=True)
    if not r.ok:
        return Response({
            'message': f'Cannot proxify asset from node {asset["owner"]}: {str(r.text)}'
        }, status=r.status_code)

    response = CustomFileResponse(
        streaming_content=(chunk for chunk in r.iter_content(512 * 1024)),
        status=r.status_code)

    # We don't use hop_by_hop headers since they are incompatible
    # with WSGI
    for header in r.headers:
        if not is_hop_by_hop(header):
            response[header] = r.headers.get(header)

    return response
def on_tuples(cc_event, block_number, tx_id, tx_status):
    """Schedule celery tasks for every 'todo' tuple carried by a chaincode event."""
    payload = json.loads(cc_event['payload'])
    owner = get_owner()
    worker_queue = f"{LEDGER['name']}.worker"

    for tuple_type, tuples in payload.items():
        if not tuples:
            continue

        for tup in tuples:
            key = tup['key']
            status = tup['status']

            # Invalid transactions are logged and never scheduled.
            if tx_status != 'VALID':
                logger.error(
                    f'Failed transaction on task {key}: type={tuple_type}'
                    f' status={status} with tx status: {tx_status}')
                continue

            logger.info(
                f'Processing task {key}: type={tuple_type} status={status}')

            # Only fresh tasks with a known type are worth scheduling.
            if status != 'todo' or tuple_type is None:
                continue

            # Skip tasks assigned to another node.
            tuple_owner = tuple_get_worker(tuple_type, tup)
            if tuple_owner != owner:
                logger.debug(f'Skipping task {key}: owner does not match'
                             f' ({tuple_owner} vs {owner})')
                continue

            # Skip tasks already registered with celery.
            if AsyncResult(key).state != 'PENDING':
                logger.info(f'Skipping task {key}: already exists')
                continue

            prepare_tuple.apply_async(
                (tup, tuple_type), task_id=key, queue=worker_queue)
def on_tuples_event(channel_name, block_number, tx_id, tx_status, event_type, asset):
    """Schedule a celery task for a single 'todo' tuple event."""
    owner = get_owner()
    worker_queue = f"{settings.ORG_NAME}.worker"
    key = asset['key']
    status = asset['status']

    # Invalid transactions are logged and never scheduled.
    if tx_status != 'VALID':
        logger.error(
            f'Failed transaction on task {key}: type={event_type}'
            f' status={status} with tx status: {tx_status}')
        return

    logger.info(f'Processing task {key}: type={event_type} status={status}')

    # Only fresh tasks with a known type are worth scheduling.
    if status != 'todo' or event_type is None:
        return

    # Skip tasks assigned to another node.
    tuple_owner = tuple_get_worker(event_type, asset)
    if tuple_owner != owner:
        logger.info(f'Skipping task {key}: owner does not match'
                    f' ({tuple_owner} vs {owner})')
        return

    # Skip tasks already registered with celery.
    if AsyncResult(key).state != 'PENDING':
        logger.info(f'Skipping task {key}: already exists')
        return

    prepare_tuple.apply_async(
        (channel_name, asset, event_type),
        task_id=key,
        queue=worker_queue
    )
def check_access(self, channel_name: str, user, asset, is_proxied_request: bool) -> None:
    """Raise unless the API consumer is allowed to access the asset data.

    :param is_proxied_request: True if the API consumer is another
                               backend-server proxying a user request
    :raises: PermissionError
    """
    # safeguard, should never happened
    if user.is_anonymous:
        raise PermissionError()

    if type(user) is NodeUser:
        # for node
        node_id = user.username
    else:
        # for classic user, test on current msp id
        node_id = get_owner()

    # TODO: for classic users this should be 'download' instead of 'process',
    # but 'download' is not consistently exposed by chaincode yet.
    permission = asset['permissions']['process']

    if not permission['public'] and node_id not in permission['authorized_ids']:
        raise PermissionError()
def download_file(self, request, django_field, ledger_field=None):
    """Serve the asset file locally or proxy it from its owner node."""
    url_kwarg = self.lookup_url_kwarg or self.lookup_field
    key = self.kwargs[url_kwarg]

    try:
        asset = get_object_from_ledger(get_channel_name(request), key, self.ledger_query_call)
    except LedgerError as e:
        return Response({'message': str(e.msg)}, status=e.status)

    if not self.has_access(request.user, asset):
        return Response({'message': 'Unauthorized'}, status=status.HTTP_403_FORBIDDEN)

    if get_owner() == asset['owner']:
        # Local asset: serve from this node's storage.
        return self._download_local_file(django_field)

    # Remote asset: proxy the download from the owning node.
    remote_field = ledger_field or django_field
    return self._download_remote_file(get_channel_name(request), remote_field, asset)
def node_has_process_permission(asset):
    """Check if current node can process input asset."""
    permission = asset['permissions']['process']
    if permission['public']:
        return True
    return get_owner() in permission['authorized_ids']