def IDclaimThisTask(self, code):
    """Claim the ID-ing task `code` for the current user.

    Returns:
        list: the task's page images, each as raw bytes.

    Raises:
        PlomTakenException: the task is already owned by another user.
        PlomAuthenticationException: our token was rejected (401).
        PlomSeriousException: any other HTTP error.
    """
    self.SRmutex.acquire()
    try:
        response = self.session.patch(
            "https://{}/ID/tasks/{}".format(self.server, code),
            json={"user": self.user, "token": self.token},
            verify=False,
        )
        # 204 is a success code, so raise_for_status() would not catch it.
        if response.status_code == 204:
            raise PlomTakenException("Task taken by another user.")
        response.raise_for_status()
    except requests.HTTPError as e:
        if response.status_code == 401:
            raise PlomAuthenticationException() from None
        raise PlomSeriousException(
            "Some other sort of error {}".format(e)) from None
    finally:
        self.SRmutex.release()

    # Each multipart part is one page image; pass back plain bytes.
    return [
        BytesIO(part.content).getvalue()
        for part in MultipartDecoder.from_response(response).parts
    ]
def IDrequestImage(self, code):
    """Fetch the page images for ID task `code`.

    Returns:
        list: the images as raw bytes.

    Raises:
        PlomAuthenticationException: our token was rejected (401).
        PlomSeriousException: image missing (404), owned elsewhere (409),
            or any other HTTP error.
    """
    self.SRmutex.acquire()
    try:
        response = self.session.get(
            "https://{}/ID/images/{}".format(self.server, code),
            json={"user": self.user, "token": self.token},
            verify=False,
        )
        response.raise_for_status()
        imageList = [
            BytesIO(part.content).getvalue()  # pass back image as bytes
            for part in MultipartDecoder.from_response(response).parts
        ]
    except requests.HTTPError as e:
        if response.status_code == 401:
            raise PlomAuthenticationException() from None
        if response.status_code == 404:
            raise PlomSeriousException(
                "Cannot find image file for {}.".format(code)) from None
        if response.status_code == 409:
            raise PlomSeriousException(
                "Another user has the image for {}. This should not happen"
                .format(code)) from None
        raise PlomSeriousException(
            "Some other sort of error {}".format(e)) from None
    finally:
        self.SRmutex.release()
    return imageList
def MrequestOriginalImages(self, task):
    """Fetch the original (un-annotated) images for `task`.

    Returns:
        list: the images as raw bytes.

    Raises:
        PlomNoMoreException: no such task (204) or image missing (404).
        PlomAuthenticationException: our token was rejected (401).
        PlomSeriousException: any other HTTP error.
    """
    self.SRmutex.acquire()
    try:
        response = self.session.get(
            "https://{}/MK/originalImages/{}".format(self.server, task),
            json={"user": self.user, "token": self.token},
            verify=False,
        )
        # 204 is a success code, so raise_for_status() would not catch it.
        if response.status_code == 204:
            raise PlomNoMoreException("No task = {}.".format(task))
        response.raise_for_status()
        # response is [image1, image2, ... image.n]
        imageList = [
            BytesIO(part.content).getvalue()
            for part in MultipartDecoder.from_response(response).parts
        ]
    except requests.HTTPError as e:
        if response.status_code == 401:
            raise PlomAuthenticationException() from None
        if response.status_code == 404:
            raise PlomNoMoreException(
                "Cannot find image file for {}".format(task)) from None
        raise PlomSeriousException(
            "Some other sort of error {}".format(e)) from None
    finally:
        self.SRmutex.release()
    return imageList
def uris(self, database, root=None, connection=None):
    """Get a list of all the URIs in a database.

    If `root` is provided, only URIs that start-with() that string
    will be returned.

    Args:
        database: name of the database to query.
        root: optional URI prefix filter.
        connection: optional connection; defaults to self.connection.

    Returns:
        list: URI strings (empty if the reply was not multipart/mixed).
    """
    uris = []
    if connection is None:
        connection = self.connection
    mleval = Eval(connection)
    version = "xquery version '1.0-ml';"
    if root is None or root == "":
        mleval.set_xquery("{} cts:uris()".format(version))
    else:
        # Bug fix: the original replace("'", "'") was a no-op. In XQuery a
        # literal apostrophe inside a '-quoted string is escaped by doubling
        # it, so double them to keep `root` from terminating the literal.
        root = root.replace("'", "''")
        mleval.set_xquery("{} cts:uris()[starts-with(.,'{}')]"
                          .format(version, root))
    mleval.set_database(database)
    response = mleval.eval()
    if 'content-type' in response.headers:
        if response.headers['content-type'].startswith("multipart/mixed"):
            decoder = MultipartDecoder.from_response(response)
            for part in decoder.parts:
                uris.append(part.text)
    return uris
def acquire_bulk(changes):
    """Fetch and decrypt a batch of changed documents via _bulk_get.

    Args:
        changes: change records from a Sync Gateway _changes feed; each has
            an 'id' and a 'changes' list whose first entry carries the 'rev'.

    Returns:
        list: decrypted, cleaned, JSON-decoded document bodies. Parts with
        no 'body' key (e.g. error stubs) are printed and skipped.
    """
    # Bug fix: build the request body with json.dumps so ids/revs containing
    # quotes or backslashes are escaped correctly; the old string formatting
    # produced invalid JSON for such values.
    docs = [
        {"id": change['id'],
         "rev": change['changes'][0]['rev'],
         "atts_since": []}
        for change in changes
    ]
    payload = json.dumps({"docs": docs})
    post_headers = {
        "Host": 'couchbase-fallenlondon.storynexus.com:4984',
        "Content-Type": "application/json",
        # None values suppress requests' default headers for these keys.
        "Connection": None,
        "Accept-Encoding": None,
        "Accept": None,
        "User-Agent": None
    }
    r = requests.post(
        'http://couchbase-fallenlondon.storynexus.com:4984/sync_gateway_json/_bulk_get?revs=true&attachments=true',
        data=payload,
        headers=post_headers)
    decoder = Decoder.from_response(r)
    updates = [json.loads(x.text) for x in decoder.parts]
    dec = []
    for u in updates:
        try:
            dec.append(json.loads(clean(decrypt(u['body']))))
        except KeyError:
            # Best-effort: show the malformed update and keep going.
            print(u)
    return dec
def _download_batch(self, dataset_id, ids):
    """Yield (image_id, multipart_part) pairs for the given image ids,
    downloading them from the API in batches."""
    for batch_ids in batched(ids):
        response = self._api.post(
            'images.bulk.download',
            {ApiField.DATASET_ID: dataset_id, ApiField.IMAGE_IDS: batch_ids})
        for part in MultipartDecoder.from_response(response).parts:
            disposition = part.headers[b'Content-Disposition'].decode('utf-8')
            # The header's name="<id>" field carries the numeric image id.
            img_id = int(re.findall('name="(.*)"', disposition)[0])
            yield img_id, part
def _download_batch(self, docs, down_map):
    """Download a batch of files.

    The multipart reply interleaves optional metadata parts with content
    parts: a part whose Content-Disposition carries `category=metadata`
    describes the content part that follows it, so `meta_part` is carried
    across loop iterations and consumed (then reset) when its content part
    arrives.

    Args:
        docs: batch handle whose .get() returns the multipart HTTP response.
        down_map: maps filename -> download stanza (mutable dict) used to
            record metadata timestamps and store content.
    """
    # Dry-run mode: no network or disk activity at all.
    if self.dryrun:
        return
    self.logger.debug("Downloading batch")
    self.logger.debug(docs)
    resp = docs.get()
    decoder = MultipartDecoder.from_response(resp)
    self.logger.debug("Downloaded {} bytes in {} parts" \
        .format(len(resp.text), len(decoder.parts)))
    meta_part = None
    content_part = None
    splitregex = re.compile(';\\s*')
    if not decoder.parts:
        raise RuntimeError("FAILED TO GET ANY PARTS!?")
    for mimepart in decoder.parts:
        disp = mimepart.headers[b'Content-Disposition'].decode('utf-8')
        if 'category=metadata' in disp:
            # Remember the metadata; it applies to the NEXT content part.
            if meta_part is not None:
                raise RuntimeError("More than one metadata part!?")
            meta_part = mimepart
        else:
            content_part = mimepart
            disp = content_part.headers[b'Content-Disposition'].decode(
                'utf-8')
            dispositions = splitregex.split(disp)
            filename = None
            for disp in dispositions:
                if disp.startswith("filename="):
                    # Strip the 'filename="' prefix and the closing quote.
                    filename = disp[10:len(disp) - 1]
            body_content_type = content_part.headers[
                b'Content-Type'].decode('utf-8')
            if filename is None:
                raise RuntimeError("Multipart without filename!?")
            #print("FN:",filename)
            last_modified = None
            stanza = down_map[filename]
            if meta_part is not None:
                # Persist the metadata first; it may yield a last-modified
                # timestamp to record on the stanza before storing content.
                last_modified = self._store_metadata(meta_part, down_map[filename], \
                    body_content_type, filename)
            if last_modified is not None:
                stanza['timestamp'] = last_modified
            self._store_content(content_part, stanza)
            # Reset for the next metadata/content pair.
            meta_part = None
            content_part = None
def _download_batch(self, docs, down_map):
    """Download a batch of files.

    Parts arrive interleaved: a metadata part (Content-Disposition contains
    `category=metadata`) describes the content part that follows it, so the
    metadata is held in `meta_part` until its content part is processed.

    Args:
        docs: batch handle whose .get() returns the multipart HTTP response.
        down_map: maps filename -> download stanza (mutable dict).
    """
    # Nothing to do in dry-run mode.
    if self.dryrun:
        return
    self.logger.debug("Downloading batch")
    self.logger.debug(docs)
    resp = docs.get()
    decoder = MultipartDecoder.from_response(resp)
    self.logger.debug("Downloaded {} bytes in {} parts" \
        .format(len(resp.text), len(decoder.parts)))
    meta_part = None
    content_part = None
    splitregex = re.compile(';\\s*')
    if not decoder.parts:
        raise RuntimeError("FAILED TO GET ANY PARTS!?")
    for mimepart in decoder.parts:
        disp = mimepart.headers[b'Content-Disposition'].decode('utf-8')
        if 'category=metadata' in disp:
            # Hold the metadata for the next content part.
            if meta_part is not None:
                raise RuntimeError("More than one metadata part!?")
            meta_part = mimepart
        else:
            content_part = mimepart
            disp = content_part.headers[b'Content-Disposition'].decode('utf-8')
            dispositions = splitregex.split(disp)
            filename = None
            for disp in dispositions:
                if disp.startswith("filename="):
                    # Strip 'filename="' (10 chars incl. quote) and the
                    # trailing quote.
                    filename = disp[10:len(disp)-1]
            body_content_type = content_part.headers[b'Content-Type'].decode('utf-8')
            if filename is None:
                raise RuntimeError("Multipart without filename!?")
            #print("FN:",filename)
            last_modified = None
            stanza = down_map[filename]
            if meta_part is not None:
                # Store metadata first; it may report a last-modified time
                # which is recorded on the stanza before storing content.
                last_modified = self._store_metadata(meta_part, down_map[filename], \
                    body_content_type, filename)
            if last_modified is not None:
                stanza['timestamp'] = last_modified
            self._store_content(content_part, stanza)
            # Reset state for the next metadata/content pair.
            meta_part = None
            content_part = None
def _download_batch_by_hashes(self, hashes):
    """Yield (hash, multipart_part) pairs for the requested image hashes,
    downloading them from the API in batches."""
    for batch_hashes in batched(hashes):
        response = self._api.post(
            'images.bulk.download-by-hash',
            {ApiField.HASHES: batch_hashes})
        decoder = MultipartDecoder.from_response(response)
        for part in decoder.parts:
            content_utf8 = part.headers[b'Content-Disposition'].decode('utf-8')
            # Extract the hash from the name="..." field with a regex (as the
            # original comment intended, and consistent with the sibling
            # download helpers). The old replace(...)[:-1] silently produced
            # a corrupt hash whenever the header did not start with exactly
            # 'form-data; name="' or had trailing parameters.
            h = re.findall(r'name="(.*?)"', content_utf8)[0]
            yield h, part
def _download_batch(self, dataset_id, ids):
    """Yield (image_id, multipart_part) for each requested image id,
    fetching them from the API in batches."""
    for batch_ids in batched(ids):
        response = self._api.post(
            'images.bulk.download',
            {ApiField.DATASET_ID: dataset_id, ApiField.IMAGE_IDS: batch_ids})
        decoder = MultipartDecoder.from_response(response)
        for part in decoder.parts:
            header_text = part.headers[b'Content-Disposition'].decode('utf-8')
            # Find name="1245" preceded by whitespace, a semicolon or the
            # start of the header. Group 1 is the prefix, group 2 the id.
            first_match = re.findall(r'(^|[\s;])name="(\d*)"', header_text)[0]
            yield int(first_match[1]), part
def last_modified(self, database, uris=None, connection=None):
    """Get a list of last-modified times.

    If uris are provided, returns last modified times for those URIs,
    otherwise attempts to find times for all URIs in the database. This
    requires the database setting to manage last modified times, naturally.

    Returns:
        The JSON-decoded array of {"uri": ..., "dt": ...} objects, or None
        if the reply was not multipart/mixed.
    """
    if connection is None:
        connection = self.connection
    mleval = Eval(connection)
    if uris is None:
        leturis = "let $uris := cts:uris()"
    else:
        leturis = "let $uris := cts:uris()["
        comma = ""
        for uri in uris:
            # Bug fix: double apostrophes so a URI containing ' cannot
            # terminate the XQuery string literal early (syntax error /
            # injection); '' is the XQuery escape for a literal apostrophe.
            leturis += "{}'{}'".format(comma, uri.replace("'", "''"))
            comma = ", "
        leturis += "]"
    lines = (leturis, \
             'let $dts := for $uri in $uris', \
             ' let $dt := xdmp:document-get-properties($uri, xs:QName("prop:last-modified"))', \
             ' where $dt', \
             ' return', \
             ' object-node { "uri": $uri, "dt": string($dt) }', \
             'return', \
             ' array-node { $dts }')
    xquery = "\n".join(lines)
    #print(xquery)
    mleval.set_xquery(xquery)
    mleval.set_database(database)
    response = mleval.eval()
    data = None
    if 'content-type' in response.headers:
        if response.headers['content-type'].startswith("multipart/mixed"):
            decoder = MultipartDecoder.from_response(response)
            for part in decoder.parts:
                if data is None:
                    data = json.loads(part.text)
                else:
                    raise RuntimeError(
                        "Multipart reply to timestamp query!?")
    return data
def last_modified(self, database, uris=None, connection=None):
    """Get a list of last-modified times.

    If uris are provided, returns last modified times for those URIs,
    otherwise attempts to find times for all URIs in the database. This
    requires the database setting to manage last modified times, naturally.

    Returns:
        The JSON-decoded array of {"uri": ..., "dt": ...} objects, or None
        if the reply was not multipart/mixed.
    """
    if connection is None:
        connection = self.connection
    mleval = Eval(connection)
    if uris is None:
        leturis = "let $uris := cts:uris()"
    else:
        leturis = "let $uris := cts:uris()["
        comma = ""
        for uri in uris:
            # Bug fix: escape apostrophes by doubling them ('' is the XQuery
            # escape inside a '-quoted literal) so a URI containing ' cannot
            # break the generated query.
            leturis += "{}'{}'".format(comma, uri.replace("'", "''"))
            comma = ", "
        leturis += "]"
    lines = (leturis, \
             'let $dts := for $uri in $uris', \
             ' let $dt := xdmp:document-get-properties($uri, xs:QName("prop:last-modified"))', \
             ' where $dt', \
             ' return', \
             ' object-node { "uri": $uri, "dt": string($dt) }', \
             'return', \
             ' array-node { $dts }')
    xquery = "\n".join(lines)
    #print(xquery)
    mleval.set_xquery(xquery)
    mleval.set_database(database)
    response = mleval.eval()
    data = None
    if 'content-type' in response.headers:
        if response.headers['content-type'].startswith("multipart/mixed"):
            decoder = MultipartDecoder.from_response(response)
            for part in decoder.parts:
                if data is None:
                    data = json.loads(part.text)
                else:
                    raise RuntimeError("Multipart reply to timestamp query!?")
    return data
def download_batch(self, dataset_id, ids, paths, progress_cb=None):
    """Download images `ids` from dataset `dataset_id`, writing each to the
    corresponding entry of `paths`.

    Args:
        dataset_id: id of the dataset holding the images.
        ids: image ids to download.
        paths: destination file paths, parallel to `ids`.
        progress_cb: optional callable invoked with 1 after each image is
            written (e.g. to advance a progress bar).
    """
    id_to_path = {id: path for id, path in zip(ids, paths)}
    MAX_BATCH_SIZE = 50
    for batch_ids in batched(ids, MAX_BATCH_SIZE):
        response = self.api.post('images.bulk.download', {
            ApiField.DATASET_ID: dataset_id,
            ApiField.IMAGE_IDS: batch_ids
        })
        decoder = MultipartDecoder.from_response(response)
        # (dropped the unused enumerate index from the original loop)
        for part in decoder.parts:
            img_id = int(
                re.findall(
                    'name="(.*)"',
                    part.headers[b'Content-Disposition'].decode('utf-8'))
                [0])
            with open(id_to_path[img_id], 'wb') as w:
                w.write(part.content)
            # Bug fix: progress_cb defaults to None but was called
            # unconditionally, raising TypeError; only call it when given.
            if progress_cb is not None:
                progress_cb(1)
def MrequestWholePaper(self, code, questionNumber=0):
    """Download the whole paper for test `code`.

    The default questionNumber=0 lets this be called from the identifier
    as well as the marker (fixes #921).

    Returns:
        list: `[pageData, images]` — pageData is decoded JSON from the
        first multipart part; images is a list of image bytes.

    Raises:
        PlomAuthenticationException, PlomTakenException, PlomSeriousException
    """
    self.SRmutex.acquire()
    try:
        response = self.session.get(
            "https://{}/MK/whole/{}/{}".format(self.server, code, questionNumber),
            json={"user": self.user, "token": self.token},
            verify=False,
        )
        response.raise_for_status()
        # response should be multipart = [ pageData, f1,f2,f3..]
        images = []
        for idx, part in enumerate(MultipartDecoder.from_response(response).parts):
            if idx == 0:
                pageData = json.loads(part.content)
            else:
                images.append(BytesIO(part.content).getvalue())  # image as bytes
    except requests.HTTPError as e:
        if response.status_code == 401:
            raise PlomAuthenticationException() from None
        # TODO?
        if response.status_code == 409:
            raise PlomTakenException("Task taken by another user.") from None
        raise PlomSeriousException(
            "Some other sort of error {}".format(e)) from None
    finally:
        self.SRmutex.release()
    return [pageData, images]
def MclaimThisTask(self, code):
    """Claim marking task `code` for the current user.

    Returns:
        tuple: `(imageList, image_id_list, tags, integrity_check)` —
        imageList is a list of image bytes, image_id_list the decoded JSON
        id list, tags and integrity_check the raw text parts from the
        server.

    Raises:
        PlomTakenException: the task is already owned by another user.
        PlomAuthenticationException: our token was rejected (401).
        PlomSeriousException: any other HTTP error.
    """
    self.SRmutex.acquire()
    try:
        response = self.session.patch(
            "https://{}/MK/tasks/{}".format(self.server, code),
            json={"user": self.user, "token": self.token},
            verify=False,
        )
        response.raise_for_status()
        # 204 is a success code, so check it explicitly after the above.
        if response.status_code == 204:
            raise PlomTakenException("Task taken by another user.")
    except requests.HTTPError as e:
        if response.status_code == 401:
            raise PlomAuthenticationException() from None
        raise PlomSeriousException(
            "Some other sort of error {}".format(e)) from None
    finally:
        self.SRmutex.release()
    # should be multipart = [tags, integrity_check, image_id_list, image1, image2, ....]
    # Bug fix: the old code seeded `tags` with a garbage string literal
    # ("tagsAndImages[0].text # this is raw text") and left integrity_check
    # and image_id_list unbound when parts were missing; index the
    # fixed-position parts directly so a short reply fails loudly instead.
    parts = MultipartDecoder.from_response(response).parts
    tags = parts[0].text
    integrity_check = parts[1].text
    image_id_list = json.loads(parts[2].text)
    imageList = [BytesIO(p.content).getvalue() for p in parts[3:]]
    return imageList, image_id_list, tags, integrity_check
def uris(self, database, connection=None):
    """Return a list of all the URIs in `database`."""
    if connection is None:
        connection = self.connection
    evaluator = Eval(connection)
    evaluator.set_xquery("xquery version '1.0-ml'; cts:uris()")
    evaluator.set_database(database)
    response = evaluator.eval()
    uris = []
    # Only a multipart/mixed reply carries URI parts; anything else
    # yields an empty list.
    if 'content-type' in response.headers:
        if response.headers['content-type'].startswith("multipart/mixed"):
            uris.extend(
                part.text
                for part in MultipartDecoder.from_response(response).parts
            )
    return uris
def ingest(self, ds_name=None, start=None, end=None, force_download=False):
    """Download a marketplace dataset and ingest its bundle files.

    If `ds_name` is None, the available datasets are listed and the user is
    prompted interactively to choose one. The subscription for the chosen
    dataset is verified on-chain before downloading.

    NOTE(review): `start`, `end` and `force_download` are accepted but never
    used in this body — confirm whether callers rely on them.

    Raises:
        MarketplaceHTTPRequest: the ingest download failed or the reply was
            not multipart.
    """
    if ds_name is None:
        df_sets = self._list()
        if df_sets.empty:
            print('There are no datasets available yet.')
            return
        set_print_settings()
        # Prompt until the user picks a valid dataset index.
        while True:
            print(df_sets)
            dataset_num = input('Choose the dataset you want to '
                                'ingest [0..{}]: '.format(df_sets.size - 1))
            try:
                dataset_num = int(dataset_num)
            except ValueError:
                print(
                    'Enter a number between 0 and {}'.format(df_sets.size - 1))
            else:
                if dataset_num not in range(0, df_sets.size):
                    print('Enter a number between 0 and {}'.format(
                        df_sets.size - 1))
                else:
                    ds_name = df_sets.iloc[dataset_num]['dataset']
                    break
    # ds_name = ds_name.lower()
    # TODO: catch error conditions
    provider_info = self.mkt_contract.functions.getDataProviderInfo(
        Web3.toHex(ds_name)).call()
    # provider_info[4] flags whether the dataset is registered on-chain.
    if not provider_info[4]:
        print('The requested "{}" dataset is not registered in '
              'the Data Marketplace.'.format(ds_name))
        return
    address, address_i = self.choose_pubaddr()
    fns = self.mkt_contract.functions
    check_sub = fns.checkAddressSubscription(address,
                                             Web3.toHex(ds_name)).call()
    if check_sub[0] != address or self.to_text(check_sub[1]) != ds_name:
        print('You are not subscribed to dataset "{}" with address {}. '
              'Plese subscribe first.'.format(ds_name, address))
        return
    # check_sub[5] flags an active subscription; note this only warns and
    # does not return on expiry.
    # NOTE(review): the adjacent literals concatenate with no space
    # ("UTC.Please") — probably a missing space in the message.
    if not check_sub[5]:
        print('Your subscription to dataset "{}" expired on {} UTC.'
              'Please renew your subscription by running:\n'
              'catalyst marketplace subscribe --dataset={}'.format(
                  ds_name,
                  pd.to_datetime(check_sub[4], unit='s', utc=True),
                  ds_name))
    # Use stored API credentials if present; otherwise derive them from the
    # wallet.
    if 'key' in self.addresses[address_i]:
        key = self.addresses[address_i]['key']
        secret = self.addresses[address_i]['secret']
    else:
        key, secret = get_key_secret(address,
                                     self.addresses[address_i]['wallet'])
    headers = get_signed_headers(ds_name, key, secret)
    log.info('Starting download of dataset for ingestion...')
    r = requests.post(
        '{}/marketplace/ingest'.format(AUTH_SERVER),
        headers=headers,
        stream=True,
    )
    if r.status_code == 200:
        log.info('Dataset downloaded successfully. Processing dataset...')
        # Wipe any previously ingested bundle before re-processing.
        bundle_folder = get_data_source_folder(ds_name)
        shutil.rmtree(bundle_folder, ignore_errors=True)
        target_path = get_temp_bundles_folder()
        try:
            decoder = MultipartDecoder.from_response(r)
            counter = 1
            for part in decoder.parts:
                log.info("Processing file {} of {}".format(
                    counter, len(decoder.parts)))
                h = part.headers[b'Content-Disposition'].decode('utf-8')
                # Extracting the filename from the header
                name = re.search(r'filename="(.*)"', h).group(1)
                filename = os.path.join(target_path, name)
                with open(filename, 'wb') as f:
                    f.write(part.content)
                self.process_temp_bundle(ds_name, filename)
                counter += 1
        except NonMultipartContentTypeException:
            # Non-multipart replies carry a JSON error body.
            response = r.json()
            raise MarketplaceHTTPRequest(
                request='ingest dataset',
                error=response,
            )
    else:
        raise MarketplaceHTTPRequest(
            request='ingest dataset',
            error=r.status_code,
        )
    log.info('{} ingested successfully'.format(ds_name))
def ingest(self, ds_name, start=None, end=None, force_download=False):
    """Download marketplace dataset `ds_name` and ingest its bundle files.

    Verifies the on-chain registration and the caller's subscription before
    downloading; each multipart part is written to the temp bundles folder
    and handed to process_temp_bundle.

    NOTE(review): `start`, `end` and `force_download` are accepted but never
    used in this body — confirm whether callers rely on them.

    Raises:
        MarketplaceHTTPRequest: the ingest download failed or the reply was
            not multipart.
    """
    # ds_name = ds_name.lower()
    # TODO: catch error conditions
    provider_info = self.mkt_contract.functions.getDataProviderInfo(
        Web3.toHex(ds_name)).call()
    # provider_info[4] flags whether the dataset is registered on-chain.
    if not provider_info[4]:
        print('The requested "{}" dataset is not registered in '
              'the Data Marketplace.'.format(ds_name))
        return
    address, address_i = self.choose_pubaddr()
    fns = self.mkt_contract.functions
    check_sub = fns.checkAddressSubscription(address,
                                             Web3.toHex(ds_name)).call()
    if check_sub[0] != address or self.to_text(check_sub[1]) != ds_name:
        print('You are not subscribed to dataset "{}" with address {}. '
              'Plese subscribe first.'.format(ds_name, address))
        return
    # check_sub[5] flags an active subscription; note this only warns and
    # does not return on expiry.
    # NOTE(review): adjacent literals concatenate with no space
    # ("UTC.Please") — probably a missing space in the message.
    if not check_sub[5]:
        print('Your subscription to dataset "{}" expired on {} UTC.'
              'Please renew your subscription by running:\n'
              'catalyst marketplace subscribe --dataset={}'.format(
                  ds_name,
                  pd.to_datetime(check_sub[4], unit='s', utc=True),
                  ds_name))
    # Use stored API credentials if present; otherwise derive them.
    if 'key' in self.addresses[address_i]:
        key = self.addresses[address_i]['key']
        secret = self.addresses[address_i]['secret']
    else:
        key, secret = get_key_secret(address)
    headers = get_signed_headers(ds_name, key, secret)
    log.debug('Starting download of dataset for ingestion...')
    r = requests.post(
        '{}/marketplace/ingest'.format(AUTH_SERVER),
        headers=headers,
        stream=True,
    )
    if r.status_code == 200:
        target_path = get_temp_bundles_folder()
        try:
            decoder = MultipartDecoder.from_response(r)
            for part in decoder.parts:
                h = part.headers[b'Content-Disposition'].decode('utf-8')
                # Extracting the filename from the header
                name = re.search(r'filename="(.*)"', h).group(1)
                filename = os.path.join(target_path, name)
                with open(filename, 'wb') as f:
                    f.write(part.content)
                self.process_temp_bundle(ds_name, filename)
        except NonMultipartContentTypeException:
            # Non-multipart replies carry a JSON error body.
            response = r.json()
            raise MarketplaceHTTPRequest(
                request='ingest dataset',
                error=response,
            )
    else:
        raise MarketplaceHTTPRequest(
            request='ingest dataset',
            error=r.status_code,
        )
    log.info('{} ingested successfully'.format(ds_name))
def predict(self, data: WrappaRequest, as_json: bool = False,
            path: str = None):
    """POST `data` to the service's predict endpoint and decode the reply.

    Args:
        data: a WrappaObject, or a list of WrappaObject sent as indexed
            fields.
        as_json: request and return a JSON response instead of multipart.
        path: endpoint path; defaults to 'predict'.

    Returns:
        The JSON body when `as_json`; otherwise a single WrappaObject (when
        the response parts carry no index) or a list of WrappaObject ordered
        by part index.

    Raises:
        ValueError: `data` is neither a list nor a WrappaObject.
        requests.HTTPError: the server returned an error status.
    """
    fields = {}
    if isinstance(data, list):
        for i, v in enumerate(data):
            res = self._prepare_request(v, i)
            fields.update(res)
    elif isinstance(data, WrappaObject):
        fields = self._prepare_request(data)
    else:
        raise ValueError('Wrong type for data')
    me = MultipartEncoder(fields=fields)
    headers = {
        'Authorization': 'Token ' + self._passphrase,
        'Content-Type': me.content_type,
    }
    if as_json:
        headers['Accept'] = 'application/json'
    else:
        headers['Accept'] = 'multipart/form-data'
    response = requests.post(
        os.path.join(self._address, path if path else 'predict'),
        headers=headers,
        data=me,
    )
    response.raise_for_status()
    if as_json:
        return response.json()
    md = MultipartDecoder.from_response(response)
    parts = defaultdict(WrappaObject)
    for part in md.parts:
        cd: bytes = part.headers[b'content-disposition']
        tmp = cd.split(b';')
        # Field names look like b'image-0' (typed + indexed) or b'text'.
        field_name = tmp[1].split(b'=')[-1][1:-1]
        filename = None
        if len(tmp) >= 3:
            filename = tmp[2].split(b'=')[-1][1:-1]
        if b'-' in field_name:
            obj_type, ind = field_name.split(b'-')
            ind = int(ind)
        else:
            obj_type, ind = field_name, None
        # Bug fix: filename is bytes, and str(bytes) yields the repr
        # "b'...'"; decode it instead, keeping None when no filename was
        # sent (the old code produced the literal string 'None').
        name = filename.decode('utf-8') if filename is not None else None
        if obj_type == b'image':
            parts[ind].set_value(
                WrappaImage(name=name, payload=part.content))
        if obj_type == b'file':
            parts[ind].set_value(
                WrappaFile(name=name, payload=part.content))
        if obj_type == b'text':
            # part.text is already str; the old str(...) wrapper was a no-op.
            parts[ind].set_value(WrappaText(part.text))
    if parts.get(None, None):
        return parts[None]
    return [parts[i] for i in sorted(parts.keys())]
def MrequestImages(self, code, integrity_check):
    """Download images relevant to a question, both original and annotated.

    Args:
        code (str): the task code such as "q1234g9".
        integrity_check: opaque token echoed to the server so it can detect
            that the task changed underneath us.

    Returns:
        3-tuple: `(image_list, annotated_image, plom_file)`
            `image_list` is a list of images (e.g., png files to be
            written to disc).
            `annotated_image` and `plom_file` are the png file and
            and data associated with a previous annotations, or None.

    Raises:
        PlomAuthenticationException
        PlomTaskChangedError: you no longer own this task.
        PlomTaskDeletedError
        PlomSeriousException
    """
    self.SRmutex.acquire()
    try:
        response = self.session.get(
            "https://{}/MK/images/{}".format(self.server, code),
            json={
                "user": self.user,
                "token": self.token,
                "integrity_check": integrity_check,
            },
            verify=False,
        )
        response.raise_for_status()
        # response is either [n, image1,..,image.n] or [n, image1,...,image.n, annotatedImage, plom-data]
        imagesAnnotAndPlom = MultipartDecoder.from_response(response).parts
        n = int(imagesAnnotAndPlom[0].content)  # 'n' sent as string
        imageList = [
            BytesIO(imagesAnnotAndPlom[i].content).getvalue()
            for i in range(1, n + 1)
        ]
        if len(imagesAnnotAndPlom) == n + 1:
            # all is fine - no annotated image or plom data
            anImage = None
            plDat = None
        elif len(imagesAnnotAndPlom) == n + 3:
            # all fine - last two parts are annotated image + plom-data
            anImage = BytesIO(imagesAnnotAndPlom[n + 1].content).getvalue(
            )  # pass back annotated-image as bytes
            plDat = BytesIO(imagesAnnotAndPlom[
                n + 2].content).getvalue()  # pass back plomData as bytes
        else:
            # Not an HTTPError, so this propagates past the except below.
            raise PlomSeriousException(
                "Number of images passed doesn't make sense {} vs {}".format(
                    n, len(imagesAnnotAndPlom)))
    except requests.HTTPError as e:
        if response.status_code == 401:
            raise PlomAuthenticationException() from None
        elif response.status_code == 404:
            raise PlomSeriousException(
                "Cannot find image file for {}.".format(code)) from None
        elif response.status_code == 409:
            raise PlomTaskChangedError(
                "Ownership of task {} has changed.".format(code)) from None
        elif response.status_code == 406:
            raise PlomTaskChangedError(
                "Task {} has been changed by manager.".format(
                    code)) from None
        elif response.status_code == 410:
            raise PlomTaskDeletedError(
                "Task {} has been deleted by manager.".format(
                    code)) from None
        else:
            raise PlomSeriousException(
                "Some other sort of error {}".format(e)) from None
    finally:
        self.SRmutex.release()
    return [imageList, anImage, plDat]
def MrequestImages(self, code, integrity_check):
    """Download images relevant to a question, both original and annotated.

    Args:
        code (str): the task code such as "q1234g9".
        integrity_check: opaque token echoed to the server so it can detect
            that the task changed underneath us.

    Returns:
        3-tuple: `(image_metadata, annotated_image, plom_file)`
            `image_metadata` has various stuff: DB ids, md5sums, etc
            `annotated_image` and `plom_file` are the png file and
            and data associated with a previous annotations, or None.

    Raises:
        PlomAuthenticationException
        PlomTaskChangedError: you no longer own this task.
        PlomTaskDeletedError
        PlomSeriousException
    """
    self.SRmutex.acquire()
    try:
        response = self.session.get(
            "https://{}/MK/images/{}".format(self.server, code),
            json={
                "user": self.user,
                "token": self.token,
                "integrity_check": integrity_check,
            },
            verify=False,
        )
        response.raise_for_status()
        # response is either [metadata] or [metadata, annotated_image, plom_file]
        imagesAnnotAndPlom = MultipartDecoder.from_response(response).parts
        image_metadata = json.loads(imagesAnnotAndPlom[0].text)
        if len(imagesAnnotAndPlom) == 1:
            # all is fine - no annotated image or plom data
            anImage = None
            plDat = None
        elif len(imagesAnnotAndPlom) == 3:
            # all fine - last two parts are annotated image + plom-data
            anImage = BytesIO(imagesAnnotAndPlom[1].content).getvalue()
            plDat = BytesIO(imagesAnnotAndPlom[2].content).getvalue()
        else:
            # Not an HTTPError, so this propagates past the except below.
            raise PlomSeriousException(
                "Number of returns doesn't make sense: should be 1 or 3 but is {}"
                .format(len(imagesAnnotAndPlom)))
    except requests.HTTPError as e:
        if response.status_code == 401:
            raise PlomAuthenticationException() from None
        elif response.status_code == 404:
            raise PlomSeriousException(
                "Cannot find image file for {}.".format(code)) from None
        elif response.status_code == 409:
            raise PlomTaskChangedError(
                "Ownership of task {} has changed.".format(code)) from None
        elif response.status_code == 406:
            raise PlomTaskChangedError(
                "Task {} has been changed by manager.".format(
                    code)) from None
        elif response.status_code == 410:
            raise PlomTaskDeletedError(
                "Task {} has been deleted by manager.".format(
                    code)) from None
        else:
            raise PlomSeriousException(
                "Some other sort of error {}".format(e)) from None
    finally:
        self.SRmutex.release()
    return image_metadata, anImage, plDat
def handle_result(self,
                  part: Union[Response, BodyPart],
                  first: bool = True) -> None:
    """Render one server response (or one multipart part) on the console.

    Dispatches on Content-Type:
      * text/plain — print it,
      * application/octet-stream with Resoto-Shell-Action "edit" and a
        command — open the file in $EDITOR and re-submit the command with
        the edited file if it changed,
      * application/octet-stream otherwise — save it to the download
        directory,
      * multipart/* — decode and recursively handle each part.

    `first` suppresses the part delimiter before the very first text part.
    """

    # store the file from the part inside the given directory;
    # returns (original name, unique path actually written).
    def store_file(directory: str) -> Tuple[str, str]:
        disposition = part.headers.get("Content-Disposition", "")
        match = re.findall('filename="([^"]+)"', disposition)
        name = match[0] if match else "out"
        path = os.path.join(directory, name)
        i = 0
        # Avoid clobbering an existing file: append -1, -2, ... until free.
        while os.path.exists(path):
            i += 1
            path = os.path.join(directory, f"{name}-{i}")
        with open(path, "wb+") as fh:
            fh.write(part.content)
        return name, path

    content_type = part.headers.get("Content-Type", "text/plain")
    action = part.headers.get("Resoto-Shell-Action")
    command = part.headers.get("Resoto-Shell-Command")
    line_delimiter = "---"
    # If we get a plain text result, we simply print it to the console.
    if content_type == "text/plain":
        # Received plain text: print it.
        if not first:
            print(line_delimiter)
        # A streaming Response has iter_lines; a decoded BodyPart does not.
        if hasattr(part, "iter_lines"):
            for line in part.iter_lines():
                print(line.decode("utf-8"))
        else:
            print(part.text)
    # File is sent in order to edit and return it.
    # We expect the command to define what should happen with the edited file.
    elif (content_type == "application/octet-stream" and action == "edit"
          and command):
        with TemporaryDirectory() as tmp:
            name, path = store_file(tmp)
            call([os.environ.get("EDITOR", "vi"), path])
            stats = Path(path).lstat()
            # mtime differing from ctime implies the editor saved changes.
            if stats.st_mtime != stats.st_ctime:
                self.handle_command(f"{command} {name}", {}, {name: path})
            else:
                print(
                    "No change made while editing the file. Update aborted."
                )
    # File is sent: save it to local disk
    elif content_type == "application/octet-stream":
        name, path = store_file(ArgumentParser.args.download_directory)
        print(f"Received a file {name}, which is stored to {path}.")
    # Multipart: handle each part separately
    elif content_type.startswith("multipart"):
        # Received a multipart response: parse the parts
        decoder = MultipartDecoder.from_response(part)

        def decode(value: Union[str, bytes]) -> str:
            return value.decode("utf-8") if isinstance(value, bytes) else value

        # NOTE(review): the loop variable shadows the `part` parameter;
        # harmless here since the parameter is not used after this point.
        for num, part in enumerate(decoder.parts):
            # BodyPart headers are bytes; normalize to str so the recursive
            # call can .get() with string keys.
            part.headers = {
                decode(k): decode(v)
                for k, v in part.headers.items()
            }
            self.handle_result(part, num == 0)