Example #1
 def __init__(self, path: FilenameOrPath, mime_type: str) -> None:
     """Initialize self given a path and MIME type."""
     self.path = Path(path)
     with open(self.path, "rb") as f:
         stream = io.BytesIO(f.read())
     super().__init__(stream=stream, mime_type=mime_type)
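The FilenameOrPath alias in the signature above is not shown here; a plausible definition, purely an assumption for illustration:

from pathlib import Path
from typing import Union

# Assumed alias (not part of the original snippet): anything Path() accepts.
FilenameOrPath = Union[str, Path]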
Example #2
    def do_POST(self):
        """This method services the POST request typically from either the Tenant or the Cloud Verifier.

        Only tenant and cloudverifier uri's are supported. Both requests require a nonce parameter.
        The Cloud verifier requires an additional mask parameter.  If the uri or parameters are incorrect, a 400 response is returned.
        """
        rest_params = config.get_restful_params(self.path)

        if rest_params is None:
            config.echo_json_response(
                self, 405, "Not Implemented: Use /keys/ interface")
            return

        if not rest_params["api_version"]:
            config.echo_json_response(self, 400, "API Version not supported")
            return

        content_length = int(self.headers.get('Content-Length', 0))
        if content_length <= 0:
            logger.warning('POST returning 400 response, expected content in message. url: %s', self.path)
            config.echo_json_response(self, 400, "expected content in message")
            return

        post_body = self.rfile.read(content_length)
        json_body = json.loads(post_body)

        b64_encrypted_key = json_body['encrypted_key']
        decrypted_key = crypto.rsa_decrypt(
            self.server.rsaprivatekey, base64.b64decode(b64_encrypted_key))

        have_derived_key = False

        if rest_params["keys"] == "ukey":
            self.server.add_U(decrypted_key)
            self.server.auth_tag = json_body['auth_tag']
            self.server.payload = json_body.get('payload', None)
            have_derived_key = self.server.attempt_decryption()
        elif rest_params["keys"] == "vkey":
            self.server.add_V(decrypted_key)
            have_derived_key = self.server.attempt_decryption()
        else:
            logger.warning('POST returning 400 response. URI not supported: %s', self.path)
            config.echo_json_response(self, 400, "uri not supported")
            return
        logger.info('POST of %s key returning 200', ('V', 'U')[rest_params["keys"] == "ukey"])
        config.echo_json_response(self, 200, "Success")

        # no derived key yet, so we're done
        if not have_derived_key:
            return

        # we have the derived key; write it out now
        secdir = secure_mount.mount()  # confirm that storage is still securely mounted

        # clean out the secure dir of any previous info before we extract files
        if os.path.isdir("%s/unzipped" % secdir):
            shutil.rmtree("%s/unzipped" % secdir)

        # write out key file
        with open(os.path.join(secdir, self.server.enc_keyname), 'w', encoding="utf-8") as f:
            f.write(base64.b64encode(self.server.K).decode())

        # stow the U value for later
        tpm_instance.write_key_nvram(self.server.final_U)

        # optionally extend a hash of the key and payload into the specified PCR
        tomeasure = self.server.K

        # if we have a good key, now attempt to write out the encrypted payload
        dec_path = os.path.join(secdir,
                                config.get('cloud_agent', "dec_payload_file"))
        enc_path = os.path.join(config.WORK_DIR, "encrypted_payload")

        dec_payload = None
        enc_payload = None
        if self.server.payload is not None:
            dec_payload = crypto.decrypt(
                self.server.payload, bytes(self.server.K))

            enc_payload = self.server.payload
        elif os.path.exists(enc_path):
            # if no payload provided, try to decrypt one from a previous run stored in encrypted_payload
            with open(enc_path, 'rb') as f:
                enc_payload = f.read()
            try:
                dec_payload = crypto.decrypt(enc_payload, self.server.K)
                logger.info("Decrypted previous payload in %s to %s", enc_path, dec_path)
            except Exception as e:
                logger.warning("Unable to decrypt previous payload %s with derived key: %s", enc_path, e)
                os.remove(enc_path)
                enc_payload = None

        # also write out the encrypted payload so it can be decrypted on a later run
        if enc_payload is not None:
            with open(enc_path, 'wb') as f:
                f.write(enc_payload if isinstance(enc_payload, bytes) else enc_payload.encode('utf-8'))

        # deal with payload
        payload_thread = None
        if dec_payload is not None:
            tomeasure = tomeasure + dec_payload
            # see if payload is a zip
            zfio = io.BytesIO(dec_payload)
            if config.getboolean('cloud_agent', 'extract_payload_zip') and zipfile.is_zipfile(zfio):
                logger.info("Decrypting and unzipping payload to %s/unzipped", secdir)
                with zipfile.ZipFile(zfio, 'r') as f:
                    f.extractall('%s/unzipped' % secdir)

                # run an included script if one has been provided
                initscript = config.get('cloud_agent', 'payload_script')
                if initscript != "":
                    def initthread():
                        env = os.environ.copy()
                        env['AGENT_UUID'] = self.server.agent_uuid
                        proc = subprocess.Popen(["/bin/bash", initscript], env=env, shell=False, cwd='%s/unzipped' % secdir,
                                                stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                        while True:
                            line = proc.stdout.readline()
                            # stdout is a byte stream, so compare against b''
                            if line == b'' and proc.poll() is not None:
                                break
                            if line:
                                logger.debug("init-output: %s", line.strip())
                        # should be a no-op as poll already told us it's done
                        proc.wait()

                    if not os.path.exists(
                            os.path.join(secdir, "unzipped", initscript)):
                        logger.info("No payload script %s found in %s/unzipped", initscript, secdir)
                    else:
                        logger.info("Executing payload script: %s/unzipped/%s", secdir, initscript)
                        payload_thread = threading.Thread(target=initthread)
            else:
                logger.info("Decrypting payload to %s", dec_path)
                with open(dec_path, 'wb') as f:
                    f.write(dec_payload)
            zfio.close()

        # now extend a measurement of the payload and key if there was one
        pcr = config.getint('cloud_agent', 'measure_payload_pcr')
        if 0 < pcr < 24:
            logger.info("extending measurement of payload into PCR %s", pcr)
            measured = tpm_instance.hashdigest(tomeasure)
            tpm_instance.extendPCR(pcr, measured)

        if payload_thread is not None:
            payload_thread.start()

        return
Example #3
 def _save_load_module(self, m):
     scripted_module = torch.jit.script(m())
     buffer = io.BytesIO()
     torch.jit.save(scripted_module, buffer)
     buffer.seek(0)
     return torch.jit.load(buffer)
Example #4
###
###  Francis Parker Lunch menu downloader and parser, improved
###  By Jake Boxerman
###  Original Sep. 2019, improved Oct. 2019
###

import pdftotext
from six.moves.urllib.request import urlopen
import io
import sys

# Getting the lunch menu PDF and putting it into a variable
url = 'https://fwparker.myschoolapp.com/ftpimages/1048/download/download_3453838.pdf'
remote_file = urlopen(url).read()
memory_file = io.BytesIO(remote_file)
pdf = pdftotext.PDF(memory_file)
pdf_content = pdf[0]

# Getting the requested day
day_requested = str(sys.argv[1]).title()

# Turning the string into list (each word is its own element)
menu = pdf_content.split()

# Creating a dictionary mapping each day to the position of its meal list in the menu list
day_positions = {"Monday:": 0, "Tuesday:": 0, "Wednesday:": 0, "Thursday:": 0, "Friday:": 0}

# Removing stray Unicode characters (bullet points); filtering avoids
# mutating the list while iterating over it
menu = [item for item in menu if item != "\uf0b7"]
Example #5
 def _img_test_func(self):
     img = Image.new('RGB', (32, 32), 'blue')
     img_io = io.BytesIO()
     img.save(img_io, format='JPEG')
     img_io.seek(0)
     return img_io
Example #6
 def phash(image_bytes: bytes):
     return imagehash.phash(Image.open(io.BytesIO(image_bytes)))
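A hedged usage sketch for the helper above: imagehash hashes support subtraction, which yields the Hamming distance between the two hashes, so near-duplicate detection becomes a threshold check. The threshold value here is an arbitrary assumption, not something from the original code.

import io

import imagehash
from PIL import Image

def images_look_alike(bytes_a: bytes, bytes_b: bytes, max_distance: int = 5) -> bool:
    # A small Hamming distance between perceptual hashes means the images
    # are visually similar; 5 is an arbitrary example threshold.
    h_a = imagehash.phash(Image.open(io.BytesIO(bytes_a)))
    h_b = imagehash.phash(Image.open(io.BytesIO(bytes_b)))
    return (h_a - h_b) <= max_distance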
Example #7
 def test_valid_json(self):
     parser = parsers.JSONParser()
     stream = io.BytesIO(b'{"key": 1, "other": "two"}')
     data = parser.parse(stream, 'application/json')
     self.assertEqual(data, {"key": 1, "other": "two"})
Example #8
def export_request(request,
                   commission,
                   func_for_get_data_all=export_personal_info,
                   func_for_woorksheet=save_personal_info_to_woorksheet,
                   namefile='Перс. данные',
                   dop_name="Личная информация",
                   all_in_one_wheet=False):
    """
    Функция, которая выгружает красивые эксельки
    :param commission: Комиссия или 'all'
    :param func_for_get_data_all: (export_personal_info) - Функция, которая получает всю информацию в словарь датафреймов
    :param func_for_woorksheet: (save_personal_info_to_woorksheet) - Функция, которая красиво сохраняет именно этот тип данных
    :param namefile: 'Перс. данные' - название файла на русском
    :param dop_name: ' Личная информация' - Это заголовок к таблицам
    :return: response
    """

    output = io.BytesIO()
    workbook = Workbook(output, {'in_memory': True})
    result = func_for_get_data_all()

    if commission == 'all':
        if not all_in_one_wheet:
            for com in list(result):
                data = result[com]
                worksheet = workbook.add_worksheet(com.group.name)
                worksheet = func_for_woorksheet(workbook,
                                                worksheet,
                                                data,
                                                com.group.name,
                                                dop_name=dop_name)

        else:
            all_data = pd.DataFrame()
            for com in list(result):
                data = result[com]
                all_data = pd.concat([all_data, data])  # DataFrame.append was removed in pandas 2.0
            worksheet = workbook.add_worksheet("Все комиссии")
            worksheet = func_for_woorksheet(workbook,
                                            worksheet,
                                            all_data,
                                            "Все комиссии",
                                            dop_name=dop_name)
    else:
        data = result[commission]
        worksheet = workbook.add_worksheet(commission.group.name)
        worksheet = func_for_woorksheet(workbook,
                                        worksheet,
                                        data,
                                        commission.group.name,
                                        dop_name=dop_name)

    workbook.close()

    now = datetime.now()
    date_ = '{}-{} {}-{}-{}'.format(now.hour, now.minute, now.day,
                                    now.month, now.year)
    output.seek(0)
    response = HttpResponse(
        output.read(),
        content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
    if commission == 'all':
        rus_name = '{}_{}.xlsx'.format(namefile, date_)
    else:
        rus_name = '{}_{}_{}.xlsx'.format(namefile, commission.group.name,
                                          date_)
    filename = transliterate.translit(rus_name, reversed=True)
    response['Content-Disposition'] = "attachment; filename={}".format(
        filename)
    output.close()
    return response
Example #9
def test_nest(nest_from_config):
    crs = cimgt.GoogleTiles().crs
    z0 = cimg_nest.ImageCollection('aerial z0 test', crs)
    z0.scan_dir_for_imgs(os.path.join(_TEST_DATA_DIR, 'z_0'),
                         glob_pattern='*.png',
                         img_class=RoundedImg)

    z1 = cimg_nest.ImageCollection('aerial z1 test', crs)
    z1.scan_dir_for_imgs(os.path.join(_TEST_DATA_DIR, 'z_1'),
                         glob_pattern='*.png',
                         img_class=RoundedImg)

    z2 = cimg_nest.ImageCollection('aerial z2 test', crs)
    z2.scan_dir_for_imgs(os.path.join(_TEST_DATA_DIR, 'z_2'),
                         glob_pattern='*.png',
                         img_class=RoundedImg)

    # make sure all the images from z1 are contained by the z0 image. The
    # only reason this might fail is if the tfw files are handling
    # floating point values badly
    for img in z1.images:
        if not z0.images[0].bbox().contains(img.bbox()):
            raise OSError(
                'The test images aren\'t all "contained" by the z0 images, '
                'the nest cannot possibly work.\n'
                f'img {img!s} not contained by {z0.images[0]!s}\n'
                f'Extents: {img.extent!s}; {z0.images[0].extent!s}')
    nest_z0_z1 = cimg_nest.NestedImageCollection('aerial test', crs, [z0, z1])

    nest = cimg_nest.NestedImageCollection('aerial test', crs, [z0, z1, z2])

    z0_key = ('aerial z0 test', z0.images[0])

    assert z0_key in nest_z0_z1._ancestry.keys()
    assert len(nest_z0_z1._ancestry) == 1

    # check that it has figured out that all the z1 images are children of
    # the only z0 image
    for img in z1.images:
        key = ('aerial z0 test', z0.images[0])
        assert ('aerial z1 test', img) in nest_z0_z1._ancestry[key]

    x1_y0_z1, = (img for img in z1.images
                 if img.filename.endswith('z_1/x_1_y_0.png'))

    assert (1, 0, 1) == _tile_from_img(x1_y0_z1)

    assert ([(2, 0, 2), (2, 1, 2), (3, 0, 2), (3, 1, 2)] == sorted(
        _tile_from_img(img)
        for z, img in nest.subtiles(('aerial z1 test', x1_y0_z1))))

    # check that the images in the nest from configuration are the
    # same as those created by hand.
    for name in nest_z0_z1._collections_by_name.keys():
        for img in nest_z0_z1._collections_by_name[name].images:
            collection = nest_from_config._collections_by_name[name]
            assert img in collection.images

    assert nest_z0_z1._ancestry == nest_from_config._ancestry

    # check that a nest can be pickled and unpickled easily.
    s = io.BytesIO()
    pickle.dump(nest_z0_z1, s)
    s.seek(0)
    nest_z0_z1_from_pickle = pickle.load(s)

    assert nest_z0_z1._ancestry == nest_z0_z1_from_pickle._ancestry
Example #10
def test_mathtext_to_png(tmpdir):
    mt = mathtext.MathTextParser('bitmap')
    mt.to_png(str(tmpdir.join('example.png')), '$x^2$')
    mt.to_png(io.BytesIO(), '$x^2$')
Example #11
s = [b'a', b'b', b'c']
print(s)
# print(bytes.join(s))  # TypeError: descriptor 'join' requires a 'bytes' object but received a 'list'
print(b''.join(s))

import io
byteio = io.BytesIO()
for v in s: byteio.write(v)
print(byteio.getvalue())
byteio.close()
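To make the commented-out TypeError above concrete: bytes.join is an unbound method descriptor, so it needs a bytes separator as its first argument; called that way it behaves exactly like b''.join(s).

print(bytes.join(b'', s))  # b'abc' -- equivalent to b''.join(s)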
Example #12
def test_math_to_image(tmpdir):
    mathtext.math_to_image('$x^2$', str(tmpdir.join('example.png')))
    mathtext.math_to_image('$x^2$', io.BytesIO())
Example #13
    def _post_pdf(self, save_in_attachment, pdf_content=None, res_ids=None):
        '''Merge the existing attachments by adding the content of each attachment
        one by one, then append pdf_content if it exists. Create the attachments for
        each record individually if required.

        :param save_in_attachment: The retrieved attachments as map record.id -> attachment_id.
        :param pdf_content: The pdf content newly generated by wkhtmltopdf.
        :param res_ids: the ids of record to allow postprocessing.
        :return: The pdf content of the merged pdf.
        '''

        def close_streams(streams):
            for stream in streams:
                try:
                    stream.close()
                except Exception:
                    pass

        # Check special case having only one record with existing attachment.
        if len(save_in_attachment) == 1 and not pdf_content:
            return self._merge_pdfs(list(save_in_attachment.values()))

        # Create a list of streams representing all sub-reports part of the final result
        # in order to append the existing attachments and the potentially modified sub-reports
        # by the postprocess_pdf_report calls.
        streams = []

        # If wkhtmltopdf has been called, we need to split the pdf in order to call the postprocess method.
        if pdf_content:
            pdf_content_stream = io.BytesIO(pdf_content)
            # Build a record_map mapping id -> record
            record_map = {r.id: r for r in self.env[self.model].browse([res_id for res_id in res_ids if res_id])}

            # If no value in attachment or no record specified, only append the whole pdf.
            if not record_map or not self.attachment:
                streams.append(pdf_content_stream)
            else:
                if len(res_ids) == 1:
                    # Only one record, so postprocess directly and append the whole pdf.
                    if res_ids[0] in record_map and res_ids[0] not in save_in_attachment:
                        new_stream = self.postprocess_pdf_report(record_map[res_ids[0]], pdf_content_stream)
                        # If the buffer has been modified, mark the old buffer to be closed as well.
                        if new_stream and new_stream != pdf_content_stream:
                            close_streams([pdf_content_stream])
                            pdf_content_stream = new_stream
                    streams.append(pdf_content_stream)
                else:
                    # In case of multiple docs, we need to split the pdf according the records.
                    # To do so, we split the pdf based on outlines computed by wkhtmltopdf.
                    # An outline is a <h?> html tag found on the document. To retrieve this table,
                    # we look on the pdf structure using pypdf to compute the outlines_pages that is
                    # an array like [0, 3, 5] that means a new document start at page 0, 3 and 5.
                    reader = PdfFileReader(pdf_content_stream)
                    if reader.trailer['/Root'].get('/Dests'):
                        outlines_pages = sorted(
                            [outline.getObject()[0] for outline in reader.trailer['/Root']['/Dests'].values()])
                        assert len(outlines_pages) == len(res_ids)
                        for i, num in enumerate(outlines_pages):
                            to = outlines_pages[i + 1] if i + 1 < len(outlines_pages) else reader.numPages
                            attachment_writer = PdfFileWriter()
                            for j in range(num, to):
                                attachment_writer.addPage(reader.getPage(j))
                            stream = io.BytesIO()
                            attachment_writer.write(stream)
                            if res_ids[i] and res_ids[i] not in save_in_attachment:
                                new_stream = self.postprocess_pdf_report(record_map[res_ids[i]], stream)
                                # If the buffer has been modified, mark the old buffer to be closed as well.
                                if new_stream and new_stream != stream:
                                    close_streams([stream])
                                    stream = new_stream
                            streams.append(stream)
                        close_streams([pdf_content_stream])
                    else:
                        # If no outlines available, do not save each record
                        streams.append(pdf_content_stream)

        # If attachment_use is checked, the records that already have an existing attachment
        # have not been rendered by wkhtmltopdf. So, create a new stream for each of them.
        if self.attachment_use:
            for stream in save_in_attachment.values():
                streams.append(stream)

        # Build the final pdf.
        # If only one stream left, no need to merge them (and then, preserve embedded files).
        if len(streams) == 1:
            result = streams[0].getvalue()
        else:
            result = self._merge_pdfs(streams)

        # We have to close the streams after PdfFileWriter's call to write()
        close_streams(streams)
        return result
Example #14
    async def upload(self,
                     bucket: str,
                     object_name: str,
                     file_data: Any,
                     *,
                     content_type: Optional[str] = None,
                     parameters: Optional[Dict[str, str]] = None,
                     headers: Optional[Dict[str, str]] = None,
                     metadata: Optional[Dict[str, str]] = None,
                     session: Optional[Session] = None,
                     force_resumable_upload: Optional[bool] = None,
                     timeout: int = 30) -> Dict[str, Any]:
        url = f'{API_ROOT_UPLOAD}/{bucket}/o'

        stream = self._preprocess_data(file_data)

        if BUILD_GCLOUD_REST and isinstance(stream, io.StringIO):
            # HACK: `requests` library does not accept `str` as `data` in `put`
            # HTTP request.
            stream = io.BytesIO(stream.getvalue().encode('utf-8'))

        content_length = self._get_stream_len(stream)

        # mime detection method same as in aiohttp 3.4.4
        content_type = content_type or mimetypes.guess_type(object_name)[0]

        parameters = parameters or {}

        headers = headers or {}
        headers.update(await self._headers())
        headers.update({
            'Content-Length': str(content_length),
            'Content-Type': content_type or '',
        })

        upload_type = self._decide_upload_type(force_resumable_upload,
                                               content_length)
        log.debug('using %r gcloud storage upload method', upload_type)

        if upload_type == UploadType.SIMPLE:
            if metadata:
                log.warning('metadata will be ignored for upload_type=Simple')
            return await self._upload_simple(url,
                                             object_name,
                                             stream,
                                             parameters,
                                             headers,
                                             session=session,
                                             timeout=timeout)

        if upload_type == UploadType.RESUMABLE:
            return await self._upload_resumable(url,
                                                object_name,
                                                stream,
                                                parameters,
                                                headers,
                                                metadata=metadata,
                                                session=session,
                                                timeout=timeout)

        raise TypeError(f'upload type {upload_type} not supported')
Example #15
def pycurl_request(url, method="get", payload=None, timeout=None):
    """
    :param url: the URL to request
    :param method: the HTTP method (get | post)
    :param payload: optional dict sent as a JSON body on POST
    :param timeout: connect timeout in seconds (defaults to args.connect_timeout)
    :return: dict of response data and timing metrics
    """
    if timeout is None:
        timeout = args.connect_timeout

    m = {}
    method = method.upper()
    buffer = io.BytesIO()

    curl = pycurl.Curl()
    curl.setopt(pycurl.URL, url)
    curl.setopt(pycurl.TIMEOUT, timeout)
    curl.setopt(pycurl.WRITEFUNCTION, buffer.write)
    curl.setopt(pycurl.CUSTOMREQUEST, method)  # already upper-cased above
    curl.setopt(pycurl.HTTPHEADER, ['User-Agent: prep-tool', 'Content-Type: application/json'])
    curl.setopt(pycurl.SSL_VERIFYPEER, False)
    # curl.setopt(pycurl.FOLLOWLOCATION, 1)

    if payload and method == "POST":
        body_as_json_string = json.dumps(payload) # dict to json
        body_as_file_object = io.StringIO(body_as_json_string)
        curl.setopt(pycurl.POST, True)
        curl.setopt(pycurl.READDATA, body_as_file_object)
        curl.setopt(pycurl.POSTFIELDSIZE, len(body_as_json_string))

    try:
        curl.perform()
        response = buffer.getvalue().decode('UTF-8')
        try:
            json_response = json.loads(response)
        except json.decoder.JSONDecodeError:
            json_response = None

        m['effective-url'] = curl.getinfo(pycurl.EFFECTIVE_URL)
        m['status_code'] = curl.getinfo(pycurl.HTTP_CODE)

        last_elapsed = 0
        elapsed_time = 0
        elapsed_each = 0
        total_elapsed_each_sum = 0
        """
        https://curl.haxx.se/libcurl/c/curl_easy_getinfo.html
         |
         |--NAMELOOKUP
         |--|--CONNECT
         |--|--|--APPCONNECT
         |--|--|--|--PRETRANSFER
         |--|--|--|--|--STARTTRANSFER
         |--|--|--|--|--|--TOTAL
         |--|--|--|--|--|--REDIRECT
        """
        for attr_name in CURL_TIME_ATTRS:
            pycurl_attr = getattr(pycurl, attr_name)
            elapsed_time = int(curl.getinfo(pycurl_attr) * 1000)
            # elapsed_time = curl.getinfo(pycurl_attr)
            if attr_name == "TOTAL_TIME":
                elapsed_each = elapsed_time
            elif elapsed_time > 0:
                elapsed_each = elapsed_time - last_elapsed
            else:
                elapsed_each = 0
            total_elapsed_each_sum += elapsed_each
            if elapsed_time > 0:
                last_elapsed = elapsed_time
            m[attr_name] = elapsed_each
        m['redirect-count'] = curl.getinfo(pycurl.REDIRECT_COUNT)
        m['header-size'] = curl.getinfo(pycurl.HEADER_SIZE)

        # m['size-upload'] = curl.getinfo(pycurl.SIZE_UPLOAD)
        # m['size-download'] = curl.getinfo(pycurl.SIZE_DOWNLOAD)
        # m['speed-upload'] = curl.getinfo(pycurl.SPEED_UPLOAD)
        #
        # m['request-size'] = curl.getinfo(pycurl.REQUEST_SIZE)
        # m['content-length-download'] = curl.getinfo(pycurl.CONTENT_LENGTH_DOWNLOAD)
        # m['content-length-upload'] = curl.getinfo(pycurl.CONTENT_LENGTH_UPLOAD)
        #
        # m['content-type'] = curl.getinfo(pycurl.CONTENT_TYPE)
        # m['response-code'] = curl.getinfo(pycurl.RESPONSE_CODE)
        # m['speed-download'] = curl.getinfo(pycurl.SPEED_DOWNLOAD)
        # m['ssl-verifyresult'] = curl.getinfo(pycurl.SSL_VERIFYRESULT)
        # m['filetime'] = curl.getinfo(pycurl.INFO_FILETIME)
        #
        #
        # m['http-connectcode'] = curl.getinfo(pycurl.HTTP_CONNECTCODE)
        # # m['httpauth-avail'] = curl.getinfo(pycurl.HTTPAUTH_AVAIL)
        # # m['proxyauth-avail'] = curl.getinfo(pycurl.PROXYAUTH_AVAIL)
        # m['os-errno'] = curl.getinfo(pycurl.OS_ERRNO)
        # m['num-connects'] = curl.getinfo(pycurl.NUM_CONNECTS)
        # m['ssl-engines'] = curl.getinfo(pycurl.SSL_ENGINES)
        # m['cookielist'] = curl.getinfo(pycurl.INFO_COOKIELIST)
        # m['lastsocket'] = curl.getinfo(pycurl.LASTSOCKET)

        m['json'] = json_response
        if m.get("json") is None:
            m['response'] = response
    except Exception as error:
        response = None
        json_response = None
        if args.verbose > 2:
            print_debug(f"{url} , {error.args[1]}", "FAIL")
        m['error'] = error.args[1]

    buffer.close()
    curl.close()
    return m
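CURL_TIME_ATTRS is referenced above but not shown. Given the getattr(pycurl, attr_name) lookup and the special-casing of "TOTAL_TIME", a plausible definition, offered as an assumption, is the list of cumulative libcurl timing attributes in the order of the diagram:

# Assumed definition (not shown in the snippet): cumulative pycurl timing
# attribute names, ordered as in the curl_easy_getinfo() hierarchy above.
CURL_TIME_ATTRS = [
    "NAMELOOKUP_TIME",
    "CONNECT_TIME",
    "APPCONNECT_TIME",
    "PRETRANSFER_TIME",
    "STARTTRANSFER_TIME",
    "TOTAL_TIME",
    "REDIRECT_TIME",
]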
Example #16
def self_test(codec):
    (query_bytes, expected_bytes) = test_query()

    def run(stdin, stdout, stderr):  # main() returns a falsy value on success
        return not main(sys.argv[0], stdin=stdin, stdout=stdout, stderr=stderr)

    stdout = io.BytesIO()
    if not (run(io.BytesIO(query_bytes), stdout, io.BytesIO()) and stdout.getvalue() == expected_bytes):
        raise sqlite3.ProgrammingError("byte I/O is broken")
    stdout = io.StringIO()
    if not (run(io.StringIO(query_bytes.decode('ascii')), stdout, io.StringIO()) and stdout.getvalue() == codec.decode(expected_bytes, 'surrogateescape')):
        raise sqlite3.ProgrammingError("string I/O is broken")
Example #17
 async def close_channel(self, ctx, reason, anon: bool = False):
     try:
         await ctx.send(embed=discord.Embed(description="Closing channel...", colour=self.bot.primary_colour))
         data = await self.bot.get_data(ctx.guild.id)
         if data[7] == 1:  # match the equality check used further below
             messages = await ctx.channel.history(limit=10000).flatten()
         await ctx.channel.delete()
         embed = discord.Embed(
             title="Ticket Closed",
             description=(reason if reason else "No reason was provided."),
             colour=self.bot.error_colour,
             timestamp=datetime.datetime.utcnow(),
         )
         embed.set_author(
             name=f"{ctx.author.name}#{ctx.author.discriminator}" if anon is False else "Anonymous#0000",
             icon_url=ctx.author.avatar_url if anon is False else "https://cdn.discordapp.com/embed/avatars/0.png",
         )
         embed.set_footer(text=f"{ctx.guild.name} | {ctx.guild.id}", icon_url=ctx.guild.icon_url)
         member = ctx.guild.get_member(self.bot.tools.get_modmail_user(ctx.channel))
         if member:
             try:
                 data = await self.bot.get_data(ctx.guild.id)
                 if data[6]:
                     embed2 = discord.Embed(
                         title="Custom Closing Message",
                         description=self.bot.tools.tag_format(data[6], member),
                         colour=self.bot.mod_colour,
                         timestamp=datetime.datetime.utcnow(),
                     )
                     embed2.set_footer(
                         text=f"{ctx.guild.name} | {ctx.guild.id}",
                         icon_url=ctx.guild.icon_url,
                     )
                     await member.send(embed=embed2)
                 await member.send(embed=embed)
             except discord.Forbidden:
                 pass
         if data[4]:
             channel = ctx.guild.get_channel(data[4])
             if channel:
                 try:
                     if member is None:
                         member = await self.bot.fetch_user(self.bot.tools.get_modmail_user(ctx.channel))
                     if member:
                         embed.set_footer(
                             text=f"{member.name}#{member.discriminator} | {member.id}",
                             icon_url=member.avatar_url,
                         )
                     else:
                         embed.set_footer(
                             text="Unknown#0000 | 000000000000000000",
                             icon_url="https://cdn.discordapp.com/embed/avatars/0.png",
                         )
                     if data[7] == 1:
                         history = ""
                         for m in messages:
                             if m.author.bot and (
                                 m.author.id != self.bot.user.id
                                 or len(m.embeds) <= 0
                                 or m.embeds[0].title not in ["Message Received", "Message Sent"]
                             ):
                                 continue
                             if not m.author.bot and m.content == "":
                                 continue
                             author = f"{m.author} (Comment)"
                             description = m.content
                             if m.author.bot:
                                 if not m.embeds[0].author.name:
                                     author = f"{' '.join(m.embeds[0].footer.text.split()[:-2])} (User)"
                                 else:
                                     author = f"{m.embeds[0].author.name} (Staff)"
                                 description = m.embeds[0].description
                                 for attachment in [
                                     field.value
                                     for field in m.embeds[0].fields
                                     if field.name.startswith("Attachment ")
                                 ]:
                                     if not description:
                                         description = f"(Attachment: {attachment})"
                                     else:
                                         description = description + f" (Attachment: {attachment})"
                             history = (
                                 f"[{str(m.created_at.replace(microsecond=0))}] {author}: "
                                 f"{description}\n" + history
                             )
                         history = io.BytesIO(history.encode())
                         file = discord.File(
                             history, f"modmail_log_{self.bot.tools.get_modmail_user(ctx.channel)}.txt"
                         )
                         msg = await channel.send(embed=embed, file=file)
                         log_url = msg.attachments[0].url[39:-4]
                         log_url = log_url.replace("modmail_log_", "")
                         log_url = [hex(int(some_id))[2:] for some_id in log_url.split("/")]
                         log_url = f"https://modmail.xyz/logs/{'-'.join(log_url)}"
                         embed.add_field(name="Message Logs", value=log_url, inline=False)
                         await asyncio.sleep(0.5)
                         await msg.edit(embed=embed)
                         return
                     await channel.send(embed=embed)
                 except discord.Forbidden:
                     pass
     except discord.Forbidden:
         await ctx.send(
             embed=discord.Embed(
                 description="Missing permissions to delete this channel.",
                 colour=self.bot.error_colour,
             )
         )
Example #18
 def __init__(self, frequency, _type="sine"):
     self.frequency = frequency
     self.f = OSCILLATOR_TYPES[_type]
     self.o = io.BytesIO()
Example #19
    def test_write_help_in_xmlformat(self):
        fv = flags.FlagValues()
        # Since these flags are defined by the top module, they are all key.
        flags.DEFINE_integer('index', 17, 'An integer flag', flag_values=fv)
        flags.DEFINE_integer('nb_iters',
                             17,
                             'An integer flag',
                             lower_bound=5,
                             upper_bound=27,
                             flag_values=fv)
        flags.DEFINE_string('file_path',
                            '/path/to/my/dir',
                            'A test string flag.',
                            flag_values=fv)
        flags.DEFINE_boolean('use_gpu',
                             False,
                             'Use gpu for performance.',
                             flag_values=fv)
        flags.DEFINE_enum('cc_version',
                          'stable', ['stable', 'experimental'],
                          'Compiler version to use.',
                          flag_values=fv)
        flags.DEFINE_list('files',
                          'a.cc,a.h,archive/old.zip',
                          'Files to process.',
                          flag_values=fv)
        flags.DEFINE_list('allow_users', ['alice', 'bob'],
                          'Users with access.',
                          flag_values=fv)
        flags.DEFINE_spaceseplist('dirs',
                                  'src libs bins',
                                  'Directories to create.',
                                  flag_values=fv)
        flags.DEFINE_multi_string('to_delete', ['a.cc', 'b.h'],
                                  'Files to delete',
                                  flag_values=fv)
        flags.DEFINE_multi_integer('cols', [5, 7, 23],
                                   'Columns to select',
                                   flag_values=fv)
        flags.DEFINE_multi_enum('flavours', ['APPLE', 'BANANA'],
                                ['APPLE', 'BANANA', 'CHERRY'],
                                'Compilation flavour.',
                                flag_values=fv)
        # Define a few flags in a different module.
        module_bar.define_flags(flag_values=fv)
        # And declare only a few of them to be key.  This way, we have
        # different kinds of flags, defined in different modules, and not
        # all of them are key flags.
        flags.declare_key_flag('tmod_bar_z', flag_values=fv)
        flags.declare_key_flag('tmod_bar_u', flag_values=fv)

        # Generate flag help in XML format in the StringIO sio.
        sio = io.StringIO() if six.PY3 else io.BytesIO()
        fv.write_help_in_xml_format(sio)

        # Check that we got the expected result.
        expected_output_template = EXPECTED_HELP_XML_START
        main_module_name = sys.argv[0]
        module_bar_name = module_bar.__name__

        if main_module_name < module_bar_name:
            expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MAIN_MODULE
            expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MODULE_BAR
        else:
            expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MODULE_BAR
            expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MAIN_MODULE

        expected_output_template += EXPECTED_HELP_XML_END

        # XML representation of the whitespace list separators.
        whitespace_separators = _list_separators_in_xmlformat(
            string.whitespace, indent='    ')
        expected_output = (expected_output_template % {
            'basename_of_argv0': os.path.basename(sys.argv[0]),
            'usage_doc': sys.modules['__main__'].__doc__,
            'main_module_name': main_module_name,
            'module_bar_name': module_bar_name,
            'whitespace_separators': whitespace_separators
        })

        actual_output = sio.getvalue()
        self.assertMultiLineEqual(expected_output, actual_output)

        # Also check that our result is valid XML.  minidom.parseString
        # throws an xml.parsers.expat.ExpatError in case of an error.
        xml.dom.minidom.parseString(actual_output)
Example #20
def plot_png():
    fig = create_figure(session)
    output = io.BytesIO()
    FigureCanvas(fig).print_png(output)
    return Response(output.getvalue(), mimetype='image/png')
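plot_png above leans on surrounding context that is not shown (a Flask app, the Agg canvas, and the application's own create_figure helper). A minimal self-contained sketch of that context, with a stand-in figure instead of create_figure(session), might look like:

import io

from flask import Flask, Response
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure

app = Flask(__name__)

@app.route('/plot.png')
def plot_png_demo():
    fig = Figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot([0, 1, 2, 3], [0, 1, 4, 9])  # stand-in for create_figure(session)
    output = io.BytesIO()
    FigureCanvas(fig).print_png(output)  # render the figure as PNG into the buffer
    return Response(output.getvalue(), mimetype='image/png')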
Example #21
def postprocess_translations(reduce_diff_hacks=False):
    print('Checking and postprocessing...')

    if reduce_diff_hacks:
        global _orig_escape_cdata
        _orig_escape_cdata = ET._escape_cdata
        ET._escape_cdata = escape_cdata

    for (filename,filepath) in all_ts_files():
        os.rename(filepath, filepath+'.orig')

    have_errors = False
    for (filename,filepath) in all_ts_files('.orig'):
        # pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8')  # need to override the encoding because 'utf8' is not understood, only 'utf-8'
        with open(filepath + '.orig', 'rb') as f:
            data = f.read()
        # remove control characters; this must be done over the entire file otherwise the XML parser will fail
        data = remove_invalid_characters(data)
        tree = ET.parse(io.BytesIO(data), parser=parser)

        # iterate over all messages in file
        root = tree.getroot()
        for context in root.findall('context'):
            for message in context.findall('message'):
                numerus = message.get('numerus') == 'yes'
                source = message.find('source').text
                translation_node = message.find('translation')
                # pick all numerusforms
                if numerus:
                    translations = [i.text for i in translation_node.findall('numerusform')]
                else:
                    translations = [translation_node.text]

                for translation in translations:
                    if translation is None:
                        continue
                    errors = []
                    valid = check_format_specifiers(source, translation, errors, numerus)

                    for error in errors:
                        print('%s: %s' % (filename, error))

                    if not valid: # set type to unfinished and clear string if invalid
                        translation_node.clear()
                        translation_node.set('type', 'unfinished')
                        have_errors = True

                # Remove location tags
                for location in message.findall('location'):
                    message.remove(location)

                # Remove entire message if it is an unfinished translation
                if translation_node.get('type') == 'unfinished':
                    context.remove(message)

        # check if document is (virtually) empty, and remove it if so
        num_messages = 0
        for context in root.findall('context'):
            for message in context.findall('message'):
                num_messages += 1
        if num_messages < MIN_NUM_MESSAGES:
            print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
            continue

        # write fixed-up tree
        # if diff reduction requested, replace some XML to 'sanitize' to qt formatting
        if reduce_diff_hacks:
            out = io.BytesIO()
            tree.write(out, encoding='utf-8')
            out = out.getvalue()
            out = out.replace(b' />', b'/>')
            with open(filepath, 'wb') as f:
                f.write(out)
        else:
            tree.write(filepath, encoding='utf-8')
    return have_errors
Example #22
 def __call__(self, *args, **kwargs):
     self.invocation_count += 1
     self.last_invocation = (args, kwargs)
     return io.BytesIO(b'some bytes')
Example #23
 def load_bytes():
     with open(test_file_path, 'rb') as f:
         return io.BytesIO(f.read())
Example #24
import io
import boto3  # used below for the S3 upload (missing from the original imports)
import numpy as np  # used below as np (missing from the original imports)
import sagemaker  # used below for Session/Estimator (missing from the original imports)
import sagemaker.amazon.common as smac
import pandas as pd

dataset = pd.read_csv("well_log_train.csv")

feature_dataset = dataset[['Density', 'Sonic']]
features = np.array(feature_dataset.values).astype('float32')


label_dataset= dataset[['Gamma']]
labels = np.array(label_dataset.values).astype('float32')
labels_vec = np.squeeze(np.asarray(labels))


buffer = io.BytesIO()
smac.write_numpy_to_dense_tensor(buffer, features, labels_vec)
buffer.seek(0)
bucket='forecastdemopolashbora'
key = 'linearregression'
boto3.resource('s3').Bucket(bucket).Object(key).upload_fileobj(buffer)
s3_training_data_location = 's3://{}/{}'.format(bucket, key)
output_location='s3://{}/'.format(bucket)
from sagemaker.amazon.amazon_estimator import get_image_uri
linear_container = get_image_uri(boto3.Session().region_name, 'linear-learner')

role = 'sagemaker_full_role'

sagemaker_session = sagemaker.Session()

linear = sagemaker.estimator.Estimator(linear_container,
Example #25
    def init_add(self, args):
        # command line options can overwrite config values
        if "agent_ip" in args:
            self.cloudagent_ip = args["agent_ip"]

        if 'agent_port' in args and args['agent_port'] is not None:
            self.cloudagent_port = args['agent_port']

        if 'cv_agent_ip' in args and args['cv_agent_ip'] is not None:
            self.cv_cloudagent_ip = args['cv_agent_ip']
        else:
            self.cv_cloudagent_ip = self.cloudagent_ip

        # Make sure all keys exist in dictionary
        if "file" not in args:
            args["file"] = None
        if "keyfile" not in args:
            args["keyfile"] = None
        if "payload" not in args:
            args["payload"] = None
        if "ca_dir" not in args:
            args["ca_dir"] = None
        if "incl_dir" not in args:
            args["incl_dir"] = None
        if "ca_dir_pw" not in args:
            args["ca_dir_pw"] = None

        # Set up accepted algorithms
        self.accept_tpm_hash_algs = config.get(
            'tenant', 'accept_tpm_hash_algs').split(',')
        self.accept_tpm_encryption_algs = config.get(
            'tenant', 'accept_tpm_encryption_algs').split(',')
        self.accept_tpm_signing_algs = config.get(
            'tenant', 'accept_tpm_signing_algs').split(',')

        # Set up PCR values
        tpm_policy = config.get('tenant', 'tpm_policy')
        if "tpm_policy" in args and args["tpm_policy"] is not None:
            tpm_policy = args["tpm_policy"]
        self.tpm_policy = TPM_Utilities.readPolicy(tpm_policy)
        logger.info(f"TPM PCR Mask from policy is {self.tpm_policy['mask']}")

        vtpm_policy = config.get('tenant', 'vtpm_policy')
        if "vtpm_policy" in args and args["vtpm_policy"] is not None:
            vtpm_policy = args["vtpm_policy"]
        self.vtpm_policy = TPM_Utilities.readPolicy(vtpm_policy)
        logger.info(f"TPM PCR Mask from policy is {self.vtpm_policy['mask']}")

        # Read command-line path string IMA whitelist
        wl_data = None
        if "ima_whitelist" in args and args["ima_whitelist"] is not None:

            # Auto-enable IMA (OR the IMA PCR bit into the mask)
            self.tpm_policy['mask'] = "0x%X" % (
                int(self.tpm_policy['mask'], 0) | (1 << common.IMA_PCR))

            if type(args["ima_whitelist"]) in [str, str]:
                if args["ima_whitelist"] == "default":
                    args["ima_whitelist"] = config.get('tenant',
                                                       'ima_whitelist')
                wl_data = ima.read_whitelist(args["ima_whitelist"])
            elif type(args["ima_whitelist"]) is list:
                wl_data = args["ima_whitelist"]
            else:
                raise UserError("Invalid whitelist provided")

        # Read command-line path string IMA exclude list
        excl_data = None
        if "ima_exclude" in args and args["ima_exclude"] is not None:
            if type(args["ima_exclude"]) in [str, str]:
                if args["ima_exclude"] == "default":
                    args["ima_exclude"] = config.get('tenant',
                                                     'ima_excludelist')
                excl_data = ima.read_excllist(args["ima_exclude"])
            elif type(args["ima_exclude"]) is list:
                excl_data = args["ima_exclude"]
            else:
                raise UserError("Invalid exclude list provided")

        # Set up IMA
        if TPM_Utilities.check_mask(self.tpm_policy['mask'],common.IMA_PCR) or \
            TPM_Utilities.check_mask(self.vtpm_policy['mask'],common.IMA_PCR):

            # Process IMA whitelists
            self.ima_whitelist = ima.process_whitelists(wl_data, excl_data)

        # if none
        if (args["file"] is None and args["keyfile"] is None
                and args["ca_dir"] is None):
            raise UserError(
                "You must specify one of -k, -f, or --cert to specify the key/contents to be securely delivered to the agent"
            )

        if args["keyfile"] is not None:
            if args["file"] is not None or args["ca_dir"] is not None:
                raise UserError(
                    "You must specify one of -k, -f, or --cert to specify the key/contents to be securely delivered to the agent"
                )

            # read the keys in
            if type(args["keyfile"]) is dict and "data" in args["keyfile"]:
                if type(args["keyfile"]["data"]) is list and len(
                        args["keyfile"]["data"]) == 1:
                    keyfile = args["keyfile"]["data"][0]
                    if keyfile is None:
                        raise UserError("Invalid key file contents")
                    f = io.StringIO(keyfile)
                else:
                    raise UserError("Invalid key file provided")
            else:
                f = open(args["keyfile"], 'r')
            self.K = base64.b64decode(f.readline())
            self.U = base64.b64decode(f.readline())
            self.V = base64.b64decode(f.readline())
            f.close()

            # read the payload in (opt.)
            if type(args["payload"]) is dict and "data" in args["payload"]:
                if type(args["payload"]["data"]) is list and len(
                        args["payload"]["data"]) > 0:
                    self.payload = args["payload"]["data"][0]
            else:
                if args["payload"] is not None:
                    f = open(args["payload"], 'r')
                    self.payload = f.read()
                    f.close()

        if args["file"] is not None:
            if args["keyfile"] is not None or args["ca_dir"] is not None:
                raise UserError(
                    "You must specify one of -k, -f, or --cert to specify the key/contents to be securely delivered to the agent"
                )

            if type(args["file"]) is dict and "data" in args["file"]:
                if type(args["file"]["data"]) is list and len(
                        args["file"]["data"]) > 0:
                    contents = args["file"]["data"][0]
                    if contents is None:
                        raise UserError("Invalid file payload contents")
                else:
                    raise UserError("Invalid file payload provided")
            else:
                with open(args["file"], 'r') as f:
                    contents = f.read()
            ret = user_data_encrypt.encrypt(contents)
            self.K = ret['k']
            self.U = ret['u']
            self.V = ret['v']
            self.payload = ret['ciphertext']

        if args["ca_dir"] is None and args["incl_dir"] is not None:
            raise UserError(
                "--include option is only valid when used with --cert")
        if args["ca_dir"] is not None:
            if args["file"] is not None or args["keyfile"] is not None:
                raise UserError(
                    "You must specify one of -k, -f, or --cert to specify the key/contents to be securely delivered to the agent"
                )
            if args["ca_dir"] == 'default':
                args["ca_dir"] = common.CA_WORK_DIR

            if "ca_dir_pw" in args and args["ca_dir_pw"] is not None:
                ca_util.setpassword(args["ca_dir_pw"])

            if not os.path.exists(args["ca_dir"]) or not os.path.exists(
                    "%s/cacert.crt" % args["ca_dir"]):
                logger.warning(" CA directory does not exist.  Creating...")
                ca_util.cmd_init(args["ca_dir"])

            if not os.path.exists("%s/%s-private.pem" %
                                  (args["ca_dir"], self.agent_uuid)):
                ca_util.cmd_mkcert(args["ca_dir"], self.agent_uuid)

            cert_pkg, serial, subject = ca_util.cmd_certpkg(
                args["ca_dir"], self.agent_uuid)

            # support revocation
            if not os.path.exists(
                    "%s/RevocationNotifier-private.pem" % args["ca_dir"]):
                ca_util.cmd_mkcert(args["ca_dir"], "RevocationNotifier")
            rev_package, _, _ = ca_util.cmd_certpkg(args["ca_dir"],
                                                    "RevocationNotifier")

            # extract public and private keys from package
            sf = io.BytesIO(rev_package)
            with zipfile.ZipFile(sf) as zf:
                privkey = zf.read("RevocationNotifier-private.pem")
                cert = zf.read("RevocationNotifier-cert.crt")

            # put the cert of the revoker into the cert package
            sf = io.BytesIO(cert_pkg)
            with zipfile.ZipFile(sf, 'a',
                                 compression=zipfile.ZIP_STORED) as zf:
                zf.writestr('RevocationNotifier-cert.crt', cert)

                # add additional files to zip
                if args["incl_dir"] is not None:
                    if type(args["incl_dir"]) is dict and "data" in args[
                            "incl_dir"] and "name" in args["incl_dir"]:
                        if type(args["incl_dir"]["data"]) is list and type(
                                args["incl_dir"]["name"]) is list:
                            if len(args["incl_dir"]["data"]) != len(
                                    args["incl_dir"]["name"]):
                                raise UserError("Invalid incl_dir provided")
                            for i in range(len(args["incl_dir"]["data"])):
                                zf.writestr(
                                    os.path.basename(
                                        args["incl_dir"]["name"][i]),
                                    args["incl_dir"]["data"][i])
                    else:
                        if os.path.exists(args["incl_dir"]):
                            files = next(os.walk(args["incl_dir"]))[2]
                            for filename in files:
                                with open(
                                        "%s/%s" % (args["incl_dir"], filename),
                                        'rb') as f:
                                    zf.writestr(os.path.basename(f.name),
                                                f.read())
                        else:
                            logger.warning(
                                f'Specified include directory {args["incl_dir"]} does not exist. Skipping...'
                            )

            cert_pkg = sf.getvalue()

            # put the private key into the data to be sent to the CV
            self.revocation_key = privkey

            # encrypt up the cert package
            ret = user_data_encrypt.encrypt(cert_pkg)
            self.K = ret['k']
            self.U = ret['u']
            self.V = ret['v']
            self.metadata = {'cert_serial': serial, 'subject': subject}
            self.payload = ret['ciphertext']

        if self.payload is not None and len(self.payload) > config.getint(
                'tenant', 'max_payload_size'):
            raise UserError("Payload size %s exceeds max size %d" % (len(
                self.payload), config.getint('tenant', 'max_payload_size')))
Example #26
    def process_hook(self, cmd, plugin, tar):
        if cmd['type'] == 'AttachData':
            info = tarfile.TarInfo(os.path.join(plugin, cmd['name']))
            info.size = len(cmd['data'])
            tar.addfile(
                info,
                io.BytesIO(
                    cmd['data'] if isinstance(cmd['data'], bytes) else cmd['data'].encode('utf-8')
                )
            )

        if cmd['type'] == 'AttachRPC':
            try:
                result = self.dispatcher.call_sync(cmd['rpc'], *cmd['args'])
                if hasattr(result, '__next__'):
                    result = list(result)
            except RpcException as err:
                self.add_warning(TaskWarning(
                    err.code,
                    f'{plugin}: Cannot add output of {cmd["rpc"]} call, error: {err.message}'
                ))
            else:
                data = dumps(result, debug=True, indent=4)
                info = tarfile.TarInfo(os.path.join(plugin, cmd['name']))
                info.size = len(data)
                tar.addfile(
                    info,
                    io.BytesIO(
                        data if isinstance(data, bytes) else data.encode('utf-8')
                    )
                )

        if cmd['type'] == 'AttachCommandOutput':
            try:
                out, _ = system(*cmd['command'], shell=cmd['shell'], decode=cmd['decode'], merge_stderr=True)
            except SubprocessException as err:
                out = 'Exit code: {0}\n'.format(err.returncode)
                if cmd['decode']:
                    out += 'Output:\n:{0}'.format(err.out)

            info = tarfile.TarInfo(os.path.join(plugin, cmd['name']))
            info.size = len(out)
            tar.addfile(
                info,
                io.BytesIO(out if isinstance(out, bytes) else out.encode('utf-8'))
            )

        if cmd['type'] in ('AttachDirectory', 'AttachFile'):
            try:
                tar.add(
                    cmd['path'],
                    arcname=os.path.join(plugin, cmd['name']),
                    recursive=cmd.get('recursive')
                )
            except OSError as err:
                self.add_warning(TaskWarning(
                    err.errno,
                    '{0}: Cannot add file {1}, error: {2}'.format(plugin, cmd['path'], err.strerror)
                ))

                logger.error(
                    "Error occurred while adding {0} to the tarfile for plugin: {1}".format(cmd['path'], plugin),
                    exc_info=True
                )
Example #27
    def test_different_interfaces(self):
        """
        Exercise the situation where we have the same qualified name
        in two different CompilationUnits on save/load.
        """
        @torch.jit.interface
        class MyInterface(object):
            def bar(self, x):
                # type: (Tensor) -> Tensor
                pass

        @torch.jit.script
        class ImplementInterface(object):
            def __init__(self):
                pass

            def bar(self, x):
                return x

        class Foo(torch.nn.Module):
            __annotations__ = {"interface": MyInterface}

            def __init__(self):
                super().__init__()
                self.interface = ImplementInterface()

            def forward(self, x):
                return self.interface.bar(x)

        first_script_module = torch.jit.script(Foo())
        first_saved_module = io.BytesIO()
        torch.jit.save(first_script_module, first_saved_module)
        first_saved_module.seek(0)

        clear_class_registry()

        @torch.jit.interface
        class MyInterface(object):
            def not_bar(self, x):
                # type: (Tensor) -> Tensor
                pass

        @torch.jit.script  # noqa: F811
        class ImplementInterface(object):  # noqa: F811
            def __init__(self):
                pass

            def not_bar(self, x):
                return x

        class Foo(torch.nn.Module):
            __annotations__ = {"interface": MyInterface}

            def __init__(self):
                super().__init__()
                self.interface = ImplementInterface()

            def forward(self, x):
                return self.interface.not_bar(x)

        second_script_module = torch.jit.script(Foo())
        second_saved_module = io.BytesIO()
        torch.jit.save(torch.jit.script(Foo()), second_saved_module)
        second_saved_module.seek(0)

        clear_class_registry()

        self.assertEqual(first_script_module._c.qualified_name,
                         second_script_module._c.qualified_name)

        class ContainsBoth(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.add_module("second", torch.jit.load(second_saved_module))
                self.add_module("first", torch.jit.load(first_saved_module))

            def forward(self, x):
                x = self.first(x)
                x = self.second(x)
                return x

        sm = torch.jit.script(ContainsBoth())
        contains_both = io.BytesIO()
        torch.jit.save(sm, contains_both)
        contains_both.seek(0)
        sm = torch.jit.load(contains_both)
Example #28
0
 def test_save_xml_with_invalid_characters(self):
     # Don't crash when saving files that have invalid xml characters in their path
     self.objects[0].name = "foo\x19"
     self.results.save_to_xml(io.BytesIO())  # don't crash
Example #29
    def test_many_collisions(self):
        class MyCoolNamedTuple(NamedTuple):
            a: int

        @torch.jit.interface
        class MyInterface(object):
            def bar(self, x):
                # type: (Tensor) -> Tensor
                pass

        @torch.jit.script
        class ImplementInterface(object):
            def __init__(self):
                pass

            def bar(self, x):
                return x

        def lol(x):
            return x

        class Foo(torch.nn.Module):
            interface: MyInterface

            def __init__(self):
                super().__init__()
                self.foo = torch.nn.Linear(2, 2)
                self.bar = torch.nn.Linear(2, 2)
                self.interface = ImplementInterface()

            def forward(self, x):
                x = self.foo(x)
                x = self.bar(x)
                x = lol(x)
                x = self.interface.bar(x)

                return x, MyCoolNamedTuple(a=5)

        first_script_module = torch.jit.script(Foo())
        first_saved_module = io.BytesIO()
        torch.jit.save(first_script_module, first_saved_module)
        first_saved_module.seek(0)

        clear_class_registry()

        @torch.jit.interface
        class MyInterface(object):
            def not_bar(self, x):
                # type: (Tensor) -> Tensor
                pass

        @torch.jit.script  # noqa F811
        class ImplementInterface(object):  # noqa F811
            def __init__(self):
                pass

            def not_bar(self, x):
                return x

        def lol(x):  # noqa F811
            return "asdofij"

        class MyCoolNamedTuple(NamedTuple):  # noqa F811
            a: str

        class Foo(torch.nn.Module):
            interface: MyInterface

            def __init__(self):
                super().__init__()
                self.foo = torch.nn.Linear(2, 2)
                self.interface = ImplementInterface()

            def forward(self, x):
                x = self.foo(x)
                self.interface.not_bar(x)
                x = lol(x)
                return x, MyCoolNamedTuple(a="hello")

        second_script_module = torch.jit.script(Foo())
        second_saved_module = io.BytesIO()
        torch.jit.save(second_script_module, second_saved_module)
        second_saved_module.seek(0)

        clear_class_registry()

        self.assertEqual(first_script_module._c.qualified_name,
                         second_script_module._c.qualified_name)

        class ContainsBoth(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.add_module("second", torch.jit.load(second_saved_module))
                self.add_module("first", torch.jit.load(first_saved_module))

            def forward(self, x):
                x, named_tuple_1 = self.first(x)
                x, named_tuple_2 = self.second(x)
                return len(x + named_tuple_2.a) + named_tuple_1.a

        sm = torch.jit.script(ContainsBoth())
        contains_both = io.BytesIO()
        torch.jit.save(sm, contains_both)
        contains_both.seek(0)
        sm = torch.jit.load(contains_both)
Example #30
def restricted_loads(s):
    """Helper function analogous to pickle.loads()"""
    return RestrictedUnpickler(io.BytesIO(s)).load()
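RestrictedUnpickler itself is not shown in this example. A minimal sketch in the spirit of the Python pickle documentation, assuming only a small allow-list of builtins should be reachable, is:

import builtins
import pickle

safe_builtins = {'range', 'complex', 'min', 'max', 'sum'}  # example allow-list

class RestrictedUnpickler(pickle.Unpickler):
    def find_class(self, module, name):
        # Permit only the allow-listed names from builtins.
        if module == "builtins" and name in safe_builtins:
            return getattr(builtins, name)
        # Everything else is refused outright.
        raise pickle.UnpicklingError(
            "global '%s.%s' is forbidden" % (module, name))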