def download_document(self, model_name=None, res_id=0, **kw):
    attachment_ids = request.env['ir.attachment'].search([
        ('res_model', '=', model_name),
        ('res_id', '=', res_id)
    ])
    file_dict = {}
    if attachment_ids:
        for attachment_id in attachment_ids:
            file_store = attachment_id.store_fname
            if file_store:
                file_name = attachment_id.name
                file_path = attachment_id._full_path(file_store)
                file_dict["%s:%s" % (file_store, file_name)] = dict(
                    path=file_path, name=file_name)
        zip_filename = datetime.now()
        zip_filename = "%s.zip" % zip_filename
        bitIO = BytesIO()
        zip_file = zipfile.ZipFile(bitIO, "w", zipfile.ZIP_DEFLATED)
        for file_info in file_dict.values():
            zip_file.write(file_info["path"], file_info["name"])
        zip_file.close()
        return request.make_response(
            bitIO.getvalue(),
            headers=[('Content-Type', 'application/x-zip-compressed'),
                     ('Content-Disposition', content_disposition(zip_filename))])
    else:
        return request.make_response(
            json.dumps({
                "error": "Attachments not found",
                "message": "There are no attachments",
                "code": 404
            }),
            headers={'Content-Type': 'application/json'})
def __init__(self, session, shell, command):
    self.stat, self.o_std, self.e_std = None, None, None
    self.o_stream = BytesIO()
    self.e_stream = BytesIO()
    self.session = session
    self.exec_command = command
    self.shell = shell
def export_zip_file(request, zipname, filename, content):
    from io import BytesIO

    in_memory = BytesIO()
    zip_file = ZipFile(in_memory, "a")
    zip_file.writestr(filename, content)
    # fix for Linux zip files read in Windows
    for file in zip_file.filelist:
        file.create_system = 0
    zip_file.close()
    response = HttpResponse(content_type="application/zip")
    response["Content-Disposition"] = "attachment; filename={0}.zip".format(zipname)
    in_memory.seek(0)
    response.write(in_memory.read())
    return response
def CreateConsultantInfo():
    output = BytesIO()
    workbook = xlsxwriter.Workbook(output)
    worksheet_s = workbook.add_worksheet("ConsultantInfo")
    consultants = Consultant.objects.all()
    worksheet_s.write(0, 1, u"First Name")
    worksheet_s.write(0, 0, u"Last Name")
    worksheet_s.write(0, 2, u"Email")
    worksheet_s.write(0, 3, u"Alternate Email")
    worksheet_s.write(0, 4, u"Day Phone")
    worksheet_s.write(0, 5, u"Evening Phone")
    worksheet_s.write(0, 6, u"Text")
    worksheet_s.write(0, 7, u"Skype")
    worksheet_s.write(0, 8, u"Rate")
    for idx, data in enumerate(consultants):
        row = 1 + idx
        worksheet_s.write(row, 0, data.last_name)
        worksheet_s.write(row, 1, data.first_name)
        worksheet_s.write(row, 2, data.email)
        worksheet_s.write(row, 3, data.alternate_email)
        worksheet_s.write(row, 4, data.day_phone)
        worksheet_s.write(row, 5, data.evening_phone)
        worksheet_s.write(row, 6, data.text_phone)
        worksheet_s.write(row, 7, data.skype)
        worksheet_s.write(row, 8, data.payment)
    workbook.close()
    xlsx_data = output.getvalue()  # xlsx_data contains the Excel file
    return xlsx_data
def scale_img(f, name, max_px, dim):
    try:
        img = Image.open(f, 'r')
    except IOError:
        raise ValueError('invalid image file')
    (w, h) = img.size
    if dim == 'h':
        if h > max_px:
            w = max_px * w // h
            h = max_px
        else:
            return f
    elif dim == 'both':
        if w > max_px or h > max_px:
            if w > h:
                h = max_px * h // w
                w = max_px
            else:
                w = max_px * w // h
                h = max_px
        else:
            return f
    scaled_img = img.resize((w, h), Image.ANTIALIAS)
    scaled_buffer = BytesIO()
    scaled_img.save(scaled_buffer, 'PNG')
    scaled_f = File(scaled_buffer, name=name + '.png')
    scaled_f._set_size(len(scaled_buffer.getvalue()))
    return scaled_f
def store_text(self, content, storage_path):
    storage_path = str(storage_path).lstrip("/")
    content = BytesIO(content.encode("utf-8"))
    info = tarfile.TarInfo(storage_path)
    info.size = self._len(content)
    info.mtime = int(datetime.datetime.now().strftime("%s"))
    self.tar.addfile(info, content)
def download_documents(self, tab_id, rec_id, model_name, field_name,
                       field_attach, **kw):
    new_tab = ast.literal_eval(tab_id)
    new_rec = ast.literal_eval(rec_id)
    attachment_ids = request.env['ir.attachment'].sudo().search([
        ('id', 'in', new_tab)
    ])
    rec_ids = request.env[model_name].sudo().search([('id', 'in', new_rec)])
    file_dict = {}
    for attachment_id in attachment_ids:
        for rec in rec_ids:
            if attachment_id in rec[field_attach]:
                file_store = attachment_id.store_fname
                if file_store:
                    file_name = '{}/{}/{}'.format(rec.create_uid.login,
                                                  rec[field_name],
                                                  attachment_id.name)
                    file_path = attachment_id._full_path(file_store)
                    file_dict["%s:%s" % (file_name, file_path)] = dict(
                        name=file_name, path=file_path)
    zip_filename = datetime.now()
    zip_filename = "%s.zip" % zip_filename
    bitIO = BytesIO()
    zip_file = zipfile.ZipFile(bitIO, "w", zipfile.ZIP_DEFLATED)
    for file_info in file_dict.values():
        zip_file.write(file_info["path"], file_info["name"])
    zip_file.close()
    return request.make_response(bitIO.getvalue(), headers=[
        ('Content-Type', 'application/x-zip-compressed'),
        ('Content-Disposition', content_disposition(zip_filename))
    ])
def download_document(self, tab_id, **kw):
    new_tab = ast.literal_eval(tab_id)
    attachment_ids = request.env['ir.attachment'].search([('id', 'in', new_tab)])
    file_dict = {}
    for attachment_id in attachment_ids:
        file_store = attachment_id.store_fname
        if file_store:
            file_name = attachment_id.name
            file_path = attachment_id._full_path(file_store)
            file_dict["%s:%s" % (file_store, file_name)] = dict(
                path=file_path, name=file_name)
    zip_filename = datetime.now()
    zip_filename = "%s.zip" % zip_filename
    bitIO = BytesIO()
    zip_file = zipfile.ZipFile(bitIO, "w", zipfile.ZIP_DEFLATED)
    for file_info in file_dict.values():
        zip_file.write(file_info["path"], file_info["name"])
    zip_file.close()
    return request.make_response(bitIO.getvalue(), headers=[
        ('Content-Type', 'application/x-zip-compressed'),
        ('Content-Disposition', content_disposition(zip_filename))
    ])
def get_qrimg():
    img_buf = BytesIO()
    img = random_qr()
    img.save(img_buf)
    img_buf.seek(0)
    img_str = base64.b64encode(img_buf.getvalue()).decode('ascii')
    img_str = "data:image/png;base64," + img_str
    return render_template('qrreloj.html', qr_image=img_str)
def write_stream(self, stream):
    file_name = self.path.split('/')[-1]
    sio = BytesIO()
    shutil.copyfileobj(stream, sio)
    sio.seek(0)
    ret = self.client.folder(self.id).upload_stream(sio, file_name=file_name)
    self.id = ret.id
    self.cache.add(self.path, ret.id, ret.type)
    return self
def unpack(self, buf):
    f = BytesIO(buf)
    line = f.readline().decode("ascii", "ignore")
    l = line.strip().split()
    if len(l) != 1:
        raise dpkt.UnpackError('invalid message: %r' % line)
    if l[0] not in self.__methods:
        raise dpkt.UnpackError('invalid protocol method: %r' % l[0])
    self.method = l[0]
    Message.unpack(self, f.read())
def write(self, path, stream):
    full_path = self.get_full_path(path)
    logger.info('write:path="{}", full_path="{}"'.format(path, full_path))
    bio = BytesIO()
    shutil.copyfileobj(stream, bio)
    bio.seek(0)
    data = bio.read()
    create_path(self.client, full_path)
    response = self.client.write_file_content(full_path, data)
    logger.info("write:response={}".format(response))
def test_get_image(self):
    directory = os.path.dirname(__file__)
    path = "../resources/spotseeker/file/api/v1/spot/20/image/1"
    mock_path = os.path.join(directory, path)
    with open(mock_path, "rb") as f:
        expected_img = Image.open(BytesIO(bytearray(f.read())))
    spotseeker = Spotseeker()
    response, content = spotseeker.get_spot_image(20, 1)
    byte_img = bytearray(response.data)
    img = Image.open(BytesIO(byte_img))
    self.assertEqual(img, expected_img)
def get_raw_img(self):
    headers = {
        'User-Agent': ('Mozilla/5.0 (Windows NT 6.1; WOW64) '
                       'AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/54.0.2840.71 Safari/537.36')
    }
    r_bg = url_requests.get(self.bg_url, headers=headers, proxies=proxies)
    r_fullbg = url_requests.get(self.fullbg_url, headers=headers,
                                proxies=proxies)
    raw_chunk_img = BytesIO(r_bg.content)
    raw_source_img = BytesIO(r_fullbg.content)
    return raw_source_img, raw_chunk_img
def write(self, path, stream):
    """Write the given stream to the object denoted by path."""
    full_path = self.get_full_path(path)
    logger.info('write:path="{}", full_path="{}"'.format(path, full_path))
    bio = BytesIO()
    shutil.copyfileobj(stream, bio)
    bio.seek(0)
    base_path, file_name = os.path.split(full_path)
    directory_id = self.session.create_directory_from_path(base_path)
    self.session.googledrive_upload(file_name, bio, parent_id=directory_id)
def CreateClientRoster():
    output = BytesIO()
    workbook = xlsxwriter.Workbook(output)
    worksheet_s = workbook.add_worksheet("Roster")
    worksheet_s.write(0, 0, u"Client Last Name")
    worksheet_s.write(0, 1, u"Client First Name")
    worksheet_s.write(0, 2, u"Client email")
    worksheet_s.write(0, 3, u"Year")
    worksheet_s.write(0, 4, u"Lead First Name")
    worksheet_s.write(0, 5, u"Lead Last Name")
    worksheet_s.write(0, 6, u"Paid")
    worksheet_s.write(0, 7, u"Sh1")
    worksheet_s.write(0, 8, u"Sh2")
    worksheet_s.write(0, 9, u"Sh3")
    worksheet_s.write(0, 10, u"Sh4")
    worksheet_s.write(0, 11, u"Sh5")
    worksheet_s.write(0, 12, u"Sh6")
    worksheet_s.write(0, 13, u"Comments")
    service_list = Service.objects.all()  # filter(start_date__range=[str(start_date), str(end_date)])
    admission_list = AddmissionsService.objects.all()  # filter(start_date__range=[str(start_date), str(end_date)])
    result_list = list(chain(service_list, admission_list))
    for idx, data in enumerate(result_list):
        row = 1 + idx
        school_col = 7
        worksheet_s.write(row, 0, data.client.last_name)
        worksheet_s.write(row, 1, data.client.first_name)
        worksheet_s.write(row, 2, data.client.email)
        worksheet_s.write(row, 3, u"??????")
        worksheet_s.write(row, 4, data.provider.first_name)
        worksheet_s.write(row, 5, data.provider.last_name)
        worksheet_s.write(row, 6, data.provider.payment)
        try:
            for school in data.schools:
                worksheet_s.write(row, school_col, school.name)
                school_col += 1
        except Exception:
            pass
        worksheet_s.write(row, school_col, data.client.comments)
    workbook.close()
    xlsx_data = output.getvalue()  # xlsx_data contains the Excel file
    return xlsx_data
def http_content_transform(headers, content):
    if content is None or not content:
        return ''
    elif 'Content-Type' in headers:
        contentType = headers['Content-Type']
        if contentType.endswith("/json"):
            content_json = json.loads(
                content if isinstance(content, str) else content.decode('utf-8'))
            return '<pre>' + json.dumps(content_json, sort_keys=False, indent=2,
                                        separators=(',', ': ')) + '</pre>'
        elif contentType.endswith('/xml'):
            try:
                parser = etree.XMLParser(remove_blank_text=True)
                tree = etree.parse(
                    BytesIO(content) if isinstance(content, (bytes, bytearray))
                    else StringIO(content),
                    parser=parser)
                xml_bytes = etree.tostring(tree, pretty_print=True,
                                           xml_declaration=True)
                xml_str = xml_bytes.decode("utf-8")
                html_str = escape(xml_str)
                return '<pre>' + html_str + '</pre>'
            except Exception as e:
                # Swallow the exception thrown by the XMLParser and abandon XML.
                return '<xmp>' + str(e) + '</xmp>'
    return '<xmp>' + str(content) + '</xmp>'
def test_cloudwatch_logs_trigger(tracer_and_invocation_support, handler,
                                 mock_cloudwatch_logs_event, mock_context):
    _, handler = handler
    tracer, invocation_support, _ = tracer_and_invocation_support
    handler(mock_cloudwatch_logs_event, mock_context)
    execution_context = ExecutionContextManager.get()
    span = execution_context.recorder.get_spans()[0]
    assert lambda_event_utils.get_lambda_event_type(
        mock_cloudwatch_logs_event,
        mock_context) == lambda_event_utils.LambdaEventType.CloudWatchLogs
    compressed_data = base64.b64decode(
        mock_cloudwatch_logs_event['awslogs']['data'])
    decompressed_data = json.loads(
        str(GzipFile(fileobj=BytesIO(compressed_data)).read(), 'utf-8'))
    assert span.get_tag(constants.SpanTags['TRIGGER_DOMAIN_NAME']) == 'Log'
    assert span.get_tag(
        constants.SpanTags['TRIGGER_CLASS_NAME']) == 'AWS-CloudWatch-Log'
    assert span.get_tag(constants.SpanTags['TRIGGER_OPERATION_NAMES']) == [
        decompressed_data['logGroup']
    ]
    assert invocation_support.get_agent_tag(
        constants.SpanTags['TRIGGER_DOMAIN_NAME']) == 'Log'
    assert invocation_support.get_agent_tag(
        constants.SpanTags['TRIGGER_CLASS_NAME']) == 'AWS-CloudWatch-Log'
    assert invocation_support.get_agent_tag(
        constants.SpanTags['TRIGGER_OPERATION_NAMES']) == [
            decompressed_data['logGroup']
        ]
def get_checkCode(cookies):
    """Fetch the captcha with the homepage cookies and return the captcha string.

    params:
        cookies: cookies produced when visiting the homepage
    """
    url = 'http://www.hebscztxyxx.gov.cn/notice/captcha?preset=0'
    headers = {
        'Host': 'www.hebscztxyxx.gov.cn',
        'User-Agent': ('Mozilla/5.0 (Windows NT 6.1; WOW64) '
                       'AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/54.0.2840.71 Safari/537.36'),
        # 'Referer': 'http://www.hebscztxyxx.gov.cn/notice/search/'
        #            'popup_captcha',
    }
    response = url_requests.get(url, headers=headers, cookies=cookies,
                                timeout=10)
    f = BytesIO(response.content)
    image = Image.open(f)
    image.show()
    checkCode = raw_input('please input the checkCode: ')
    # checkCode = image_recognition(image, 'hebei', config='-psm 10 digits')
    return checkCode
def read(self, path, stream, limit):
    full_path = self.get_full_path(path)
    logger.info('read:full_path={}'.format(full_path))
    response = self.client.get_file_content(full_path)
    bio = BytesIO(response.content)
    shutil.copyfileobj(bio, stream)
def video_rec(self, video_name=None):
    if self.is_recording():
        return
    self.recording = True
    if video_name is None:
        video_index = self.get_next_photo_index()
        filename = VIDEO_PREFIX + str(video_index) + self._camera.VIDEO_FILE_EXT
        filename_thumb = VIDEO_PREFIX + str(video_index) + PHOTO_THUMB_SUFFIX + self._camera.PHOTO_FILE_EXT
    else:
        filename = VIDEO_PREFIX + video_name + self._camera.VIDEO_FILE_EXT
        filename_thumb = VIDEO_PREFIX + video_name + PHOTO_THUMB_SUFFIX + self._camera.PHOTO_FILE_EXT
    try:
        # remove previous file and reference in album
        os.remove(PHOTO_PATH + "/" + filename)
        self._photos.remove({"name": filename})
    except Exception:
        pass
    oft = open(PHOTO_PATH + "/" + filename_thumb, "wb")
    im_str = self._camera.get_image_jpeg()
    im_pil = PILImage.open(BytesIO(im_str))
    im_pil.resize(PHOTO_THUMB_SIZE).save(oft)
    self._photos.append({"name": filename})
    self.save_photo_metadata()
    self._camera.video_rec(PHOTO_PATH + "/" + filename)
    self.video_start_time = time.time()
    oft.close()
def get_checkCode(cookies):
    """Fetch the captcha with the homepage cookies and return the captcha string.

    params:
        cookies: cookies produced when visiting the homepage
    """
    url = 'http://gsxt.ynaic.gov.cn/notice/captcha?preset=0'
    headers = {
        'Host': 'gsxt.ynaic.gov.cn',
        'User-Agent': ('Mozilla/5.0 (Windows NT 6.1; WOW64) '
                       'AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/54.0.2840.71 Safari/537.36')
    }
    response = url_requests.get(url, headers=headers, cookies=cookies,
                                timeout=10)
    f = BytesIO(response.content)
    image = Image.open(f)
    # image.show()
    # checkCode = raw_input('please input the checkCode: ')
    checkCode = image_recognition(image, 'yunnan', config='-psm 7 character')
    # print checkCode
    return checkCode
def mapper(self, _, line):
    nx.set_node_attributes(self.G, 'activated', self.tmp)
    client = hdfs.client.Client("http://" + urlparse(line).netloc)
    if line[-1] != "#":
        with client.read(urlparse(line).path) as r:
            # with open(urlparse(line).path) as r:
            buf = BytesIO(r.read())
            if ".gz" in line:
                gzip_f = gzip.GzipFile(fileobj=buf)
                content = gzip_f.read()
                idx, values = self.runCascade(
                    cascade.actualCascade(StringIO.StringIO(content), self.G))
            else:
                idx, values = self.runCascade(
                    cascade.actualCascade(buf, self.G))
            df = pd.DataFrame(values, index=idx)
            result_user = df.drop_duplicates(subset='numberActivatedUsers',
                                             keep='first').set_index(
                ['numberActivatedUsers'], verify_integrity=True)
            result_act = df.drop_duplicates(subset='numberOfActivations',
                                            keep='first').set_index(
                ['numberOfActivations'], verify_integrity=True)
            yield "apple", {
                "file": line,
                "name": line.split("/")[-1],
                "result_user": result_user.loc[-1:].to_json(orient='records'),
                "result_act": result_act.loc[-1:].to_json(orient='records')
            }
def get_image(robotcookieid):
    url = "http://211.141.74.198:8081/aiccips/securitycode?0.7315294243537567"
    headers_img = {
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
        'Host': '211.141.74.198:8081',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
    }
    cookies = {'ROBOTCOOKIEID': robotcookieid}
    try:
        ImgHtml = Session.get(url=url, headers=headers_img, cookies=cookies,
                              timeout=140)
        file = BytesIO(ImgHtml.content)
        im = Image.open(file)
        return im
    except Exception:
        # retry on failure and return the retried result
        return get_image(robotcookieid)
def _UploadStamp(self):
    """Upload the 'stamp' (a json file containing metadata)."""
    # TODO: if this fails, raise an Exception that will stop execution
    stream = BytesIO(json.dumps(self._stamp._asdict()).encode('utf-8'))
    remote_path = self._MakeRemotePath('stamp.json')
    self._UploadStream(stream, remote_path)
    self._stamp_uploaded = True
    self._logger.info('Uploaded %s', remote_path)
def picture(request):
    user = request.user
    # profile_pictures = django_settings.MEDIA_ROOT + 'profile_pictures/'
    profile_pictures = 'profile_pictures/'
    # if not storage.exists(profile_pictures):
    #     storage.makedirs(profile_pictures)
    if request.method == 'POST':
        _picture = request.FILES['picture']
        user_str = request.user.username + '_' + str(request.user.id) + '.jpg'
        filename = profile_pictures + user_str
        with storage.open(filename, 'wb+') as destination:
            for chunk in _picture.chunks():
                destination.write(chunk)
        destination = storage.open(filename, 'rb+')
        im = Image.open(destination)
        width, height = im.size
        if width > 400:
            new_width = 400
            new_height = 300  # (height * 400) / width
            new_size = new_width, new_height
            im.thumbnail(new_size, Image.ANTIALIAS)
            sfile = BytesIO()
            im.save(sfile, format='JPEG')
            destination.close()
            destination = storage.open(filename, 'wb+')
            destination.write(sfile.getvalue())
            destination.close()
        if user.profile.account_flag == 3:
            user.profile.account_flag = 4
        user.profile.profile_picture = '/profile_pictures/' + user_str
        user.save()
        if user.profile.account_flag != 0:
            return redirect('production:password')
        return render(request, 'production/picture.html')
    print(user.profile.profile_picture)
    return render(request, 'production/picture.html')
def test_zipstream(self):
    output = BytesIO()
    for data in ZipStream(self.files):
        output.write(data)
    with ZipFile(output, 'r') as f:
        self.assertIsNone(f.testzip())
    with ZipFile(output, 'r') as f:
        infolist = f.infolist()
        self.assertEqual(len(infolist), 2)
        for ff in infolist:
            if ff.filename == self.unicode_seq:
                self.assertTrue(
                    ff.file_size == len(self.unicode_seq.encode()))
            else:
                self.assertTrue(ff.file_size == os.stat(
                    os.path.abspath(__file__)).st_size)
def doquery(self, query, num=100):
    self.query = query
    import os.path
    if os.path.exists(query + ".html"):
        self.content = open(query + ".html").read()
        return self.content
    qurl = self.url + query + '&num={}'.format(num)
    buffer = BytesIO()
    c = pycurl.Curl()
    c.setopt(c.URL, qurl)
    c.setopt(c.USERAGENT, useragent)
    c.setopt(c.WRITEFUNCTION, buffer.write)
    c.perform()
    c.close()
    self.content = buffer.getvalue().decode('UTF-8')
    return self.content
def CurlRequest(data, url='http://text-processing.com/api/sentiment/'):
    c = pycurl.Curl()
    buf = BytesIO()
    c.setopt(pycurl.URL, url)
    c.setopt(pycurl.WRITEFUNCTION, buf.write)
    post_data = {'text': data}
    post_field = urlencode(post_data)
    c.setopt(c.POSTFIELDS, post_field)
    c.perform()
    c.close()
    res = buf.getvalue().decode('UTF-8')
    if res is not None and res != "":
        d = json.loads(res)
        if 'label' in d:
            return d['label']
        else:
            return None
    else:
        return None
def SentimentAnalysis(data, url='http://text-processing.com/api/sentiment/'):
    c = pycurl.Curl()
    buf = BytesIO()
    c.setopt(pycurl.URL, url)
    c.setopt(pycurl.WRITEFUNCTION, buf.write)
    post_data = {'text': data}
    post_field = urlencode(post_data)
    c.setopt(c.POSTFIELDS, post_field)
    c.perform()
    c.close()
    res = buf.getvalue().decode('UTF-8')
    if res is not None and res != "":
        try:
            d = json.loads(res)
            if 'label' in d:
                return [d['label'], d['probability'][d['label']]]
        except Exception:
            pass
    else:
        return None
def WriteToExcel(excel_export_list):
    output = BytesIO()
    workbook = xlsxwriter.Workbook(output)
    worksheet_s = workbook.add_worksheet('Excel_Export_Sheet_name')
    worksheet_b = workbook.add_worksheet('Part Number List')
    # excel styles
    title = workbook.add_format({
        'bold': True,
        'font_size': 14,
        'align': 'center',
        'valign': 'vcenter'
    })
    header = workbook.add_format({
        'bg_color': '#F7F7F7',
        'color': 'black',
        'align': 'center',
        'valign': 'top',
        'border': 1
    })
    bold_header = workbook.add_format({
        'bold': True,
        'bg_color': '#F7F7F7',
        'color': 'black',
        'align': 'center',
        'valign': 'top',
        'border': 1
    })
    cell = workbook.add_format({
        'align': 'left',
        'valign': 'top',
        'text_wrap': True,
        'border': 1
    })
    bold_cell = workbook.add_format({
        'bold': True,
        'align': 'left',
        'valign': 'top',
        'text_wrap': True,
        'border': 1
    })
    cell_center = workbook.add_format({
        'align': 'center',
        'valign': 'top',
        'border': 1
    })
    # write header, this is row 1 in excel
    worksheet_s.write(0, 0, _(HEADER_ITEM_TXT), header)
    worksheet_s.write(0, 1, _(QTY_TXT), header)
    worksheet_s.write(0, 2, _(PART_NUM_TXT), header)
    worksheet_s.write(0, 3, _(NONFIO_SKU), header)
    worksheet_s.write(0, 4, _(DESC_TXT), header)
    worksheet_s.write(0, 5, _(COST_TXT), header)
    worksheet_s.write(0, 6, _(EX_COST_TXT), header)
    worksheet_s.write(0, 7, _(MSRP_TXT), bold_header)
    worksheet_s.write(0, 8, _(EX_MSRP_TXT), header)
    # column widths
    item_name_col_width = 20
    qty_col_width = 10
    part_num_col_width = 20
    nonfio_sku_col_width = 30
    desc_col_width = 80
    cost_col_width = 10
    ex_cost_col_width = 10
    msrp_col_width = 10
    ex_msrp_col_width = 10
    # add data into the table
    data_row = 1
    second_sheet_data_row = 0
    for sb in excel_export_list:
        if data_row != 1:
            for index in range(9):
                worksheet_s.write(data_row, index, '', cell)
            data_row += 1
        # this is for smartbuy row, row 2 in excel
        worksheet_s.write_string(data_row, 0, _(SMART_BUY_TXT), cell)
        if not sb.smartbuy_qty:
            sb.smartbuy_qty = ''
        worksheet_s.write(data_row, 1, sb.smartbuy_qty, cell)
        if not sb.smartbuy_part_number:
            sb.smartbuy_part_number = ''
        worksheet_s.write_string(data_row, 2, sb.smartbuy_part_number, bold_cell)
        worksheet_b.write_string(second_sheet_data_row, 0, sb.smartbuy_part_number, cell)
        second_sheet_data_row += 1
        if not sb.smartbuy_nonfio_sku:
            sb.smartbuy_nonfio_sku = ''
        worksheet_s.write_string(data_row, 3, sb.smartbuy_nonfio_sku, cell)
        if not sb.smartbuy_desc:
            sb.smartbuy_desc = ''
        worksheet_s.write_string(data_row, 4, sb.smartbuy_desc, cell)
        if not sb.smartbuy_cost:
            sb.smartbuy_cost = ''
        worksheet_s.write(data_row, 5, sb.smartbuy_cost, cell)
        if not sb.smartbuy_ex_cost:
            sb.smartbuy_ex_cost = ''
        worksheet_s.write(data_row, 6, sb.smartbuy_ex_cost, cell)
        if not sb.smartbuy_msrp:
            sb.smartbuy_msrp = ''
        worksheet_s.write(data_row, 7, sb.smartbuy_msrp, bold_cell)
        if not sb.smartbuy_ex_msrp:
            sb.smartbuy_ex_msrp = ''
        worksheet_s.write(data_row, 8, sb.smartbuy_ex_msrp, cell)
        # change column widths
        if sb.smartbuy_qty:
            worksheet_s.set_column('A:A', item_name_col_width)
        if sb.smartbuy_qty:
            worksheet_s.set_column('B:B', qty_col_width)
        if sb.smartbuy_qty:
            worksheet_s.set_column('C:C', part_num_col_width)
        if sb.smartbuy_qty:
            worksheet_s.set_column('D:D', nonfio_sku_col_width)
        if sb.smartbuy_qty:
            worksheet_s.set_column('E:E', desc_col_width)
        if sb.smartbuy_qty:
            worksheet_s.set_column('F:F', cost_col_width)
        if sb.smartbuy_qty:
            worksheet_s.set_column('G:G', ex_cost_col_width)
        if sb.smartbuy_qty:
            worksheet_s.set_column('H:H', msrp_col_width)
        if sb.smartbuy_qty:
            worksheet_s.set_column('I:I', ex_msrp_col_width)
        # for each smart buy data
    # change column widths
    worksheet_s.set_column('A:A', item_name_col_width)
    worksheet_s.set_column('B:B', qty_col_width)
    worksheet_s.set_column('C:C', part_num_col_width)
    worksheet_b.set_column('A:A', part_num_col_width)
    worksheet_s.set_column('D:D', nonfio_sku_col_width)
    worksheet_s.set_column('E:E', desc_col_width)
    worksheet_s.set_column('F:F', cost_col_width)
    worksheet_s.set_column('G:G', ex_cost_col_width)
    worksheet_s.set_column('H:H', msrp_col_width)
    worksheet_s.set_column('I:I', ex_msrp_col_width)
    # close workbook
    workbook.close()
    xlsx_data = output.getvalue()
    return xlsx_data
def download_directory(self):
    """
    Download the related directory as a zip file.
    This method is called by the Download button in the view.

    Todo:
        Reads and writes in external folders; all the behavior should be
        inside a try...except block
    """
    self.ensure_one()
    data_dir = config.filestore(self._cr.dbname)
    data_dir = os.path.abspath(data_dir)
    action = None
    # pylint: disable=locally-disabled, W0212
    for record in self:
        ira_ids = record.mapped('ir_attachment_ids')
        if record.directory or ira_ids:
            zipname = u'{}.zip'.format(record.name)
            in_memory = BytesIO()
            zipf = zipfile.ZipFile(in_memory, 'w', zipfile.ZIP_DEFLATED)
            if record.directory:
                record._zipdir(record.directory, zipf)
            _logger.debug(in_memory.getbuffer().nbytes)
            for item in ira_ids:
                zipf.write(
                    os.path.join(data_dir, Path(item.store_fname)),
                    os.path.join('ir_attachments', item.datas_fname)
                )
            zipf.close()
            datas = base64.b64encode(in_memory.getvalue())
            _logger.debug(u'zip size: %s', len(datas))
            values = {
                'name': zipname,
                'datas': datas,
                'datas_fname': zipname,
                'res_model': record._name,
                'res_id': record.id
            }
            if not record.zip_attachment_id:
                print('Creating')
                record.zip_attachment_id = \
                    record.zip_attachment_id.create(values)
            else:
                print('Writing')
                _id = record.zip_attachment_id.id
                record.zip_attachment_id.write(values)
            _id = record.zip_attachment_id.id
            _name = record.zip_attachment_id.name
            action = {
                'type': 'ir.actions.act_url',
                'url': DOWNLOAD_URL.format(id=_id, name=_name),
                'nodestroy': True,
                'target': 'new'
            }
    return action
    sys.exit(6)
else:
    sys.stderr.write('Need to force HTML or XHTML when reading from stdin.\n')
    sys.exit(4)

if encoding:
    contentType = '%s; charset=%s' % (contentType, encoding)

if fileName:
    inputHandle = open(fileName, "rb")
else:
    inputHandle = sys.stdin

data = inputHandle.read()

buf = BytesIO()
gzipper = gzip.GzipFile(fileobj=buf, mode='wb')
gzipper.write(data)
gzipper.close()
gzippeddata = buf.getvalue()
buf.close()

connection = None
response = None
status = 302
redirectCount = 0

url = service
if gnu:
    url = url + '?out=gnu'
else: