def gen_pdfs(username, apikey):
    """Yield PDFs: first a styled cover page, then one PDF per Wikipedia URL.

    Page numbering continues across the generated documents via an offset
    that is decremented by each conversion's page count.
    """
    logger.info('converting a cover page')
    cover_client = pdfcrowd.HtmlToPdfClient(username, apikey)
    cover_client.setMarginTop('3in')
    cover_client.setPageBackgroundColor('3366cc30')
    cover_markup = (
        '<center><h1>Wikipedia Article Demo</h1></center>'
        '<ul><li>multiple URLs and page orientations in a single PDF</li>'
        '<li>hyperlinks in a header</li>'
        '<li>page numbers in a footer</li></ul>'
        '<center><p>powered by <a href="https://pdfcrowd.com/">Pdfcrowd</a>, '
        '<i>{}</i></p></center>'
    )
    yield cover_client.convertString(cover_markup.format(datetime.now().date()))

    pages = (
        ('Main', 'https://www.wikipedia.org/'),
        ('Article:PDF', 'https://en.wikipedia.org/wiki/PDF'),
        ('Talk:PDF', 'https://en.wikipedia.org/wiki/Talk:PDF'),
    )
    offset = -1
    for title, url in pages:
        logger.info('converting %s', url)
        page_client = pdfcrowd.HtmlToPdfClient(username, apikey)
        page_client.setPageNumberingOffset(offset)
        page_client.setNoMargins(True)
        if title == 'Main':
            # Only the main page is rendered in landscape.
            page_client.setOrientation('landscape')
        page_client.setHeaderHtml(
            '<center><a href="{}">Wikipedia - {}</a></center>'.format(url, title))
        page_client.setFooterHtml(
            '<center>~ <span class="pdfcrowd-page-number"></span> ~</center>')
        page_client.setFailOnMainUrlError(True)
        pdf = page_client.convertUrl(url)
        offset -= page_client.getPageCount()
        yield pdf
def index(request):
    """Render the demo form (GET) or convert the rendered page to PDF (POST).

    On POST the page is re-rendered (optionally stripping the convert button),
    converted via Pdfcrowd, and streamed back as ``demo_django.pdf``.
    """
    if request.method != 'POST':
        # FIX: the original bound request.POST to the form even on GET, which
        # produces a bound-but-empty form with spurious validation errors.
        return render(request, 'index.html', {'form': TestForm()})
    form = TestForm(request.POST)
    try:
        # enter your Pdfcrowd credentials to the converter's constructor
        client = pdfcrowd.HtmlToPdfClient('your-username', 'your-apikey')
        logger.info('running Pdfcrowd HTML to PDF conversion')
        # set HTTP response headers
        response = HttpResponse(content_type='application/pdf')
        response['Cache-Control'] = 'max-age=0'
        response['Accept-Ranges'] = 'none'
        content_disp = 'attachment' if 'asAttachment' in request.POST else 'inline'
        response['Content-Disposition'] = content_disp + '; filename=demo_django.pdf'
        html = render_to_string(
            'index.html',
            {
                'form': form,
                # 'pdfcrowd-remove' marks elements that should be dropped
                # from the converted output.
                'pdfcrowd_remove': 'pdfcrowd-remove'
                if form.data.get('remove_convert_button') else ''
            })
        client.convertStringToStream(html, response)
        # send the generated PDF
        return response
    except pdfcrowd.Error as why:
        logger.error('Pdfcrowd Error: %s', why)
        return HttpResponse(why)
def go(username, key, command_line=True):
    """Establish a connection to the pdfcrowd server and iteratively convert
    all html factsheet documents into static PDFs.

    Args:
        username, key: Pdfcrowd API credentials.
        command_line: forwarded to gen_plots.select_delim to pick the
            platform-appropriate path delimiter.

    Raises:
        pdfcrowd.Error: re-raised after being reported on stderr.
    """
    delim = gen_plots.select_delim(command_line)
    districts, pdf_folders, html_folders = create_folders_districts(delim)
    try:
        # create the API client instance
        client = pdfcrowd.HtmlToPdfClient(username, key)
        print('Username and password valid, connected to pdfcrowd client')
        for html_path, pdf_folder in zip(html_folders, pdf_folders):
            # FIX: magic slice pdf_folder[-10:] replaced with str.endswith.
            if pdf_folder.endswith('Demography'):
                print('Converting demographic factsheets...')
                suffix = ' Demography'
            else:
                suffix = ' Trade'
                print('Converting trade factsheets...')
            for district in districts:
                html = html_path + delim + district + suffix + '.html'
                pdf = pdf_folder + delim + district + suffix + '.pdf'
                if not os.path.exists(pdf):
                    print(district)
                    client.convertFileToFile(html, pdf)
                    # sleep is used to not surpass the pdfcrowd request rate limit
                    time.sleep(10)
        # FIX: typo "coverting" -> "converting" in the status message.
        print('Finished converting files, script closing')
    except pdfcrowd.Error as why:
        # report the error
        sys.stderr.write('Pdfcrowd Error: {}\n'.format(why))
        # rethrow or handle the exception
        raise
def __init__(self):
    """Create the Pdfcrowd client configured for a square page of side ``self.size``."""
    credentials = (pdfcrowd_token.uname, pdfcrowd_token.key)
    self.client = pdfcrowd.HtmlToPdfClient(*credentials)
    # Square page: width and height are set to the same dimension.
    for apply_dimension in (self.client.setPageWidth, self.client.setPageHeight):
        apply_dimension(self.size)
def index(request):
    """Render ``review/review_index.html`` and stream it back as a PDF."""
    try:
        # Demo Pdfcrowd credentials; replace with real ones in production.
        converter = pdfcrowd.HtmlToPdfClient('demo',
                                             'ce544b6ea52a5621fb9d55f8b542d14d')
        print('Inside Try block')
        # Build the PDF response headers.
        response = HttpResponse(content_type='application/pdf')
        response['Cache-Control'] = 'max-age=0'
        response['Accept-Ranges'] = 'none'
        disposition = 'attachment' if 'asAttachment' in request.POST else 'inline'
        response['Content-Disposition'] = disposition + '; filename=demo_django.pdf'
        # Render the template and convert it straight into the response body.
        html = render_to_string('review/review_index.html')
        converter.convertStringToStream(html, response)
        return response
    except pdfcrowd.Error as why:
        return HttpResponse(why)
def printPDF(request):
    """Convert a group's grade-sheet HTML template to a PDF and redirect home.

    POST params: ``group``, ``typeVedom`` and, for progress-control sheets,
    ``countPoint``. Both sheet types differed only in the source HTML file
    name, so the conversion is factored into one helper.
    """
    import pdfcrowd
    import sys

    group = request.POST['group']
    type_vedom = request.POST['typeVedom']

    def _convert(html_name):
        # Shared conversion routine; re-raises pdfcrowd errors after reporting.
        try:
            client = pdfcrowd.HtmlToPdfClient(
                'RazValik', '2179080295fed93f1c1d97f96dd72818')
            path_to_html_file = (
                'D:/University/Диплом/ratmetr/templates/groups/{}.html'.format(
                    html_name))
            path_to_pdf_file = 'D:/University/Диплом/Материалы для диплома/vedom.pdf'
            client.convertFileToFile(path_to_html_file, path_to_pdf_file)
        except pdfcrowd.Error as why:
            sys.stderr.write('Pdfcrowd Error: {}\n'.format(why))
            raise

    if type_vedom == 'Экзаменационная ведомость':
        _convert(group)
        return redirect('index')
    elif type_vedom == 'Контроль текущей успеваемости':
        count_point = request.POST['countPoint']
        _convert('{}({})'.format(group, count_point))
        return redirect('index')
    # NOTE(review): as in the original, an unknown typeVedom returns None.
def pdfConverter():
    """Crawl javatpoint pages starting at ``url`` until ``end`` is reached,
    convert each page to a PDF, then merge them (with a blank separator page
    after each) into ``static/final_pdf/merged.pdf``.

    Reads ``url`` and ``end`` from the submitted form; always renders
    ``download.html`` (errors are reported best-effort).
    """
    url = request.form['url']
    endurl = request.form['end']
    # The stop marker is the path component of the final URL.
    end = endurl.replace("https://www.javatpoint.com/", "")
    try:
        client = pdfcrowd.HtmlToPdfClient('demo',
                                          'ce544b6ea52a5621fb9d55f8b542d14d')
        client.convertUrlToFile(url, 'static/pdf/1.pdf')
        counter = 2
        while True:  # FIX: was `while (noti == True)` with a flag variable
            res = requests.get(url)
            soup = bs4.BeautifulSoup(res.text, "lxml")
            data = soup.select(".next", href=True)
            if not data:
                # FIX: a page without a ".next" link used to raise IndexError.
                break
            nextpoint = data[0]['href']
            url = "https://www.javatpoint.com/" + nextpoint
            if nextpoint == end:
                break
            try:
                client.convertUrlToFile(url, 'static/pdf/' + f'{counter}.pdf')
                counter += 1
            except pdfcrowd.Error as why:
                sys.stderr.write('Pdfcrowd Error: {}\n'.format(why))
                raise
        # Merge every converted page, inserting a blank page after each.
        filepath_list = os.listdir('static/pdf/')
        pdf = Pdf.new()
        for file in filepath_list:
            if file.endswith('.pdf'):
                src = Pdf.open('static/pdf/' + file)
                pdf.pages.extend(src.pages)
                src = Pdf.open('static/final_pdf/blank.pdf')
                pdf.pages.extend(src.pages)
        pdf.save('static/final_pdf/merged.pdf')
        # Clean up the per-page PDFs once merged.
        for file in filepath_list:
            if file.endswith('.pdf'):
                os.remove('static/pdf/' + file)
    except Exception as e:
        # Best-effort: report the failure but still render the download page.
        print(e)
    return render_template('download.html')
def correlation_30_companies(df):
    """Plot the pairwise correlation matrix of 30 companies' closing prices
    (first 15 rows of March 2021) plus the price series themselves, save both
    as HTML, and convert the HTML files to PDF via Pdfcrowd."""
    company_list = [
        500003, 500008, 500009, 500010, 500012, 500013, 500014, 500016,
        500020, 500023, 500027, 500028, 500031, 500032, 500033, 500034,
        500038, 500039, 500040, 500041, 500042, 500043, 500048, 500049,
        500052, 500055, 500060, 500067, 500125, 500126
    ]
    company_names = [dict_companies[str(code)] for code in company_list]
    closes = {}
    for code in company_list:
        selection = df[(df['company'] == int(code))
                       & (df['year'] == '2021')
                       & (df['month'] == 'March')]['Close Price']
        closes[code] = selection.tolist()[:15]
    df_30 = pd.DataFrame(closes, columns=list(closes.keys()))
    corr_matrix = df_30.corr()
    fig1 = px.imshow(corr_matrix, color_continuous_scale=["red", "green"])
    fig1.show()
    fig1.write_html("correlation_30_companies.html")
    # Flatten the per-company series into long-form rows for the line plot.
    rows = []
    for code, prices in closes.items():
        for day, price in enumerate(prices, start=1):
            rows.append([dict_companies[str(code)], day, price])
    df5 = pd.DataFrame(rows, columns=['company', 'day', 'stock_price'])
    fig2 = px.line(df5, x="day", y="stock_price", color='company')
    fig2.show()
    fig2.write_html("closing_stock_price.html")
    client = pdfcrowd.HtmlToPdfClient('demo', 'ce544b6ea52a5621fb9d55f8b542d14d')
    client.convertFileToFile("closing_stock_price.html",
                             "closing_stock_price.pdf")
    client.convertFileToFile("correlation_30_companies.html",
                             "correlation_30_companies.pdf")
def test(request):
    """Convert uretekbelarus.com to a local example PDF, then render the
    calculator invoice template as a PDF response."""
    calculator = Calculator(request)
    element_form = ElementsForm()
    parameter_form = ParametersForm()
    api = pdfcrowd.HtmlToPdfClient("evgeniy111",
                                   "9fcddf22e94642a39acf43b932382beb")
    api.convertUrlToFile(
        'http://uretekbelarus.com',
        '/Users/apple/PycharmProjects/Uretekweb/uretek/static/constra-free/themes/example.pdf')
    print(api)
    context = {
        'pagesize': 'A4',
        'ElementsForm': element_form,
        'ParametersForm': parameter_form,
        'calculator': calculator,
    }
    return render_to_pdf('calculator/invoice.html', context)
def main():
    """Convert 34 generated URLs into numbered PDF files (1.pdf .. 34.pdf)."""
    for page in range(1, 35):
        try:
            converter = pdfcrowd.HtmlToPdfClient(LOGIN, API_KEY)
            converter.convertUrlToFile(make_url(page), '{}.pdf'.format(page))
        except pdfcrowd.Error as why:
            # Report, then let the caller decide how to handle it.
            sys.stderr.write('Pdfcrowd Error: {}\n'.format(why))
            raise
def html_to_pdf(self, html, filename):
    """Convert *html* (a file path) to ``<filename>.pdf`` on an A2 page
    with no header and no margins; re-raise Pdfcrowd errors after reporting."""
    try:
        print(filename)
        filename += '.pdf'
        converter = pdfcrowd.HtmlToPdfClient('demo',
                                             'ce544b6ea52a5621fb9d55f8b542d14d')
        # Page layout: A2, no header band, zero margins.
        converter.setPageSize(u'A2')
        converter.setHeaderHeight(u'0.0in')
        converter.setNoMargins(True)
        converter.convertFileToFile(html, filename)
    except pdfcrowd.Error as why:
        sys.stderr.write('Pdfcrowd Error: {}\n'.format(why))
        raise
def convert_pages(username, apikey, pages, out):
    """Convert every URL yielded by gen_urls(pages) to a PDF inside *out*
    (the directory is created if missing); exit with status 1 on API error."""
    if not os.path.isdir(out):
        os.makedirs(out)
    try:
        converter = pdfcrowd.HtmlToPdfClient(username, apikey)
        converter.setFailOnMainUrlError(True)
        for index, page_url in enumerate(gen_urls(pages)):
            target = os.path.join(out, 'generated_{}.pdf'.format(index))
            logger.info('creating %s from %s', target, page_url)
            converter.convertUrlToFile(page_url, target)
    except pdfcrowd.Error as why:
        logger.error('Pdfcrowd Error: %s', why)
        sys.exit(1)
def gen_pdfs(username, apikey, max_pages, urls):
    """Yield one converted PDF per URL, stopping once *max_pages* pages in
    total have been produced.

    Args:
        username, apikey: Pdfcrowd credentials.
        max_pages: overall page budget; a value <= 0 means no limit.
        urls: iterable of URLs to convert in order.
    """
    client = pdfcrowd.HtmlToPdfClient(username, apikey)
    client.setFailOnMainUrlError(True)
    # FIX: removed the dead local `pages_str` (assigned but never used).
    for url in urls:
        if max_pages > 0:
            logger.info('converting max %s pages from %s', max_pages, url)
            # Restrict this conversion to the remaining page budget.
            client.setPrintPageRange('-{}'.format(max_pages))
        else:
            logger.info('converting %s', url)
        yield client.convertUrl(url)
        logger.info('%s pages converted', client.getPageCount())
        if max_pages > 0:
            max_pages -= client.getPageCount()
            if max_pages <= 0:
                break
def cetak_laporan(req):
    """Stream https://en.wikipedia.org converted to PDF as report.pdf."""
    try:
        converter = pdfcrowd.HtmlToPdfClient('demo',
                                             'ce544b6ea52a5621fb9d55f8b542d14d')
        res = HttpResponse(content_type='application/pdf')
        # Response headers for a downloadable, non-cacheable PDF.
        headers = {
            'Cache-Control': 'max-age=0',
            'Accept-Ranges': 'none',
            'Content-Disposition':
                "attachment; filename*=UTF-8''" + parse.quote('report.pdf',
                                                              safe=''),
        }
        for name, value in headers.items():
            res[name] = value
        # Convert straight into the response stream.
        converter.convertUrlToStream('https://en.wikipedia.org', res)
        return res
    except pdfcrowd.Error as why:
        # Surface the API error with its own status code.
        return HttpResponse(why.getMessage(), status=why.getCode(),
                            content_type='text/plain')
def number_of_movies_by_year(df_basics, df_ratings):
    """Bar-chart the count of rated movies per start year (1900 onward),
    write it to HTML, and convert the HTML to PDF."""
    movies = df_basics[df_basics['titleType'] == 'movie']
    merged = pd.merge(movies, df_ratings, on='tconst')
    # Keep only rows whose startYear is purely numeric, then filter by year.
    merged = merged[merged['startYear'].astype(str).str.isdigit()]
    merged['startYear'] = merged['startYear'].astype(int)
    merged = merged[(merged['startYear'] >= 1900)]
    counts = merged.groupby('startYear').agg('count')
    years = list(counts.index)
    movie_counts = [(counts.loc[year, ['tconst']]).tolist()[0]
                    for year in years]
    fig = px.bar(x=years, y=movie_counts)
    fig.write_html("number_of_movies_by_year.html")
    converter = pdfcrowd.HtmlToPdfClient('demo',
                                         'ce544b6ea52a5621fb9d55f8b542d14d')
    converter.convertFileToFile('number_of_movies_by_year.html',
                                'number_of_movies_by_year.pdf')
def save_as_pdf():
    """Serve static/page.html converted to PDF; if the conversion fails,
    regenerate the HTML page from get_data() and return the HTML instead."""
    page_html = os.path.join(current_app.root_path, "static", "page.html")
    page_pdf = os.path.join(current_app.root_path, "static", "pdf", "page.pdf")
    try:
        converter = pdfcrowd.HtmlToPdfClient('geek123',
                                             'c3e8b743bba9bdd1164ce6e361eb122d')
        converter.convertFileToFile(page_html, page_pdf)
        return send_file(page_pdf, as_attachment=True)
    except pdfcrowd.Error as why:
        print('Pdfcrowd Error: {}\n'.format(why))
        print("Error create PDF")
    # Fallback: rebuild the HTML page and hand it back directly.
    data = get_data()
    html = render_template('index.html',
                           title=data['title'],
                           explanation=data['explanation'],
                           date=data['date'],
                           image_link=data['url'])
    with open(page_html, "w") as page:
        page.write(html)
    return html
def htmlToPdf(self):
    """Convert this page's saved HTML file to a PDF.

    Returns the PDF path on success, or False on a Pdfcrowd error.
    """
    savepath = os.path.join(
        self.savepath,
        str(self.startpage) + self.wkinfo.get('title') + '.pdf')
    source = os.path.join(self.htmlpath, str(self.startpage) + self.htmlfile)
    try:
        converter = pdfcrowd.HtmlToPdfClient('your_name', 'your_key')
        # Zero margins; page size stays at the A4 default.
        converter.setNoMargins(True)
        converter.convertFileToFile(source, savepath)
    except pdfcrowd.Error as why:
        print('Pdfcrowd Error: {}\n'.format(why))
        return False
    return savepath
def convert(request):
    """Convert the /result page (with the caller's query parameters) to a
    PDF and return it as result.pdf."""
    # FIX: the query string was rebuilt by naive `k=v&` concatenation, which
    # breaks for values containing '&', '=', spaces or non-ASCII characters.
    # urlencode percent-escapes each pair correctly.
    query = parse.urlencode(list(request.GET.items()))
    req = '/result?' + query if query else '/result'
    try:
        login = os.environ.get('PDFCROWD_LOGIN')
        token = os.environ.get('PDFCROWD_TOKEN')
        client = pdfcrowd.HtmlToPdfClient(login, token)
        site = f'http://84.252.137.33:8000{req}'
        response = HttpResponse(content_type='application/pdf')
        response['Cache-Control'] = 'max-age=0'
        response['Accept-Ranges'] = 'none'
        response['Content-Disposition'] = (
            "attachment; filename*=UTF-8''" + parse.quote('result.pdf',
                                                          safe=''))
        client.convertUrlToStream(site, response)
        return response
    except pdfcrowd.Error as why:
        # Surface the API error with its own status code.
        return HttpResponse(why.getMessage(), status=why.getCode(),
                            content_type='text/plain')
def preview(Username):
    """GET: show the CV template picker. POST: convert the user's printable
    CV page to static/CV/<Username>.pdf, then redirect to that PDF."""
    template1 = Template1.objects
    if request.method == 'GET':
        return render_template('CV_detail_page3/CV_student.html',
                               all_templates=template1)
    elif request.method == 'POST':
        try:
            api = pdfcrowd.HtmlToPdfClient('quangnn',
                                           'ca74aa6580bd6ab6c1e80b0954cab851')
            print(Username)
            api.convertUrlToFile(
                'https://lazy-cv.herokuapp.com/preview_print/{0}'.format(
                    Username),
                'static/CV/{0}.pdf'.format(Username))
            # pdfcrowd-> setPageHeight("-1")
        except pdfcrowd.Error as why:
            # Report but still redirect; the PDF may be stale or missing.
            sys.stderr.write('Pdfcrowd Error: {}\n'.format(why))
        return redirect(
            "https://lazy-cv.herokuapp.com/static/CV/{0}.pdf".format(Username))
def Process(link):
    """Fetch *link* into static/temp.pdf (direct download for .pdf, Pdfcrowd
    conversion for .html), then parse it into descriptions and dialogue.

    Returns two empty lists for any other link type.
    """
    if link.endswith(".pdf"):
        response = requests.get(link)
        with open('static/temp.pdf', 'wb') as f:
            f.write(response.content)
    elif link.endswith(".html"):
        try:
            converter = pdfcrowd.HtmlToPdfClient(
                'danisimm', 'e20813a7da8bdbe7b9002712f53b1ed7')
            converter.convertUrlToFile(link, 'static/temp.pdf')
        except pdfcrowd.Error as why:
            sys.stderr.write('Pdfcrowd Error: {}\n'.format(why))
            raise
    else:
        # Unsupported link type: nothing to parse.
        return [], []
    script = Read_Script("static/temp.pdf")
    c_in, s_in, c, s = Get_Data(script)
    return Get_Script(script, c_in, s_in, c, s)
def download_application(request, _id):
    """Render the current user's application for trial *_id* as a PDF
    attachment (GET only); on API failure, redirect back to the profile."""
    if request.method == "GET":
        try:
            trial = get_object_or_404(Trial, pk=_id)
            application = Application.objects.get(user=request.user,
                                                  trial=trial)
            details_of_application = DetailsOfApplication.objects.get(
                application=application)
            achievements = user_achievements.objects.all().filter(
                user_id=request.user.id)
            certificates = Certificates.objects.all().filter(
                user_id=request.user.id)
            client = pdfcrowd.HtmlToPdfClient(
                'spiderxm', 'c2a76a628d51f71fb76b85b8808956c4')
            # Force a download rather than inline display.
            response = HttpResponse(content_type='application/pdf')
            response['Cache-Control'] = 'max-age=0'
            response['Accept-Ranges'] = 'none'
            response['Content-Disposition'] = (
                'attachment; filename="application.pdf"')
            context = {
                "trial": trial,
                "application": application,
                "achievements": achievements,
                "details_of_application": details_of_application,
                "certificates": certificates,
                "user": request.user,
            }
            html = render_to_string(
                template_name="user_profile/application_template.html",
                context=context)
            client.convertStringToStream(html, response)
            return response
        except pdfcrowd.Error:
            messages.error(
                request, "There is a issue with api please try again later.")
            return HttpResponseRedirect('/user_profile/{}/'.format(
                request.user.id))
def choose(request, pk):
    """Let the user preview a resume template or export it to PDF.

    GET shows the picker. POST with 'view-resume' renders the chosen
    template; POST with 'export-resume' converts it via Pdfcrowd for
    paying users, otherwise redirects to the payment page.
    """
    user = request.user
    pp_url = user.profile.profile_pic.url.strip('/')
    resume = Resume.objects.get(pk=pk)
    form = ChooseForm(request.POST)
    # NOTE(review): `group` is never used afterwards, but the lookup is kept
    # because it raises if the 'paying_user' group is missing — confirm intent.
    group = Group.objects.get(name='paying_user')
    # FIX: the five copy-pasted template branches (duplicated again in the
    # export path) are collapsed into a single choice -> template mapping.
    template_map = {
        'jakarta': 'resumes/jakarta.html',
        'new_york': 'resumes/new_york.html',
        'tokyo': 'resumes/tokyo.html',
        'rome': 'resumes/rome.html',
        'sf': 'resumes/san_francisco.html',
    }
    if request.method == 'GET':
        form = ChooseForm()
    elif request.method == 'POST' and 'view-resume' in request.POST:
        if form.is_valid():
            template = template_map.get(form.cleaned_data['resume_template'])
            if template:
                return render(request, template, {
                    'form': form,
                    'resume': resume,
                    'pp_url': pp_url
                })
    # two buttons on one page
    elif form.is_valid(
    ) and request.method == 'POST' and 'export-resume' in request.POST:
        if request.user.groups.filter(name='paying_user').exists():
            client = pdfcrowd.HtmlToPdfClient(
                'chrisgunawan85', 'ea5734a7dc5aabbded5e65d8a32de8a4')
            client.setUsePrintMedia(True)
            client.setPageHeight('-1')  # single continuous page
            client.setDebugLog(True)
            # set HTTP response headers
            pdf_response = HttpResponse(content_type='application/pdf')
            pdf_response['Cache-Control'] = 'max-age=0'
            pdf_response['Accept-Ranges'] = 'none'
            content_disp = ('attachment'
                            if 'asAttachment' in request.POST else 'inline')
            pdf_response['Content-Disposition'] = (
                content_disp + '; filename=my_resume.pdf')
            template = template_map.get(form.cleaned_data['resume_template'])
            html = render_to_string(template, {
                'resume': resume,
                'pp_url': pp_url
            })
            client.convertStringToStream(html, pdf_response)
            # send the generated PDF
            return pdf_response
        else:
            messages.info(request,
                          "Please purchase a package to export to PDF format")
            return HttpResponseRedirect(reverse('resumes:payment'))
    return render(request, 'resumes/choose.html', {
        'form': form,
        'resume': resume
    })
import pdfcrowd
import sys

# Collect the conversion parameters interactively.
URL = input("Please enter the URL of webpage you'd like to convert : ")
User = input("Please enter your pdfcrowd API username ")
KEY = input("Please enter your API key ")

try:
    # Authenticate and convert the page to new.pdf in the current directory.
    client = pdfcrowd.HtmlToPdfClient(User, KEY)
    client.convertUrlToFile(URL, 'new.pdf')
except pdfcrowd.Error as why:
    # report the error, then re-raise for the caller / interpreter
    sys.stderr.write('Pdfcrowd Error: {}\n'.format(why))
    raise
# !python3
# convert html to pdf
import pdfcrowd, os, requests, time

# Ensure the output directory exists before converting.
os.makedirs('./output', exist_ok=True)
out_path = os.path.join(os.getcwd(), './output')

link = str(input('Enter the link (You can copy and paste): \n'))
# Fail fast if the URL cannot be fetched.
requests.get(link).raise_for_status()
pdf_file = str(input('Enter name for the pdf file: \n')) + '.pdf'

api = pdfcrowd.HtmlToPdfClient("demo", "ce544b6ea52a5621fb9d55f8b542d14d")
api.convertUrlToFile(link, os.path.join(out_path, pdf_file))
print(
    'Converting webpage to pdf. PDF file will be saved to the "Output" folder in the extracted folder'
)
time.sleep(5)
def form_valid(self, form):
    """Save the form; when the 'sigt' button was pressed, render the ticket's
    PDF slip, upload it to Cloud Storage, and e-mail it to the client.

    Redirects to core:sended with sended=True/False depending on the mail
    outcome, or to core:circuitosramales when 'sigt' was not pressed.
    """
    self.object = form.save(commit=False)
    self.object.save()
    if 'sigt' not in self.request.POST:
        return redirect("core:circuitosramales", slug=self.kwargs['slug'])

    slug = self.kwargs['slug']
    # FIX: the ticket was fetched four separate times; fetch once and reuse.
    ticket = Ticket.objects.get(superId=slug)
    me = pdfcrowd.HtmlToPdfClient('ariaschmario',
                                  os.getenv('PDFCROWD_PASSWORD'))
    secundarios = CentroCargaSecundario.objects.filter(
        principal=ticket.centro_carga)
    context = {'ticket': ticket, 'secundarios': secundarios}
    x = render_to_string('pdf.html', context)
    me.setPageSize(u'Letter')
    response = HttpResponse(content_type='application/pdf')
    me.convertStringToStream(x, response)

    # Upload the rendered PDF to the bucket and record its public URL.
    storage_client = storage.Client()
    bucket = storage_client.bucket('elelectricista')
    blob = bucket.blob('boletas/' + slug + '.pdf')
    filePfd = response.getvalue()
    blob.upload_from_string(filePfd, content_type='application/pdf')
    ticket.update_file_url(
        'https://storage.cloud.google.com/elelectricista/boletas/' + slug +
        '.pdf')

    try:
        mail_content = "Te adjuntamos la boleta técnica de la visita de El Electricista"
        sender_address = '*****@*****.**'
        sender_pass = os.getenv('GMAIL_PASSWORD_ID')
        receiver_address = ticket.client.email
        # Setup the MIME message with the slip attached.
        message = MIMEMultipart()
        message['From'] = sender_address
        message['To'] = receiver_address
        message['Subject'] = 'El Electricista Boleta'  # The subject line
        message.attach(MIMEText(mail_content, 'plain'))
        payload = MIMEBase('application', 'octate-stream')
        payload.set_payload(filePfd)
        encoders.encode_base64(payload)  # encode the attachment
        filename = "boleta.pdf"
        payload.add_header('Content-Disposition',
                           'attachment; filename="%s"' % filename)
        message.attach(payload)
        # Create an SMTP session for sending the mail.
        session = smtplib.SMTP('smtp.gmail.com', 587)
        session.starttls()  # enable security
        session.login(sender_address, sender_pass)
        session.sendmail(sender_address, receiver_address,
                         message.as_string())
        session.quit()
        return redirect("core:sended", slug=slug, sended=True)
    except Exception:
        # FIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; failure is still reported via sended=False.
        return redirect("core:sended", slug=slug, sended=False)
pdf_file = 'MyLayout.pdf' client.convertFileToFile(html_file, pdf_file) except pdfcrowd.Error as why: # report the error sys.stderr.write('Pdfcrowd Error: {}\n'.format(why)) # handle the exception here or rethrow and handle it at a higher level raise""" # //////////////////////////////////////////////////////////////////////// import pdfcrowd import sys try: # create the API client instance client = pdfcrowd.HtmlToPdfClient('demo', 'ce544b6ea52a5621fb9d55f8b542d14d') html_file = '/home/anil/Desktop/Detailed_Reports/Detailed_Report_byLawArea20191024_10-05-02.html' pdf_file = 'MyLayout_New.pdf' # create output file for conversion result output_file = open(pdf_file, 'wb') # run the conversion and store the result into a pdf variable pdf = client.convertFile(html_file) # write the pdf into the output file output_file.write(pdf) # close the output file output_file.close() except pdfcrowd.Error as why: # report the error
# import pdfkit
# import os
# os.chdir(r'C:\temp')
# options = {'quiet': ''}
# config = pdfkit.configuration(wkhtmltopdf=r'C:\Program Files\wkhtmltopdf\bin\wkhtmltopdf.exe')
# url = 'http://www.naver.com'
# pdfkit.from_url(url, 'naver.pdf', options=options, configuration=config)
import pdfcrowd
import sys

try:
    client = pdfcrowd.HtmlToPdfClient('demo', 'ce543')
    # FIX: `covertUrl` is not a client method — the call would raise
    # AttributeError; the correct name is convertUrl.
    pdf = client.convertUrl('https://leo-bb.tistory.com/')
    # `with` closes the output file even if the write fails.
    with open('leo_bb.pdf', 'wb') as output_file:
        output_file.write(pdf)
except pdfcrowd.Error as why:
    # FIX: the handler body was missing (a SyntaxError); report and re-raise.
    sys.stderr.write('Pdfcrowd Error: {}\n'.format(why))
    raise
def analyse_daily():
    """Analyse daily P/L per company from daily_answer.csv.

    Finds the 25 best and 25 worst companies by summed daily 'Loss or Gain',
    then for the top-2 (and bottom-2) plots their P/L series alongside their
    two lowest-correlated companies; each plot is saved as HTML and converted
    to PDF via Pdfcrowd.
    """
    df_daily = pd.read_csv("daily_answer.csv")
    # Rank companies by their total daily gain/loss.
    daily_group = df_daily.groupby(
        'Symbol')['Loss or Gain'].sum().reset_index()
    daily_top = daily_group.sort_values(by='Loss or Gain', ascending=False)
    daily_bottom = daily_group.sort_values(by='Loss or Gain')
    daily_top_val = daily_top['Loss or Gain'].tolist()[:25]
    daily_top_comp = daily_top['Symbol'].tolist()[:25]
    daily_bottom_val = daily_bottom['Loss or Gain'].tolist()[:25]
    daily_bottom_comp = daily_bottom['Symbol'].tolist()[:25]
    # Collect up to 200 daily P/L values per selected company.
    daily_dict = {}
    for i in daily_top_comp:
        temp_df = df_daily[df_daily['Symbol'] == i]
        daily_dict[i] = temp_df['Loss or Gain'].tolist()[:200]
    for i in daily_bottom_comp:
        temp_df = df_daily[df_daily['Symbol'] == i]
        daily_dict[i] = temp_df['Loss or Gain'].tolist()[:200]
    keys = list(daily_dict.keys())
    # Rebind df_daily to the per-company P/L matrix used for correlations.
    df_daily = pd.DataFrame(daily_dict, columns=keys)
    print("The top companies(daily) are " + daily_top_comp[0] + " and " +
          daily_top_comp[1])
    daily_top_company1 = daily_top_comp[0]
    daily_top_company2 = daily_top_comp[1]
    # Correlation pairs sorted ascending, so [:2] picks the two lowest
    # (most negative) correlations for the chosen company.
    res = df_daily.corr().unstack().sort_values().drop_duplicates()
    neg_corr = res[daily_top_company1][:2].index.tolist()
    neg_corr_top1 = neg_corr[0]
    neg_corr_top2 = neg_corr[1]
    # Series are truncated to 199 points ([1:] of 200) for plotting.
    val1 = daily_dict[daily_top_company1][1:]
    val2 = daily_dict[daily_top_company2][1:]
    val3 = daily_dict[neg_corr_top1][1:]
    val4 = daily_dict[neg_corr_top2][1:]
    # Build a long-form frame: 4 companies x 199 days.
    df_top_daily = pd.DataFrame()
    value = []
    value.extend(val1)
    value.extend(val2)
    value.extend(val3)
    value.extend(val4)
    days = []
    mul = [i for i in range(1, 200)]
    for i in range(4):
        days.extend(mul)
    comp = []
    comp.extend([daily_top_company1] * 199)
    comp.extend([daily_top_company2] * 199)
    comp.extend([neg_corr_top1] * 199)
    comp.extend([neg_corr_top2] * 199)
    df_top_daily['profit or loss'] = value
    df_top_daily['company'] = comp
    df_top_daily['days'] = days
    fig1 = px.line(df_top_daily, x="days", y="profit or loss",
                   color='company')
    fig1.write_html("top2_daily_negative_corr.html")
    # Repeat the same construction for the two worst performers.
    print("The bottom companies(daily) are " + daily_bottom_comp[0] +
          " and " + daily_bottom_comp[1])
    daily_bottom_company1 = daily_bottom_comp[0]
    daily_bottom_company2 = daily_bottom_comp[1]
    res = df_daily.corr().unstack().sort_values().drop_duplicates()
    neg_corr = res[daily_bottom_company1][:2].index.tolist()
    neg_corr_bottom1 = neg_corr[0]
    neg_corr_bottom2 = neg_corr[1]
    val1 = daily_dict[daily_bottom_company1][1:]
    val2 = daily_dict[daily_bottom_company2][1:]
    val3 = daily_dict[neg_corr_bottom1][1:]
    val4 = daily_dict[neg_corr_bottom2][1:]
    df_bottom_daily = pd.DataFrame()
    value = []
    value.extend(val1)
    value.extend(val2)
    value.extend(val3)
    value.extend(val4)
    days = []
    mul = [i for i in range(1, 200)]
    for i in range(4):
        days.extend(mul)
    comp = []
    comp.extend([daily_bottom_company1] * 199)
    comp.extend([daily_bottom_company2] * 199)
    comp.extend([neg_corr_bottom1] * 199)
    comp.extend([neg_corr_bottom2] * 199)
    df_bottom_daily['profit or loss'] = value
    df_bottom_daily['company'] = comp
    df_bottom_daily['days'] = days
    fig2 = px.line(df_bottom_daily, x="days", y="profit or loss",
                   color='company')
    fig2.write_html("bottom2_daily_negative_corr.html")
    # Convert both HTML plots to PDF with the demo Pdfcrowd account.
    client = pdfcrowd.HtmlToPdfClient('demo',
                                      'ce544b6ea52a5621fb9d55f8b542d14d')
    client.convertFileToFile("bottom2_daily_negative_corr.html",
                             "bottom2_daily_negative_corr.pdf")
    client.convertFileToFile("top2_daily_negative_corr.html",
                             "top2_daily_negative_corr.pdf")
def analyse_monthly():
    """Analyse monthly P/L per company from monthly_answer.csv.

    Same pipeline as analyse_daily but with 49 monthly samples per company:
    rank companies by summed 'Loss or Gain', then plot the top-2 and bottom-2
    alongside their two lowest-correlated companies; plots are saved as HTML
    and converted to PDF via Pdfcrowd.
    """
    df_monthly = pd.read_csv("monthly_answer.csv")
    # Rank companies by their total monthly gain/loss.
    monthly_group = df_monthly.groupby(
        'Symbol')['Loss or Gain'].sum().reset_index()
    monthly_top = monthly_group.sort_values(by='Loss or Gain',
                                            ascending=False)
    monthly_bottom = monthly_group.sort_values(by='Loss or Gain')
    monthly_top_val = monthly_top['Loss or Gain'].tolist()[:25]
    monthly_top_comp = monthly_top['Symbol'].tolist()[:25]
    monthly_bottom_val = monthly_bottom['Loss or Gain'].tolist()[:25]
    monthly_bottom_comp = monthly_bottom['Symbol'].tolist()[:25]
    # Collect up to 49 monthly P/L values per selected company.
    monthly_dict = {}
    for i in monthly_top_comp:
        temp_df = df_monthly[df_monthly['Symbol'] == i]
        monthly_dict[i] = temp_df['Loss or Gain'].tolist()[:49]
    for i in monthly_bottom_comp:
        temp_df = df_monthly[df_monthly['Symbol'] == i]
        monthly_dict[i] = temp_df['Loss or Gain'].tolist()[:49]
    keys = list(monthly_dict.keys())
    # Rebind df_monthly to the per-company P/L matrix used for correlations.
    df_monthly = pd.DataFrame(monthly_dict, columns=keys)
    print("The top companies(monthly) are " + monthly_top_comp[0] + " and " +
          monthly_top_comp[1])
    monthly_top_company1 = monthly_top_comp[0]
    monthly_top_company2 = monthly_top_comp[1]
    # Correlation pairs sorted ascending, so [:2] picks the two lowest
    # (most negative) correlations for the chosen company.
    res = df_monthly.corr().unstack().sort_values().drop_duplicates()
    neg_corr = res[monthly_top_company1][:2].index.tolist()
    neg_corr_top1 = neg_corr[0]
    neg_corr_top2 = neg_corr[1]
    # Series truncated to 48 points ([1:] of 49) for plotting.
    val1 = monthly_dict[monthly_top_company1][1:]
    val2 = monthly_dict[monthly_top_company2][1:]
    val3 = monthly_dict[neg_corr_top1][1:]
    val4 = monthly_dict[neg_corr_top2][1:]
    # Long-form frame: 4 companies x 48 months.
    df_top_monthly = pd.DataFrame()
    value = []
    value.extend(val1)
    value.extend(val2)
    value.extend(val3)
    value.extend(val4)
    days = []
    mul = [i for i in range(1, 49)]
    for i in range(4):
        days.extend(mul)
    comp = []
    comp.extend([monthly_top_company1] * 48)
    comp.extend([monthly_top_company2] * 48)
    comp.extend([neg_corr_top1] * 48)
    comp.extend([neg_corr_top2] * 48)
    df_top_monthly['profit or loss'] = value
    df_top_monthly['company'] = comp
    df_top_monthly['days'] = days
    fig5 = px.line(df_top_monthly, x="days", y="profit or loss",
                   color='company')
    fig5.write_html("top2_monthly_negative_corr.html")
    # Repeat the same construction for the two worst performers.
    print("The bottom companies(monthly) are " + monthly_bottom_comp[0] +
          " and " + monthly_bottom_comp[1])
    monthly_bottom_company1 = monthly_bottom_comp[0]
    monthly_bottom_company2 = monthly_bottom_comp[1]
    res = df_monthly.corr().unstack().sort_values().drop_duplicates()
    neg_corr = res[monthly_bottom_company1][:2].index.tolist()
    neg_corr_bottom1 = neg_corr[0]
    neg_corr_bottom2 = neg_corr[1]
    val1 = monthly_dict[monthly_bottom_company1][1:]
    val2 = monthly_dict[monthly_bottom_company2][1:]
    val3 = monthly_dict[neg_corr_bottom1][1:]
    val4 = monthly_dict[neg_corr_bottom2][1:]
    df_bottom_monthly = pd.DataFrame()
    value = []
    value.extend(val1)
    value.extend(val2)
    value.extend(val3)
    value.extend(val4)
    days = []
    mul = [i for i in range(1, 49)]
    for i in range(4):
        days.extend(mul)
    comp = []
    comp.extend([monthly_bottom_company1] * 48)
    comp.extend([monthly_bottom_company2] * 48)
    comp.extend([neg_corr_bottom1] * 48)
    comp.extend([neg_corr_bottom2] * 48)
    df_bottom_monthly['profit or loss'] = value
    df_bottom_monthly['company'] = comp
    df_bottom_monthly['days'] = days
    fig6 = px.line(df_bottom_monthly, x="days", y="profit or loss",
                   color='company')
    fig6.write_html("bottom2_monthly_negative_corr.html")
    # img.seek(0)
    # with open("imdb_bargraph.html", "w") as file:
    # file.write('<div><img src="data:image/png;base64,{}"/></div>'.format(res))
    # Convert both HTML plots to PDF with the demo Pdfcrowd account.
    client = pdfcrowd.HtmlToPdfClient('demo',
                                      'ce544b6ea52a5621fb9d55f8b542d14d')
    client.convertFileToFile("bottom2_monthly_negative_corr.html",
                             "bottom2_monthly_negative_corr.pdf")
    client.convertFileToFile("top2_monthly_negative_corr.html",
                             "top2_monthly_negative_corr.pdf")
import pdfcrowd
import sys

try:
    # Build and configure the converter: A6 page, zero margins.
    converter = pdfcrowd.HtmlToPdfClient('marxtei',
                                         'a8f7360faa2ccb2504320d3b87e0ec4b')
    converter.setPageSize('A6')
    converter.setNoMargins(True)
    # Open the destination first (as before), then convert and persist.
    output_file = open('test.pdf', 'wb')
    document = converter.convertUrl('http://interactbrasil.org/adam/teste.html')
    output_file.write(document)
    output_file.close()
except pdfcrowd.Error as why:
    # Report the error, then re-raise so the failure is visible to callers.
    sys.stderr.write('Pdfcrowd Error: {}\n'.format(why))
    raise