def upload_post(request, przedmiot, test):
    if request.method == "POST":
        form = PostForm(request.POST, request.FILES)
        if form.is_valid():
            # URL slugs use dashes; the stored test names use spaces.
            test = test.replace("-", " ")
            # Przedmiot.objects.get() raises DoesNotExist rather than returning
            # None, so the original "is not None" check was redundant.
            szukany_przedmiot = Przedmiot.objects.get(subject_name=przedmiot)
            testy = szukany_przedmiot.testy_z_danego_przedmiotu.all()
            szukany_test = testy.get(test_name=test)
            print(f"name of the matched test: {szukany_test.test_name}")
            new_post = Post(title=form.cleaned_data['title'],
                            content=form.cleaned_data['content'],
                            attachment=form.cleaned_data['attachment'],
                            test=szukany_test)
            new_post.save()
            return redirect('index')
    else:
        form = PostForm()
    print("upload_post: sending the form")
    return render(request, 'labelka/upload_post.html', {'form': form})
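# A minimal sketch of the PostForm that upload_post appears to assume: a
# ModelForm exposing title, content, and attachment (hypothetical field list,
# inferred only from the form.cleaned_data lookups above).
from django import forms

class PostForm(forms.ModelForm):
    class Meta:
        model = Post
        fields = ['title', 'content', 'attachment']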
def upload_post1(request):
    print("entering upload_post1")
    if request.method == "POST":
        print("handling POST")
        author = request.user
        przedmiocik = request.POST['new-post-przedmiot']
        przedmiot = Przedmiot.objects.get(subject_name=przedmiocik)
        tescik = request.POST['new-post-test']
        test = przedmiot.testy_z_danego_przedmiotu.get(test_name=tescik)
        title = request.POST['new-post-title']
        content = request.POST['new-post-content']
        files = request.FILES.getlist('new-post-attachments')
        print(f"{request.FILES}")
        print(f"author: {author.username}, title: {title}, "
              f"content: {content}, test: {tescik}")
        post = Post(author=request.user, title=title, content=content, test=test)
        post.save()
        for f in files:
            attachment = Attachment(att=f, post=post)
            print(f"file name: {attachment.att.name}, url: {attachment.att.url}")
            attachment.save()
    # A view must always return an HttpResponse; the original fell through
    # and returned None on GET.
    return redirect('index')
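# A minimal sketch of the Attachment model upload_post1 assumes: one uploaded
# file per row, linked back to its Post (hypothetical; field names taken from
# the view above, upload_to path is an assumption).
from django.db import models

class Attachment(models.Model):
    att = models.FileField(upload_to='attachments/')
    post = models.ForeignKey(Post, on_delete=models.CASCADE)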
def test_database_has_posts(self):
    post = Post()
    post.title = "Ahoy"
    post.text = "AaaaaaHooooYYYYY"
    post.author = self.user
    post.save()
    self.assertEqual(Post.objects.count(), 1)
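# Hypothetical scaffolding for the test above: a TestCase whose setUp creates
# the self.user the test relies on (class name and credentials are assumptions,
# not from the source).
from django.contrib.auth import get_user_model
from django.test import TestCase

class PostModelTest(TestCase):
    def setUp(self):
        self.user = get_user_model().objects.create_user(
            username='tester', password='secret')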
def most_viewed(request, ndays='7'):
    now = timezone.now()
    ndays = int(ndays)
    since = now - datetime.timedelta(days=ndays)
    entries = _get_most_viewed_entries(since=since)
    # Return a tab separated values file, if requested
    if request.GET.get('tsv') == '1':
        header = 'post_id\ttitle\turl\tcount\n'
        text = '\n'.join(_get_tsv(entry) for entry in entries)
        text = header + text
        response = HttpResponse(text, content_type='text/tab-separated-values')
    else:
        Post = namedtuple('Post', ('authorid', 'avatar', 'slug', 'title'))
        context = {
            'post_list': [
                Post(
                    slug=entry['post__slug'],
                    authorid=entry['post__blog__user__id'],
                    avatar=entry['post__blog__user__hacker__avatar_url'],
                    title=entry['post__title'],
                )
                for entry in entries
            ],
            'from': since.date(),
            'to': now.date(),
        }
        response = render(request, 'home/most_viewed.html', context)
    return response
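# _get_tsv is called above but not shown. A plausible sketch, assuming each
# entry is a dict carrying id, title, URL, and view count; every key below
# except those visible in the view above is hypothetical.
def _get_tsv(entry):
    return '{}\t{}\t{}\t{}'.format(
        entry['post__id'], entry['post__title'],
        entry['post__url'], entry['count'])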
def create(request): if request.method == "POST": print(request.POST) name = request.POST.get("author") src = request.FILES.get("myfile") form = UploadFileForm(request.POST, request.FILES) if form.is_valid(): handle_uploaded_file(request.FILES['myfile']) form.save() try: author = Author.objects.get(name=name) except Author.DoesNotExist: author = Author(name=name) author.save() obj = Post(title=request.POST.get("title"), text=request.POST.get("text"), author=author, image=form) obj.save() return render(request, "success.html") return render(request, "create.html")
def post_list_data(request):
    print("regist start")
    # request.is_ajax() was removed in Django 4.0; kept as in the original.
    if request.is_ajax():
        content_title = request.POST.get('content_title')
        content = request.POST.get('Content')
        user_id = request.POST.get('user_id')
        print(user_id)
        try:
            post_insert = Post(user_id=user_id, title=content_title,
                               content=content)
            post_insert.save()
            request.session['user_id'] = user_id
        except Exception:
            # The original caught User.DoesNotExist, which Post.save() never
            # raises, so the handler was dead code.
            print("content_title--------->", content_title)
            print("regist fail")
            return JsonResponse({'return': 'fail'})
        return JsonResponse({'return': 'success'})
    # The original returned None for non-AJAX requests.
    return JsonResponse({'return': 'fail'})
def create_post(user):
    cls()
    t = input("Enter your title: ")
    tx = input("Enter your text: ")
    p = Post(title=t, text=tx, created_by=user)
    p.save()
    cls()
def create(request):
    if request.method == "POST":
        obj = Post(title=request.POST.get("title"),
                   text=request.POST.get("text"))
        obj.save()
        context = {"action": "created"}
        return render(request, "success.html", context)
    return render(request, "create.html")
def get_posts(self, chat_id):
    conn, cur = self.open_db_connection()
    cur.execute("SELECT * from home_post WHERE user_id=? and PUBLISHED=?",
                (chat_id, 0))
    posts = cur.fetchall()
    unpublished_posts = {}
    for post in posts:
        # The original keyed the dict with str(post[1]) but read it back with
        # the raw post[1]; one shared key fixes the mismatch.
        key = str(post[1])
        unpublished_posts[key] = Post(
            post_id=post[0],
            created_at=post[1],
            text=[False, post[2]],
        )
        if post[4]:  # media
            cur.execute("SELECT * from home_postmedia WHERE post_id=?",
                        (post[0],))
            try:
                media_data = cur.fetchall()[0]
                unpublished_posts[key].media = {
                    0: True, **{i: media_data[i] for i in range(1, 11)}}
                del media_data
            except IndexError:
                unpublished_posts[key].media = {
                    0: True, **{i: '' for i in range(1, 11)}}
        if post[5]:  # location
            cur.execute("SELECT * from home_postlocation WHERE post_id=?",
                        (post[0],))
            location_data = cur.fetchall()[0]
            unpublished_posts[key].location = [
                True, location_data[1], location_data[2]]
    self.close_db_connection(conn, cur)
    return unpublished_posts
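# A minimal sketch of the Post container get_posts instantiates: a plain value
# object rather than the Django model (hypothetical; attribute names are taken
# from the method above, defaults are assumptions).
class Post:
    def __init__(self, post_id, created_at, text):
        self.post_id = post_id
        self.created_at = created_at
        self.text = text        # [published_flag, body]
        self.media = None       # filled in later when post[4] is set
        self.location = None    # filled in later when post[5] is set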
def new_post():
    form = PostForm()
    if form.validate_on_submit():
        post = Post(title=form.title.data, content=form.content.data,
                    author=current_user)
        db.session.add(post)
        db.session.commit()
        flash('Your Post has been created!', 'success')
        return redirect(url_for('home'))
    return render_template('create_post.html', title='New Post',
                           form=form, legend='New Post')
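# As a Flask view, new_post still needs a route. A typical registration,
# assuming an app object and flask_login (the URL path and login_required
# are assumptions, not from the source):
#
#     @app.route('/post/new', methods=['GET', 'POST'])
#     @login_required
#     def new_post():
#         ...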
def contact(request):
    if request.method == "POST":
        title = request.POST['title']
        author = request.POST['author']
        slug = request.POST['slug']
        content = request.POST['content']
        summary = request.POST['summary']
        if (len(title) < 1 or len(author) < 1 or len(slug) < 1
                or len(content) < 4 or len(summary) < 1):
            messages.error(request, "Please fill the form correctly")
        else:
            contact = Post(title=title, author=author, slug=slug,
                           content=content, summary=summary)
            contact.save()
            messages.success(request, "Your Blog has been created!!")
    return render(request, "home/contact.htm")
def carga():
    log("ROUTINE START")
    from home.models import Post
    from home.models import ApiTokenInstagramSettings
    from instagram.client import InstagramAPI

    # AUTH REQUIRED -- credentials are hard-coded here even though
    # ApiTokenInstagramSettings is imported and never used.
    access_token = '1345635461.7089ef3.9bc2cfb0180741d392728cc107b432fb'
    client_secret = 'cb86b8747fea45d9a745e95bb31110e0'
    user_id = '1345635461'
    api = InstagramAPI(access_token=access_token, client_secret=client_secret)
    recent_media, next_ = api.user_recent_media(user_id=user_id, count=10)

    for media in recent_media:
        post = Post.objects.filter(pid=media.id)
        texto = media.caption.text if media.caption else None
        try:
            if post:
                # Update the existing post in place.
                post = post[0]
                post.texto = u'{0}'.format(texto)
                post.date = media.created_time
                post.imagem = media.get_standard_resolution_url()
                post.imagem_src = media.get_standard_resolution_url()
            else:
                post = Post(
                    redesocial='INSTAGRAM',
                    pid=media.id,
                    texto=u'{0}'.format(texto),
                    date=media.created_time,
                    link=media.link,
                    imagem=media.get_standard_resolution_url(),
                    imagem_src=media.get_standard_resolution_url(),
                )
            post.save()
        except Exception as e:  # Python 3 syntax; the original was Python 2
            log(u"Error inserting [{0}]: {1}".format(
                media.id, media.get_standard_resolution_url()))
            log(e)
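# A hedged sketch of how the hard-coded credentials above might instead come
# from the unused ApiTokenInstagramSettings model (the field names and the
# single-row assumption are guesses; the real model is not shown here).
def get_instagram_credentials():
    settings = ApiTokenInstagramSettings.objects.first()
    return settings.access_token, settings.client_secret, settings.user_id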
def articlesave(response):
    data = json.loads(response)
    # The .encode('utf-8') calls from the original (Python 2) are dropped;
    # json.loads already returns text.
    source = data["source"]
    category = "hindi"
    articles = data["articles"]
    for article in articles:
        storedarticles = Post.objects.filter(headline=article["title"])
        if len(storedarticles) == 0:
            articlesource = source
            if article["author"] is None:
                author = "Anonymous"
            else:
                author = article["author"]
            headline = article["title"]
            story = article["description"]
            url = article["url"]
            # Both branches of the original publishedAt check produced the
            # same value, so only the current date is kept here.
            date = str(timezone.now())[:10]
            if article['urlToImage'] is not None:
                image_url = article["urlToImage"]
                # Articles without an image URL are skipped, matching the
                # original control flow.
                a = Post(source=articlesource, author=author,
                         headline=headline, story=story, link=image_url,
                         date=date, pageurl=url, category=category)
                a.save()
def get_news(URL):
    url = URL
    # Disables certificate verification, as in the original.
    context = ssl._create_unverified_context()
    # Python 3: urllib.urlopen moved to urllib.request.urlopen.
    response = urllib.request.urlopen(url, context=context)
    data = json.loads(response.read())
    source = data["source"]
    if source in ['business-insider', 'bloomberg']:
        category = "business"
    elif source in ["bbc-sport"]:
        category = "sports"
    elif source in ["ars-technica"]:
        category = "technology"
    else:
        category = "general"
    articles = data["articles"]
    for article in articles:
        storedarticles = Post.objects.filter(headline=article["title"])
        if len(storedarticles) == 0:
            articlesource = source
            if article["author"] is None:
                author = "Anonymous"
            else:
                author = article["author"]
            headline = article["title"]
            if article["description"]:
                story = article["description"]
            else:
                story = ""
            url = article["url"]
            if article["publishedAt"] is None:
                date = str(timezone.now())[:10]
            else:
                date = article["publishedAt"][:10]
            if article['urlToImage'] is not None:
                image_url = article["urlToImage"]
                # Articles without an image URL are skipped, matching the
                # original control flow.
                a = Post(source=articlesource, author=author,
                         headline=headline, story=story, link=image_url,
                         date=date, pageurl=url, category=category)
                a.save()
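# The original carried commented-out code that streamed urlToImage into a
# temporary file for saving onto the model. A hedged reconstruction of that
# idea as a standalone helper; attaching the result to a Post field is left
# to the caller, since that field is not shown in the source.
import tempfile
import requests
from django.core.files import File

def fetch_image(image_url):
    request = requests.get(image_url, stream=True)
    if request.status_code != requests.codes.ok:
        return None  # skip articles whose image cannot be fetched
    # Derive a filename from the URL, capped at a sane length.
    file_name = image_url.split('/')[-1] + ".jpg"
    if len(file_name) > 50:
        file_name = file_name[:50] + ".jpg"
    lf = tempfile.NamedTemporaryFile()
    for block in request.iter_content(1024 * 8):
        if not block:
            break
        lf.write(block)
    lf.flush()
    return file_name, File(lf)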
def start_posting(my_url, append_url, headDiv, headName, bodyDiv, bodyName):
    # Example arguments:
    # my_url = 'http://zeenews.india.com/'
    # append_url = 'http://zeenews.india.com'
    print(my_url)
    print(append_url)
    print(headDiv, headName)
    print(bodyDiv, bodyName)
    all_sites = {}
    try:
        if os.path.exists('/home/prashant/Desktop/programming/projects/scrapewebsite/scrapewebsite/scrapersite/home/NewsData.json'):
            with open('/home/prashant/Desktop/programming/projects/scrapewebsite/scrapewebsite/scrapersite/home/NewsData.json', 'r') as rea:
                all_sites = json.load(rea)
    except Exception:
        print("NewsData.json doesn't exist")
    r = requests.get(my_url)
    soup = BeautifulSoup(r.content, "lxml")
    atags = soup.findAll('a')
    all_data = {}
    first_page_links = []
    spl = []
    for i in atags:
        try:
            link = i['href']
            if re.match(r'^/', link):
                link = append_url + link
            print('Looking at :' + link)
            first_page_links.append(link)
            secondr = requests.get(link)
            secondSoup = BeautifulSoup(secondr.content, "lxml")
            hTags = secondSoup.findAll('h1', {headDiv: headName})
            bodyTags = secondSoup.findAll('div', {bodyDiv: bodyName})
            head = []
            bodylist = []
            if hTags and bodyTags:
                for headline in hTags:
                    print(headline.text.strip())
                    head.append(headline.text.strip())
                for body in bodyTags:
                    bodylist.append(body.text.strip())
                bod = clean_article(bodylist)  # cleans out all the html data
                if link not in all_sites and link not in all_data:
                    summary = summarize_article(bod)  # summarizes the article
                    final_summary = ','.join(map(str, summary))
                    newPost = Post()  # save into the database
                    newPost.headline = ''.join(head)
                    newPost.body = ''.join(bod)
                    newPost.pub_date = timezone.now()
                    newPost.source = link
                    newPost.summary = final_summary
                    newPost.save()
                    all_data[link] = {'Headline': head, 'Body': bod,
                                      'Summary': final_summary, 'Source': link}
                    try:
                        with open('/home/prashant/Desktop/programming/projects/scrapewebsite/scrapewebsite/scrapersite/home/NewsData.json', 'w') as fi:
                            json.dump(all_data, fi)
                        print('dumping')
                    except Exception as e:
                        print(str(e))
        except Exception:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print(exc_type, fname, exc_tb.tb_lineno)
    first_page_links = list(unique_everseen(first_page_links))
    for i in first_page_links:
        if re.match(r'^/', i):
            spl.append(append_url + i)
        else:
            spl.append(i)
    for j in spl:
        try:
            second_page = requests.get(j)
            second_page_soup = BeautifulSoup(second_page.content, "lxml")
            second_link = second_page_soup.findAll('a')
            for k in second_link:
                try:
                    link2 = k['href']
                    third_page = requests.get(link2)
                    third_page_soup = BeautifulSoup(third_page.content, "lxml")
                    h_second_Tags = third_page_soup.findAll('h1', {headDiv: headName})
                    body_second_Tags = third_page_soup.findAll('div', {bodyDiv: bodyName})
                    head = []
                    bodylist = []
                    if h_second_Tags and body_second_Tags:
                        for headline in h_second_Tags:
                            print('second page' + headline.text.strip())
                            head.append(headline.text.strip())
                        for body in body_second_Tags:
                            bodylist.append(body.text.strip())
                        bod = clean_article(bodylist)
                        # The original keyed all_data with the bs4 tag k,
                        # which json.dump cannot serialize; the resolved URL
                        # link2 is used instead, matching the first pass.
                        if link2 not in all_sites and link2 not in all_data:
                            summary = summarize_article(bod)
                            final_summary = ','.join(map(str, summary))
                            newPost = Post()
                            newPost.headline = ''.join(head)
                            newPost.body = ''.join(bod)
                            newPost.pub_date = timezone.now()
                            newPost.source = link2
                            newPost.summary = final_summary
                            newPost.save()
                            all_data[link2] = {'Headline': head, 'Body': bod,
                                               'Summary': final_summary,
                                               'Source': link2}
                            with open('/home/prashant/Desktop/programming/projects/scrapewebsite/scrapewebsite/scrapersite/home/NewsData.json', 'w') as fi2:
                                json.dump(all_data, fi2)
                except Exception as e:
                    print(str(e))
        except Exception as e:
            print(str(e))
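# Example invocation, reusing the site from the commented-out defaults above;
# the attribute/value selector pairs are assumptions about the target markup,
# not values from the source:
#
#     start_posting('http://zeenews.india.com/', 'http://zeenews.india.com',
#                   'class', 'article-heading', 'class', 'article-body')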