# Standard-library / Django imports used by the view functions below; project-local
# helpers (models, loginfo, logerror, local_is_subscribed) are assumed to be imported
# elsewhere in the package.
import json

from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.http import (HttpResponse, HttpResponseBadRequest,
                         HttpResponseServerError)


def subscribe(request):
    # request.REQUEST is the merged GET/POST QueryDict (removed in Django 1.9+).
    get = request.REQUEST
    subscriber_id = int(get.get('subscriber'))
    owner_id = int(get.get('owner'))

    # Already subscribed: nothing to create.
    if local_is_subscribed(owner_id, subscriber_id):
        loginfo("success")
        return HttpResponse()

    try:
        sbs = models.Subscriber()
        sbs.subscriber = subscriber_id
        sbs.owner = owner_id
        sbs.save()
    except Exception as e:
        logerror(str(e))
        return HttpResponseServerError()

    # Copy the owner's existing records into the new subscriber's news feed.
    records = models.Record.objects.filter(owner_id=owner_id)
    for record in records:
        news = models.SubscriberNews()
        news.record = record
        news.subscriber = sbs
        news.save()

    loginfo("success")
    return HttpResponse()
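
# subscribe() relies on a local_is_subscribed() helper that is not shown in this
# fragment. The sketch below is an assumption about its shape, not the project's
# confirmed implementation: it checks whether a Subscriber row already links the two
# users, using the field names (owner, subscriber) seen in subscribe() above.
def local_is_subscribed(owner_id, subscriber_id):
    # True if subscriber_id already follows owner_id.
    return models.Subscriber.objects.filter(
        owner=owner_id, subscriber=subscriber_id).exists()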

def handle_add_bookmark(request):
    put = request.REQUEST
    bm = models.Bookmark()
    # The client sends is_public as a string; anything other than 'False' is public.
    bm.is_public = put.get('is_public') != 'False'
    bm.description = put.get('description')
    bm.title = put.get('title')
    bm.user_id = int(put.get('user_id'))
    bm.username = put.get('username')

    try:
        bm.save()
    except Exception as e:
        logerror(str(e))
        return HttpResponseBadRequest()

    # Private bookmarks are stored without tags.
    if not bm.is_public:
        loginfo("Bookmark added")
        return HttpResponse(json.dumps({'bookmark_id': bm.id}))

    # Public bookmarks: derive tags from the title and link them to the bookmark.
    raw_tags = models.split_title_on_tags(bm.title)
    tags = models.get_tags(raw_tags)
    for tag in tags:
        bm_tag = models.BookmarkTag()
        bm_tag.tag = tag
        bm_tag.bookmark = bm
        bm_tag.save()

    loginfo("Bookmark added")
    return HttpResponse(json.dumps({'bookmark_id': bm.id}))
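
# handle_add_bookmark() calls models.split_title_on_tags() and models.get_tags(),
# which are defined elsewhere (in the models module). A minimal sketch under two
# assumptions that this fragment does not confirm: tags are whitespace-separated
# words of the title, and there is a Tag model with a unique name field.
def split_title_on_tags(title):
    # Break the title into candidate tag strings.
    return [word.strip().lower() for word in title.split() if word.strip()]


def get_tags(raw_tags):
    # Map raw tag strings onto Tag rows, creating any that are missing.
    tags = []
    for name in raw_tags:
        tag, _created = Tag.objects.get_or_create(name=name)  # Tag is hypothetical
        tags.append(tag)
    return tags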

def handle_get_user_bookmarks(request):
    get = request.GET
    bookmarks_user_id = int(get.get('bookmarks_user_id'))
    user_id = int(get.get('user_id'))
    items_per_page = int(get.get('per_page'))
    page = int(get.get('page'))

    try:
        if bookmarks_user_id == user_id:
            # The owner sees all of their bookmarks.
            bm_list = models.Bookmark.objects.filter(user_id=bookmarks_user_id)
        else:
            # Other users only see public bookmarks.
            bm_list = models.Bookmark.objects.filter(is_public=True,
                                                     user_id=bookmarks_user_id)
    except Exception as e:
        logerror(str(e))
        return HttpResponseBadRequest()

    paginator = Paginator(bm_list, items_per_page)
    try:
        result = paginator.page(page)
    except PageNotAnInteger:
        page = 1
        result = paginator.page(page)
    except EmptyPage:
        page = paginator.num_pages
        result = paginator.page(page)

    data = {
        'objects': [x.short_json() for x in result.object_list],
        'pages': paginator.num_pages,
        'cur_page': page,
    }
    return HttpResponse(json.dumps(data))

def handleLoginRequest(request):
    # loginfo(request.get_full_path())
    post = request.POST
    email = post.get('email')
    password = post.get('password')
    loginfo("login request")
    loginfo("email: " + email)

    user = models.get_user_via_email(email)
    if user is None:
        loginfo("user is none")

    result = {}
    # The submitted password is compared directly with the stored one.
    if user and user.password == password:
        cookie = models.generate_cookie(user.id)
        loginfo(str(cookie.id))
        result['id'] = cookie.id
        result['token'] = cookie.token
        result['userId'] = cookie.userId
        loginfo('login successful: ')
        loginfo('id: ' + str(cookie.userId))
        loginfo('token: ' + cookie.token)
        return HttpResponse(json.dumps(result))
    else:
        logerror('login failed')
        return HttpResponseBadRequest()
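
# handleLoginRequest() depends on models.generate_cookie(user_id), which is not part
# of this fragment. A rough sketch under the assumption that there is a session/token
# model (called Cookie here, which is hypothetical) with userId and token fields, i.e.
# the attributes the view reads back; the token scheme below is illustrative only.
import uuid


def generate_cookie(user_id):
    cookie = Cookie()                 # hypothetical model, not confirmed by this file
    cookie.userId = user_id
    cookie.token = uuid.uuid4().hex   # random opaque token
    cookie.save()
    return cookie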

def handle_get_user_bookmark(request):
    get = request.GET
    bookmark_id = get.get('bookmark_id')
    if bookmark_id is None:
        logerror("Id is none")
        return HttpResponseBadRequest()

    try:
        bookmark = models.Bookmark.objects.get(pk=int(bookmark_id))
    except Exception as e:
        logerror(str(e))
        return HttpResponseBadRequest()

    loginfo("bookmark is found")
    return HttpResponse(json.dumps(bookmark.full_json()))

def handleRegisterRequest(request):
    put = request.REQUEST
    user = models.User()
    user.name = put.get('name')
    user.email = put.get('email')
    user.password = put.get('password')
    user.last_name = put.get('last_name')
    loginfo("register: email " + user.email)

    if models.get_user_via_email(user.email):
        logerror("a user with this email already exists")
        return HttpResponseBadRequest()

    user.save()
    loginfo("user registered")
    return HttpResponse()

def handle_remove_bookmark(request):
    delete = request.REQUEST
    bm_id = delete.get('bookmark_id')
    user_id = delete.get('user_id')
    if bm_id is None or user_id is None:
        logerror("Some params are None")
        return HttpResponseBadRequest()

    bm_id = int(bm_id)
    user_id = int(user_id)
    bookmark = models.Bookmark.objects.get(pk=bm_id)

    # Only the owner may delete a bookmark and its tag links.
    if user_id != bookmark.user_id:
        return HttpResponseBadRequest()

    tags = models.BookmarkTag.objects.filter(bookmark=bookmark)
    for tag in tags:
        tag.delete()
    bookmark.delete()
    return HttpResponse()

# The scraper fragments below assume the usual imports at the top of their module
# (not shown here): time, datetime, urlopen (urllib.request / urllib2), and
# BeautifulSoup from bs4, plus the logerror helper used throughout.

# Tail of the preceding URL-collection loop (context not shown).
        break
    # else:
    #     time.sleep(120)

# Deutsche Welle (dw.com) articles.
content_all = {}
for url in url_list:
    try:
        url_response = urlopen(url)
        news_html = BeautifulSoup(url_response, "html.parser")
    except Exception:
        logerror("dw_news_URL_UnicodeEncodeError: " + url)
        continue

    news_title = news_html.find("div", class_="col3").find("h1").text
    create_time = news_html.find(
        "ul", class_="smallList").find("li").text.split("\n")[1]
    create_time_convert = datetime.datetime.strptime(
        create_time, "%d.%m.%Y").strftime("%Y-%m-%d")
    news_id = "dw-" + url.split("/")[-1].split("-")[-1]

time.sleep(120)

# CNN articles.
content_all = {}
for url in url_list:
    url_response = urlopen(url)
    news_html = BeautifulSoup(url_response, "html.parser")

    try:
        news_title = news_html.find("h1", class_="pg-headline").text
    except (AttributeError, TypeError):
        logerror("cnn_news_title_TypeError: " + url)
        continue

    # The update-time text ends with ") Month D, YYYY"; keep only the date part.
    create_time = news_html.find(
        "p", class_="update-time").text.split(")")[-1].replace(",", "").strip(" ")
    create_time_convert = datetime.datetime.strptime(
        create_time, "%B %d %Y").strftime("%Y-%m-%d")

    news_id = "cnn-" + url.split("/")[7]
    news_tag = url.split("/")[6]

time.sleep(120)

# AFP (world) articles.
content_all = {}
for url in url_list:
    url_response = urlopen(url)
    news_html = BeautifulSoup(url_response, "html.parser")

    try:
        news_title = news_html.find("h3", class_="htitle").text
    except (AttributeError, TypeError):
        logerror("afp_news_title_TypeError: " + url)
        continue

    # The publication date is split across three <span> elements: day, month, year.
    create_time_block = news_html.find("div", class_="article_content_date")
    create_time_d = create_time_block.find("span", class_="d").text
    create_time_m = create_time_block.find("span", class_="m").text
    create_time_y = create_time_block.find("span", class_="y").text
    create_time = create_time_d + create_time_m + create_time_y
    create_time_convert = datetime.datetime.strptime(
        create_time, "%d%b%Y").strftime("%Y-%m-%d")

    article_id = url.split("/")[-1].split("-")[-1]
    news_id = "AFP-world-" + article_id

    create_time = news_html.find(
        "time", id="articleContentHeaderTime")["datetime"].split("T")[0]
    news_id = "yomiuri-" + url.split("/")[-1]
    news_tag = "World"

    try:
        img_link = news_html.find(
            "img", id="articleContentHeaderFigureImg")["src"]
    except Exception:
        logerror("yomiuri_news_image_KeyError: " + url)
        continue

    article = news_html.find("div", id="articleContentBody")
    content = []
    for p in article.find_all("p"):
        content.append(p.text)
    news_content = "".join(content)
    news_keywords = "None"

    # Key by news_id so a repeated article overwrites the earlier entry instead of
    # being stored twice.
    content_items = {
        news_id: {