def post(self):
    """Store a new Caption for a competition from the submitted form.

    Reads ``competitionId``, ``caption`` and ``author`` from the request.
    Texts longer than 140 characters render the oops template; otherwise
    the caption is saved and the user is redirected to /success.  Any
    unexpected failure redirects to /oops.
    """
    try:
        competition_key = Competition.get_by_id(
            int(self.request.get('competitionId'))).key()
        caption_text = cgi.escape(self.request.get('caption'))
        author_text = cgi.escape(self.request.get('author'))
        if len(caption_text) > 140 or len(author_text) > 140:
            template_values = {"msg": "too many characters"}
            path = os.path.join(os.path.dirname(__file__), 'templates/oops.html')
            self.response.out.write(template.render(path, template_values))
        else:
            caption = Caption(text=caption_text,
                              author=author_text,
                              competitionKey=competition_key,
                              dateCreated=datetime.datetime.now())
            caption.put()
            self.redirect("/success")
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit /
        # KeyboardInterrupt.  Still broad on purpose — any bad input or
        # datastore failure falls back to the oops page, as before.
        self.redirect("/oops")
def get(self, id=None):
    """Render one caption page when ``id`` is given, otherwise the list of
    the 100 most recent captions."""
    template_values = {}
    if id:
        caption = Caption.get_by_id(int(id))
        template_values['caption'] = caption
        template_values['comp'] = caption.competitionKey.photoKey
        template_values['photoId'] = caption.competitionKey.photoKey.key().id()
        template_values['id'] = id
        template_values['author'] = caption.author
        template_name = 'templates/caption.html'
    else:
        recent = Caption.all().order("-dateCreated").fetch(limit=100)
        template_values['captions'] = [
            {'text': c.text,
             'author': c.author,
             'comp': c.competitionKey,
             'caption': c.key().id(),
             'dateCreated': c.dateCreated}
            for c in recent
        ]
        template_name = 'templates/captions.html'
    path = os.path.join(os.path.dirname(__file__), template_name)
    self.response.out.write(template.render(path, template_values))
def get(self, id=None, page=0, json=None):
    """Render a competition page, or its approved captions as JSON.

    Bug fix: ``offset`` was computed from ``page`` but never passed to the
    datastore query, and the query fetched 100 rows while the pagination
    check (``len == 11`` then ``pop(10)``) clearly expected pages of 10
    plus one look-ahead row — so every page showed the same results.  The
    query now fetches 11 rows at the computed offset.
    """
    offset = 0
    p = 0
    if page:
        offset = 10 * int(page)
        p = page
    template_values = {}
    if not id:
        # No id: show the most recently created competition.
        competition = Competition.all().order("-dateCreated").get()
    else:
        competition = Competition.get_by_id(int(id))
    photo = competition.photoKey  # photo instance for this competition
    showPagination = False
    # Fetch one extra row (the 11th) purely to detect whether a next page exists.
    captions = (Caption.all()
                .filter('competitionKey =', competition)
                .filter('approved =', True)
                .fetch(limit=11, offset=offset))
    if len(captions) == 11:
        showPagination = True
        p = p + 1
        captions.pop(10)  # drop the look-ahead row; show only 10 per page
    if json == 'json':
        caps = [{'text': "%s" % c.text,
                 "author": "%s" % c.author,
                 "comp": "%s" % c.competitionKey,
                 "caption": "%s" % c.key().id(),
                 "dateCreated": "%s" % c.dateCreated}
                for c in captions]
        self.response.headers['Content-Type'] = "application/json"
        self.response.out.write(simplejson.dumps([caps, showPagination, p]))
    else:
        caps = [{'text': c.text,
                 "author": c.author,
                 "comp": c.competitionKey,
                 "caption": c.key().id(),
                 "dateCreated": c.dateCreated}
                for c in captions]
        # id from photo instance, to pass later in the photo URL
        template_values['img'] = "/photo/%s" % photo.key().id()
        template_values['title'] = photo.title
        template_values['description'] = photo.description
        template_values['competitionId'] = competition.key().id()
        template_values['complete'] = competition.complete
        template_values['captions'] = caps
        template_values['showPagination'] = showPagination
        template_values['page'] = p
        template_values['divWidth'] = len(captions) * 437  # px per caption card
        path = os.path.join(os.path.dirname(__file__), 'templates/competition.html')
        self.response.out.write(template.render(path, template_values))
def index():
    """Flask view: latest contest plus the caption-submission form.

    On a valid POST the caption is stored, a flash message is queued, and
    the user is redirected back here (post/redirect/get).
    """
    contest = Contest.query.order_by(Contest.id.desc()).first()
    form = CaptionForm()
    if form.validate_on_submit():
        db.session.add(Caption(text=form.caption.data,
                               timestamp=datetime.utcnow(),
                               user=g.user,
                               contest_id=contest.id))
        db.session.commit()
        flash("You've submitted your caption! Compete for real on newyorker.com")
        return redirect(url_for('index'))
    # user_ids that already captioned this contest (template disables re-entry)
    return render_template('index.html',
                           title="Cartoon Home",
                           form=form,
                           contest=contest,
                           path=IMAGE_PATH,
                           user_captions=[c.user_id for c in contest.captions])
def post(self):
    """Handle a caption or drawing submission for a thread.

    Enforces turn-taking: the same user may not make two consecutive
    edits, and a caption may not directly follow a caption.  Stores the
    new content plus an Edit record, then renders the confirmation page.

    Bug fixes: (1) the same-user check redirected but did not ``return``,
    so the edit was stored anyway; (2) ``edit_entity_list[0]`` raised
    IndexError on a brand-new thread with no edits; (3) the
    caption-after-caption check compared ``lastEdit.key.kind()`` — always
    "Edit" — instead of the kind of the content the edit points at, so it
    never fired; (4) leftover debug prints removed.
    """
    user = users.get_current_user()
    if not user:
        self.redirect("/welcome")
        return
    teleUser = TeleUser.get_by_id(user.user_id())
    if not teleUser:
        teleUser = TeleUser.fromGSI(user=user)
        teleUser.put()
    edit_type = self.request.get("request_type")
    thread_id = int(self.request.get("thread_id"))
    thread_entity_list = Thread.query().filter(Thread.thread_id == thread_id).fetch()
    if thread_entity_list:
        thread = thread_entity_list[0]
    else:
        thread = Thread(thread_id=thread_id)
    edit_entity_list = Edit.query().filter(Edit.thread == thread.key).fetch()
    edit_entity_list.sort(key=lambda x: x.addition.get().date, reverse=True)
    # A brand-new thread has no edits yet, so there may be no "last edit".
    lastEdit = edit_entity_list[0] if edit_entity_list else None
    if lastEdit is not None and lastEdit.user == teleUser.key:
        # Same user may not edit twice in a row.
        self.redirect("/edit?key=" + str(thread.key.id()))
        return
    # A caption may not follow a caption (drawings must alternate in).
    caption_was_last = (lastEdit is not None
                        and lastEdit.addition.kind() == "Caption")
    if edit_type == "caption":
        if caption_was_last:
            self.redirect("/home?request=failed")
            return
        new_caption = Caption(content=self.request.get("caption"))
        content_key = new_caption.put()
        thread.captions.append(content_key)
    elif edit_type == "drawing":
        if caption_was_last:
            self.redirect("/home?request=failed")
            return
        # Drawing arrives as a data: URL; decode the base64 PNG payload.
        drawingDataUrl = self.request.get("drawing")
        img_data = drawingDataUrl.split('data:image/png;base64,')[1]
        img = Image.open(BytesIO(base64.b64decode(img_data)))
        output = StringIO()
        img.save(output, format=img.format)
        new_drawing = Drawing(content=output.getvalue())
        content_key = new_drawing.put()
        thread.drawings.append(content_key)
    else:
        self.response.write("oof!")
        return
    thread_key = thread.put()
    # Re-query BEFORE storing the new edit, so last_edit is the previous one.
    edit_entity_list = Edit.query().filter(Edit.thread == thread_key).fetch()
    edit_entity_list.sort(key=lambda x: x.addition.get().date, reverse=True)
    new_edit = Edit(user=teleUser.key, thread=thread_key, addition=content_key)
    new_edit.put()
    # First edit of a new thread: fall back to the edit just created.
    last_edit = edit_entity_list[0] if edit_entity_list else new_edit
    confirmation_template = the_jinja_env.get_template("confirmation.html")
    self.response.write(confirmation_template.render({
        "user_info": user,
        "thread": thread,
        "last_edit": last_edit,
        "new_edit": new_edit,
    }))
def classify_content(content):
    """Run object detection over every asset in *content*.

    For each asset the image is fetched and classified; each detection at
    or above the configured confidence threshold is recorded as a Caption
    on the asset's Result, the detection is cropped and (when
    ``content.insert`` is set) indexed for similarity search, and similar
    assets are attached as suggestions.

    Returns a Response whose ``result_list`` holds one Result per
    successfully processed asset (unreachable/unreadable images are
    skipped with a log message).
    """
    # Generator that knows how to fetch and preprocess the asset URLs.
    urls = [asset.url for asset in content.assets]
    val_generator = UrlGenerator(urls,
                                 cfg.resolve(cfg.RETINANET_MODEL, cfg.classes_file),
                                 cfg.resolve(cfg.RETINANET_MODEL, cfg.labels_file))
    response = Response()
    # Hoisted out of the per-detection loop: the threshold is invariant.
    min_confidence = cfg.resolve_float(cfg.CLASSIFICATION, cfg.min_confidence)
    for i, asset in enumerate(content.assets):
        logger.info('Running classification on: {}'.format(asset.url))
        result = Result()
        result.url = asset.url
        result.asset_id = asset.asset_id
        logger.info('Reading image bgr...')
        try:
            image = val_generator.read_image_bgr(i)
            # index original image for searching
            if content.insert:
                index_original_image(image, asset)
        except (OSError, ConnectTimeout, HTTPError, ReadTimeout, Timeout,
                ConnectionError):
            logger.warning('Skipped: Unable to reach resource')
            continue
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.
            logger.error('Could not read image: {}'.format(traceback.format_exc()))
            continue
        # Copy to draw on, converted to RGB for cropping/display.
        logger.info('Drawing cvt color...')
        draw = np.asarray(image.copy())
        draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)
        # Pre-process the image for the network.
        logger.info('Processing image...')
        image = val_generator.preprocess_image(image)
        image, scale = val_generator.resize_image(image)
        start = time.time()
        boxes, scores, labels = core.model.predict_on_batch(
            np.expand_dims(image, axis=0))
        elapsed = time.time() - start
        logger.info('Processing time: {}'.format(elapsed))
        result.time = str(elapsed)
        boxes /= scale  # map boxes back to original image coordinates
        # `idx` counts only KEPT detections — it names the crop files and
        # index slots, exactly as before.
        idx = 0
        for box, score, label in zip(boxes[0], scores[0], labels[0]):
            if score < min_confidence:
                continue
            # Bug fix: the original re-read `boxes[0, idx, :4]`, but `idx`
            # counts only kept detections, so after any low-confidence skip
            # every later crop used the wrong box.  Use this iteration's box.
            box = box[:4].astype(int)
            label_name = val_generator.label_to_name(label)
            # Meta-info for the REST API response.
            caption = Caption(str(label), label_name, str(score),
                              '{};{}'.format(box[0], box[1]),   # x1;y1
                              '{};{}'.format(box[2], box[3]))   # x2;y2
            result.captions.append(caption)
            # Crop the detection region for extraction.
            cropped_img = draw[box[1]:box[3], box[0]:box[2]]
            if content.insert:
                # Update sequence to remove previous index if available.
                remove_cropped_if_asset_exists(asset)
            # Process the cropped fragment for similarity search.
            cropped_file_name = index_cropped_image(asset, cropped_img,
                                                    label_name, idx,
                                                    insert=content.insert)
            features = extract_features(cropped_file_name)
            faiss_features = features.reshape(
                (1, cfg.resolve_int(cfg.FAISS_SETTINGS, cfg.index_size)))
            if content.insert:
                # Add feature to the faiss index.
                core.index.add(faiss_features)
            else:
                # Clean the temp image again.
                os.remove(cropped_file_name)
            if content.insert:
                index_asset_meta(asset, idx, caption, features.tolist(),
                                 core.index.ntotal - 1)
            # Find similar suggestions and attach them to the result.
            asset_metas = get_similar_asset_metas(
                faiss_features,
                cfg.resolve_int(cfg.FAISS_SETTINGS, cfg.index_n_similar_results))
            handle_suggestion_response(result, asset.asset_id, asset_metas)
            idx += 1
        response.result_list.append(result)
    return response