def captionImage(url):
    """Return a CaptionBot description of the image at *url*.

    The first two words of the raw caption (CaptionBot's lead-in,
    e.g. "I think") are stripped before returning.
    """
    bot = CaptionBot()
    raw = bot.url_caption(url)
    # Drop the leading two words, keep the rest of the sentence.
    return " ".join(raw.split(" ")[2:])
def describe():
    """Flask endpoint: save an uploaded image, then either caption it
    or answer a visual question about it.

    Form fields:
        type     -- '1' = caption the image, '2' = visual question answering
        question -- the question text (required when type == '2')
    Files:
        imagefile -- the uploaded image

    Returns a JSON response containing at least 'message' and 'type'.
    """
    msgtype = request.form['type']
    print(str(msgtype) + "is message ")
    imagefile = request.files['imagefile']
    filename = secure_filename(imagefile.filename)
    imagefile.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
    print(filename)
    filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    print(filepath)
    if msgtype == '1':
        c = CaptionBot()
        print("caption bot running")
        result = c.file_caption(filepath)
        print(result)
        return jsonify({
            'message': result,
            'type': msgtype,
            'filename': filename,
            'filepath': filepath
        })
    elif msgtype == '2':
        question = request.form['question']
        # Establish a connection to the VQA service (result unused).
        # NOTE(review): `url` and `header` look like module-level globals
        # defined elsewhere in this file — confirm.
        response = requests.get(url, headers=header)
        # Upload the image and read back its server-side id.
        url1 = 'http://vqa.daylen.com/api/upload_image'
        # Use a context manager so the file handle is closed (was leaked).
        with open(filepath, 'rb') as img:
            response = requests.post(url1, files={'file': img}, headers=header)
        data = json.loads(response.text)
        im_id = data['img_id']
        # Was Python 2 `print im_id` — a SyntaxError on Python 3, which the
        # rest of this function targets.
        print(im_id)
        # Submit the question against the uploaded image.
        url2 = 'http://vqa.daylen.com/api/upload_question'
        datas = {'img_id': im_id, 'question': question}
        response = requests.post(url2, data=datas, headers=header)
        print(response.text)
        data = json.loads(response.text)
        return jsonify({'message': data[u'answer'], 'type': '2'})
    else:
        return jsonify({'message': 'error in type ', 'type': '1'})
def captioning(video_id, scenes):
    """Caption each scene of *video_id*, caching results on disk.

    scenes: sequence of pairs whose first item is an image path and whose
    second item is the scene start time.
    Returns a list of {'start_time': ..., 'caption': ...} dicts.
    """
    print('Captioning {}'.format(video_id))
    started_at = datetime.now()
    bot = CaptionBot()
    cache_path = 'cached/{}.data'.format(video_id)
    if os.path.isfile(cache_path):
        # Cache hit: load and replay the stored captions.
        with open(cache_path, 'rb') as fh:
            result = pickle.load(fh)
        for entry in result:
            print('At {}: {}'.format(int(entry['start_time']), entry['caption']))
    else:
        result = []
        for scene in scenes:
            text = bot.file_caption(scene[0])
            print('At {}: {}'.format(int(scene[1]), text))
            result.append({'start_time': scene[1], 'caption': text})
        with open(cache_path, 'wb') as fh:
            pickle.dump(result, fh)
    print('Captioning: {} seconds'.format(datetime.now() - started_at))
    return result
class Captioner:
    """Thin wrapper around CaptionBot that remembers the latest caption,
    so callers can re-read it without re-captioning."""

    def __init__(self):
        self.caption_bot = CaptionBot()
        self.prev_caption = ''

    def caption_img(self, frame, get_prev=False):
        """Caption *frame* (a cv2 image array).

        With get_prev=True, skip the service call and return the caption
        from the previous invocation instead.
        """
        if not get_prev:
            snapshot = 'to_caption.jpg'
            # CaptionBot works on files, so write the frame out first.
            cv2.imwrite(snapshot, frame)
            self.prev_caption = self.caption_bot.file_caption(snapshot)
        return self.prev_caption
def getDescription():
    """Flask endpoint: describe an image URL POSTed in the request body.

    The body's first 5 bytes are a prefix that is stripped; the rest is
    the image URL. Returns JSON {"success": <description>} built from,
    in priority order:
      1. text detected in the image (returned immediately when present),
      2. otherwise a CaptionBot caption, augmented with face attributes
         from the Microsoft Face API when a face is detected.
    """
    if request.method == "POST":
        img = request.data[5:]  # strip the payload prefix before the URL
        data = img
        c = CaptionBot()
        caption = c.url_caption(str(data))
        r = {"success": caption}
        lines = getText(str(data))
        text = ""
        if lines:
            text = parseText(lines)
        if text:
            # Detected text takes priority over the caption.
            # (Two statements after this return — building an alternate
            # "this image has the following text: " message — were
            # unreachable dead code and have been removed.)
            response = {"success": text}
            return json.dumps(response)
        try:
            # Ask the Microsoft Face API for facial attributes.
            # NOTE(review): `httplib` is the Python 2 name; on Python 3
            # this module is `http.client` — confirm the interpreter.
            conn = httplib.HTTPSConnection('westus.api.cognitive.microsoft.com')
            body = {"url": data}
            conn.request("POST", "/face/v1.0/detect?%s" % params,
                         json.dumps(body), headers)
            response = conn.getresponse()
            data = response.read()
            data = json.loads(data)
            if data:
                age = data[0]["faceAttributes"]["age"]
                gender = data[0]["faceAttributes"]["gender"]
                moustache = data[0]["faceAttributes"]["facialHair"]["moustache"]
                beard = data[0]["faceAttributes"]["facialHair"]["beard"]
                glasses = data[0]["faceAttributes"]["glasses"]
                description = " It's gender is " + gender + " with an age of " + str(age) + " years. It has a " + str(100 * float(moustache)) + " percent probability of having a moustache and " + str(100 * float(beard)) + " percent probability of having a beard. He has " + glasses
                r["success"] += description
            conn.close()
        except Exception as e:
            print(e)
        return json.dumps(r)
class ImageDescriber:
    """Telegram photo handler: captions incoming photos via CaptionBot
    and replies with a machine-translated (Russian) caption."""

    def __init__(self):
        self.captionBot = CaptionBot()  # backed by www.captionbot.ai
        self.translator = Translator()
        self.dest = 'ru'  # translation target language
        # Bot command blurb (Russian: "send_photo - I can tell you what
        # is in the picture").
        self.description = 'send_photo - Я могу рассказать, что изображено на картинке \n'

    def describeImg(self, bot, update):
        """Caption the highest-resolution photo in *update* and send the
        result back to the originating chat."""
        photo = update.message.photo[-1]
        file_info = bot.getFile(photo)
        path = file_info['file_path']
        reply = self.getImageCaption(path)
        bot.send_message(chat_id=update.message.chat_id, text=reply)

    def getImageCaption(self, imgPath):
        """Return CaptionBot's caption for *imgPath*, translated into
        the configured destination language."""
        raw = self.captionBot.url_caption(imgPath)
        translated = self.translator.translate(raw, dest=self.dest)
        return translated.text
# -*- coding: utf-8 -*- import sys from captionbot import CaptionBot import emoji # create instance of CaptionBot c = CaptionBot() for line in open('all_scrapes.csv'): line = line.rstrip() cols = line.split(',') img_url = cols[0] # if line is the header if img_url == 'img_url': print(line) continue #------------- Clean Image URL -------------# # remove quotes from img_url img_url = img_url[1:-1] caption = cols[1] #--------- Clean Instagram Caption ---------# # if there is no caption skip it if len(caption) < 1: continue # currently emojis look like (apparently called mojibake): 😘 # should turn emojis into actual emojis #caption.encode('cp1252').decode('utf-8') # now try converting emojis to aliases try:
from captionbot import CaptionBot

# Demo: caption a local image file with CaptionBot and print the result.
# (URL-based captioning via c.url_caption(...) works the same way.)
c = CaptionBot()
msg2 = c.file_caption("/Users/khanh/Desktop/bbb.jpg")
print(msg2)
import cv2
from captionbot import CaptionBot

# Stream the default webcam; press 'a' to caption the current frame,
# ESC to quit.
c = CaptionBot()
cap = cv2.VideoCapture(0)
while True:
    ok, frame = cap.read()
    if not ok:
        continue
    cv2.imshow("Stream", frame)
    key = cv2.waitKey(1)
    if key == ord('a'):
        # Snapshot the frame to disk, then ask CaptionBot to describe it.
        print("Generating Caption...")
        cv2.imwrite('image.jpg', frame)
        caption = c.file_caption('/home/aditya/Hack-a-bit2019/' + 'image.jpg')
        print(caption)
    elif key == 27:  # ESC
        break
cv2.destroyAllWindows()
cap.release()
import os import time import tarfile import glob import six.moves.urllib as urllib import cv2 from tqdm import tqdm import numpy as np import tensorflow as tf from ssd_mobilenet_utils import * from captionbot import CaptionBot c = CaptionBot() def run_detection(image, interpreter): interpreter.set_tensor(input_details[0]['index'], image) interpreter.invoke() boxes = interpreter.get_tensor(output_details[0]['index']) classes = interpreter.get_tensor(output_details[1]['index']) scores = interpreter.get_tensor(output_details[2]['index']) num = interpreter.get_tensor(output_details[3]['index']) boxes, scores, classes = np.squeeze(boxes), np.squeeze(scores), np.squeeze( classes + 1).astype(np.int32) out_scores, out_boxes, out_classes = non_max_suppression( scores, boxes, classes) # Print predictions info #print('Found {} boxes for {}'.format(len(out_boxes), 'images/dog.jpg'))
def __init__(self):
    """Set up the captioning service, the translator, and the bot's
    command blurb."""
    self.captionBot = CaptionBot()  # backed by www.captionbot.ai
    self.translator = Translator()
    self.dest = 'ru'  # captions are translated into Russian
    # Command blurb (Russian: "send_photo - I can tell you what is in
    # the picture").
    self.description = 'send_photo - Я могу рассказать, что изображено на картинке \n'
def __init__(self):
    """Start with no remembered caption and a fresh CaptionBot client."""
    self.prev_caption = ''
    self.caption_bot = CaptionBot()
import os
import json
from captionbot import CaptionBot
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.packages.urllib3.exceptions import InsecurePlatformWarning

# Silence urllib3's certificate warnings for the requests made below.
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(InsecurePlatformWarning)

c = CaptionBot()

# Template for the per-frame caption records.
json_object = {'data': {'caption': "", 'filename': "", 'film-frame': 0}}

# Sample one frame every `stepInSeconds` seconds of 25 fps footage.
fps = 25
stepInSeconds = 3
stepInFrames = fps * stepInSeconds

# Count existing frame images (.png) versus other files (assumed to be
# the JSON caption records) so processing can resume after the last one.
frame_files = 0
json_files = 0
for f in os.listdir('data/frames/'):
    parsed = f.split(".")
    # NOTE(review): assumes every directory entry contains a dot; a
    # dot-less name would raise IndexError here — confirm the directory
    # only ever holds frame/caption files.
    if parsed[1] == 'png':
        frame_files += 1
    else:
        json_files += 1
index = json_files + 1

# Was the Python 2 statement `print "Total frames:", str(frame_files)`,
# a SyntaxError on Python 3. This form prints the identical text on
# both interpreter versions.
print("Total frames: " + str(frame_files))
def captionimage(url):
    """Return CaptionBot's caption for the image at *url*."""
    bot = CaptionBot()
    return bot.url_caption(url)