def get_focus_of_attention(videopath, frames):
    """Run the full focus-of-attention pipeline on a video.

    Creates an ``output`` folder next to the input video, then runs the
    stages in order: frame extraction, optical flow, segmentation,
    prediction, visualization, and final video assembly.

    Args:
        videopath: Path to the input video file (forward-slash separated).
        frames: Number of frames to process in each stage.
    """
    def _banner(stage):
        # Uniform progress banner; replaces three copy-pasted prints per stage.
        print("///////////////////")
        print(stage + " Begin")
        print("///////////////////")

    # Directory containing the video; computed once instead of twice.
    path = videopath[:videopath.rfind('/')]
    mean_path = join(path, "mean_frame.png")
    outputfolder = join(path, "output")
    if not os.path.exists(outputfolder):
        os.mkdir(outputfolder)

    _banner("getFrames")
    get_frames(frames, videopath, join(outputfolder, 'Frames'))
    _banner("OpticalFlow")
    optical_flow(outputfolder, frames)
    _banner("Segmentation")  # fixed typo: was "Segmantention"
    segment(outputfolder, frames)
    _banner("Prediction")
    predict(outputfolder, mean_path, frames)
    _banner("Visualization")
    visualize(outputfolder, frames)
    _banner("generateVideo")
    generate_video(outputfolder, frames)
def findGoal(frame, other=False):
    """Locate the goal in ``frame`` and return its bounding box.

    Segments the yellow goal when ``other`` is truthy, the blue goal
    otherwise, then returns the enclosing ``(x, y, w, h)`` rectangle.
    """
    # Choose which goal colour to segment on.
    target_color = colors[u'yellow'] if other else colors[u'blue']
    mask = segment(frame, target_color)
    # One erosion pass with a 5x5 kernel removes small speckle noise.
    erosion_kernel = np.ones((5, 5), np.uint8)
    mask = cv2.erode(mask, erosion_kernel, iterations=1)
    # Derive the bounding rectangle from the segmented mask.
    x, y, w, h = segToRect(mask)
    return (x, y, w, h)
def text_to_gpt3():
    """Flask endpoint: segment posted text into chunks and forward them.

    Expects a JSON body with ``secret``, ``name``, ``userId``, ``tags``
    and ``text``. On success the chunks are POSTed to the cloud-function
    endpoint and returned to the caller as a JSON string.
    """
    request_json = flask.request.json
    # check to see if the secret key is valid
    # NOTE(review): hard-coded shared secret — consider moving to config/env.
    secret_key = "CONGPilDnoMinEThonYAnkoLViTypOlmStOd"
    secret = request_json["secret"]
    if secret != secret_key:
        return {"Error" : "Secret Key is Invalid"}
    try:
        name = request_json["name"]
        userId = request_json["userId"]
        tags = request_json["tags"]
        text = request_json["text"]
    except KeyError:  # narrowed from bare except: only missing keys expected here
        return {"code": 400, "error": "Bad request, missing arguments"}
    bestTextChunks = segment(text)
    url = "https://us-central1-vershio-hawt.cloudfunctions.net/end"
    data = {
        "secret" : secret_key,
        "name" : name,
        "userId" : userId,
        # BUG FIX: was ``betterTextChunks`` — an undefined name that raised
        # NameError on every request; the segmented chunks are ``bestTextChunks``.
        "nuggets" : bestTextChunks,
    }
    requests.post(url=url, data=data)
    return json.dumps(bestTextChunks)
def findLines(frame, obs=None, goal=None):
    """Detect, filter, merge, and classify the field lines in ``frame``.

    Args:
        frame: Input image.
        obs: Pre-computed obstacles; detected from ``frame`` when omitted.
        goal: Pre-computed goal box; detected from ``frame`` when omitted.

    Returns:
        The list of identified line dictionaries from ``identifyLines``.
    """
    # If the caller did not give us an obs or goal, get our own.
    # (``is None``, not ``== None`` — identity check per PEP 8.)
    if obs is None:
        obs = findObstacle(frame)
    if goal is None:
        goal = findGoal(frame)
    # Segment the green field
    seg = segment(frame, colors[u'green'], True)
    # Get the raw line segments from FLD
    lines, seg = FLD(seg)
    # Order the lines based off of the x coord
    lines = orderStartEnd(lines)
    # Remove any lines within the obstacles
    lines = removeObsLines(obs, lines)
    # Merge near-parallel, near-touching segments into single lines.
    PAR_THRESH = .1
    POINT_THRESH = 135
    DROP = 250
    lines = mergeLines(lines, PAR_THRESH, POINT_THRESH, DROP)
    # Identify (label) the lines in the image
    lines = identifyLines(lines, frame, goal)
    return lines
def identifyLines(lines, frame, goal=None):
    """Convert raw line tuples to dicts and classify each line's type.

    Args:
        lines: Iterable of ``(x1, y1, x2, y2)`` line segments.
        frame: Image the lines came from (used for gray segmentation).
        goal: Pre-computed goal box; detected from ``frame`` when omitted.

    Returns:
        List of line dicts with ``x1``/``y1``/``x2``/``y2`` and a ``type`` key.
    """
    # ``is None``, not ``== None`` — identity check per PEP 8.
    if goal is None:
        goal = findGoal(frame)
    # Convert each tuple into a dict with an initially unknown type.
    newLines = [
        {
            u'x1': line[0],
            u'y1': line[1],
            u'x2': line[2],
            u'y2': line[3],
            u'type': u'unknown',
        }
        for line in lines
    ]
    # Classify in stages: outer field, inner field, center, goal, circle.
    graySeg = segment(frame, colors[u'gray'])
    newLines = findOuterField(newLines, graySeg)
    newLines = findInnerField(newLines)
    newLines = findCenterLines(newLines)
    newLines = findGoalLines(newLines, goal)
    newLines = findCircleLines(newLines)
    return newLines
def publish(uuid, content, parent=None, type='normal'):
    """Store a new message document owned by user ``uuid``.

    For ``type == 'forward'`` the parent message must exist; only its id
    is stored (never the embedded message), so deleting the parent stays
    simple. Optionally indexes search keywords when search is enabled.
    """
    u = get_user(uuid)
    # We should not embed the parent message, otherwise it'd be too difficult
    # to delete the parent.
    if type == 'forward':
        m = messages.find_one(ObjectId(parent))
        if not m:
            return error.invalid_message_id(raw=True)
    doc = {
        'owner': ObjectId(uuid),
        'content': content,
        'timestamp': utcnow(),
        'entities': contentparser.parse(content),
        # FIX: replaced fragile ``cond and a or b`` idiom with a real
        # conditional expression (same result here, but the and/or form
        # breaks whenever the "true" value is falsy).
        'parent': ObjectId(parent) if parent else None,
        'type': type
    }
    if conf.enable_search:
        doc['keywords'] = list(set(seg.segment(content)))
    # TODO check result
    messages.save(doc)
    return {'success': 1}
def publish(uuid, content, parent=None, type="normal"):
    """Store a new message document owned by user ``uuid``.

    For ``type == "forward"`` the parent message must exist; only its id
    is stored (never the embedded message), so deleting the parent stays
    simple. Optionally indexes search keywords when search is enabled.
    """
    u = get_user(uuid)
    # We should not embed the parent message, otherwise it'd be too difficult
    # to delete the parent.
    if type == "forward":
        m = messages.find_one(ObjectId(parent))
        if not m:
            return error.invalid_message_id(raw=True)
    doc = {
        "owner": ObjectId(uuid),
        "content": content,
        "timestamp": utcnow(),
        "entities": contentparser.parse(content),
        # FIX: replaced fragile ``cond and a or b`` idiom with a real
        # conditional expression (same result here, but the and/or form
        # breaks whenever the "true" value is falsy).
        "parent": ObjectId(parent) if parent else None,
        "type": type,
    }
    if conf.enable_search:
        doc["keywords"] = list(set(seg.segment(content)))
    # TODO check result
    messages.save(doc)
    return {"success": 1}
def audio_to_text():
    """Flask endpoint: transcribe a video's audio and forward text chunks.

    Expects a JSON body with ``secret``, ``name``, ``userId``, ``tags``
    and ``videoUrl``. Converts the video to WAV, transcribes it, segments
    the text, POSTs the chunks to the cloud-function endpoint, and
    returns them.
    """
    request_json = flask.request.json
    # check to see if the secret key is valid
    # NOTE(review): hard-coded shared secret — consider moving to config/env.
    secret_key = "CONGPilDnoMinEThonYAnkoLViTypOlmStOd"
    secret = request_json["secret"]
    if secret != secret_key:
        return {"Error": "Secret Key is Invalid"}
    try:
        name = request_json["name"]
        userId = request_json["userId"]
        tags = request_json["tags"]
        videos = request_json["videoUrl"]
    except KeyError:  # narrowed from bare except: only missing keys expected here
        return {"code": 400, "error": "Bad request, missing arguments"}
    # FIX: removed unused local ``vid_file = "output.mp4"`` — the actual
    # audio filename is built from the conversion token below.
    token, _ = convert_mp4_to_wav(videos)
    audio_file = f"output{token}.wav"
    text = text_from_large_clip(audio_file)
    betterTextChunks = segment(text)
    url = "https://us-central1-vershio-hawt.cloudfunctions.net/end"
    data = {
        "secret": secret_key,
        "name": name,
        "userId": userId,
        "nuggets": betterTextChunks,
    }
    requests.post(url=url, data=data)
    return betterTextChunks
def search(uuid, query=None, newerThan=None, olderThan=None):
    """Keyword search over stored messages, newest first.

    ``uuid`` is currently unused (passed through to ``_process_messages``).
    ``newerThan``/``olderThan`` bound the result timestamps when given.

    FIX: the original had this docstring stranded mid-function (after the
    first statement), where it was a no-op string expression.
    """
    if not conf.enable_search:
        return error.search_not_enabled(raw=True)
    keywords = seg.segment(query)
    # basic query: every keyword must match
    query = {"keywords": {"$all": keywords}}
    # setup time constraints
    if olderThan or newerThan:
        query["timestamp"] = {}
        if olderThan:
            query["timestamp"]["$lt"] = olderThan
        if newerThan:
            query["timestamp"]["$gt"] = newerThan
    # then execute the query
    c = (messages.find(query)
         .sort("timestamp", pymongo.DESCENDING)
         .batch_size(conf.stream_item_max))
    return _process_messages(uuid, c)
def ocr():
    """Flask endpoint: OCR a list of image URLs and forward the text chunks.

    Expects a JSON body with ``secret``, ``imageUrls``, ``name``,
    ``userId`` and ``tags``. Runs Tesseract over every image, segments
    the concatenated text, POSTs the chunks to the cloud-function
    endpoint, and returns them as a JSON string.
    """
    request_json = flask.request.json
    # Check the shared secret before doing any work.
    # NOTE(review): hard-coded shared secret — consider moving to config/env.
    secret = request_json["secret"]
    if secret != "CONGPilDnoMinEThonYAnkoLViTypOlmStOd":
        return {"code": 401, "error": "Unauthorized"}
    try:
        images = request_json["imageUrls"]
        name = request_json["name"]
        userId = request_json["userId"]
        tags = request_json["tags"]
    except KeyError:  # narrowed from bare except: only missing keys expected here
        return {"code": 400, "error": "Bad request, missing arguments"}
    # ``not images`` also rejects None/other empty values, not just [].
    if not images:
        return {"code": 400, "error": "Bad request, missing images"}
    # OCR every image and accumulate the recognised text.
    text = ""
    for imageurl in images:
        image = Image.open(urllib.request.urlopen(imageurl))
        localText = pytesseract.image_to_string(image)
        text += localText
    segText = segment(text)
    url = "https://us-central1-vershio-hawt.cloudfunctions.net/end"
    data = {
        "secret": "CONGPilDnoMinEThonYAnkoLViTypOlmStOd",
        "name": name,
        "userId": userId,
        "nuggets": segText,
    }
    requests.post(url=url, data=data)
    return json.dumps(segText)
def search(uuid, query=None, newerThan=None, olderThan=None):
    '''Keyword search over stored messages, newest first.

    ``uuid`` is currently unused (passed through to ``_process_messages``).
    ``newerThan``/``olderThan`` bound the result timestamps when given.

    FIX: the original had this docstring stranded mid-function (after the
    first statement), where it was a no-op string expression.
    '''
    if not conf.enable_search:
        return error.search_not_enabled(raw=True)
    keywords = seg.segment(query)
    # basic query: every keyword must match
    query = {'keywords': {'$all': keywords}}
    # setup time constraints
    if olderThan or newerThan:
        query['timestamp'] = {}
        if olderThan:
            query['timestamp']['$lt'] = olderThan
        if newerThan:
            query['timestamp']['$gt'] = newerThan
    # then execute the query
    c = messages.find(query) \
        .sort('timestamp', pymongo.DESCENDING) \
        .batch_size(conf.stream_item_max)
    return _process_messages(uuid, c)
def plot():
    """Render the segment-comparison demo chart and save it as ``demo.jpg``.

    Row 2 shows the segments produced by ``segment``; row 1 shows the
    generated mix, with speaker-'A' spans drawn in red.
    """
    mix = generate_mix()
    segs = segment('train_A.mdl', 'mix.wav')
    plt.figure(figsize=(10, 3))
    # Draw back-to-front so earlier entries end up painted on top.
    for entry in reversed(segs):
        plt.barh(2, entry[1], color='g')
        plt.barh(2, entry[0], color='w')
    for entry in reversed(mix):
        plt.barh(1, entry[1], color='r' if entry[2] == 'A' else 'g')
    plt.legend(("user", "service"))
    axis = plt.gca()
    # Hide y tick labels and marks — the two rows are explained by the legend.
    plt.setp(axis.get_yticklabels(), visible=False)
    axis.yaxis.set_ticks_position('none')
    legend = axis.get_legend()
    legend.legendHandles[1].set_color('red')
    plt.savefig("demo.jpg")
def get_tags(d):
    """Collect the distinct search keywords found in ``d``'s known columns.

    Segments the value of every column (from ``column_names()``) present
    in ``d`` and returns the de-duplicated keywords as a list.
    """
    tags = set()
    for col in column_names():
        # BUG FIX: ``dict.has_key()`` was removed in Python 3 (this file
        # uses f-strings elsewhere, so it targets Python 3); use ``in``.
        if col in d:
            tags.update(seg.segment(d[col]))
    return list(tags)
new_lis.append(np.hstack(x)) frame = np.vstack(new_lis) frame = downscale(frame,.3) return frame frame = read(1) # Display the output while True: blue = {} blue[u'low_H'] = low_H blue[u'low_S'] = low_S blue[u'low_V'] = low_V blue[u'high_H'] = high_H blue[u'high_S'] = high_S blue[u'high_V'] = high_V seg = segment(frame,blue) cv2.imshow(u'hsv',frame) cv2.imshow(u'org',seg) key = cv2.waitKey(30) if key == 27:#if ESC is pressed, exit loop cv2.destroyAllWindows() break