def predict():
    if request.method == "POST":
        response_object = validate_token()
        if response_object['status'] == 'success':
            try:
                video = request.files['video']
                plates_like_objects, image_binary, car_image = check_video(
                    video, app.config['UPLOAD_FOLDER'])
                result_dict = get_placa(plates_like_objects, image_binary, model)
                prediction_id = uuid.uuid4()
                result_img = Image.fromarray(car_image).convert('RGB')
                # Save the annotated image
                result_img.save("./result_img/" + str(prediction_id) + ".jpg")
                # TODO: store the prediction, the image path and the user in a
                # DB table so it can be listed later in the history view
                nwPrediccion = historial(user_id=response_object['email'],
                                         placa=result_dict['placa'],
                                         foto=str(prediction_id))
                db.session.add(nwPrediccion)
                db.session.commit()
                base64img = encode(car_image)
                # Clean up the output and uploads folders
                delete_files_in_directory(app.config['UPLOAD_FOLDER'])
                delete_files_in_directory('output')
                result_dict["img"] = base64img
                return make_response(jsonify(result_dict)), 200
            except Exception:
                return make_response(jsonify({"status": "failure"})), 400
        else:
            return make_response(jsonify(response_object)), 401
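# A minimal sketch of the encode() helper used by predict(), callALPR() and
# user_history() below. This is an assumption, not the project's actual code:
# it serializes a numpy image array to a base64 JPEG string for JSON transport.
import base64
import io

import numpy as np
from PIL import Image


def encode(image_array):
    # Write the array to an in-memory JPEG, then base64-encode the bytes
    buf = io.BytesIO()
    Image.fromarray(np.asarray(image_array)).convert('RGB').save(buf, format='JPEG')
    return base64.b64encode(buf.getvalue()).decode('ascii')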
def get_content(): vh_title = "yohoho." list_li = [] VALUES["kinopoisk"] = _kp_id_ response = tools.get_response(URL, HEADERS, VALUES, 'POST') if response: jdata = json.loads(response) for host in ENABLED_HOSTS: host_data = jdata[host] if host_data: iframe = host_data["iframe"] translate = host_data["translate"] quality = host_data["quality"] #{"vodlocker":{}, # "hdgo":{"iframe":"https://hdgo.cx/video/oSlSCtQ0t8apv6vJGD1va2xbKTd9k8YC/17223/","translate":"Дублированный","quality":"плохое TS"}, # "iframe":{"iframe":"https://videoframe.at/movie/2eb6408pc8p/iframe","translate":"Полное дублирование","quality":"TS"}, # "torrent":{"iframe":"https://4h0y.yohoho.cc/?title=%D1%85%D0%B8%D1%89%D0%BD%D0%B8%D0%BA"}, # "hdbaza":{"iframe":"https://vidozzz.com/iframe?mh=bbd8ed61c2256ea4&uh=65bd8ef1126daa6f","translate":"Viruseproject","quality":""}, # "kodik":{"iframe":"https://kodik.cc/video/15298/6f7fcc06b4e7d51f4ff574af5a59115e/720p","translate":"Проф. Многоголосый","quality":"BDRip 720p"}, # "trailer":{"iframe":"https://hdgo.cx/video/trailer/oSlSCtQ0t8apv6vJGD1va2xbKTd9k8YC/17223/"}, # "moonwalk":{"iframe":"https://streamguard.cc/video/d9419273b3fea0ef15980f70e35cc078/iframe?show_translations=1","translate":"Дубляж","quality":""}} title_ = "*T*" title = "[COLOR=orange][{0}][/COLOR] {1} ({2})".format( vh_title + host, tools.encode(title_), translate + "," + quality) uri = sys.argv[0] + "?mode=show&url={0}".format( urllib.quote_plus(prepare_url(host, iframe))) item = xbmcgui.ListItem(title) list_li.append([uri, item, True]) return list_li
def get_content(part): vh_title = "hdgo.club" list_li = [] VALUES["search"] = _kp_id_ HEADERS["Referer"] = URL + part response = get_response(URL + part, HEADERS, VALUES, 'POST') if response: try: arr = common.parseDOM(response, "div", attrs={"class": "li_gen"}) for item in arr: try: divs = common.parseDOM(item, "div") url_ = "http:" + common.parseDOM(item, "a", attrs={"class": "btn-primary"}, ret="href")[0] except: continue url = prepare_url(url_) title_ = tools.strip(divs[1].split("\n")[0]) + add_title_info(divs) title = "[COLOR=orange][{0}][/COLOR] {1}".format(vh_title, tools.encode(title_)) uri = sys.argv[0] + "?mode=show&url={0}&title={1}".format(urllib.quote_plus(url), urllib.quote_plus(title)) item = xbmcgui.ListItem(title) list_li.append([uri, item, True]) except: pass return list_li
def callALPR(image):
    base64image = encode(image)
    SECRET_KEY = 'sk_d6c76f5e23014eff289f1313'
    url = ('https://api.openalpr.com/v3/recognize_bytes'
           '?recognize_vehicle=1&country=us&secret_key=%s') % SECRET_KEY
    r = requests.post(url, data=base64image, verify=False)
    output_dict = r.json()
    return {"placa": output_dict['results'][0]['plate'], "flag_alpr": True}
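# Hedged usage sketch for callALPR(). Note that output_dict['results'] is
# empty when OpenALPR finds no plate, so the indexing above raises an
# IndexError a caller may want to guard for. The image path and the imread
# import are illustrative, not taken from the project.
from imageio import imread

image = imread('car.jpg')
try:
    result = callALPR(image)
    print(result['placa'])
except IndexError:
    print('no plate detected')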
def user_history():
    response_object = validate_token()
    if response_object['status'] == 'success':
        lst_predicciones = historial.query.filter_by(
            user_id=response_object["email"])\
            .order_by(historial.id.desc()).all()
        for prediccion in lst_predicciones:
            car_image_output = imread("./result_img/" + prediccion.foto + ".jpg")
            base64img = encode(car_image_output)
            prediccion.foto = base64img
        return make_response(
            jsonify(results=[elem.serialize() for elem in lst_predicciones])), 200
    return make_response(jsonify(response_object))
def evaluate(model, seed=1234, evaltest=True):
    """
    Run experiment
    """
    print 'Preparing data...'
    train, dev, test, scores = load_data()
    train[0], train[1], scores[0] = shuffle(train[0], train[1], scores[0],
                                            random_state=seed)

    print 'Computing training skipthoughts...'
    trainA = tools.encode(model, train[0], verbose=False, use_eos=True)
    trainB = tools.encode(model, train[1], verbose=False, use_eos=True)

    print 'Computing development skipthoughts...'
    devA = tools.encode(model, dev[0], verbose=False, use_eos=True)
    devB = tools.encode(model, dev[1], verbose=False, use_eos=True)

    print 'Computing feature combinations...'
    trainF = np.c_[np.abs(trainA - trainB), trainA * trainB]
    devF = np.c_[np.abs(devA - devB), devA * devB]

    print 'Encoding labels...'
    trainY = encode_labels(scores[0])
    devY = encode_labels(scores[1])

    with tf.device('/cpu:0'):
        print 'Compiling model...'
        lrmodel = prepare_model(ninputs=trainF.shape[1])

        print 'Training...'
        bestlrmodel = train_model(lrmodel, trainF, trainY, devF, devY, scores[1])

    if evaltest:
        print 'Computing test skipthoughts...'
        testA = tools.encode(model, test[0], verbose=False, use_eos=True)
        testB = tools.encode(model, test[1], verbose=False, use_eos=True)

        print 'Computing feature combinations...'
        testF = np.c_[np.abs(testA - testB), testA * testB]

        print 'Evaluating...'
        r = np.arange(1, 6)
        yhat = np.dot(bestlrmodel.predict_proba(testF, verbose=2), r)

        pr = pearsonr(yhat, scores[2])[0]
        sr = spearmanr(yhat, scores[2])[0]
        se = mse(yhat, scores[2])
        print 'Test Pearson: ' + str(pr)
        print 'Test Spearman: ' + str(sr)
        print 'Test MSE: ' + str(se)

        return yhat, pr, sr, se
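# A hedged sketch of what encode_labels() presumably does here, following the
# target encoding of Tai et al. (2015) commonly used with this SICK setup:
# each relatedness score y in [1, 5] becomes a sparse probability vector over
# the integers 1..5 with expectation y, which is why the prediction above is
# recovered as np.dot(predict_proba(...), np.arange(1, 6)).
import numpy as np

def encode_labels(labels, nclass=5):
    Y = np.zeros((len(labels), nclass), dtype='float32')
    for j, y in enumerate(labels):
        for i in range(nclass):
            if i + 1 == np.floor(y) + 1:
                Y[j, i] = y - np.floor(y)      # mass on ceil(y)
            if i + 1 == np.floor(y):
                Y[j, i] = np.floor(y) - y + 1  # mass on floor(y)
    return Y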
def get_content(): vh_title = "czx.to" list_li = [] response = tools.get_response(URL + '/' + str(_kp_id_) + '/', HEADERS, VALUES, 'GET') if response: iframe = common.parseDOM(response, "iframe", ret="src")[0] title_ = "*T*" title = "[COLOR=orange][{0}][/COLOR] {1}".format( vh_title, tools.encode(title_)) uri = sys.argv[0] + "?mode=show&url={0}".format( urllib.quote_plus(iframe)) item = xbmcgui.ListItem(title) list_li.append([uri, item, True]) return list_li
def summarize_trained(self, sentences):
    # Getting sentence embeddings
    vectors = tools.encode(self.model, sentences, verbose=False)
    print('Sentences have been encoded...')

    # Retrieving clusters
    n_clusters = int(np.ceil(len(vectors) ** 0.5))
    # n_clusters = int(np.ceil(SUMMARY_LENGTH))
    kmeans = KMeans(n_clusters=n_clusters, random_state=0)

    # print pca embeddings
    self.print_embeddings(vectors)

    kmeans.fit(vectors)
    avg = []
    for j in range(n_clusters):
        idx = np.where(kmeans.labels_ == j)[0]
        avg.append(np.mean(idx))

    # Choosing sentences closest to cluster centers
    closest, _ = pairwise_distances_argmin_min(kmeans.cluster_centers_,
                                               vectors)
    # Order clusters by the mean position of their member sentences so the
    # summary follows the source order
    ordering = sorted(range(n_clusters), key=lambda k: avg[k])

    # Returning summary
    summary = ' '.join([sentences[closest[idx]] for idx in ordering])
    return summary
def session_start(self):
    self.session = vk_api.VkApi(login=self.login,
                                password=self.password,
                                auth_handler=two_factor_auth_handler,
                                captcha_handler=captcha_handler,
                                config_filename='vk_config.json')
    clear_last_log()
    self.set_logfile_name()
    Logging("Trying to start session", self.logfile)
    self.session.auth()
    Logging("Session started", self.logfile)
    Logging("Getting music api", self.logfile)
    self.api = self.session.get_api()
    self.music = audio.VkAudio(self.session)
    Logging("Complete successfully", self.logfile)
    self.get_id()
    with open('user.json', 'w') as f:
        user_data = {
            'login': self.login,
            'password': tools.encode(self.password)
        }
        json.dump(user_data, f, indent=2, ensure_ascii=False)
    return self
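# Hedged sketches of the two handlers passed to VkApi above, modeled on the
# patterns in the vk_api documentation; the prompts are illustrative.
def two_factor_auth_handler():
    # Return the 2FA code and whether to remember this device
    key = input('Enter the two-factor authentication code: ')
    return key, True

def captcha_handler(captcha):
    # captcha.get_url() points at the captcha image; resubmit with the answer
    key = input('Enter the captcha from {0}: '.format(captcha.get_url()))
    return captcha.try_again(key)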
def get_content(part): vh_title = "kodik.top" list_li = [] VALUES["search"] = _kp_id_ HEADERS["Referer"] = URL + part response = tools.get_response(URL + part, HEADERS, VALUES, 'POST') if response: try: table = common.parseDOM(response, "table", attrs={"class": "table table-hover"}) tbody = common.parseDOM(table, "tbody") rows = common.parseDOM(tbody, "tr") for item in rows: try: tds = common.parseDOM(item, "td") url_ = "https:" + common.parseDOM( item, "a", attrs={"class": "btn btn-success btn-xs copypreview"}, ret="data-link")[0] except: continue url = prepare_url(url_) title_ = tools.strip(tds[0]) + " (" + tools.strip( tds[1]) + ", " + tools.strip(tds[2]) + ")" title = "[COLOR=orange][{0}][/COLOR] {1}".format( vh_title, tools.encode(title_)) uri = sys.argv[0] + "?mode=show&url={0}&title={1}".format( urllib.quote_plus(url), urllib.quote_plus(title)) item = xbmcgui.ListItem(title) list_li.append([uri, item, True]) except: pass return list_li
# Author: Shalin Luo
# Apr 14, 2016
# Load the model from tools.py and encode new sentences
import tools

input_fpath = 'data/ch2v_list'
output_fpath = 'data/vector_output'

embed_map = tools.load_googlenews_vectors()
model = tools.load_model(embed_map)

# X = ['l o v e ', 'r u n ', 'l i v e', 'e n d']
word_list = []
with open(input_fpath) as f:
    word = f.readline().strip()
    while word != '':
        word_list.append(word)
        word = f.readline().strip()

vector_list = tools.encode(model, word_list)

# output
with open(output_fpath, 'a') as f_output:
    for vector in vector_list:
        output_str = (" ".join(str(elem) for elem in vector)) + '\n'
        f_output.write(output_str)

print '*****END*****'
def run(self):
    thumbnail_dpath = os.path.join(self.clip_dpath, 'media')
    try:
        tools.mkdir_p(thumbnail_dpath)
    except OSError as err:
        wx.CallAfter(
            self.wx_parent.error_message,
            _('Permission error creating clip directory.')
        )
        return

    tools.save_safe_image(self.clip_dpath, self.bg_input_fpath)
    tools.save_safe_image(self.clip_dpath, self.pip_input_fpath)
    tools.save_thumbnails(thumbnail_dpath, self.pip_input_fpath)

    pip_width, pip_height = tools.get_pip_size(
        self.pip_input_fpath, self.pip_scale
    )

    bg_fname = tools.sanitize_path(os.path.split(self.bg_input_fpath)[1])
    pip_fname = tools.sanitize_path(os.path.split(self.pip_input_fpath)[1])

    clip_config = tools.default_clip_config()
    clip_config['name'] = self.clip_name
    clip_config['data'] = bg_fname
    clip_config['pip'] = pip_fname
    clip_config['pipwidth'] = pip_width
    clip_config['pipheight'] = pip_height
    clip_config['category'] = self.clip_category

    clip_config_buf = ''
    # name, data, pip must be the first 3 items, in order
    ordered_config_items = ['name', 'data', 'pip']
    for line in ordered_config_items:
        clip_config_buf += line + ' = ' + clip_config[line] + os.linesep
    # Add the rest of the items, whose order probably doesn't matter
    for key, value in clip_config.iteritems():
        key = unicode(key)
        value = unicode(value)
        if value and (key not in ordered_config_items):
            clip_config_buf += key + ' = ' + value + os.linesep

    clip_config_fpath = os.path.join(self.clip_dpath, 'playlist.sos')
    with open(clip_config_fpath, 'wb') as clip_config_fobj:
        clip_config_fobj.write(tools.encode(clip_config_buf))

    # # Update the SOS library
    # # Not used because updating the library doesn't automatically update
    # # the playlist panel in SOS Stream GUI, and updating the playlist
    # # panel in SOS Stream GUI the right way would be tedious.
    # # Rescanning the library automatically without refreshing the
    # # playlist panel could confuse users.
    # child = subprocess.Popen(
    #     ['/shared/sos/default/bin/scan_library'], stdout=subprocess.PIPE
    # )
    # while child.poll() is None:  # Read command output as it comes
    #     out = child.stdout.readline()
    #     print(out.strip())  # Put this in a status window later
    # # The menu can be updated, at least:
    # wish = Tkinter.Tk()
    # wish.withdraw()  # Hide Tk window. Possible with constructor?
    # try:
    #     wish.send('sos_stream_gui',
    #               'destroy',
    #               '.menubar.library')
    #     wish.send('sos_stream_gui',
    #               'mk_library_menu',
    #               '.menubar.library')
    # except Tkinter.TclError as err:
    #     pass  # SOS Stream GUI not open

    # Done
    success_message = '\n'.join(
        [
            _('Clip "{}" successfully added.'),
            _('To activate the changes, '
              'perform the following actions in SOS Stream GUI:'),
            _('Library -> Update Library...\n'
              'Library -> (all/{})')
        ]
    ).format(
        clip_config['name'],
        clip_config['category']
    )
    wx.CallAfter(self.wx_parent.information_message, success_message)
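# For reference, a hypothetical playlist.sos produced by run() above would
# begin like this (values illustrative; name, data and pip always come first):
#
#   name = My Clip
#   data = background.png
#   pip = overlay.png
#   pipwidth = 512
#   pipheight = 288
#   category = demos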
# (fragment: this first block presumably runs inside a "for f in FILES:" loop
# that builds the sentence list X)
with open(f) as file_descriptor:
    file_content = file_descriptor.read().decode("utf-8", "ignore")
    file_content = sent_tokenize(file_content)
    for sentence in file_content:
        if sentence:
            X.append(sentence.strip())

sentence_embeddings = np.empty([file_counter, 4800])
loc = base_path_to_directory + "dictionary.pkl"
saveto = base_path_to_directory + "toy.npz"
maxlen_w = 70

worddict, wordcount = vocab.build_dictionary(X)
# loc is the path where the dictionary is saved
vocab.save_dictionary(worddict, wordcount, loc)
# In train.py set 1) the path for the dictionary, 2) saveto, the path where
# the model is saved, and 3) maxlen_w
train.trainer(X, dictionary=loc, saveto=saveto, maxlen_w=maxlen_w)

# In tools.py set path_to_model=saveto from train, path_to_dictionary=dictionary
# from train, and path_to_word2vec.
embed_map = tools.load_googlenews_vectors(path_to_word2vec)
model = tools.load_model(embed_map)

if not os.path.exists(SENTENCE_EMBEDDING_FOLDER):
    os.mkdir(SENTENCE_EMBEDDING_FOLDER)

for f in FILES:
    with open(f) as file_descriptor:
        file_content = sent_tokenize(file_descriptor.read())
    document_embedding = tools.encode(model, file_content, verbose=False)
    document_embedding = np.average(document_embedding, axis=0)
    file_name = f.split('/')[-1]
    np.save(SENTENCE_EMBEDDING_FOLDER + file_name[:-4], document_embedding)
def turn(self) -> (str, int, int):
    # Fill these fields to return
    message: str = None
    message_value: int = 0
    direction: int = Direction.CENTER.value

    ant = self.game.ant
    # print(ant.health)
    x = ant.currentX
    y = ant.currentY
    base_x = self.game.baseX
    base_y = self.game.baseY
    self.turn_number = self.turn_number + 1
    self.init_dirs()

    if not self.vision:
        # print("Creating vision")
        for i in range(self.game.mapWidth):
            new_line = []
            for j in range(self.game.mapHeight):
                new_line.append([(UNKNOWN, -1)])
            self.vision.append(new_line)
        # self.vision[self.game.mapWidth-base_x-1][self.game.mapHeight-base_y-1].append((ENEMY_BASE, self.turn_number))

    cur = self.game.ant.visibleMap.cells[ant.currentX][ant.currentY]

    # Replay map knowledge shared through the chat box (2 chars per object)
    for chat in self.game.chatBox.allChats:
        msg = chat.text
        for i in range(0, len(msg), 2):
            cx, cy, obj = encode(msg[i:i+2])
            if not has_obj(obj, self.vision[cx][cy]):
                self.vision[cx][cy].append((obj, chat.turn))

    new_objs = []

    def upd(i, j, obj):
        if not has_obj(obj, self.vision[i][j]):
            new_objs.append((i, j, obj))

    if cur.resource_type == ResourceType.BREAD.value or cur.resource_type == ResourceType.GRASS.value:
        tools.last_resource = (x, y)
    if cur.type == CellType.TRAP.value and tools.has_resource == 1:
        # self.vision[tools.last_resource[0]][tools.last_resource[1]].append((WALL, self.turn_number))
        upd(x, y, BAD_TRAP)
        self.vision[x][y].append((BAD_TRAP, self.turn_number))

    tools.allied_in_range = 0
    tools.enemy_in_range = 0
    tools.has_resource = 1 if ant.currentResource and ant.currentResource.value > 0 else 0
    # print(self.turn_number, " ", tools.has_resource)

    if self.turn_number == 0:
        random_moves = []
    for i in range(self.game.mapWidth):
        for j in range(self.game.mapHeight):
            cell = self.game.ant.visibleMap.cells[i][j]
            if not cell:
                continue
            if cell.type == CellType.WALL.value:
                upd(i, j, WALL)
                self.vision[i][j].append((WALL, self.turn_number))
            elif cell.type == CellType.TRAP.value:
                upd(i, j, TRAP)
                if self.turn_number == 0:
                    random_moves.append(cell)
                self.vision[i][j].append((TRAP, self.turn_number))
            elif cell.type == CellType.SWAMP.value:
                upd(i, j, SWAMP)
                self.vision[i][j].append((SWAMP, self.turn_number))
            elif cell.type != CellType.EMPTY and cell.type == CellType.BASE.value and (base_x != i or base_y != j):
                # what
                self.vision[i][j].append((ENEMY_BASE, self.turn_number))
            else:
                if self.turn_number == 0:
                    random_moves.append(cell)
                if cell.resource_type == ResourceType.BREAD.value:
                    self.vision[i][j].append((BREAD, self.turn_number))
                elif cell.resource_type == ResourceType.GRASS.value:
                    self.vision[i][j].append((GRASS, self.turn_number))
                else:
                    self.vision[i][j].append((EMPTY, self.turn_number))
                if cell.ants:
                    maximum = TEAM_KARGAR
                    for a in cell.ants:
                        if a.antType == AntType.KARGAR.value and a.antTeam == AntTeam.ALLIED.value:
                            this = TEAM_KARGAR
                            tools.allied_in_range += 0.5
                        elif a.antType == AntType.SARBAAZ.value and a.antTeam == AntTeam.ALLIED.value:
                            this = TEAM_SARBAZ
                            tools.allied_in_range += 1
                        elif a.antType == AntType.KARGAR.value and a.antTeam == AntTeam.ENEMY.value:
                            this = ENEMY_KARGAR
                            tools.enemy_in_range += 0.5
                        else:
                            this = ENEMY_SARBAZ
                            tools.enemy_in_range += 1
                        if this > maximum:
                            maximum = this
                    self.vision[i][j].append((maximum, self.turn_number))
                else:
                    self.vision[i][j].append((NO_ANTS, self.turn_number))
            self.vision[i][j] = prune(self.vision[i][j])

    if self.turn_number == 0:
        print(random_moves)
        self.first_target = random.choice(random_moves)
        AI.first_target = self.first_target

    # print("turn: ", self.turn_number)
    set_turn_number(self.turn_number)

    if ant.antType == AntType.SARBAAZ.value:
        if self.turn_number == 0 and len(cur.ants) > 1:
            direction = random.choice([UP, DOWN, LEFT, RIGHT, CENTER])
        else:
            direction = self.get_move('scorpion')
    elif ant.antType == AntType.KARGAR.value:
        # if self.turn_number == 0 or self.turn_number == 1:
        #     direction = random.choice([UP, DOWN, LEFT, RIGHT, CENTER])
        # else:
        direction = self.get_move('ant')

    AI.vision = self.vision
    AI.turn_number = self.turn_number

    # Broadcast newly observed objects through the chat message
    message = ""
    message_value = 1
    for i, j, obj in new_objs:
        message += decode(i, j, obj)
    if len(message) > MAX_CHARS:
        message = message[:MAX_CHARS]

    return message, message_value, direction
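# Hedged sketch of a two-character codec consistent with the chat protocol
# used above, where encode(msg[i:i+2]) yields (x, y, obj) and decode(x, y, obj)
# yields the 2-character token. It packs the triple into one integer spread
# over two characters; the width/height/object-count bounds are assumptions,
# not taken from the project.
W, H, NOBJ = 40, 40, 16  # assumed map bounds and object-type count

def decode(x, y, obj):
    n = (x * H + y) * NOBJ + obj
    return chr(32 + n // 256) + chr(32 + n % 256)

def encode(pair):
    n = (ord(pair[0]) - 32) * 256 + (ord(pair[1]) - 32)
    xy, obj = divmod(n, NOBJ)
    x, y = divmod(xy, H)
    return x, y, obj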
# (fragment: the two "else:" branches below imply existence checks on the
# cached model and vector files; the assumed guards are reconstructed here)
if not os.path.exists(model_save_path):  # assumed guard
    embed_map = tools.load_googlenews_vectors()
    model = tools.load_model(embed_map)
    with open(model_save_path, 'w') as f:
        pkl.dump(model, f)
else:
    model = pkl.load(open(model_save_path, 'r'))

if not os.path.exists(vec_save_path):  # assumed guard
    # process
    xp = []
    for entity in ent:
        # line = re.sub(r'[0-9]+', '#', " ".join([word for word in entity.split('_') if word not in stops])).lower()
        line = " ".join([word for word in entity.split('_') if word not in stops]).lower()
        xp.append(filter(lambda x: x in string.printable, line))

    # encode new sentences
    vectors = tools.encode(model, xp)
    with open(vec_save_path, 'w') as f:
        np.save(f, vectors)
else:
    vectors = np.load(open(vec_save_path, 'r'))

# print len(vectors)
# print len(labels)
# print ent[0]
# print labels[0]
# print set(labels)

# labels -> int
le = LabelEncoder()
lab = le.fit_transform(labels)
def encode(encoder, sentences, verbose=False):
    sys.path.insert(0, 'training/')
    import tools
    return tools.encode(encoder, sentences, verbose=verbose)
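# Hedged usage sketch for the encode() wrapper above, assuming the
# skip-thoughts layout it implies, where load_model() lives in
# training/tools.py; the sentences are illustrative.
import sys

sys.path.insert(0, 'training/')
import tools

encoder = tools.load_model()
vecs = encode(encoder, ['a quick example sentence', 'another one'])
print(vecs.shape)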
def get_bin(self):
    return encode(self.rank + self.suit)
# coding: utf-8
import vocab
import train
import tools
import numpy as np

with open("../../wikipedia_txt/result_wakati.txt") as f:
    fdata = [line.rstrip() for line in f]
print '# lines: ', len(fdata)

worddict, wordcount = vocab.build_dictionary(fdata)
vocab.save_dictionary(worddict, wordcount, "word_dict")
print '# vocab: ', len(worddict)

train.trainer(fdata, dictionary="word_dict", saveFreq=100, saveto="model",
              reload_=True, n_words=40000)

model = tools.load_model()
vectors = tools.encode(model, fdata, use_norm=False)
np.savez('vecs.npz', vectors)
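# Note: np.savez() stores unnamed arrays under the key 'arr_0', so the
# saved vectors can be reloaded with:
vecs = np.load('vecs.npz')['arr_0']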