def compare():
    """Flask view: compare two histograms and return the distance as JSON.

    Reads the request parameters, reuses the most recent cached result when
    an identical (pattern, exemplar, technique) query exists, otherwise
    computes the distance and persists it.  Responds with ``rc=0`` on
    success and ``rc=1`` with the error message on failure.
    """
    try:
        # Reading request parameters.
        control_hist_location = request.args['control_hist']
        cur_hist_location = request.args['cur_hist']
        all_paths = request.args['paths']
        request_type = request.args['type']
        technique = request.args['technique']

        request_type_id = utils.get_request_type_id(request_type)
        technique_id = utils.get_technique_id(technique)
        first_file_id = utils.get_file_id(control_hist_location)
        second_file_id = utils.get_file_id(cur_hist_location)
        first_histogram_id = utils.get_histogram_id(all_paths, first_file_id)
        second_histogram_id = utils.get_histogram_id(all_paths, second_file_id)

        # Check if an identical query was already answered.
        previous_request = db_models.Request.query.filter_by(
            pattern=first_histogram_id,
            exemplar=second_histogram_id,
            technique=technique).order_by(db_models.Request.time).all()
        if previous_request:
            # BUG FIX: the original discarded this return value, leaving
            # `distance` unbound and raising NameError in the return below
            # (assumes previous_request_processing yields the stored
            # distance — TODO confirm against utils).
            distance = utils.previous_request_processing(previous_request[-1])
        else:
            distance = utils.hist_checking(control_hist_location,
                                           cur_hist_location,
                                           all_paths, technique)
            utils.save_request_result(first_histogram_id, second_histogram_id,
                                      request_type_id, technique_id, distance)
        return json.dumps({'rc': 0, 'message': '', 'distance': distance})
    except Exception as error_message:
        # `except Exception, e` is Python-2-only syntax; `as` is portable.
        return json.dumps({
            'rc': 1,
            'distance': None,
            'message': str(error_message)
        })
async def _predownload(self, mod, session, force):
    """Return True if download is necessary."""
    if self.dl_info is None:
        # No metadata at all — we cannot even name the target file.
        log.debug("must download this file, we don't even know it's filename!")
        return True

    dl_path = mod.dl_path / self.dl_info.filename
    log.debug(f'checking to see if {dl_path} needs to be downloaded')

    needed = True
    if not dl_path.exists():
        log.debug('does not exist must download')
    else:
        cached_id = Config.hashes_db.get(self.dl_info.sha256)
        if cached_id == get_file_id(dl_path):
            log.debug('file_id matches cached version, skipping download')
            needed = False
        elif self.dl_info.sha256 == sha256_path(dl_path):
            log.debug('sha256 hash matches despite file_id failing, skipping download')
            # Refresh the cache so the cheap file_id check succeeds next time.
            Config.hashes_db[self.dl_info.sha256] = get_file_id(dl_path)
            needed = False

    if needed:
        return True
    if force:
        log.info(f'Forcing download that would not have happened except force=True')
        return True
    log.info(f'{dl_path} already exists and passes hash check, skipping download')
    return False
def compare():
    """Flask view: compare two histograms and return the distance as JSON.

    Reuses the most recent cached result when an identical
    (pattern, exemplar, technique) query exists, otherwise computes the
    distance and saves it.  Responds ``rc=0`` on success, ``rc=1`` on error.
    """
    try:
        # Reading request parameters.
        control_hist_location = request.args['control_hist']
        cur_hist_location = request.args['cur_hist']
        all_paths = request.args['paths']
        request_type = request.args['type']
        technique = request.args['technique']

        request_type_id = utils.get_request_type_id(request_type)
        technique_id = utils.get_technique_id(technique)
        first_file_id = utils.get_file_id(control_hist_location)
        second_file_id = utils.get_file_id(cur_hist_location)
        first_histogram_id = utils.get_histogram_id(all_paths, first_file_id)
        second_histogram_id = utils.get_histogram_id(all_paths, second_file_id)

        # Check if an identical query was already answered.
        previous_request = db_models.Request.query.filter_by(
            pattern=first_histogram_id,
            exemplar=second_histogram_id,
            technique=technique).order_by(db_models.Request.time).all()
        if previous_request:
            # BUG FIX: the original discarded this return value, leaving
            # `distance` unbound and raising NameError in the return below
            # (assumes previous_request_processing yields the stored
            # distance — TODO confirm against utils).
            distance = utils.previous_request_processing(previous_request[-1])
        else:
            distance = utils.hist_checking(control_hist_location,
                                           cur_hist_location,
                                           all_paths, technique)
            utils.save_request_result(first_histogram_id, second_histogram_id,
                                      request_type_id, technique_id, distance)
        return json.dumps({'rc': 0, 'message': '', 'distance': distance})
    except Exception as error_message:
        # `except Exception, e` is Python-2-only syntax; `as` is portable.
        return json.dumps({'rc': 1,
                           'distance': None,
                           'message': str(error_message)})
def add_positive_sets(self, image_dir, pattern, annotation_path, sample_ratio=1.0, padding=5, augment=True, label=1):
    """Extract descriptor features from annotated positive images and
    append the labeled feature rows to the internal dataset.

    Each image's bounding box is read from its ``annotation_<id>.mat``
    file; when ``augment`` is set, a horizontally flipped copy of the
    cropped patch is described as well.
    """
    feature_rows = []
    for img_file in self._get_image_files(image_dir, pattern, sample_ratio):
        gray = cv2.cvtColor(cv2.imread(img_file), cv2.COLOR_BGR2GRAY)
        annotation_file = "{}/annotation_{}.mat".format(
            annotation_path, utils.get_file_id(img_file))
        bb = file_io.FileMat().read(annotation_file)["box_coord"][0]
        roi = utils.crop_bb(gray, bb, padding=padding,
                            dst_size=self._patch_size)
        # Todo : augment modulization
        patches = (roi, cv2.flip(roi, 1)) if augment else (roi, )
        feature_rows += self._desc.describe(patches).tolist()

    # Prepend the label column and accumulate into the shared dataset.
    labels = np.zeros((len(feature_rows), 1)) + label
    dataset = np.concatenate([labels, np.array(feature_rows)], axis=1)
    self._dataset += dataset.tolist()
def create_and_load_model(_id, num_tasks, alpha, envs, loss, architecture, name, doc, option_init):
    """Build a HierarchicalPolicy and load its parameters from the database.

    Downloads the stored state dict to a temporary file, loads it into the
    freshly constructed model, and removes the temporary file afterwards.

    NOTE(review): relies on module-level globals `num_processes_per_task`,
    `db_uri` and `db_name` — confirm they are defined at module scope.
    """
    print("Creating model...")
    hierarchical_actor_critic = HierarchicalPolicy(
        num_tasks=num_tasks,
        num_processes_per_task=num_processes_per_task,
        alpha=alpha,
        obs_shape=envs.observation_space.shape,
        action_space=envs.action_space,
        loss=loss,
        architecture=architecture,
        option_init=option_init)

    print("Loading model parameters from db: {}/{}".format(_id, name))
    file_id = get_file_id(doc=doc, file_name=name)
    save_file_from_db(file_id=file_id, destination='model_tmp.pyt',
                      db_uri=db_uri, db_name=db_name)
    try:
        # map_location keeps the weights on CPU regardless of where saved.
        state_dict = torch.load("model_tmp.pyt",
                                map_location=lambda storage, loc: storage)
        hierarchical_actor_critic.load_state_dict(state_dict)
    finally:
        # Always clean up the temporary checkpoint, even if loading fails
        # (the original leaked it on error).
        os.remove('model_tmp.pyt')
    print("Loading model parameters complete.")
    return hierarchical_actor_critic
async def showid(client, message):
    """Reply with the Telegram IDs relevant to the current chat.

    In private chats: the user's name, username, ID and data centre.
    In (super)groups: the chat ID, sender ID, replied-user ID when the
    command is a reply, and the file_id of any attached media.
    """
    kind = message.chat.type
    if kind == "private":
        user_id = message.chat.id
        first = message.from_user.first_name
        last = message.from_user.last_name or ""
        username = message.from_user.username
        dc_id = message.from_user.dc_id or ""
        await message.reply_text(
            f"<b>➲ First Name:</b> {first}\n<b>➲ Last Name:</b> {last}\n"
            f"<b>➲ Username:</b> {username}\n"
            f"<b>➲ Telegram ID:</b> <code>{user_id}</code>\n"
            f"<b>➲ Data Centre:</b> <code>{dc_id}</code>",
            quote=True
        )
    elif kind in ["group", "supergroup"]:
        sender = message.from_user.id if message.from_user else 'Anonymous'
        reply = f"<b>➲ Chat ID</b>: <code>{message.chat.id}</code>\n"
        target = message.reply_to_message
        if target:
            replied = target.from_user.id if target.from_user else 'Anonymous'
            reply += (
                f"<b>➲ User ID</b>: <code>{sender}</code>\n"
                f"<b>➲ Replied User ID</b>: <code>{replied}</code>\n"
            )
            file_info = get_file_id(target)
        else:
            reply += f"<b>➲ User ID</b>: <code>{sender}</code>\n"
            file_info = get_file_id(message)
        if file_info:
            reply += (
                f"<b>{file_info.message_type}</b>: "
                f"<code>{file_info.file_id}</code>\n"
            )
        await message.reply_text(reply, quote=True)
def get_all_samples(root_path, has_target=True):
    """Collect Sample objects keyed by file id.

    Scans ``<root_path>/images/*.png`` for image paths and, when
    ``has_target`` is set, attaches the matching mask path from
    ``<root_path>/masks/*.png``.  Every mask id must already have an image.
    """
    samples = {}
    for image_fp in glob.glob(osp.join(root_path, 'images', '*.png')):
        sid = get_file_id(image_fp)
        sample = Sample()
        sample.image_fp = image_fp
        samples[sid] = sample

    if has_target:
        for mask_fp in glob.glob(osp.join(root_path, 'masks', '*.png')):
            sid = get_file_id(mask_fp)
            assert sid in samples, 'get all samples, wrong mask id'
            samples[sid].mask_fp = mask_fp

    return samples
def rename(img_fpath, img_dpath, gt_fpath, gt_dpath, scount, im_ext, gt_ext):
    """Rename image / ground-truth pairs into sequentially numbered files.

    Images in ``img_fpath`` matching ``*<im_ext>`` are moved to
    ``img_dpath`` as ``IMG_<n><im_ext>``; the corresponding ground-truth
    file (same file id, ``gt_ext``) in ``gt_fpath`` is moved to
    ``gt_dpath`` as ``GT_IMG_<n><gt_ext>``.  Numbering starts at ``scount``.
    """
    # glob already returns a list — the original wrapped it in a pointless
    # comprehension whose loop variable shadowed the builtin `file`.
    img_path_list = glob.glob(os.path.join(img_fpath, '*' + im_ext))
    # Ground-truth path is derived from each image's file id.
    gt_path_list = [
        os.path.join(gt_fpath, utils.get_file_id(img_path) + gt_ext)
        for img_path in img_path_list
    ]
    # Iterate pairs directly instead of indexing with range(len(...)).
    for img_path, gt_path in zip(img_path_list, gt_path_list):
        d_img_name = 'IMG_' + str(scount) + im_ext
        d_gt_name = 'GT_IMG_' + str(scount) + gt_ext
        scount += 1
        os.rename(img_path, os.path.join(img_dpath, d_img_name))
        os.rename(gt_path, os.path.join(gt_dpath, d_gt_name))
async def _do_download(self, url, mod, session, **kwargs):
    """Stream `url` to disk under `mod.dl_path` and record its hashes.

    Determines the filename and size from `self.dl_info` when available,
    otherwise from the response headers (falling back to the URL path).
    Shows a progress bar while saving, rejects bodies that are actually a
    "403 Forbidden" error page, then fills in `self.dl_info` and caches
    the file id keyed by sha256.
    """
    log.info(f'Downloading {url}')
    async with http_request(session, 'get', url, timeout=None, **kwargs) as response:
        response.raise_for_status()
        if self.dl_info is not None:
            dl_filename = self.dl_info.filename
            dl_size = self.dl_info.size
        else:
            try:
                dispo = response.headers['Content-Disposition']
                _, dl_filename = dispo.split('filename=')
                if dl_filename.startswith(('"', "'")):
                    dl_filename = dl_filename[1:-1]
            except (KeyError, ValueError):
                # Header missing or unparsable (bare `except:` replaced with
                # the exceptions this parse can actually raise); fall back
                # to the last component of the URL path.
                *_, dl_filename = urllib.parse.unquote(
                    urllib.parse.urlparse(url).path).split('/')
            try:
                dl_size = int(response.headers.get('Content-Length'))
            except (ValueError, TypeError):
                dl_size = None
        dl_path = mod.dl_path / dl_filename
        log.info(f'Saving content to {dl_path}')
        log.info(f'Content size: {dl_size}')
        log.debug(f'headers: {response.headers!r}')
        pbar = tqdm(total=dl_size, unit='B', unit_scale=True, desc=dl_filename)
        dl_path.parent.mkdir(parents=True, exist_ok=True)
        try:
            async with open_async(str(dl_path), 'wb') as f:
                async for chunk in response.content.iter_chunked(Config.CHUNK_SIZE):
                    await f.write(chunk)
                    pbar.update(len(chunk))
        finally:
            # Close the bar even if the transfer fails mid-stream.
            pbar.close()
    # Nexus sometimes serves an error page with HTTP 200; sniff the head of
    # the file (with a properly closed handle — the original leaked it).
    with dl_path.open('rb') as head:
        first_bytes = head.read(1024 * 8)
    if b'ERROR 403: Forbidden' in first_bytes:
        dl_path.unlink()
        raise Exception("Nexus Doesn't Like us :-(")
    self.dl_info = DownloadInfo(
        filename=dl_filename,
        sha256=sha256_path(dl_path),
        size=dl_path.stat().st_size,
    )
    Config.hashes_db[self.dl_info.sha256] = get_file_id(dl_path)
async def addfilter(client, message):
    """Save a keyword filter for a group.

    Works from inside a group (admins only) or from PM via an active
    connection.  The filter content comes either from the command's second
    argument or from the replied-to message (text / media / inline
    buttons).  NOTE(review): the bare ``except:`` blocks below are
    deliberate best-effort fallbacks — e.g. ``.caption.html`` raises
    AttributeError when there is no caption — do not narrow them blindly.
    """
    # Anonymous admins have no from_user; they must connect via PM instead.
    userid = message.from_user.id if message.from_user else None
    if not userid:
        return await message.reply(f"You are anonymous admin. Use /connect {message.chat.id} in PM")
    chat_type = message.chat.type
    args = message.text.html.split(None, 1)

    # Resolve the target group: the connected group when invoked in PM,
    # otherwise the current (super)group.
    if chat_type == "private":
        grpid = await active_connection(str(userid))
        if grpid is not None:
            grp_id = grpid
            try:
                chat = await client.get_chat(grpid)
                title = chat.title
            except:
                await message.reply_text("Make sure I'm present in your group!!", quote=True)
                return
        else:
            await message.reply_text("I'm not connected to any groups!", quote=True)
            return
    elif chat_type in ["group", "supergroup"]:
        grp_id = message.chat.id
        title = message.chat.title
    else:
        return

    # Only group admins/creator or configured bot ADMINS may add filters.
    st = await client.get_chat_member(grp_id, userid)
    if (
        st.status != "administrator"
        and st.status != "creator"
        and str(userid) not in ADMINS
    ):
        return

    if len(args) < 2:
        await message.reply_text("Command Incomplete :(", quote=True)
        return

    # extracted[0] is the trigger keyword, extracted[1] (optional) the body.
    extracted = split_quotes(args[1])
    text = extracted[0].lower()

    if not message.reply_to_message and len(extracted) < 2:
        await message.reply_text("Add some content to save your filter!", quote=True)
        return

    if (len(extracted) >= 2) and not message.reply_to_message:
        # Content supplied inline with the command.
        reply_text, btn, alert = parser(extracted[1], text)
        fileid = None
        if not reply_text:
            await message.reply_text("You cannot have buttons alone, give some text to go with it!", quote=True)
            return
    elif message.reply_to_message and message.reply_to_message.reply_markup:
        # Replied message already carries an inline keyboard: reuse it.
        try:
            rm = message.reply_to_message.reply_markup
            btn = rm.inline_keyboard
            msg = get_file_id(message.reply_to_message)
            if msg:
                fileid = msg.file_id
                reply_text = message.reply_to_message.caption.html
            else:
                reply_text = message.reply_to_message.text.html
                fileid = None
            alert = None
        except:
            reply_text = ""
            btn = "[]"
            fileid = None
            alert = None
    elif message.reply_to_message and message.reply_to_message.media:
        # Media reply: store its file_id; stickers have no caption, so the
        # filter body comes from the command argument in that case.
        try:
            msg = get_file_id(message.reply_to_message)
            fileid = msg.file_id if msg else None
            reply_text, btn, alert = parser(extracted[1], text) if message.reply_to_message.sticker else parser(message.reply_to_message.caption.html, text)
        except:
            reply_text = ""
            btn = "[]"
            alert = None
    elif message.reply_to_message and message.reply_to_message.text:
        # Plain text reply.
        try:
            fileid = None
            reply_text, btn, alert = parser(message.reply_to_message.text.html, text)
        except:
            reply_text = ""
            btn = "[]"
            alert = None
    else:
        return

    await add_filter(grp_id, text, reply_text, btn, fileid, alert)
    await message.reply_text(
        f"Filter for `{text}` added in **{title}**",
        quote=True,
        parse_mode="md"
    )
from bs4 import BeautifulSoup
import requests
import os
from download_file import download_file_from_google_drive
from utils import get_latest_thehindu_epaper_link, get_file_id, get_destination, find_file_with_ext, \
    send_to_telegram


if __name__ == "__main__":
    # Fetch the page that links to today's Hindu e-paper.
    response = requests.get(
        "https://www.newspaperpdf.online/download-the-hindu-adfree.php")
    parsed = BeautifulSoup(response.content, 'html.parser')
    download_link = get_latest_thehindu_epaper_link(parsed)

    drive_ids = get_file_id(download_link)
    targets = get_destination(download_link)

    # Clear out any PDFs left over from a previous run.
    for stale_pdf in find_file_with_ext(".", "pdf"):
        os.remove(stale_pdf)

    # Download each part from Google Drive, then announce the link.
    for drive_id, target in zip(drive_ids, targets):
        download_file_from_google_drive(drive_id, target)
    send_to_telegram(download_link)