def _run(self):
    from helpers import md5, download_file

    if IS_WINDOWS:
        old_build_path = os.path.join(self.executables_path, 'ouroborosd.exe')
        new_build_path = os.path.join(self.executables_path, 'ouroborosd_new.exe')
    else:
        old_build_path = os.path.join(self.executables_path, 'ouroborosd')
        new_build_path = os.path.join(self.executables_path, 'ouroborosd_new')

    # Check both builds, since either of them may turn out to be the new one
    for build_path in (old_build_path, new_build_path):
        if os.path.exists(build_path):
            build_md5 = md5(build_path)
            # The build is already up to date
            if build_md5 == self.md5:
                return "EXISTS"

    download_file(self.win_url if IS_WINDOWS else self.url, new_build_path)
    return "OK"
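# A minimal sketch of the md5 helper imported above, assuming it hashes the
# file in chunks and returns the hex digest; the helper's real signature is
# not shown in this snippet, so this is an illustrative assumption:
import hashlib

def md5(file_path, chunk_size=8192):
    digest = hashlib.md5()
    with open(file_path, 'rb') as f:
        # read in fixed-size chunks so large builds aren't loaded fully into memory
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()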
def download_segmentation_zipfiles(base_url: str, section: str, meta_dir: str):
    """
    Downloads segmentation mask archive files from the OpenImages dataset.

    :param base_url: OpenImages URL location
    :param section: split section (train, validation, or test) for which to
        download the archives
    :param meta_dir: directory into which the archive files should be downloaded
    """

    # make the metadata directory if it doesn't exist
    if meta_dir is not None:
        os.makedirs(meta_dir, exist_ok=True)

    # mask archives are split into 16 "bins", each named by a hex digit (0-f)
    for i in range(16):
        mask_bin = format(i, "x")
        mask_filename = _construct_archive_filename(section, mask_bin)
        url = f"{base_url}{section}-masks/{mask_filename}"
        dest_path = f"{meta_dir}/{mask_filename}"
        if not os.path.exists(dest_path):
            try:
                download_file(url, dest_path)
            except ValueError as e:
                raise ValueError(
                    f"Failed to get segmentation mask archive (bin {mask_bin}) "
                    f"for split section {section}.",
                    e,
                )
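# Hedged sketch of the _construct_archive_filename helper used above, assuming
# OpenImages-style mask archive names of the form "<section>-masks-<hex>.zip";
# the actual naming scheme is not shown in this snippet:
def _construct_archive_filename(section: str, mask_bin: str) -> str:
    return f"{section}-masks-{mask_bin}.zip"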
def download_doc(self, doc, folder):
    folder_path = Path(folder)
    helpers.create_folder_if_not_exists(folder_path)
    helpers.download_file(
        doc['url'],
        folder_path / helpers.case_name_to_folder(doc['filename']))
def _class_label_codes(
        class_labels: List[str],
        meta_dir: str = None,
) -> Dict:
    """
    Gets a dictionary that maps a list of OpenImages image class labels to
    their corresponding image class label codes.

    :param class_labels: image class labels for which we'll find corresponding
        OpenImages image class codes
    :param meta_dir: directory where we should look for the class descriptions
        CSV file, and if not present download it into there for future use
    :return: dictionary with the class labels mapped to their corresponding
        OpenImages image class codes
    """

    classes_csv = "class-descriptions-boxable.csv"

    if meta_dir is None:
        # get the class descriptions CSV from OpenImages and read into a DataFrame
        try:
            contents = download_file(_OID_v5 + classes_csv)
        except ValueError as e:
            raise ValueError("Failed to get class descriptions information.", e)
        df_classes = pd.read_csv(io.BytesIO(contents), header=None)
    else:
        # download the class descriptions CSV file to the specified directory if not present
        descriptions_csv_file_path = os.path.join(meta_dir, classes_csv)
        if not os.path.exists(descriptions_csv_file_path):
            # get the class descriptions CSV
            url = _OID_v5 + classes_csv
            try:
                download_file(url, descriptions_csv_file_path)
            except ValueError as e:
                raise ValueError(
                    "Failed to get class descriptions information.", e)
        df_classes = pd.read_csv(descriptions_csv_file_path, header=None)

    # build dictionary of class labels to OpenImages class codes
    labels_to_codes = {}
    for class_label in class_labels:
        labels_to_codes[class_label.lower()] = \
            df_classes.loc[[i.lower() == class_label.lower()
                            for i in df_classes[1]]].values[0][0]

    # return the labels to OpenImages codes dictionary
    return labels_to_codes
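# Hedged usage sketch for _class_label_codes; the "/m/..." values below are
# illustrative of OpenImages' Knowledge Graph MID format, not verified output:
codes = _class_label_codes(["Dog", "Cat"], meta_dir="/tmp/oid_meta")
# codes might look like {"dog": "/m/0bt9lr", "cat": "/m/01yrx"}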
def download_torrent(self, info):
    if info.startswith("http"):
        torrent_page = retrieve_url(info)
        torrent_link_match = re.search(DOWNLOAD_PATTERN, torrent_page)
        if torrent_link_match and torrent_link_match.groups():
            clean_name = torrent_link_match.groups()[0].split('title')[0].replace('"', '').strip()
            torrent_file = 'https://torrent.isohunt.to/download.php' + clean_name
            print(download_file(torrent_file))
        else:
            print('')
    else:
        print(download_file(info))
def _class_label_segmentation_codes(
        class_labels: List[str],
        meta_dir: str = None,
) -> List[str]:
    """
    Gets a list of OpenImages image class label codes relevant to segmentation masks.

    :param class_labels: image class labels for which we'll find corresponding
        OpenImages image class codes
    :param meta_dir: directory where we should look for the class label codes
        file, and if not present download it into there for future use
    :return: list of OpenImages class label codes
    """

    classes_txt = "classes-segmentation.txt"
    class_label_codes = []

    if meta_dir is None:
        # get the class codes text file
        try:
            contents = download_file(_OID_v5 + classes_txt)
        except ValueError as e:
            raise ValueError("Failed to get class descriptions information.", e)
        class_label_codes = contents.splitlines()
    else:
        # download the class label codes file to the specified directory if not present
        class_label_codes_file_path = os.path.join(meta_dir, classes_txt)
        if not os.path.exists(class_label_codes_file_path):
            # get the class label codes
            url = _OID_v5 + classes_txt
            try:
                download_file(url, class_label_codes_file_path)
            except ValueError as e:
                raise ValueError(
                    "Failed to get class descriptions information.", e)

        # read the lines into the list of class label codes
        class_label_codes = []
        with open(class_label_codes_file_path, "r") as class_label_codes_file:
            for line in class_label_codes_file:
                class_label_codes.append(line.strip())

    # return the OpenImages class label codes
    return class_label_codes
def get_source(no_new_sources=False):
    if no_new_sources:
        return
    source_urls = specfile.Spec().get_source_urls()
    # So far, only Source0 is a tarball to download
    source_url = source_urls[0]
    source_fn = os.path.basename(source_url)
    if os.path.isfile(source_fn):
        log.info("%s already present" % source_fn)
        return
    try:
        helpers.download_file(source_url)
    except exception.CommandFailed:
        raise exception.ActionRequired(
            msg="Failed to download source tarball. Please update Source0 in "
                ".spec file.",
            rerun=True)
def download_torrent(self, info):
    torrent_page = retrieve_url(urllib.parse.unquote(info))
    file_link = re.search(r'(down/.+?\.torrent)', torrent_page)
    if file_link and file_link.groups():
        print(download_file(self.url + file_link.groups()[0]))
    else:
        raise Exception('Error, please file a bug report!')
def download_torrent(self, info):
    info_hash = urllib.parse.unquote(info).split("/")[-1]
    if len(info_hash) == 40:
        print(download_file('{0}/download/{1}.torrent'.format(
            self.url, info_hash)))
    else:
        raise Exception('Error, please file a bug report!')
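# Hedged sketch: the len() == 40 check above accepts any 40-character string;
# a stricter (hypothetical) validator for a v1 BitTorrent info-hash would also
# require hex digits:
import string

def is_info_hash(candidate):
    return len(candidate) == 40 and all(c in string.hexdigits for c in candidate)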
def download_torrent(self, info):
    torrent_page = retrieve_url(info)
    torrent_link_match = re.search(DOWNLOAD_PATTERN, torrent_page)
    if torrent_link_match and torrent_link_match.groups():
        clean_name = torrent_link_match.groups()[0].strip()
        torrent_file = self.url + clean_name
        print(download_file(torrent_file))
    else:
        print('')
def download_torrent(self, download_url):
    # fix for some indexers with magnet link inside .torrent file
    if download_url.startswith('magnet:?'):
        print(download_url + " " + download_url)
        return
    response = self.get_response(download_url)
    if response is not None and response.startswith('magnet:?'):
        print(response + " " + download_url)
    else:
        print(download_file(download_url))
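# Hedged note: qBittorrent search plugins conventionally report a finished
# download by printing "<local file path or magnet URI> <source URL>" on
# stdout, which is why the magnet branches above echo the link followed by the
# original URL. A minimal sketch of that output contract (hypothetical helper):
def emit_download_result(path_or_magnet, source_url):
    print(path_or_magnet + " " + source_url)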
def download_torrent(self, info):
    # since 1337x does not provide torrent links in the search results,
    # we will have to fetch the page and extract the torrent link
    # and then call the download_file function on it.
    torrent_page = retrieve_url(info)
    torrent_link_match = DOWNLOAD_PATTERN.search(torrent_page)
    if torrent_link_match and torrent_link_match.groups():
        torrent_file = torrent_link_match.groups()[2]  # noqa
        print(download_file(torrent_file))
    else:
        print('')
def download_participant(config, stats, i, email, wit_id, claim_file_url, *_args):
    stats[PARTICIPANTS][FROM_CSV][WIT_IDS].add(wit_id)
    stats[PARTICIPANTS][FROM_CSV][EMAILS].add(email)
    stats[MAPS][EMAIL_BY_WIT_ID][wit_id] = email
    if not download_file(claim_file_url, config.claims_output_dir,
                         overwrite=False, prefix=f'{wit_id}_{i}'):
        print(f'Failed to download claim file from "{claim_file_url}"')
        return
    stats[PARTICIPANTS][DOWNLOADED][WIT_IDS].add(wit_id)
    stats[PARTICIPANTS][DOWNLOADED][EMAILS].add(email)
def download_torrent(self, info): """ Downloader """ html = retrieve_url(info) m = re.search('(<a.*?>Descargar</a>)', html) if m and len(m.groups()) > 0: torrentAnchor = m.group(1) torrentLink1 = re.search('href=[\'\"](.+?)[\'\"]', torrentAnchor) if torrentLink1 and len(torrentLink1.groups()) > 0: torrentUrl = self.url + '/' + torrentLink1.group(1) html = retrieve_url(torrentUrl) torrentLink2 = re.search( '<a.*?href=[\'\"](.+?\.torrent)[\'\"]>', html) if torrentLink2 and len(torrentLink2.groups()) > 0: #download_file is tested and downloads correctly the .torrent file #starting from the desc_url from the torrent choosen. print(download_file(torrentLink2.group(1)))
def _submit(client_payload, answer_file_path, context):
    """
    Takes a list of predicted heights and actual heights, computes the score,
    and prepares the plots for submission to the leaderboard.
    """
    file_key = client_payload["file_key"]
    _update_job_event(context, job_info_template(context, "Grading Submission...."))

    _payload = {}
    _meta = {}
    _meta['file_key'] = file_key
    _payload["meta"] = _meta
    submission_id = report_to_crowdai(context, _payload,
                                      submission_id=False, status='submitted')
    print("Submission id : ", submission_id)
    try:
        localfilepath = download_file(context, file_key)
        _client_payload = {}
        _client_payload["submission_file_path"] = localfilepath
        _result_object = config.evaluator._evaluate(_client_payload, context)
        print(_result_object)
        _payload = _result_object
        report_to_crowdai(context, _payload,
                          submission_id=submission_id, status='graded')
        # Clean up file if possible
        os.remove(localfilepath)
        return _result_object
    except Exception as e:
        # Report to crowdAI
        if "meta" in _payload.keys():
            del _payload["meta"]
        report_to_crowdai(context, _payload,
                          submission_id=submission_id, status='failed',
                          message=str(e))
        # raise the exception again so that it can be handled further down the chain
        raise e
def _get_segmentations_csv(split_section: str) -> str:
    """
    Requests the segmentations CSV for a split section.

    :param split_section: split section (train, validation, or test)
    :return: the CSV payload
    """

    # get the annotations CSV for the section
    url = _OID_v5 + split_section + "-annotations-object-segmentation.csv"
    try:
        contents = download_file(url)
    except ValueError as e:
        raise ValueError(
            f"Failed to get segmentation information for split section {split_section}.",
            e)

    return contents
def download_torrent(self, info):
    info_page = retrieve_url(info)
    link = download_pattern.findall(info_page)[0]
    print(download_file(link))
def download_torrent(self, info): """ Unused :( """ print(download_file(info))
def download_torrent(self, url):
    print(download_file(url))
def download_torrent(self, info):
    html = retrieve_url(info)
    m = re.search(r'(/tor/.+?\s)', html)
    if m and len(m.groups()) > 0:
        print(download_file(self.url + m.group(1)))
def download_torrent(self, desc_link):
    """ Downloader """
    dl_link = re_compile(r"https://www\.hypercache\.pw/metadata/.+?/")
    data = retrieve_url(desc_link)
    dl_url = dl_link.findall(data)[0]
    print(download_file(dl_url))
import os
import sys
import glob
from helpers import download_file

supported_engines = dict()

engines = glob.glob(os.path.join(os.path.dirname(__file__), 'engines', '*.py'))
for engine in engines:
    e = engine.split(os.sep)[-1][:-3]
    if len(e.strip()) == 0:
        continue
    if e.startswith('_'):
        continue
    try:
        exec "from engines.%s import %s" % (e, e)
        exec "engine_url = %s.url" % e
        supported_engines[engine_url] = e
    except:
        pass

if __name__ == '__main__':
    if len(sys.argv) < 3:
        raise SystemExit('./nova2dl.py engine_url download_parameter')
    engine_url = sys.argv[1].strip()
    download_param = sys.argv[2].strip()
    if engine_url not in supported_engines.keys():
        raise SystemExit('./nova2dl.py: this engine_url was not recognized')
    exec "engine = %s()" % supported_engines[engine_url]
    if hasattr(engine, 'download_torrent'):
        engine.download_torrent(download_param)
    else:
        print download_file(download_param)
    sys.exit(0)
def download_torrent(self, info):
    """ Downloader """
    print(download_file(info))
import os
import sys
import glob
from helpers import download_file

supported_engines = dict()

engines = glob.glob(os.path.join(os.path.dirname(__file__), 'engines', '*.py'))
for engine in engines:
    e = engine.split(os.sep)[-1][:-3]
    if len(e.strip()) == 0:
        continue
    if e.startswith('_'):
        continue
    try:
        exec("from engines.%s import %s" % (e, e))
        exec("engine_url = %s.url" % e)
        supported_engines[engine_url] = e
    except:
        pass

if __name__ == '__main__':
    if len(sys.argv) < 3:
        raise SystemExit('./nova2dl.py engine_url download_parameter')
    engine_url = sys.argv[1].strip()
    download_param = sys.argv[2].strip()
    if engine_url not in list(supported_engines.keys()):
        raise SystemExit('./nova2dl.py: this engine_url was not recognized')
    exec("engine = %s()" % supported_engines[engine_url])
    if hasattr(engine, 'download_torrent'):
        engine.download_torrent(download_param)
    else:
        print(download_file(download_param))
    sys.exit(0)
def download_torrent(self, info):
    print(download_file(info))
import os
import sys
import glob
from helpers import download_file

supported_engines = dict()

engines = glob.glob(os.path.join(os.path.dirname(__file__), 'engines', '*.py'))
for engine in engines:
    e = engine.split(os.sep)[-1][:-3]
    if len(e.strip()) == 0:
        continue
    if e.startswith('_'):
        continue
    try:
        exec "from engines.%s import %s" % (e, e)
        exec "engine_url = %s.url" % e
        supported_engines[engine_url] = e
    except:
        pass

if __name__ == '__main__':
    if len(sys.argv) < 3:
        raise SystemExit('./nova2dl.py engine_url download_parameter')
    engine_url = sys.argv[1].strip()
    download_param = sys.argv[2].strip()
    if engine_url not in supported_engines.keys():
        raise SystemExit('./nova2dl.py: this engine_url was not recognized')
    exec "engine = %s()" % supported_engines[engine_url]
    if hasattr(engine, 'download_torrent'):
        engine.download_torrent(download_param)
    else:
        print download_file(download_param)
    sys.exit(0)
import os
import sys
import glob
from helpers import download_file

supported_engines = dict()

engines = glob.glob(os.path.join(os.path.dirname(__file__), 'engines', '*.py'))
for engine in engines:
    e = engine.split(os.sep)[-1][:-3]
    if len(e.strip()) == 0:
        continue
    if e.startswith('_'):
        continue
    try:
        exec("from engines.%s import %s" % (e, e))
        exec("engine_url = %s.url" % e)
        supported_engines[engine_url] = e
    except:
        pass

if __name__ == '__main__':
    if len(sys.argv) < 3:
        raise SystemExit('./nova2dl.py engine_url download_parameter')
    engine_url = sys.argv[1].strip()
    download_param = sys.argv[2].strip()
    if engine_url not in list(supported_engines.keys()):
        raise SystemExit('./nova2dl.py: this engine_url was not recognized')
    exec("engine = %s()" % supported_engines[engine_url])
    if hasattr(engine, 'download_torrent'):
        engine.download_torrent(download_param)
    else:
        print(download_file(download_param))
    sys.exit(0)
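# Hedged aside: the exec-based engine loading in the nova2dl variants above
# can be expressed with importlib instead; a minimal sketch assuming the same
# engines package layout (engines/<name>.py defining a class <name> with a
# url attribute):
import importlib

def load_engine(name):
    # import engines.<name> and return the engine class of the same name
    module = importlib.import_module("engines.%s" % name)
    return getattr(module, name)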
current_project_idx = 0
candidate = "%s" % project_names[current_project_idx]
p = data.worksheet(candidate)
tmp = p.find('last')
row = tmp.row + 1
tweet = p.cell(row, 1).value
if check_empty(tweet):
    row = 1
if row == 1:
    tweet = p.cell(row, 1).value
tweet_media = p.cell(row, 3).value
local_filename = None
if not check_empty(tweet_media):
    local_filename = download_file(tweet_media)
p.update_cell(tmp.row, 2, '')
p.update_cell(row, 2, 'last')
# Rotate sheets
dashboard.update_acell('A1', p.title)
print(local_filename)
if local_filename is None:
    print("Tweeting without image")
    client.update_status(status=tweet)
else:
    print("Tweeting with image")
    client.update_with_media(filename=local_filename, status=tweet)
def download_torrent(self, desc_link):
    """ Downloader """
    dl_link = re_compile("/downloading/[^\"]+")
    data = retrieve_url(desc_link)
    dl_url = dl_link.findall(data)[0]
    print(download_file(self.url + dl_url))
def webhook():
    data = request.get_json(force=True)
    app.logger.info(data)
    update = telegram.update.Update.de_json(request.get_json(force=True), bot)
    if update.message.text == '/help':
        bot.sendMessage(chat_id=update.message.chat_id,
                        text=helpers.help_text())
        return 'OK'
    photo_id, status = helpers.get_is_photo(data)
    app.logger.info(photo_id)
    if status == 1:
        bot.sendMessage(
            chat_id=update.message.chat_id,
            text='Sorry, unsupported file type. Please use a jpg (jpeg) or png file.')
        return 'OK'
    if photo_id:
        photo = helpers.download_file(app, photo_id, 'photos/downloads/')
        image = cv2.imread(photo)
        orig_height, orig_width, _ = image.shape
        objects = object_detection_api.get_objects(image, THRESHOLD)
        objects = json.loads(objects)
        if len(objects) > 1:
            app.logger.info(objects)
            font = cv2.FONT_HERSHEY_SIMPLEX
            result_msg = ''
            for item in objects:
                if item['name'] != 'Object':
                    continue
                # detection boxes come back normalized; scale to pixel coordinates
                x = int(orig_width * item['x'])
                y = int(orig_height * item['y'])
                width = int(orig_width * item['width'])
                height = int(orig_height * item['height'])
                cv2.rectangle(image, (x, y), (width, height), (0, 255, 0), 2)
                scope = float('{:.2f}'.format(item['score'] * 100))
                cv2.putText(image, item['class_name'] + " - " + str(scope) + '%',
                            (x + 5, y + 20), font, 1, (255, 255, 255), 1,
                            cv2.LINE_AA)
                result_msg += item['class_name'] + " - " + str(scope) + '% \n'
            new_name = 'photos/detected/photo_detected_' + str(photo_id) + '.jpg'
            cv2.imwrite(new_name, image)
            bot.sendPhoto(update.message.chat_id,
                          photo=open(new_name, 'rb'), caption="Result")
            bot.sendMessage(chat_id=update.message.chat_id, text=result_msg)
        else:
            bot.sendMessage(
                chat_id=update.message.chat_id,
                text='Sorry! No objects were found. Please try another photo.')
        return 'OK'
    bot.sendMessage(chat_id=update.message.chat_id,
                    text='Please send a photo for recognition!')
    return 'OK'
def download_torrent(self, info):
    from helpers import download_file
    print(download_file(info))
def download_torrent(self, info):
    # the page URL is passed twice, presumably as both target and referer
    print(download_file(info, info))
def download_torrent(self, desc_link):
    """ Downloader """
    dl_link = re_compile("/get_torrents/[a-zA-Z0-9]+")
    data = retrieve_url(desc_link)
    dl_url = dl_link.findall(data)[0]
    print(download_file(self.url + dl_url))
def _submit(client_payload, answer_file_path, context):
    """
    Takes a list of predicted heights and actual heights, computes the score,
    and prepares the plots for submission to the leaderboard.
    """
    file_key = client_payload["file_key"]
    _update_job_event(
        context,
        job_info_template(context, "Grading Submission...."))

    if "round" not in client_payload.keys():
        raise Exception("""
            The round parameter has not been specified.
            Please upgrade your crowdai client to at least version 1.0.21 by:
                pip install -U crowdai
            and then update your submission code by following the latest instructions from:
            https://github.com/crowdAI/ieee_investment_ranking_challenge-starter-kit#submission-of-predicted-file-to-crowdai
        """)

    round_id = client_payload["round"]
    assert round_id in config.crowdai_round_id_map.keys(), \
        "Unknown Round ID Passed. Allowed values : {}".format(
            str(config.crowdai_round_id_map.keys()))
    crowdai_round_id = config.crowdai_round_id_map[round_id]

    _payload = {}
    _meta = {}
    _meta['file_key'] = file_key
    _payload["meta"] = json.dumps(_meta)
    _payload["challenge_round_id"] = crowdai_round_id
    submission_id = report_to_crowdai(
        context, _payload, submission_id=False, status='submitted')
    print("Submission id : ", submission_id)
    try:
        localfilepath = download_file(context, file_key)
        _client_payload = {}
        _client_payload["submission_file_path"] = localfilepath
        _result_object = config.evaluator._evaluate(
            client_payload=_client_payload,
            round_indicator=round_id,
            _context=context)
        print(_result_object)
        _payload = _result_object
        report_to_crowdai(
            context, _payload, submission_id=submission_id,
            message="graded successfully", status='graded')
        # Clean up file if possible
        os.remove(localfilepath)
        return _result_object
    except Exception as e:
        # Report to crowdAI
        if "meta" in _payload.keys():
            del _payload["meta"]
        report_to_crowdai(
            context, _payload, submission_id=submission_id,
            status='failed', message=str(e))
        # raise the exception again so that it can be handled further down the chain
        raise e