def __copyFile(self, file_id, dest_id):
    """Copy the Drive file `file_id` into folder `dest_id`.

    Returns the Drive API response dict of the copy on success.
    Quota errors ('userRateLimitExceeded' / 'dailyLimitExceeded') are
    retried by rotating to the next service account when
    USE_SERVICE_ACCOUNTS is enabled; other JSON-reported errors re-raise.
    """
    body = {'parents': [dest_id]}
    try:
        return (self.__service.files().copy(supportsAllDrives=True,
                                            fileId=file_id,
                                            body=body).execute())
    except HttpError as err:
        # Drive reports structured errors with a JSON content-type.
        if err.resp.get('content-type', '').startswith('application/json'):
            reason = jsnloads(
                err.content).get('error').get('errors')[0].get('reason')
            if reason in ['userRateLimitExceeded', 'dailyLimitExceeded']:
                if USE_SERVICE_ACCOUNTS:
                    # Give up once every account in ./accounts has been
                    # tried (or after a hard cap of 50 switches).
                    if self.__sa_count == len(
                            listdir("accounts")) or self.__sa_count > 50:
                        self.is_cancelled = True
                        raise err
                    else:
                        # Rotate credentials and retry the same copy.
                        self.__switchServiceAccount()
                        return self.__copyFile(file_id, dest_id)
                else:
                    # No service accounts to rotate: abort the transfer.
                    self.is_cancelled = True
                    LOGGER.info(f"Got: {reason}")
                    raise err
            else:
                # Any non-quota JSON error is surfaced to the caller.
                raise err
        # NOTE(review): a non-JSON HttpError falls through here and the
        # method returns None instead of raising — confirm this is intended.
def solidfiles(url: str) -> str:
    """Solidfiles direct link generator.

    Scrapes the viewer-options JSON blob out of the share page and
    returns its 'downloadUrl' entry.
    Based on https://github.com/Xonshiz/SolidFiles-Downloader
    By https://github.com/Jusidama18
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36'
    }
    # Fetch the share page with a desktop UA so the viewer markup is served.
    page = requests.get(url, headers=headers).text
    # The page embeds its configuration as: viewerOptions', {...});
    match = re.search(r'viewerOptions\'\,\ (.*?)\)\;', page)
    options_json = str(match.group(1))
    return jsnloads(options_json)["downloadUrl"]
def get_video_resolution(path):
    """Return (width, height) of the first video stream in `path`.

    Probes the file with ffprobe; on any failure the error is logged and
    a fallback resolution of (480, 320) is returned.
    """
    cmd = [
        "ffprobe", "-hide_banner", "-loglevel", "error",
        "-select_streams", "v:0",
        "-show_entries", "stream=width,height",
        "-of", "json", path,
    ]
    try:
        probe = check_output(cmd).decode('utf-8')
        stream = jsnloads(probe)['streams'][0]
        return int(stream['width']), int(stream['height'])
    except Exception as e:
        # Best-effort: log and fall back to a safe default resolution.
        LOGGER.error(f"get_video_resolution: {e}")
        return 480, 320
def get_media_info(path):
    """Return (duration, artist, title) for the media file at `path`.

    Probes the file with ffprobe. On probe failure the error is logged
    and (0, None, None) is returned; missing/invalid individual fields
    degrade to 0 / None instead of failing the whole call.
    """
    try:
        result = check_output(["ffprobe", "-hide_banner", "-loglevel", "error",
                               "-print_format", "json",
                               "-show_format", path]).decode('utf-8')
        fields = jsnloads(result)['format']
    except Exception as e:
        LOGGER.error(f"get_media_info: {e}")
        return 0, None, None
    # BUGFIX: the original bare `except:` clauses also swallowed
    # KeyboardInterrupt/SystemExit; catch only the expected failures
    # (missing key, non-dict 'tags', unparsable duration).
    try:
        duration = round(float(fields['duration']))
    except (KeyError, TypeError, ValueError):
        duration = 0
    try:
        artist = str(fields['tags']['artist'])
    except (KeyError, TypeError):
        artist = None
    try:
        title = str(fields['tags']['title'])
    except (KeyError, TypeError):
        title = None
    return duration, artist, title
def __download_file(self, file_id, path, filename, mime_type):
    """Download the Drive file `file_id` to `path` + `filename` in 50 MiB chunks.

    Quota errors ('downloadQuotaExceeded' / 'dailyLimitExceeded') are
    retried by rotating service accounts when USE_SERVICE_ACCOUNTS is
    enabled; any other HttpError is raised to the caller. Honors
    self.is_cancelled between chunks.
    """
    request = self.__service.files().get_media(fileId=file_id)
    # '/' in a name would be treated as a directory separator on disk.
    filename = filename.replace('/', '')
    fh = FileIO('{}{}'.format(path, filename), 'wb')
    downloader = MediaIoBaseDownload(fh, request, chunksize=50 * 1024 * 1024)
    done = False
    while not done:
        if self.is_cancelled:
            fh.close()
            break
        try:
            self.dstatus, done = downloader.next_chunk()
        except HttpError as err:
            if err.resp.get('content-type', '').startswith('application/json'):
                reason = jsnloads(err.content).get('error').get(
                    'errors')[0].get('reason')
                if reason not in [
                        'downloadQuotaExceeded',
                        'dailyLimitExceeded',
                ]:
                    raise err
                if USE_SERVICE_ACCOUNTS:
                    # Give up once every account in ./accounts has been
                    # tried (or after a hard cap of 50 switches).
                    if self.__sa_count == len(
                            listdir("accounts")) or self.__sa_count > 50:
                        self.is_cancelled = True
                        raise err
                    else:
                        self.__switchServiceAccount()
                        LOGGER.info(f"Got: {reason}, Trying Again...")
                        # Restart the download with the fresh credentials.
                        return self.__download_file(
                            file_id, path, filename, mime_type)
                else:
                    self.is_cancelled = True
                    LOGGER.info(f"Got: {reason}")
                    raise err
            else:
                # BUGFIX: a non-JSON HttpError previously fell through and
                # the loop retried next_chunk() indefinitely; surface it
                # like the sibling __copyFile does.
                raise err
    self._file_downloaded_bytes = 0
def __upload_file(self, file_path, file_name, mime_type, parent_id):
    """Upload the local file `file_path` to Drive and return its download URL.

    Args:
        file_path: path of the local file to upload.
        file_name: name to give the file on Drive.
        mime_type: MIME type reported to the Drive API.
        parent_id: destination folder id, or None for the default root.

    Returns None when the transfer is cancelled mid-upload. Quota errors
    are retried by switching service accounts when USE_SERVICE_ACCOUNTS
    is enabled; other JSON-reported errors raise HttpError.

    CONSISTENCY FIX: uses supportsAllDrives (as __copyFile already does)
    instead of the deprecated supportsTeamDrives alias.
    """
    # File body description
    file_metadata = {
        'name': file_name,
        'description': 'Uploaded by Mirror-leech-telegram-bot',
        'mimeType': mime_type,
    }
    if parent_id is not None:
        file_metadata['parents'] = [parent_id]

    if ospath.getsize(file_path) == 0:
        # Zero-byte files cannot use a resumable session; upload in one shot.
        media_body = MediaFileUpload(file_path,
                                     mimetype=mime_type,
                                     resumable=False)
        response = self.__service.files().create(
            supportsAllDrives=True,
            body=file_metadata,
            media_body=media_body).execute()
        if not IS_TEAM_DRIVE:
            self.__set_permission(response['id'])
        drive_file = self.__service.files().get(
            supportsAllDrives=True, fileId=response['id']).execute()
        return self.__G_DRIVE_BASE_DOWNLOAD_URL.format(drive_file.get('id'))

    media_body = MediaFileUpload(file_path,
                                 mimetype=mime_type,
                                 resumable=True,
                                 chunksize=50 * 1024 * 1024)
    # Insert a file
    drive_file = self.__service.files().create(supportsAllDrives=True,
                                               body=file_metadata,
                                               media_body=media_body)
    response = None
    while response is None:
        if self.is_cancelled:
            break
        try:
            self.status, response = drive_file.next_chunk()
        except HttpError as err:
            if err.resp.get('content-type', '').startswith('application/json'):
                reason = jsnloads(err.content).get('error').get(
                    'errors')[0].get('reason')
                if reason not in [
                        'userRateLimitExceeded',
                        'dailyLimitExceeded',
                ]:
                    raise err
                if USE_SERVICE_ACCOUNTS:
                    self.__switchServiceAccount()
                    LOGGER.info(f"Got: {reason}, Trying Again.")
                    # Restart the whole upload with the fresh credentials.
                    return self.__upload_file(file_path, file_name,
                                              mime_type, parent_id)
                else:
                    self.is_cancelled = True
                    LOGGER.info(f"Got: {reason}")
                    raise err
    if self.is_cancelled:
        return
    self._file_uploaded_bytes = 0
    # Insert new permissions
    if not IS_TEAM_DRIVE:
        self.__set_permission(response['id'])
    # Define file instance and get url for download
    drive_file = self.__service.files().get(
        supportsAllDrives=True, fileId=response['id']).execute()
    download_url = self.__G_DRIVE_BASE_DOWNLOAD_URL.format(
        drive_file.get('id'))
    return download_url
# Parse drive list lines of the form: NAME DRIVE_ID [INDEX_URL].
# Underscores in NAME are shown as spaces.
for line in lines:
    try:
        temp = line.strip().split()
        DRIVES_IDS.append(temp[1])
        DRIVES_NAMES.append(temp[0].replace("_", " "))
    except IndexError:
        # BUGFIX: previously `except: pass` still ran the INDEX_URLS
        # append below with a stale `temp`, desynchronizing the three
        # parallel lists. Skip malformed lines entirely instead.
        continue
    try:
        INDEX_URLS.append(temp[2])
    except IndexError:
        # No index URL configured for this drive.
        INDEX_URLS.append(None)

# Optionally (re)install the configured qBittorrent search plugins,
# removing any previously installed ones first.
try:
    SEARCH_PLUGINS = getConfig('SEARCH_PLUGINS')
    if len(SEARCH_PLUGINS) == 0:
        raise KeyError
    SEARCH_PLUGINS = jsnloads(SEARCH_PLUGINS)
    qbclient = get_client()
    qb_plugins = qbclient.search_plugins()
    if qb_plugins:
        for plugin in qb_plugins:
            qbclient.search_uninstall_plugin(names=plugin['name'])
    qbclient.search_install_plugin(SEARCH_PLUGINS)
except KeyError:
    # Unset or empty config: search plugins are disabled.
    SEARCH_PLUGINS = None

# Telegram bot bootstrap.
updater = tg.Updater(token=BOT_TOKEN)
bot = updater.bot
dispatcher = updater.dispatcher
job_queue = updater.job_queue