def export_to_json(
    db_data: list = None,
    display: bool = False,
    files_list_path: str = FILES_LIST_PATH,
) -> bool:
    """
    Export DB objects to JSON
    """
    try:
        data = json.dumps(db_data, indent=4)
        data_count = len(json.loads(data))
        statistics.append(["export_to_json", data_count])
        with open(f"{files_list_path}/cards.json", "w") as f:
            f.write(data)
        logger.info(
            f"DB objects exported to JSON file successfully: {files_list_path}/cards.json"
        )
        if display:
            print(data)
    except Exception as e:
        logger.error(e)
        raise
    return True
def read_list_from_file(
    files_list_path: str = FILES_LIST_PATH,
    files_list_filename: str = FILES_LIST_FILENAME,
) -> list:
    """
    Import list from file
    """
    if os.path.exists(f"{files_list_path}/{files_list_filename}"):
        try:
            with open(f"{files_list_path}/{files_list_filename}", "r") as r:
                data = r.read().splitlines()
            statistics.append(["read_list_from_file", len(data)])
            logger.info(f"{len(data)} items imported from file.")
        except Exception as e:
            logger.error(e)
            raise
        return data
    logger.critical(
        f'Cannot open the file "{files_list_path}/{files_list_filename}"; it looks like it does not exist.'
    )
    return False
def seed_db_table(
    db_objects: list = None,
    table_name: str = TABLE_NAME,
    aws_region: str = AWS_REGION,
) -> bool:
    """
    Insert DB objects into table
    """
    logger.info("Inserting data into DB...")
    logger.debug(
        f"Context Parameters: {seed_db_table.__name__} => {seed_db_table.__code__.co_varnames}"
    )
    try:
        dynamodb = boto3.resource("dynamodb", region_name=aws_region)
        table = dynamodb.Table(table_name)
        with table.batch_writer() as batch:
            for item in db_objects:
                batch.put_item(Item=item)
        statistics.append(["seed_db_table", len(db_objects)])
        logger.info(f"{len(db_objects)} item(s) were inserted in DB.")
    except Exception as e:
        logger.error(e)
        raise
    return True
def pip_uninstall_package(name, options, version):
    try:
        if not version:
            version = ""
        logger.info("Begin to pip-uninstall {0} {1} ...".format(name, version))
        options_copy = options.copy()
        # "--user" is not a valid option for "pip uninstall"; drop it if present.
        if len(options_copy) != 0 and options_copy[0] == "--user":
            options_copy.pop(0)
        res = subprocess.check_call([
            sys.executable, '-m', 'pip', 'uninstall', *options_copy, "-y", "-q", name
        ])
        if res != 0:
            logger.error("Fail to pip-uninstall {0}.".format(name))
        else:
            logger.info("Pip-uninstall {0} {1} successfully!".format(name, version))
        return res == 0
    except Exception as e:
        # logger.error("Fail to pip-uninstall {0}, unexpected error: {1}".format(name, e))
        logger.error(
            "Fail to pip-uninstall {0}, unexpected error! Please try to run installer script again!"
            .format(name))
        return False
def pip_install_package(name, options, version, pkg=None):
    try:
        if not pkg:
            if version:
                if version.strip()[0] == "<" or version.strip()[0] == ">":
                    pkg = "{0}{1}".format(name, version)
                else:
                    pkg = "{0} == {1}".format(name, version)
            else:
                pkg = name
                version = ""
        if not version:
            version = ""
        logger.info("Begin to pip-install {0} {1} ...".format(name, version))
        logger.debug("pkg : {0}".format(pkg))
        res = subprocess.check_call(
            [sys.executable, '-m', 'pip', 'install', *options, "-q", pkg])
        if res != 0:
            logger.error("Fail to pip-install {0}.".format(name))
            SysInfo.fail_install.append("%s %s" % (name, version))
        else:
            logger.info("Pip-install {0} {1} successfully!".format(name, version))
        return res == 0
    except Exception as e:
        # logger.error("Fail to pip-install {0}, unexpected error: {1}".format(name, e))
        logger.error(
            "Fail to pip-install {0}, unexpected error! Please try to run installer script again!"
            .format(name))
        SysInfo.fail_install.append("%s %s" % (name, version))
        return False
def build_card_objects(media_list: list = None) -> list:
    """
    Creates DB objects from S3 objects list
    """
    logger.info("Crafting list of DB objects...")
    logger.debug(
        f"Context Parameters: {build_card_objects.__name__} => {build_card_objects.__code__.co_varnames}"
    )
    medias_list = defaultdict(list)
    try:
        for item in media_list:
            medias_list[item["ts"]].append({
                "name": item["name"],
                "path": item["path"],
                "url": item["url"],
                "kind": item["kind"],
            })
        medias = [{"ts": k, "medias": v} for k, v in medias_list.items()]
        statistics.append(["build_card_objects", len(medias)])
        logger.info(f'{len(medias)} "card" objects generated successfully.')
    except Exception as e:
        logger.error(e)
        raise
    return medias
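# A minimal usage sketch for build_card_objects, assuming the input shape
# implied by the keys the function reads ("ts", "name", "path", "url", "kind");
# the sample values below are hypothetical.
def _demo_build_card_objects():
    sample_media_list = [
        {"ts": "20160823", "name": "img.jpg",
         "path": "assets/20160823/img.jpg",
         "url": "https://example.com/img.jpg", "kind": "picture"},
        {"ts": "20160823", "name": "clip.mp4",
         "path": "assets/20160823/clip.mp4",
         "url": "https://example.com/clip.mp4", "kind": "movie"},
    ]
    # Both items share ts "20160823", so one card holding two medias comes back.
    cards = build_card_objects(media_list=sample_media_list)
    print(cards)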
def process_local_movie_medias(
    local_media_output_path: str = LOCAL_MEDIA_OUTPUT_PATH,
    files_list_path: str = FILES_LIST_PATH,
    files_list_filename: str = FILES_LIST_FILENAME,
) -> bool:
    """
    Get movie files
    """
    logger.info("Starting batch movie encoding...")
    try:
        with open(f"{files_list_path}/{files_list_filename}", "r") as r:
            data = r.read().splitlines()
        for item in data:
            if get_media_type(item) == "movie":
                ts = item.split("/")[-2]
                ts = media_ts_format(ts, item)
                logger.debug(ts)
                if ts:
                    video_encoder(
                        media=item, ts=ts, output_path=local_media_output_path
                    )
    except Exception as e:
        logger.error(e)
        raise
    logger.info("Encoder done.")
    return True
def video_encoder(
    media: str = None,
    ts: str = None,
    output_path: str = None,
    log_path: str = LOG_PATH,
    video_preset_data: dict = video_preset_data,
    media_presets: str = VIDEO_PRESETS,
    encoder_threads: int = ENCODER_THREADS,
) -> bool:
    """
    Encode video media based on preset
    """
    try:
        i = 0
        media_presets = media_presets.split(" ")
        for media_preset in media_presets:
            media_preset = media_preset.split(".")
            preset_category = media_preset[0]
            preset_format = media_preset[1]
            settings = video_preset_data[preset_category][preset_format]
            logger.debug(f"Settings: {settings}")
            file_format = settings["format"]
            vcodec = settings["vcodec"]
            acodec = settings["acodec"]
            video_bitrate = settings["video_bitrate"]
            audio_bitrate = settings["audio_bitrate"]
            encoder_threads = int(encoder_threads)
            i += 1
            logger.info(
                f'Encoding media "{media}" using preset "{preset_category} -> {preset_format}" ...'
            )
            logger.info(
                f'Processing task(s) for: "{preset_format}"... {i}/{len(media_presets)}.'
            )
            output_filename = (
                f"{media.split('/')[-1].split('.')[-2]}.{preset_format}")
            output_file = f"{output_path}/{ts}/{output_filename}"
            cli_cmd = f"ffmpeg -i '{media}' -f {file_format} -vcodec {vcodec} -acodec {acodec} -vb {video_bitrate} -ab {audio_bitrate} -threads {encoder_threads} -y '{output_file}'"
            logger.debug(f"cli command: {cli_cmd}")
            with open(f"{log_path}/ffmpeg.log", "a") as w:
                subprocess.run(
                    cli_cmd,
                    shell=True,
                    check=True,
                    stdout=w,
                    stderr=subprocess.STDOUT,
                    universal_newlines=True,
                )
    except Exception as e:
        logger.error(e)
        raise
    return True
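# A hedged alternative sketch (not the function above): passing ffmpeg an
# argument list instead of a shell string avoids quoting problems when media
# paths contain spaces or quotes. Codec and bitrate values are examples only.
def _encode_one_sketch(media: str, output_file: str) -> None:
    cmd = [
        "ffmpeg", "-i", media,
        "-f", "mp4",
        "-vcodec", "libx264",
        "-acodec", "aac",
        "-vb", "1500k",
        "-ab", "128k",
        "-threads", "2",
        "-y", output_file,
    ]
    # check=True raises CalledProcessError on a nonzero ffmpeg exit code.
    subprocess.run(cmd, check=True)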
def is_triggering_event(self) -> bool:
    ref = self.payload.get("ref")
    if not ref:
        logger.error("There is no info for 'ref' in payload.")
        return False
    return ref == "refs/heads/master"
def get_s3_files(
    bucket_name: str = BUCKET_NAME,
    save_to_disk: bool = True,
    files_list_path: str = FILES_LIST_PATH,
    files_list_filename: str = FILES_LIST_FILENAME,
    aws_region: str = AWS_REGION,
    s3_prefix: str = S3_PREFIX,
) -> list:
    """
    Get S3 objects and create a list
    """
    logger.info("Building media list from S3 objects...")
    logger.debug(
        f"Context Parameters: {get_s3_files.__name__} => {get_s3_files.__code__.co_varnames}"
    )
    data = []
    # expected key format: two path segments, an 8-digit date folder, then the
    # file, e.g. <prefix>/assets/20160823/img.jpg
    pattern = re.compile(
        r"^[A-Za-z0-9-]+/[A-Za-z0-9-]+/[0-9]{8}/.+[.][A-Za-z0-9-]+$"
    )
    try:
        s3 = boto3.client("s3", region_name=aws_region)
        paginator = s3.get_paginator("list_objects_v2")
        pages = paginator.paginate(Bucket=bucket_name, Prefix=s3_prefix)
        for page in pages:
            # "Contents" is absent when a page holds no objects.
            for obj in page.get("Contents", []):
                if pattern.match(obj["Key"]):
                    data.append(obj["Key"])
                else:
                    logger.warning(
                        f'Wrong filename format, object "{obj["Key"]}" not added to the list.'
                    )
        statistics.append(["get_s3_files", len(data)])
        logger.info("Media objects list generated successfully.")
        logger.debug(f"Media objects count: {len(data)}.")
        if save_to_disk:
            logger.info("Writing media list to disk...")
            export_data = [f"{item}\n" for item in data]
            with open(f"{files_list_path}/{files_list_filename}", "w") as w:
                w.writelines(export_data)
            logger.info(
                f'List successfully saved to disk: "{files_list_path}/{files_list_filename}".'
            )
    except Exception as e:
        logger.error(e)
        raise
    return data
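# A quick sanity check of the key pattern above; the sample keys are
# hypothetical. The regex requires two path segments, an 8-digit date folder,
# and a file with an extension.
_key_pattern = re.compile(r"^[A-Za-z0-9-]+/[A-Za-z0-9-]+/[0-9]{8}/.+[.][A-Za-z0-9-]+$")
assert _key_pattern.match("media/assets/20160823/img.jpg")
assert not _key_pattern.match("assets/20160823/img.jpg")  # only one leading segment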
def _extract_tar(file_path, target_dir):
    logger.info("Extracting {0} to {1} ...".format(file_path, target_dir))
    try:
        import tarfile
        with tarfile.open(file_path) as tar:
            tar.extractall(path=target_dir)
    except:
        logger.error("Fail to extract. Error: {0}".format(sys.exc_info()))
        return False
    return True
def create_table(
    table_name: str = TABLE_NAME,
    ReadCapacityUnits: int = TABLE_READ_CAPACITY_UNITS,
    WriteCapacityUnits: int = TABLE_WRITE_CAPACITY_UNITS,
    aws_region: str = AWS_REGION,
) -> bool:
    """
    Creates DynamoDB table
    """
    try:
        client = boto3.client("dynamodb", region_name=aws_region)
        response = client.list_tables()
        tables = [
            table for table in response["TableNames"] if table == table_name
        ]
        if len(tables) > 0:
            logger.warning(
                f'Table "{table_name}" already exists. Skipping table creation.'
            )
            return False
        logger.info(
            f'Table "{table_name}" does not exist. Starting creation process...'
        )
    except Exception as e:
        logger.error(e)
        raise
    logger.info("Creating DB table...")
    logger.debug(
        f"Context Parameters: {create_table.__name__} => {create_table.__code__.co_varnames}"
    )
    try:
        dynamodb = boto3.resource("dynamodb", region_name=aws_region)
        table = dynamodb.create_table(
            TableName=table_name,
            AttributeDefinitions=[
                {"AttributeName": "ts", "AttributeType": "S"}
            ],
            KeySchema=[{"AttributeName": "ts", "KeyType": "HASH"}],
            ProvisionedThroughput={
                "ReadCapacityUnits": int(ReadCapacityUnits),
                "WriteCapacityUnits": int(WriteCapacityUnits),
            },
        )
        logger.info("Table created successfully.")
        logger.debug(table)
    # Service exceptions live on the underlying client, not on the resource.
    except dynamodb.meta.client.exceptions.ResourceInUseException as e:
        logger.warning(
            f'Table "{table_name}" already exists. Skipping table creation.'
        )
        logger.debug(e)
        return False
    return True
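# A hedged follow-up sketch: create_table returns before the table reaches
# ACTIVE, so a caller that seeds right away may want to block on boto3's
# built-in waiter first. The names below mirror the defaults above.
def _wait_for_table(table_name: str = TABLE_NAME,
                    aws_region: str = AWS_REGION) -> None:
    dynamodb = boto3.resource("dynamodb", region_name=aws_region)
    table = dynamodb.Table(table_name)
    # Polls DescribeTable until the table exists (raises if the waiter
    # times out).
    table.wait_until_exists()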
def factory(event_type, payload):
    if isinstance(payload, str):
        # Parse a JSON string into a dict before routing.
        payload = json.loads(payload)
    elif isinstance(payload, dict):
        pass
    else:
        logger.error(f"Wrong type of payload: {type(payload)}")
        return None
    if event_type == "push":
        return Push(event_type, payload)
    logger.error(f"Unhandled event type: {event_type}")
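# A minimal usage sketch for factory, assuming Push exposes the
# is_triggering_event method shown earlier; the payload is a trimmed,
# hypothetical push event.
def _demo_factory():
    sample_payload = {"ref": "refs/heads/master"}
    event = factory("push", sample_payload)
    if event and event.is_triggering_event():
        print("push to master detected")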
def detect_python_version():
    py_architecture = platform.architecture()[0]
    py_version = ".".join(map(str, sys.version_info[0:2]))
    py_full_version = ".".join(map(str, sys.version_info[0:3]))
    SysInfo.python = py_version.replace('.', '')
    logger.debug("In detect_python_version(), sys_info['python']: {0}".format(SysInfo.python))
    logger.info("Python: {0}, {1}".format(py_full_version, py_architecture))
    if not (_version_compare("3.5", py_version) and py_architecture == '64bit'):
        logger.error("64-bit Python 3.5 or higher is required to run this installer."
                     " We recommend the latest Python 3.5 (https://www.python.org/downloads/release/python-355/).")
        return False
    return True
def _run_cmd_admin(cmd, param, wait=True):
    try:
        executeInfo = ShellExecuteInfo(fMask=0x00000040,
                                       hwnd=None,
                                       lpVerb='runas'.encode('utf-8'),
                                       lpFile=cmd.encode('utf-8'),
                                       lpParameters=param.encode('utf-8'),
                                       lpDirectory=None,
                                       nShow=5)
        if not ctypes.windll.shell32.ShellExecuteEx(ctypes.byref(executeInfo)):
            raise ctypes.WinError()
        if wait:
            _wait_process(executeInfo.hProcess)
    except Exception as e:
        # logger.error("Fail to run command {0} as admin, unexpected error: {1}".format(cmd, e))
        logger.error("Fail to run command {0} as admin, unexpected error! Please try to run installer script again!".format(cmd))
def __init__(self):
    cf = configparser.ConfigParser()
    cf.read('config.ini', encoding='utf-8')
    serial_port = cf['options']['port']
    self.pdu_length = cf['options']['pdu_length']
    global Ret
    try:
        self.ser = serial.Serial(serial_port, 9600, timeout=0.5)
        if self.ser.isOpen():
            logger.info(f'{self.ser.name} port is open')
            Ret = True
    except Exception as e:
        logger.error(f'Exception: {e}')
def _unzip_file(file_path, target_dir):
    logger.info("Unzipping {0} to {1} ...".format(file_path, target_dir))
    try:
        import zipfile
        with zipfile.ZipFile(file_path) as zip_file:
            if not os.path.isdir(target_dir):
                os.makedirs(target_dir)
            for name in zip_file.namelist():
                zip_file.extract(name, target_dir)
        return True
    except:
        logger.error("Fail to unzip. Error: {0}".format(sys.exc_info()))
        return False
def _download_file(url, local_path):
    logger.info("Downloading {0} ...".format(url))
    try:
        import urllib.request
        import ssl
        # Certificate verification is deliberately disabled here; the download
        # URL must be trusted by other means.
        myssl = ssl.create_default_context()
        myssl.check_hostname = False
        myssl.verify_mode = ssl.CERT_NONE
        with urllib.request.urlopen(url, context=myssl) as fin, \
                open(local_path, 'wb') as fout:
            fout.write(fin.read())
        return True
    except:
        logger.error("Fail to download {0}. Error: {1}".format(url, sys.exc_info()))
        return False
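# A hedged variant that streams the response to disk in chunks rather than
# reading the whole body into memory, and keeps certificate verification on;
# useful for large downloads from trusted hosts.
def _download_file_streaming(url, local_path):
    import shutil
    import ssl
    import urllib.request
    ctx = ssl.create_default_context()
    with urllib.request.urlopen(url, context=ctx) as fin, \
            open(local_path, 'wb') as fout:
        shutil.copyfileobj(fin, fout)  # copies in fixed-size chunks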
def detect_gpu():
    gpu_detector_name = 'gpu_detector_' + SysInfo.os
    if SysInfo.os == TOOLSFORAI_OS_WIN:
        gpu_detector_name = gpu_detector_name + '.exe'
    gpu_detector_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "tools", gpu_detector_name)
    if not os.path.isfile(gpu_detector_path):
        logger.error(
            'Cannot find the GPU detector. Please make sure {0} is in the same directory as the installer script.'.format(
                gpu_detector_name))
        return False
    SysInfo.gpu, return_stdout = _run_cmd(gpu_detector_path, return_stdout=True)
    if not SysInfo.gpu:
        return_stdout = 'None'
    logger.info('NVIDIA GPU: {0}'.format(return_stdout))
    return True
def pip_install_scipy(pkg_info, options):
    logger.info("Begin to install scipy (numpy, scipy) ...")
    name = pkg_info["scipy"]["numpy"]["name"]
    version = pkg_info["scipy"]["numpy"]["version"]
    if not pip_install_package(name, options, version):
        logger.error("Pip_install_scipy terminated due to numpy installation failure.")
        return False
    name = pkg_info["scipy"]["scipy"]["name"]
    version = pkg_info["scipy"]["scipy"]["version"]
    if not pip_install_package(name, options, version):
        logger.error("Pip_install_scipy terminated due to scipy installation failure.")
        return False
    return True
def save_defer_encoding(movies_list: list,
                        files_list_path: str = FILES_LIST_PATH) -> bool:
    """
    Store list of deferred remote video encoding to file
    """
    assert movies_list is not None
    assert isinstance(movies_list, list)
    try:
        data = json.dumps(movies_list, indent=4)
        file_path = f"{files_list_path}/defered_encode.json"
        with open(file_path, "w") as w:
            w.write(data)
    except Exception as e:
        logger.error(e)
        raise
    logger.info(f"Deferred encoding list saved successfully: '{file_path}'")
    return True
def main():
    args, unknown = set_options()
    if args.verbose:
        logger.setLevel(logging.DEBUG)
    if args.cuda80:
        SysInfo.cuda80 = True
    logger.info("Detecting system information ...")
    if not utils.detect_os() or not utils.detect_python_version() or not utils.detect_gpu():
        return
    utils.detect_git()
    if SysInfo.os == TOOLSFORAI_OS_WIN:
        utils.detect_vs()
    if SysInfo.gpu:
        if not utils.detect_cuda():
            return
        utils.detect_cudnn()
    target_dir = ''
    if SysInfo.os == TOOLSFORAI_OS_WIN:
        target_dir = os.path.sep.join(
            [os.getenv("APPDATA"), "Microsoft", "ToolsForAI", "RuntimeSDK"])
    elif SysInfo.os == TOOLSFORAI_OS_LINUX:
        target_dir = os.path.sep.join(
            [os.path.expanduser('~'), '.toolsforai', 'RuntimeSDK'])
    try:
        _thread.start_new_thread(install_pkg.install_cntk, (target_dir,))
    except:
        logger.error("Fail to start install_cntk thread!")
    pkg_info = utils.rd_config()
    install_pkg.pip_software_install(pkg_info, args.options, args.user, args.verbose)
    utils.delete_env("AITOOLS_CNTK_ROOT")
    utils.fix_directory_ownership()
    # Only report failures when some packages actually failed to install.
    if SysInfo.fail_install:
        install_res = "/".join(SysInfo.fail_install)
        logger.info(
            "Fail to install {0}. Please try to run installer script again!".format(install_res))
    logger.info('Setup finishes.')
    input('Press enter to exit.')
def create_s3_bucket(
    bucket_name: str = BUCKET_NAME, aws_region: str = AWS_REGION
) -> bool:
    """
    Create the S3 bucket of the project
    """
    s3 = boto3.client("s3", region_name=aws_region)
    bucket_exists = True
    try:
        response = s3.list_buckets()
        buckets = [
            bucket["Name"]
            for bucket in response["Buckets"]
            if bucket["Name"] == bucket_name
        ]
        bucket_exists = len(buckets) > 0
        if bucket_exists:
            logger.warning(
                "S3 bucket already exists. Skipping bucket creation."
            )
    except Exception as e:
        logger.error(e)
        raise
    if not bucket_exists:
        try:
            response = s3.create_bucket(
                Bucket=bucket_name,
                ACL="private",
                CreateBucketConfiguration={"LocationConstraint": aws_region},
            )
            logger.info(f'Created S3 bucket "{bucket_name}" successfully.')
            logger.debug(f"S3 client response: {response}")
        except Exception as e:
            logger.error(e)
            raise
    else:
        return False
    return True
def send_message(**kwargs):
    retry_num = 1
    while retry_num < 4:
        msg = kwargs['msg']
        access_token = sp.get_token_from_sql()
        user_list = cf['user']['user_wx_openid'].split(',')
        if not user_list[-1]:
            user_list.remove('')
        template_id = cf['weixin']['template_id']  # message template
        message_url = f'https://api.weixin.qq.com/cgi-bin/message/template/send?access_token={access_token}'
        for user_id in user_list:
            body = {
                "touser": user_id,
                "template_id": template_id,
                "url": '',
                "topcolor": "#FF0000",
                'data': {
                    'sender': {'value': kwargs['sender'], "color": "#173177"},
                    'msg': {'value': kwargs['msg'], "color": "#173177"}
                }
            }
            r = requests.post(url=message_url, data=json.dumps(
                body, ensure_ascii=False).encode('utf-8'))
            sp.insert_msg_by_wx(
                send_message_time=f"{time.strftime('%Y%m%d%H%M%S')}",
                message_text=f"{msg}",
                msg_response=f"{r.text}")
            if '42001' in r.text:
                # Error 42001: the access token has expired; refresh and retry.
                logger.error(f'Failed to send "{msg}", retrying (attempt {retry_num}). Details: {r.text}')
                getToken().get_token()
                retry_num += 1
            else:
                logger.success(f'"{msg}" sent successfully!')
                return
def medias_copy(
    local_path: str = LOCAL_MEDIA_OUTPUT_PATH,
    video_encode: bool = VIDEO_ENCODE,
    media_encode_platform: str = MEDIA_ENCODE_PLATFORM,
) -> bool:
    """
    Copy media files to S3
    """
    logger.info("Starting copy...")
    try:
        medias = get_local_medias_files(path=local_path, save_to_disk=False)
        logger.debug(medias)
        for media in medias:
            media_type = get_media_type(basename(media))
            ts = media.split("/")[-2]
            if media_type == "movie":
                if video_encode == "True" and media_encode_platform == "cloud":
                    send_to_bucket(media, ts)
                elif video_encode == "True" and media_encode_platform == "local":
                    logger.info(
                        f"Skipping copy of {media} for local re-encoding."
                    )
            elif media_type == "picture":
                send_to_bucket(media, ts)
            else:
                logger.warning(f"Media type is: {media_type} !")
        logger.info(
            f"{len(medias)} media files have been successfully copied."
        )
    except Exception as e:
        logger.error(e)
        raise
    statistics.append(["medias_copy", len(medias)])
    logger.info("...done.")
    return True
def phrase_msg_detail(self):
    # Main entry: read the SMS according to its length (single vs. multipart).
    dcs = self.phrase_dcs()
    if self.is_short_message():
        ud_raw = ''.join(self.pdu_total[self.sender_num_position + 10:])
        # print(f'Ordinary SMS, content: {self.phrase_ud(ud_raw)}\n')
        return {
            'msg_mode': 0,  # ordinary (single-part) SMS
            'msg_id': 1,
            'msg_num': 1,
            'msg_num_index': 1,
            'msg_time': self.phrase_msg_time(),
            'out_way': dcs['out_way'],
            'alphabet': dcs['alphabet'],
            'ud': self.phrase_ud(ud_raw),
            'raw_pdu': self.pdu_str,
            'sender': self.phrase_oa()
        }
    long_msg = self.phrase_longMsg()
    if not long_msg:
        logger.error(f'This SMS is not a long (multipart) message: {self.pdu_str}')
        return
    # print(f'Long SMS, message ID: {long_msg["msg_id"]}, part '
    #       f'{long_msg["msg_num_index"]}/{long_msg["msg_num"]}. Content: {self.phrase_ud(long_msg["ud"])}')
    return {
        'msg_mode': 1,  # long (multipart) SMS
        'msg_id': long_msg["msg_id"],
        'msg_num': long_msg["msg_num"],
        'msg_num_index': long_msg["msg_num_index"],
        'msg_time': self.phrase_msg_time(),
        'out_way': dcs['out_way'],
        'alphabet': dcs['alphabet'],
        'ud': self.phrase_ud(long_msg["ud"]),
        'raw_pdu': self.pdu_str,
        'sender': self.phrase_oa()
    }
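# A hedged sketch of reassembling a multipart SMS from the dicts returned by
# phrase_msg_detail: parts sharing msg_id are buffered, then joined in
# msg_num_index order once all msg_num parts have arrived. The in-memory
# buffer below is an assumption, not part of the original class.
from collections import defaultdict

_pending_parts = defaultdict(dict)

def assemble_long_msg(detail: dict):
    """Return the full text once every part of a long message is present."""
    if detail['msg_mode'] == 0:  # ordinary single-part message
        return detail['ud']
    parts = _pending_parts[detail['msg_id']]
    parts[detail['msg_num_index']] = detail['ud']
    if len(parts) == detail['msg_num']:
        del _pending_parts[detail['msg_id']]
        return ''.join(parts[i] for i in sorted(parts))
    return None  # still waiting for more parts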
def main(self):
    # Configure incoming SMS to be forwarded to the terminal instead of stored.
    self.send_data('AT+CNMI=2,2,0,0,0\r\n')
    out = self.receive_data()
    if 'OK' in out:
        logger.info('SMS reception set to: do not store on SIM, output directly to terminal')
    else:
        logger.error(f'Failed to initialize SMS reception settings, output: {out}')
    self.send_data('AT+CMGF=0\r\n')
    out = self.receive_data()
    if 'OK' in out:
        logger.info('PDU mode enabled')
    else:
        logger.error(f'Failed to enable PDU mode, output: {out}')
    self.send_data('AT+CPMS?\r\n')
    out = self.receive_data()
    pa = re.compile(r'CPMS: (.*?)\\r\\n')
    logger.info(f'Current SMS storage status on SIM: {pa.findall(out)}')
    t1 = threading.Thread(target=self.receive_msg_always)
    t1.start()
def detect_os():
    os_name = platform.platform(terse=True)
    os_bit = platform.architecture()[0]
    is_64bit = (os_bit == "64bit")
    logger.info("OS: {0}, {1}".format(os_name, os_bit))
    if os_name.startswith("Windows"):
        SysInfo.os = TOOLSFORAI_OS_WIN
        if not os_name.startswith("Windows-10"):
            logger.warning(
                "We recommend Windows 10 as the primary development OS; other Windows versions are not fully supported.")
    elif os_name.startswith("Linux"):
        SysInfo.os = TOOLSFORAI_OS_LINUX
    elif os_name.startswith("Darwin"):
        SysInfo.os = TOOLSFORAI_OS_MACOS
        is_64bit = sys.maxsize > 2 ** 32
    else:
        logger.error("Your OS ({0}-{1}) is not supported! Only Windows, Linux and macOS are supported now.".format(os_name, os_bit))
        return False
    if not is_64bit:
        logger.error("Your OS is not 64-bit. Only 64-bit OSes are supported.")
        return False
    return True
def remote_video_encoder(files_list_path: str = FILES_LIST_PATH) -> bool:
    """
    Send movies list to SQS -> lambda/ffmpeg
    """
    logger.info("Starting remote movie re-encoding operations...")
    data_path = f"{files_list_path}/defered_encode.json"
    if os.path.exists(data_path):
        try:
            with open(data_path, "r") as r:
                # The file holds a JSON list (written by save_defer_encoding);
                # parse it instead of iterating over the raw string.
                movies = json.loads(r.read())
            for movie in movies:
                queue_message = send_to_queue(movie)
                logger.info(f"Re-encoding process launched for '{movie}'.")
                logger.debug(queue_message)
        except Exception as e:
            logger.error(e)
            raise
    else:
        logger.critical(f"Path does not exist: '{data_path}'. Stopping here.")
        return False
    logger.info("...done.")
    return True
def send_to_bucket(
    media_file: str,
    ts: str,
    bucket_name: str = BUCKET_NAME,
    s3_prefix: str = S3_PREFIX,
    aws_region: str = AWS_REGION,
) -> bool:
    """
    Send file to S3
    """
    logger.info(f'Sending "{media_file}" to bucket "{bucket_name}"...')
    try:
        key = f"{s3_prefix}/{ts}/{basename(media_file)}"
        s3 = boto3.client("s3", region_name=aws_region)
        with open(media_file, "rb") as data:
            s3.upload_fileobj(data, bucket_name, key)
        logger.debug(f"media_file: {media_file} - key: {key}")
    except Exception as e:
        logger.error(e)
        raise
    logger.info(f'File "{media_file}" sent successfully to "{bucket_name}" as "{key}".')
    return True
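# A hedged variant for large uploads: boto3's TransferConfig tunes the
# multipart threshold and concurrency that upload_fileobj uses internally.
# The 64 MB threshold below is an arbitrary example value.
from boto3.s3.transfer import TransferConfig

def _send_large_file(media_file: str, bucket_name: str, key: str,
                     aws_region: str = AWS_REGION) -> None:
    s3 = boto3.client("s3", region_name=aws_region)
    config = TransferConfig(multipart_threshold=64 * 1024 * 1024,
                            max_concurrency=4)
    with open(media_file, "rb") as data:
        s3.upload_fileobj(data, bucket_name, key, Config=config)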