def pip_uninstall_packge(name, options, version):
    """Uninstall a pip package.

    Args:
        name: package name to uninstall.
        options: list of extra pip options (a leading "--user" is stripped,
            since "pip uninstall" does not accept it).
        version: version string, only used for logging; may be falsy.

    Returns:
        True when the uninstall succeeded, False otherwise.

    NOTE(review): the function name keeps the original typo ("packge")
    because external callers reference it by this name.
    """
    try:
        if not version:
            version = ""
        logger.info("Begin to pip-uninstall {0} {1} ...".format(name, version))
        # Work on a copy so the caller's options list is not mutated.
        options_copy = options.copy()
        if options_copy and options_copy[0] == "--user":
            options_copy.pop(0)
        # check_call() raises CalledProcessError on a non-zero exit code,
        # so res is 0 whenever the call returns (dead "res = -1" removed).
        res = subprocess.check_call([
            sys.executable, '-m', 'pip', 'uninstall', *options_copy, "-y",
            "-q", name
        ])
        if res != 0:
            logger.error("Fail to pip-uninstall {0}.".format(name))
        else:
            logger.info("Pip-uninstall {0} {1} successfully!".format(
                name, version))
        return res == 0
    except Exception:
        logger.error(
            "Fail to pip-uninstall {0}, unexpected error! Please try to run installer script again!"
            .format(name))
        return False
def process_local_movie_medias(
    local_media_output_path: str = LOCAL_MEDIA_OUTPUT_PATH,
    files_list_path: str = FILES_LIST_PATH,
    files_list_filename: str = FILES_LIST_FILENAME,
) -> bool:
    """Encode every movie listed in the local files list.

    Reads the media list file, and for each entry classified as a movie,
    derives its timestamp and hands it to video_encoder().

    Returns:
        True when the whole batch completed; re-raises on any error.
    """
    logger.info("Starting batch movie encoding...")
    try:
        with open(f"{files_list_path}/{files_list_filename}", "r") as r:
            data = r.read().splitlines()
        for item in data:
            # Guard clause replaces the original nested if / dead "else: pass".
            if get_media_type(item) != "movie":
                continue
            # Timestamp is derived from the parent directory of the media path.
            ts = media_ts_format(item.split("/")[-2], item)
            logger.debug(ts)
            if ts:
                video_encoder(
                    media=item, ts=ts, output_path=local_media_output_path
                )
    except Exception as e:
        logger.error(e)
        raise
    logger.info("Encoder done.")
    return True
def build_card_objects(media_list: list = None) -> list:
    """ Creates DB objects from S3 objects list """
    logger.info("Crafting list of DB objects...")
    logger.debug(
        f"Context Parameters: {build_card_objects.__name__} => {build_card_objects.__code__.co_varnames}"
    )
    # Group media entries by their timestamp key.
    grouped = defaultdict(list)
    try:
        for entry in media_list:
            card = {
                "name": entry["name"],
                "path": entry["path"],
                "url": entry["url"],
                "kind": entry["kind"],
            }
            grouped[entry["ts"]].append(card)
        medias = [{"ts": ts, "medias": cards} for ts, cards in grouped.items()]
        statistics.append(["build_card_objects", len(medias)])
        logger.info(f'{len(medias)} "card" objects generated successfully.')
    except Exception as e:
        logger.error(e)
        raise
    return medias
def pip_install_package(name, options, version, pkg=None):
    """Install a pip package.

    Args:
        name: package name.
        options: list of extra pip options, passed through to pip.
        version: version string; "<"/">"-prefixed specs are appended verbatim,
            plain versions become "name == version"; may be falsy.
        pkg: optional pre-built requirement string; overrides name/version.

    Returns:
        True when the install succeeded, False otherwise. Failures are
        recorded in SysInfo.fail_install.
    """
    try:
        if not pkg:
            if version:
                if version.strip()[0] in ("<", ">"):
                    # Already a comparison spec, e.g. ">=1.0".
                    pkg = "{0}{1}".format(name, version)
                else:
                    pkg = "{0} == {1}".format(name, version)
            else:
                pkg = name
                version = ""
        if not version:
            version = ""
        logger.info("Begin to pip-install {0} {1} ...".format(name, version))
        logger.debug("pkg : {0}".format(pkg))
        # check_call() raises CalledProcessError on a non-zero exit code,
        # so res is 0 whenever the call returns (dead "res = -1" removed).
        res = subprocess.check_call(
            [sys.executable, '-m', 'pip', 'install', *options, "-q", pkg])
        if res != 0:
            logger.error("Fail to pip-install {0}.".format(name))
            SysInfo.fail_install.append("%s %s" % (name, version))
        else:
            logger.info("Pip-install {0} {1} successfully!".format(
                name, version))
        return res == 0
    except Exception:
        logger.error(
            "Fail to pip-install {0}, unexpected error! Please try to run installer script again!"
            .format(name))
        SysInfo.fail_install.append("%s %s" % (name, version))
        return False
def receive_msg_always(self):
    """Poll the serial port forever; forward any line that looks like a new SMS.

    Blocks indefinitely. Lines longer than the configured PDU length are
    treated as incoming messages and passed to store_and_send_data().
    """
    logger.info('------------------开始监听端口数据------------------')
    # Hoisted loop invariant: pdu_length comes from config as a string.
    min_length = int(self.pdu_length)
    while True:
        # decode() already returns str, so no extra str() wrapper is needed.
        line = self.ser.readline().decode('utf-8')
        if len(line) > min_length:
            logger.info(f'发现有新的短信,详情:{line}')
            store_and_send_data(line)
def detect_git():
    """Detect an installed Git binary and record the result in SysInfo.git."""
    version = _run_cmd("git", ["--version"])
    SysInfo.git = version
    if not version:
        logger.info("Git: {0} (Git is needed, otherwise some dependency packages can't be installed.)".format(version))
        return
    logger.info("Git: {0}".format(version))
def sizetypes(pricefile):
    """
    Summary.

        Finds all EC2 size types in price file

    Args:
        :pricefile (str): complete path to file on local fs containing
         ec2 price data

    Returns:
        size type list (list)
    """
    sizes = []
    count = 0
    with open(pricefile) as f1:
        # json.load reads the stream directly; no need for loads(read()).
        products = json.load(f1)['products']
    # Iterate the mapping directly; the original built a throwaway key list
    # and re-looked-up f2['products'][sku] on every iteration.
    for sku, product in products.items():
        try:
            sizes.append(product['attributes']['instanceType'])
            count += 1
        except KeyError:
            # Some SKUs have no instanceType attribute; skip them.
            logger.info(f'No size type found at count {count}, sku {sku}')
    return sizes
def export_to_json(
    db_data: list = None,
    display: bool = False,
    files_list_path: str = FILES_LIST_PATH,
) -> bool:
    """Export DB objects to a cards.json file.

    Args:
        db_data: list of DB objects to serialize.
        display: when True, also print the JSON to stdout.
        files_list_path: directory the JSON file is written to.

    Returns:
        True on success; re-raises on any error.
    """
    try:
        data = json.dumps(db_data, indent=4)
        # Count the input directly; the original round-tripped the JSON
        # string through json.loads() just to take its length.
        statistics.append(["export_to_json", len(db_data)])
        with open(f"{files_list_path}/cards.json", "w") as f:
            f.write(data)
        logger.info(
            f"DB objects exported to JSON file successfully: {files_list_path}/cards.json"
        )
        if display:
            print(data)
    except Exception as e:
        logger.error(e)
        raise
    return True
def seed_db_table(
    db_objects: list = None,
    table_name: str = TABLE_NAME,
    aws_region: str = AWS_REGION,
) -> bool:
    """ Insert DB objects into table """
    logger.info("Inserting data into DB...")
    logger.debug(
        f"Context Parameters: {seed_db_table.__name__} => {seed_db_table.__code__.co_varnames}"
    )
    try:
        table = boto3.resource("dynamodb", region_name=aws_region).Table(table_name)
        # batch_writer() buffers put requests and flushes them automatically.
        with table.batch_writer() as writer:
            for record in db_objects:
                writer.put_item(Item=record)
        statistics.append(["seed_db_table", len(db_objects)])
        logger.info(f"{len(db_objects)} item(s) were inserted in DB.")
    except Exception as e:
        logger.error(e)
        raise
    return True
def read_list_from_file(
    files_list_path: str = FILES_LIST_PATH,
    files_list_filename: str = FILES_LIST_FILENAME,
) -> list:
    """Import the media list from a file.

    Returns:
        The list of lines on success, or False when the file is missing
        (kept for backward compatibility with existing callers).
    """
    file_path = f"{files_list_path}/{files_list_filename}"
    # Guard clause: bail out early when the file is missing. The original's
    # trailing logger.critical("Something went wrong!") / return False after
    # the if/else was unreachable and has been removed.
    if not os.path.exists(file_path):
        logger.critical(
            f'Cannot open the file "{file_path}", looks like it does not exists.'
        )
        return False
    try:
        with open(file_path, "r") as r:
            data = r.read().splitlines()
        statistics.append(["read_list_from_file", len(data)])
        logger.info(f"{len(data)} items imported from file.")
    except Exception as e:
        logger.error(e)
        raise
    return data
def detect_cuda():
    # Detect the installed CUDA toolkit via `nvcc -V` and record the chosen
    # dependency target in SysInfo.cuda ("8.0" or "9.0").
    # Returns True when a supported configuration is selected, False when
    # CUDA 8.0 is found but '--cuda80' was not passed; implicitly returns
    # None on non-Windows/Linux platforms.
    if (SysInfo.os == TOOLSFORAI_OS_WIN or SysInfo.os == TOOLSFORAI_OS_LINUX):
        # return detect_cuda_()
        status, stdout = _run_cmd("nvcc", ["-V"], True)
        if status and re.search(r"release\s*8.0,\s*V8.0", stdout):
            # CUDA 8.0 detected: only proceed when the user explicitly
            # opted in with '--cuda80'.
            SysInfo.cuda = "8.0"
            logger.info("CUDA: {0}".format(SysInfo.cuda))
            if SysInfo.cuda80:
                logger.warning(
                    "Detect parameter '--cuda80', the installer script will be forced to install dependency package for CUDA 8.0.")
                return True
            else:
                logger.warning("We recommend CUDA 9.0 (https://developer.nvidia.com/cuda-toolkit)."
                               "If you want to install dependency package for CUDA 8.0, please run the installer script with '--cuda80' again.")
                return False
        elif status and re.search(r"release\s*9.0,\s*V9.0", stdout):
            SysInfo.cuda = "9.0"
            logger.info("CUDA: {0}".format(SysInfo.cuda))
        else:
            # nvcc missing or an unrecognized release: default to 9.0.
            SysInfo.cuda = "9.0"
            logger.warning("Not detect CUDA! We recommend CUDA 9.0 (https://developer.nvidia.com/cuda-toolkit). "
                           "The installer script will install dependency package for CUDA 9.0 by default.")
        # NOTE(review): reconstructed from a whitespace-mangled source; this
        # '--cuda80' override is placed after the whole if/elif/else chain
        # (so it also applies when 9.0 was detected) — confirm against the
        # original file's indentation.
        if SysInfo.cuda80:
            SysInfo.cuda = "8.0"
            logger.warning(
                "Detect parameter '--cuda80', the installer script will be forced to install dependency package for CUDA 8.0.")
            return True
        else:
            return True
def video_encoder(
    media: str = None,
    ts: str = None,
    output_path: str = None,
    log_path: str = LOG_PATH,
    video_preset_data: dict = video_preset_data,
    media_presets: str = VIDEO_PRESETS,
    encoder_threads: int = ENCODER_THREADS,
) -> bool:
    """Encode a video with ffmpeg, once per configured preset.

    Args:
        media: path to the source media file.
        ts: timestamp used as the output subdirectory.
        output_path: root directory for encoded output.
        log_path: directory where ffmpeg.log is appended.
        video_preset_data: nested mapping category -> format -> settings.
        media_presets: space-separated "category.format" preset names.
        encoder_threads: ffmpeg -threads value.

    Returns:
        True when all presets encoded; re-raises on any error.
    """
    try:
        media_presets = media_presets.split(" ")
        for i, preset_name in enumerate(media_presets, start=1):
            preset_category, preset_format = preset_name.split(".")
            settings = video_preset_data[preset_category][preset_format]
            logger.debug(f"Settings: {settings}")
            logger.info(
                f'Encoding media "{media}" using preset "{preset_category} -> {preset_format}" ...'
            )
            logger.info(
                f'Processing task(s) for: "{preset_format}"... {i}/{len(media_presets)}.'
            )
            output_filename = (
                f"{media.split('/')[-1].split('.')[-2]}.{preset_format}")
            output_file = f"{output_path}/{ts}/{output_filename}"
            # Bug fix: build the command as an argument list with shell=False.
            # The original interpolated file paths into a shell string, which
            # is vulnerable to shell injection and breaks on paths containing
            # single quotes.
            cli_cmd = [
                "ffmpeg", "-i", media,
                "-f", str(settings["format"]),
                "-vcodec", str(settings["vcodec"]),
                "-acodec", str(settings["acodec"]),
                "-vb", str(settings["video_bitrate"]),
                "-ab", str(settings["audio_bitrate"]),
                "-threads", str(int(encoder_threads)),
                "-y", output_file,
            ]
            logger.debug(f"cli command: {cli_cmd}")
            with open(f"{log_path}/ffmpeg.log", "a") as w:
                subprocess.run(
                    cli_cmd,
                    check=True,
                    stdout=w,
                    stderr=subprocess.STDOUT,
                    universal_newlines=True,
                )
    except Exception as e:
        logger.error(e)
        raise
    return True
def monitor_remote_ops() -> None:
    """Poll the SQS queue and log the number of running remote task(s).

    Blocks, sleeping 5 s between polls, until the queue is empty.
    """
    # Call queue_count() once per loop iteration; the original called it up
    # to three times per pass (while test, log message, redundant break).
    pending = queue_count()
    while pending > 0:
        # Typo fix in the log message: "Numbere" -> "Number".
        logger.info(f"Number of task(s) processing remotely: {pending}")
        time.sleep(5)
        pending = queue_count()
def _extract_tar(file_path, target_dir):
    """Extract a tar archive into target_dir.

    Returns:
        True on success, False on any extraction error.
    """
    logger.info("Extracting {0} to {1} ...".format(file_path, target_dir))
    try:
        import tarfile
        # SECURITY NOTE(review): extractall() trusts member paths; a
        # malicious archive could write outside target_dir. If archives can
        # be untrusted, use a tarfile extraction filter (Python 3.12+).
        with tarfile.open(file_path) as tar:
            tar.extractall(path=target_dir)
    except Exception:
        # Bug fix: logger.error("msg", args) %-formats args into the message;
        # the original message had no placeholder, so the exception info was
        # dropped. Format it into the message explicitly. Bare except
        # narrowed to Exception so KeyboardInterrupt/SystemExit propagate.
        logger.error("Fail to extract. Error: {0}".format(sys.exc_info()))
        return False
    return True
def detect_python_version():
    """Log the running interpreter's version and verify it meets requirements.

    Records the major.minor digits in SysInfo.python and returns True only
    for 64-bit Python 3.5+.
    """
    arch = platform.architecture()[0]
    major_minor = ".".join(map(str, sys.version_info[0:2]))
    full_version = ".".join(map(str, sys.version_info[0:3]))
    SysInfo.python = major_minor.replace('.', '')
    logger.debug("In detect_python_version(), sys_info['python']: {0}".format(SysInfo.python))
    logger.info("Python: {0}, {1}".format(full_version, arch))
    if _version_compare("3.5", major_minor) and arch == '64bit':
        return True
    logger.error("64-bit Python 3.5 or higher is required to run this installer."
                 " We recommend latest Python 3.5 (https://www.python.org/downloads/release/python-355/).")
    return False
def __init__(self):
    """Read serial settings from config.ini and open the configured port.

    Sets the module-level Ret flag to True when the port opens successfully.
    """
    config = configparser.ConfigParser()
    config.read('config.ini', encoding='utf-8')
    port = config['options']['port']
    self.pdu_length = config['options']['pdu_length']
    global Ret
    try:
        self.ser = serial.Serial(port, 9600, timeout=0.5)
        if self.ser.isOpen():
            logger.info(f'{self.ser.name} 端口已开')
            Ret = True
    except Exception as e:
        logger.error(f'异常: {e}')
def detect_mpi_win():
    """Detect Microsoft MPI on Windows and record its version in SysInfo.mpi.

    Returns:
        True when MPI is found at the suggested version or newer, False when
        MPI is missing or older than the suggested version.
    """
    target_version = "7.0.12437.6"
    mpi_path = _registry_read(winreg.HKEY_LOCAL_MACHINE, r"Software\Microsoft\MPI", "InstallRoot")
    if (mpi_path and os.path.isfile(os.path.sep.join([mpi_path, "bin", "mpiexec.exe"]))):
        SysInfo.mpi = _registry_read(winreg.HKEY_LOCAL_MACHINE, r"Software\Microsoft\MPI", "Version")
        if SysInfo.mpi:
            logger.info("MPI: {0}".format(SysInfo.mpi))
            if not _version_compare(target_version, SysInfo.mpi):
                logger.warning("CNTK suggests MPI version to be {0}, please manually upgrade MPI.".format(target_version))
                return False
            return True
        # NOTE(review): when InstallRoot exists but the Version value is
        # missing, this falls through and returns None (falsy) — confirm
        # callers treat that the same as "not detected".
    else:
        # Typo fix in the log message: "isntall" -> "install".
        logger.warning("Not detect MPI, please manually download and install MPI.")
        return False
def pip_install_extra_software(pkg_info, options):
    """Install the extra software packages: jupyter, matplotlib and pandas.

    Skips any package whose module is already importable. Install failures
    are logged by pip_install_package() and do not abort the remaining
    packages (same as the original behavior).
    """
    logger.info(
        "Begin to install extra software(jupyter, matplotlib, and pandas) ...")
    # The original repeated an identical block three times; loop instead.
    for key in ("jupyter", "matplotlib", "pandas"):
        name = pkg_info["extra_software"][key]["name"]
        version = pkg_info["extra_software"][key]["version"]
        if utils.module_exists(name):
            logger.info("{0} is already installed.".format(name))
        else:
            pip_install_package(name, options, version)
def detect_cudnn_win():
    """Detect installed cuDNN DLLs on Windows; record the version in SysInfo.cudnn.

    Uses where.exe to locate the DLLs; when several candidate versions match,
    the last one found in iteration order wins (original behavior kept).
    """
    # CUDA 8.0 works with cuDNN 6 or 7; later CUDA targets only cuDNN 7.
    if SysInfo.cuda == "8.0":
        required_cndunn = {'6': 'cudnn64_6.dll', '7': 'cudnn64_7.dll'}
    else:
        required_cndunn = {'7': 'cudnn64_7.dll'}
    cmd = r"C:\Windows\System32\where.exe"
    for version, dll in required_cndunn.items():
        status, cudnn = _run_cmd(cmd, [dll], True)
        # where.exe can print several paths; accept the first existing file.
        if status and next(filter(os.path.isfile, cudnn.split('\n')), None):
            SysInfo.cudnn = version
            logger.info("cuDNN: {0}".format(version))
    if not SysInfo.cudnn:
        # Typo fix in the log message: "recommand" -> "recommend".
        logger.warning("Not detect cuDNN! We recommend cuDNN 7, please download and install cuDNN 7 from https://developer.nvidia.com/rdp/cudnn-download.")
def _unzip_file(file_path, target_dir):
    """Unzip an archive into target_dir, creating the directory if needed.

    Returns:
        True on success, False on any extraction error.
    """
    logger.info("Unzipping {0} to {1} ...".format(file_path, target_dir))
    try:
        import zipfile
        # exist_ok replaces the original isdir()/else/makedirs() dance.
        os.makedirs(target_dir, exist_ok=True)
        # extractall() replaces the manual per-name extract loop.
        with zipfile.ZipFile(file_path) as zip_file:
            zip_file.extractall(target_dir)
        return True
    except Exception:
        # Bug fix: logger.error("msg", args) %-formats args into the message;
        # the original had no placeholder, so the exception info was dropped.
        # Bare except narrowed to Exception.
        logger.error("Fail to unzip. Error: {0}".format(sys.exc_info()))
        return False
def _download_file(url, local_path):
    """Download url to local_path.

    Returns:
        True on success, False on any download error.
    """
    logger.info("Downloading {0} ...".format(url))
    try:
        import urllib.request
        import ssl
        # SECURITY NOTE(review): certificate verification is disabled here;
        # this allows man-in-the-middle attacks and should be revisited.
        myssl = ssl.create_default_context()
        myssl.check_hostname = False
        myssl.verify_mode = ssl.CERT_NONE
        # Bug fix: open with 'wb' (truncate), not 'ab' — appending meant a
        # retried download was stacked onto a previous partial file,
        # producing a corrupt result.
        with urllib.request.urlopen(url, context=myssl) as fin, \
                open(local_path, 'wb') as fout:
            fout.write(fin.read())
        return True
    except Exception:
        logger.error("Fail to download {0}. Error: {1}".format(url, sys.exc_info()))
        return False
def detect_vs():
    """Detect Visual Studio 2015/2017 installations via the registry and log them."""
    vs = []
    vs_2015_path = _registry_read(winreg.HKEY_LOCAL_MACHINE,
                                  r"SOFTWARE\WOW6432Node\Microsoft\VisualStudio\14.0",
                                  "InstallDir")
    # A registry entry alone is not enough; require devenv.exe on disk.
    if (vs_2015_path and os.path.isfile(os.path.join(vs_2015_path, "devenv.exe"))):
        vs.append("VS2015")
    vs_2017_path = _registry_read(winreg.HKEY_LOCAL_MACHINE,
                                  r"SOFTWARE\WOW6432Node\Microsoft\VisualStudio\SxS\VS7",
                                  "15.0")
    if (vs_2017_path and os.path.isfile(os.path.sep.join([vs_2017_path, "Common7", "IDE", "devenv.exe"]))):
        vs.append("VS2017")
    if not vs:
        # Typo fix in the log message: "form" -> "from".
        logger.warning("Not detect Visual Studio 2017 or 2015! We recommend Visual Studio 2017, "
                       "please manually download and install Visual Studio 2017 from https://www.visualstudio.com/downloads/.")
    else:
        logger.info("Visual Studio: {0}".format(" ".join(vs)))
def pip_install_scipy(pkg_info, options):
    """Install the scipy stack (numpy, then scipy).

    Returns:
        True when both packages installed; False as soon as one fails
        (numpy first, since scipy depends on it).
    """
    logger.info("Begin to install scipy(numpy, scipy) ...")
    # The original repeated an identical block per package; loop instead.
    for key in ("numpy", "scipy"):
        name = pkg_info["scipy"][key]["name"]
        version = pkg_info["scipy"][key]["version"]
        if not pip_install_package(name, options, version):
            logger.error(
                "Pip_install_scipy terminated due to {0} installation failure.".format(key))
            return False
    return True
def jenkins_trigger():
    """Handle a GitHub webhook POST and report whether it would trigger Jenkins.

    Returns:
        (rendered result page, HTTP status): 400 when the X-GitHub-Event
        header is missing, 200 otherwise.
    """
    logger.info(f"({request.method}) {request.path}")
    payload_logger.info(json.dumps(request.form, indent=2))
    event_type = request.headers.get("X-GitHub-Event")
    if not event_type:
        # Fix: logger.warn() is a deprecated alias for warning().
        logger.warning("There is no X-GitHub-Event header")
        return render_template("result.html", result="FAIL"), 400
    event = Event.factory(event_type, request.form)
    if event and event.is_triggering_event():
        # TODO(review): this branch is a no-op — the actual Jenkins trigger
        # appears to be missing or implemented elsewhere. The
        # is_triggering_event() call is kept in case it has side effects.
        pass
    return render_template("result.html", result="SUCCESS"), 200
def detect_gpu():
    """Run the bundled GPU detector tool and record NVIDIA GPU info in SysInfo.gpu."""
    detector = 'gpu_detector_' + SysInfo.os
    if (SysInfo.os == TOOLSFORAI_OS_WIN):
        detector = detector + '.exe'
    detector_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "tools", detector)
    # The detector binary must ship next to the installer script.
    if not (os.path.isfile(detector_path)):
        logger.error(
            'Not find GPU detector. Please make sure {0} is in the same directory with the installer script.'.format(
                detector))
        return False
    SysInfo.gpu, return_stdout = _run_cmd(detector_path, return_stdout=True)
    if not SysInfo.gpu:
        return_stdout = 'None'
    logger.info('NVIDIA GPU: {0}'.format(return_stdout))
    return True
def get_s3_files(
    bucket_name: str = BUCKET_NAME,
    save_to_disk: bool = True,
    files_list_path: str = FILES_LIST_PATH,
    files_list_filename: str = FILES_LIST_FILENAME,
    aws_region: str = AWS_REGION,
    s3_prefix: str = S3_PREFIX,
) -> list:
    """List S3 object keys matching the expected media path layout.

    Returns:
        The list of matching keys; optionally also written to disk.
    """
    logger.info("Building media list from S3 objects...")
    logger.debug(
        f"Context Parameters: {get_s3_files.__name__} => {get_s3_files.__code__.co_varnames}"
    )
    data = []
    # testing format: assets/20160823/img.jpg
    pattern = re.compile(
        "^[a-z-A-Z-0-9]+/[a-z-A-Z-0-9]+/[0-9]{8}/.+[.][a-z-A-Z-0-9]+$"
    )
    try:
        s3 = boto3.client("s3", region_name=aws_region)
        paginator = s3.get_paginator("list_objects_v2")
        pages = paginator.paginate(Bucket=bucket_name, Prefix=s3_prefix)
        for page in pages:
            # Robustness fix: an empty result page has no "Contents" key;
            # the original raised (and re-raised) a KeyError on empty
            # buckets/prefixes.
            for obj in page.get("Contents", []):
                if pattern.match(obj["Key"]):
                    data.append(obj["Key"])
                else:
                    logger.warning(
                        f'Wrong filename format, object "{obj["Key"]}", not added to the list.'
                    )
        statistics.append(["get_s3_files", len(data)])
        logger.info("Media Objects list generated successfully.")
        logger.debug(f"Media objects count: {len(data)}.")
        if save_to_disk:
            logger.info("Writing media list to disk...")
            export_data = [f"{item}\n" for item in data]
            with open(f"{files_list_path}/{files_list_filename}", "w") as w:
                w.writelines(export_data)
            logger.info(
                f'List successfully saved to disk: "{files_list_path}/{files_list_filename}".'
            )
    except Exception as e:
        logger.error(e)
        raise
    return data
def save_defer_encoding(movies_list: list, files_list_path: str = FILES_LIST_PATH) -> bool:
    """Store the list of deferred remote video encodings to a JSON file.

    Raises:
        AssertionError: when movies_list is not a list (kept as the
            exception type existing callers would have seen from the
            original asserts).
    """
    # Explicit validation instead of assert statements: asserts are stripped
    # under `python -O`, silently disabling this check.
    if not isinstance(movies_list, list):
        raise AssertionError("movies_list must be a list")
    file_path = f"{files_list_path}/defered_encode.json"
    try:
        with open(file_path, "w") as w:
            w.write(json.dumps(movies_list, indent=4))
    except Exception as e:
        logger.error(e)
        raise
    logger.info(f"Defered encoding list saved successfully: '{file_path}'")
    return True
def create_s3_bucket(
    bucket_name: str = BUCKET_NAME, aws_region: str = AWS_REGION
) -> bool:
    """Create the project's S3 bucket if it does not already exist.

    Returns:
        True when the bucket was created, False when it already existed.
    """
    s3 = boto3.client("s3", region_name=aws_region)
    bucket_exists = True
    try:
        # Bug fix: the original had a duplicated "response = response = ..."
        # assignment.
        response = s3.list_buckets()
        matches = [
            bucket["Name"]
            for bucket in response["Buckets"]
            if bucket["Name"] == bucket_name
        ]
        if matches:
            logger.warning(
                "S3 bucket already exists. Skipping bucket creation."
            )
            bucket_exists = True
        else:
            bucket_exists = False
    except Exception as e:
        logger.error(e)
        raise
    if not bucket_exists:
        try:
            # NOTE(review): CreateBucketConfiguration with a LocationConstraint
            # is rejected by S3 in us-east-1 — confirm the deployment region.
            response = s3.create_bucket(
                Bucket=bucket_name,
                ACL="private",
                CreateBucketConfiguration={"LocationConstraint": aws_region},
            )
            logger.info(f'Created S3 bucket "{bucket_name}" successfully.')
            logger.debug(f"S3 client response: {response}")
        except Exception as e:
            logger.error(e)
            raise
    else:
        return False
    return True
def detect_visualcpp_runtime_win():
    """Scan the Windows uninstall registry keys for a VC++ 2015/2017 x64 runtime.

    Returns:
        True when a matching runtime entry is found, False otherwise.
    """
    # Raw string for the regex: avoids invalid-escape warnings for "\+".
    pattern = re.compile(
        r"(^Microsoft Visual C\+\+ 201(5|7) x64 Additional Runtime)|(^Microsoft Visual C\+\+ 201(5|7) x64 Minimum Runtime)")
    items = [(winreg.HKEY_CURRENT_USER, r"SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall"),
             (winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall"),
             (winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\WOW6432Node\Microsoft\Windows\CurrentVersion\Uninstall")]
    for hkey, keypath in items:
        try:
            current_key = winreg.OpenKey(hkey, keypath)
            # Bug fix: close the key in a finally block — the original leaked
            # the handle when a match caused an early return.
            try:
                for subkey in _registry_subkeys(hkey, keypath):
                    display_name = _registry_read(current_key, subkey, "DisplayName")
                    if (display_name and pattern.match(display_name)):
                        logger.info("Detect Visual C++ runtime already installed.")
                        return True
            finally:
                winreg.CloseKey(current_key)
        except WindowsError:
            # Key may not exist on this machine/hive; try the next one.
            pass
    logger.warning("Not detect Visual C++ runtime.")
    return False
def main(display_env: str = LOG_DISPLAY_ENV_VARS) -> None:
    """ Fetch, build and store S3 media files into DynamoDB """
    # Env-var dump is opt-in via the string flag "True" (dead else removed).
    if display_env == "True":
        logger.debug("## Environment variables")
        logger.debug(os.environ)
    logger.debug("- Start of execution -")
    prepare_local_resources()
    setup_cloud_resources()
    hydrate_cloud_resources()
    logger.debug("- End of execution -")
    print(tabulate(statistics))
    # Block until all remote SQS encoding tasks have drained.
    monitor_remote_ops()
    logger.info("- All tasks executed successfully -")
def map():
    """Serve the index page, logging the caller's remote address."""
    # NOTE: the view name shadows the builtin map(); kept for compatibility
    # with the existing route registration.
    logger.info('({0})'.format(request.remote_addr))
    return render_template('index.html')