def send_message(message="hello from confply, {user}", user=None, files=None):
    """Post *message* to every Slack channel shared by *user* and the confply bot.

    message -- text to post; the literal "{user}" is replaced with an @-mention
    user    -- substring matched against member name/real_name; None posts to
               every channel the bot can list
    files   -- optional iterable of file paths to upload into the message thread
    """
    # never share a mutable default between calls
    files = [] if files is None else files
    payload = {"token": bot_token}
    request = Request(api.user_list, urlencode(payload).encode())
    response = json.loads(urlopen(request).read().decode())
    user_id = None
    confply_id = None
    for mem in response["members"]:
        if "confply" == mem["name"] and mem["is_bot"]:
            confply_id = mem["id"]
        # guard: `user in ...` raises TypeError when user is None
        elif user and (user in mem["name"] or user in mem.get("real_name", "")):
            user_id = mem["id"]
    request = Request(api.channel_list, urlencode(payload).encode())
    response = json.loads(urlopen(request).read().decode())
    channel_ids = [channel["id"] for channel in response["channels"]]
    valid_channels = []
    for channel_id in channel_ids:
        payload["channel"] = channel_id
        request = Request(api.member_list, urlencode(payload).encode())
        response = json.loads(urlopen(request).read().decode())
        if user_id is not None:
            # only channels containing both the target user and the bot
            if (response["ok"] and user_id in response["members"]
                    and confply_id in response["members"]):
                valid_channels.append(channel_id)
        else:
            valid_channels.append(channel_id)
    # empty mention rather than a TypeError when no user was matched
    mention = "<@" + user_id + ">" if user_id is not None else ""
    for channel in valid_channels:
        payload["channel"] = channel
        payload["text"] = message.replace("{user}", mention)
        request = Request(api.chat_post, urlencode(payload).encode())
        response = json.loads(urlopen(request).read().decode())
        # thread uploads off the chat post, not off a previous upload response
        thread_ts = response["ts"]
        for upload_file in files:
            path = pathlib.Path(upload_file)
            if path.exists():
                # pop, not del: "text" is only present for the first upload
                payload.pop("text", None)
                with open(upload_file) as file_str:
                    payload["title"] = path.name
                    payload["content"] = file_str.read()
                    payload["channels"] = channel
                    payload["thread_ts"] = thread_ts
                request = Request(api.files_upload, urlencode(payload).encode())
                response = json.loads(urlopen(request).read().decode())
            else:
                # str(): Path cannot be concatenated to str directly
                log.warning("couldn't find " + str(path.absolute()))
        # drop upload-only fields so they don't leak into the next chat_post
        for key in ("title", "content", "channels", "thread_ts"):
            payload.pop(key, None)
def _print_tools():
    """Log every registered tool with its numeric index, flagging missing ones."""
    # enumerate replaces the manual num = -1 / num += 1 counter
    for num, name in enumerate(in_tools):
        if in_tools[name].is_found():
            log.normal("\t" + str(num) + ") " + name)
        else:
            log.warning("\t" + str(num) + ") " + name + " (not found)")
def __apply_overrides(config):
    """Apply pending command-line overrides onto *config* and confply.config.

    Consumes confply.config.__override_dict (bulk dict merge) and
    confply.config.__override_list (individual "key = value" assignments),
    resetting the dict once applied.
    """
    # update the config and confply.config dictionaries with overrides
    if isinstance(confply.config.__override_dict, dict):
        confply_dict = confply.config.__override_dict
        # "confply" sub-dict targets the confply section of the config
        config.confply.__dict__.update(confply_dict["confply"])
        del confply_dict["confply"]
        config.__dict__.update(confply_dict)
        # reset so the same overrides are not applied twice
        confply.config.__override_dict = {"confply": {}}
    # update the configs with further overrides
    if isinstance(confply.config.__override_list, list):
        for k, v in confply.config.__override_list:
            try:
                # k is an assignment target expression (e.g. "config.attr");
                # exec resolves it against this frame's locals, so the
                # assignment mutates the referenced object's attribute
                exec("{0} = v".format(k), globals(), locals())
            except Exception:
                log.warning("failed to exec " + "{0} = {1}:".format(k, v))
                log.normal("\t\tcheck calling option --" + k)
def __load_vcs_info(path):
    """Fill confply.config vcs_* fields from the repository containing *path*."""
    with pushd(os.path.dirname(path)):
        # find the git root
        # #todo: extend this to other version control?
        # move this to a config_type
        if confply.config.vcs == "git":
            # (target attribute, git command) pairs, executed in order;
            # a failure aborts the remaining queries
            queries = (
                ("vcs_root", 'git rev-parse --show-toplevel'),
                ("vcs_branch", 'git branch --show-current'),
                ("vcs_author", "git log -1 --pretty=format:'%an'"),
                ("vcs_log", "git log -1"),
            )
            try:
                for attr, cmd in queries:
                    output = subprocess.check_output(cmd, shell=True)
                    setattr(confply.config, attr, output.decode('utf-8').strip())
            except subprocess.CalledProcessError:
                log.warning('failed to fill git vcs information')
def generate():
    """Configure the cpp_compiler front-end for MSVC cl.exe and generate commands.

    Returns the command list from cpp_compiler.generate(), or None when the
    configured platform is not windows.
    """
    def __parse_deps(deps_string):
        # cl -sourceDependencies emits JSON; newer toolsets nest it under "Data"
        deps_json = json.loads(deps_string)
        if "Data" in deps_json:
            deps_json = deps_json["Data"]
        out_deps = deps_json["Includes"]
        out_deps.append(deps_json["Source"])
        return out_deps

    # strip gcc/clang-only options cl.exe does not understand, warning when
    # present; narrow except replaces the original bare excepts
    # (ValueError: value absent, AttributeError: option list missing/None)
    for value, listname in (("stdc++", "link_libraries"),
                            ("pedantic", "warnings"),
                            ("extra", "warnings")):
        try:
            getattr(config, listname).remove(value)
            log.warning("removing " + value + " from " + listname +
                        ", it's not valid when using cl.exe")
        except (AttributeError, ValueError):
            pass

    if config.confply.platform == "windows":
        cpp_compiler.tool = "cl"
        cpp_compiler.output_obj = "-Fo"
        cpp_compiler.output_exe = "-Fe"
        cpp_compiler.standard = "-std:"
        cpp_compiler.dependencies = ""
        cpp_compiler.link = ""
        cpp_compiler.library = "-LIBPATH:"
        cpp_compiler.dependencies_output = "-sourceDependencies"
        cpp_compiler.exception_handling = "-EHsc"
        cpp_compiler.pass_to_linker = "-link"
        cpp_compiler.object_ext = ".obj"
        cpp_compiler.parse_deps = __parse_deps
        cpp_compiler.debug = "-Zi"
        return cpp_compiler.generate()
    else:
        log.error("cl only supports windows platforms")
        return None
def generate():
    """Generate the compiler command line(s) from the loaded config.

    When config.build_objects is set, returns a list of per-source compile
    commands plus (when needed) a final link command, skipping sources whose
    objects are up to date according to timestamp/md5/dependency tracking.
    Otherwise returns a single compile-and-link command string.
    """
    object_path = config.object_path
    object_path = os.path.join(object_path, tool)

    def gen_command(config, source=None):
        # Build one shell command. With source=None this is the link/all-in-one
        # command; with a source path it is a single compile-to-object command.
        # NOTE: in `command += a + b + " " if cond else ""` the conditional
        # spans the whole right-hand side, so nothing is appended when the
        # config field is falsy.
        command = ""
        command += tool + " " + config.command_prepend + " "
        command += include + " " + (" " + include + " ").join(
            config.include_paths) + " " if config.include_paths else ""
        command += define + " " + (" " + define + " ").join(
            config.defines) + " " if config.defines else ""
        command += debug + " " if config.debug_info else ""
        command += standard + config.standard + " " if config.standard else ""
        command += gen_warnings()
        command += optimisation + str(
            config.optimisation) + " " if config.optimisation else ""
        if source is None:
            # link (or single-shot) command: all sources, output, linker flags
            command += " ".join(
                config.source_files) + " " if config.source_files else ""
            command += output_exe + config.output_file + " " if config.output_file else output_exe + "app.bin"
            command += pass_to_linker + " "
            command += library + (" " + library).join(
                config.library_paths) + " " if config.library_paths else ""
            command += link + " " + (" " + link + " ").join(
                config.link_libraries) + " " if config.link_libraries else ""
        else:
            # compile-only command for one translation unit
            command += build_object + " " + source + " " + output_obj + os.path.join(
                object_path, os.path.basename(source) + object_ext + " ")
            command += exception_handling + " "
            if config.track_dependencies:
                # ask the compiler to emit a dependency file next to the object
                command += dependencies + " " + dependencies_output + " " + os.path.join(
                    object_path, os.path.basename(source) + ".d ")
        return command + " " + config.command_append

    if config.build_objects:
        os.makedirs(object_path, exist_ok=True)
        commands = []
        sources = config.source_files
        objects = []
        # mtime of the final output, 0 when it does not exist yet
        output_time = config.output_file if config.output_file else "app.bin"
        output_time = os.path.getmtime(output_time).real if os.path.exists(
            output_time) else 0
        should_link = False
        tracking_md5 = config.track_checksums
        tracking_depends = config.track_dependencies
        config_name = config.confply.config_name if os.path.exists(
            config.confply.config_name) else None

        # generate checksums
        def md5(fname):
            # chunked md5 so large files are not read into memory at once
            hash_md5 = hashlib.md5()
            with open(fname, "rb") as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    hash_md5.update(chunk)
            return hash_md5.hexdigest()

        # tracking maps file path -> {"t": mtime, "h": md5} (h only with md5
        # tracking); persisted in <object_path>/tracking.py between runs
        tracking = {}
        tracking_path = os.path.join(object_path, "tracking.py")
        if tracking_md5 or tracking_depends:
            if os.path.exists(tracking_path):
                with open(tracking_path, "r") as tracking_file:
                    tracking = ast.literal_eval(tracking_file.read())

        def update_tracking(file_path):
            # Refresh the tracking entry for file_path; returns True when the
            # file changed (or is newly tracked) and so needs a recompile.
            nonlocal tracking
            if os.path.exists(file_path):
                file_time = os.path.getmtime(file_path).real
                if file_path in tracking:
                    if file_time > tracking[file_path]["t"]:
                        if tracking_md5:
                            # mtime changed: only recompile if content changed
                            old_md5 = tracking[file_path][
                                "h"] if "h" in tracking[file_path] else None
                            new_md5 = md5(file_path)
                            tracking[file_path].update({
                                "t": file_time,
                                "h": new_md5
                            })
                            return old_md5 != new_md5
                        else:
                            tracking[file_path].update({"t": file_time})
                            return True
                    else:
                        return False
                elif tracking_md5:
                    tracking[file_path] = {"t": file_time, "h": md5(file_path)}
                    return True
                else:
                    tracking[file_path] = {"t": file_time}
                    return True
                pass
            return False

        # a changed config file forces a full rebuild when rebuild_on_change
        compile_all = update_tracking(
            config_name) if config.rebuild_on_change else False
        for source_path in sources:
            should_compile = compile_all
            if os.path.exists(source_path):
                deps_path = os.path.join(object_path,
                                         os.path.basename(source_path + ".d"))
                obj_path = os.path.join(
                    object_path, os.path.basename(source_path + object_ext))
                obj_time = os.path.getmtime(obj_path).real if os.path.exists(
                    obj_path) else 0
                objects.append(obj_path)
                # dependency tracking
                if tracking_depends and os.path.exists(deps_path):
                    # recompile when any recorded include dependency changed
                    with open(deps_path, "r") as deps_file:
                        deps_string = deps_file.read()
                        for dep_path in parse_deps(deps_string):
                            should_compile = update_tracking(
                                dep_path) or should_compile
                else:
                    should_compile = update_tracking(
                        source_path) or should_compile
                if should_compile or obj_time == 0:
                    commands.append(gen_command(config, source_path))
                    should_link = True
                elif obj_time > output_time:
                    # object newer than output: no recompile, but relink
                    should_link = True
            else:
                log.warning(source_path + " could not be found")
        if should_link and config.output_executable:
            # link step: temporarily swap sources for the object list
            config.source_files = objects
            commands.append(gen_command(config))
            config.source_files = sources
            num_commands = len(commands)
            log.normal(str(num_commands) + " files to compile")
        else:
            log.normal("no files to compile")
        if tracking_md5 or tracking_depends:
            # persist tracking for the next run
            with open(tracking_path, "w") as tracking_file:
                tracking_file.write("# do not edit this file.\n")
                tracking_file.write(json.dumps(tracking, indent=4))
        return commands
    else:
        return gen_command(config)
def __run_config(config_locals):
    """Execute a loaded confply config: apply overrides, run post_load, generate
    and run the tool commands, then run post_run and dispatch mail/slack reports.

    config_locals -- the locals dict produced by load_config(); must contain
                     the "config" module.
    Returns the shell return code (0 success, negative on internal failure).
    """
    def __format_elapsed(s):
        # time formating via format specifiers
        # https://docs.python.org/3.8/library/string.html#formatspec
        m = int(s / 60)
        h = int(m / 60)
        return f"{h:0>2.0f}:{m:0>2.0f}:{s:0>5.2f}"

    in_args = confply.config.args
    config = config_locals["config"]
    return_code = 0
    should_run = confply.config.run
    file_path = confply.config.config_path
    __apply_overrides(config)
    new_working_dir = os.path.dirname(file_path)
    old_stdout = sys.stdout
    # setting confply command configuration up
    with pushd(new_working_dir):
        if confply.config.log_file:
            log.normal("writing to: " + confply.config.log_file + "....")
            try:
                # redirect all logging into the log file for this run
                sys.stdout = open(confply.config.log_file, "w")
                version = sys.version_info
                version = (version.major, version.minor, version.micro)
                if "--no_header" not in in_args:
                    log.confply_header()
                    log.linebreak()
                    log.normal("python" + str(version))
                    log.normal("confply " + get_version())
                    log.normal("date: " + str(datetime.now()))
                    log.linebreak()
                # marker line: log_echo_file splits on this string below
                log.normal("confply logging to " + confply.config.log_file)
            except Exception:
                log.error("couldn't open " + confply.config.log_file +
                          " for write.")
                return_code = -1
        try:
            if (confply.config.post_load
                    and inspect.isfunction(confply.config.post_load)):
                log.normal("running post load script: " +
                           confply.config.post_load.__name__)
                sys.stdout.flush()
                exec(confply.config.post_load.__code__, config_locals, {})
                log.linebreak()
        except Exception:
            log.error("failed to exec " + confply.config.post_load.__name__)
            trace = traceback.format_exc()
            log.normal("traceback:\n\n" + trace)
        diff_config = __get_diff_config(config)
        # post_load may have changed confply.config.run
        should_run = confply.config.run
        report = {
            "config_path": file_path,
            "config_json": json.dumps(diff_config, indent=4),
            "config_type": "unknown",
            "tool": "unknown",
            "status": "failure",
            "vcs_root": confply.config.vcs_root,
            "vcs_log": confply.config.vcs_log,
            "vcs": confply.config.vcs,
            "vcs_branch": confply.config.vcs_branch,
            "vcs_author": confply.config.vcs_author,
            "time_taken": "00:00:00"
        }
        if return_code >= 0:
            if confply.config.log_config is not False:
                __print_config(os.path.basename(file_path), config)
            try:
                time_start = timeit.default_timer()
                # #todo: tool selection phase should happen first.
                tools = __validate_config(config)
                config_type = config.__package__
                tool = confply.config.tool
                report["tool"] = tool
                report["config_type"] = config_type
                if tools:
                    shell_cmd = tools[tool]
                    shell_cmd.handle_args()
                    # #todo: rename generate to gen_config_type.
                    shell_cmd = shell_cmd.generate()
                else:
                    shell_cmd = None
                __run_dependencies(config, should_run)
                if shell_cmd is not None:
                    cmd_env = tools[tool].get_environ()
                    if len(shell_cmd) > 0:
                        if isinstance(shell_cmd, list):
                            log.normal("final commands:\n")
                            for shell_str in shell_cmd:
                                cmd = confply.config.command_prepend + shell_str
                                cmd = cmd + confply.config.command_append
                                print(cmd)
                            print("")
                        else:
                            # FIX: was `cmd = shell_cmd + command_append`,
                            # which discarded the prepend applied just above
                            cmd = confply.config.command_prepend + shell_cmd
                            cmd = cmd + confply.config.command_append
                            log.normal("final command:\n\n" + str(cmd) + "\n")
                        if should_run:
                            log.header("begin " + tool)
                        sys.stdout.flush()
                        if should_run and isinstance(shell_cmd, list):
                            for cmd in shell_cmd:
                                cmd_time_start = timeit.default_timer()
                                sys.stdout.flush()
                                log.linebreak()
                                cmd = confply.config.command_prepend + cmd
                                cmd = cmd + confply.config.command_append
                                log.normal(cmd)
                                log.normal("", flush=True)
                                return_code = __run_shell_cmd(cmd, cmd_env, tool)
                                cmd_time_end = timeit.default_timer()
                                time = __format_elapsed(cmd_time_end -
                                                        cmd_time_start)
                                log.normal("time elapsed " + time)
                        elif should_run:
                            # run the prepended/appended command, consistent
                            # with the list branch above (was bare shell_cmd)
                            return_code = __run_shell_cmd(cmd, cmd_env, tool)
                    else:
                        log.warning("no commands run")
                else:
                    log.error("failed to generate a valid command.")
                    return_code = -1
                time_end = timeit.default_timer()
                time = __format_elapsed(time_end - time_start)
                log.normal("total time elapsed " + time)
                report["time_taken"] = time
                report["status"] = "success" if return_code == 0 else "failure"
            except Exception:
                log.error("failed to run config: ")
                trace = traceback.format_exc()
                log.normal("traceback:\n\n" + trace)
                return_code = -1
        sys.stdout.flush()
        if (confply.config.post_run
                and inspect.isfunction(confply.config.post_run)):
            try:
                log.linebreak()
                log.normal("running post run script: " +
                           confply.config.post_run.__name__)
                sys.stdout.flush()
                exec(confply.config.post_run.__code__, config_locals, {})
                log.linebreak()
            except Exception:
                log.error("failed to exec " + confply.config.post_run.__name__)
                trace = traceback.format_exc()
                log.normal("traceback:\n\n" + trace)
        log.normal("date: " + str(datetime.now()))
        log.linebreak()
        sys.stdout.flush()
        if sys.stdout != old_stdout:
            # restore stdout and optionally echo the captured log back
            sys.stdout.close()
            sys.stdout = old_stdout
            if confply.config.log_echo_file:
                with open(confply.config.log_file) as out_log:
                    log_str = out_log.read()
                    log_str = log_str.split("confply logging to " +
                                            confply.config.log_file)[1]
                    log.normal("wrote:" + log_str)
        if (confply.config.mail_send == report["status"]
                or confply.config.mail_send == "all"):
            mail.host = confply.config.__mail_host
            mail.sender = confply.config.mail_from
            mail.recipients = confply.config.mail_to
            mail.login = confply.config.__mail_login
            mail.attachments = confply.config.mail_attachments
            if (confply.config.log_file and report["status"] == "failure"):
                mail.attachments.append(
                    os.path.abspath(confply.config.log_file))
            if mail.login:
                mail.send_report(report)
        if (confply.config.slack_send == report["status"]
                or confply.config.slack_send == "all"):
            slack.bot_token = confply.config.__slack_bot_token
            slack.uploads = confply.config.slack_uploads
            if (confply.config.log_file and report["status"] == "failure"):
                slack.uploads.append(
                    os.path.abspath(confply.config.log_file))
            if slack.bot_token:
                slack.send_report(report)
    clean_modules()
    return return_code
def run_commandline(in_args):
    """
    runs the confply config, with supplied arguments.
    confply reserved options will be stripped.
    e.g. --help see help.md
    usage: run_commandline(["path_to_config", "optional", "arguments"])
    """
    log.normal("called with args: " + str(in_args))
    # reserved confply options are consumed here; the remainder is passed on
    in_args = __strip_confply_args(in_args)
    confply.config.args = in_args
    if confply.config.config_path:
        config_path = confply.config.config_path
        if not os.path.exists(config_path):
            log.error("couldn't find: " + config_path)
            return -1
        # dispatch by config file extension: .py, .json or .ini
        if config_path.endswith(".py"):
            log.linebreak()
            log.header("run config")
            log.linebreak()
            # setup config run
            should_run = confply.config.run
            config_locals = load_config(config_path)
            if not config_locals:
                log.error("failed to load: " + config_path)
                return -1
            if ("config" not in config_locals):
                log.error("confply config incorrectly imported")
                log.normal(
                    "\tuse: 'import confply.[config_type].config as config'")
                clean_modules()
                return -1
            config = config_locals["config"]
            if (not __validate_types(config)):
                log.error("failed to run config")
                return -1
            # ensure we don't run if should_run was EVER false
            if should_run is not True:
                confply.config.run = should_run
            # config
            # warn when the config file content no longer matches the hash it
            # declares; the config may behave unexpectedly
            config_hash = md5_file(config.__file__)
            if ("config_hash" not in config_locals
                    or config_hash != config_locals["config_hash"]):
                log.warning("warning: config_hash doesn't match expected hash")
                log.normal("\tconfig file might not function correctly")
                log.normal("\texpected:")
                log.normal("\t\t" + "config_hash='" + config_hash + "'")
                log.normal("")
            return __run_config(config_locals)
        elif config_path.endswith(".json"):
            if os.path.exists(config_path):
                with open(config_path) as in_json:
                    in_dict = json.loads(in_json.read())
                    in_dict["confply"].update({"config_path": config_path})
                    return run_dict(in_dict)
        elif config_path.endswith(".ini"):
            if os.path.exists(config_path):
                import configparser

                def parse_lit(in_val):
                    # best-effort conversion of ini strings to python values;
                    # fall back to the raw string when not a literal
                    try:
                        return ast.literal_eval(in_val)
                    except Exception:
                        return in_val
                conf = configparser.ConfigParser()
                conf.read(config_path)
                in_dict = {"confply": {"config_path": config_path}}
                for (key, val) in conf["config"].items():
                    in_dict[key] = parse_lit(val)
                for (key, val) in conf["confply"].items():
                    in_dict["confply"][key] = parse_lit(val)
                return run_dict(in_dict)
        else:
            log.error("unsupported config type: " + config_path)
            return -1
    else:
        return 0
    return 0