def parse(self, process_data):
    """Run every text attribute of this target data through *process_data*.

    Does nothing except log when *process_data* is falsy.
    """
    if not process_data:
        Logger.d('Cannot parse target data with invalid source')
        return

    Logger.d('Parsing target data...')

    self.project_name = process_data.parse_text(self.project_name)

    # Attributes that are plain lists of text all go through the same parser.
    text_list_attrs = (
        'header_search_paths',
        'library_search_paths',
        'library_links',
        'framework_links',
        'c_flags',
        'cxx_flags',
        'compiler_options',
    )

    for attr_name in text_list_attrs:
        parsed = process_data.parse_text_list(getattr(self, attr_name))
        setattr(self, attr_name, parsed)

    # Structured lists have dedicated parsers on the process data object.
    # NOTE(review): 'sourge' matches the ProcessData API spelling — do not "fix".
    self.source_groups = process_data.parse_sourge_group_list(
        self.source_groups)
    self.copy_files = process_data.parse_copy_file_list(self.copy_files)
def get_filename_from_url_without_extension(url):
    """Return the final path segment of *url* with its last extension removed.

    Handles both '/' and '\\' separators. Only the final '.ext' part is
    stripped, so 'archive.tar.gz' yields 'archive.tar'.
    """
    Logger.d('Parsing URL to get filename...')
    # maxsplit=1 strips only the final extension. The previous
    # rsplit('.', -1)[0] split on EVERY dot, so a name such as
    # 'v1.2.zip' was wrongly truncated down to 'v1'.
    filename = url.split('\\').pop().split('/').pop().rsplit('.', 1)[0]
    Logger.d('Filename from download URL: {0}'.format(filename))
    return filename
def read_file(file_path):
    """Read and return the entire contents of *file_path* as text.

    Raises the usual IOError/OSError if the file cannot be opened.
    """
    Logger.d('Reading file: {0}'.format(file_path))
    # The context manager closes the file on exit; the explicit f.close()
    # that used to follow was redundant.
    with open(file_path, 'r') as f:
        content = f.read()
    return content
def run_all_tasks(tasks, process_data, template_data, working_dir):
    """Execute every task in *tasks*, forwarding the shared context objects."""
    if not tasks:
        Logger.d('Task list is invalid to execute')
        return

    Logger.d('Tasks to run: {0}...'.format(len(tasks)))

    for current_task in tasks:
        current_task.run(process_data=process_data,
                         template_data=template_data,
                         working_dir=working_dir)
def initialize(self):
    """Remove the temporary and vendor directories, leaving a clean state."""
    from ezored.models.logger import Logger
    from ezored.models.constants import Constants
    from ezored.models.util.file_util import FileUtil

    Logger.i('Cleaning...')

    # both scratch directories are rebuilt on demand, safe to delete
    for dir_path in (Constants.TEMP_DIR, Constants.VENDOR_DIR):
        FileUtil.remove_dir(dir_path)

    Logger.i('Finished')
def load_vendor_file_data(self):
    """Load and return the parsed vendor YAML file from the temp dir.

    Calls Logger.f (fatal) on I/O errors; returns the parsed document
    otherwise.
    """
    Logger.d('Loading vendor file...')

    vendor_dir = self.get_temp_dir()
    vendor_file_path = os.path.join(vendor_dir, Constants.VENDOR_FILE)

    try:
        with open(vendor_file_path, 'r') as stream:
            # safe_load: the bare yaml.load is deprecated and can construct
            # arbitrary Python objects from the file content.
            return yaml.safe_load(stream)
    except IOError as exc:
        Logger.f('Error while read vendor file: {0}'.format(exc))
def load_target_data_file(self):
    """Load and return the parsed target-data YAML file from the vendor dir.

    Calls Logger.f (fatal) on I/O errors; returns the parsed document
    otherwise.
    """
    Logger.d('Loading target data file...')

    vendor_dir = self.get_vendor_dir()
    target_file_path = os.path.join(vendor_dir, Constants.TARGET_DATA_FILE)

    try:
        with open(target_file_path, 'r') as stream:
            # safe_load: the bare yaml.load is deprecated and can construct
            # arbitrary Python objects from the file content.
            return yaml.safe_load(stream)
    except IOError as exc:
        Logger.f('Error while read target file: {0}'.format(exc))
def get_filename_from_url(url):
    """Return the filename component of *url*, or 'downloaded.file' when the
    URL path has no basename."""
    Logger.d('Parsing URL to get filename...')

    split_result = urlparse.urlsplit(url)
    filename = os.path.basename(split_result.path) or 'downloaded.file'

    Logger.d('Filename from download URL: {0}'.format(filename))
    return filename
def get_name(self):
    """Return the task's explicit name, or a label derived from its type.

    Logger.f (fatal) is invoked for an unknown type.
    """
    if self.name and len(self.name) > 0:
        return self.name

    # label lookup replaces the if/elif chain; unknown types fall through
    type_labels = {
        self.TYPE_COPY_FILE: 'Copy file',
        self.TYPE_COPY_FILES: 'Copy files',
        self.TYPE_PARSE_FILE: 'Parse file',
        self.TYPE_RUN: 'Run',
    }

    if self.type in type_labels:
        return type_labels[self.type]

    Logger.f('Invalid task type')
def write_to_file(dir_path, filename, content):
    """Write *content* to *filename* inside *dir_path*, replacing any
    existing file and creating the directory if needed."""
    Logger.d('Creating file {0} in directory {1} with {2} bytes...'.format(
        filename, dir_path, len(content)))

    full_file_path = os.path.join(dir_path, filename)

    # remove any stale copy first, then make sure the directory exists
    FileUtil.remove_file(full_file_path)
    FileUtil.create_dir(dir_path)

    # the context manager closes the file; the explicit f.close() that used
    # to follow was redundant.
    with open(full_file_path, 'w') as f:
        f.write(content)

    Logger.d('Created file {0} in directory {1}'.format(filename, dir_path))
def initialize(self):
    """Create the default project file in the current directory if absent."""
    from ezored.models.constants import Constants
    from ezored.models.logger import Logger

    Logger.d('Initializing...')

    if os.path.isfile(Constants.PROJECT_FILE):
        # grammar fixed ("don't will be created" -> "will not be created")
        Logger.d('Project file already exists and will not be created')
    else:
        Logger.d('Creating project file...')

        # "with" guarantees the handle is closed even if the write fails;
        # the old code used manual open/write/close.
        with open(Constants.PROJECT_FILE, 'w') as project_file:
            project_file.write(Constants.PROJECT_FILE_DATA)

        Logger.d('Project file created')

    Logger.i('A new ezored project was initialized with success')
def merge(self, target_data):
    """Append all list data from *target_data* into this instance.

    Does nothing except log when *target_data* is falsy.
    """
    if not target_data:
        Logger.d('Cannot merge target data with invalid source')
        return

    Logger.d('Merging target data...')

    # every merged attribute is a list extended in place
    for attr_name in ('header_search_paths', 'library_search_paths',
                      'source_groups', 'library_links', 'framework_links',
                      'c_flags', 'cxx_flags', 'compiler_options', 'tasks'):
        getattr(self, attr_name).extend(getattr(target_data, attr_name))
def download_from_git(self):
    """Clone the repository into the temp dir, refreshing when required.

    Branches are always re-downloaded; pinned tags/commits only when the
    downloaded version differs from the configured one. Logger.f (fatal)
    fires if the destination directory is missing afterwards.
    """
    Logger.i('Downloading repository: {0}...'.format(self.get_name()))

    rep_path, rep_type, rep_version = self.get_git_data()

    download_dest_dir = Constants.TEMP_DIR
    download_dest_path = os.path.join(download_dest_dir,
                                      self.get_download_filename())

    downloaded_version = GitUtil.get_current_downloaded_repository_version(
        download_dest_path)

    # branches always refresh
    force_download = (rep_type == Constants.GIT_TYPE_BRANCH)

    # a version mismatch also forces a refresh (and is logged)
    if downloaded_version is not None and downloaded_version != rep_version:
        Logger.i(
            'Repository downloaded version ({0}) is different from configured version ({1}), '
            'downloading configured version...'.format(
                downloaded_version.strip(), rep_version.strip()))
        force_download = True

    if not force_download and os.path.isdir(download_dest_path):
        Logger.i('Repository already downloaded: {0}'.format(
            self.get_name()))
    else:
        FileUtil.remove_dir(download_dest_path)
        GitUtil.download(rep_path, rep_type, rep_version, download_dest_path)

    # verify the checkout actually exists on disk
    if os.path.isdir(download_dest_path):
        Logger.i('Repository downloaded: {0}'.format(self.get_name()))
    else:
        Logger.f('Problems when download repository: {0}'.format(
            self.get_name()))
def build(self, project, process_data):
    """Import the vendor module from this repository's temp dir and run its
    ``do_build`` hook.

    The interpreter state (sys.path, cwd) is restored afterwards even when
    the build raises; the exception is re-raised after logging.
    """
    Logger.i('Building repository: {0}...'.format(self.get_name()))

    sys_path = list(sys.path)
    original_cwd = os.getcwd()

    try:
        sys.path.insert(0, self.get_temp_dir())

        target_module = importlib.import_module(Constants.VENDOR_MODULE_NAME)
        do_build = getattr(target_module, 'do_build')

        do_build(params={
            'project': project,
            'process_data': process_data,
        })

        # drop the vendor module so the next repository can import its own
        del sys.modules[Constants.VENDOR_MODULE_NAME]
        del target_module
        del do_build

        Logger.i('Build finished for repository: {0}'.format(
            self.get_name()))
    except Exception as e:
        # str(e) instead of e.message: exceptions have no .message
        # attribute on Python 3, so the old handler itself raised.
        Logger.e(
            "Error while call 'do_build' on repository {0}: {1}".format(
                self.get_name(), e))
        raise
    finally:
        # Restore interpreter state even when do_build raised; previously
        # the bare "raise" skipped this cleanup entirely.
        sys.path = sys_path
        os.chdir(original_cwd)
def main():
    """Main CLI entrypoint."""
    options = docopt(__doc__, version=__version__)

    # show all params for debug (either flag spelling enables it)
    debug_requested = options.get('--debug') or options.get('-d')

    if debug_requested:
        Constants.DEBUG = True
        Logger.d('You supplied the following options: ')
        Logger.d('\n{0}'.format(dumps(options, indent=2, sort_keys=False)))
        Logger.clean('')

    # dynamically match the command that the user is trying to run
    for option_key, option_value in options.items():
        if not option_value or not hasattr(ezored.commands, option_key):
            continue

        command_module = getattr(ezored.commands, option_key)
        module_classes = getmembers(command_module, isclass)

        ezcommand = None

        # the command class is named after the option (ignoring case);
        # 'Base' is the abstract parent and is skipped
        for class_name, class_obj in module_classes:
            if class_name != 'Base' and class_name.lower() == option_key:
                ezcommand = class_obj(options)
                break

        if ezcommand:
            ezcommand.run()
def list(self):
    """Print the name of every target configured in the project file."""
    from ezored.models.logger import Logger
    from ezored.models.project import Project

    Logger.d('Listing all targets...')

    project = Project.create_from_project_file()

    Logger.clean('Target List:')
    for current_target in project.targets:
        Logger.clean(' - {0}'.format(current_target.get_name()))
def list(self):
    """Print the name of every dependency configured in the project file."""
    from ezored.models.logger import Logger
    from ezored.models.project import Project

    Logger.d('Listing all dependencies...')

    project = Project.create_from_project_file()

    Logger.clean('Dependency List:')
    for current_dependency in project.dependencies:
        Logger.clean(' - {0}'.format(current_dependency.get_name()))
def update(self):
    """Download and build every dependency declared in the project file."""
    from ezored.models.logger import Logger
    from ezored.models.project import Project

    Logger.d('Updating all dependencies...')

    project = Project.create_from_project_file()
    total_deps = len(project.dependencies)

    if not total_deps:
        Logger.i('Your project does not have dependencies')
        return

    Logger.i('Updating {0} dependencies...'.format(total_deps))

    # shared process data seeded with the project name
    process_data = ProcessData()
    process_data.reset()
    process_data.project_name = project.get_config_value('name')

    for dependency in project.dependencies:
        dependency.prepare_from_process_data(process_data)
        dependency.repository.download()
        dependency.repository.build(process_data)
def parse(self, process_data):
    """Parse this task's params through *process_data*, per task type.

    Logger.f (fatal) is invoked for an unknown type; a falsy
    *process_data* only logs and returns.
    """
    if not process_data:
        Logger.d('Cannot parse task params with invalid process data')
        return

    Logger.d('Parsing task: {0}...'.format(self.get_name()))

    params = self.params

    if self.type in (self.TYPE_COPY_FILE, self.TYPE_COPY_FILES):
        # both copy variants share the same 'from'/'to' text params
        for key in ('from', 'to'):
            if params and key in params:
                params[key] = process_data.parse_text(params[key])
    elif self.type == self.TYPE_PARSE_FILE:
        if params and 'file' in params:
            params['file'] = process_data.parse_text(params['file'])
    elif self.type == self.TYPE_RUN:
        if params and 'args' in params:
            params['args'] = process_data.parse_text_list(params['args'])
    else:
        Logger.f('Invalid task type')
def test_show(self):
    """Logger.show must not raise when given a tag and a message."""
    Logger.show('test', 'test_show')
    # reaching this point without an exception is the success condition
    self.assertTrue(True)
def build(self, target_name):
    """Build one target (by name) or, when *target_name* is falsy, all of
    the project's targets.

    For each buildable target: remove its stale files, download/build its
    repository, merge target data from every dependency, copy dependency
    files in, render the target's parse_files templates with jinja2, and
    finally run the target's own build command. Logger.f (fatal) fires when
    a named target is not found or its build command fails.
    """
    from ezored.models.logger import Logger
    from ezored.models.project import Project

    project = Project.create_from_project_file()

    # shared process data seeded with the project name
    process_data = ProcessData()
    process_data.reset()
    process_data.project_name = project.get_config_value('name')

    if target_name:
        Logger.i('Build only target: {0}'.format(target_name))
    else:
        Logger.i('Build all targets')

    target_found = False

    for target in project.targets:
        can_build = False

        # falsy target_name means "build everything"
        if not target_name:
            can_build = True
        elif target.get_name() == target_name:
            can_build = True

        if can_build:
            # NOTE(review): this logs the *requested* name, which is None in
            # build-all mode — consider logging target.get_name() instead.
            Logger.d('Getting target data by target name: {0}...'.format(
                target_name))

            target_found = True

            # targets need be deleted to be always fresh with target data from dependencies
            target.remove()

            # build the target repository after download
            target.prepare_from_process_data(process_data)
            target.repository.download()
            target.repository.build(process_data)

            # get all target data from project dependencies
            target_data = TargetData()
            target_data.project_config = project.config

            for dependency in project.dependencies:
                dependency.prepare_from_process_data(process_data)

                new_target_data = dependency.get_target_data_by_target_name_and_parse(
                    target.get_name(), process_data)

                target_data.merge(new_target_data)

            # back to target data
            target.prepare_from_process_data(process_data)

            # copy files from dependencies to target directory
            FileUtil.copy_files_from_list(target_data.copy_files)

            # parse files path and it content
            target_project_file_data = target.load_target_project_file_data(
            )

            if 'target' in target_project_file_data:
                target_project_data = target_project_file_data['target']

                # parse files
                if 'parse_files' in target_project_data:
                    target_project_data_parse_files = target_project_data[
                        'parse_files']

                    if target_project_data_parse_files:
                        Logger.d('Files to parse from target: {0}'.format(
                            len(target_project_data_parse_files)))

                        target_project_data_parse_files = process_data.parse_text_list(
                            target_project_data_parse_files)

                        for target_project_data_parse_file in target_project_data_parse_files:
                            # searchpath='/' so each template is addressed
                            # by its absolute path
                            template_loader = jinja2.FileSystemLoader(
                                searchpath='/')
                            template_env = jinja2.Environment(
                                loader=template_loader)
                            template_file = target_project_data_parse_file
                            template = template_env.get_template(
                                template_file)
                            templ_result = template.render(
                                target=target_data)

                            # rendered output overwrites the template file
                            # in place
                            FileUtil.write_to_file(
                                os.path.dirname(
                                    target_project_data_parse_file),
                                os.path.basename(
                                    target_project_data_parse_file),
                                str(templ_result))
                    else:
                        Logger.d('No files need to parse from target: {0}'.
                                 format(target.get_name()))

                # build target
                if 'build' in target_project_data:
                    Logger.i('Building target: {0}...'.format(
                        target.get_name()))

                    target_project_data_build = target_project_data[
                        'build']

                    exitcode, stderr, stdout = FileUtil.run(
                        target_project_data_build,
                        target.repository.get_vendor_dir(),
                        process_data.get_environ())

                    if exitcode == 0:
                        Logger.i('Build finished for target: {0}'.format(
                            target.get_name()))
                    else:
                        # surface whatever output the failed build produced
                        # before aborting
                        if stdout:
                            Logger.i('Build output for target: {0}'.format(
                                target.get_name()))
                            Logger.clean(stdout)

                        if stderr:
                            Logger.i(
                                'Error output while build target: {0}'.
                                format(target.get_name()))
                            Logger.clean(stderr)

                        Logger.f('Failed to build target: {0}'.format(
                            target.get_name()))

    if not target_found:
        Logger.f('Target not found: {0}'.format(target_name))
def create_dir(dir_path):
    """Create *dir_path* (including parents) unless it already exists."""
    Logger.d('Create a new dir: {0}'.format(dir_path))

    if os.path.isdir(dir_path):
        return

    os.makedirs(dir_path)
def test_clean(self):
    """Logger.clean must not raise for a plain message."""
    Logger.clean('test_clean')
    # reaching this point without an exception is the success condition
    self.assertTrue(True)
def remove_dir(dir_path):
    """Recursively delete *dir_path* if it exists as a directory."""
    Logger.d('Remove dir: {0}'.format(dir_path))

    if not os.path.isdir(dir_path):
        return

    shutil.rmtree(dir_path)
def remove(self):
    """Delete this target's vendor directory from disk."""
    Logger.d('Removing files for target: {0}...'.format(self.get_name()))
    FileUtil.remove_dir(self.repository.get_vendor_dir())
def remove_file(filename):
    """Delete *filename* if it exists as a regular file."""
    Logger.d('Remove file: {0}'.format(filename))

    if not os.path.isfile(filename):
        return

    os.remove(filename)
def test_fatal(self):
    """Logger.f must terminate the process with exit code 1."""
    with pytest.raises(SystemExit) as exc_info:
        Logger.f('test_fatal')

    self.assertEqual(exc_info.type, SystemExit)
    self.assertEqual(exc_info.value.code, 1)
def download_from_zip(self):
    """Download the repository archive (if not cached) and unpack it.

    Logger.f (fatal) fires when the download or the unpack leaves no
    result on disk.
    """
    # download
    Logger.i('Downloading repository: {0}...'.format(self.get_name()))

    download_url = self.get_download_url()
    download_filename = self.get_download_filename()
    download_dest_dir = Constants.TEMP_DIR
    download_dest_path = os.path.join(download_dest_dir, download_filename)
    unpacked_dir = self.get_temp_dir()
    unpack_dir = download_dest_dir

    # skip if exists
    if os.path.isfile(download_dest_path):
        Logger.i('Repository already downloaded: {0}'.format(
            self.get_name()))
    else:
        FileUtil.remove_file(download_dest_path)
        DownloadUtil.download_file(download_url, download_dest_dir,
                                   download_filename)

    # check if file was downloaded
    if os.path.isfile(download_dest_path):
        Logger.i('Repository downloaded: {0}'.format(self.get_name()))
    else:
        Logger.f('Problems when download repository: {0}'.format(
            self.get_name()))

    # unpack
    Logger.i('Unpacking repository: {0}...'.format(self.get_name()))

    if os.path.isdir(unpacked_dir):
        Logger.i('Repository already unpacked: {0}...'.format(
            self.get_name()))
    else:
        FileUtil.remove_dir(unpacked_dir)
        FileUtil.create_dir(unpack_dir)

        # context manager guarantees the archive handle is closed even when
        # extractall raises; the old code leaked the handle on error.
        with zipfile.ZipFile(download_dest_path, 'r') as zipref:
            zipref.extractall(path=unpack_dir)

        if os.path.isdir(unpacked_dir):
            Logger.i('Repository unpacked: {0}'.format(self.get_name()))
        else:
            Logger.f('Problems when unpack repository: {0}'.format(
                self.get_name()))
def run(self, process_data, template_data, working_dir):
    """Execute this task according to its type.

    COPY_FILE copies one file; COPY_FILES copies a glob of files into a
    directory; PARSE_FILE renders matching files in place with jinja2 and
    *template_data*; RUN executes a command in *working_dir*. A falsy
    *process_data* only logs and returns; an unknown type is fatal.
    """
    Logger.d('Running task: {0}...'.format(self.get_name()))

    if not process_data:
        Logger.d('Process data is invalid to run task')
        return

    if self.type == self.TYPE_COPY_FILE:
        # .get avoids the KeyError the old unguarded self.params['from'] /
        # ['to'] indexing raised when a param was omitted; "or None"
        # preserves the old falsy-to-None normalization.
        from_path = self.params.get('from') or None
        to_path = self.params.get('to') or None
        FileUtil.copy_file(from_path=from_path, to_path=to_path)
    elif self.type == self.TYPE_COPY_FILES:
        to_path = self.params.get('to') or None
        file_pattern = self.params.get('from')
        file_pattern = process_data.parse_text(file_pattern)

        found_files = FileUtil.find_files(file_pattern)

        for f in found_files:
            if f:
                FileUtil.copy_file(
                    from_path=f,
                    to_path=os.path.join(to_path, os.path.basename(f)))
    elif self.type == self.TYPE_PARSE_FILE:
        file_pattern = self.params.get('file')
        file_pattern = process_data.parse_text(file_pattern)

        found_files = FileUtil.find_files(file_pattern)

        for f in found_files:
            if f:
                # render each matched file in place
                template_file = os.path.abspath(f)
                template_loader = jinja2.FileSystemLoader(
                    searchpath=os.path.dirname(template_file))
                template_env = jinja2.Environment(loader=template_loader)
                template = template_env.get_template(
                    os.path.basename(template_file))
                templ_result = template.render(template_data)

                FileUtil.write_to_file(os.path.dirname(template_file),
                                       os.path.basename(template_file),
                                       str(templ_result))
    elif self.type == self.TYPE_RUN:
        run_args = self.params.get('args')

        if run_args:
            exitcode, stderr, stdout = FileUtil.run(
                run_args, working_dir,
                process_data.get_merged_data_for_runner())

            if exitcode == 0:
                Logger.i('Run finished for task: {0}'.format(
                    self.get_name()))
            else:
                # surface any output from the failed command before aborting
                if stdout:
                    Logger.i('Run output for task: {0}'.format(
                        self.get_name()))
                    Logger.clean(stdout)

                if stderr:
                    Logger.i('Error output while run task: {0}'.format(
                        self.get_name()))
                    Logger.clean(stderr)

                Logger.f('Failed to run task: {0}'.format(self.get_name()))
    else:
        Logger.f('Invalid task type')
def get_target_data_by_target_name_and_parse(self, target_name, process_data):
    """Build a TargetData from this dependency's entry for *target_name*,
    parse it with *process_data* and return it.

    Returns None when the target file is missing, has no 'targets' section,
    or contains no entry matching *target_name*.
    """
    Logger.d('Getting target data from dependency: {0}...'.format(
        self.get_name()))

    target_file_data = self.repository.load_target_file_data()

    if not target_file_data or 'targets' not in target_file_data:
        return None

    for target_data_item in target_file_data['targets']:
        if target_data_item['name'] != target_name:
            continue

        # first matching entry wins
        target_data = TargetData()

        if 'data' in target_data_item:
            data_dict = target_data_item['data']

            # the simple list fields all share the same copy-if-present rule
            list_fields = (
                'header_search_paths',
                'library_search_paths',
                'c_flags',
                'cxx_flags',
                'framework_links',
                'copy_files',
            )

            for field_name in list_fields:
                values = data_dict.get(field_name)
                if values:
                    getattr(target_data, field_name).extend(values)

            # create source group if have files for it
            header_files = data_dict.get('header_files') or []
            source_files = data_dict.get('source_files') or []

            if header_files or source_files:
                source_group = SourceGroup()
                source_group.name = self.get_name()
                source_group.header_files = header_files
                source_group.source_files = source_files
                target_data.source_groups.append(source_group)

        # parse all things
        target_data.parse(process_data)
        return target_data