def main():
    """Main CLI entrypoint."""
    options = docopt(__doc__, version=__version__)

    # show all params for debugging
    if options.get('--debug') or options.get('-d'):
        Constants.DEBUG = True

        Logger.d('You supplied the following options: ')
        Logger.d('\n{0}'.format(dumps(options, indent=2, sort_keys=False)))
        Logger.clean('')

    # dynamically match the command the user is trying to run
    for option_key, option_value in options.items():
        if hasattr(ezored.commands, option_key) and option_value:
            command_module = getattr(ezored.commands, option_key)
            commands = getmembers(command_module, isclass)
            ezcommand = None

            for command in commands:
                if command[0] != 'Base' and command[0].lower() == option_key:
                    ezcommand = command[1](options)
                    break

            if ezcommand:
                ezcommand.run()
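# Hedged sketch of what the dispatch above consumes, assuming illustrative
# command names ('init', 'clean'); the real set comes from this tool's docopt
# usage string. docopt returns a flat dict mapping each command and flag to
# its value, and the loop resolves the module ezored.commands.<key> and runs
# the class whose lowercase name matches the key (e.g. class Init for 'init').
def _example_dispatch_shape():
    # roughly what docopt(__doc__) returns for "ezored init --debug"
    options = {'init': True, 'clean': False, '--debug': True, '-d': False}

    # keys that can map to command modules (flags never pass the hasattr check)
    candidates = [key for (key, value) in options.items()
                  if value and not key.startswith('-')]
    assert candidates == ['init']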
def parse(self, process_data):
    if process_data:
        Logger.d('Parsing target data...')

        self.project_name = process_data.parse_text(self.project_name)
        self.header_search_paths = process_data.parse_text_list(self.header_search_paths)
        self.library_search_paths = process_data.parse_text_list(self.library_search_paths)
        self.source_groups = process_data.parse_sourge_group_list(self.source_groups)
        self.library_links = process_data.parse_text_list(self.library_links)
        self.framework_links = process_data.parse_text_list(self.framework_links)
        self.c_flags = process_data.parse_text_list(self.c_flags)
        self.cxx_flags = process_data.parse_text_list(self.cxx_flags)
        self.compiler_options = process_data.parse_text_list(self.compiler_options)
        self.copy_files = process_data.parse_copy_file_list(self.copy_files)
    else:
        Logger.d('Cannot parse target data with invalid source')
def read_file(file_path):
    Logger.d('Reading file: {0}'.format(file_path))

    # the context manager closes the file; no explicit close() is needed
    with open(file_path, 'r') as f:
        return f.read()
def get_filename_from_url_without_extension(url):
    Logger.d('Parsing URL to get filename...')

    # take the last path segment (handling both separators) and strip the
    # final extension only: rsplit('.', 1) splits once from the right
    filename = url.split('\\').pop().split('/').pop().rsplit('.', 1)[0]

    Logger.d('Filename from download URL: {0}'.format(filename))
    return filename
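# Hedged usage sketch for the helper above; the URL is illustrative. With
# rsplit('.', 1) only the final extension is stripped, so a double extension
# like '.tar.gz' keeps its '.tar' part.
def _example_filename_without_extension():
    url = 'http://example.com/files/lib-1.0.tar.gz'
    name = url.split('\\').pop().split('/').pop().rsplit('.', 1)[0]
    assert name == 'lib-1.0.tar'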
def run_all_tasks(tasks, process_data, template_data, working_dir):
    if tasks:
        Logger.d('Tasks to run: {0}...'.format(len(tasks)))

        for task in tasks:
            task.run(
                process_data=process_data,
                template_data=template_data,
                working_dir=working_dir)
    else:
        Logger.d('Task list is invalid to execute')
def list(self):
    from ezored.models.logger import Logger
    from ezored.models.project import Project

    Logger.d('Listing all targets...')

    project = Project.create_from_project_file()

    Logger.clean('Target List:')

    for target in project.targets:
        Logger.clean(' - {0}'.format(target.get_name()))
def load_target_data_file(self):
    Logger.d('Loading target data file...')

    vendor_dir = self.get_vendor_dir()
    target_file_path = os.path.join(vendor_dir, Constants.TARGET_DATA_FILE)

    try:
        with open(target_file_path, 'r') as stream:
            # safe_load avoids executing arbitrary YAML tags
            return yaml.safe_load(stream)
    except IOError as exc:
        Logger.f('Error while reading target file: {0}'.format(exc))
def load_vendor_file_data(self):
    Logger.d('Loading vendor file...')

    vendor_dir = self.get_temp_dir()
    vendor_file_path = os.path.join(vendor_dir, Constants.VENDOR_FILE)

    try:
        with open(vendor_file_path, 'r') as stream:
            # safe_load avoids executing arbitrary YAML tags
            return yaml.safe_load(stream)
    except IOError as exc:
        Logger.f('Error while reading vendor file: {0}'.format(exc))
def list(self):
    from ezored.models.logger import Logger
    from ezored.models.project import Project

    Logger.d('Listing all dependencies...')

    project = Project.create_from_project_file()

    Logger.clean('Dependency List:')

    for dependency in project.dependencies:
        Logger.clean(' - {0}'.format(dependency.get_name()))
def get_filename_from_url(url):
    Logger.d('Parsing URL to get filename...')

    scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
    filename = os.path.basename(path)

    if not filename:
        filename = 'downloaded.file'

    Logger.d('Filename from download URL: {0}'.format(filename))
    return filename
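# Hedged usage sketch: urlsplit() separates the path from the query string,
# so only the path's last segment becomes the filename. The URL is
# illustrative, and this assumes the same urlparse/os imports used above
# (urlparse is urllib.parse on Python 3).
def _example_filename_from_url():
    scheme, netloc, path, query, fragment = urlparse.urlsplit(
        'http://example.com/pkg/lib-1.0.zip?token=abc')
    assert os.path.basename(path) == 'lib-1.0.zip'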
def write_to_file(dir_path, filename, content):
    Logger.d('Creating file {0} in directory {1} with {2} bytes...'.format(
        filename, dir_path, len(content)))

    full_file_path = os.path.join(dir_path, filename)

    FileUtil.remove_file(full_file_path)
    FileUtil.create_dir(dir_path)

    # the context manager closes the file; no explicit close() is needed
    with open(full_file_path, 'w') as f:
        f.write(content)

    Logger.d('Created file {0} in directory {1}'.format(filename, dir_path))
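# Hedged usage sketch for FileUtil.write_to_file(); the paths and content are
# illustrative. The helper removes any existing file first and creates the
# directory, so callers do not need to prepare the destination:
#
#   FileUtil.write_to_file('build/generated', 'config.h', '#define DEBUG 1\n')
#   # -> creates build/generated/config.h with the given content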
def merge(self, target_data):
    if target_data:
        Logger.d('Merging target data...')

        self.header_search_paths.extend(target_data.header_search_paths)
        self.library_search_paths.extend(target_data.library_search_paths)
        self.source_groups.extend(target_data.source_groups)
        self.library_links.extend(target_data.library_links)
        self.framework_links.extend(target_data.framework_links)
        self.c_flags.extend(target_data.c_flags)
        self.cxx_flags.extend(target_data.cxx_flags)
        self.compiler_options.extend(target_data.compiler_options)
        self.tasks.extend(target_data.tasks)
    else:
        Logger.d('Cannot merge target data with invalid source')
def update(self):
    from ezored.models.logger import Logger
    from ezored.models.project import Project

    Logger.d('Updating all dependencies...')

    project = Project.create_from_project_file()
    total_deps = len(project.dependencies)

    if total_deps > 0:
        Logger.i('Updating {0} dependencies...'.format(total_deps))

        process_data = ProcessData()
        process_data.reset()
        process_data.project_name = project.get_config_value('name')

        for dependency in project.dependencies:
            dependency.prepare_from_process_data(process_data)
            dependency.repository.download()
            dependency.repository.build(process_data)
    else:
        Logger.i('Your project does not have dependencies')
def initialize(self):
    from ezored.models.constants import Constants
    from ezored.models.logger import Logger

    Logger.d('Initializing...')

    if os.path.isfile(Constants.PROJECT_FILE):
        Logger.d('Project file already exists and will not be created')
    else:
        Logger.d('Creating project file...')

        # the context manager closes the file; no explicit close() is needed
        with open(Constants.PROJECT_FILE, 'w') as project_file:
            project_file.write(Constants.PROJECT_FILE_DATA)

        Logger.d('Project file created')

    Logger.i('A new ezored project was initialized successfully')
def parse(self, process_data):
    if process_data:
        Logger.d('Parsing task: {0}...'.format(self.get_name()))

        if self.type == self.TYPE_COPY_FILE:
            if self.params and 'from' in self.params:
                self.params['from'] = process_data.parse_text(self.params['from'])
            if self.params and 'to' in self.params:
                self.params['to'] = process_data.parse_text(self.params['to'])
        elif self.type == self.TYPE_COPY_FILES:
            if self.params and 'from' in self.params:
                self.params['from'] = process_data.parse_text(self.params['from'])
            if self.params and 'to' in self.params:
                self.params['to'] = process_data.parse_text(self.params['to'])
        elif self.type == self.TYPE_PARSE_FILE:
            if self.params and 'file' in self.params:
                self.params['file'] = process_data.parse_text(self.params['file'])
        elif self.type == self.TYPE_RUN:
            if self.params and 'args' in self.params:
                self.params['args'] = process_data.parse_text_list(self.params['args'])
        else:
            Logger.f('Invalid task type')
    else:
        Logger.d('Cannot parse task params with invalid process data')
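# Hedged sketch of a task dict as consumed by Task.from_dict() elsewhere in
# this codebase. The 'type', 'name' and 'params' keys mirror the attributes
# used above; the concrete type string and paths are assumptions for
# illustration only:
#
#   {
#       'type': 'copy_file',
#       'name': 'copy-main-header',
#       'params': {
#           'from': '${VENDOR_DIR}/include/lib.h',
#           'to': 'build/include/lib.h',
#       },
#   }
#
# parse() then runs each 'from'/'to' value through process_data.parse_text(),
# so variables like the one above are expanded before the task executes.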
def create_dir(dir_path):
    Logger.d('Create a new dir: {0}'.format(dir_path))

    if not os.path.isdir(dir_path):
        os.makedirs(dir_path)
def remove(self):
    Logger.d('Removing files for target: {0}...'.format(self.get_name()))

    vendor_dir = self.repository.get_vendor_dir()
    FileUtil.remove_dir(vendor_dir)
def execute_command(self, target_command, target_name):
    from ezored.models.logger import Logger
    from ezored.models.project import Project
    import importlib
    import sys

    project = Project.create_from_project_file()

    if target_name:
        Logger.i('Executing command "{0}" only on target "{1}"'.format(
            target_command, target_name))
    else:
        Logger.i('Executing command "{0}" on all targets'.format(target_command))

    target_found = False
    total_targets = len(project.targets)

    if total_targets > 0:
        for target in project.targets:
            process_data = ProcessData()
            process_data.reset()
            process_data.project_name = project.get_config_value('name')

            can_build = not target_name or target.get_name() == target_name

            if can_build:
                Logger.d('Getting target data by target name "{0}"...'.format(
                    target.get_name()))
                target_found = True

                # targets need to be removed so they are always fresh with
                # target data from dependencies
                target.remove()

                # build the target repository after download
                target.prepare_from_process_data(process_data)
                target.repository.download()
                target.repository.build(project=project, process_data=process_data)

                # get all target data from project dependencies
                target_data = TargetData()
                target_data.project_home = target.repository.get_vendor_dir()
                target_data.project_config = project.config

                for dependency in project.dependencies:
                    dependency.prepare_from_process_data(process_data)
                    new_target_data = dependency.get_target_data_by_target_name_and_parse(
                        target.get_name(), process_data)
                    target_data.merge(new_target_data)

                # back to target data
                target.prepare_from_process_data(process_data)

                # process target data and execute the required command
                target_data_file = target.repository.load_target_data_file()

                if 'target' in target_data_file:
                    target_project_data = target_data_file['target']

                    # target tasks
                    if 'tasks' in target_project_data:
                        for target_task_data in target_project_data['tasks']:
                            task = Task.from_dict(target_task_data)
                            task.parse(process_data)
                            target_data.tasks.append(task)

                # run all tasks
                Task.run_all_tasks(
                    tasks=target_data.tasks,
                    process_data=process_data,
                    template_data={'target': target_data},
                    working_dir=target.repository.get_vendor_dir())

                # execute command on target
                Logger.i('Executing command "{0}" on target "{1}"...'.format(
                    target_command, target.get_name()))

                sys_path = list(sys.path)
                original_cwd = os.getcwd()

                try:
                    sys.path.insert(0, target.repository.get_vendor_dir())

                    target_module = importlib.import_module(Constants.TARGET_MODULE_NAME)
                    command = getattr(target_module, 'do_' + target_command)
                    command(params={
                        'project': project,
                        'target': target,
                        'target_data': target_data,
                        'process_data': process_data,
                    })

                    del sys.modules[Constants.TARGET_MODULE_NAME]
                    del target_module
                    del command

                    Logger.i('Command "{0}" finished for target "{1}"'.format(
                        target_command, target.get_name()))
                except Exception as e:
                    # str(e), since e.message does not exist on Python 3
                    Logger.e('Error while calling "{0}" on target "{1}": {2}'.format(
                        target_command, target.get_name(), str(e)))
                    raise
                finally:
                    # restore interpreter state even when the command fails
                    sys.path = sys_path
                    os.chdir(original_cwd)

        if not target_found:
            Logger.f('Target not found: {0}'.format(target_name))
    else:
        Logger.i('Your project does not have targets')
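# Hedged sketch of the contract execute_command() assumes: the target's
# vendor dir must contain a module named Constants.TARGET_MODULE_NAME that
# exposes one do_<command> function per supported command, each receiving the
# params dict built above. The body below is illustrative, not the actual
# target implementation:
#
#   # <vendor dir>/<target module>.py
#   def do_build(params):
#       project = params['project']           # the loaded Project
#       target_data = params['target_data']   # merged data from dependencies
#       # ... invoke the platform build here ...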
def build(self, target_name):
    from ezored.models.logger import Logger
    from ezored.models.project import Project

    project = Project.create_from_project_file()

    process_data = ProcessData()
    process_data.reset()
    process_data.project_name = project.get_config_value('name')

    if target_name:
        Logger.i('Building only target: {0}'.format(target_name))
    else:
        Logger.i('Building all targets')

    target_found = False

    for target in project.targets:
        can_build = not target_name or target.get_name() == target_name

        if can_build:
            Logger.d('Getting target data by target name: {0}...'.format(
                target.get_name()))
            target_found = True

            # targets need to be removed so they are always fresh with target
            # data from dependencies
            target.remove()

            # build the target repository after download
            target.prepare_from_process_data(process_data)
            target.repository.download()
            target.repository.build(process_data)

            # get all target data from project dependencies
            target_data = TargetData()
            target_data.project_config = project.config

            for dependency in project.dependencies:
                dependency.prepare_from_process_data(process_data)
                new_target_data = dependency.get_target_data_by_target_name_and_parse(
                    target.get_name(), process_data)
                target_data.merge(new_target_data)

            # back to target data
            target.prepare_from_process_data(process_data)

            # copy files from dependencies to the target directory
            FileUtil.copy_files_from_list(target_data.copy_files)

            # parse file paths and their content
            target_project_file_data = target.load_target_project_file_data()

            if 'target' in target_project_file_data:
                target_project_data = target_project_file_data['target']

                # parse files
                if 'parse_files' in target_project_data:
                    parse_files = target_project_data['parse_files']

                    if parse_files:
                        Logger.d('Files to parse from target: {0}'.format(
                            len(parse_files)))

                        parse_files = process_data.parse_text_list(parse_files)

                        for parse_file in parse_files:
                            template_loader = jinja2.FileSystemLoader(searchpath='/')
                            template_env = jinja2.Environment(loader=template_loader)
                            template = template_env.get_template(parse_file)
                            templ_result = template.render(target=target_data)

                            FileUtil.write_to_file(
                                os.path.dirname(parse_file),
                                os.path.basename(parse_file),
                                str(templ_result))
                    else:
                        Logger.d('No files need to be parsed from target: {0}'.format(
                            target.get_name()))

                # build target
                if 'build' in target_project_data:
                    Logger.i('Building target: {0}...'.format(target.get_name()))

                    exitcode, stderr, stdout = FileUtil.run(
                        target_project_data['build'],
                        target.repository.get_vendor_dir(),
                        process_data.get_environ())

                    if exitcode == 0:
                        Logger.i('Build finished for target: {0}'.format(
                            target.get_name()))
                    else:
                        if stdout:
                            Logger.i('Build output for target: {0}'.format(
                                target.get_name()))
                            Logger.clean(stdout)

                        if stderr:
                            Logger.i('Error output while building target: {0}'.format(
                                target.get_name()))
                            Logger.clean(stderr)

                        Logger.f('Failed to build target: {0}'.format(
                            target.get_name()))

    if not target_found:
        Logger.f('Target not found: {0}'.format(target_name))
def test_debug_with_debug_turned_on(self):
    Constants.DEBUG = True
    Logger.d('test_debug_with_debug_turned_on')
    self.assertTrue(True)
def get_target_data_by_target_name_and_parse(self, target_name, process_data):
    Logger.d('Getting target data from dependency: {0}...'.format(self.get_name()))

    target_file_data = self.repository.load_target_file_data()

    if target_file_data and 'targets' in target_file_data:
        for target_data_item in target_file_data['targets']:
            if target_data_item['name'] != target_name:
                continue

            # get target data
            target_data = TargetData()

            if 'data' in target_data_item:
                target_data_dict = target_data_item['data']

                if target_data_dict.get('header_search_paths'):
                    target_data.header_search_paths.extend(
                        target_data_dict['header_search_paths'])

                if target_data_dict.get('library_search_paths'):
                    target_data.library_search_paths.extend(
                        target_data_dict['library_search_paths'])

                if target_data_dict.get('c_flags'):
                    target_data.c_flags.extend(target_data_dict['c_flags'])

                if target_data_dict.get('cxx_flags'):
                    target_data.cxx_flags.extend(target_data_dict['cxx_flags'])

                if target_data_dict.get('framework_links'):
                    target_data.framework_links.extend(
                        target_data_dict['framework_links'])

                if target_data_dict.get('copy_files'):
                    target_data.copy_files.extend(target_data_dict['copy_files'])

                # create a source group if there are files for it
                target_data_header_files = target_data_dict.get('header_files') or []
                target_data_source_files = target_data_dict.get('source_files') or []

                if target_data_header_files or target_data_source_files:
                    target_data_source_group = SourceGroup()
                    target_data_source_group.name = self.get_name()
                    target_data_source_group.header_files = target_data_header_files
                    target_data_source_group.source_files = target_data_source_files

                    target_data.source_groups.append(target_data_source_group)

            # parse everything
            target_data.parse(process_data)
            return target_data
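# Hedged sketch of the target file layout the parser above walks; the keys
# mirror the lookups in the code, the values are illustrative:
#
#   targets:
#     - name: ios
#       data:
#         header_search_paths: ['vendor/include']
#         c_flags: ['-fPIC']
#         header_files: ['files/include/lib.hpp']
#         source_files: ['files/src/lib.cpp']
#
# A matching entry becomes a TargetData whose single SourceGroup is named
# after this dependency, then parsed against the process data.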
def download_file(url, dest=None, filename=None):
    """Download and save the file specified by url to the dest directory."""
    Logger.d('New download request: {0}'.format(url))
    Logger.d('Destination: {0}'.format(dest))
    Logger.d('Filename: {0}'.format(filename))

    req = urllib2.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    u = urllib2.urlopen(req)

    scheme, netloc, path, query, fragment = urlparse.urlsplit(url)

    if not filename:
        filename = DownloadUtil.get_filename_from_url(path)

    if dest:
        FileUtil.create_dir(dest)
        filename = os.path.join(dest, filename)

    Logger.d('Getting file metadata...')

    with open(filename, 'wb') as f:
        meta = u.info()
        meta_func = meta.getheaders if hasattr(meta, 'getheaders') else meta.get_all
        meta_length = meta_func('Content-Length')

        file_size = None
        pbar = None

        if meta_length:
            file_size = int(meta_length[0])

        if file_size:
            Logger.d('File size in bytes: {0}'.format(file_size))
            Logger.clean('')
            pbar = tqdm(total=file_size)

        file_size_dl = 0
        block_sz = 8192

        if not pbar:
            Logger.d('Downloading, please wait...')

        while True:
            dbuffer = u.read(block_sz)

            if not dbuffer:
                break

            dbuffer_len = len(dbuffer)
            file_size_dl += dbuffer_len
            f.write(dbuffer)

            if pbar:
                pbar.update(dbuffer_len)

        if pbar:
            pbar.close()
            Logger.clean('')

    return filename
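# Hedged usage sketch, assuming download_file() is exposed on DownloadUtil
# like get_filename_from_url above; the URL and directory are illustrative.
# When dest is given the directory is created first and the returned value is
# the full path of the saved file:
#
#   saved = DownloadUtil.download_file(
#       'http://example.com/pkg/lib-1.0.tar.gz', dest='vendor/temp')
#   # -> 'vendor/temp/lib-1.0.tar.gz', with a tqdm progress bar whenever the
#   #    server sends a Content-Length header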
def test_debug_with_debug_turned_off(self):
    Constants.DEBUG = False
    Logger.d('test_debug_with_debug_turned_off')
    self.assertTrue(True)
def remove_dir(dir_path):
    Logger.d('Remove dir: {0}'.format(dir_path))

    if os.path.isdir(dir_path):
        shutil.rmtree(dir_path)
def remove_file(filename):
    Logger.d('Remove file: {0}'.format(filename))

    if os.path.isfile(filename):
        os.remove(filename)
def run(self, process_data, template_data, working_dir):
    Logger.d('Running task: {0}...'.format(self.get_name()))

    if process_data:
        if self.type == self.TYPE_COPY_FILE:
            # use .get() so a missing key does not raise KeyError
            from_path = self.params.get('from')
            to_path = self.params.get('to')

            FileUtil.copy_file(from_path=from_path, to_path=to_path)
        elif self.type == self.TYPE_COPY_FILES:
            to_path = self.params.get('to')

            file_pattern = self.params.get('from')
            file_pattern = process_data.parse_text(file_pattern)

            found_files = FileUtil.find_files(file_pattern)

            for f in found_files:
                if f:
                    FileUtil.copy_file(
                        from_path=f,
                        to_path=os.path.join(to_path, os.path.basename(f)))
        elif self.type == self.TYPE_PARSE_FILE:
            file_pattern = self.params.get('file')
            file_pattern = process_data.parse_text(file_pattern)

            found_files = FileUtil.find_files(file_pattern)

            for f in found_files:
                if f:
                    template_file = os.path.abspath(f)
                    template_loader = jinja2.FileSystemLoader(
                        searchpath=os.path.dirname(template_file))
                    template_env = jinja2.Environment(loader=template_loader)
                    template = template_env.get_template(os.path.basename(template_file))
                    templ_result = template.render(template_data)

                    FileUtil.write_to_file(
                        os.path.dirname(template_file),
                        os.path.basename(template_file),
                        str(templ_result))
        elif self.type == self.TYPE_RUN:
            run_args = self.params.get('args')

            if run_args:
                exitcode, stderr, stdout = FileUtil.run(
                    run_args, working_dir, process_data.get_merged_data_for_runner())

                if exitcode == 0:
                    Logger.i('Run finished for task: {0}'.format(self.get_name()))
                else:
                    if stdout:
                        Logger.i('Run output for task: {0}'.format(self.get_name()))
                        Logger.clean(stdout)

                    if stderr:
                        Logger.i('Error output while running task: {0}'.format(
                            self.get_name()))
                        Logger.clean(stderr)

                    Logger.f('Failed to run task: {0}'.format(self.get_name()))
        else:
            Logger.f('Invalid task type')
    else:
        Logger.d('Process data is invalid to run task')
def get_target_data_by_target_name_and_parse(self, target_name, process_data):
    Logger.d('Getting target data from dependency: {0}...'.format(self.get_name()))

    target_file_data = self.repository.load_target_data_file()

    if target_file_data and 'targets' in target_file_data:
        for target_data_item in target_file_data['targets']:
            current_target_name = target_data_item['name']

            if not self.match_name(pattern=current_target_name, name=target_name):
                continue

            # get target data
            target_data = TargetData()

            if 'data' in target_data_item:
                target_data_dict = target_data_item['data']

                if target_data_dict.get('header_search_paths'):
                    target_data.header_search_paths.extend(
                        FileUtil.normalize_path_from_list(
                            target_data_dict['header_search_paths']))

                if target_data_dict.get('library_search_paths'):
                    target_data.library_search_paths.extend(
                        FileUtil.normalize_path_from_list(
                            target_data_dict['library_search_paths']))

                if target_data_dict.get('c_flags'):
                    target_data.c_flags.extend(target_data_dict['c_flags'])

                if target_data_dict.get('cxx_flags'):
                    target_data.cxx_flags.extend(target_data_dict['cxx_flags'])

                if target_data_dict.get('library_links'):
                    target_data.library_links.extend(target_data_dict['library_links'])

                if target_data_dict.get('framework_links'):
                    target_data.framework_links.extend(
                        target_data_dict['framework_links'])

                if target_data_dict.get('tasks'):
                    for target_data_task in target_data_dict['tasks']:
                        target_data.tasks.append(Task.from_dict(target_data_task))

                # create a source group if there are files for it
                target_data_header_files = []
                target_data_source_files = []

                if target_data_dict.get('header_files'):
                    for file_data in target_data_dict['header_files']:
                        # find all files matching the declared pattern
                        source_file_to_find = SourceFile.from_dict(file_data)

                        if source_file_to_find:
                            # process the file pattern first
                            file_pattern = process_data.parse_text(
                                source_file_to_find.file)

                            found_files = FileUtil.find_files(file_pattern)
                            found_files = FileUtil.normalize_path_from_list(found_files)

                            # create a new source file for each found file
                            for f in found_files:
                                target_data_header_files.append(SourceFile(
                                    source_file=f,
                                    compile_flags=source_file_to_find.compile_flags))

                if target_data_dict.get('source_files'):
                    for file_data in target_data_dict['source_files']:
                        # find all files matching the declared pattern
                        source_file_to_find = SourceFile.from_dict(file_data)

                        if source_file_to_find:
                            # process the file pattern first
                            file_pattern = process_data.parse_text(
                                source_file_to_find.file)

                            found_files = FileUtil.find_files(file_pattern)
                            found_files = FileUtil.normalize_path_from_list(found_files)

                            # create a new source file for each found file
                            for f in found_files:
                                target_data_source_files.append(SourceFile(
                                    source_file=FileUtil.normalize_path(f),
                                    compile_flags=source_file_to_find.compile_flags))

                if target_data_header_files or target_data_source_files:
                    target_data_source_group = SourceGroup()
                    target_data_source_group.name = self.get_name()
                    target_data_source_group.header_files = target_data_header_files
                    target_data_source_group.source_files = target_data_source_files

                    target_data.source_groups.append(target_data_source_group)

            # parse everything
            target_data.parse(process_data)
            return target_data