def run(self, *args, **kwargs) -> dict:
    """
    This base run method should be called by any descendent classes
    since it contains the calls to the first `pftree` probe as well
    as any (overloaded) file filtering.
    """
    b_status        : bool  = False
    b_timerStart    : bool  = False
    d_env           : dict  = {}
    d_filter        : dict  = {}
    d_pftreeProbe   : dict  = {}
    d_pftreeRun     : dict  = {}
    b_JSONprint     : bool  = True

    self.dp.qprint(
        "Starting pfdo run... (please be patient while running)",
        level=1)

    for k, v in kwargs.items():
        if k == 'timerStart':   b_timerStart    = bool(v)
        if k == 'JSONprint':    b_JSONprint     = bool(v)

    if b_timerStart:
        other.tic()

    d_env = self.env_check()
    if d_env['status']:
        # Change to the inputDir so as to get a relative
        # tree listing structure.
        str_startDir = os.getcwd()
        os.chdir(self.args['inputDir'])

        d_pftreeProbe = self.pf_tree.run(timerStart=False)
        if d_pftreeProbe['status']:
            b_status = d_pftreeProbe['status']
            if len(self.args['fileFilter']) or len(self.args['dirFilter']):
                d_filter = self.filterFileHitList()
                b_status = d_filter['status']
            if self.args['test']:
                d_pftreeRun = self.testRun()
                b_status = d_pftreeRun['status']

        os.chdir(str_startDir)

    d_ret = {
        'status':           b_status,
        'd_env':            d_env,
        'd_pftreeProbe':    d_pftreeProbe,
        'd_filter':         d_filter,
        'd_pftreeRun':      d_pftreeRun,
        'runTime':          other.toc()
    }

    if self.args['json'] and b_JSONprint:
        self.ret_dump(d_ret, **kwargs)
    else:
        self.dp.qprint('Returning from pfdo base class run...', level=1)

    return d_ret
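# The descendants that follow all reuse this base run() the same way. A
# minimal sketch of that pattern is shown below; the subclass name
# pfdo_myAnalysis and its myAnalysis() worker are hypothetical and only
# illustrate how a descendant defers the probe/filter work to the base
# class before layering its own processing on top.
class pfdo_myAnalysis(pfdo):

    def run(self, *args, **kwargs) -> dict:
        # Suppress the base class JSON dump and timer so this class
        # controls both; the base run() performs the pftree probe and
        # any file/dir filtering.
        d_pfdo      : dict  = super().run(JSONprint = False, timerStart = False)
        d_analysis  : dict  = {}

        if d_pfdo['status']:
            d_analysis = self.myAnalysis()

        return {
            'status':       d_pfdo['status'] and d_analysis.get('status', False),
            'd_pfdo':       d_pfdo,
            'd_analysis':   d_analysis
        }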
def run(self, *args, **kwargs):
    """
    The run method is merely a thin shim down to the
    embedded pftree run method.
    """
    b_status            = True
    d_pftreeRun         = {}
    d_inputAnalysis     = {}
    d_env               = self.env_check()
    b_timerStart        = False

    self.dp.qprint(
        "\tStarting pfdicom run... (please be patient while running)",
        level=1)

    for k, v in kwargs.items():
        if k == 'timerStart': b_timerStart = bool(v)

    if b_timerStart:
        other.tic()

    if d_env['status']:
        d_pftreeRun = self.pf_tree.run(timerStart=False)
    else:
        b_status = False

    str_startDir = os.getcwd()
    os.chdir(self.str_inputDir)

    if b_status:
        if len(self.str_extension):
            d_inputAnalysis = self.pf_tree.tree_process(
                inputReadCallback       = None,
                analysisCallback        = self.filelist_prune,
                outputWriteCallback     = None,
                applyResultsTo          = 'inputTree',
                applyKey                = 'l_file',
                persistAnalysisResults  = True
            )

    os.chdir(str_startDir)

    d_ret = {
        'status':           b_status and d_pftreeRun['status'],
        'd_env':            d_env,
        'd_pftreeRun':      d_pftreeRun,
        'd_inputAnalysis':  d_inputAnalysis,
        'runTime':          other.toc()
    }

    if self.b_json:
        self.ret_dump(d_ret, **kwargs)

    self.dp.qprint('\tReturning from pfdicom run...', level=1)
    return d_ret
def run(self, *args, **kwargs):
    """
    The run method calls the base class run() to perform the initial
    probe and analysis, and then effectively calls the method to
    perform the DICOM tag substitution.
    """
    b_status        = True
    d_tagSub        = {}
    b_timerStart    = False

    self.dp.qprint(
        "Starting pfdicom_tagSub run... (please be patient while running)",
        level=1)

    for k, v in kwargs.items():
        if k == 'timerStart': b_timerStart = bool(v)

    if b_timerStart:
        other.tic()

    # Run the base class, which probes the file tree
    # and does an initial analysis. Also suppress the
    # base class from printing JSON results since those
    # will be printed by this class.
    d_pfdicom = super().run(
        JSONprint   = False,
        timerStart  = False
    )

    if d_pfdicom['status']:
        str_startDir = os.getcwd()
        os.chdir(self.str_inputDir)

        if b_status:
            d_tagSub = self.tags_substitute()
            b_status = b_status and d_tagSub['status']

        os.chdir(str_startDir)

    d_ret = {
        'status':       b_status,
        'd_pfdicom':    d_pfdicom,
        'd_tagSub':     d_tagSub,
        'runTime':      other.toc()
    }

    if self.b_json:
        self.ret_dump(d_ret, **kwargs)

    self.dp.qprint('Returning from pfdicom_tagSub run...', level=1)
    return d_ret
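# tags_substitute() itself is not shown here. Conceptually, per-file DICOM
# tag substitution with pydicom looks roughly like the sketch below; this is
# an illustrative stand-in, not the pfdicom_tagSub implementation, and the
# tag/value map and file paths are hypothetical (pfdicom_tagSub drives its
# substitutions from its own command line arguments).
import pydicom

def tags_substitute_sketch(str_inputFile: str, str_outputFile: str) -> dict:
    d_tagMap = {
        'PatientName':  'anon',
        'PatientID':    'anon'
    }
    ds = pydicom.dcmread(str_inputFile)
    for str_tag, str_value in d_tagMap.items():
        # Only touch tags that actually exist in this dataset.
        if str_tag in ds:
            setattr(ds, str_tag, str_value)
    ds.save_as(str_outputFile)
    return {'status': True, 'outputFile': str_outputFile}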
def run(self, *args, **kwargs):
    """
    The run method calls the base class run() to perform the initial
    probe and analysis, and then effectively calls the method to
    perform the DICOM tag extraction.
    """
    b_status        = True
    b_timerStart    = False
    d_tagExtract    = {}

    self.dp.qprint(
        "Starting pfdicom_tagExtract run... (please be patient while running)",
        level = 1)

    for k, v in kwargs.items():
        if k == 'timerStart': b_timerStart = bool(v)

    if b_timerStart:
        other.tic()

    # Run the base class, which probes the file tree
    # and does an initial analysis. Also suppress the
    # base class from printing JSON results since those
    # will be printed by this class.
    d_pfdicom = super().run(
        JSONprint   = False,
        timerStart  = False
    )

    if d_pfdicom['status']:
        str_startDir = os.getcwd()
        os.chdir(self.str_inputDir)

        if b_status:
            d_tagExtract = self.tags_extract()
            b_status = b_status and d_tagExtract['status']

        os.chdir(str_startDir)

    d_ret = {
        'status':           b_status,
        'd_pfdicom':        d_pfdicom,
        'd_tagExtract':     d_tagExtract,
        'runTime':          other.toc()
    }

    if self.b_json:
        self.ret_dump(d_ret, **kwargs)

    self.dp.qprint('Returning from pfdicom_tagExtract run...', level = 1)
    return d_ret
def run(self, *args, **kwargs) -> dict:
    """
    This run method calls the base class run() to perform the initial
    `pftree` probe and file filtering, and then runs the med2image
    conversion over the filtered tree.
    """
    b_status        : bool  = False
    b_timerStart    : bool  = False
    d_pfdo          : dict  = {}
    d_med2image     : dict  = {}

    self.dp.qprint(
        "Starting pfdo_med2image run... (please be patient while running)",
        level = 1)

    for k, v in kwargs.items():
        if k == 'timerStart': b_timerStart = bool(v)

    if b_timerStart:
        other.tic()

    d_pfdo = super().run(
        JSONprint   = False,
        timerStart  = False
    )

    if d_pfdo['status']:
        d_med2image = self.med2image()
        b_status    = d_med2image['status']

    d_ret = {
        'status':       b_status,
        'd_pfdo':       d_pfdo,
        'd_med2image':  d_med2image,
        'runTime':      other.toc()
    }

    if self.args['json']:
        self.ret_dump(d_ret, **kwargs)
    else:
        self.dp.qprint('Returning from pfdo_med2image class run...', level = 1)

    return d_ret
def stats_compute(self, *args, **kwargs):
    """
    Simply loop over the internal dictionary and echo the
    list size at each key (i.e. the number of files).
    """
    totalElements   = 0
    totalKeys       = 0
    totalSize       = 0
    l_stats         = []
    d_report        = {}

    for k, v in sorted(self.d_inputTreeCallback.items(),
                       key     = lambda kv: kv[1]['diskUsage_raw'],
                       reverse = self.b_statsReverse):
        str_report = "files: %5d; raw size: %12d; human size: %8s; %s" % (
            len(self.d_inputTree[k]),
            self.d_inputTreeCallback[k]['diskUsage_raw'],
            self.d_inputTreeCallback[k]['diskUsage_human'],
            k
        )
        d_report = {
            'files':            len(self.d_inputTree[k]),
            'diskUsage_raw':    self.d_inputTreeCallback[k]['diskUsage_raw'],
            'diskUsage_human':  self.d_inputTreeCallback[k]['diskUsage_human'],
            'path':             k
        }
        self.dp.qprint(str_report, level=1)
        l_stats.append(d_report)
        totalElements   += len(v)
        totalKeys       += 1
        totalSize       += self.d_inputTreeCallback[k]['diskUsage_raw']

    str_totalSize_human = self.sizeof_fmt(totalSize)

    return {
        'status':           True,
        'dirs':             totalKeys,
        'files':            totalElements,
        'totalSize':        totalSize,
        'totalSize_human':  str_totalSize_human,
        'l_stats':          l_stats,
        'runTime':          other.toc()
    }
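# sizeof_fmt() is called above but not shown. The widely used recipe below
# would satisfy that call -- a sketch only; the exact prefixes and format
# string used by pftree may differ.
def sizeof_fmt(num: float, suffix: str = 'B') -> str:
    # Step through binary magnitude prefixes until the value fits.
    for str_unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
        if abs(num) < 1024.0:
            return '%3.1f%s%s' % (num, str_unit, suffix)
        num /= 1024.0
    return '%.1f%s%s' % (num, 'Yi', suffix)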
def run(self, *args, **kwargs):
    """
    DESC
        The run method is the main entry point to the operational
        behaviour of the script.

    INPUT
        [timerStart = True|False]

    RETURN
        {
            'status': True|False
        }
    """
    b_status        = True
    d_env           = self.env_check()
    b_timerStart    = False
    d_inputFile     = {}
    d_slides        = {}
    d_html          = {}
    d_assemble      = {}
    d_ret           = {}
    numSlides       = 0

    self.dp.qprint("Starting tsmake run... ", level=1)

    for k, v in kwargs.items():
        if k == 'timerStart': b_timerStart = bool(v)

    if b_timerStart:
        other.tic()

    # Process an optional input file to split into slides
    d_inputFile = self.slidesFile_break()
    if d_inputFile['status']:
        # read input slides
        d_slides    = self.slide_filesRead(directory=self.str_inputDir)
        numSlides   = d_slides['numSlides']
        # read html components
        if d_slides['status']:
            d_html = self.htmlSnippets_read()
            # assemble the HTML page
            if d_html['status']:
                d_assemble = self.htmlPage_assemble()

                # now create the output dir
                other.mkdir(self.str_outputDir)

                # write the index.html file
                with open('%s/index.html' % self.str_outputDir, "w") as fp:
                    fp.write(d_assemble['pageHTML'])

                # and copy necessary dirs
                l_supportDirs   = [
                    './css',
                    './fortunes',
                    './images',
                    './js',
                    './logos'
                ]
                l_userDirs      = []
                if len(self.str_additionalDirList):
                    l_userDirs  = self.str_additionalDirList.split(',')

                for str_dir in l_supportDirs + l_userDirs:
                    self.dp.qprint("Copying dir %s... to %s/%s" %
                                   (str_dir,
                                    self.str_outputDir,
                                    os.path.basename(str_dir)),
                                   level = 2)
                    copy_tree(
                        '%s' % str_dir,
                        '%s/%s' % (self.str_outputDir, os.path.basename(str_dir)))

    d_ret = {
        'status':       b_status,
        'd_env':        d_env,
        'd_inputFile':  d_inputFile,
        'd_slides':     d_slides,
        'd_html':       d_html,
        'd_assemble':   d_assemble,
        'numSlides':    numSlides,
        'runTime':      other.toc()
    }

    self.dp.qprint('Returning from tslide run...', level=1)
    return d_ret
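# copy_tree() above is presumably distutils.dir_util.copy_tree (the import is
# not shown). distutils was removed in Python 3.12, so on newer interpreters
# the same per-directory copy can be expressed with shutil.copytree -- a
# drop-in sketch, assuming Python >= 3.8 for dirs_exist_ok:
import os
import shutil

def dir_copy(str_srcDir: str, str_outputDir: str) -> None:
    # Mirror the call above: copy str_srcDir into
    # <outputDir>/<basename(srcDir)>, merging into an existing target.
    shutil.copytree(
        str_srcDir,
        os.path.join(str_outputDir, os.path.basename(str_srcDir)),
        dirs_exist_ok = True)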
def run(self, *args, **kwargs):
    """
    Probe the input tree and print.
    """
    b_status        = True
    d_probe         = {}
    d_tree          = {}
    d_stats         = {}
    str_error       = ''
    b_timerStart    = False
    d_test          = {}

    for k, v in kwargs.items():
        if k == 'timerStart': b_timerStart = bool(v)

    if b_timerStart:
        other.tic()

    if not os.path.exists(self.str_inputDir):
        b_status = False
        self.dp.qprint(
            "input directory either not specified or does not exist.",
            comms='error')
        error.warn(self, 'inputDirFail', exitToOS=True, drawBox=True)
        str_error = 'error captured while accessing input directory'

    if b_status:
        str_origDir = os.getcwd()
        if self.b_relativeDir:
            os.chdir(self.str_inputDir)
            str_rootDir = '.'
        else:
            str_rootDir = self.str_inputDir

        d_probe = self.tree_probe(root=str_rootDir)
        b_status = b_status and d_probe['status']

        d_tree = self.tree_construct(
            l_files             = d_probe['l_files'],
            constructCallback   = self.dirsize_get
        )
        b_status = b_status and d_tree['status']

        if self.b_test:
            d_test = self.test_run(*args, **kwargs)
            b_status = b_status and d_test['status']
        else:
            if self.b_stats or self.b_statsReverse:
                d_stats = self.stats_compute()
                self.dp.qprint('Total size (raw): %d' % d_stats['totalSize'],
                               level=1)
                self.dp.qprint('Total size (human): %s' % d_stats['totalSize_human'],
                               level=1)
                self.dp.qprint('Total files: %s' % d_stats['files'],
                               level=1)
                self.dp.qprint('Total dirs: %s' % d_stats['dirs'],
                               level=1)
                b_status = b_status and d_stats['status']
                if self.b_jsonStats:
                    print(json.dumps(d_stats, indent=4, sort_keys=True))

        if self.b_relativeDir:
            os.chdir(str_origDir)

    d_ret = {
        'status':       b_status,
        'd_probe':      d_probe,
        'd_tree':       d_tree,
        'd_stats':      d_stats,
        'd_test':       d_test,
        'str_error':    str_error,
        'runTime':      other.toc()
    }

    if self.b_json:
        print(json.dumps(d_ret, indent=4, sort_keys=True))

    return d_ret
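# A minimal usage sketch for the run() above. The 'tree' object is assumed to
# be an already-constructed pftree-like instance exposing this run() method;
# how it is constructed is not shown here.
import json

def tree_report(tree) -> None:
    d_run = tree.run(timerStart = True)
    if d_run['status']:
        # d_stats is only populated when stats reporting was requested.
        print('runTime: %s' % d_run['runTime'])
        print(json.dumps(d_run['d_stats'], indent = 4, sort_keys = True))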