def hide_some_files(collection, site):
    """Hide every sendable file in the given collection/site.

    Runs the HideFile update for each file id reported by
    FilesInCollectionSiteForSend and prints the update count per file.
    """
    hide = Query("HideFile")
    sendable = Query("FilesInCollectionSiteForSend").run(
        collection=collection, site=site)
    for rec in sendable:
        count = hide.execute(rec.file_id)
        print(f"{rec.file_id}: {count} row(s) updated")
def main():
    """Control loop for the posda import service process.

    Repeatedly reads the import-control state and reacts to it:
    claims the service when it is "waiting to go inservice", keeps
    working while it owns the running service, and returns when asked
    to shut down or when another process owns the service.
    """
    get_import = Query("GetPosdaFilesImportControl")
    go_in_service = Query("GoInServicePosdaImport")
    quit_q = Query("RelinquishControlPosdaImport")
    # NOTE(review): this loop has no sleep/throttle between iterations;
    # confirm whether a delay is expected when the state is unchanged.
    while True:
        # Get current status: take the LAST row the query yields.
        # The original relied on the loop variable surviving the loop,
        # which raises NameError when the query returns no rows at all;
        # initialize and guard so an empty result is reported instead.
        import_control = None
        for import_control in get_import.run():
            pass
        if import_control is None:
            printe("no import control row found for posda_import")
            return
        if import_control.status == "waiting to go inservice":
            go_in_service.execute(MYPID)
            continue
        if import_control.status == "service process running":
            if import_control.processor_pid != MYPID:
                printe("Some other process controlling import")
                return
            if import_control.pending_change_request == "shutdown":
                quit_q.execute()
                printe("Relinquished control of posda_import")
                return
            printe("Doing actual work (well, pretending)")
        else:
            printe(f"unknown state ({import_control.status}) for posda_import")
def print_distinct_series_by_collsite(collection, site):
    """Print the first distinct series for collection/site.

    Prints a not-found message when the query yields no rows
    (same contract as the original for/else form).
    """
    rows = Query("DistinctSeriesByCollectionSite").run(
        site_name=site, project_name=collection)
    found = False
    for row in rows:
        print(row)
        found = True
        break
    if not found:
        print(f"No collection/site {collection}//{site}")
def build_new_timepoint(activity_id, file_list: List[int]) -> int:
    """Create a new activity timepoint and attach *file_list* to it.

    Fixes vs. original: the return annotation said ``None`` although the
    function returns ``tp_id``; the keyword arg was misspelled
    ``actiity_id``; an empty *file_list* produced a malformed
    ``INSERT ... VALUES`` with no row list.

    :param activity_id: activity the new timepoint belongs to
    :param file_list: posda file ids to place in the timepoint
    :return: the new activity_timepoint id
    """
    # Create the new timepoint.
    # NOTE(review): keyword corrected from "actiity_id" — confirm the
    # CreateActivityTimepoint query binds a parameter named "activity_id".
    Query("CreateActivityTimepoint").execute(
        activity_id=activity_id,
        who_created='none',
        comment='UncompressFilesTp.py',
        creating_user='******')
    tp_id = Query("GetActivityTimepointId").get_single_value()
    if not file_list:
        # Nothing to attach; an empty VALUES list would be a SQL error.
        return tp_id
    with Database("posda_files").cursor() as cur:
        # Build a single multi-row insert instead of one insert per file;
        # mogrify handles the per-row parameter escaping.
        rows = b','.join(
            cur.mogrify("(%s, %s)", [tp_id, f]) for f in file_list)
        cur.execute(b"insert into activity_timepoint_file values " + rows)
    return tp_id
# from collections import namedtuple, defaultdict

help = """
Consume a posda filelist (plist) file and import all listed files into Posda.
"""

parser = args.Parser(arguments=[
    args.CustomArgument("plist", "The Posda Filelist (plist) file to read"),
], purpose="Import plist into posda", help=help)
pargs = parser.parse()

# Preload queries that are used inside the per-file helpers so each one
# is loaded only once instead of on every call.
get_posda_file_id_by_digest = Query("GetPosdaFileIdByDigest")
insert_file_posda = Query("InsertFilePosda")
get_current_posda_file_id = Query("GetCurrentPosdaFileId")
insert_file_import_long = Query("InsertFileImportLong")
make_posda_file_ready_to_process = Query("MakePosdaFileReadyToProcess")
insert_file_location = Query("InsertFileLocation")
get_matching_root = Query("GetMatchingRootID")
create_new_root = Query("InsertNewRootPath")


def make_rel_path_from_digest(digest):
    """Return the fan-out relative path ab/cd/ef/<digest> for *digest*."""
    rel = Path()
    for pair in (digest[0:2], digest[2:4], digest[4:6]):
        rel = rel / pair
    return rel / digest


# def get_xfer_syntax(filename):
#     try:
sop_seen_in_file[input_line.sop] += 1 print(f"Read {len(lines)} input lines.") background.daemonize() print(f"Comparing spreadsheet to Intake, for {pargs.collection}|{pargs.site}") print(f"Read {len(lines)} input lines.") sop_seen_in_query = defaultdict(int) query = {} Record = namedtuple("Record", ["patient_id", "sop_instance_uid", "study_instance_uid", "series_instance_uid"]) for row in Query("IntakeImagesByCollectionSitePlus").run( collection=pargs.collection, site=pargs.site): decoded = [i.decode() for i in row] row = Record(*decoded) query[row.sop_instance_uid] = row sop_seen_in_query[row.sop_instance_uid] += 1 Error = namedtuple("Error", ["sop", "type", "file_val", "intake_val"]) errors = [] def eq_or_err(sop, what, qval, lval): if lval != qval: errors.append(Error(sop, f"{what} mismatch", lval, qval)) for sop in lines: if sop not in query:
_, invoc_id, activity_id, notify = sys.argv # collect series from stdin series = [] for line in sys.stdin: series.append(line.strip()) series_count = len(series) print(f"Going to background to process {series_count} series") background = BackgroundProcess(invoc_id, notify, activity_id) background.daemonize() q = Query("SeriesInHierarchyBySeriesExtendedFurther") start = time.time() # the epoch # In python, after you call daemonize print will automatically # print to the email print("Initial line written to email") hierarchy = None report = background.create_report(f"DICOM Hierarchy for {series_count} series") writer = csv.writer(report) writer.writerow([ "collection", "site", "patient_id", "study_instance_uid",
#         site_name='MAASTRO',
#         project_name='Radiomics') as r:
#     for row in r:
#         print(row)
#         print(row.series_instance_uid)
#         break

# Show the first distinct series for the hard-coded collection/site.
for row in Query("DistinctSeriesByCollectionSite").run(
        site_name='MAASTRO', project_name='Radiomics'):
    print(row)
    break

# hide some files
hide_file = Query("HideFile")
sendable = Query("FilesInCollectionSiteForSend").run(
    collection='Radiomics', site='MAASTRO')
for row in sendable:
    count = hide_file.execute(row.file_id)
    print(f"{row.file_id}: {count} row(s) updated")

# unhide all files
with Database("posda_files").cursor() as cur:
    cur.execute("update ctp_file "
                "set visibility = null "
                "where visibility = 'hidden'")
# from collections import namedtuple, defaultdict help = """ Consume a posda filelist (plist) file and import all listed files into Posda. """ parser = args.Parser(arguments=[ args.CustomArgument("plist", "The Posda Filelist (plist) file to read"), ], purpose="Import plist into posda", help=help) pargs = parser.parse() # Preload queries that will be used in functions, so they aren't loaded repeatedly get_posda_file_id_by_digest = Query("GetPosdaFileIdByDigest") insert_file_posda = Query("InsertFilePosda") get_current_posda_file_id = Query("GetCurrentPosdaFileId") insert_file_import_long = Query("InsertFileImportLong") make_posda_file_ready_to_process = Query("MakePosdaFileReadyToProcess") insert_file_location = Query("InsertFileLocation") def make_rel_path_from_digest(digest): return Path() / digest[:2] / digest[2:4] / digest[4:6] / digest def get_xfer_syntax(filename): try: ds = pydicom.dcmread(filename) return ds.file_meta.TransferSyntaxUID
    args.Presets.description,
    args.CustomArgument("file_query_name",
                        "Name of query to use for getting list of "
                        "files in given series"),
    args.Presets.notify],
    purpose="Background Process to scan for PHI", help=help)
args = parser.parse()

# TODO: this (and some other stuff) could be moved into
# an util lib, posda.background.shortcuts or something?
subprocess_id = Query("CreateBackgroundSubprocess").get_single_value(
    subprocess_invocation_id=args.background_id,
    command_executed='???',
    foreground_pid=os.getpid(),
    user_to_notify=args.notify
)

# A missing or zero id means the subprocess row was never created; bail out.
if subprocess_id is None or subprocess_id == 0:
    raise RuntimeError("Failed to get subprocess ID")
print(f"Subprocess_id is: {subprocess_id}")

# Series list to scan arrives on stdin.
input_lines = posda.main.get_stdin_input()
print(f"Found list of {len(input_lines)} series to scan")
print("Forking background process")

###############################################################################