def load_dcm(filename): dcm = dicom.ReadFile(filename) # http://www.creatis.insa-lyon.fr/pipermail/dcmlib/2005-September/002141.html # http://www.dabsoft.ch/dicom/3/C.7.6.2.1.1/ # http://xmedcon.sourceforge.net/Docs/OrientationDicomStandard img0 = dcm[(0x5200, 0x9230)][0] # Per-frame Functional Groups Sequence spacingXY = img0[(0x0028, 0x9110)][0][(0x0028, 0x0030)].value # Pixel Spacing pos0 = img0[(0x0020, 0x9113)][0][(0x0020, 0x0032)].value # Image Position (Patient) orientation = img0[(0x0020, 0x9116)][0][( 0x0020, 0x0037)].value # Image Orientation (Patient) i = 1 spacingZ = 0 while spacingZ == 0: img1 = dcm[(0x5200, 0x9230)][i] # Per-frame Functional Groups Sequence pos1 = img1[(0x0020, 0x9113)][0][(0x0020, 0x0032)].value # Image Position (Patient) spacingZ = np.linalg.norm(np.array(pos1) - np.array(pos0)) i += 1 dcm_spacing = [spacingXY[0], spacingXY[1], spacingZ] #dcm_spacing = [spacingZ,spacingXY[1],spacingXY[0]] print 'DICOM - spacing', dcm_spacing X = np.array([orientation[0], orientation[1], orientation[2]]) Y = np.array([orientation[3], orientation[4], orientation[5]]) Z = np.cross(X, Y) X *= dcm_spacing[0] Y *= dcm_spacing[1] Z *= dcm_spacing[2] scaling = np.diag(dcm_spacing + [1]) dcm_affine = np.array([[X[0], Y[0], Z[0], pos0[0]], [X[1], Y[1], Z[1], pos0[1]], [X[2], Y[2], Z[2], pos0[2]], [0, 0, 0, 1]]) #dcm_affine = np.dot( dcm_affine, scaling) data = dcm.pixel_array.astype('float') return data, dcm_affine, dcm_spacing
def load_movie(filename): dcm = dicom.ReadFile(filename) # http://www.creatis.insa-lyon.fr/pipermail/dcmlib/2005-September/002141.html # http://www.dabsoft.ch/dicom/3/C.7.6.2.1.1/ # http://xmedcon.sourceforge.net/Docs/OrientationDicomStandard frames = dcm[(0x5200, 0x9230)].value # Per-frame Functional Groups Sequence img0 = frames[0] # Per-frame Functional Groups Sequence spacingXY = img0[(0x0028, 0x9110)][0][(0x0028, 0x0030)].value # Pixel Spacing pos0 = img0[(0x0020, 0x9113)][0][(0x0020, 0x0032)].value # Image Position (Patient) orientation = img0[(0x0020, 0x9116)][0][( 0x0020, 0x0037)].value # Image Orientation (Patient) for i in range(1, len(frames)): img = frames[i] pos_i = img[(0x0020, 0x9113)][0][(0x0020, 0x0032)].value # Image Position (Patient) orientation_i = img[(0x0020, 0x9116)][0][( 0x0020, 0x0037)].value # Image Orientation (Patient) if pos_i != pos0 or orientation_i != orientation: raise ValueError('this is not a movie') dcm_spacing = [spacingXY[0], spacingXY[1], 0] print 'DICOM - spacing', dcm_spacing X = np.array([orientation[0], orientation[1], orientation[2]]) Y = np.array([orientation[3], orientation[4], orientation[5]]) X *= dcm_spacing[0] Y *= dcm_spacing[1] scaling = np.diag(dcm_spacing + [1]) dcm_affine = np.array([[X[0], Y[0], 0, pos0[0]], [X[1], Y[1], 0, pos0[1]], [X[2], Y[2], 0, pos0[2]], [0, 0, 0, 1]]) data = dcm.pixel_array.transpose() #.astype('int') return data, dcm_affine, dcm_spacing
def list_dir(location,path,dir,prnt=True,read_dicom=False): meta_ids={}; # store meta and file ids to edit or delete file_ids={}; c.execute("select f.path,s.name,f.description,a.name,m.value,f.id from external_files f LEFT JOIN studies s ON f.study_id=s.id LEFT JOIN access_ids a ON f.access_id=a.id left join external_meta_info m on m.file_id=f.id where location=%s and path like \"%s%%\" group by f.id" % (location,path)) dbfiles=c.fetchall(); if prnt: print "{0:30}|{1:40}|{2:30}|{3:20}".format("","study name","document description","access") print "-"*123 i=0; for filename in os.listdir("."): j=0; ent=[]; for dbf in dbfiles: # Search database for filename if dbf[0].endswith(filename): ent=dbf; if len(ent)>0: i+=1; file_ids["%i" % (i)]=ent[5]; if prnt: print "{0:6}: {1:30}|{2:40}|{3:30}|{4:20}".format("%i" % (i),filename,ent[1],ent[2],ent[3]) if ent[4] is not None: # display meta data c.execute("select name,value,id from external_meta_info where file_id=%s" % (ent[5])) for m in c.fetchall(): j+=1; meta_ids["%i.%i" % (i,j)]=m[2]; if prnt: if m[0]=="redmine_project": c.execute("select name from projects where id=%s" % m[1]) print "{0:6}: {1:10}: {2:40}".format("%i.%i" % (i,j), m[0],c.fetchone()[0]) else: print "{0:6}: {1:10}: {2:40}".format("%i.%i" % (i,j),m[0],m[1]) else: if prnt: try: dcm=dicom.ReadFile(filename) print " {0:30}: DICOM: {1:50}".format(filename,dcm.SeriesDescription) except: print " {0:30}".format(filename) return (file_ids,meta_ids)
def _imread_dcm(filename):
    """Open a DICOM image with pydicom and return its pixels as a NumPy array.

    Handles signed/unsigned integer pixel data of any allocated bit depth,
    endianness conversion, multi-frame files and multi-sample (colour) data.

    Raises
    ------
    TypeError
        If PixelRepresentation/BitsAllocated do not map to a NumPy dtype.
    NotImplementedError
        For SamplesPerPixel > 1 with BitsAllocated != 8.
    """
    import dicom
    dcm = dicom.ReadFile(filename)
    # **********************************************************************
    # The following is necessary until pydicom numpy support is improved:
    # (after that, a simple: 'arr = dcm.PixelArray' will work the same)
    # Build e.g. 'uint16' / 'int8' from the DICOM pixel description.
    format_str = '%sint%s' % (('u', '')[dcm.PixelRepresentation],
                              dcm.BitsAllocated)
    try:
        dtype = np.dtype(format_str)
    except TypeError:
        raise TypeError("Data type not understood by NumPy: "
                        "PixelRepresentation=%d, BitsAllocated=%d"
                        % (dcm.PixelRepresentation, dcm.BitsAllocated))
    # np.fromstring is deprecated (removed in modern NumPy); frombuffer is
    # the replacement, but it returns a read-only view, so copy to allow
    # the in-place byteswap below.
    arr = np.frombuffer(dcm.PixelData, dtype).copy()
    try:
        # pydicom 0.9.3:
        dcm_is_little_endian = dcm.isLittleEndian
    except AttributeError:
        # pydicom 0.9.4:
        dcm_is_little_endian = dcm.is_little_endian
    if dcm_is_little_endian != (sys.byteorder == 'little'):
        # File endianness differs from the host: swap bytes in place.
        arr.byteswap(True)
    # Reshape the flat buffer according to frame count and samples/pixel.
    if hasattr(dcm, 'NumberofFrames') and dcm.NumberofFrames > 1:
        if dcm.SamplesperPixel > 1:
            arr = arr.reshape(dcm.SamplesperPixel, dcm.NumberofFrames,
                              dcm.Rows, dcm.Columns)
        else:
            arr = arr.reshape(dcm.NumberofFrames, dcm.Rows, dcm.Columns)
    else:
        if dcm.SamplesperPixel > 1:
            if dcm.BitsAllocated == 8:
                arr = arr.reshape(dcm.SamplesperPixel, dcm.Rows, dcm.Columns)
            else:
                raise NotImplementedError(
                    "This code only handles "
                    "SamplesPerPixel > 1 if Bits Allocated = 8")
        else:
            arr = arr.reshape(dcm.Rows, dcm.Columns)
    # **********************************************************************
    return arr
def print_dicom(dirname): lookup = ['SeriesNumber', 'SeriesDescription'] entries = [] tallies = {} count = 1 fnames = os.listdir(dirname) print fnames[:50] sorted(fnames, reverse=True) for fname in fnames: data = dicom.ReadFile(os.path.join(dirname, fname), force=True) hdr = [str(getattr(data, entry, "NO %s" % entry)) for entry in lookup] if hdr in entries: count += 1 else: entries.append(hdr) tallies[str(hdr)] = str(count) print '\t'.join(hdr + [str(count)]) count = 1 return tallies
def __init__(self, uid, directory):
    '''Locate an RT Plan DICOM file by SOP Instance UID.

    Walks `directory` recursively for ``*.dcm`` files and stores the first
    one whose SOPInstanceUID matches `uid` and whose SOPClassUID is
    'RT Plan Storage' in ``self.dicom_file``.  If no file matches (or the
    directory holds no .dcm files), ``self.dicom_file`` is set to None.
    '''
    filenames = []
    for root, dirs, files in os.walk(directory):
        for f in files:
            if f.endswith('.dcm'):
                filenames.append(os.path.join(root, f))
    for name in filenames:
        try:
            self.dicom_file = dicom.ReadFile(name)
            if self.dicom_file.SOPInstanceUID == uid and \
                    self.dicom_file.SOPClassUID == 'RT Plan Storage':
                break
        except Exception:
            # Unreadable / non-DICOM file: skip it.  (Was a bare `except:`,
            # which also swallowed KeyboardInterrupt and SystemExit.)
            pass
    else:
        # Loop finished without `break`: no matching plan was found.
        self.dicom_file = None
def load_dicom(filename):
    """Read a DICOM file's pixel data.

    Returns ({basename_without_extension: pixel_array}, None) on success,
    or (None, error_message) on any failure.
    """
    try:
        base = osp.basename(filename)
        key = osp.splitext(base)[0]
        pixels = dicom.ReadFile(filename).PixelArray
    except Exception as error:
        return None, str(error)
    return {key: pixels}, None
def dicom2sql(files):
    """Read modality metadata from DICOM files, let the user correct
    hospital/department tags via Tk dialogs, and insert the machines into
    the SQL database (people/hospital/company/modality/software/qa tables).

    Relies on module-level names not defined in this block: ``db``,
    ``do_dicom``, ``dicom``, ``Modality``, ``Tk``, ``CorrectListOfTags``,
    ``phone_numbers``, ``numpy``.

    Returns a tuple (message, comma-joined list of added station names).
    """
    cur = db.cursor()
    cur.execute('pragma foreign_keys=ON')
    machines = []
    # Per-machine attributes that DICOM does not carry; reset per file below.
    contact = None
    delivery = None
    lab = None
    serial = None
    qa = None
    discard = None
    comment = None
    dap = -1
    aek = -1
    responsibility = 1
    mobile = 0
    if do_dicom:
        # read input data from files
        # WHY CANNOT EVERYONE JUST USE UNICODE? I SHOULD CONVERT TO PYTHON 3!
        for fileName in files:
            ds = dicom.ReadFile(fileName, stop_before_pixels=True)
            station_name = unicode(ds[0x8, 0x1010].value)   # Station Name
            manufacturer = unicode(ds[0x8, 0x70].value)     # Manufacturer
            series_date = unicode(ds[0x8, 0x20].value)      # NOTE(review): never used
            institution = unicode(ds[0x8, 0x80].value)      # Institution Name
            modality = unicode(ds[0x8, 0x60].value)         # Modality
            # Optional tags: fall back to None / "N/A" when absent.
            try:
                model = unicode(ds[0x8, 0x1090].value)
            except:
                model = None
            try:
                department = unicode(ds[0x8, 0x1040].value)
            except KeyError:
                department = "N/A"
            try:
                software = unicode(ds[0x18, 0x1020].value)
            except:
                software = None
            try:
                detector = unicode(ds[0x18, 0x7006].value)  # Detector description
            except:
                detector = None
            # Skip files whose station is already collected.
            alreadyExists = False
            # NOTE(review): `machines > 0` compares a list with an int --
            # always True on Python 2; presumably `len(machines) > 0` was meant.
            if machines > 0:
                for machine in machines:
                    if machine.getStationName() == station_name:
                        alreadyExists = True
            if alreadyExists:
                # print "Modality of filename", fileName, "already in list"
                continue
            else:
                machines.append(Modality())
                if institution:
                    machines[-1].addInstitution(institution)
                if department:
                    machines[-1].addDepartment(department)
                if station_name:
                    machines[-1].addStationName(station_name)
                if manufacturer:
                    machines[-1].addManufacturer(manufacturer)
                if model:
                    machines[-1].addModel(model)
                if detector:
                    machines[-1].addDetector(detector)
                if software:
                    machines[-1].addSoftware(software)
                if modality:
                    machines[-1].addModality(modality)
                if serial:
                    machines[-1].addSerial(serial)
                if delivery:
                    machines[-1].addDeliveryDate(delivery)
                if contact:
                    machines[-1].addContact(contact)
                if lab:
                    machines[-1].addLab(lab)
                if not responsibility:
                    machines[-1].addResponsibility(responsibility)
                machines[-1].addMobile(mobile)
                if comment:
                    machines[-1].addComment(comment)
                if qa:
                    # qa may be a single (date, doc) pair or a list of pairs.
                    if numpy.shape(qa) == (2, ):
                        machines[-1].addQA(qa[0], qa[1])
                    else:
                        for eachQA in qa:
                            machines[-1].addQA(eachQA[0], eachQA[1])
                if dap > -1:
                    machines[-1].addHasDap(dap)
                if aek > -1:
                    machines[-1].addHasAek(aek)
                if discard:
                    machines[-1].addDiscardDate(discard)
                # Reset the non-DICOM attributes for the next file.
                contact = None
                lab = None
                delivery = None
                serial = None
                qa = None
                discard = None
                aek = -1
                dap = -1
                model = None
                responsibility = 1
                comment = None
                mobile = 0
    ############################################################################
    # We are done with input
    # Insert into SQL
    # sanity check with user input
    all_hospitals = {}
    # all_people = []
    all_departments = {}
    # Collect the distinct hospital/department names for user confirmation.
    for modality in machines:
        if not modality.getInstitution() in all_hospitals.keys():
            all_hospitals[modality.getInstitution()] = modality.getStationName()
        if not modality.getDepartment() in all_departments.keys():
            all_departments[modality.getDepartment()] = modality.getStationName()
    # Tk dialog: let the user correct hospital names ("Sykehus" = hospital).
    root = Tk()
    checkHospitalTags = CorrectListOfTags(root, all_hospitals, "Sykehus")
    root.mainloop()
    if checkHospitalTags.getState():
        updatedHospitalList = checkHospitalTags.getParameters()
    # Tk dialog: correct department names ("Avdeling" = department).
    root = Tk()
    checkDepartmentTags = CorrectListOfTags(root, all_departments, "Avdeling")
    # NOTE(review): `root = mainloop()` calls a bare name instead of
    # `root.mainloop()` -- almost certainly a typo; preserved as-is.
    root = mainloop()
    if checkDepartmentTags.getState():
        updatedDepartmentList = checkDepartmentTags.getParameters()
    # Map old names -> user-corrected names and rewrite each machine.
    # NOTE(review): relies on dict key order matching the dialog's list order.
    hospitals_to_update = zip(all_hospitals.keys(), updatedHospitalList)
    departments_to_update = zip(all_departments.keys(), updatedDepartmentList)
    for oldHospital, updatedHospital in hospitals_to_update:
        for modality in machines:
            if modality.getInstitution() == oldHospital:
                modality.addInstitution(updatedHospital)  # add = set
    for oldDepartment, updatedDepartment in departments_to_update:
        for modality in machines:
            if modality.getDepartment() == oldDepartment:
                modality.addDepartment(updatedDepartment)  # add = set
    for modality in machines:
        # start SQL insert
        FK_mod_hos = None
        FK_mod_com = None
        FK_mod_ppl = None
        FK_mod_sw = None
        # get contact ID, insert if not found
        if modality.getContact():
            cur.execute("SELECT ppl_id, ppl_job, ppl_phone FROM people WHERE ppl_name = ?",
                        (modality.getContact(), ))
            result = cur.fetchall()
            if result:
                assert len(result) == 1
                FK_mod_ppl = result[0][0]
                # Backfill job/phone from the phone_numbers lookup if missing.
                if (not result[0][1] or not result[0][2]
                        ) and modality.getContact() in phone_numbers.keys():
                    cur.execute(
                        "UPDATE people SET ppl_job = :job, ppl_phone = :phone WHERE ppl_name = :name",
                        {
                            "job": phone_numbers[modality.getContact()][0],
                            "phone": phone_numbers[modality.getContact()][1],
                            "name": modality.getContact()
                        })
            if not result:
                print modality.getContact(), "not found, adding into database."
                cur.execute("INSERT INTO people (ppl_name, ppl_job, ppl_phone) VALUES (?, ?, ?)", \
                            (modality.getContact(), phone_numbers[modality.getContact()][0], phone_numbers[modality.getContact()][1]))
                #cur.execute("SELECT LAST_INSERT_ID()")
                FK_mod_ppl = cur.lastrowid  # fetchall()[0][0]
        # get hospital ID, insert if not found
        if modality.getInstitution():
            cur.execute("SELECT hos_id FROM hospital WHERE hos_name = ?",
                        (modality.getInstitution(), ))
            result = cur.fetchall()
            if result:
                assert len(result) == 1
                FK_mod_hos = result[0][0]
            if not result:
                print modality.getInstitution(), "not found, adding into database."
                cur.execute("INSERT INTO hospital (hos_name) VALUES(?)",
                            (modality.getInstitution(), ))
                # cur.execute("SELECT LAST_INSERT_ID()")
                FK_mod_hos = cur.lastrowid  # fetchall()[0][0]
        # get company ID, insert if not found
        if modality.getManufacturer():
            cur.execute(u"SELECT com_id FROM company WHERE com_name = ?",
                        (modality.getManufacturer(), ))
            result = cur.fetchall()
            if result:
                assert len(result) == 1
                FK_mod_com = result[0][0]
            if not result:
                print modality.getManufacturer(), "not found, adding into database."
                cur.execute(u"INSERT INTO company (com_name) VALUES(?)",
                            (modality.getManufacturer(), ))
                # cur.execute("SELECT LAST_INSERT_ID()")
                FK_mod_com = cur.lastrowid  # fetchall()[0][0]
        # insert modality into database
        cur.execute(u"INSERT INTO modality (station_name, department, model, deliverydate, discarddate, \
            lab, serial, FK_mod_hos, FK_mod_com, FK_mod_ppl, modality_name, \
            has_dap, comment, has_aek, responsibility, mobile, detector) \
            VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?, ?,?)" , \
            (modality.getStationName(), modality.getDepartment(), modality.getModel(),
             modality.getDeliveryDate(), modality.getDiscardDate(), modality.getLab(),
             modality.getSerial(), FK_mod_hos, FK_mod_com, FK_mod_ppl, modality.getModality(),
             modality.getHasDap(), modality.getComment(), modality.getHasAek(),
             modality.getResponsibility(), modality.getMobile(), modality.getDetector()))
        # get modality ID
        # cur.execute("SELECT LAST_INSERT_ID()")
        mod_id = cur.lastrowid  # fetchall()[0][0]
        if modality.getSoftware():
            # NOTE(review): `str(modality.getSoftware(), )` passes a string,
            # not a tuple, as the parameter sequence -- likely raises here,
            # which the bare except below hides; presumably
            # `(str(modality.getSoftware()), )` was intended.
            try:
                cur.execute("SELECT sw_id FROM software WHERE sw_name = ?",
                            str(modality.getSoftware(), ))
            except:
                print modality.getSoftware()
            result = cur.fetchall()
            if result:
                assert len(result) == 1
                FK_mod_sw = result[0][0]
            if not result:
                print modality.getSoftware(), "not found, adding into database."
                cur.execute(
                    "INSERT INTO software (sw_name, FK_sw_mod) VALUES(?, ?)",
                    (unicode(modality.getSoftware()), mod_id))
        if modality.getQA():
            qa = modality.getQA()
            cur.execute("SELECT study_date FROM qa WHERE FK_qa_mod = ?", (mod_id, ))
            study_dates = cur.fetchall()
            # (older single-pair QA insert variant removed; kept in VCS history)
            # Insert only QA entries whose study date is not already stored.
            for eachQA in qa:
                if not eachQA[0] in study_dates:
                    cur.execute(
                        "INSERT INTO qa (study_date, doc_number, qa_comment, FK_qa_mod) VALUES (?, ?, ?, ?)",
                        (eachQA[0], eachQA[1], eachQA[2], mod_id))
        # Reset foreign keys before the next machine.
        FK_mod_hos = None
        FK_mod_com = None
        FK_mod_ppl = None
        FK_mod_sw = None
    # (commented-out one-off ALTER TABLE ... utf8 collation migrations removed;
    # kept in VCS history)
    db.commit()
    list_of_added_modalities = []
    for modality in machines:
        list_of_added_modalities.append(modality.getStationName())
    # Norwegian: "Har lagt til følgende maskiner" = "Added the following machines".
    return u"Har lagt til følgende maskiner: ", ",".join(
        list_of_added_modalities)
c.execute( "select f.path,s.name,f.description,a.name,f.id,f.study_id from external_files f LEFT JOIN studies s ON f.study_id=s.id LEFT JOIN access_ids a ON f.access_id=a.id where location=%s and path = \"%s/%s\" " % (location, path, f)) fn = c.fetchone() if fn is None: print "%s not in database. Add it to a project by calling 'metadata %s'" % ( f, f) else: print "{0:30}|{1:40}|{2:30}|{3:20}".format(f, fn[1], fn[2], fn[3]) if options.header == "": q = "insert into external_meta_info (study_id,file_id,name,value) values (0,%s,%s,%s)" v = (fn[4], name, value) c.execute(q, v) else: try: dcm = dicom.ReadFile(f) h = dcm[eval("(%s)" % options.header)] q = "insert into external_meta_info (study_id,file_id,name,value) values (0,%s,%s,%s)" v = (fn[4], h.name, h.value) c.execute(q, v) except: print " skipped, not a DICOM" elif options.scan: if args[0] == ".": dir = os.getcwd() else: dir = args[0] options.authorid = choose_from_table("users", "id,login", options.authorid) options.studyid = choose_from_table("studies", "id,name", options.studyid, create_study)
def dcm2volume(filename):
    """Split a multi-frame DICOM file into a list of sub-volumes.

    Frames are grouped into runs of consecutive frames that share the same
    Image Orientation (Patient); each run yields one (data_slab, affine)
    tuple, where the affine is built from the run's first-frame position,
    its orientation and the derived voxel spacing.

    Returns a list of (ndarray, 4x4 ndarray) tuples.
    NOTE(review): data is sliced along axis 2 (data[:, :, cursor:cursor+length]),
    which assumes the frame index is the last axis -- confirm against
    pixel_array layout for these files.
    """
    dcm = dicom.ReadFile(filename)
    data = dcm.pixel_array.astype('float')  # [::-1,:,:]
    vol = []
    # References on DICOM patient-space geometry:
    # http://www.creatis.insa-lyon.fr/pipermail/dcmlib/2005-September/002141.html
    # http://www.dabsoft.ch/dicom/3/C.7.6.2.1.1/
    # http://xmedcon.sourceforge.net/Docs/OrientationDicomStandard
    frames = dcm[(0x5200, 0x9230)].value  # Per-frame Functional Groups Sequence
    cursor = 0   # index of the first frame of the current run
    length = 1   # number of frames in the current run so far
    #print len(frames.value)
    for i in range(len(frames) - 1):
        current = frames[i]
        if length == 1:
            # First frame of a new run: record its geometry and derive the
            # slice spacing from the distance to the next frame.
            pos0 = current[(0x0020, 0x9113)][0][(
                0x0020, 0x0032)].value  # Image Position (Patient)
            spacingXY = current[(0x0028, 0x9110)][0][(0x0028, 0x0030)].value  # Pixel Spacing
            pos = current[(0x0020, 0x9113)][0][(
                0x0020, 0x0032)].value  # Image Position (Patient)
            orientation = current[(0x0020, 0x9116)][0][(
                0x0020, 0x0037)].value  # Image Orientation (Patient)
            next = frames[i + 1]
            next_pos = next[(0x0020, 0x9113)][0][(
                0x0020, 0x0032)].value  # Image Position (Patient)
            next_orientation = next[(0x0020, 0x9116)][0][(
                0x0020, 0x0037)].value  # Image Orientation (Patient)
            spacingZ = np.linalg.norm(np.array(next_pos) - np.array(pos))
            if spacingZ == 0:
                raise ValueError('null spacing: not implemented')
        # Compare this frame's orientation with the next one's to decide
        # whether the run continues or ends here.
        orientation = current[(0x0020, 0x9116)][0][(
            0x0020, 0x0037)].value  # Image Orientation (Patient)
        next = frames[i + 1]
        next_orientation = next[(0x0020, 0x9116)][0][(
            0x0020, 0x0037)].value  # Image Orientation (Patient)
        if orientation == next_orientation and i < len(frames) - 2:
            length += 1
        else:
            # Run ends (orientation change, or we reached the last pair).
            if i == len(frames) - 2:
                # The final frame belongs to this run too.
                length += 1
            dcm_spacing = [spacingXY[0], spacingXY[1], spacingZ]
            # Row (X) / column (Y) direction cosines scaled by spacing;
            # Z completes the right-handed frame.
            X = np.array([orientation[0], orientation[1], orientation[2]])
            Y = np.array([orientation[3], orientation[4], orientation[5]])
            Z = np.cross(X, Y)
            X *= dcm_spacing[0]
            Y *= dcm_spacing[1]
            Z *= dcm_spacing[2]
            scaling = np.diag(dcm_spacing + [1])  # NOTE(review): unused
            dcm_affine = np.array([[X[0], Y[0], Z[0], pos0[0]],
                                   [X[1], Y[1], Z[1], pos0[1]],
                                   [X[2], Y[2], Z[2], pos0[2]],
                                   [0, 0, 0, 1]])
            vol.append((data[:, :, cursor:cursor + length, ],
                        dcm_affine))
            cursor += length
            length = 1
    return vol
def NPS(input_file_list, rectXY, options):
    """Compute the noise power spectrum (NPS) curve for a list of CT images
    and return a 1-D radially-averaged plot.

    (Original Norwegian docstring: "Regner ut NPS-kurven for filene i
    folderName, og returnerer et 1D-graf.")

    Parameters
    ----------
    input_file_list : list of DICOM file paths.
    rectXY : ((x1, x5), (y1, y5)) ROI corners, used when options['chooseROI'].
    options : dict of flags: 'chooseROI', 'dcCorrection', 'nROI', '2dpoly',
        'zeropad', 'nnps', 'medianValue'.

    Returns
    -------
    (noiseplot, imageInfo, median_trapz):
        noiseplot    -- {'x': frequencies (lp/cm), 'y': averaged NPS values}
        imageInfo    -- pydicom dataset of the first file (header only)
        median_trapz -- [median_x, median_y] of the NPS curve (trapezoid rule)

    Relies on module-level helpers not defined in this block:
    ``getIRLevel``, ``poly_sub``, ``noisePowerRadAv``, ``trapz``, ``strftime``.
    """
    imageInfo = dicom.ReadFile(input_file_list[0], stop_before_pixels=True)
    try:
        ConvolutionKernel = imageInfo.ConvolutionKernel
    except AttributeError:
        ConvolutionKernel = "N/A"
    IRLevel = getIRLevel(imageInfo)
    noiseplotList = [0] * len(input_file_list)
    for idx, imageFilename in enumerate(input_file_list):
        imageFile = dicom.ReadFile(imageFilename)
        raw_image = imageFile.pixel_array
        # NOTE(review): eval() on header strings is unsafe with untrusted
        # files; float(...) would be the safe equivalent.
        rescaleSlope = eval(str(imageFile.RescaleSlope))
        rescaleIntercept = eval(str(imageFile.RescaleIntercept))
        # Convert stored values to Hounsfield units.
        raw_image = np.add(np.multiply(raw_image, rescaleSlope), rescaleIntercept)
        if options['chooseROI']:
            # Crop to the user-chosen square ROI.
            (x1, x5) = rectXY[0]
            (y1, y5) = rectXY[1]
            dx = x5 - x1
            dy = y5 - y1
            assert dx == dy
            img_roi = raw_image[y1:y1 + dy, x1:x1 + dx]
        else:
            img_roi = raw_image
        if options['dcCorrection']:
            # Remove the DC component (mean) before the FFT.
            img_roi = np.subtract(img_roi, np.mean(img_roi))
        # imageFile.PixelSpacing: (x mm, y mm)
        pixelSpacing = float(imageFile.PixelSpacing[0])
        # pixelSpacing is in mm; img^2 is in HU^2 cm^2
        noiseplotList[idx] = {}
        # Accumulators start as False; np.add(False, x) == x for the first ROI.
        imgfft = False
        fft = False
        # _b += time.time() # before nroi
        if options['nROI']:
            # 9 ROI calculation: a 3x3 grid of half-size, half-overlapping
            # square ROIs covering the image.
            x1 = y1 = 0
            x5 = y5 = min(np.shape(img_roi))
            y2, y3, y4 = 0.25 * (y1 + y5), 0.5 * (y1 + y5), 0.75 * (y1 + y5)
            x2, x3, x4 = 0.25 * (x1 + x5), 0.5 * (x1 + x5), 0.75 * (x1 + x5)
            roiList = [[(x1, y3), (x3, y5)], [(x2, y3), (x4, y5)],
                       [(x3, y3), (x5, y5)], [(x1, y2), (x3, y4)],
                       [(x2, y2), (x4, y4)], [(x3, y2), (x5, y4)],
                       [(x1, y1), (x3, y3)], [(x2, y1), (x4, y3)],
                       [(x3, y1), (x5, y3)]]
            firstdx = 0
            firstdy = 0
            for roi in roiList:
                xfrom = roi[0][0]
                xto = roi[1][0]
                yfrom = roi[0][1]
                yto = roi[1][1]
                # Round coordinates to integer pixels.
                xfrom, xto, yfrom, yto = int(xfrom + .5), int(xto + .5), int(yfrom + .5), int(yto + .5)
                dy = yto - yfrom
                dx = xto - xfrom
                if not firstdx + firstdy:
                    # Remember the first ROI's size as the reference.
                    firstdx = dx
                    firstdy = dy
                if dx != dy or dx != firstdx:
                    # Skip ROIs that are not square or differ from the reference size.
                    continue
                img_sub_roi = img_roi[yfrom:yfrom + dy, xfrom:xfrom + dx]  # y1:y2, x1:x2
                if options['dcCorrection']:
                    img_sub_roi = np.subtract(img_sub_roi, np.mean(img_sub_roi))
                roi_size = np.shape(img_sub_roi)[0]
                if options['2dpoly']:
                    # Subtract 2D polynomial from ROI
                    img_sub_roi = poly_sub(img_sub_roi)
                if options['zeropad']:
                    # Embed the ROI in a 3x larger zero array (energy-scaled).
                    zeropadFactor = 3
                    ROIzeropad = np.zeros(
                        (roi_size * zeropadFactor, roi_size * zeropadFactor))
                    ROIzeropad[roi_size:2 * roi_size,
                               roi_size:2 * roi_size] = img_sub_roi * zeropadFactor**2
                    img_sub_roi = ROIzeropad / float(zeropadFactor)
                # for the first ROI, (img)fft is False, and np.add(False, x) == x.
                imgfft = np.add(
                    imgfft,
                    np.square(np.absolute(np.fft.fft2(img_sub_roi)))
                    * (pixelSpacing**2 / (len(img_sub_roi)**2)) / 100.)
                fft = np.add(fft, np.fft.fftfreq(img_sub_roi.shape[0],
                                                 d=float(pixelSpacing)))  # 1/mm
            # Average the plots
            imgfft = np.divide(imgfft, len(roiList))
            fft = np.divide(fft, len(roiList))
            try:
                FFT = noisePowerRadAv(imgfft)
            except IndexError:
                # Radial averaging failed for this image: skip it.
                continue
        else:
            # Just one ROI calculation
            roi_size = np.shape(img_roi)[0]
            if options['2dpoly']:
                img_roi = poly_sub(img_roi)
            if options['zeropad']:
                zeropadFactor = 3
                ROIzeropad = np.zeros(
                    (roi_size * zeropadFactor, roi_size * zeropadFactor))
                ROIzeropad[roi_size:2 * roi_size,
                           roi_size:2 * roi_size] = img_roi * zeropadFactor**2
                img_roi = ROIzeropad
            imgfft = np.square(np.absolute(
                np.fft.fft2(img_roi))) * (pixelSpacing**2 / (len(img_roi)**2)) / 100.
            fft = np.fft.fftfreq(img_roi.shape[0], d=float(pixelSpacing))  # 1/mm
            print "Max fft frequency: {} lp/mm.".format(np.max(fft))
            try:
                FFT = noisePowerRadAv(imgfft)
            except IndexError:
                continue
        # To get cm. Some scanners need another factor...
        fftMultiplyFactor = 10.
        # Keep only the positive-frequency half of the spectrum.
        noiseplotList[idx]['x'] = fft[fft > 0] * fftMultiplyFactor
        noiseplotList[idx]['y'] = FFT[fft > 0]
    # now imgfft is in HU^2 cm^2
    noiseplot = {}
    noiseplot['x'] = noiseplotList[0]['x']
    noiseplot['y'] = [0] * len(noiseplot['x'])
    # Find average over many images
    for idx, x in enumerate(noiseplotList[0]['x']):
        n = len(noiseplotList)  # number of images in run
        y = 0
        for nplj in noiseplotList:
            # nplj['y'] is full NPS of one image
            y = np.add(nplj['y'][idx], y)  # nplj['y'][idx] is one frequency
        # y is now total contribution to one frequency for all images
        y /= float(n)  # average contribution
        noiseplot['y'][idx] = y  # save value
    if options['nnps']:
        # Divide NPS curve by AUC, we get normalized NPS
        x_width = noiseplot['x'][1] - noiseplot['x'][0]
        areaUnderNPSCurve = float(np.sum(noiseplot['y']) * x_width)
        noiseplot['y'] = np.divide(noiseplot['y'], areaUnderNPSCurve)
    ##############################
    #   CALCULATE MEDIAN VALUES  #
    ##############################
    # (Old rectangle-sum median computation removed; superseded by the
    # trapezoid version below -- see VCS history for the commented draft.)
    # Find the frequency that splits the area under the NPS curve in half.
    weightedsum_trapz = trapz(noiseplot['y'], noiseplot['x'])
    reference_trapz = weightedsum_trapz / 2.
    for each in range(len(noiseplot['y'])):
        if trapz(noiseplot['y'][:each], noiseplot['x'][:each]) < reference_trapz:
            continue
        else:
            # Linear interpolation between the two samples straddling the
            # half-area point.
            y1 = trapz(noiseplot['y'][:each - 1], noiseplot['x'][:each - 1])
            y2 = trapz(noiseplot['y'][:each], noiseplot['x'][:each])
            x1 = noiseplot['x'][each - 1]
            x2 = noiseplot['x'][each]
            dy = float(y2 - y1)
            dx = x2 - x1
            median_x = (dx / dy) * (reference_trapz - y1) + x1
            # to find interpolated y value for plotting
            y1_value = noiseplot['y'][each - 1]
            y2_value = noiseplot['y'][each]
            dy_value = float(y2_value - y1_value)
            median_y = y1_value + dy_value / dx * (median_x - x1)
            median_trapz = [median_x, median_y]
            break
    #################################
    #   SAVE MEDIAN VALUES TO CSV   #
    #################################
    if options['medianValue']:
        fname = 'median_values.csv'
        open(fname, 'a')  # Touch file
        # Check for header
        headerExists = False
        with open(fname, 'r') as medianFile:
            for line in medianFile.readlines():
                if 'Filter' in line:
                    headerExists = True
        # Append the median value to medianFile
        with open(fname, 'a') as medianFile:
            if not headerExists:
                medianFile.write(
                    "Filter; IRLevel; Median value (lp/cm); Date (YYYY-MM-DD HH:MM:SS)\n"
                )
            datestring = strftime("%Y-%m-%d %H:%M:%S")
            medianFile.write("{kernel}; {IRLevel}; {median}; {time}\n".format(
                kernel=ConvolutionKernel,
                IRLevel=IRLevel,
                median=round(median_x, 2),
                time=datestring))
    return noiseplot, imageInfo, median_trapz