def query_missing(self):
     now = time.time()
     log.info("Querying %i missing data entries." % len(self.missing))
     for mtime in self.missing:
         starttime = mtime
         endtime = mtime + datetime.timedelta(0, 3600)
         results = self.query_transfers(starttime, endtime)
         if not results:
             log.warning("No transfer results found for %s." % starttime)
         for result in results:
             res_time, count, volume_mb = result
             res_time = float(res_time)
             starttime = self._timestamp_to_datetime(res_time)
             if now-res_time >= 3600:
                 endtime = self._timestamp_to_datetime(res_time+3600)
             else:
                 endtime = self._timestamp_to_datetime(now)
             if res_time > now:
                 continue
             td = TransferData()
             td.starttime = starttime
             td.endtime = endtime
             td.count = count
             td.volume_mb = volume_mb
             self.data[starttime] = td
             log.debug("Successfully parsed results for %s." % starttime)
             self.save_cache()
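The snippet above leans on a _timestamp_to_datetime helper that is not shown. A minimal sketch of what it presumably does, assuming it simply converts a Unix timestamp to a naive datetime (the name mirrors the call above; the real implementation may differ):

import datetime

def _timestamp_to_datetime(ts):
    # Convert seconds-since-epoch into a datetime, as used for the hourly windows above.
    return datetime.datetime.fromtimestamp(ts)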
    def _address(licence_list, licence_meta):
        for key, value in licence_list.items():

            filename_end = "_" + value["faculty"] if value["faculty"] else ""
            standard_address = "opt/nesi/mahuika/" + value["software_name"] + "/Licenses/" + value["institution"] + filename_end + ".lic"   
            
            if value["file_address"]:                
                try:
                    statdat = os.stat(value["file_address"])
                    file_name = value["file_address"].split("/")[-1]

                    owner = getpwuid(statdat.st_uid).pw_name
                    group = getgrgid(statdat.st_gid).gr_name

                    # Check permissions of file
                    if statdat.st_mode == 432:
                        log.error(key + " file address permissions look weird.")

                    if value["file_group"] and group != value["file_group"]:
                        log.error(value["file_address"] + ' group is "' + group + '", should be "' + value["file_group"] + '".')

                    if owner != settings["user"]:
                        log.error(value["file_address"] + " owner is '" + owner + "', should be '" + settings["user"] + "'.")
                              
                    if value["file_address"] != standard_address and value["software_name"] and value["institution"]:
                        log.debug('Would be cool if "' + value["file_address"] + '" was "' + standard_address + '".')

                except:
                    log.error(key + ' has an invalid file path attached "' + value["file_address"] + '"')
            else:
                value["file_address"]=standard_address
                log.warning(key + " licence path set to " + standard_address)
Example #4
 def __init__(self, data=None, thickness=None):
     self.data = data
     if data is not None:
         sx, sy, sz = data.GetSpacing()
         if thickness is not None and thickness != sz:
             log.warning("Thickness corrected: %g -> %g" % (sz, thickness))
             data.SetSpacing([sx, sy, thickness])
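The constructor above only needs GetSpacing/SetSpacing from the data object (in the original project this is presumably a VTK image). A self-contained sketch with a stand-in object, showing when the thickness correction would fire:

class FakeVolume:
    # Stand-in for the VTK-like data object; only the spacing methods are needed here.
    def __init__(self, spacing):
        self.spacing = list(spacing)
    def GetSpacing(self):
        return tuple(self.spacing)
    def SetSpacing(self, spacing):
        self.spacing = list(spacing)

vol = FakeVolume((0.98, 0.98, 5.0))
# Constructing the object above with thickness=3.0 would log
# "Thickness corrected: 5 -> 3" and reset the z spacing to 3.0.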
Example #5
    def __init__(self):
        super().__init__()

        self.audio_list_widget = QListWidget()
        self.audio_list_widget.installEventFilter(self)
        self.audio_list_widget.itemDoubleClicked.connect(self.play)

        # TODO: merge playlist with audio_list_widget (see examples of working with QMediaPlayer)
        self.playlist = QMediaPlaylist()
        self.playlist.currentIndexChanged.connect(
            lambda row: self.audio_list_widget.setCurrentRow(row))

        # TODO: handle the player signals: http://doc.qt.io/qt-5/qmediaplayer.html#signals
        self.player = QMediaPlayer()
        self.player.setPlaylist(self.playlist)
        self.player.currentMediaChanged.connect(
            lambda media: self.about_play_audio.emit(self.audio_list_widget.
                                                     currentItem().text()))

        if not self.player.isAvailable():
            # TODO: translate this message
            text = "The QMediaPlayer object does not have a valid service.\n" \
                   "Please check the media service plugins are installed."

            log.warning(text)
            QMessageBox.warning(self, "Service not available", text)

            quit()

        self.controls = PlayerControls(self.player)
        self.controls.set_state(self.player.state())
        self.controls.set_volume(self.player.volume())
        self.controls.set_muted(self.controls.is_muted())

        self.controls.play_signal.connect(self.play)
        self.controls.pause_signal.connect(self.player.pause)
        self.controls.stop_signal.connect(self.player.stop)
        self.controls.next_signal.connect(self.playlist.next)
        self.controls.previous_signal.connect(self.playlist.previous)
        self.controls.change_volume_signal.connect(self.player.setVolume)
        self.controls.change_muting_signal.connect(self.player.setMuted)

        self.progress = QProgressBar()
        self.progress.hide()

        layout = QVBoxLayout()
        layout.addWidget(self.controls)
        layout.addWidget(self.audio_list_widget)
        layout.addWidget(self.progress)

        self.setLayout(layout)

        self.thread = LoadAudioListThread()
        self.thread.about_add_audio.connect(self._add_audio)
        self.thread.about_progress.connect(self.progress.setValue)
        self.thread.about_range_progress.connect(self.progress.setRange)
        self.thread.started.connect(self._start)
        self.thread.finished.connect(self._finished)
 def save_cache(self):
     now = datetime.datetime.now()
     old_keys = []
     for key in self.data.keys():
         if (now - key).days >= 7:
             old_keys.append(key)
     for key in old_keys:
         del self.data[key]
     try:
         name, tmpname = get_files(self.cp, "transfer_data")
         fp = open(tmpname, 'wb')
         pickle.dump(self.data, fp)
         fp.close()
         commit_files(name, tmpname)
         log.debug("Saved data to cache.")
     except Exception as e:
         log.warning("Unable to write cache; message: %s" % str(e))
Example #8
 def get_songs(self, html_page):
     for link in self.parser.get_links(html_page):
         if not self._is_program_link(link):
             continue
         program = link.split('/')[2]
         date = link.split('/')[3]
         if self.chosen_program and self.chosen_program not in link:
             continue
         sub_url = self.base_url + link
         try:
             sub_html_page = self.Downloader(sub_url)
         except DownloaderException as e:
             log.warning(e)
             continue
         for song in self.parser.get_program_songs(sub_html_page):
             song['program'] = program
             song['date'] = date
             yield song
 def load_cached(self):
     try:
         data = pickle.load(open(self.cp.get("Filenames", "transfer_data") \
             % {'uid': euid}, "rb"))
         # Verify we didn't get useless data
         for time, tdata in data.items():
             assert isinstance(time, datetime.datetime)
             assert isinstance(tdata, TransferData)
             assert isinstance(tdata.starttime, datetime.datetime)
             assert isinstance(tdata.endtime, datetime.datetime)
             assert tdata.count != None
             assert tdata.volume_mb != None
             assert tdata.starttime != None
         self.data = data
         log.info("Successfully loaded transfer data from cache; %i" \
             " cache entries." % len(data))
         remove_data = []
         now = globals()['time'].time()
         now_dt = datetime.datetime.now()
         for time, tdata in data.items():
             if not hasattr(tdata, 'createtime') or not tdata.createtime:
                 log.debug("Ignoring cached data from %s as it has no " \
                     "create time info." % time)
                 remove_data.append(time)
                 continue
             if now - tdata.createtime > 3600:
                 log.debug("Ignoring cached data from %s as it is over " \
                     "an hour old." % time)
                 remove_data.append(time)
             age_starttime = now_dt - tdata.starttime
             age_starttime = age_starttime.days * 86400 + age_starttime.seconds
             if (now - tdata.createtime > 1800) and (age_starttime <=
                                                     12 * 3600):
                 log.debug("Ignoring cached data from %s as it is over " \
                     "30 minutes old and is for a recent interval." % \
                     time)
                 remove_data.append(time)
         for time in remove_data:
             del self.data[time]
     except Exception as e:
         log.warning("Unable to load cache; it may not exist. Error: %s" % \
            str(e))
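The expiry rules buried in the loop above (no create time, older than an hour, or older than 30 minutes while covering a recent interval) read more easily as a single predicate. A sketch only; this helper does not exist in the original code:

def cache_entry_is_stale(createtime, starttime, now, now_dt):
    if not createtime:
        return True                       # no create-time info
    if now - createtime > 3600:
        return True                       # over an hour old
    age = now_dt - starttime
    age_seconds = age.days * 86400 + age.seconds
    # over 30 minutes old and describing a recent (< 12 h) interval
    return (now - createtime > 1800) and (age_seconds <= 12 * 3600)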
Example #10
    def run(self):
        output_file = self._get_output_file_name_path()
        with open(output_file, 'w') as f:
            writer = csv.DictWriter(f, fieldnames=self.header,
                                    lineterminator='\n')
            writer.writeheader()
            for url in self._url_generator():
                log.info('Getting data from {url}'.format(url=url))
                start_time = time.time()
                try:
                    html_page = self.Downloader(url)
                except DownloaderException as e:
                    log.warning(e)
                    continue
                for song in self.get_songs(html_page):
                    writer.writerow(song)
                log.info("Done in {} seconds."
                         .format(time.time() - start_time))

        log.info('Out file ready in: {}'.format(output_file))
Example #12
def monitor(node_yml):
    auto = AutoDeployPlaton()
    collusion, nocollusion = get_node_list(node_yml)
    node_list = collusion + nocollusion
    old_block = [0] * len(collusion)
    t = 0
    while True:
        time.sleep(120)
        t += 120
        block_list = []
        url_list = []
        for node in node_list:
            try:
                w3 = connect_web3(node["url"])
                if not w3.isConnected():
                    raise Exception("A node has been shut down")
                block = w3.eth.blockNumber
                block_list.append(block)
                url_list.append(node["url"])

            except:
                close_msg = "Node {}:{} cannot be connected\n".format(node["host"], node["port"])
                log.warning(close_msg)
        msg = build_msg(url_list, block_list, old_block)
        if max(block_list) - min(block_list) >= 100:
            log.error("Block height gap is too large")
            auto.kill_of_yaml(node_yml)
            send_to_gap(block_list, msg, node_yml)
        if max(block_list) - min(old_block) == 0:
            log.error("No new blocks are being produced")
            auto.kill_of_yaml(node_yml)
            send_to_block(msg, node_yml)
        if t >= 21600:
            t = 0
            send_email(EmailInfo, msg)
        old_block = block_list
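The two alert conditions in the loop (a fork gap of 100 or more blocks, and no chain growth since the previous sample) are easier to see in isolation. A sketch with hypothetical names, not part of the original module:

def chain_alerts(block_list, old_block, max_gap=100):
    alerts = []
    if max(block_list) - min(block_list) >= max_gap:
        alerts.append("block heights diverge too much")
    if max(block_list) - min(old_block) == 0:
        alerts.append("no new blocks since the last sample")
    return alerts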
Example #13
def get_variants_from_matched_lines(tumor_line, normal_line):
    u"""mathced tumor line and normal line"""

    hetero_germline_variants = []
    somatic_variants = []

    if (tumor_line.depth < settings.min_depth) or (normal_line.depth < settings.min_depth):
        raise LowDepthError

    if (tumor_line.depth > settings.max_depth) or (normal_line.depth > settings.max_depth):
        raise HighDepthError

    if tumor_line.ref == u'N':
        raise CustomError(u"reference_is_N")

    if len(REGEX_COUNT_W.findall(tumor_line.bases)) < settings.min_variant_supporting_reads:
        raise TooFewVariantReadsError

    tumor_pileup_units = tumor_line.get_bases_with_qualities()
    normal_pileup_units = normal_line.get_bases_with_qualities()

    tumor_profiles = pileup_unit.get_profiles(tumor_pileup_units)
    normal_profiles = pileup_unit.get_profiles(normal_pileup_units)

    for variant_key in tumor_profiles.keys():
        if variant_key == tumor_line.ref:
            # skip for the reference base
            continue
        try:
            tumor_count = tumor_profiles[variant_key]
            if tumor_count < settings.min_variant_supporting_reads:
                raise TooFewVariantReadsError

            tumor_ref_units = [x for x in tumor_pileup_units if x.key() != variant_key]
            tumor_obs_units = [x for x in tumor_pileup_units if x.key() == variant_key]
            normal_ref_units = [x for x in normal_pileup_units if x.key() != variant_key]
            normal_obs_units = [x for x in normal_pileup_units if x.key() == variant_key]

            IndelCoverChecker.update(tumor_line.chromosome, tumor_line.position, tumor_profiles, normal_profiles)

            try:
                normal_count = normal_profiles.get(variant_key, 0)
                if normal_count < settings.min_variant_supporting_reads:
                    raise TooFewVariantReadsError
                v = HeterozygousGermlineVariant.from_pileup_units(tumor_ref_units, tumor_obs_units, normal_ref_units, normal_obs_units)
                v.set_basic_info(variant_key, tumor_line.chromosome, tumor_line.position, tumor_line.ref)
                if v.is_snv():
                    try:
                        triallelic_site_checker.check(tumor_line.ref, tumor_line.chromosome, tumor_line.position,
                                                      tumor_profiles, normal_profiles)
                    except TriallelicSiteError:
                        v.triallelic_site_check = "triallelic"
                    try:
                        IndelCoverChecker.check(tumor_line.chromosome, tumor_line.position)
                    except IndelCoverError:
                        v.indel_cover_check = "indel-cover"
                hetero_germline_variants.append(v)
            except AlleleFreqOutOfRangeError: pass
            except StrandFreqOutOfRangeError: pass
            except TooFewVariantReadsError: pass
            except LowDepthError: pass
            except LowBaseQualityError as e:
                log.debug(u"HeteroGermline: {0}, tumor: {1}, normal: {2}".format(e, tumor_line, normal_line))
            except CustomError as e:
                log.warning(u"HeteroGermline CustomError: {0}, tumor: {1}, normal: {2}".format(e, tumor_line, normal_line))

            try:
                v = SomaticVariant.from_pileup_units(tumor_ref_units, tumor_obs_units, normal_ref_units, normal_obs_units)
                v.set_basic_info(variant_key, tumor_line.chromosome, tumor_line.position, tumor_line.ref)
                v.set_fisher_score()
                if v.is_snv():
                    try:
                        triallelic_site_checker.check(tumor_line.ref, tumor_line.chromosome, tumor_line.position,
                                                      tumor_profiles, normal_profiles)
                    except TriallelicSiteError:
                        v.triallelic_site_check = "triallelic"
                    try:
                        IndelCoverChecker.check(tumor_line.chromosome, tumor_line.position)
                    except IndelCoverError:
                        v.indel_cover_check = "indel-cover"
                somatic_variants.append(v)
            except AlleleFreqOutOfRangeError: pass
            except StrandFreqOutOfRangeError: pass
            except TooManyNormalVariantReadsError: pass
            except TooFewVariantReadsError: pass
            except LowDepthError: pass
            except LowBaseQualityError as e:
                log.debug(u"Somatic: {0}, tumor: {1}, normal: {2}".format(e, tumor_line, normal_line))
            except CustomError as e:
                log.warning(u"Somatic CustomError: {0}, tumor: {1}, normal: {2}".format(e, tumor_line, normal_line))

        except TooFewVariantReadsError: pass
        except CustomError as e:
            log.warning(u"CustomError: {0}, tumor: {1}, normal: {2}".format(e, tumor_line, normal_line))
Example #14

    ##################################################################
    # Sum the doses from the individual beams into the total dose
    ##################################################################
    beamDoses = {}
    totalDoses = None
    totalDosesFile = None
    doseScaling = None
    singleBeam = False
    for beam in dicom_beams:
        doseScaling = float(beam.DoseGridScaling)
        try:
            bn = int(beam.ReferencedRTPlanSequence[0].ReferencedFractionGroupSequence[0].ReferencedBeamSequence[0].ReferencedBeamNumber)
        except:
            log.warning("Total doses from single RD file.")
            if totalDoses is None:
                singleBeam = True
                totalDoses = beam.pixel_array.copy()
                print(totalDoses.shape)
                totalDosesFile = beam.filename
            continue
        beamDoses[bn] = beam.pixel_array
        if doseScaling is not None and float(beam.DoseGridScaling) != doseScaling:
            log.warning('Strange data: DoseGridScaling is not the same for all beamlets!')

    log.info(f"Single beam: {singleBeam}")
    if not singleBeam:
        bns = list(beamDoses.keys())
        totalDoses = beamDoses[bns[0]].copy()
        for i in range(1, len(bns)):
Example #15
    def get_bases_with_qualities(self):
        data = []
        # remove the beginning mark
        i = 0 # for bases
        j = 0 # for qualities
        while i < len(self.bases):
            # log.debug("{0}, {1}: {2}, {3}".format(i, j, self.bases[i], self.qualities[j]))
            if self.bases[i] == u".":
                data.append(pileup_unit.Base(self.ref, 1, self.qualities[j]))
                i += 1
                j += 1
            elif self.bases[i] == u",":
                data.append(pileup_unit.Base(self.ref, 0, self.qualities[j]))
                i += 1
                j += 1
            elif self.bases[i] == u"$":
                i += 1
            elif self.bases[i] == u"^":
                i += 2  # with map quality
                continue
            elif self.bases[i] in u"+-":
                # read the length
                tmp_i = i + 1
                length_str = u""
                while True:
                    try:
                        int(self.bases[tmp_i])
                        length_str += self.bases[tmp_i]
                        tmp_i += 1
                    except ValueError:
                        break
                #log.debug("length_str: {0}".format(length_str))
                seq_length = int(length_str)
                # get the inserted or deleted sequence
                seq = self.bases[tmp_i:tmp_i + seq_length]
                if seq[0] in u"ATGCN":
                    strand = 1
                else:
                    strand = 0
                if self.bases[i] == u"+":
                    data.append(pileup_unit.Ins(seq, strand, None))
                else:
                    data.append(pileup_unit.Del(seq, strand, None))
                i = tmp_i + seq_length
            elif self.bases[i] in u"ATGCN":
                data.append(pileup_unit.Base(self.bases[i], 1, self.qualities[j]))
                i += 1
                j += 1
            elif self.bases[i] in u"atgcn":
                data.append(pileup_unit.Base(self.bases[i].upper(), 0, self.qualities[j]))
                i += 1
                j += 1
            elif self.bases[i] == u"*":
                # deletion
                i += 1
                j += 1
            else:
                log.warning(u"i={0}, self={1}".format(i, self))
                raise Exception(u"something_wrong")

        if i != len(self.bases) or j != len(self.qualities):
            log.error(u"i:{0}={1} , j:{2}={3}".format(i, len(self.bases), j, len(self.qualities)))
            log.error(self)
            raise Exception(u"did_not_reach_the_lase_base")
        return data
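The trickiest branch above is the one for '+' and '-': an indel is encoded as a sign, a decimal length, and then that many sequence characters. A self-contained sketch of just that sub-parser (simplified; it ignores qualities and the pileup_unit classes):

def parse_indel(bases, i):
    # bases[i] is '+' or '-'; returns (kind, sequence, index of the next unread character).
    kind = "ins" if bases[i] == "+" else "del"
    j = i + 1
    digits = ""
    while j < len(bases) and bases[j].isdigit():
        digits += bases[j]
        j += 1
    length = int(digits)
    seq = bases[j:j + length]
    return kind, seq, j + length

# parse_indel(".+2AT,", 1) -> ("ins", "AT", 5)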
Example #16
def do_run(args):
    ctgriddata = None

    if hasattr(args,"rass_data"):
        rass_data = args.rass_data
    else:
        rass_data = RASSData(root_folder=args.root_folder)

    ################################################################
    # Read options from the "input" folder
    ################################################################
    options = default_options()
    cfname = rass_data.input("config.json")
    if os.path.isfile(cfname):
        log.info("Reading options from file: %s" % cfname)
        with open(cfname) as options_file:
            options.update(json.load(options_file))

    ################################################################
    # Override options with a file passed via command-line arguments
    ################################################################
    for i in range(len(argv)):
        if "options" == argv[i]:
            fname = "%s" % (argv[i + 1])
            log.info("Reading options from file: %s" % fname)
            with open(fname) as options_file:
                options.update(json.load(options_file))

    dicomutils.DEBUG_LEVEL = options["debug_level"]

    ################################################################
    # Look for DICOM files in the "input"/dicom subdirectory
    ################################################################
    rtss, plan, ctlist, doseslist = dicomutils.find_ct_rs_rp_dicom(rass_data.input("dicom"))
    if rtss is None or plan is None:
        raise Exception(f"No RS.* or rtss.* file in {rass_data.input('dicom')}")


    ################################################################
    # Read the DICOM files with structure (ROI) information
    # and the plan
    ################################################################
    rtss = dicom.read_file(rtss)
    plan = dicom.read_file(plan)
    treatment_name = '-'.join(plan.PatientID.split('^'))
    log.info('Name: ' + treatment_name)


    ################################################################
    # Read the CT data using VTK
    ################################################################
    from ct import CTVolumeDataReader
    reader = CTVolumeDataReader(rass_data.input("dicom"), ctfiles=ctlist)
    ctVolumeData = reader.read()
    ctData = ctVolumeData.getCTDataAsNumpyArray()


    if len(ctlist) > 0:
        ct = dicom.read_file(ctlist[0])
        ctgriddata = list(map(float, (
                                 ct.ImagePositionPatient[0], ct.ImagePositionPatient[1],
                                 ct.PixelSpacing[0], ct.PixelSpacing[1], 
                                 ct.Columns, ct.Rows)))
    else:
        ctgriddata = None

    ################################################################
    # reading doses information for beams from DICOM
    ################################################################
    beams = [dicom.read_file(f) for f in doseslist]

    ##################################################################
    # Read the doses for the individual beams
    ##################################################################
    beamDoses = {}
    totalDoses = None
    totalDosesFile = None
    doseScaling = None
    singleBeam = False
    for beam in beams:
        doseScaling = float(beam.DoseGridScaling)
        try:
            bn = int(beam.ReferencedRTPlanSequence[0].ReferencedFractionGroupSequence[0].ReferencedBeamSequence[0].ReferencedBeamNumber)
        except:
            print("Semething wrong went...")
            if totalDoses is None:
                singleBeam = True
                totalDoses = beam.pixel_array.copy()
                totalDosesFile = beam.filename
            continue
        beamDoses[bn] = beam.pixel_array
        if doseScaling is not None and float(beam.DoseGridScaling) != doseScaling:
            log.warning('Strange data: DoseGridScaling is not the same for all beams!')
        log.info(f"Got doses data for beam number {bn}")

    ##################################################################
    # Sum the doses from the individual beams into the total dose
    ##################################################################
    if not singleBeam:
        print(beamDoses)
        bns = list(beamDoses.keys())
        totalDoses = beamDoses[bns[0]].copy()
        for i in range(1, len(bns)):
            log.info(f"Adding doses from beam {i}")
            totalDoses += beamDoses[bns[i]]

    totalDoses = np.array(totalDoses, dtype=np.float32)
    log.info("Read doses for %d beams" % len(beamDoses))

    minDose = np.min(totalDoses)
    averageDose = np.average(totalDoses)
    maxDose = np.max(totalDoses)

    if totalDosesFile is None:
        log.info('Total doses calculated as sum of beam doses (min dose=%f, average dose=%f, max dose=%f, doseScaling=%f)' % (
            minDose, averageDose, maxDose, doseScaling))
    else:
        log.info('Got total doses from file %s (min dose=%f, average dose=%f, max dose = %f, doseScaling=%f)' % (
            totalDosesFile, minDose, averageDose, maxDose, doseScaling))


    # Planning grid information extracted from the first beam
    tBeam = beams[0]
    kmax = tBeam.Columns # x?
    jmax = tBeam.Rows # y?
    imax = len(tBeam.GridFrameOffsetVector) # z
    xbase = float(tBeam.ImagePositionPatient[0]) * SCALE
    ybase = float(tBeam.ImagePositionPatient[1]) * SCALE
    zbase = float(tBeam.ImagePositionPatient[2]) * SCALE
    dx = float(tBeam.PixelSpacing[0]) * SCALE
    dy = float(tBeam.PixelSpacing[1]) * SCALE
    zoffsets = list(map(float, tBeam.GridFrameOffsetVector))
    for i in range(len(zoffsets)):
        zoffsets[i] *= SCALE
    dz = zoffsets[1] - zoffsets[0]
    dv = dx * dy * dz

    log.info('Planning grid: %d x %d x %d in [%g:%g]x[%g:%g]x[%g:%g] dx,dy,dz=%g,%g,%g -> dv=%g' % (
        kmax, jmax, imax,
        xbase, xbase + kmax * dx, ybase, ybase + jmax * dy, zbase + zoffsets[0], zbase + zoffsets[-1],
        dx, dy, dz, dv))

    planGridInfo = {'ixmax': kmax, 'iymax': jmax, 'izmax': imax,
                    'xorig': xbase, 'yorig': ybase, 'zorig': zbase,
                    'dx': dx, 'dy': dy, 'dz': dz,
                    'minDose': minDose, 'avgDose': averageDose, 'maxDose': maxDose,
                    'doseScaling': doseScaling
                    }


    ####################################################
    # ROI analysis
    ####################################################
    myROIs = []
    idxROIBody = -1
    for i in range(0, len(rtss.StructureSetROISequence)):
        roiName = rtss.StructureSetROISequence[i].ROIName
        log.info(f"Reading contours for {roiName} from DICOM")

        contours = dicomutils.findContours(rtss, rtss.StructureSetROISequence[i].ROINumber)
        if len(contours) > 1:
            r = MyRoi(contours, roiName, float(tBeam.PixelSpacing[0]) / 1000.0)
            myROIs.append(r)

            if ("body" in roiName.lower() or "skin" in roiName.lower() or "outline" in roiName.lower()) and (idxROIBody == -1):
                idxROIBody = i
                log.info("Found ROI body (or skin): idx = %d" % idxROIBody)

    if idxROIBody == -1:
        raise Exception("The structure file does not contain any structure with 'body', 'outline' or 'skin' in the name.")


    ##########################################################################
    # Mark ROIs or read from cache (cache is a file in a working
    # directory, separate file for each ROI,
    # the filename pattern is: "%s_%s.markscache" % (treatment_name, ROIName)
    ##########################################################################
    roi_marks = np.zeros((imax, jmax, kmax), dtype=np.int64)
    for r in range(0, len(myROIs)):
        fcache = rass_data.processing("%s_%s.markscache" % (treatment_name, myROIs[r].name))
        if myROIs[r].read_marks(fcache, roi_marks):
            log.info("Read marking voxels for %s from cache" % myROIs[r].name)
            myROIs[r].countVoxels(roi_marks, 2 ** r)
        else:
            log.info("Marking voxels for %s" % myROIs[r].name)
            log.debug("CTGRID DATA %s" % list(ctgriddata))

            myROIs[r].mark(xbase / SCALE, ybase / SCALE, dx / SCALE, dy / SCALE, kmax, jmax, imax,
                           np.linspace(zbase, zbase + (imax - 1) * dz, imax) / SCALE, roi_marks, 2 ** r, ctgriddata=ctgriddata)
            myROIs[r].save_marks(fcache, roi_marks, 2 ** r)

    for r in range(len(myROIs)):
        log.info("Statistics for %20s: ID=%8d, %7d voxels, vol=%8.1f discrete vol=%8.1f [cm3]" % (
            myROIs[r].name, 2 ** r, myROIs[r].count, myROIs[r].volume / 1000.,
            myROIs[r].count * dv / SCALE / SCALE / SCALE / 1000.0))


    # At this point the CT is loaded - ctData
    # the doses are loaded - totalDoses (planning grid coordinates)
    # and the ROI information is available - roi_marks (planning grid coordinates)

    # Now the CT needs to be rescaled and the data saved, and we are done...

    plan_origin = (xbase, ybase, zbase)
    plan_dimensions = (kmax, jmax, imax)
    plan_spacing = (dx, dy, dz)
    ctOnPlanningGrid = ctVolumeData.approximateCTOnPlanGrid( plan_origin, plan_spacing, plan_dimensions )

    ## save to VTI files
    npar = ctOnPlanningGrid
    if not skip_vti:
        VolumeData.saveVolumeGridToFile(plan_spacing, plan_dimensions, plan_origin, 
             npar, rass_data.output("approximated_ct"))

        VolumeData.saveVolumeGridToFileAsLong(plan_spacing, plan_dimensions, plan_origin, 
             roi_marks, rass_data.output("roi_marks"))

        for r in range(0, len(myROIs)):
            d = np.array(np.bitwise_and(roi_marks, (2 ** r)) / (2 ** r), dtype=np.float32)
            log.debug(f"ROI: {myROIs[r].name}[{2 ** r}].size() = {np.sum(d)}")
            log.info(f"Saving roi marks for {myROIs[r].name} to {rass_data.output(f'roi_marks_{myROIs[r].name}')}.vti file ...")
            VolumeData.saveVolumeGridToFile(plan_spacing, plan_dimensions, plan_origin, 
                    d, rass_data.output(f"roi_marks_{myROIs[r].name}"))


        VolumeData.saveVolumeGridToFile(plan_spacing, plan_dimensions, plan_origin, 
             totalDoses, rass_data.output("total_doses"))


    ## save to ndarray files
    from bdfileutils import save_ndarray, read_ndarray
    ctOnPlanningGrid = np.reshape(ctOnPlanningGrid, (imax, jmax, kmax))
    save_ndarray(rass_data.output("approximated_ct.nparray"),ctOnPlanningGrid)

    roi_marks = np.reshape(roi_marks, (imax, jmax, kmax))
    save_ndarray(rass_data.output("roi_marks.nparray"),roi_marks)


    for r in range(0, len(myROIs)):
        d = np.array(np.bitwise_and(roi_marks, (2 ** r)) / (2 ** r), dtype=np.int32)
        d = np.reshape(d, (imax, jmax, kmax))
        save_ndarray(rass_data.output(f"roi_marks_{myROIs[r].name}.nparray"), d)


    totalDoses = np.reshape(totalDoses, (imax, jmax, kmax))
    save_ndarray(rass_data.output("total_doses.nparray"),totalDoses)

    with open(rass_data.output("roi_mapping.txt"),"w") as f:
        for i in range(len(myROIs)):
            f.write(f"{myROIs[i].name}:{2 ** i}\n")
    def _tokens(license_list):
        try:
            sub_input="sacctmgr -pns show resource withcluster"
            log.debug(sub_input)
            string_data=subprocess.check_output(sub_input, shell=True).decode("utf-8").strip()
        except Exception as details:
            log.error("Failed to check SLURM tokens. " + str(details))
        else:
            active_token_dict = {}
            # Format output data into dictionary 
            for lic_string in string_data.split("\n"):

                log.debug(lic_string)
                str_arr=lic_string.split("|")
                active_token_dict[str_arr[0] + "@" + str_arr[1]]=str_arr


            for key, value in licence_list.items():

                name = value["software_name"] + "_" + value["lic_type"] if value["lic_type"] else value["software_name"]
                server = value["institution"] + "_" + value["faculty"] if value["faculty"] else value["institution"]

                if key not in active_token_dict.keys():
                    log.error("'" + key + "' does not have a token in slurm database!")

                    # if possible, create.
                    if value["institution"] and value["total"] and value["software_name"]:           
                        log.error("Attempting to add...")

                        try:
                            sub_input="sacctmgr -i add resource Name=" + name.lower() + " Server=" + server.lower() + " Count=" + str(int(value["total"]*2)) + " Type=License percentallowed=50 where cluster=mahuika"
                            log.debug(sub_input)
                            subprocess.check_output(sub_input, shell=True).decode("utf-8")
                            
                        except Exception as details:
                            log.error(details)
                        else:
                            log.info("Token added successfully!")
                    
                    else:
                        log.error("Must have 'instituiton, software_name, cluster, total' set in order to generate SLURM token.")

                else:
                    if value["total"] != int(active_token_dict[key][3])/2:
                        log.error("SLURM TOKEN BAD, HAS " + str(int(active_token_dict[key][3])/2)  + " and should be " + str(value["total"]))
                        try:
                            sub_input="sacctmgr -i modify resource Name=" + name.lower() + " Server=" + server.lower() + " set Count=" + str(int(value["total"]*2))
                            log.debug(sub_input)
                            subprocess.check_output(sub_input, shell=True)        
                        except Exception as details:
                            log.error(details)
                        else:
                            log.warning("Token modified successfully!")
                    if active_token_dict[key][7] != "50":
                        log.error("SLURM token not cluster-split")

                        try:
                            sub_input="sacctmgr -i modify resource Name=" + name.lower() + " Server=" + server.lower() + "percentallocated=100 where cluster=mahuika" +  " set PercentAllowed=50"
                            log.debug(sub_input)
                            subprocess.check_output(sub_input, shell=True)

                            sub_input="sacctmgr -i modify resource Name=" + name.lower() + " Server=" + server.lower() + "percentallocated=100 where cluster=maui" +  " set PercentAllowed=50"
                            log.debug(sub_input)
                            subprocess.check_output(sub_input, shell=True)
                        except Exception as details:
                            log.error(details)
                        else:
                            log.info("Token modified successfully!")
Example #18
    def reload(self, topic_id):
        # Double-buffering to reduce data contention
        topic_dict_copy = dict(self.topic_dict)
        try:
            #Topic id cannot be 0 ???
            topic_id = int(topic_id)
            if topic_id <= 0:
                topic_id = None
        except Exception as e:
            topic_id = None
        db_topic_res = []
        try:
            mysql_conn = get_mysql_conn()
            sql = 'select `id`, `name`, `description`, `table_name`, `schema`, `utime`, `ctime` from topic'
            if topic_id is not None:
                sql = "%s where id = %d;" % (sql, topic_id)
            cur = mysql_conn.cursor()
            cur.execute(sql)
            db_topic_res = cur.fetchall()
            cur.close()
        except Exception as e:
            log.error(str(e))
        # We requested a topic id but the DB returned nothing, meaning this topic no longer exists
        if topic_id and len(db_topic_res) == 0 and topic_id in topic_dict_copy:
            del topic_dict_copy[topic_id]

        for record in db_topic_res:
            topic_info = dbrecord_to_dict(cur.description, record)
            try:
                # Convert the record tuple to a dict (using the field names from cur.description)
                # Parse the schema object
                try:
                    topic_info['schema_obj'] = json.loads(topic_info['schema'])
                except Exception as e:
                    topic_info['schema_obj'] = None
                    log.warning(
                        'schema parse failed[%s], topic[%s] is not writable!' %
                        (str(e), topic_info['name'].encode('utf8')))

                # Use a default db_name.
                # TODO: store db_name in the MySQL table as well, so different topics can live in different databases
                topic_info['db_name'] = conf.MONGODB['default_db']

                # Get the collection
                db_name = topic_info['db_name']

                table_name = topic_info['table_name']
                collection = mongodbs[db_name][table_name]
                # Make sure the collection has an index on _record_id
                # https://docs.mongodb.com/getting-started/python/indexes/
                # ensure_index is deprecated; create_index only creates the index when it does not already exist.

                collection.create_index(common.FIELDNAME_RECORD_ID,
                                        background=True,
                                        unique=True)
                collection.create_index(common.FIELDNAME_IN_TIME,
                                        background=True,
                                        unique=False)
                collection.create_index(common.FIELDNAME_UTIME,
                                        background=True,
                                        unique=False)
                topic_info['collection'] = collection

                # Add it to self.topic_dict
                topic_dict_copy[topic_info['id']] = topic_info
                log.info(
                    'TopicManager loaded: topic.id[%s], topic.name[%s], db_name[%s], table_name[%s] loaded!'
                    % (topic_info['id'], topic_info['name'].encode('utf8'),
                       db_name.encode('utf8'), table_name.encode('utf8')))
            except Exception as e:
                log.info("failed reload schema {}".format(topic_info['id']))
                log.error(str(traceback.format_exc()))
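reload depends on a dbrecord_to_dict helper that is not shown. It presumably pairs the column names from cursor.description with the values of the fetched row; a plausible sketch (the project's real helper may differ):

def dbrecord_to_dict(description, record):
    # description is the DB-API cursor.description; each entry is a tuple whose
    # first element is the column name.
    return {column[0]: value for column, value in zip(description, record)}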
    def _fill(licence_list):
        """Guess at any missing properties"""
        for key, value in licence_list.items():
            
            if not value["lic_type"] and len(key.split("@")[0].split('_'))>1:
                value["lic_type"] = key.split("@")[0].split('_')[1]
                log.warning(key + " lic_type set to " + value["lic_type"])

            if not value["software_name"]:
                value["software_name"] = key.split("@")[0].split('_')[0]        
                log.warning(key + " software_name set to " + value["software_name"])

            if not value["feature"]:
                value["feature"] = key.split("@")[0].split('_')[0]
                log.warning(key + " feature set to " + value["feature"])


            if not value["institution"]:
                value["institution"] = key.split("@")[1].split('_')[0]
                log.warning(key + " institution set to " + value["institution"])


            if not value["faculty"] and len(key.split("@")[1].split('_'))>1:
                value["faculty"] = key.split("@")[1].split('_')[1]
                log.warning(key + " faculty set to " + value["faculty"])

            if not value["file_group"] and value["institution"]:
                value["file_group"] = value["institution"]+"-org"
                log.warning(key + " file_group set to " + value["file_group"])
            
            if not value["day_ave"] or not len(value["day_ave"]) == 24:
                value["day_ave"] = [0] * 24
def validate():
    """Checks for inconsistancies"""

    # Add a licence if it exists in meta but not in the list
    for licence in licence_meta.keys():
        if licence not in licence_list:
            log.warning(licence + " is a new licence. Adding to database with default values.")
            licence_list[licence] = {}
    # Add properties if missing from cache (for some reason)
    for licence in licence_list.values():
        for key in settings["default"].keys():
            if key not in licence:
                licence[key] = settings["default"][key]

    def _fill(licence_list):
        """Guess at any missing properties"""
        for key, value in licence_list.items():
            
            if not value["lic_type"] and len(key.split("@")[0].split('_'))>1:
                value["lic_type"] = key.split("@")[0].split('_')[1]
                log.warning(key + " lic_type set to " + value["lic_type"])

            if not value["software_name"]:
                value["software_name"] = key.split("@")[0].split('_')[0]        
                log.warning(key + " software_name set to " + value["software_name"])

            if not value["feature"]:
                value["feature"] = key.split("@")[0].split('_')[0]
                log.warning(key + " feature set to " + value["feature"])


            if not value["institution"]:
                value["institution"] = key.split("@")[1].split('_')[0]
                log.warning(key + " institution set to " + value["institution"])


            if not value["faculty"] and len(key.split("@")[1].split('_'))>1:
                value["faculty"] = key.split("@")[1].split('_')[1]
                log.warning(key + " faculty set to " + value["faculty"])

            if not value["file_group"] and value["institution"]:
                value["file_group"] = value["institution"]+"-org"
                log.warning(key + " file_group set to " + value["file_group"])
            
            if not value["day_ave"] or not len(value["day_ave"]) == 24:
                value["day_ave"] = [0] * 24

    def _address(licence_list, licence_meta):
        for key, value in licence_list.items():

            filename_end = "_" + value["faculty"] if value["faculty"] else ""
            standard_address = "opt/nesi/mahuika/" + value["software_name"] + "/Licenses/" + value["institution"] + filename_end + ".lic"   
            
            if value["file_address"]:                
                try:
                    statdat = os.stat(value["file_address"])
                    file_name = value["file_address"].split("/")[-1]

                    owner = getpwuid(statdat.st_uid).pw_name
                    group = getgrgid(statdat.st_gid).gr_name

                    # Check permissions of file
                    if statdat.st_mode == 432:
                        log.error(key + " file address permissions look weird.")

                    if value["file_group"] and group != value["file_group"]:
                        log.error(value["file_address"] + ' group is "' + group + '", should be "' + value["file_group"] + '".')

                    if owner != settings["user"]:
                        log.error(value["file_address"] + " owner is '" + owner + "', should be '" + settings["user"] + "'.")
                              
                    if value["file_address"] != standard_address and value["software_name"] and value["institution"]:
                        log.debug('Would be cool if "' + value["file_address"] + '" was "' + standard_address + '".')

                except:
                    log.error(key + ' has an invalid file path attached "' + value["file_address"] + '"')
            else:
                value["file_address"]=standard_address
                log.warning(key + " licence path set to " + standard_address)

    def _tokens(license_list):
        try:
            sub_input="sacctmgr -pns show resource withcluster"
            log.debug(sub_input)
            string_data=subprocess.check_output(sub_input, shell=True).decode("utf-8").strip()
        except Exception as details:
            log.error("Failed to check SLURM tokens. " + str(details))
        else:
            active_token_dict = {}
            # Format output data into dictionary 
            for lic_string in string_data.split("\n"):

                log.debug(lic_string)
                str_arr=lic_string.split("|")
                active_token_dict[str_arr[0] + "@" + str_arr[1]]=str_arr


            for key, value in licence_list.items():

                name = value["software_name"] + "_" + value["lic_type"] if value["lic_type"] else value["software_name"]
                server = value["institution"] + "_" + value["faculty"] if value["faculty"] else value["institution"]

                if key not in active_token_dict.keys():
                    log.error("'" + key + "' does not have a token in slurm database!")

                    # if possible, create.
                    if value["institution"] and value["total"] and value["software_name"]:           
                        log.error("Attempting to add...")

                        try:
                            sub_input="sacctmgr -i add resource Name=" + name.lower() + " Server=" + server.lower() + " Count=" + str(int(value["total"]*2)) + " Type=License percentallowed=50 where cluster=mahuika"
                            log.debug(sub_input)
                            subprocess.check_output(sub_input, shell=True).decode("utf-8")
                            
                        except Exception as details:
                            log.error(details)
                        else:
                            log.info("Token added successfully!")
                    
                    else:
                        log.error("Must have 'instituiton, software_name, cluster, total' set in order to generate SLURM token.")

                else:
                    if value["total"] != int(active_token_dict[key][3])/2:
                        log.error("SLURM TOKEN BAD, HAS " + str(int(active_token_dict[key][3])/2)  + " and should be " + str(value["total"]))
                        try:
                            sub_input="sacctmgr -i modify resource Name=" + name.lower() + " Server=" + server.lower() + " set Count=" + str(int(value["total"]*2))
                            log.debug(sub_input)
                            subprocess.check_output(sub_input, shell=True)        
                        except Exception as details:
                            log.error(details)
                        else:
                            log.warning("Token modified successfully!")
                    if active_token_dict[key][7] != "50":
                        log.error("SLURM token not cluster-split")

                        try:
                            sub_input="sacctmgr -i modify resource Name=" + name.lower() + " Server=" + server.lower() + "percentallocated=100 where cluster=mahuika" +  " set PercentAllowed=50"
                            log.debug(sub_input)
                            subprocess.check_output(sub_input, shell=True)

                            sub_input="sacctmgr -i modify resource Name=" + name.lower() + " Server=" + server.lower() + "percentallocated=100 where cluster=maui" +  " set PercentAllowed=50"
                            log.debug(sub_input)
                            subprocess.check_output(sub_input, shell=True)
                        except Exception as details:
                            log.error(details)
                        else:
                            log.info("Token modified successfully!")
                    
    _fill(licence_list)
    _address(licence_list, licence_meta)
    _tokens(licence_list)
    c.deep_merge(licence_meta, licence_list)
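_fill derives most of the missing fields by splitting the licence key, which has the shape software[_lictype]@institution[_faculty]. A compact sketch of that split on an invented key (illustrative only):

def split_licence_key(key):
    left, right = key.split("@")
    software, _, lic_type = left.partition("_")
    institution, _, faculty = right.partition("_")
    return software, lic_type, institution, faculty

# split_licence_key("ansys_research@uoa_engineering")
#   -> ("ansys", "research", "uoa", "engineering")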
Example #22
File: rass.py  Project: szmurlor/bdcalc
 def clean_processing_data(self):
     folder = self.processing()
     if os.path.isdir(folder):
         self.clean_folder(folder)
     else:
         log.warning("Trying to clean processing data when the processing data folder (%s) does not exist." % folder)
def lmutil():
    """Checks total of available licences for all objects passed"""
    # This is a mess. Tidy.
    pattern="Users of (?P<feature_name>\w*?):  \(Total of (?P<total>\d*?) licenses issued;  Total of (?P<in_use_real>\d*?) licenses in use\)"
    # lmutil_list=[]
    # for key, value in licence_list.items():
    #     lmutil_list.append={"path":value["address"]}

    for key, value in licence_list.items():
        if not value["file_address"]:
            continue 
            
        if not value["feature"]: 
            log.error(key + " must have feature specified in order to check with LMUTIL")
            continue           
            
        # if value["flex_method"] == "lmutil":
        #     return
        features=[]
        lmutil_return=""
        try:
            shell_string="linx64/lmutil " + "lmstat " + "-f " + value["feature"] + " -c " + value["file_address"]
            log.debug(shell_string)
            lmutil_return=subprocess.check_output(shell_string, shell=True).strip()    # Removed .decode("utf-8") as it threw an error.
        except Exception as details:
            log.error("Failed to fetch " + key + " " + str(details))
        else:
            for line in (lmutil_return.split("\n")):  
                m = re.match(pattern, line)
                if m:
                    features.append(m.groupdict())

            found=False                

            for feature in features:
                if feature["feature_name"] == value["feature"]:
                    found=True
                    hour_index = dt.datetime.now().hour - 1
                    value["in_use_real"] = int(feature["in_use_real"])

                    if value["total"] != int(feature["total"]):
                        log.warning("LMUTIL shows different total number of licences than recorded. Changing from '" + str(value["total"]) + "' to '" + feature["total"] + "'")
                        value["total"] = int(feature["total"])

                    # Record to running history
                    value["history"].append(value["in_use_real"])

                    # Pop extra array entries
                    while len(value["history"]) > value["history_points"]:
                        value["history"].pop(0)

                    # Find modified in use value
                    interesting = max(value["history"])-value["in_use_nesi"]
                    value["in_use_modified"] = round(min(
                        max(interesting + value["buffer_constant"], interesting * (1 + value["buffer_factor"])), value["total"], 0
                    ))

                    # Update average
                    value["day_ave"][hour_index] = (
                        round(
                            ((value["in_use_real"] * settings["point_weight"]) + (value["day_ave"][hour_index] * (1 - settings["point_weight"]))),
                            2,
                        )
                        if value["day_ave"][hour_index]
                        else value["in_use_real"]
                    )
                else:
                    log.info("Untracked Feature " + feature["feature_name"] + ": " + (feature["in_use_real"]) +" of " + (feature["total"]) + "in use.")

            if not found:
                log.error("Feature '" + value["feature"] + "' not found on server for '" + key + "'")