Example #1
def detect_api():
    print('detect Called')
    msg = 'Success'
    queue = -1
    try:
        JOIN = JOIN_  # JOIN_ is presumably a module-level alias for os.path.join
        if request.method == 'POST':
            global jsons
            queue = str(getqueue())
            addr = request.remote_addr
            print("[{}] Remote addr: {}, Queue: {}".format(NOW(), addr, queue))
            upload_path = os.path.join(config.upload_path, addr)
            utils.checkDir(upload_path)
            for f in request.files.getlist('files'):
                print("[{}] HANDLING IMAGE: {}".format(NOW(), f.filename))
                filename = JOIN(upload_path, secure_filename(f.filename))
                f.save(filename)
                timer = time()
                h, w, dets = detect_by_path(filename)

                check_results(h, w, dets, queue)
                print("[+] SSD Detection time: {}".format(time() - timer))
                print("[{}] Save result of {}".format(NOW(), f.filename))
    except Exception as e:
        print(e)
        msg = 'INTERNAL ERROR'
    return jsonify({'queue': queue})
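Every example on this page calls a checkDir helper from its project's utils module. The signatures differ from project to project (some calls pass build, delete, or a label as a second argument), but a minimal sketch of the shared idea (an assumption; none of the real utils modules are shown on this page) looks like this:

import os
import shutil

# Hypothetical sketch of a checkDir helper: make sure the directory exists,
# optionally recreating it from scratch. Real signatures vary across the
# projects quoted on this page.
def checkDir(path, delete=False):
    if delete and os.path.isdir(path):
        shutil.rmtree(path)  # start over with an empty directory
    if not os.path.isdir(path):
        os.makedirs(path)    # also creates missing parent directories
    return path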
Example #2
File: views.py Project: thesues/zhuanli
def generate(request):
    catalogs = Catalogs.objects.all()
    lastExcutetime = ExcuteTime.objects.get(pk=1)
    now = datetime.now()
    xmlfileList = []
    patents = Patent.objects.filter(updateTime__gt=lastExcutetime.excuteTime).values()
#   patents = Patent.objects.values()
    impl = xml.dom.minidom.getDOMImplementation()
    i = 0
    for patent in patents:
        dom = impl.createDocument(None, 'patent', None)
        root = dom.documentElement
        filename = os.path.join(settings.MEDIA_ROOT, 'output',
                                now.strftime('%Y%m%d%H'), 'p' + str(i) + '.xml')
        d = checkDir(filename)  # presumably creates the parent directory and returns it
        for k, v in patent.iteritems():  # Python 2 dict iteration
            if k == 'updateTime' or k == 'id':
                continue
            if k == 'classid_id':
                v = Catalogs.objects.get(pk=v).name
                k = u'classid'
            if k in ('patent_pic2file', 'patent_pic1file'):
                if v:
                    shutil.copy(os.path.join(settings.MEDIA_ROOT, v), d)
                    v = os.path.basename(v)
            item = makeEasyTag(dom, k, v)
            root.appendChild(item)
        i = i + 1
        f = file(filename, 'w')  # Python 2 built-in file()
        writer = codecs.lookup('utf8')[3](f)  # index 3 of a CodecInfo is its StreamWriter
        dom.writexml(writer, encoding='utf-8', newl='\r\n')
        f.close()
        xmlfileList.append(filename)
    lastExcutetime.excuteTime = now
    lastExcutetime.save()
    return render_to_response("input/gen.html",
                              add_csrf(request, xmls=xmlfileList, catalogs=catalogs))
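The makeEasyTag helper above is not shown in this listing. A plausible minimal version (an assumption, written in the same Python 2 style as the example) wraps a value in a new element with a single text node:

# Hypothetical sketch of makeEasyTag: build <tagname>value</tagname>.
def makeEasyTag(dom, tagname, value):
    tag = dom.createElement(tagname)
    tag.appendChild(dom.createTextNode(unicode(value)))
    return tag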
Example #3
def main(species,
         abundances=None,
         out_pref=None,
         constraints='european',
         diet="HighFiber",
         max_iters=10,
         cobraonly=False,
         with_essential=False):

    # avoid a mutable default argument: the dict is filled in below
    if abundances is None:
        abundances = {}

    #model_file_template = "%s/%%s.xml" % (SBML_DIR)
    # start with the default 0.01 for all species
    for s in species:
        if s not in abundances:
            abundances[s] = 0.01

    #count = 0.0
    SpeciesDict = {}
    for s in species:
        #model_file = model_file_template % (s)
        model_file = "%s/%s.xml" % (SBML_Dict[constraints], s)
        if not os.path.isfile(model_file):
            #print("%s doesn't exist. copying it from %s" % (model_file, ALL_SBML_DIR))
            print("%s doesn't exist. using it from %s" %
                  (model_file, ALL_SBML_DIR[constraints]))
            model_file = "%s/%s.xml" % (ALL_SBML_DIR[constraints], s)
        SpeciesDict[s] = {
            'File': model_file,
            'initAbundance': abundances[s],
            'orig_name': s
        }
        #count += 1

    out_file_pref = "%s%dmodels-%s-%s-%diters-" % (
        out_pref, len(species), constraints, diet, max_iters)
    out_dir = os.path.dirname(out_file_pref)
    utils.checkDir(out_dir)

    simulate_models(species,
                    SpeciesDict,
                    diet=diet,
                    out_file_pref=out_file_pref,
                    max_iters=max_iters,
                    cobraonly=cobraonly,
                    with_essential=with_essential)
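A hypothetical invocation (the species names, abundance, and output prefix below are placeholders, not taken from the project):

# Species IDs must match the SBML file names under SBML_Dict[constraints].
main(['Bacteroides_thetaiotaomicron', 'Eubacterium_rectale'],
     abundances={'Bacteroides_thetaiotaomicron': 0.05},
     out_pref='results/run1-',
     constraints='european',
     diet='HighFiber',
     max_iters=10)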
Example #4
def main(verbose):

    samples = ['mg_pp_hh', 'py8_pp_minbias']
    samples += ['mg_pp_tth']
    #samples = ['py8_pp_minbias']
    pileups = [0, 200, 1000]
    pileups = [0, 1000]  # overrides the previous line; pileup 200 is skipped here

    trackBranchNames = [
        "TruthTrack",
        "Track",
        "PBMatchedTracks",
        "TracksFromHit30",
        "SmearedTracksFromHits",
        "PBMatchedHitTracks",
    ]

    trackPlots = [
        "track1Pt",
        "track2Pt",
        "track3Pt",
        "track4Pt",
        "electron1Pt",
        "muon1Pt",
        "lepton1Pt",
    ]

    jetBranchNames = [
        'TruthTrackJets',
        'SmearedTrackJets',
        'PBMatchedTrackJets',
        'TruthHitTrackJets',
        'SmearedHitTrackJets',
        'PBMatchedHitTrackJets',
    ]

    jetPlots = [
        "jet1Pt",
        "jet2Pt",
        "jet3Pt",
        "jet4Pt",
        "jet5Pt",
        "jet6Pt",
    ]

    # signal acceptances for things from hits
    plotSignalAcceptances(1000,
                          ['SmearedTracksFromHits', 'PBMatchedHitTracks'],
                          True)
    plotSignalAcceptances(1000,
                          ['SmearedHitTrackJets', 'PBMatchedHitTrackJets'],
                          False)

    # signal acceptances for things from delphes objects
    plotSignalAcceptances(1000, ['Track', 'PBMatchedTracks'], True)
    plotSignalAcceptances(1000, ['SmearedTrackJets', 'PBMatchedTrackJets'],
                          False)
    #return

    # signal acceptances for things from hits
    plotSignalAcceptances(0, ['SmearedTracksFromHits', 'PBMatchedHitTracks'],
                          True)
    plotSignalAcceptances(0, ['SmearedHitTrackJets', 'PBMatchedHitTrackJets'],
                          False)

    # signal acceptances for things from delphes objects
    plotSignalAcceptances(0, ['TruthTrack', 'Track'], True)
    plotSignalAcceptances(0, ['Track', 'PBMatchedTracks'], True)
    plotSignalAcceptances(0, ['SmearedTrackJets', 'PBMatchedTrackJets'], False)

    can = TCanvas("can", "can", 500, 500)
    for sample in samples:
        print 'Plotting for sample', sample
        for pileup in pileups:

            print '\tPlotting for pileup', pileup

            iFileName = INPUT_DIR + '{0}_pu{1}.root'.format(sample, pileup)
            iFile = TFile.Open(iFileName)
            plotDict = {}

            outputBaseDir = OUTPUD_BASE_DIR + '/{0}_pu{1}/'.format(
                sample, pileup)
            checkDir(outputBaseDir)

            for branch in trackBranchNames + jetBranchNames:
                print '\t\tPlotting for branch', branch
                outputDir = outputBaseDir + appendSlash(branch)
                checkDir(outputDir)
                plotDict[branch] = {}
                if branch in trackBranchNames:
                    plotList = trackPlots
                else:
                    plotList = jetPlots
                for plot in plotList:

                    histoName = branch + "_" + plot
                    print 'plotting', histoName
                    h0 = iFile.Get(histoName)
                    xTitle = plot.replace('track', 'Track ').replace(
                        'jet', 'Track Jet ').replace('Pt', ' p_{T}')
                    #print xTitle
                    h0.GetXaxis().SetTitle(xTitle + " [GeV]")
                    h0.GetYaxis().SetTitle("Fraction of events")
                    h0.Draw()
                    nbins = h0.GetNbinsX()
                    #print h0.Integral()
                    #print h0.Integral(1, nbins)
                    #print h0.Integral(1, nbins+1) # should include overflow
                    #print ''

                    # plot acceptance
                    can.SaveAs(outputDir + plot + ".pdf")

                    #acceptance = h0.GetCumulative(False)
                    acceptance = getReverseCumulativeHisto(h0)

                    xaxis = acceptance.GetXaxis()
                    yaxis = acceptance.GetYaxis()

                    xaxis.SetTitle(xTitle + " [GeV]")
                    yaxis.SetTitle("Acceptance")
                    if 'jet' in branch.lower():
                        xaxis.SetRangeUser(200, 600)
                    else:
                        xaxis.SetRangeUser(0, 100)

                    #acceptance.DrawNormalized()
                    acceptance.Draw()
                    can.SaveAs(outputDir + "acceptance_" + plot + ".pdf")

                    efficiency = h0.GetCumulative(True)
                    #efficiency.DrawNormalized()
                    #efficiency.DrawNormalized()
                    #can.SaveAs(outputDir+"efficiency_"+plot+".pdf")

                    plotDict[branch][plot] = {
                        'efficiency': efficiency,
                        'acceptance': acceptance
                    }

            #print plotDict

            # plot trigger rates (for minbias) in the different scenarios
            s1 = {
                #"TruthTrack",
                "Track": {
                    'marker': 20,
                    'colour': colours.black,
                    'leg': 'All tracks'
                },
                "PBMatchedTracks": {
                    'marker': 23,
                    'colour': colours.orange,
                    'leg': 'Tracks from PB'
                }
            }
            s2 = {
                'SmearedTrackJets': {
                    'marker': 20,
                    'colour': colours.black,
                    'leg': 'All tracks'
                },
                'PBMatchedTrackJets': {
                    'marker': 23,
                    'colour': colours.orange,
                    'leg': 'Tracks from PB'
                }
            }
            s3 = {
                'SmearedHitTrackJets': {
                    'marker': 20,
                    'colour': colours.black,
                    'leg': 'All tracks'
                },
                'PBMatchedHitTrackJets': {
                    'marker': 23,
                    'colour': colours.orange,
                    'leg': 'Tracks from PB'
                }
            }
            s4 = {
                'SmearedTracksFromHits': {
                    'marker': 20,
                    'colour': colours.black,
                    'leg': 'All tracks'
                },
                'PBMatchedHitTracks': {
                    'marker': 23,
                    'colour': colours.orange,
                    'leg': 'Tracks from PB'
                }
            }

            s00 = [
                "TracksFromHit30",
                "SmearedTracksFromHits",
                "PBMatchedHitTracks",
            ]

            ############################
            # Make plots of trigger rate
            ############################

            makeRatePlot(s1, True, outputBaseDir, plotDict, sample, pileup)
            makeRatePlot(s2, False, outputBaseDir, plotDict, sample, pileup)
            makeRatePlot(s3, False, outputBaseDir, plotDict, sample, pileup)
            makeRatePlot(s4, True, outputBaseDir, plotDict, sample, pileup)
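The acceptance curves above come from getReverseCumulativeHisto, which is not shown in this listing. A minimal sketch of such a helper (an assumption: each bin holds the fraction of events at or above that bin's lower edge, overflow included) could look like:

def getReverseCumulativeHisto(h):
    # Hypothetical sketch: reverse cumulative of a TH1, normalised to the
    # total number of entries including overflow.
    hcum = h.Clone(h.GetName() + "_revcum")
    nbins = h.GetNbinsX()
    total = h.Integral(1, nbins + 1)  # include the overflow bin
    for i in range(1, nbins + 1):
        frac = h.Integral(i, nbins + 1) / total if total else 0.0
        hcum.SetBinContent(i, frac)
    return hcum

This reading is consistent with Example #5, where the same histograms are scaled by 40 * 1e3 to turn a per-event fraction into a kHz rate.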
Example #5
def makeRatePlot(scenarioSet, doTrack, outputBaseDir, plotDict, sample,
                 pileup):

    gStyle.SetGridStyle(3)
    gStyle.SetGridColor(kGray)

    can = TCanvas("can3", "", 500, 500)
    can.SetGrid()
    can.SetLogy()
    #can.SetLogx()

    # scenario set contains the "nominal" objects and the PB matched objects
    outputDir = outputBaseDir + "Rates/"
    checkDir(outputDir)
    outputDir = outputDir + scenarioSet.keys()[0] + '/'
    checkDir(outputDir)

    plots = plotDict[scenarioSet.keys()[0]].keys()

    for plot in plots:
        pCounter = 0
        #leg = prepareLegend('topRight')
        if doTrack:
            #leg = TLegend(0.55, 0.55, 0.8, 0.70)
            # legend for modified borders
            leg = TLegend(0.65, 0.65, 0.9, 0.80)
        else:
            leg = TLegend(0.3, 0.5, 0.8, 0.70)

        leg.SetTextSize(TEXT_SIZE)
        for scenario in scenarioSet.keys():

            # get style dict
            style = scenarioSet[scenario]

            # scale to trigger rate
            rate = plotDict[scenario][plot]['acceptance']
            titleInfo = sampleInfo[sample]
            # scale to 40 MHz (appropriate for minbias, not for other samples);
            # the acceptance histogram holds a fraction of events, so scaling
            # by 40 * 1e3 gives a rate in kHz
            rate.Scale(40 * 1e3)

            rate.SetTitleSize(TITLE_SIZE, 'X')
            rate.SetLabelSize(TITLE_SIZE, 'X')
            rate.SetTitleSize(TITLE_SIZE, 'Y')
            rate.SetLabelSize(TITLE_SIZE, 'Y')

            # style
            #rate.SetTitle("{0} #LT#mu#GT = {1}".format(titleInfo['title'], pileup))
            xaxis = rate.GetXaxis()
            yaxis = rate.GetYaxis()
            yaxis.SetTitle('Rate [kHz]')
            rate.SetMinimum(300)

            # settings for larger font
            xaxis.SetNdivisions(5, 5, 0)
            xaxis.SetTitleOffset(X_AXIS_OFFSET)
            yaxis.SetTitleOffset(Y_AXIS_OFFSET)

            if doTrack:
                xaxis.SetRangeUser(0, 175)
                if '1Pt' in plot:
                    xaxis.SetRangeUser(0, 175)
                if '2Pt' in plot:
                    xaxis.SetRangeUser(0, 150)
                if '3Pt' in plot:
                    xaxis.SetRangeUser(0, 70)
                if '4Pt' in plot:
                    xaxis.SetRangeUser(0, 50)
                myText(0.65, 0.90, '#sqrt{s} = 100 TeV', TEXT_SIZE)
                myText(0.65, 0.85, '{0}'.format(titleInfo['title']), TEXT_SIZE)
                myText(0.65, 0.80, "#LT#mu#GT = {0}".format(pileup), TEXT_SIZE)
            else:
                myText(0.3, 0.80, '#sqrt{s} = 100 TeV', TEXT_SIZE)
                myText(0.3, 0.75, '{0}'.format(titleInfo['title']), TEXT_SIZE)
                myText(0.3, 0.70, "#LT#mu#GT = {0}".format(pileup), TEXT_SIZE)
                xaxis.SetRangeUser(X_MIN, 300)

            rate.SetMarkerStyle(style['marker'])
            rate.SetMarkerColor(style['colour'])
            rate.SetLineColor(style['colour'])

            if pCounter == 0:
                rate.Draw()
                #elif 'jet' in plot: # only draw PU supressed for jet histograms
            else:
                rate.Draw('same')
            pCounter += 1

            leg.AddEntry(rate, style['leg'], 'lp')

        leg.Draw()
        can.SaveAs(outputDir + 'rate_{0}.pdf'.format(plot))
Example #6
def main(inputFile, outputDirBase):

    outputDirBase = appendSlash(outputDirBase)
    checkDir(outputDirBase)

    # open file
    iFile = TFile.Open(inputFile)

    ptRanges = [
        #[0,2],
        [2, 5],
        [6, 10],
        [11, 30],
        [31, 50]
    ]
    etaRanges = [[0, 0.2], [0.2, 0.4], [0.4, 0.8], [0.8, 1.0], [1.0, 1.2],
                 [1.2, 1.4], [1.4, 1.6], [1.6, 1.8], [1.8, 2.0]]

    parameters = {
        'ptRes': {
            'title': 'p_{T} resolution',
            'units': '',
            'label': '#deltap_{T}/p_{T}'
        },
        'z0Res': {
            'title': 'z0 resolution',
            'units': '[mm]',
            'label': '#deltaz_{0}'
        },
        'ptResRaw': {
            'title': 'p_{T} resolution',
            'units': '[GeV]',
            'label': '#deltap_{T}'
        },
        #'d0Res'   : {'title' : 'd0 resolution' , 'units' : '[mm]', 'label': '#deltad_{0}'},

        ##'phiRes'  : {'title' : '#phi resolution',        'units' : '[deg]',      'label' : '#delta#phi'},
        ##'CtgThetaRes': {'title' : 'cot(#theta) resolution', 'units' : '',      'label' : '#deltacot(#theta)'},
    }

    branchNames = [
        #"TruthTrack",
        "Track",
        #"PBMatchedTracks",
        "TracksFromHit30",
        "SmearedTracksFromHits",
        #"PBMatchedHitTracks",
    ]

    for branch in branchNames:
        outputDir = appendSlash(outputDirBase + branch)
        checkDir(outputDir)

        for par in parameters.keys():

            ##############################################
            # Make 1d resolution plot across all eta range
            ##############################################
            basicCan = TCanvas(branch + par + "basicCan", "", 500, 500)
            basicRes = iFile.Get(branch + '_' + par)
            # M: "improve fit", Q: "quiet" (no printing); an O option would
            # also suppress drawing, but it is not used here
            basicRes.Fit("gaus", "MQ")
            theFit = basicRes.GetFunction("gaus")
            basicCan.SetLogy()
            basicRes.Draw()
            theFit.Draw('same')
            basicCan.SaveAs(outputDir + par + '_basic.pdf')

            ##########################################################
            # Make proper resolution plots in pT and eta slices from 3D plot
            ##########################################################
            plotName = branch + '_' + par + '_pt_eta'
            resPlot = iFile.Get(plotName)
            if not is_TH3(resPlot):
                print plotName
                print 'ERROR, {0} is not of type TH3, it is {1}'.format(
                    resPlot.GetName(), type(resPlot))
                sys.exit()

            # Create legend
            #leg = prepareLegend('bottomRight')
            leg = prepareLegend('topLeft')
            leg.SetHeader('Track p_{T} [GeV]')

            # Fit resolution, extract sigma
            graphs = []
            for n, ptRange in enumerate(ptRanges):
                graphs.append(TGraphErrors())
                for i, etaRange in enumerate(etaRanges):

                    xposition = (etaRange[0] + etaRange[1]) / 2.0
                    fitResults = extractResolution(resPlot, ptRange, etaRange,
                                                   par, outputDir)
                    yVal = fitResults['Sigma'][0]
                    yErr = fitResults['Sigma'][1]
                    if par == "phiRes":  # want phi resolution to be in degrees (to compare with tkLayout)
                        yVal *= 180.0 / PI
                        yErr *= 180.0 / PI
                    graphs[n].SetPoint(i, xposition, yVal)
                    graphs[n].SetPointError(i, 0, yErr)
                    graphs[n].SetMarkerColor(colours[n])
                    graphs[n].SetLineColor(colours[n])
                    graphs[n].SetMarkerStyle(markers[n])
                leg.AddEntry(graphs[n],
                             '{0} < pT < {1}'.format(ptRange[0],
                                                     ptRange[1]), 'lp')

            # Draw graphs
            can = TCanvas(branch + par + 'can', 'can', 500, 500)
            can.SetGrid()
            can.SetLogy()
            mg = TMultiGraph()
            for g in graphs:
                mg.Add(g, 'p')

            # set plot title
            plotHeader = '{0} from Delphes'.format(parameters[par]['title'])
            plotHeader = ''  # overrides the title above; draw without a header
            xTitle = '#eta'
            yTitle = '{0} {1}'.format(parameters[par]['label'],
                                      parameters[par]['units'])
            mg.SetTitle('{0};{1};{2}'.format(plotHeader, xTitle, yTitle))
            mg.Draw('a')
            leg.Draw()

            # Change the axis limits
            mg.GetHistogram().GetXaxis().SetNdivisions(5, 5, 0)
            mgMin = mg.GetHistogram().GetYaxis().GetXmin()
            mgMax = mg.GetHistogram().GetYaxis().GetXmax()
            if mgMax / 10 < mgMin or True:  # 'or True' makes the rescaling unconditional
                mg.GetHistogram().GetYaxis().SetRangeUser(
                    mgMin * 0.5, mgMax * 5)  # make space for legend

            can.SaveAs(outputDir + par + '_graphs.pdf')
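extractResolution is not shown either; given the TH3 check above, a plausible sketch (assumptions: x holds pT, y holds eta, z holds the residual, and outputDir is unused here) projects the selected pT/eta window onto the residual axis and fits a Gaussian:

def extractResolution(resPlot, ptRange, etaRange, par, outputDir):
    # Hypothetical sketch; the real helper may also save control plots.
    xax, yax = resPlot.GetXaxis(), resPlot.GetYaxis()
    proj = resPlot.ProjectionZ(
        '{0}_pt{1}_eta{2}'.format(par, ptRange[0], etaRange[0]),
        xax.FindBin(ptRange[0]), xax.FindBin(ptRange[1]),
        yax.FindBin(etaRange[0]), yax.FindBin(etaRange[1]))
    proj.Fit('gaus', 'MQ0')  # 0: fit without drawing
    fit = proj.GetFunction('gaus')
    return {'Sigma': (fit.GetParameter(2), fit.GetParError(2))}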
Example #7
import os
import cv2
import json
import utils
import requests
from config import *
from time import time

if __name__ == '__main__':
    JOIN = os.path.join
    images = [
        img for img in os.listdir(image_dir) if img.upper().endswith('.JPG')
    ]
    utils.checkDir(output_dir)
    timer = time()
    for image in images:
        print('image: {}'.format(image))

        # detect
        url = base_url + '/detect'
        img = cv2.imread(JOIN(image_dir, image))  # read but unused in this excerpt
        files = [('files', open(JOIN(image_dir, image), 'rb'))]  # field name matches request.files.getlist('files')
        ret = requests.post(url=url, files=files).json()
        queue = ret['queue']

        # askResult
        url = base_url + '/askResult'
        files = {'data': json.dumps({'queue': queue})}
        ret = requests.post(url=url, files=files)
        results = ret.json()['result']
        for r in results:
            pass  # per-detection handling not shown in this excerpt
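This client pairs with the Flask endpoint of Example #1: the queue identifier returned by /detect is posted back to /askResult to retrieve the detection results.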
Example #8
import theano
import theano.tensor as T
import theano.tensor.nlinalg as nlinalg
import nodes.iaf2 as iaf
import model.mars1 as M
import utils  # provides checkDir and sharedf used below
from optimisor.adagrad import opt as adagrad
from optimisor.sgd import opt as sgd
from optimisor.sgdMom import opt as sgdMom

f32 = theano.config.floatX
SAMPLING = 10
DECAY = 1e-3
LR0 = 0.05
MAXITER = 3000
VISUAL = True
utils.checkDir('./tmp', build=True)

print('... constructing model')
VALX = -1.2
x = utils.sharedf([VALX, VALX])
model = M.banana()

print('... variational distribution and symbol flows')
lr = T.fscalar()
qiaf = iaf.experiment(lr, 2)
e = qiaf.getNoise(SAMPLING)
z = qiaf.forward(e, interout=False)

logqz = qiaf.mcLogQZ(e)
logpz = T.mean(model.logPrior(z))
logpxz = T.mean(model.logLik(x, z))
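The listing stops before the objective is assembled. A plausible continuation (an assumption, not taken from the source) combines the three terms into the ELBO, using log p(x, z) = log p(x|z) + log p(z):

# Hypothetical continuation: maximise the ELBO, i.e. minimise its negative.
elbo = logpxz + logpz - logqz
loss = -elbo
# The opt() helpers imported above presumably build the update rules, e.g.
# updates = adagrad(loss, qiaf, lr)  # hypothetical signature
# train = theano.function([lr], loss, updates=updates)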
Example #9
def do_process(data, settings):
    # Preparing directories
    utils.checkDir(utils.GET("object_dir"), "Object")
    if utils.GET("toposort_verbose_logging_dir") is not None and utils.GET(
            "toposort_verbose_logging_dir") != "":
        utils.checkDir(utils.GET("toposort_verbose_logging_dir"),
                       "Toposort verbose logging")
    originalCXX = utils.GET("original_cxx_executable")
    originalCC = utils.GET("original_cc_executable")

    finishedList = Manager().list()

    totalLength = len(data["compile"])
    compileTaskPool = Pool()
    console.log("Compiling .o (total: {})".format(totalLength))
    for r in range(totalLength):
        i = data["compile"][r]
        execname = "(unknown)"
        cmdline = list(filter(lambda x: x != "", i.split(" ")))
        filehashpath = ["0" for i in range(0, 40)]
        for argnum in range(len(cmdline)):
            if cmdline[argnum] == originalCXX:
                cmdline[argnum] = utils.GET("targeted_cxx_executable")
                cmdline[argnum] += " -emit-llvm"
            elif cmdline[argnum] == originalCC:
                cmdline[argnum] = utils.GET("targeted_cc_executable")
                cmdline[argnum] += " -emit-llvm"
            elif cmdline[argnum] == "-o":
                filepath = realpath(cmdline[argnum + 1])
                filehashpath = utils.sha1sum(filepath)
                sha1Table[filehashpath] = filepath
                cmdline[argnum + 1] = realpath(
                    utils.GET("object_dir") + "/" + filehashpath)
                execname = utils.findName(filepath)
            elif cmdline[argnum] == "-c":
                cmdline[argnum] = "-S"
            elif cmdline[argnum] == "-g":
                cmdline[argnum] = ""
        command = " ".join(cmdline)
        compileTaskPool.apply_async(single_compile,
                                    args=(command, filehashpath, execname, r,
                                          totalLength, finishedList,
                                          settings.clean),
                                    error_callback=console_error_and_exit)
    compileTaskPool.close()
    compileTaskPool.join()

    # Construct the graph
    console.success("All object files are compiled.")

    console.info("Preparing linking relationships")
    graphData = data["scripts"]

    for i in graphData:
        itemPath = i["target"]["abs_path"]
        hashedItemPath = utils.sha1sum(itemPath)
        sha1Table[hashedItemPath] = itemPath
        itemDependencies = i["target"]["dependencies"]
        dependencyList[hashedItemPath] = utils.deduplicate(
            utils.pathToSha1(itemDependencies, sha1Table))
        if hashedItemPath in dependencyList[hashedItemPath]:
            console.warn("Self-circle found. Ignoring.")
            dependencyList[hashedItemPath].remove(hashedItemPath)

    preserveProcess = utils.GET("preserve_process")
    if preserveProcess is not None and preserveProcess != "":
        console.info("Saving metadata")
        sha1FilePath = utils.GET("object_dir") + "/" + preserveProcess
        try:
            json.dump(
                sha1Table,
                open(utils.GET("object_dir") + "/" + preserveProcess, "w"))
            console.success("Metadata saved.")
        except PermissionError:
            console.warn(
                "Process file {} is not writable, while preserve_process is on."
                .format(sha1FilePath))

    console.info("Calculating linking sequence")
    try:
        currList = utils.topoSort(dependencyList, finishedList, sha1Table)
    except ValueError:
        console.error("Topo sort failed to complete. Please check your data.")
        sys.exit(1)
    console.success("Linking sequence calculated.")

    if settings.clean or settings.clean_linking:
        console.info("Cleaning linking targets")
        for i in dependencyList.keys():
            if os.access(utils.GET("object_dir") + "/" + i, os.W_OK):
                os.unlink(utils.GET("object_dir") + "/" + i)
        console.success("Linking targets cleaned.")

    if len(currList) != len(graphData):
        console.warn("Linking sequence is inconsistent with the linking recipe")
    console.debug("Linking sequence:", currList, "or",
                  list(map(lambda x: sha1Table[x], currList)))
    console.info("Start linking")
    ctrLen = len(currList)
    p = Pool()
    for idx, obj in enumerate(currList):
        console.info("Linking {} ({})  [{}/{}]".format(sha1Table[obj], obj,
                                                       idx + 1, ctrLen))
        p.apply_async(single_linking,
                      args=(obj, finishedList),
                      error_callback=console_error_and_exit)
    p.close()
    p.join()
    console.success("All targets are linked.")
    console.success("Finished.")
Example #10
#outputDir = "FakeRate_phiEta_tolerance1mm_phi2GeV/"
#outputDir = "FakeRate_phiEta_tolerance01mm_phi2GeV/"
#outputDir = "FakeRate_phiEta_tolerance01mm_phi2GeV_curvature0005/"
#outputDir = "FakeRate_phiEta_tolerance05mm_phi2GeV_curvature0005/"
#outputDir = "FakeRate_phiEta_tolerance05mm_phi2GeV_curvature0005_nVertexSigma5/"
#outputDir = "processedTracks/"
##outputDir = "FakeRate_tolerance05mm_phi2GeV_multiCurvature_nVertexSigma5/"
#outputDir = "processedTracks_kappa_deltaPhi/"
#outputDir = "FakeRate_phiEta_tolerance01mm_phi2GeV_curvature001/"
#outputDir = "processedTracks_kappa_deltaPhi_zresiduum_BDT/"
#outputDir = "processedTracks_kappa_deltaPhi_zresiduum/"

outputDir = path.split('/')[-2] + '/'  # 'path' is defined earlier in the (unshown) script
print outputDir, path

checkDir(outputDir)

cols = {
    10: colours.blue,
    20: colours.orange,
    30: colours.red,
    40: colours.green,
    50: colours.grey
}

binslist = range(0, 21) + range(22, 31, 2) + [35.0, 40.0, 50.0, 100.0]
print binslist

# binslist for CDR
binslist = [0.0, 5.0, 10.0, 15.0, 30.0, 40.0, 60.0, 100.0]
binsarray = array('d', binslist)
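binsarray is the form ROOT expects for variable-width binning. A hypothetical use (the histogram below is a placeholder, not from the script):

from ROOT import TH1F

h = TH1F("h", "", 100, 0.0, 100.0)  # placeholder pT histogram, 1 GeV bins
h_cdr = h.Rebin(len(binslist) - 1, "h_cdr", binsarray)  # rebin onto the CDR edges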
Example #11
def extractTagForStudy(study, data_folder, out_images_dir, tag_list, non_tag_us, 
                        tag_bounding_box, server_path, greedy=False):
    
    logging.info("=========== PROCESSING SUBJECT: {} ===============".format(study.name))
    out_dir = getStudyOutputFolder(study, data_folder, out_images_dir)
    utils.checkDir(out_dir, delete=(not greedy))
    
    # look for the info.csv file in the output folder. 
    # if exists, read the file names and the tags
     
    i = 1
    file_paths = list(study.glob('**/*.dcm'))
    csvrows = []
    unknown = 0
    tag_manager = taginfo.TagInfoFile(out_dir)
    tag_statistic = {}
    prev_tags = {}
    if greedy and tag_manager.exists():
        # If this is in greedy mode, try to get the names of the files
        # that have an undecided tag:
        tag_manager.read()
        # get a copy of the previously created tags for all files
        prev_tags = tag_manager.getDictWithNameKeys()
        # clear the tags stored in the file. (This does not delete the file, just empties the data structure)
        tag_manager.clear()
        tag_manager.deleteTagFile()

    for file_path in file_paths:
        logging.debug("FILE {}: {}".format(i, str(file_path)))

        # Make sure path exists
        if not file_path.exists():
            logging.warning('File: {} does not exist, skipping'.format(file_path))
            continue

        # if in greedy mode, look at the previous tag and decide whether we want to extract it again
        getTag = True
        tag = None
        us_type = None
        if greedy and len(prev_tags) > 0:
            name = file_path.name
            if name in prev_tags:
                tag = prev_tags[name][1]
                us_type = prev_tags[name][0]
                if tag in ['Unknown', 'Undecided', 'No tag']:
                    getTag = True
                else:
                    getTag = False

        if getTag:
            start = time.time()
            # Get the representative frame
            np_frame, us_type, capture_model = extractImageArrayFromUS(file_path, out_dir=out_dir)
            end = time.time()
            logging.debug('Preprocessing took : {} seconds'.format(end-start))
            if len(capture_model)>0 and capture_model not in tag_bounding_box.keys():
                logging.warning('US Model: {} not supported for file: {}'.format(capture_model, file_path))
                del np_frame
                continue
            # Extract the tag
            start = time.time()
            tag = 'Unknown'
            if np_frame is not None and \
                us_type not in non_tag_us and \
                capture_model in tag_bounding_box.keys():
                # Run tag extraction
                tag = tess.extractTagFromFrame(np_frame, tag_bounding_box[capture_model], tag_list)
            end = time.time()
            logging.debug('Tag extraction took : {} seconds'.format(end-start))
            del np_frame
            if tag in ['Unknown', 'Undecided', 'No tag']:
                unknown += 1
        else:
            logging.debug('Skipping the file: {}, tag: {}, type: {}, because it was known'.format(file_path, tag, us_type))

        tag_manager.addTag(file_path.parent, server_path, file_path.name, us_type, tag, write=True)
        i+=1
        gc.collect()

    tag_statistic = tag_manager.tag_statistic
    # If all unknown, delete the tag file. 
    if unknown == len(file_paths):
        tag_manager.deleteTagFile()
        tag_manager.clear()
    return tag_statistic
Example #12
def main(args):
    data_folder = Path(args.dir)
    out_images_dir = Path(args.out_dir)

    utils.checkDir(out_images_dir, False)  # False: keep an existing output directory
    utils.setupLogFile(out_images_dir, args.debug)
    
    studies = []
    for dirname, dirnames, __ in os.walk(str(data_folder)):
        if len(dirnames) == 0:
            studies.append(Path(dirname))
            
    logging.info('Found {} studies '.format(len(studies)))
    print('Found {} studies '.format(len(studies)))
    
    # read the list of acceptable tags in the ultrasound file
    tag_list = utils.getTagsList()
    tag_statistic = dict.fromkeys(tag_list, 0)
    tag_statistic['Unknown'] = 0
    tag_statistic['Undecided'] = 0
    tag_statistic['No tag'] = 0
    
    # Approximate bounding box of where the tag is written, according to the
    # US model
    tag_bounding_box = { 'V830':[[40,75], [255,190]],
                         'LOGIQe':  [[0,55], [200,160]],
                         'Voluson S': [[40,75], [255,190]],
                         'LOGIQeCine': [[0,0],[135,90]],
                         'Turbo': [[75,20],[230,80]],
                         'Voluson E8': [[40,75], [255,250]]
                        }

    # list of ultrasound image types whose tags we do not care about right now.
    non_tag_us = ['Unknown', 'Secondary capture image report',
                    'Comprehensive SR', '3D Dicom Volume']

    
    # Also read in study directories that might have been finished by a previous run - do not want to rerun them again
    finished_study_file = out_images_dir/'finished_studies.txt'
    finished_studies = None
    if finished_study_file.exists():
        with open(finished_study_file) as f:
            finished_studies = f.read().splitlines()
            finished_studies = [study for study in finished_studies if study.strip()]
    if finished_studies is not None:
        logging.info('Found {} finished studies'.format(len(finished_studies)))
        cleaned_studies = [study for study in studies if str(study) not in finished_studies]
        # Get statistics for the finished studies
        for study in finished_studies:
            logging.info('Will skip: {}'.format(study))
            try:
                infocsv_dir = getStudyOutputFolder(Path(study), data_folder, out_images_dir)
                logging.info('Opening: {}'.format(infocsv_dir))
                tag_file_man = taginfo.TagInfoFile(infocsv_dir)
                tag_file_man.read()
                if tag_file_man.getNumFiles() > 0:
                    for tag in tag_file_man.tag_statistic:
                        if tag not in tag_statistic:
                            tag_statistic[tag] = 0
                        tag_statistic[tag] += tag_file_man.tag_statistic[tag]        
            except (OSError, ValueError) as err:
                logging.warning('Error reading previously created tags.csv for subject: {}: {}'.format(study, err))
            except:
                logging.warning('Error reading previously created tags.csv for subject: {}'.format(study))
                logging.warning('Unknown except while reading csvt: {}'.format(sys.exc_info()[0]))
    else:
        cleaned_studies = studies
    del studies

    if args.use_threads:
        with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
            # Start the load operations and mark each future with its URL
            future_tags = {executor.submit(extractTagForStudy, study, 
                                            data_folder, out_images_dir, tag_list,
                                            non_tag_us, tag_bounding_box, 
                                            Path(args.server_path), args.greedy ): study for study in cleaned_studies}
            for future in concurrent.futures.as_completed(future_tags):
                d = future_tags[future] 
                logging.info('Finished processing: {}'.format(d))
                this_tag_statistic = future.result()
                #logging.info(future.result())
                for key, value in this_tag_statistic.items():
                    tag_statistic[key] += value
                with open(finished_study_file, "a+") as f:
                    f.write(str(d)+os.linesep)
    else:
        i=1
        for study in cleaned_studies:
            this_tag_statistic = extractTagForStudy(study, data_folder, out_images_dir, 
                                                    tag_list, non_tag_us, tag_bounding_box, 
                                                    Path(args.server_path), args.greedy)
            logging.info('Finished processing: {}'.format(study))
            for key, value in this_tag_statistic.items():
                tag_statistic[key] += value
            endstr = "\n" if i % 50 == 0 else "."
            print("", end=endstr)
            with open(finished_study_file, "a+") as f:
                f.write(str(study)+os.linesep)
            i+=1
    
    pprint(tag_statistic)
    with open(out_images_dir/"NumberOfTags.json", "w") as outfile:
        json.dump(tag_statistic, outfile, indent=4) 
    logging.info(pformat(tag_statistic))
    logging.info('---- DONE ----')
    print('------DONE-----------')
Example #13
def main(args):
    data_folder = Path(args.dir)
    out_folder = Path(args.out_dir)
    utils.checkDir(out_folder, False)

    #  Setup logging:
    utils.setupLogFile(out_folder, args.debug)
    if args.cine_mode:
        tags = utils.getCineTagsList(args.tags)
    else:
        tags = utils.getTagsList(args.tags)
    print('Tags: {}'.format(tags))

    try:
        for tag in tags:
            out_folder_tag, out_tag_list_file_path, out_tag = getTagDirPathListFile(
                out_folder, tag)
            utils.checkDir(out_folder_tag, args.delete_existing)

    except Exception as e:
        logging.error("Couldn't split the tags string: {}".format(e))
        return

    gt_ga = {}
    if args.gt_ga_list:
        try:
            with open(args.gt_ga_list) as f:
                csv_reader = csv.DictReader(f)
                for line in csv_reader:
                    gt_ga[line['StudyID']] = {}
                    if line['ga_boe'] != ".":
                        gt_ga[line['StudyID']]['ga_boe'] = int(line['ga_boe'])
                    else:
                        gt_ga[line['StudyID']]['ga_boe'] = -1

                    if line['ga_avua'] != ".":
                        gt_ga[line['StudyID']]['ga_avua'] = int(
                            line['ga_avua'])
                    else:
                        gt_ga[line['StudyID']]['ga_avua'] = -1
        except OSError as e:
            logging.error(
                'Error reading the gt ga file {} \n Error: {}'.format(
                    args.gt_ga_list, e))
            gt_ga = {}
        print('Found {} studies with GT Ga'.format(len(gt_ga)))

    bounding_box = [[0, 0], [255, 250]]
    # Find all the info.csv files:
    tag_file_names = list(
        data_folder.glob('**/' + taginfo.TagInfoFile.file_name))
    tag_file_list_rows = []

    for tag_file in tag_file_names:
        logging.info('--- PROCESSING: {}'.format(tag_file))
        files_to_copy = []
        tag_file_info = taginfo.TagInfoFile(tag_file.parent)
        tag_file_info.read()
        file_tag_pairs = tag_file_info.getFileNamesWithTags(tags)

        if len(file_tag_pairs) == 0:
            continue
        # print(file_tag_pairs[0])

        for file_tag_dict in file_tag_pairs:
            file_name = Path(file_tag_dict['File']).name
            name_no_suffix = Path(file_name).stem
            jpg_file_name = tag_file.parent / (name_no_suffix + '.jpg')

            cropped = None
            if jpg_file_name.exists():
                simage = sitk.ReadImage(str(jpg_file_name))
                if args.crop_images:
                    size = simage.GetSize()
                    cropped = sitk.Crop(
                        simage, bounding_box[0],
                        [size[i] - bounding_box[1][i] for i in range(2)])
                else:
                    cropped = simage

            tag = file_tag_dict['tag']
            tag_folder, out_tag_list_file_path, out_tag = getTagDirPathListFile(
                out_folder, tag)

            target_simlink_name = tag_folder / file_name

            # Get the data for the global list
            if args.create_global_list:
                if target_simlink_name.exists():
                    tag_file_row = {}
                    study_name = (tag_file.parent).name
                    pos = study_name.find('_')
                    if pos == -1:
                        logging.warning(
                            "Study name in path {} not in the correct format for a valid study"
                            .format(study_name))
                        continue

                    study_id = study_name[:pos]
                    study_date = study_name[pos + 1:pos + 9]
                    tag_file_row['study_id'] = study_id
                    tag_file_row['study_date'] = study_date
                    if len(gt_ga) > 0 and study_id in gt_ga:
                        tag_file_row['ga_boe'] = str(
                            gt_ga[study_id]['ga_boe']
                        ) if gt_ga[study_id]['ga_boe'] > 0 else ''
                        tag_file_row['ga_avua'] = str(
                            gt_ga[study_id]['ga_avua']
                        ) if gt_ga[study_id]['ga_avua'] > 0 else ''
                    else:
                        tag_file_row['ga_boe'] = ''
                        tag_file_row['ga_avua'] = ''

                    tag_file_row['file_path'] = target_simlink_name
                    tag_file_row['tag'] = out_tag
                    tag_file_list_rows.append(tag_file_row)
                else:
                    logging.info(
                        'The file: {}, study id: {} does not exist'.format(
                            target_simlink_name, (tag_file.parent).name))
                continue

            # If not in global list generation mode, deal with the file based on what has been requested.
            out_jpg_name = tag_folder / (name_no_suffix + '.jpg')
            if os.path.exists(target_simlink_name):
                # count all files with that link
                logging.info('<---Found duplicates! ----> ')
                ext = Path(file_name).suffix
                all_target_simlink_files = list(
                    Path(tag_folder).glob(name_no_suffix + '*' + ext))
                new_name = name_no_suffix + '_' + str(
                    len(all_target_simlink_files)) + ext
                target_simlink_name = tag_folder / new_name
                new_jpg_name = name_no_suffix + '_' + str(
                    len(all_target_simlink_files)) + '.jpg'
                out_jpg_name = tag_folder / new_jpg_name

            if cropped is not None:
                logging.info('Writing jpg image: {}'.format(out_jpg_name))
                sitk.WriteImage(cropped, str(out_jpg_name))

            source_file = Path(args.som_home_dir) / Path(file_tag_dict['File'])
            if not args.create_only_lists:
                logging.info('Copying file: {} -> {}, study:{}'.format(
                    file_name, target_simlink_name, (tag_file.parent).stem))
                try:
                    shutil.copyfile(source_file, target_simlink_name)
                except FileNotFoundError:
                    logging.warning("Couldn't find file: {}".format(file))
                    continue
                except PermissionError:
                    logging.warning(
                        "Didn't have enough permissions to copy to target: {}".
                        format(target_simlink_name))
                    continue
            else:
                with open(out_tag_list_file_path, "a") as fh:
                    fh.write(str(source_file) + "\n")

    if args.create_global_list and len(tag_file_list_rows) > 0:
        logging.info('Number of tag file rows: {}, writing'.format(
            len(tag_file_list_rows)))
        outfilepath = out_folder / 'all_files_gt_ga.csv'
        try:
            csvwrap.CSVWrap.writeCSV(tag_file_list_rows, outfilepath)
        except IOError as e:
            logging.error(
                'Error writing the output file: {} \n Error: {}'.format(
                    outfilepath, e))
    logging.info('----- DONE -----')