Example #1
    def __init__(self, m, N):
        self.m = m  #This is the average weight (average number of messages sent)
        self.N = N
        self.no = math.ceil(m) + 1  #Initial number of nodes in graph.
        self.G = nx.complete_graph(
            self.no - 1
        )  #complete_graph(n) builds a complete graph on n nodes (here self.no - 1).
        self.newEdges = []
        self.newNodes = []
        self.vertexDegree = [
        ]  #A list of vertices, each vertex appears an amount of times equal to its 'weighted' degree.
        self.newWeight = 0
        self.Analysis = Analysis.Analysis()
        for node in self.G.nodes():
            for i in range(nx.degree(self.G, node)):
                self.vertexDegree.append(node)

        #Attaching initial weights of 1 to the existing edges.
        for edges in self.G.edges():
            self.G[edges[0]][edges[1]]['weight'] = 1

        #Checking variables are storing the desired values.
        print(
            "Average weight:{}\nRequired nodes, N:{}\nInitial number of nodes:{}\n{}"
            .format(self.m, self.N, self.no, self.vertexDegree))
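A minimal, self-contained sketch of the same graph-seeding logic, assuming import math and import networkx as nx; the average weight m below is a hypothetical value and the Analysis dependency is omitted:

import math
import networkx as nx

m = 3.4                        # hypothetical average weight
no = math.ceil(m) + 1          # initial node count, as in the constructor
G = nx.complete_graph(no - 1)

# Each node appears once per incident edge (the 'weighted' degree list).
vertexDegree = [n for n in G.nodes() for _ in range(nx.degree(G, n))]

# Seed every existing edge with weight 1.
nx.set_edge_attributes(G, 1, 'weight')

print("Nodes: {}  Edges: {}".format(G.number_of_nodes(), G.number_of_edges()))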
Example #2
def LoadModel(basedir, galprop_tag):
    # Load various diffuse models and run fits.
    print 'Running Analysis for model', galprop_tag
    A = Analysis.Analysis(tag='P7REP_CLEAN_V15_calore',
                          basepath='/pfs/carlson/GCE_sys/')
    A.GenSquareMask(l_range=[-20., 20.], b_range=[-20., 20.], plane_mask=2.)
    A.BinPhotons(infile='binned_photons_' + A.tag + '.npy')
    # Load 2FGL
    A.AddPointSourceTemplate(fixNorm=True, pscmap='PSC_3FGL_with_ext.npy')
    A.CalculatePixelWeights(diffuse_model='fermi_diffuse_' + A.tag + '.npy',
                            psc_model='PSC_P7REP_CLEAN_V15_calore_fgl2.npy',
                            alpha_psc=5.,
                            f_psc=0.1)
    A.AddIsotropicTemplate(
        fixNorm=False, fixSpectrum=False
    )  # External chi^2 used to fix normalization within uncertainties

    A.AddFermiBubbleTemplate(
        template_file='./bubble_templates_diskcut30.0.fits',
        spec_file='./reduced_bubble_spec_apj_793_64.dat',
        fixSpectrum=False,
        fixNorm=False)

    A.AddHDF5Template(hdf5file=basedir + '/' + galprop_tag + '.hdf5',
                      verbosity=1,
                      multiplier=2.,
                      bremsfrac=1.25,
                      E_subsample=2,
                      fixSpectrum=False,
                      separate_ics=False)
    return A
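A hedged usage sketch of the loader above; the directory and tag are placeholders, not values from the source:

model = LoadModel('/path/to/galprop_output', 'example_tag')  # hypothetical arguments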
Example #3
    def __init__(self, data, filename, view, parent):
        super(DisassemblerView, self).__init__(parent)

        self.status = ""
        self.view = view

        self.data = data
        for type in ExeFormats:
            exe = type(data)
            if exe.valid:
                self.data = exe
                self.view.exe = exe
                break

        # Create analysis and start it in another thread
        self.analysis = Analysis(self.data)
        self.analysis_thread = threading.Thread(None, self.analysis_thread_proc)
        self.analysis_thread.daemon = True
        self.analysis_thread.start()

        # Start disassembly view at the entry point of the binary
        if hasattr(self.data, "entry"):
            self.function = self.data.entry()
        else:
            self.function = None
        self.update_id = None
        self.ready = False
        self.desired_pos = None
        self.highlight_token = None
        self.cur_instr = None
        self.scroll_mode = False
        self.blocks = {}
        self.show_il = False
        self.simulation = None

        # Create timer to automatically refresh view when it needs to be updated
        self.updateTimer = QTimer()
        self.updateTimer.setInterval(100)
        self.updateTimer.setSingleShot(False)
        self.updateTimer.timeout.connect(self.updateTimerEvent)
        self.updateTimer.start()

        self.initFont()

        # Initialize scroll bars
        self.width = 0
        self.height = 0
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        self.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        self.horizontalScrollBar().setSingleStep(self.charWidth)
        self.verticalScrollBar().setSingleStep(self.charHeight)
        areaSize = self.viewport().size()
        self.adjustSize(areaSize.width(), areaSize.height())

        # Setup navigation
        self.view.register_navigate("disassembler", self, self.navigate)
        self.view.register_navigate("make_proc", self, self.make_proc)

        self.search_regex = None
        self.last_search_type = FindDialog.SEARCH_HEX
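The refresh-timer pattern above can be isolated into a small helper. A sketch assuming PyQt5 (the original may use a different Qt binding); it must run inside a Qt application with an event loop:

from PyQt5.QtCore import QTimer

def make_refresh_timer(callback, interval_ms=100):
    # Repeating timer that drives periodic view refreshes.
    timer = QTimer()
    timer.setInterval(interval_ms)
    timer.setSingleShot(False)       # fire repeatedly, not just once
    timer.timeout.connect(callback)
    timer.start()
    return timer                     # keep a reference so it is not garbage collected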
Example #4
    def getStatDict(self, sampler, analysisList, timeRange, area, componentName=None):
        # Get Statistic dictionary for given analysisList, timeRange,
        #     and area
        # Area can be ReferenceData or text string

        # Convert area to ID if necessary
        if area is None:
            return None
        if isinstance(area, str):
            area = ReferenceID(area)                
        else:
            area = area.getId()
            
        return Analysis.Analysis(sampler).createStats(
            analysisList, area, timeRange, componentName)
Example #5
    def baseloop(self, agent_spec, exp_id, visualize_sessions=True, verbose=True):
        '''
        This function is the basis for running all RL interactions. Inherit from this class when needed.
        '''
        for trial in range(self.nb_trials):
            # Init fresh incarnation of agent
            Agent_current = Agent(agent_spec)
            # Get session and environment objects
            Session_current = self.Session_current
            # Start trial
            for episode in range(self.nb_episodes):
                # Start episode
                obs = Session_current.init_episode(episode)
                action = Agent_current.step(obs, can_terminate=False)
                termination = False
                while not termination:  # loop until the termination flag is set
                    obs = Session_current.step(action)
                    action = Agent_current.step(obs)
                    termination = obs[-1]
                # update logs
                Session_current.update_logs()
                # if verbose:
                #     print('| EXP: ' + str(exp_id+1) +
                #           ' | Trial: ' + str(trial + 1) +
                #           ' | Episode: ' + str(episode + 1) +
                #           ' | Reward = ' + str(obs[-2]) + ' |')
                # End of episode processing
                Qvalues = Agent_current.Qfunction  # obtain q values for analysis
                Session_current.add_value_to_record(Qvalues)
                if agent_spec['add exploration bonus']:
                    Session_current.add_novelty_to_record(Agent_current.exploration_bonus)
            # End of trial processing
            Session_current.process_trial()
            if (trial + 1) % 50 == 0:
                if verbose:
                    print('| EXP: ' + str(exp_id + 1) +
                          ' | Trial: ' + str(trial + 1) + ' |')
        ## Session analysis
        if visualize_sessions:
            Analyze = Analysis(self.exp_output_path, exp_id,
                               Session_current.Maze, Session_current)
            Analyze.visualize(dpi=300)
            ## Obtain session data for cross-session analysis
            self.all_cumulative_rewards[exp_id] = np.array(Analyze.cumulative_rewards)
            self.all_timeteps_perepisode[exp_id] = np.array(Analyze.all_timesteps_trial)
            self.timesteps_until_reward[exp_id] = np.array(Analyze.steps)
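The obs/action/termination protocol driven by baseloop can be exercised with toy stand-ins. A runnable sketch; StubSession and StubAgent are hypothetical and not part of the source:

import random

class StubSession(object):
    """Toy stand-in for Session_current: episodes end after five steps."""
    def init_episode(self, episode):
        self.steps_left = 5
        return (0.0, 0.0, False)              # (state, reward, termination)
    def step(self, action):
        self.steps_left -= 1
        return (random.random(), 1.0, self.steps_left == 0)

class StubAgent(object):
    """Toy stand-in for Agent: ignores the observation, acts randomly."""
    def step(self, obs, can_terminate=True):
        return random.choice([0, 1])

session, agent = StubSession(), StubAgent()
obs = session.init_episode(0)
action = agent.step(obs, can_terminate=False)
termination = False
while not termination:
    obs = session.step(action)
    action = agent.step(obs)
    termination = obs[-1]
print('episode finished, last reward:', obs[-2])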
Example #6
    def createcreateConvertWidget(self):
        global obj_analysis
        self.convertWindow = QtGui.QWidget()
        self.analysisTab = QtGui.QScrollArea()
        obj_analysis = Analysis.Analysis(self.clarg1)
        self.analysisTab.setWidget(obj_analysis)
        #self.analysisTabLayout = QtGui.QVBoxLayout(self.analysisTab.widget())
        self.analysisTab.setWidgetResizable(True)
        global obj_source
        self.sourceTab = QtGui.QScrollArea()
        obj_source = Source.Source(sourcelist, sourcelisttrack, self.clarg1)
        self.sourceTab.setWidget(obj_source)
        #self.sourceTabLayout = QtGui.QVBoxLayout(self.sourceTab.widget())
        self.sourceTab.setWidgetResizable(True)
        global obj_model
        self.modelTab = QtGui.QScrollArea()
        obj_model = Model.Model(schematicInfo, modelList, self.clarg1)
        self.modelTab.setWidget(obj_model)
        #self.modelTabLayout = QtGui.QVBoxLayout(self.modelTab.widget())
        self.modelTab.setWidgetResizable(True)
        global obj_devicemodel
        self.deviceModelTab = QtGui.QScrollArea()
        obj_devicemodel = DeviceModel.DeviceModel(schematicInfo, self.clarg1)
        self.deviceModelTab.setWidget(obj_devicemodel)
        self.deviceModelTab.setWidgetResizable(True)
        global obj_subcircuitTab
        self.subcircuitTab = QtGui.QScrollArea()
        obj_subcircuitTab = SubcircuitTab.SubcircuitTab(
            schematicInfo, self.clarg1)
        self.subcircuitTab.setWidget(obj_subcircuitTab)
        self.subcircuitTab.setWidgetResizable(True)

        self.tabWidget = QtGui.QTabWidget()
        #self.tabWidget.TabShape(QtGui.QTabWidget.Rounded)
        self.tabWidget.addTab(self.analysisTab, "Analysis")
        self.tabWidget.addTab(self.sourceTab, "Source Details")
        self.tabWidget.addTab(self.modelTab, "NgSpice Model")
        self.tabWidget.addTab(self.deviceModelTab, "Device Modeling")
        self.tabWidget.addTab(self.subcircuitTab, "Subcircuits")
        self.mainLayout = QtGui.QVBoxLayout()
        self.mainLayout.addWidget(self.tabWidget)
        #self.mainLayout.addStretch(1)
        self.convertWindow.setLayout(self.mainLayout)
        self.convertWindow.show()

        return self.convertWindow
Example #7
def uppercase():
    url = request.GET.get('s', default=None)

    if url is not None:
        #url = "http://www.apple.com/legal/internet-services/itunes/us/terms.html"
        sc = Scrapper.Scrapper()
        obj = sc.scrap(url)
        #print obj
        #obj = json.dumps({'privacy':{'p':[{'name':'first para in privacy'},{'name':'second para in privacy'}] },
        # 'taxes':{'p':[{'name':'first para in taxes'},{'name':'second para in taxes'}] }
        #})
        obj = Analysis.Analysis().analysis(obj)

        #obj = json.dumps({'privacy':{'classify':'computer software','p':[{'name':'first para in privacy','summarizer':'example of summarized paragraph','tags':['tag1','tag2','tag3']},{'name':'second para in privacy','summarizer':'example of summarized paragraph','tags':['tag1','tag2','tag3']}] },
        # 'taxes':{'classify':'computer software','p':[{'name':'first para in taxes','summarizer':'example of summarized paragraph','tags':['tag1','tag2','tag3']},{'name':'second para in taxes','summarizer':'example of summarized paragraph','tags':['tag1','tag2','tag3']}] }
        #})

        obj = Generator.Generator().generate(obj)

        return json.dumps(obj, indent=4)
Example #8
def analyze_data(classifier, company_list=COMPANY_LIST):
    # analyze reviews of each company
    for i in range(len(company_list)):
        company = company_list[i]
        company_kw = company_list[i].lower().strip().replace(' ', '')
        print "# Analyzing " + company + " ..."
        data = get_data_from_json(COMPANY_DATA_PATH + company_kw + ".json")
        pattern_sentiment, pattern_subjectivity, top_details_nphrases = analyze_details(
            data, company)
        top_questions_nphrases = analyze_questions(data, company)
        overall_sentiment, pos_sentiment, neg_sentiment = analyze_sentiment(
            data, classifier)
        most_positive_review, most_negative_review = analyze_top_reviews(data)
        analysis = Analysis.Analysis(pattern_sentiment, pattern_subjectivity,
                                     overall_sentiment, pos_sentiment,
                                     neg_sentiment, top_details_nphrases,
                                     top_questions_nphrases,
                                     most_positive_review,
                                     most_negative_review)
        export_analysis_data_to_json(analysis, company_kw)
Example #9
def writeDatapoints():
    dal = readDictionary.readDAL()
    labmt = readDictionary.readLABMT()
    afinn = readDictionary.readAFINN()
     
    english = readDictionary.readEnglish()
 
    emoticons = readDictionary.readEmoticons()
 
    analyzer = Analysis.Analysis(dal, labmt, afinn, english)
    
    X = open("X", 'w')
    Y = open("Y", 'w')
    X.close()
    Y.close()
    
    for p in datapoints(analyzer, emoticons):
        X = open("X", 'a')
        X.write(str(p[0]) + "\n")
        X.close()
        Y = open("Y", 'a')
        Y.write(str(p[1]) + "\n")
        Y.close()
    pass
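The open/append/close cycle above flushes each datapoint individually. An equivalent, more idiomatic sketch keeps both files open for the whole loop; the dummy points stand in for datapoints(analyzer, emoticons):

points = [(0.1, 1), (0.7, 0)]  # stand-in data
with open("X", "w") as X, open("Y", "w") as Y:
    for p in points:
        X.write(str(p[0]) + "\n")
        Y.write(str(p[1]) + "\n")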

if __name__ == "__main__":

    import sys

    if len(sys.argv) != 4:
        raise ValueError(
            "Incorrect number of args: <galprop output dir> <galprop tag> <galdef dir>"
        )

    basedir, tag, galdefdir, = sys.argv[1:4]
    fname = basedir + '/' + tag + '_XCO.hdf5'

    # Load the analysis
    A = Analysis.Analysis(tag='P7REP_CLEAN_V15_calore',
                          basepath='/pfs/carlson/GCE_sys/')
    # A.GenPointSourceTemplate(pscmap=(A.basepath + '/PSC_all_sky_3fgl.npy'))
    # A.BinPhotons(outfile='binned_photons_all_sky.npy')
    A.GenSquareMask(l_range=[-180., 180], b_range=[-45., 45.], plane_mask=0.)
    A.BinPhotons(infile='binned_photons_all_sky.npy')
    # Load 2FGL
    A.AddPointSourceTemplate(fixNorm=True, pscmap=('PSC_all_sky_3fgl.npy'))
    A.CalculatePixelWeights(diffuse_model='fermi_diffuse_' + A.tag + '.npy',
                            psc_model='PSC_' + A.tag + '.npy',
                            alpha_psc=5.,
                            f_psc=0.05)
    A.AddIsotropicTemplate(
        fixNorm=True, fixSpectrum=True
    )  # External chi^2 used to fix normalization within uncertainties
    #A.AddDMTemplate(profile='NFW', limits=[None,None], decay=False, gamma=1.26,
    #                r_s=20.0, axesratio=1, offset=(0, 0), spec_file=None,)
Example #11
            # def __init__(self,conf, game10,game20,modifier='nothing',verbose0=False):
            ThisPebble = PB.Pebbles(ThisConf, 3, 3, 'nothing', False)
            # play game
            ThisPebble.play_game()
            # compute rigid clusters
            cidx, clusterall, clusterallBonds, clusteridx, BigCluster = \
                ThisPebble.rigid_cluster()

            ########### Setting up the dynamical matrix and getting eigenmodes
            # This itself does very little, just creates an empty Hessian class
            # __init__(self,conf0):
            ThisHessian = HS.Hessian(ThisConf)

            ########## Have a look at some analysis functions of the rigid clusters
            #def __init__(self,conf0,pebbles0,hessian0,verbose=False):
            ThisAnalysis = AN.Analysis(ThisConf, ThisPebble, ThisHessian, 0.01,
                                       False)
            # stress statistics
            (zav, nm, pres, fxbal, fybal, torbal, mobin, mohist,
             sxx, syy, sxy, syx) = ThisAnalysis.getStressStat()
            # cluster statistics
            frac, fracmax, lenx, leny = ThisAnalysis.clusterStatistics()
            #def plotStresses(self,plotCir,plotVel,plotCon,plotF,plotStress,**kwargs):
            fig1 = ThisAnalysis.plotStresses(True, False, False, True, False)
            #def plotPebbles(self,plotCir,plotPeb,plotPebCon,plotClus,plotOver,**kwargs):
            #ThisAnalysis.plotPebbles(True,True,True,False,False)
            fig2 = ThisAnalysis.plotPebbles(True, True, False, True, False)

            ######### continuing with the Hessian now
            # constructing the matrix
            #  makeHessian(self,frictional,recomputeFnor,stabilise,verbose=False):
            ThisHessian.makeHessian(True, False, 0, True)
Example #12
def threadCrown(filepath):
    global io

    rtpSkel = -1
    crownT = OrderedDict()
    imgL = []
    stemCorrection = bool(int(options[8][1]))

    print io.getHomePath()
    oldHome = io.getHomePath()
    os.chdir(io.getHomePath())
    io.setHomePath('./Crown/')
    f = io.scanDir()
    for (counter, i) in enumerate(f):
        io.setFileName(os.path.basename(i))
        io.setidIdx(imgID)

        print 'processing Crown file: ' + i
        xScale = allPara[counter][7]
        yScale = allPara[counter][8]
        analysis = Analysis.Analysis(io, (xScale + yScale) / 2)
        rtp = RootTipPaths.RootTipPaths(io)

        try:
            img = scipy.misc.imread(i, flatten=True)
        except:
            print 'Image not readable'
            img = []  # empty so the len() guard below skips unreadable files

        if len(img) > 0:
            seg = Segmentation.Segmentation(img, io)
            imgL = seg.label()
            print 'compute root profile'
            currT = time.time()
            if ifAnyKeyIsTrue([
                    'AVG_DENSITY', 'WIDTH_MED', 'WIDTH_MAX', 'DIA_STM_SIMPLE',
                    'D10', 'D20', 'D30', 'D40', 'D50', 'D60', 'D70', 'D80',
                    'D90', 'DS10', 'DS20', 'DS30', 'DS40', 'DS50', 'DS60',
                    'DS70', 'DS80', 'DS90', 'AREA', 'ANG_TOP', 'ANG_BTM'
            ]):
                (crownT['AVG_DENSITY'], crownT['WIDTH_MED'],
                 crownT['WIDTH_MAX'], crownT['D10'], crownT['D20'],
                 crownT['D30'], crownT['D40'], crownT['D50'], crownT['D60'],
                 crownT['D70'], crownT['D80'], crownT['D90'], crownT['DS10'],
                 crownT['DS20'], crownT['DS30'], crownT['DS40'],
                 crownT['DS50'], crownT['DS60'], crownT['DS70'],
                 crownT['DS80'], crownT['DS90'], crownT['AREA'],
                 crownT['DIA_STM_SIMPLE'], crownT['ANG_TOP'],
                 crownT['ANG_BTM']) = analysis.getWidthOverHeight(
                     imgL, xScale, yScale)
                print 'Mask traits computed ' + str(time.time() - currT) + 's'

            if ifAnyKeyIsTrue([
                    'DIA_STM', 'TD_MED', 'TD_AVG', 'STA_RANGE', 'STA_DOM_I',
                    'STA_DOM_II', 'STA_25_I', 'STA_25_II', 'STA_50_I',
                    'STA_50_II', 'STA_75_I', 'STA_75_II', 'STA_90_I',
                    'STA_90_II', 'RTA_DOM_I', 'RTA_DOM_II', 'STA_MIN',
                    'STA_MAX', 'STA_MED', 'RTA_RANGE', 'RTA_MIN', 'RTA_MAX',
                    'RTA_MED', 'NR_RTP_SEG_I', 'NR_RTP_SEG_II', 'ADVT_COUNT',
                    'BASAL_COUNT', 'ADVT_ANG', 'BASAL_ANG', 'HYP_DIA',
                    'TAP_DIA', 'MAX_DIA_90', 'DROP_50', 'CP_DIA25', 'CP_DIA50',
                    'CP_DIA75', 'CP_DIA90', 'SKL_DEPTH', 'SKL_WIDTH'
            ]):
                currT = time.time()
                skel = Skeleton.Skeleton(imgL)
                testSkel, testDia = skel.skel(imgL)
                scipy.misc.imsave(
                    io.getHomePath() + '/Skeleton/' + io.getFileName() +
                    '_skel.png', testSkel)
                print 'Medial axis computed ' + str(time.time() - currT) + 's'
                currT = time.time()
                path, skelGraph, crownT['DIA_STM'], skelSize = \
                    seg.findThickestPath(testSkel, testDia, xScale, yScale)
                allPara[counter][10] = skelSize
                print 'Central path computed ' + str(time.time() - currT) + 's'

            if ifAnyKeyIsTrue([
                    'TD_MED', 'TD_AVG', 'STA_RANGE', 'STA_DOM_I', 'STA_DOM_II',
                    'STA_25_I', 'STA_25_II', 'STA_50_I', 'STA_50_II',
                    'STA_75_I', 'STA_75_II', 'STA_90_I', 'STA_90_II',
                    'RTA_DOM_I', 'RTA_DOM_II', 'STA_MIN', 'STA_MAX', 'STA_MED',
                    'RTA_RANGE', 'RTA_MIN', 'RTA_MAX', 'RTA_MED',
                    'NR_RTP_SEG_I', 'NR_RTP_SEG_II', 'ADVT_COUNT',
                    'BASAL_COUNT', 'ADVT_ANG', 'BASAL_ANG', 'HYP_DIA',
                    'TAP_DIA', 'MAX_DIA_90', 'DROP_50', 'CP_DIA25', 'CP_DIA50',
                    'CP_DIA75', 'CP_DIA90', 'SKL_DEPTH', 'SKL_WIDTH',
                    'RTP_COUNT'
            ]):
                print 'Compute RTP skeleton'
                currT = time.time()
                (rtpSkel, crownT['RTP_COUNT'], crownT['TD_MED'],
                 crownT['TD_AVG'], crownT['MAX_DIA_90'], rtps, tips,
                 crownT['SKL_WIDTH'], crownT['SKL_DEPTH']) = \
                    rtp.getRTPSkeleton(path, skelGraph, True)
                seg.setTips(tips)
                print 'RTP Skeleton computed ' + str(time.time() - currT) + 's'

            allPara[len(allPara) - 1][2] = seg.getFail()

            if ifAnyKeyIsTrue(['RDISTR_X', 'RDISTR_Y']):
                print 'Compute spatial root distribution'
                currT = time.time()
                crownT['RDISTR_X'], crownT['RDISTR_Y'] = analysis.getSymmetry(
                    rtps, rtpSkel)
                print 'Symmetry computed ' + str(time.time() - currT) + 's'

            if rtpSkel != -1:
                if ifAnyKeyIsTrue([
                        'NR_RTP_SEG_I', 'NR_RTP_SEG_II', 'ADVT_COUNT',
                        'BASAL_COUNT', 'ADVT_ANG', 'BASAL_ANG', 'HYP_DIA',
                        'TAP_DIA'
                ]):
                    print 'searching for hypocotyl'
                    currT = time.time()
                    branchRad, nrPaths = seg.findHypocotylCluster(
                        path, rtpSkel)
                    print 'hypocotyl computed ' + str(time.time() -
                                                      currT) + 's'
                    print 'starting kmeans'
                    try:
                        currT = time.time()
                        c1x, c1y, c2x, c2y = analysis.plotDiaRadius(
                            nrPaths, branchRad, path, 2)

                        print '2 clusters computed in ' + str(time.time() -
                                                              currT) + 's'

                        currT = time.time()
                        segImg = seg.makeSegmentationPicture(
                            path, rtpSkel, img, xScale, yScale, c1x, c1y, c2x,
                            c2y)
                        scipy.misc.imsave(
                            io.getHomePath() + '/Result/' + io.getFileName() +
                            'Seg2.png', segImg)
                        (crownT['ADVT_COUNT'], crownT['BASAL_COUNT'],
                         crownT['NR_RTP_SEG_I'], crownT['NR_RTP_SEG_II'],
                         crownT['HYP_DIA'], crownT['TAP_DIA']) = \
                            analysis.countRootsPerSegment(c1y, c2y, c1x, c2x)
                    except:
                        c1x = None
                        c1y = None
                        c2x = None
                        c2y = None
                        pass
                    crownT['DROP_50'] = analysis.RTPsOverDepth(path, rtpSkel)
                    print 'count roots per segment'
                    print 'Root classes computed in ' + str(time.time() -
                                                            currT) + 's'

                if ifAnyKeyIsTrue([
                        'ADVT_ANG', 'BASAL_ANG', 'STA_RANGE', 'STA_DOM_I',
                        'STA_DOM_II', 'STA_25_I', 'STA_25_II', 'STA_50_I',
                        'STA_50_II', 'STA_75_I', 'STA_75_II', 'STA_90_I',
                        'STA_90_II', 'RTA_DOM_I', 'RTA_DOM_II', 'STA_MIN',
                        'STA_MAX', 'STA_MED', 'RTA_RANGE', 'RTA_MIN',
                        'RTA_MAX', 'RTA_MED'
                ]):
                    currT = time.time()
                    lat, corrBranchpts = seg.findLaterals(
                        rtps, rtpSkel, (xScale + yScale) / 2, None)
                    print 'seg.findLaterals computed in ' + str(time.time() -
                                                                currT) + 's'
                    print 'Compute angles at 2cm'
                    currT = time.time()
                    if (c1x is not None and c1y is not None
                            and c2x is not None and c2y is not None):
                        crownT['ADVT_ANG'], crownT['BASAL_ANG'] = \
                            analysis.anglesPerClusterAtDist(
                                c1y, c2y, rtpSkel, path, lat, corrBranchpts,
                                (xScale + yScale) / 2, dist=20)
                    else:
                        crownT['ADVT_ANG'] = 'nan'
                        crownT['BASAL_ANG'] = 'nan'
                    print 'angles at 2cm computed in ' + str(time.time() -
                                                             currT) + 's'

                    if ifAnyKeyIsTrue([
                            'STA_25_I', 'STA_25_II', 'STA_50_I', 'STA_50_II',
                            'STA_75_I', 'STA_75_II', 'STA_90_I', 'STA_90_II'
                    ]):
                        try:
                            print 'compute quantile angles'
                            currT = time.time()
                            a25, a50, a75, a90 = analysis.calculateAngleQuantiles(
                                path, lat, corrBranchpts, rtpSkel)
                            print 'angles computed in ' + str(time.time() -
                                                              currT) + 's'
                        except:
                            a25 = ['nan']
                            a50 = ['nan']
                            a75 = ['nan']
                            a90 = ['nan']
                            print 'ERROR: No quantile angles calculated'

                    if ifAnyKeyIsTrue(
                        ['RTA_RANGE', 'RTA_MIN', 'RTA_MAX', 'RTA_MED']):
                        try:
                            print 'compute angles'
                            currT = time.time()
                            (crownT['RTA_MED'], crownT['RTA_MIN'],
                             crownT['RTA_MAX'], crownT['RTA_RANGE'],
                             anglesN) = analysis.calculateAngles(
                                 path, lat, corrBranchpts, rtpSkel)
                            print 'RTA angle characteristics computed in ' + str(
                                time.time() - currT) + 's'
                        except:
                            print 'ERROR: No RTA angles calculated'

                    if ifAnyKeyIsTrue(
                        ['STA_RANGE', 'STA_MIN', 'STA_MAX', 'STA_MED']):
                        try:
                            print 'compute STA angles'
                            currT = time.time()
                            (crownT['STA_RANGE'], crownT['STA_MED'],
                             crownT['STA_MIN'], crownT['STA_MAX'],
                             angles) = analysis.getLateralAngles(
                                 path, lat, corrBranchpts, rtpSkel)
                            print 'STA angles characteristics computed in ' + str(
                                time.time() - currT) + 's'
                        except:
                            print 'ERROR: No STA angles calculated'

                    if ifAnyKeyIsTrue(
                        ['CP_DIA25', 'CP_DIA50', 'CP_DIA75', 'CP_DIA90']):
                        try:
                            print 'compute diameter quantils'
                            currT = time.time()
                            (crownT['CP_DIA25'], crownT['CP_DIA50'],
                             crownT['CP_DIA75'], crownT['CP_DIA90']) = \
                                analysis.getDiameterQuantilesAlongSinglePath(
                                    path, rtpSkel)
                            print 'Tap diameters computed in ' + str(
                                time.time() - currT) + 's'
                        except:
                            print 'ERROR: No quantile diameters calculated'

                    if ifAnyKeyIsTrue(['STA_DOM_I', 'STA_DOM_II']):
                        try:
                            print 'compute STA dominant angles'
                            currT = time.time()
                            crownT['STA_DOM_I'], crownT[
                                'STA_DOM_II'] = analysis.findHistoPeaks(angles)
                            print 'STA dominant angles computed in ' + str(
                                time.time() - currT) + 's'
                        except:
                            print 'ERROR: No dominant angles calculated (STA)'

                    if ifAnyKeyIsTrue(['STA_25_I', 'STA_25_II']):
                        try:
                            currT = time.time()
                            crownT['STA_25_I'], crownT[
                                'STA_25_II'] = analysis.findHistoPeaks(a25)
                            print 'STA 25 angles computed in ' + str(
                                time.time() - currT) + 's'
                        except:
                            print 'ERROR: No dominant angles25 calculated'

                    if ifAnyKeyIsTrue(['STA_50_I', 'STA_50_II']):
                        try:
                            currT = time.time()
                            crownT['STA_50_I'], crownT[
                                'STA_50_II'] = analysis.findHistoPeaks(a50)
                            print 'STA 50 angles computed in ' + str(
                                time.time() - currT) + 's'
                        except:
                            print 'ERROR: No dominant angles50 calculated'

                    if ifAnyKeyIsTrue(['STA_75_I', 'STA_75_II']):
                        try:
                            currT = time.time()
                            crownT['STA_75_I'], crownT[
                                'STA_75_II'] = analysis.findHistoPeaks(a75)
                            print 'STA 75 angles computed in ' + str(
                                time.time() - currT) + 's'
                        except:
                            print 'ERROR: No dominant angles75 calculated'

                    if ifAnyKeyIsTrue(['STA_90_I', 'STA_90_II']):
                        try:
                            currT = time.time()
                            crownT['STA_90_I'], crownT[
                                'STA_90_II'] = analysis.findHistoPeaks(a90)
                            print 'STA 90 angles computed in ' + str(
                                time.time() - currT) + 's'
                        except:
                            print 'ERROR: No dominant angles90 calculated'

                    if ifAnyKeyIsTrue(['RTA_DOM_I', 'RTA_DOM_II']):
                        try:
                            currT = time.time()
                            crownT['RTA_DOM_I'], crownT[
                                'RTA_DOM_II'] = analysis.findHistoPeaks(
                                    anglesN)
                            print 'angles computed in ' + str(time.time() -
                                                              currT) + 's'
                        except:
                            print 'ERROR: No dominant RTA angles calculated'
    io.setHomePath(oldHome)
    if maxExRoot >= 1:
        rtpSkel = -1
        os.chdir(io.getHomePath())
        io.setHomePath('./Lateral/')
        f = io.scanDir()
        for (counter, i) in enumerate(f):
            print 'processing lateral file: ' + i

            if maxExRoot > 0:
                xScale = allPara[counter / maxExRoot][7]
                yScale = allPara[counter / maxExRoot][8]
                io.setFileName(os.path.basename(i))
            else:
                xScale = allPara[counter][7]
                yScale = allPara[counter][8]
                io.setFileName(os.path.basename(i))
                io.setidIdx(counter)

            rtp = RootTipPaths.RootTipPaths(io)

            analysis = Analysis.Analysis(io, (xScale + yScale) / 2)

            try:
                img = scipy.misc.imread(i, flatten=True)
            except:
                print 'Image not readable'
                img = []
                pass
            if len(img) > 0:

                seg = Segmentation.Segmentation(img, io=io)
                imgL = seg.label()

                if imgL is not None:
                    skel = Skeleton.Skeleton(imgL)
                    testSkel, testDia = skel.skel(imgL)
                    path, skelGraph = seg.findThickestPathLateral(
                        testSkel, testDia, xScale, yScale)
                    if ifAnyKeyIsTrue([
                            'LT_AVG_LEN', 'NODAL_LEN', 'LT_BRA_FRQ',
                            'NODAL_AVG_DIA', 'LT_AVG_ANG', 'LT_ANG_RANGE',
                            'LT_MIN_ANG', 'LT_MAX_ANG', 'LT_DIST_FIRST',
                            'LT_MED_DIA', 'LT_AVG_DIA'
                    ]):
                        (rtpSkel, _, crownT['LT_MED_DIA'],
                         crownT['LT_AVG_DIA'], _, rtps, _, _, _) = \
                            rtp.getRTPSkeleton(path, skelGraph, True)

                    if rtpSkel != -1:
                        if ifAnyKeyIsTrue(['LT_BRA_FRQ']):
                            crownT['LT_BRA_FRQ'] = \
                                analysis.getBranchingfrequencyAlongSinglePath(
                                    rtps, path)
                            crownT['NODAL_AVG_DIA'], _ = \
                                analysis.getDiametersAlongSinglePath(
                                    path, rtpSkel, (xScale + yScale) / 2)
                            crownT['NODAL_LEN'] = analysis.getLengthOfPath(path)
                        if ifAnyKeyIsTrue([
                                'LT_DIST_FIRST', 'LT_AVG_LEN', 'LT_BRA_FRQ',
                                'LT_ANG_RANGE', 'LT_AVG_ANG', 'LT_MIN_ANG',
                                'LT_MAX_ANG'
                        ]):
                            lat, corrBranchpts, crownT['LT_DIST_FIRST'] = \
                                seg.findLaterals(rtps, rtpSkel,
                                                 (xScale + yScale) / 2, path)
                            if ifAnyKeyIsTrue(['LT_AVG_LEN']):
                                crownT['LT_AVG_LEN'] = analysis.getLateralLength(
                                    lat, path, rtpSkel)
                            if ifAnyKeyIsTrue([
                                    'LT_ANG_RANGE', 'LT_AVG_ANG', 'LT_MIN_ANG',
                                    'LT_MAX_ANG'
                            ]):
                                (crownT['LT_ANG_RANGE'], crownT['LT_AVG_ANG'],
                                 crownT['LT_MIN_ANG'], crownT['LT_MAX_ANG'],
                                 _) = analysis.getLateralAngles(
                                     path, lat, corrBranchpts, rtpSkel)
            allCrown.append(crownT.copy())
    else:
        allCrown.append(crownT.copy())

    io.setHomePath(oldHome)
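ifAnyKeyIsTrue is project-specific and not shown in this snippet. A plausible reading, given how it gates trait computation, is a lookup over an options mapping; the mapping below is a hypothetical assumption:

OPTIONS = {'WIDTH_MAX': True, 'AREA': False}   # hypothetical trait switches

def ifAnyKeyIsTrue(keys, options=OPTIONS):
    # True if at least one requested trait is enabled.
    return any(options.get(k, False) for k in keys)

print(ifAnyKeyIsTrue(['WIDTH_MAX', 'AREA']))   # True under the assumed switches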
Example #13
def run():
    path = "Profile\Dataset 1 Analysis.xml"
    analysis = Analysis.Analysis(configuration.AnalysisConfiguration(path))
    print "done"
if __name__ == "__main__":
    if (len(sys.argv) not in [4, 5, 6]):
        raise ValueError(
            "Incorrect number of args: <galprop output dir> <galprop tag> <galdef dir> [limit_inner] [fix_xco]"
        )

    basedir, tag, path = sys.argv[1:4]

    fname = basedir + '/' + tag + '_XCO_P8.hdf5'
    #fname = basedir+'/'+tag+'_XCO_P8_MS04.hdf5'

    # Load the analysis
    A = Analysis.Analysis(
        tag='P8R2_CLEAN_V6_calore',
        fglpath='/pfs/carlson/gll_psc_v16.fit',
        templateDir='/home/carlson/pfs/Extended_archive_v15/Templates',
        basepath='/pfs/carlson/GCE_sys/')
    # A.GenPointSourceTemplate(pscmap=(A.basepath + '/PSC_all_sky_3fgl.npy'))
    # A.BinPhotons(outfile='binned_photons_all_sky.npy')
    #A.GenSquareMask(l_range=[-180.,180], b_range=[-40.,40.], plane_mask=1.)
    A.BinPhotons(infile='binned_photons_P8R2_CLEAN_V6_calore.npy')
    # Load 2FGL
    A.AddPointSourceTemplate(fixNorm=True, pscmap=('PSC_3FGL_with_ext.npy'))
    A.CalculatePixelWeights(diffuse_model='fermi_diffuse_' + A.tag + '.npy',
                            psc_model='PSC_3FGL_with_ext.npy',
                            alpha_psc=5.,
                            f_psc=0.1)
    A.AddIsotropicTemplate(
        fixNorm=False, fixSpectrum=True
    )  # External chi^2 used to fix normalization within uncertainties
Example #15
#List of accepted values to indicate user means yes
YesValues = ["Y", "YEAH", "YUP", "AFFIRMATIVE", "YES", "CORRECT", "RIGHT", "TRUE"]

#Initial assumed yes
ContFlag = "Y"

#While the user wants to continue, keep running new scenarios
while ContFlag in YesValues:
    
    print("For the first fighter...")
    CharacterOne = SaveAndDelete.SaveAndDelete.LoadQuery()
    print("and for the second fighter...")
    CharacterTwo = SaveAndDelete.SaveAndDelete.LoadQuery()
    
    #Appends " One" and " Two" to the names so that the battle makes more sense if they are the same creature
    if CharacterOne.getName() == CharacterTwo.getName():
        CharacterOne.appendName(" One")
        CharacterTwo.appendName(" Two")
        
    #Create the Analysis object
    Battle = Analysis.Analysis(CharacterOne, CharacterTwo)
    
    #Give the probabilities and values to beat
    Battle.AnalyzeToHit()
    
    #Makes the two fighters battle to the death
    #Prints out a play-by-play and declares the winner
    CharacterOne.CharactersFightToDeath(CharacterTwo)
    
    #Checks if the user would like to stop     
    ContFlag = str(input("Would you like to run the simulator again?")).upper()
Example #16
ax_Mean = filter.Mean_Filter(ax, 4)
ay_Mean = filter.Mean_Filter(ay, 4)
az_Mean = filter.Mean_Filter(az, 4)
gx_Mean = filter.Mean_Filter(gx, 4)
gy_Mean = filter.Mean_Filter(gy, 4)
gz_Mean = filter.Mean_Filter(gz, 4)

ax_KF = filter.Kalman_Filter(ax_Mean, 1e-6, 1e-5, -1, 1)
ay_KF = filter.Kalman_Filter(ay_Mean, 1e-6, 1e-5, -1, 1)
az_KF = filter.Kalman_Filter(az_Mean, 1e-6, 1e-5, -1, 1)
gx_KF = filter.Kalman_Filter(gx_Mean, 1e-6, 1e-5, -1, 1)
gy_KF = filter.Kalman_Filter(gy_Mean, 1e-6, 1e-5, -1, 1)
gz_KF = filter.Kalman_Filter(gz_Mean, 1e-6, 1e-5, -1, 1)

aa = Analysis(ax_KF, ay_KF, az_KF, gx_KF, gy_KF, gz_KF, frq)

route = aa.countstep()
supportrate = aa.supportrate()
steplength = aa.steplength()
rollanlge = aa.rollangle()
# """
# GUI
# """
plt.figure("相对加速度时间图像")
plt.plot(ax_KF, 'b', label='X_Axis')
plt.plot(ay_KF, 'orange', label='Y_Axis')
plt.plot(az_KF, 'r', label='Z_Axis')
plt.legend()

plt.figure("绝对加速度时间图像(Q4方法)")
Example #17
        print >> sys.stderr, 'Encountered error with status code:', status_code
        return True  # Don't kill the stream

    def on_timeout(self):
        print >> sys.stderr, 'Timeout...'
        return True  # Don't kill the stream


###############################################################################

if __name__ == '__main__':
    # Initialize the pre-determined text analyzer
    dal = readDictionary.readDAL()
    labmt = readDictionary.readLABMT()
    afinn = readDictionary.readAFINN()

    english = readDictionary.readEnglish()

    emoticons = readDictionary.readEmoticons()

    analyzer = Analysis.Analysis(dal, labmt, afinn, english)

    # Initialize the pre-determined classifier
    clf = Classifier.buildClassifier()

    infinite = True
    while infinite:
        analyzer.reset()
        csl = CustomStreamListener(clf, analyzer, emoticons, api)
        stream = tweepy.streaming.Stream(auth, csl)
        stream.sample()
Example #18
        print('     FOLLOW[', k, ']: ', v)
    Vt -= set(Epsilon)
    Vt |= set(EndSym)

    getAnalysisList()  #LL(1): build the parse (analysis) table

    Vt.remove('#')


if __name__ == "__main__":

    LL1()

    str = input(">>>")
    for i in str:
        if i not in Vt:
            exit("输入串存在字符在文法里不存在!!!")
    table = PrettyTable(
        ["Steps", "Stack", "Input_a_now", "Remain_str", "Use_production"])

    #这里是导入之前写好的语法分析接口
    Analysis(str, StartSym, table, dicts, Vt, Vn)

    #格式控制,输出语法分析表
    table.align['步骤'] = 'l'
    table.align['分析栈'] = 'l'
    table.align['剩余输入串'] = 'l'
    table.align['所用产生式'] = 'l'
    table.align['当前输入a'] = 'l'
    print(table)
Example #19
import sys

import Analysis
import Lakeshore_335
import BK_Precision_9184
import DewarFill
# Assumed sibling modules, following the Module.Class pattern used below:
import Stage
import Sphere
import Camera
import Comm_Status
import StdOut_StdErr
from Tkinter import Tk  # Python 2 Tkinter, matching the print statements below

#************** START OF MAIN PROGRAM ****************** 
master = Tk() # Main GUI Window
master.title("Simulator Control UI")
stage = Stage.Stage(master)
lakeshore = Lakeshore_335.Lakeshore(master)
sphere = Sphere.Sphere(master)
bk = BK_Precision_9184.BK_Precision_9184(master)
camera = Camera.Camera(master,stage, sphere, lakeshore, bk)
dewarfill = DewarFill.DewarFill(master)
comm_status = Comm_Status.Comm_Status(master,stage, sphere, camera, lakeshore, bk, dewarfill)
analysis = Analysis.Analysis(master,camera)
stdout_stderr = StdOut_StdErr.StdOut_StdErr(master)

stage.Define_Frame()
sphere.Define_Frame()
camera.Define_Frame()
dewarfill.Define_Frame()
analysis.Define_Frame()
stdout_stderr.Define_Frame()
comm_status.Define_Frame()
master.mainloop() 

# Return stdout and stderr to terminal window
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
print "Shutting Down\n"
Example #20
File: main.py  Project: avrajit/DIRT
def threadCrown(filepath):
    imgL = []
    tipdiameter = float(options[8][1])
    print io.getHomePath()
    os.chdir(io.getHomePath())
    io.setHomePath('./Crown/')
    f = io.scanDir()
    for (counter, i) in enumerate(f):
        io.setFileName(os.path.basename(i))
        io.setidIdx(imgID)

        print 'processing Crown file: ' + i
        xScale = allPara[counter][7]
        yScale = allPara[counter][8]
        analysis = Analysis.Analysis(io, (xScale + yScale) / 2)
        rtp = RootTipPaths.RootTipPaths(io, tp=tipdiameter)
        rtp.setTipDiaFilter(tipdiameter * (xScale + yScale) / 2)
        crownT = []

        try:
            img = scipy.misc.imread(i, flatten=True)
        except:
            print 'Image not readable'
            img = []  # empty so the len() guard below skips unreadable files
        if len(img) > 0:
            seg = Segmentation.Segmentation(img, io)
            imgL = seg.label()
            print 'compute root profile'
            currT = time.time()
            rootDensity, medianWidth, maxWidth, D, DS, _, _, _, _ = analysis.getWidthOverHeight(
                imgL, xScale, yScale)
            print 'Mask traits computed ' + str(time.time() - currT) + 's'
            currT = time.time()
            skel = Skeleton.Skeleton(imgL)
            testSkel, testDia = skel.skel(imgL)
            print 'Medial axis computed ' + str(time.time() - currT) + 's'
            currT = time.time()
            path, skelGraph, stemDia, skelSize = seg.findThickestPath(
                testSkel, testDia, xScale, yScale)
            allPara[counter][10] = skelSize
            print 'Central path computed ' + str(time.time() - currT) + 's'
            print 'compute rtp skeleton'
            currT = time.time()
            rtpSkel, nrOfRTP, medianTipDiameter, meanDiameter, dia90, _, rtps, tips, _, _ = rtp.getRTPSkeleton(
                path, skelGraph, True)
            allPara[len(allPara) - 1][2] = seg.getFail()
            seg.setTips(tips)
            print 'RTP Skeleton computed ' + str(time.time() - currT) + 's'
            print 'compute symmetry'
            currT = time.time()
            vecSym = analysis.getSymmetry(rtps, rtpSkel)
            print 'Symmetry computed ' + str(time.time() - currT) + 's'

            if rtpSkel != -1:

                lat, corrBranchpts, _ = seg.findLaterals(
                    rtps, rtpSkel, (xScale + yScale) / 2)

                try:
                    print 'compute quantile angles'
                    currT = time.time()
                    a25, a50, a75, a90 = analysis.calculateAngleQuantiles(
                        path, lat, corrBranchpts, rtpSkel)
                    print 'angles computed in ' + str(time.time() -
                                                      currT) + 's'
                except:
                    a25 = ['nan']
                    a50 = ['nan']
                    a75 = ['nan']
                    a90 = ['nan']

                    print 'ERROR: No quantile angles calculated'

                try:
                    print 'compute angles'
                    currT = time.time()
                    angRangeN, avgAngleN, minangleN, maxAngleN, anglesN = analysis.calculateAngles(
                        path, lat, corrBranchpts, rtpSkel)
                    print 'angles computed in ' + str(time.time() -
                                                      currT) + 's'
                except:
                    avgAngleN = 'nan'
                    minangleN = 'nan'
                    maxAngleN = 'nan'
                    angRangeN = 'nan'
                    anglesN = 'nan'
                    print 'ERROR: No angles calculated'

                try:
                    print 'compute RTA angles'
                    currT = time.time()
                    angRange, avgAngle, minangle, maxAngle, angles = analysis.getLateralAngles(
                        path, lat, corrBranchpts, rtpSkel)
                    print 'angles computed in ' + str(time.time() -
                                                      currT) + 's'
                except:
                    avgAngle = 'nan'
                    minangle = 'nan'
                    maxAngle = 'nan'
                    angRange = 'nan'
                    angles = 'nan'
                    print 'ERROR: No RTA angles calculated'
                try:

                    print 'compute diameter quantils'
                    currT = time.time()
                    d25, d50, d75, d90 = analysis.getDiameterQuantilesAlongSinglePath(
                        path, rtpSkel)
                    print 'diameters computed in ' + str(time.time() -
                                                         currT) + 's'
                except:
                    d25 = 'nan'
                    d50 = 'nan'
                    d75 = 'nan'
                    d90 = 'nan'
                    print 'ERROR: No quantile diameters calculated'

                try:
                    print 'compute dominant angles'
                    currT = time.time()
                    ang1, ang2 = analysis.findHistoPeaks(angles)
                    print 'angles computed in ' + str(time.time() -
                                                      currT) + 's'
                except:
                    ang1 = 'nan'
                    ang2 = 'nan'
                    print 'ERROR: No dominant angles calculated'
                try:
                    currT = time.time()
                    ang25_1, ang25_2 = analysis.findHistoPeaks(a25)
                    print 'angles computed in ' + str(time.time() -
                                                      currT) + 's'
                except:
                    ang25_1 = 'nan'
                    ang25_2 = 'nan'
                    print 'ERROR: No dominant angles25 calculated'
                try:
                    currT = time.time()
                    ang50_1, ang50_2 = analysis.findHistoPeaks(a50)
                    print 'angles computed in ' + str(time.time() -
                                                      currT) + 's'
                except:
                    ang50_1 = 'nan'
                    ang50_2 = 'nan'
                    print 'ERROR: No dominant angles50 calculated'
                try:
                    currT = time.time()
                    ang75_1, ang75_2 = analysis.findHistoPeaks(a75)
                    print 'angles computed in ' + str(time.time() -
                                                      currT) + 's'
                except:
                    ang75_1 = 'nan'
                    ang75_2 = 'nan'
                    print 'ERROR: No dominant angles75 calculated'
                try:
                    currT = time.time()
                    ang90_1, ang90_2 = analysis.findHistoPeaks(a90)
                    print 'angles computed in ' + str(time.time() -
                                                      currT) + 's'
                except:
                    ang90_1 = 'nan'
                    ang90_2 = 'nan'
                    print 'ERROR: No dominant angles90 calculated'

                try:
                    currT = time.time()
                    angN_1, angN_2 = analysis.findHistoPeaks(anglesN)
                    print 'angles computed in ' + str(time.time() -
                                                      currT) + 's'
                except:
                    angN_1 = 'nan'
                    angN_2 = 'nan'
                    print 'ERROR: No dominant angles90 calculated'

                crownT = [
                    stemDia, rootDensity, angRange, ang1, ang2, ang25_1,
                    ang25_2, ang50_1, ang50_2, ang75_1, ang75_2, ang90_1,
                    ang90_2, angN_1, angN_2, minangle, maxAngle, avgAngle,
                    angRangeN, avgAngleN, minangleN, maxAngleN, nrOfRTP,
                    medianTipDiameter, meanDiameter, dia90, medianWidth,
                    maxWidth, D[0], D[1], D[2], D[3], D[4], D[5], D[6], D[7],
                    D[8], DS[0], DS[1], DS[2], DS[3], DS[4], DS[5], DS[6],
                    DS[7], DS[8], vecSym[0], vecSym[1], d25, d50, d75, d90
                ]

            else:
                crownT = [
                    stemDia, rootDensity, 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', medianWidth, maxWidth, D[0], D[1],
                    D[2], D[3], D[4], D[5], D[6], D[7], D[8], DS[0], DS[1],
                    DS[2], DS[3], DS[4], DS[5], DS[6], DS[7], DS[8], vecSym[0],
                    vecSym[1], d25, d50, d75, d90
                ]

            if maxExRoot > 1:
                for i in range(maxExRoot):
                    allCrown.append(crownT)
            else:
                allCrown.append(crownT)
            if options[4][1] == '0':
                lateralT = [
                    'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', 'nan'
                ]
                allLat.append(lateralT)
    io.setHomePath(os.getcwd())
Example #21
File: main.py  Project: avrajit/DIRT
def threadLateral(filepath):
    tipdiameter = 0.
    os.chdir(io.getHomePath())
    io.setHomePath('./Lateral/')
    f = io.scanDir()
    for (counter, i) in enumerate(f):
        print 'processing lateral file: ' + i
        if maxExRoot > 0:
            xScale = allPara[counter / maxExRoot][7]
            yScale = allPara[counter / maxExRoot][8]
            io.setFileName(os.path.basename(i))
        else:
            xScale = allPara[counter][7]
            yScale = allPara[counter][8]
            io.setFileName(os.path.basename(i))
            io.setidIdx(counter)

        rtp = RootTipPaths.RootTipPaths(io, tipdiameter)
        rtp.setTipDiaFilter(tipdiameter)

        analysis = Analysis.Analysis(io, (xScale + yScale) / 2)

        lateralT = []

        try:
            img = scipy.misc.imread(i, flatten=True)
        except:
            print 'Image not readable'
            img = []
            pass
        if len(img) > 0:

            seg = Segmentation.Segmentation(img, io=io)
            imgL = seg.label()

            if imgL is not None:
                skel = Skeleton.Skeleton(imgL)
                testSkel, testDia = skel.skel(imgL)
                path, skelGraph = seg.findThickestPathLateral(
                    testSkel, testDia, xScale, yScale)
                rtpSkel, _, medianD, meanD, _, _, rtps, _, _, _ = rtp.getRTPSkeleton(
                    path, skelGraph, True)

                if rtpSkel != -1:
                    lBranchFreq = analysis.getBranchingfrequencyAlongSinglePath(
                        rtps, path)
                    avgLatDiameter, slope = analysis.getDiametersAlongSinglePath(
                        path, rtpSkel, (xScale + yScale) / 2)
                    lengthNodalRoot = analysis.getLengthOfPath(path)
                    lat, corrBranchpts, distToFirst = seg.findLaterals(
                        rtps, rtpSkel, (xScale + yScale) / 2)
                    avgLLength = analysis.getLateralLength(lat, path, rtpSkel)
                    angRange, avgAngle, minangle, maxAngle, _ = analysis.getLateralAngles(
                        path, lat, corrBranchpts, rtpSkel)
                    lateralT = [
                        avgLLength * ((xScale + yScale) / 2),
                        float(lengthNodalRoot) * ((xScale + yScale) / 2),
                        lBranchFreq, avgLatDiameter, slope, avgAngle, angRange,
                        minangle, maxAngle,
                        float(distToFirst) * ((xScale + yScale) / 2), medianD,
                        meanD
                    ]
                else:
                    lateralT = [
                        'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                        'nan', 'nan', 'nan', 'nan'
                    ]
            else:
                lateralT = [
                    'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', 'nan'
                ]
            allLat.append(lateralT)
            if options[5][1] == '0':
                crownT = [
                    'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                    'nan', 'nan', 'nan', 'nan'
                ]
                allCrown.append(crownT)

    io.setHomePath(os.getcwd())
    pass

if __name__ == "__main__":

    import sys

    if len(sys.argv) != 4:
        raise ValueError(
            "Incorrect number of args: <galprop output dir> <galprop tag> <galdef dir>"
        )

    basedir, tag, galdefdir, = sys.argv[1:4]
    fname = basedir + '/' + tag + '.hdf5'

    # Load the analysis
    A = Analysis.Analysis(tag='P7REP_CLEAN_V15_calore')
    # A.GenPointSourceTemplate(pscmap=(A.basepath + '/PSC_all_sky_3fgl.npy'))
    # A.BinPhotons(outfile='binned_photons_all_sky.npy')
    A.GenSquareMask(l_range=[-180., 180], b_range=[-40., 40.], plane_mask=1.)
    A.BinPhotons(infile='binned_photons_all_sky.npy')
    # Load 2FGL
    A.AddPointSourceTemplate(fixNorm=True, pscmap=('PSC_all_sky_3fgl.npy'))
    A.CalculatePixelWeights(diffuse_model='fermi_diffuse_' + A.tag + '.npy',
                            psc_model='PSC_' + A.tag + '.npy',
                            alpha_psc=5.,
                            f_psc=0.1)
    A.AddIsotropicTemplate(
        fixNorm=True, fixSpectrum=True
    )  # External chi^2 used to fix normalization within uncertainties
    #A.AddDMTemplate(profile='NFW', limits=[None,None], decay=False, gamma=1.26,
    #                r_s=20.0, axesratio=1, offset=(0, 0), spec_file=None,)
Example #23
    @classmethod
    def setUpClass(cls):
        cls.analysis = Analysis()
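setUpClass runs once per TestCase class and must be a classmethod, hence the fix above. A minimal runnable sketch; the stub Analysis stands in for the real class:

import unittest

class Analysis(object):
    pass                             # stand-in for the real Analysis class

class AnalysisTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.analysis = Analysis()    # shared fixture, built once per class

    def test_fixture_exists(self):
        self.assertIsNotNone(self.analysis)

if __name__ == '__main__':
    unittest.main()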
Example #24
import openpyxl as xl
import re
import string  # needed for string.ascii_lowercase below
import calendar
from BackEnd import BackEnd
from Naming import PreProcess
from Analysis import *
from tkinter import *
import tkinter as ttk

###################
##  Setting global variables and pre-processing statements
###################

PreProcess = PreProcess()  ## Creating month-based named files.
BackEnd = BackEnd()  ##  Reading raw data and GUI
Analysis = Analysis()  ## Analyses data and creates graphs

letters = list(string.ascii_lowercase)
month = PreProcess.Naming()  ## pre-process files and gets the month
#month = "Mar" # NOTE: if you want to analyse a specific month, you can overide here.

#skipable = [" Interest Charge on Cash Advances" , " Interest Charge on Purchases" , " AUTOMATIC PAYMENT" , " ONLINE PAYMENT" , " AUTOPAY PAYMENT"] ## move to excel

Mainpth = 'C:\\Users\\gad-t\\Desktop\\Files\\Money Manger\\Bank Statements\\Processed\\'
WriteFileName = "Combined\\Spending Summary.xlsx"
Write_Full_pth = Mainpth + WriteFileName

###################
## Setting up the Spending Summary file and getting expense categories
###################
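
## A minimal sketch of the step this header describes, assuming the Spending
## Summary workbook already exists (the "Categories" sheet name is a
## placeholder, not from the original source):
wb = xl.load_workbook(Write_Full_pth)  # open the Spending Summary workbook
categories = [c.value for c in wb["Categories"]["A"] if c.value is not None]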
Example #25
0
def user_entry():
    """
    Get user input from command line or from input file and run full program.
    """
    parser = argparse.ArgumentParser(
        prog='CLIMATE_ANALYSIS',
        formatter_class=argparse.RawTextHelpFormatter,
        description=
        """The functions will give a statistical analysis of the climate data presented.
    FILENAMES FORMAT
    ----------------
    - The filenames should be in the format "{START OF FILENAME}_ens{NUM}_{YEAR}.nc", where {START OF FILENAME} is
    the prefix of the file (this can be the algae type etc.), {NUM} is the ensemble number and {YEAR} is the year.
    OR, if you have multiple years stored in one file:
    - The filenames should be in the format "{START OF FILENAME}_ens{NUM}_{YEAR 1}_{YEAR 2}.nc", where
    {START OF FILENAME} is the prefix of the file (this can be the algae type etc.), {NUM} is the ensemble number and
    {YEAR 1} and {YEAR 2} are the start and end year of the data in the file.
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    ASSUMPTIONS
    ------------
    - Files do not have overlapped data.
    - Daily increments of data, except if the monthly tag is set in the arguments.
    - Grids have constant latitude and longitude.
    ------------
    - Some example files are in the data folder.
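
    EXAMPLE FILENAMES (hypothetical, following the format above)
    ------------------------------------------------------------
    - algae_ens101_2010.nc
    - algae_ens101_2010_2015.nc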
    """)
    parser._optionals.title = "other arguments"
    parser.add_argument(
        '-pf',
        '--prefix',
        nargs='+',
        required=True,
        help=
        "<Required> This is the prefix of the file - in the filenames format section, this is the START OF FILENAME."
    )
    parser.add_argument('start_date',
                        nargs='+',
                        help="""Start date of analysis 
    Can be in the following formats:
    ----------------------------------
    YYYY-MM-DD : e.g. 2020-04-12
    YYYY-MM    : e.g. 2020-04
    YYYY       : e.g. 2020 
    - If the day is not given, the 1st of the given month will be used, i.e. 2020-04 => 2020-04-01
    - If the day and month are not given, 1st Jan will be used as the start date, i.e. 2020 => 2020-01-01"""
                        )
    parser.add_argument(
        '-end',
        '--end_date',
        nargs='*',
        help=
        """ <Not required> End date of analysis - format is the same as start_date
    -----------------------------------end_date not given-------------------------------------
    - If only start year is given, the end_date is automatically set to the 31 Dec of start year
    - If start year and month is given, then end_date is set to the end of the start month
       -----------------------------------end_date given-------------------------------------
    - If day is not given, the end of the given month will be used i.e 2020-04 => 2020-04-30
    - If day and month is not given, 31 Dec will be used as the end date i.e 2020 => 2020-12-31"""
    )
    parser.add_argument('-v',
                        '--vars',
                        nargs='+',
                        metavar="variables",
                        help="<Required> Variables of data to analyse",
                        required=True)
    parser.add_argument('-p',
                        '--plot',
                        nargs=1,
                        metavar=("ensemble_number"),
                        help="""Plot map, histogram and timeseries graphs
    E.g. --plot 1
    The ensemble to plot must be included. """)
    parser.add_argument('-m',
                        '--monthly',
                        action="store_true",
                        help="Data in file is stored in monthly increments.")
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-g',
                       '--grid',
                       nargs='+',
                       type=float,
                       metavar=("(lat, lon) or filename or linear/rotate"),
                       help="""
                       Grid Point: Latitude, Longitude
                        Uses the grid point that the given latitude and longitude lie in.
                        Other commands:
                        - You can define a list of grid points in a .txt file, e.g. check INPUT/sample_points.txt
                           - Grid Point: sample_points.txt
                        - You can regrid to a grid (using nearest neighbour interpolation) defined in a NETCDF file:
                           - Grid Point: example_file.nc
                        Cannot be used in conjunction with sample point.
                       """)
    group.add_argument('-s',
                       '--sample',
                       nargs='+',
                       type=float,
                       metavar=("(lat, lon) or filename or linear/rotate"),
                       help="""
                       Sample Point: Latitude, Longitude
                        Uses the sample point given by the latitude and longitude, via interpolation.
                        Other commands:
                        - You can define a list of sample points in a .txt file, e.g. check INPUT/sample_points.txt
                           - Sample Point: sample_points.txt
                        - You can regrid to a grid (using linear interpolation) defined in a NETCDF file:
                           - Sample Point: example_file.nc
                        Cannot be used in conjunction with grid point.
                       """)
    group.add_argument('-lc',
                       '--lon_centre',
                       nargs=1,
                       type=float,
                       help="Longitude to centre map on.")
    parser.add_argument('-mk',
                        '--mask',
                        nargs=1,
                        metavar="filename",
                        help="Uses masking grid given as a file "
                        "(contains boolean array to be imposed on "
                        "the global grid).")
    parser.add_argument(
        '-o',
        '--output',
        action="store_true",
        help=
        "If plot option selected, save data output of histogram and timeseries "
        "analysis in " + directories.ANALYSIS + " as a .dat file.")
    parser.add_argument('-cv',
                        '--covary',
                        action="store_true",
                        help="Analysis on how the variables given in -v "
                        "vary with each other.")
    parser.add_argument('-e',
                        '--ens',
                        nargs=1,
                        type=int,
                        metavar="number_of_ensembles",
                        help="<Required> The number of ensembles of the data. "
                        "If not set, the default value = 1",
                        required=True)
    parser.add_argument(
        '-ht',
        '--hist',
        nargs='*',
        metavar="number_of_bins_in_histogram",
        help=" Options for bin size selection. If not set, the "
        "default value = fd (Freedman "
        "Diaconis Estimator). The list of the potential "
        "options are listed in: \n"
        "https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram_bin_edges.html#numpy.histogram_bin_edges"
    )
    parser.add_argument(
        '-u',
        '--user',
        nargs=2,
        metavar=('file_name', 'function_name'),
        help=
        """Use function written by the user and stored in user_function folder for analysis. 
                        file_name : name of file that contains function in user_function folder
                        function_name : name of function to call 
                        Note: user functions are expected to only take in a cube as an argument. An example of a function 
                        can be found in user_function/example_function.py
                        """)
    parser.add_argument('-a',
                        '--analysis',
                        nargs='+',
                        help="""Analysis performed on data set.
    If not specified, then all analysis listed below will be performed.
    Types of analysis:
    - mean
    - std (Standard deviation)
    - rms (Root mean squared error)
    - median
    You can also select a combination of analysis to perform e.g. -a mean rms """
                        )
    parser.add_argument('-sp',
                        '--spatial',
                        action="store_true",
                        help="Calculates averages spatially.")
    parser.add_argument(
        '-ca',
        '--areas',
        action="store_true",
        help="Calculate areas of grid boxes of latitude and"
        " longitude and saves to NetCDF file areas.nc in results folder")
    parser.add_argument(
        '-t',
        '--total',
        action="store_true",
        help=
        """Total ensemble stats: True/False : The analysis will be performed over the whole ensemble given.
                        - If set True, all the ensembles will be averaged as a collection.
                        - If set False, the ensembles will be averaged individually."""
    )
    parser.add_argument('-i',
                        '--index',
                        metavar=('index'),
                        help="""Calculate index given 
                            The control run is the FIRST file prefix set and the corresponding start/end date. 
                            The future run is the SECOND file prefix set and the corresponding second start/end date
                            Types of inidices that can be calculated:          
                            enso : The Oceanic Niño Index (ONI) 
                            nino12 : Niño 1+2 Index
                            nino4 : Niño 4 Index
                            tni : The Trans-Niño Index (TNI)
                            iod : Indian Ocean Dipole (IOD) Mode Index 
                            amo : Atlantic Multidecadal Oscillation (AMO) Index
                            pdo : Pacific Decadal Oscillation (PDO) Index 
                            ao : Arctic Oscillation (AO; Northern Annular Mode) Index 
                            aao : Antarctic Oscillation (AAO; Southern Annular Mode) Index 
                            nao : North Atlantic Oscillation (NAO) Index
                            """)
    # Log output
    old_stdout = sys.stdout
    log_file = open("output.log", "w")
    sys.stdout = log_file
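    # From here on, ordinary prints land in output.log; sys.stdout is briefly
    # swapped back to the terminal around each progress-bar update so the bar
    # renders on screen rather than in the log.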

    # Init progress bar
    sys.stdout = old_stdout
    progress = ProgressBar(n_iter=5,
                           total_width=25,
                           description='Climate Modelling software output')
    sys.stdout = log_file
    # Initialise the variables
    algae_type, start, varbs, ens, end, analysis, spatial, total = None, None, None, None, None, None, None, None
    plot, monthly, grid, sample, mask, output, covary, hist = None, None, None, None, None, None, None, None
    lon_centre, func, calc_areas, index, lat, lon, points_sample_grid = None, None, None, None, None, None, None
    second_date_given, start2, end2 = False, None, None

    # If no arguments are given, use input file
    if len(sys.argv) == 1:
        algae_type, start, varbs, ens, end, analysis, spatial, total, plot, monthly, grid, sample, mask, output, covary, hist, lon_centre, func, calc_areas, index = file_entry()
    elif len(sys.argv) == 2 and (sys.argv[1] == '-ex'
                                 or sys.argv[1] == '--example'):
        algae_type, start, varbs, ens, end, analysis, spatial, total, plot, monthly, grid, sample, mask, output, covary, hist, lon_centre, func, calc_areas, index = file_entry(
            example=True)
    else:
        # Arguments
        args = parser.parse_args()

        algae_type = args.prefix
        start = args.start_date
        varbs = args.vars
        ens = args.ens[0]
        end = args.end_date
        analysis = args.analysis
        spatial = args.spatial
        total = args.total
        plot = args.plot
        monthly = args.monthly
        grid = args.grid
        sample = args.sample
        mask = args.mask
        output = args.output
        covary = args.covary
        hist = args.hist
        lon_centre = args.lon_centre
        func = args.user
        calc_areas = args.areas
        index = args.index

    # Update progress after getting input from user
    sys.stdout = old_stdout
    progress.update()
    sys.stdout = log_file
    # Reconstruct the equivalent command-line string
    argv = 'python main.py'
    argv = argv + ' ' + start[0]
    if len(start) == 2:
        argv = argv + ' ' + start[1]
    argv = argv + ' -pf ' + algae_type[0]
    if len(algae_type) == 2:
        argv = argv + ' ' + algae_type[1]
    if end:
        argv = argv + ' -end ' + end[0]
        if len(end) == 2:
            argv = argv + ' ' + end[1]
    av = ' '.join(varbs)
    argv = argv + ' -v ' + av + ' -e ' + str(ens)

    if end and len(start) < len(end):
        print("ERROR in function user_entry: Start dates are required.")
        sys.exit()

    if len(algae_type) > 2:
        print(
            "ERROR in function user_entry: Too many arguments given for 'Prefix' argument."
        )
        sys.exit()

    if spatial and not analysis:
        print(
            "ERROR in function user_entry: Spatial argument cannot be set when no analysis is selected."
        )
        sys.exit()

    # All dates
    day_s, mon_s, yr_s, day_e, mon_e, yr_e = None, None, None, None, None, None
    day_s2, mon_s2, yr_s2, day_e2, mon_e2, yr_e2 = None, None, None, None, None, None
    # Get split start date
    if len(start) == 1:
        day_s, mon_s, yr_s = get_date(start[0])
        if not end:  # If end date not given, use the end of start year
            if StartBools.just_start_year:
                end = str(yr_s)
            elif StartBools.just_start_year_month:
                end = str(yr_s) + "-" + str(mon_s)
        else:
            end = end[0]
        # Get split end date
        day_e, mon_e, yr_e = get_date(end, start=False)

    # 2 end years must be given with 2 start years
    if len(start) == 2 and (end is None or len(end) != 2):
        print(
            "ERROR in function user_entry: Both end dates must be given with both start dates."
        )
        sys.exit()

    # If extra year is given
    if len(start) == 2:
        second_date_given = True
        # Get first start date
        StartBools.just_start_year, StartBools.just_start_year_month = False, False
        day_s, mon_s, yr_s = get_date(start[0])

        # Get first end date
        fst_end = end[0]
        day_e, mon_e, yr_e = get_date(fst_end, start=False)

        # Get next start
        day_s2, mon_s2, yr_s2 = get_date(start[1])

        # Get next end date
        end = end[1]
        day_e2, mon_e2, yr_e2 = get_date(end, start=False)
    elif len(start) > 2:
        print(
            "ERROR in function user_entry: Too many arguments given for 'Start date' argument."
        )
        sys.exit()

    # Print user input
    print("Arguments:")
    if len(algae_type) == 1:
        print("- file prefix: ", algae_type[0])
    if len(algae_type) == 2:
        print("- first file prefix: ", algae_type[0])
        print("- second file prefix: ", algae_type[1])
    print("- variables: ", varbs)
    print("- start date: " + str(yr_s) + "-" + str(mon_s) + "-" + str(day_s))
    print("- end date: " + str(yr_e) + "-" + str(mon_e) + "-" + str(day_e))
    if second_date_given:
        print("- second start date: " + str(yr_s2) + "-" + str(mon_s2) + "-" +
              str(day_s2))
        print("- second end date: " + str(yr_e2) + "-" + str(mon_e2) + "-" +
              str(day_e2))

    # Check that dates are in valid order
    is_valid = check_valid_order([day_s, mon_s, yr_s], [day_e, mon_e, yr_e])
    if not is_valid:
        print("ERROR in function user_entry: Invalid start and end date")
        print("  - The end date is earlier than the start date")
        sys.exit()
    if second_date_given:
        is_valid = check_valid_order([day_s2, mon_s2, yr_s2],
                                     [day_e2, mon_e2, yr_e2])
        if not is_valid:
            print(
                "ERROR in function user_entry: Invalid second start and second end date"
            )
            print("  - The end date is earlier than the start date")
            sys.exit()
    print("Number of ensembles:", ens)

    if analysis:
        print("Analysis: ", analysis)
        a_ = ' '.join(analysis)
        argv = argv + ' -a ' + a_
        check_analysis(analysis)
    if spatial:
        print("Spatial analysis option selected.")
        argv = argv + ' -sp'
    if total:
        print("Total ensemble stats option selected.")
        argv = argv + ' -t'
    if plot:
        print("Plotting option selected.")
        argv = argv + ' -p ' + str(plot[0])
    else:
        plot = None
    if monthly:
        print("Monthly date expected.")
        argv = argv + ' -m'

    if grid:
        if len(grid) == 2:
            lat, lon = grid[0], grid[1]
            print("Grid point option selected.")
            argv = argv + ' -g ' + str(grid[0]) + ' ' + str(grid[1])
        elif len(grid) == 1:
            # Check if txt or nc file or linear or rotate
            check_sample_grid_one_arg(grid, 'user_entry')
            points_sample_grid = grid[0]
            print("Grid point option selected.")
            argv = argv + ' -g ' + str(grid[0])
        else:
            print(
                "ERROR in function user_entry: Grid point argument has invalid number of arguments."
            )
            sys.exit()
    elif sample:
        if len(sample) == 2:
            lat, lon = sample[0], sample[1]
            print("Sample point option selected.")
            argv = argv + ' -s ' + str(sample[0]) + ' ' + str(sample[1])
        elif len(sample) == 1:
            # Check if txt or nc file or linear or rotate
            check_sample_grid_one_arg(sample, 'user_entry')
            points_sample_grid = sample[0]
            print("Sample point option selected.")
            argv = argv + ' -s ' + str(sample[0])
        else:
            print(
                "ERROR in function user_entry: Sample point argument has invalid number of arguments."
            )
            sys.exit()

    if mask:
        if isinstance(mask, list):
            mask = mask[0]
        print("Masking grid option selected.")
        argv = argv + ' -mk ' + mask
    if output:
        print("Save analysis data output selected.")
        argv = argv + ' -o'
    if covary:
        print("Co-varying option selected.")
        argv = argv + ' -cv'
        check_variables_covary(varbs)

    if not hist:
        hist = ['fd']
    else:
        argv = argv + ' -ht ' + str(hist[0])
        if len(hist) == 2:
            argv = argv + ' ' + str(hist[1])
        elif len(hist) > 2:
            print(
                "ERROR in function user_entry: Histogram argument has invalid number of arguments."
            )
            sys.exit()

    print("Histogram bin selection option:", hist)

    if func:
        print("User function given: " + str(func[0]) + ", " + str(func[1]))
        argv = argv + ' -u ' + func[0] + ' ' + func[1]

    if calc_areas:
        print("Calculate areas option selected.")
        argv = argv + ' -ca'

    # Check index is given with second date
    if index and not second_date_given:
        print(
            "ERROR in function user_entry: Index must be given with a second start date set."
        )
        sys.exit()

    if index:
        print("Index option selected: " + index)
        argv = argv + ' -i ' + index

    if lon_centre:
        lon_centre = lon_centre[0]
        print("Longitude centering option selected.")
        argv = argv + ' -lc ' + str(lon_centre)

    # Call functions to perform analysis
    start = [day_s, mon_s, yr_s]
    end = [day_e, mon_e, yr_e]
    if second_date_given:
        start2 = [day_s2, mon_s2, yr_s2]
        end2 = [day_e2, mon_e2, yr_e2]

    # Update progress after processing input from user
    sys.stdout = old_stdout
    progress.update()
    sys.stdout = log_file

    # Calculate indices
    if index:  # Self contained action
        calculate_index(algae_type,
                        index,
                        varbs,
                        start,
                        end,
                        start2,
                        end2,
                        monthly=monthly,
                        test=True)
        # Update progress after calculating index
        sys.stdout = old_stdout
        progress.update()
        sys.stdout = log_file
        progress.finish()
        sys.exit()

    # EXTRACT DATA FROM FILES
    extract = Extract(algae_type[0],
                      varbs,
                      start,
                      end,
                      ens,
                      monthly=monthly,
                      lat=lat,
                      lon=lon,
                      grid=grid,
                      points_sample_grid=points_sample_grid,
                      lon_centre=lon_centre,
                      maskfile=mask,
                      calc_areas=calc_areas)
    saved, ens_files, abs_files, full_saved, dim_coords = extract.extract_data()

    saved2, ens_files2, abs_files2, full_saved2 = None, None, None, None
    if second_date_given:
        at = None
        if len(algae_type) == 2:
            at = algae_type[1]
        else:
            at = algae_type[0]
        extract = Extract(at,
                          varbs,
                          start2,
                          end2,
                          ens,
                          monthly=monthly,
                          lat=lat,
                          lon=lon,
                          grid=grid,
                          points_sample_grid=points_sample_grid,
                          lon_centre=lon_centre,
                          maskfile=mask,
                          calc_areas=calc_areas)
        saved2, ens_files2, abs_files2, full_saved2, _ = extract.extract_data()

    # Update progress after extracting data
    sys.stdout = old_stdout
    progress.update()
    sys.stdout = log_file

    # COMPUTE ANALYSIS
    anlys = Analysis(saved)
    ens_stats, func_name, analysis_str, nan_indices = None, None, None, None
    spat_calcs, spat_calcs2 = None, None
    ens_stats2 = None
    if func:  # user analysis
        file_name, func_name = func[0], func[1]
        ens_stats = anlys.compute_user_analysis(file_name, func_name)
    else:
        if second_date_given:
            ens_stats, spat_calcs, spat_calcs2, analysis_str, nan_indices = anlys.calc_stats_difference(
                saved2,
                analysis,
                total=total,
                spatial=spatial,
                dim_coords=dim_coords)
        else:
            ens_stats, analysis_str, nan_indices = anlys.compute_stats_analysis(
                analysis, total=total, spatial=spatial, dim_coords=dim_coords)

    # Warning for mask and sample/grid
    if mask is not None and lat is not None:
        print(
            "WARNING: Please ensure that sample/grid point is in the masked region."
        )

    # Update progress after computing analysis
    sys.stdout = old_stdout
    progress.update()
    sys.stdout = log_file

    # # PLOTTING
    # try:
    #     if plot is not None or output:
    #         plot_ens_num = int(plot[0]) if plot is not None else 1

    #         # Plot histogram
    #         create_histogram(saved, ens_stats, start, end, varbs, sel=hist, monthly=monthly,
    #                          save_out=output, ens_num=plot_ens_num, cov=covary, mask=mask,
    #                          total=total, analysis_str=analysis_str, nan_indices=nan_indices, plot=plot,
    #                          second_date_given=second_date_given, start_date2=start2, end_date2=end2, spatial=spatial)

    #         # Only plot timeseries and map if plot is enabled
    #         if plot is not None:
    #             # Only plot map of analysis if using analysis: mean, median, std or rms and NOT grid/sample point
    #             if analysis_str:
    #                 if func is None or not func:
    #                     plot_map_analysis(ens_stats, varbs, save_out=output, ens_num=plot_ens_num,
    #                                       analysis_str=analysis_str, total=total,
    #                                       second_date_given=second_date_given)
    #                 else:
    #                     print("WARNING: Map not plotted as user function is used.")
    #             else:
    #                 plot_map(saved, varbs, save_out=output, ens_num=plot_ens_num, total=total,
    #                          second_date_given=second_date_given)

    #         # Plot time series and boxplot
    #         if analysis_str:
    #             create_timeseries_analysis(ens_stats, start, end, varbs, analysis_str, monthly=monthly,
    #                                        save_out=output, ens_num=plot_ens_num,
    #                                        second_date_given=second_date_given, total=total, spatial=spatial,
    #                                        calcs=spat_calcs, calcs2=spat_calcs2, plot=plot)
    #         else:
    #             create_timeseries(saved, start, end, varbs,
    #                               save_out=output, ens_num=plot_ens_num, func_name=func_name, monthly=monthly,
    #                               second_date_given=second_date_given, plot=plot)
    #         # Update progress after plotting
    #         progress.update()

    # except Exception as err:
    #     print("Exception thrown in function user_entry when plotting: " + str(err))

    # WRITE ANALYSIS TO NETCDF FILE
    if output:
        wo = WriteOutput(ens_files,
                         abs_files,
                         ens_stats,
                         analysis_str,
                         varbs,
                         start,
                         end,
                         argv,
                         saved,
                         full_saved,
                         total=total,
                         lon_centre=lon_centre,
                         mask=mask,
                         lon=lon,
                         lat=lat,
                         grid=grid,
                         user_func=func_name,
                         points_sample_grid=points_sample_grid,
                         second_date_given=second_date_given,
                         test=True)
        wo.write_analysis_to_netcdf_file()
        # Update progress after writing output
        sys.stdout = old_stdout
        progress.update()
        sys.stdout = log_file

    print("PROGRAM SUCCESSFUL - TERMINAL FINISHED.")
    # End logging
    sys.stdout = old_stdout
    log_file.close()

    # Print to terminal when finished
    print("")
    print_end_statement()

    progress.finish()
Example #26
0
def setUp(self):
    self.analysis = Analysis.Analysis()
Example #27
0
File: main.py  Project: Aspect26/socneto
import logging
from datetime import datetime

# This fragment assumes a module-level logger and an Analysis wrapper module
# imported as "a" elsewhere in the original project.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
ts = datetime.now().strftime('%Y-%m-%dT%H%M%S')
fh = logging.FileHandler('sentiment_analyser' + ts + '.log')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)

model_path = "best_model"
analysis = a.Analysis(model_path)


def register_itself(topic, input_topic, componentId, producer):
    request = {
        "ComponentId": componentId,
        "ComponentType": "DATA_ANALYSER",
        "UpdateChannelName": "job_management.job_configuration.DataAnalyser_sentiment",
        "InputChannelName": input_topic,
        "attributes": {
            "outputFormat": {
                "polarity": "numberValue",
                "accuracy": "numberValue"
            }
        }
    }
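
    # The example is truncated here; a plausible completion would publish the
    # registration request on the given topic, e.g. with kafka-python
    # (an assumption, not from the original source):
    #   producer.send(topic, json.dumps(request).encode('utf-8'))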
        print(bestName)

        # Use pickle
        tempList = [self.ndGenPool, self.ndNamePool, highestPdNp, profitNp]
        pickle_path = os.path.dirname(
            os.path.abspath(__file__)) + '/pickle/roldata.pickle'
        with open(pickle_path, 'wb') as myroldata:
            pickle.dump(tempList, myroldata)  # pickle.dump returns None

        return bestName, maxProfit


if __name__ == '__main__':

    start_time = time.time()
    an = Analysis.Analysis()
    ga = GeneticAlgorithm(an, GENERATION=500, GENS=1000, PRS=-1, opt=True)
    bestName, maxProfit = ga.start()
    end_time = time.time() - start_time
    print('Time', end_time)
    logging.info(datetime.datetime.now())
    logging.info('Non mutation.')
    logging.info('GENERATION:   {0}'.format(ga.GENERATION))
    logging.info('GENS:         {0}'.format(ga.GENS))
    logging.info('GENLEN:       {0}'.format(ga.GENLEN))
    logging.info('FITPER:       {0}'.format(ga.FITPER))
    logging.info('PROFIT_RATE:  {0}'.format(ga.PROFIT_RATE))
    logging.info('ONE_DAY_TIME: {0}'.format(ga.ONE_DAY_TIME))
    logging.info("End time:     {0}".format(datetime.datetime.now()))
    logging.info("Run time:     {0} seconds".format(end_time))
    logging.info("Max Profit:   {0} \n".format(maxProfit))