Example #1
    def test_detectComplexExpr(self):
        fileName = "/Users/Tushar/Documents/Research/PuppetQuality/Repos/cmits/cmits-example/modules-unclass/user/manifests/valid.pp"
        # fileName = "/Users/Tushar/Documents/Research/PuppetQuality/Repos/percona-xtradb-cluster-tutorial/manifests/master_slave.pp"
        outFileName = "test1/DefEncTest.txt"

        outFile = open(outFileName, "w")
        Analyzer.detectComplexExpr(fileName, outFile)
        outFile.close()
        outFileRead = open(outFileName, "r")
        self.assertGreater(len(outFileRead.read()), 0)
        outFileRead.close()
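
A more portable variant of this test writes to a temporary file instead of a fixed path. This is a minimal sketch, assuming the same Analyzer.detectComplexExpr(fileName, outFile) signature used above; the input manifest path is a stand-in.

    import tempfile

    def test_detectComplexExpr_tempfile(self):
        fileName = "manifests/valid.pp"  # hypothetical input manifest
        # NamedTemporaryFile avoids hard-coding an output location
        with tempfile.NamedTemporaryFile("w+", suffix=".txt") as outFile:
            Analyzer.detectComplexExpr(fileName, outFile)
            outFile.flush()
            outFile.seek(0)
            self.assertGreater(len(outFile.read()), 0)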
Example #2
def main():
    (options, args) = AnalyzerOptions.parse_args()
    if not args:
        AnalyzerOptions.print_help()
        exit()
    else:
        mp = ProductModelProgram(options, args)
        Analyzer.explore(mp, options.maxTransitions)
        print('%s states, %s transitions, %s accepting states' %
              (len(Analyzer.states), len(Analyzer.graph), len(Analyzer.accepting)))
        mname = options.output if options.output else '%sFSM' % args[0]
        Analyzer.save(mname)
Example #3
    def test_trend_forecast():

        analyzer = Analyzer(dax_hist, min_trend_h=82)

        plen, ph = 0, 0
        d_len_sum, d_h_sum = 0, 0

        delta_len_percentual, delta_h_percentual = 0, 0
        delta_len_percentual_sum, delta_h_percentual_sum = 0, 0

        for trend in analyzer.trend_list:

            print("Len: {} \t\t\t\t\t\t\tHeight: {}".format(
                trend.len, trend.height))

            d_len, d_h = abs(plen - trend.len), abs(ph - trend.height)
            d_len_sum += d_len
            d_h_sum += d_h

            if not (d_len == 0 and d_h == 0):

                delta_len_percentual, delta_h_percentual = abs(
                    d_len / trend.len), abs(d_h / trend.height)
                delta_len_percentual_sum += delta_len_percentual
                delta_h_percentual_sum += delta_h_percentual

                print("Diff len: {:12.2f}\t\t\t\tDiff height: {:12.2f}".format(
                    d_len, d_h))
                print(
                    "Diff len percentual: {:12.2f} % Diff height percentual: {:12.2f} %"
                    .format(delta_len_percentual * 100,
                            delta_h_percentual * 100))

            plen, ph = analyzer.predict_next_trend(trend,
                                                   similarity_threshold=0.5)

            if not (plen == 0 and ph == 0):
                print(
                    "\n                         Pred len: {:12.2f} Pred height: {:12.2f}"
                    .format(plen, ph))

            print()

        print("Avg d len: {:12.2f} Avg d h: {:12.2f}".format(
            d_len_sum / (len(analyzer.trend_list) - 1),
            d_h_sum / (len(analyzer.trend_list) - 1)))
        print(
            "Avg d len percentual: {:12.2f} % Avg d h percentual: {:12.2f} %".
            format(
                100 * delta_len_percentual_sum /
                (len(analyzer.trend_list) - 1),
                100 * delta_h_percentual_sum / (len(analyzer.trend_list) - 1)))
Example #4
    def anal_trends():
        anal16 = Analyzer(dax_hist)
        plotter = Plotter(anal16)

        trends = anal16.get_trends(anal16.hist, min_trend_h=5, realistic=False)
        trend_heights = [abs(x.height) for x in trends]
        trend_len = [abs(x.len) for x in trends]
        mom = [abs(x.height / x.len) * 10 for x in trends]

        plotter.plot_general_same_y(list(range(len(trends))),
                                    [trend_heights, trend_len, mom],
                                    x_label="Trends",
                                    y_labels=["heights", "lens", "momentum"])
Example #5
def protein_length_graphic(prot_name, savepath, height, width, font_scale,
                           err):
    """ 
	creates a seaborn graphic using the coverage of proteins stored at
	Proteins_size.csv file 
	"""

    resultpath = get_protein_figures(savepath)

    if not os.path.exists(resultpath):
        os.makedirs(resultpath)

    inputFile = analyzer.get_file5(savepath)  #get_proteins_size_csv(savepath)
    tmpfiles = analyzer.get_tmpfiles(savepath)
    df = pd.read_csv(inputFile)

    sns.set(font_scale=font_scale)

    # index = protein_listbox1.curselection()[0]
    # n =  protein_listbox1.get(index)

    dfname = df['Name']
    dfprot = df[prot_name]

    #df2 = pd.concat([dfname, dfprot])
    sns.set_style("whitegrid")
    xticks = list(dfname)
    f, ax = plt.subplots(figsize=(width, height))
    plt.tight_layout()

    pal = sns.color_palette(
        'Set2')  # choose the color palette; several are available
    #ax = sns.barplot(data=df2, y=dfname, x=dfprot, errwidth=0.5, palette = pal, capsize=0.5) # Error bars represent standard deviations
    ax = sns.barplot(data=df,
                     y="Name",
                     x=prot_name,
                     errwidth=0.5,
                     palette=pal,
                     capsize=0.5)  # Error bars represent standard deviations

    plt.xticks(np.arange(0, 100 + 1, 25))
    ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
    plt.tight_layout()

    plt.savefig(get_protein_size_pdf(savepath, prot_name))
    plt.savefig(get_protein_size_tif(savepath, prot_name))
    plt.close()

    return
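
For reference, here is a self-contained sketch of the same barplot step with toy data in place of Proteins_size.csv; only standard pandas/seaborn/matplotlib calls are used, and the column names and values are made up.

import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

# Toy data standing in for the real CSV
df = pd.DataFrame({"Name": ["protA", "protB", "protC"],
                   "coverage": [85.0, 42.5, 63.2]})

sns.set_style("whitegrid")
f, ax = plt.subplots(figsize=(6, 4))
sns.barplot(data=df, y="Name", x="coverage",
            palette=sns.color_palette("Set2"), ax=ax)
plt.tight_layout()
plt.savefig("coverage.pdf")
plt.close(f)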
Example #6
 def __getKeySeg(self, sourceStr, num, weight):
     chiKeywords = jieba.analyse.extract_tags(
         Analyzer.getAllChiInStr(sourceStr),
         topK=num,
         withWeight=False,
         allowPOS=('ns', 'n', 'v'))
     # keep all English words
     engKeyword = Analyzer.getEngSegList(Analyzer.getAllEngInStr(sourceStr),
                                         self.engStopWordsList)
     targetMap = {}
     for word in chiKeywords:
         targetMap[word] = weight
     for word in engKeyword:
         targetMap[word] = weight
     return targetMap
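
As a standalone illustration of the keyword-extraction call used above (assuming the jieba package is installed; the sample text is made up):

import jieba.analyse

text = "自然语言处理是人工智能的一个重要方向"
# Top 5 keywords, restricted to the same part-of-speech filter as above
keywords = jieba.analyse.extract_tags(text, topK=5, withWeight=False,
                                      allowPOS=('ns', 'n', 'v'))
print(keywords)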
Example #7
    def test_trend_forecasting_ability(n=20, min_trend_h=50):

        np.set_printoptions(suppress=True)

        avg_rand_difference = 0

        anal = Analyzer(dax_hist, min_trend_h=min_trend_h)

        r = len(anal.trend_list) - 1

        for _ in range(r):
            random_list_a = (np.random.random(n) * 2) - 1
            random_list_b = (np.random.random(n) * 2) - 1

            avg_rand_difference += np.sum(
                np.absolute(random_list_a - random_list_b)) / n

        avg_rand_difference /= r

        print("Avg rand difference:               ", avg_rand_difference)

        # ########################################

        avg_avg_sim_difference = 0

        for base_trend in anal.trend_list[:-1]:

            base_similar_trend_containers, base_sum_sim = anal.get_similar_trends(
                base_trend, n, -1)
            next_similar_trend_containers, next_sum_sim = anal.get_similar_trends(
                base_trend.next_trend, n, -1)

            base_similarities = np.asarray(
                [container.sim for container in base_similar_trend_containers])
            next_similarities = np.asarray(
                [container.sim for container in next_similar_trend_containers])

            avg_avg_sim_difference += np.sum(
                np.absolute(base_similarities - next_similarities)) / n

        avg_avg_sim_difference /= r

        print("Average sim difference:            ", avg_avg_sim_difference)

        print("Average following trend similarity:",
              anal.get_avg_following_trend_similarity())

        return avg_rand_difference, avg_avg_sim_difference
Example #8
  def exportTrace(self, traceName, fileName, format="binary", **kwargs):
    """
    Export a trace to a file in a special format.

    @param traceName:   Trace to save
    @param fileName:    Output trace file
    @param format:      Output trace format
    @param **kwargs:    Keyword arguments for the exporter plugin
    """
    if traceName not in self.analyzer.traces:
      self.analyzer.fail("Trace not found: %s" % traceName)
      
    trace = self.analyzer.traces[traceName]
    
    for exporter in self.analyzer.exportPlugins:
      if exporter.formatName == format:
        break
    else:
      self.analyzer.fail("No such format. Available formats: %s." % (", ".join([e.formatName for e in self.analyzer.exportPlugins])))

    kwargs = Analyzer.completeKeywordArguments(exporter.saveTrace, kwargs)

    try:
      f = open(fileName, "wb")
      exporter.saveTrace(trace, f, **kwargs)
      f.close()
      # If the file is in binary format, record the output file
      if format == "binary":
        self.analyzer.traceFiles[traceName] = fileName
        self.reportInfo("Saved trace %s to '%s'." % (traceName, fileName))
      else:
        self.reportInfo("Exported trace %s to '%s' in %s format." % (traceName, fileName, format))
    except IOError as e:
      self.analyzer.fail(e)
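
The exporter lookup above uses Python's for ... else clause: the else body runs only when the loop completes without hitting break. A minimal sketch of the same pattern:

plugins = ["binary", "text"]  # stand-ins for exporter.formatName values

wanted = "csv"
for name in plugins:
    if name == wanted:
        break
else:
    # Reached only because the loop never hit `break`
    print("No such format. Available formats: %s." % ", ".join(plugins))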
Example #9
def wacot_analyze(args):
    analyzer = Analyzer.Analyzer()
    analyzer.compute_article_contributions()
    analyzer.compute_category_contributions()
    analyzer.compute_bot_flags()
    analyzer.count_article_contributions()
    analyzer.count_category_contributions()
Example #10
 def solve(self):
     old_time = datetime.datetime.now()
     self.bfsUtil()
     new_time = datetime.datetime.now()
     return Analyzer(self.matrix, self.pathCount, self.nodeExplored,
                     len(self.queue), (new_time - old_time).total_seconds(),
                     self.isMazeSolved, 'BFS', self.probability, self.size)
Example #11
def alert_management(Systolic_BP, Diastolic_BP, Heart_Rate, Heart_O2_Level,
                     Body_temp):
    new_alert = analysis.Analyzer(Systolic_BP, Diastolic_BP, Heart_Rate,
                                  Heart_O2_Level, Body_temp)

    if (new_alert.Shock_Alert(int(Heart_Rate), int(Body_temp)) or
            new_alert.Signal_Loss(int(Heart_Rate), int(Body_temp)) or
            new_alert.Oxygen_Supply(int(Heart_O2_Level)) or
            new_alert.Fever(int(Body_temp)) or
            new_alert.Hypotension(int(Systolic_BP), int(Diastolic_BP)) or
            new_alert.Hypertension(int(Systolic_BP), int(Diastolic_BP))):
        print('\n\033[1;31;40m|Alert|\033[0m')
    else:
        print(
            '\033[1;32mAll the vital signs were within normal limits.\033[0m\n'
        )

    if new_alert.Shock_Alert(int(Heart_Rate), int(Body_temp)):
        print('\033[1;31;40mShock_Alert!\033[0m')

    if new_alert.Signal_Loss(int(Heart_Rate), int(Body_temp)):
        print('\033[1;31;40mWarning: Signal_Loss!\033[0m')

    if new_alert.Oxygen_Supply(int(Heart_O2_Level)):
        print('\033[1;31;40mPlease Increase Oxygen_Supply!\033[0m')

    if new_alert.Fever(int(Body_temp)):
        print('\033[1;31;40mFever!\033[0m')

    if new_alert.Hypotension(int(Systolic_BP), int(Diastolic_BP)):
        print('\033[1;31;40mHypotension\033[0m')

    if new_alert.Hypertension(int(Systolic_BP), int(Diastolic_BP)):
        print('\033[1;31;40mHypertension\033[0m')
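
Each check above runs twice: once in the summary condition and once for its per-alert message. A table-driven variant evaluates every check exactly once; this is a sketch under the assumption that the Analyzer methods have the signatures used above.

def print_alerts(new_alert, sbp, dbp, hr, o2, temp):
    # (check result, message) pairs; each check is evaluated once
    checks = [
        (new_alert.Shock_Alert(hr, temp), "Shock_Alert!"),
        (new_alert.Signal_Loss(hr, temp), "Warning: Signal_Loss!"),
        (new_alert.Oxygen_Supply(o2), "Please Increase Oxygen_Supply!"),
        (new_alert.Fever(temp), "Fever!"),
        (new_alert.Hypotension(sbp, dbp), "Hypotension"),
        (new_alert.Hypertension(sbp, dbp), "Hypertension"),
    ]
    fired = [msg for triggered, msg in checks if triggered]
    if fired:
        print('\n\033[1;31;40m|Alert|\033[0m')
        for msg in fired:
            print('\033[1;31;40m' + msg + '\033[0m')
    else:
        print('\033[1;32mAll the vital signs were within normal limits.\033[0m\n')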
Example #12
    def autocor_trend_mom():

        anal16 = Analyzer(dax_hist, min_trend_h=5, realistic=False)

        moms = np.asarray([abs(trend.momentum) for trend in anal16.trend_list])

        Plotter.sm_autocor(moms)
Example #13
 def run(self):
     """
     Start processing.
     """
     # parse the command line arguments and set logging options
     try:
         self.args = self.parser.parse_args()
         self.configureLogging()
         self.logger.info("Started with {0}".format(' '.join(sys.argv[1:])))
     except Exception as e:
         self.parser.print_help()
         sys.exit(e)
     # load the configuration file
     try:
         with open(self.args.config) as f:
             self.config.readfp(f)
     except Exception as e:
         self.logger.critical("Could not load the specified configuration file")
         sys.exit(e)
     # set options
     Cfg.LOG_EXC_INFO = self.args.trace
     # execute commands
     with Timer.Timer() as t:
         if self.args.crawl:
             import Crawler
             Crawler.crawl(self.config, self.args.update)
         if self.args.clean:
             import Cleaner
             Cleaner.clean(self.config, self.args.update)
         if self.args.infer:
             import Facter
             Facter.infer(self.config, self.args.update)
         if self.args.graph:
             import Grapher
             Grapher.graph(self.config, self.args.update)
         if self.args.transform:
             import Transformer
             Transformer.transform(self.config)
         if self.args.post:
             import Poster
             Poster.post(self.config)
         if self.args.analyze:
             import Analyzer
             Analyzer.analyze(self.config, self.args.update)
     self.logger.info("Indexer finished in {0}:{1}:{2}".format(t.hours, t.minutes, t.seconds))
Example #14
def main(dic):
    dic = {
        "ID": 1,
        "age": 22,
        "gender": 'Male',
        "heartrate": random.randint(50, 100),
        "Diastolic_BP": random.randint(40, 110),
        "Systolic_BP": random.randint(70, 160),
        "blood_oxygen": random.randint(50, 100),
        "temperature": random.randint(34, 39),
        "time": time.ctime()
    }
    #input module:
    patient = input_api.input_api(dic["ID"], dic["age"], dic["gender"],
                                  dic["heartrate"], dic["Diastolic_BP"],
                                  dic["Systolic_BP"], dic["blood_oxygen"],
                                  dic["temperature"], dic["time"])

    data1 = patient.return_request(1)
    patient.return_request(2)
    print("Patient Data:")
    print(data1)

    #Analyze module:
    data = Analyzer.Analyzer(dic["Systolic_BP"], dic["Diastolic_BP"],
                             dic["heartrate"], dic["blood_oxygen"],
                             dic["temperature"])
    Signal_Loss = data.Signal_Loss(dic["heartrate"], dic["temperature"])
    Shock_Alert = data.Shock_Alert(dic["heartrate"], dic["temperature"])
    Oxygen_Supply = data.Oxygen_Supply(dic["blood_oxygen"])
    Fever = data.Fever(dic["temperature"])
    Hypotension = data.Hypotension(dic["Systolic_BP"], dic["Diastolic_BP"])
    Hypertension = data.Hypertension(dic["Systolic_BP"], dic["Diastolic_BP"])

    #Database:
    database = Database_Module.DataBaseModule()
    # print(authenDB.get("admin"))
    # database.auth(authenDB(), authenDB.get("admin"))
    database.insert(1, data1)

    ##AI_module
    AI = AI_module.AI_module(dic["ID"], data1)
    Blood_oxygen, heartate, Systolic, Diastolic = AI.Query_Data_From_Database()
    heartrate_predict_result, oxygen_predict_result, Diastolic_predict_result, Systolic_predict_result = AI.AI_Module(
        Blood_oxygen, heartate, Systolic, Diastolic)
    Predict_Hypertension_Alert, Predict_Hypotension_Alert, Predict_Shock_Alert, Predict_Oxygen_Alert = AI.Feedback(
        heartrate_predict_result, oxygen_predict_result,
        Diastolic_predict_result, Systolic_predict_result)

    ##Output
    OutputAlert_module.display_AI_iuput_data(dic["ID"], data1, Blood_oxygen,
                                             heartate, Systolic, Diastolic)

    OutputAlert_module.receive_basic_iuput_data(
        Signal_Loss, Shock_Alert, Oxygen_Supply, Fever, Hypotension,
        Hypertension, Predict_Hypertension_Alert, Predict_Hypotension_Alert,
        Predict_Shock_Alert, Predict_Oxygen_Alert)
Example #15
    def __init__(self, events):
        super().__init__(events)
        self.analyzer = Analyzer()
        self.processing = None
        self.fig = None
        self.ax = None

        self.input_url = None
        self.output_path = None
Example #16
def run(replications):
    SEED = 42
    outAnal = Analyzer.Analyzer(["D1", "D2", "D3", "D4"])
    for i in range(replications):
        random.seed(SEED)
        print('EMS! Run %i of %i' % (i + 1, replications))
        outAnal.addData(_runRep(DETAIL))
        SEED = random.random()
    outAnal.run(True)
Example #17
def genSingleDistPlot(axis, config, title):
    # Get results from DISTANCE directories
    # create analyzed data sets from file logs by searching by directory (y values)
    beacon = anlyzr.getResultsFrom(dflts.getDistDirs(config, 'beacon'))
    devkit = anlyzr.getResultsFrom(dflts.getDistDirs(config, 'devkit'))
    sim = dflts.getSimulatedData(
        config)  # simulated data is hard coded or calculated; no directory
    # generate x-axis labels for the plot intervals (x values)
    xs_bcon = genDistXLabels(beacon)
    xs_dev = genDistXLabels(devkit)
    xs_sim = genDistXLabels(sim, 0)
    # create list of lists with y values (ys) and x values (xs)
    ys = [
        plotAxisAmplitude(beacon, axis),
        plotAxisAmplitude(devkit, axis), sim
    ]
    xs = [xs_bcon, xs_dev, xs_sim]
    return xs, ys
Example #18
 def solve(self):
     old_time = datetime.datetime.now()
     self.dfsUtil()
     new_time = datetime.datetime.now()
     print((new_time - old_time).total_seconds())
     print(self.nodeExplored)
     print(self.isMazeSolved)
     return Analyzer(self.matrix, self.pathCount, self.nodeExplored,
                     len(self.stack), (new_time - old_time).total_seconds(),
                     self.isMazeSolved, 'DFS', self.probability, self.size)
Example #19
def get_sentiment():
    result = Analyzer.get_overall_sentiment(ticker_var.get())

    sentiment_conversion_list = [
        "Very Poor", "Poor", "Neutral", "Good", "Very Good"
    ]
    n = len(sentiment_conversion_list)
    sentiment.set(
        f'{round(result, 2)}  {sentiment_conversion_list[min(int((result / 2 + 0.5) // .2), n - 1)]}'
    )  # maps the -1..1 score onto 0..1, then into one of the n labels
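
To see why the index expression works: a score of -1 maps to -1/2 + 0.5 = 0.0 and a score of +1 maps to 1.0, so floor division by 0.2 yields bucket indices 0 through 4, with the top edge clamped by min(). A quick worked check:

labels = ["Very Poor", "Poor", "Neutral", "Good", "Very Good"]
n = len(labels)
for score in (-1.0, -0.3, 0.0, 0.4, 1.0):
    idx = min(int((score / 2 + 0.5) // 0.2), n - 1)
    print(f"{score:+.1f} -> {labels[idx]}")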
Example #20
def main():
    #Prints to signify it is running
    print("Started")
    #Starts the DataGrabber to load information to a file for stock to pull from
    stock = Stock("ko")

    #Loads the stock so it will have the needed information to analyze
    #stock = Stock("ko", years)
    #Will run the calculations so we can pull the results without
    #cluttering up this class which will interpret them
    processedData = Analyzer(stock)
Example #21
    def getResult(self, targetWord):

        # truncate the keyword to the maximum length
        targetWord = targetWord.decode('utf-8')
        if (len(targetWord) > self.MAXKEYWORDLEN):
            targetWord = targetWord[0:self.MAXKEYWORDLEN]

        result = []
        # look up the search term itself as a keyword
        #targetWord = targetWord.decode('utf-8')
        #tempResult = self.__getUrlAndWeight(targetWord)
        #tempResult = self.__getBrief(targetWord, tempResult)
        #result += tempResult
        # use the word-segmentation results as keywords
        #targetSplit = Analyzer.getChiSegList(targetWord, self.htmlIndexer.chiStopWordsList)

        #chiTargetSplit =
        #engTargetSplit =

        targetSplit = Analyzer.getChiSegList(
            Analyzer.getAllChiInStr(targetWord),
            self.htmlIndexer.chiStopWordsList) + Analyzer.getEngSegList(
                Analyzer.getAllEngInStr(targetWord),
                self.htmlIndexer.engStopWordsList)

        for word in targetSplit:
            tempResult = self.__getUrlAndWeight(word)
            tempResult = self.__getBrief(word, tempResult)
            result += tempResult
        # merge entries that share the same URL
        mergedRes = self.__mergeUrlAndWeight(result)
        # sort the results by weight
        mergedRes.sort(key=lambda uaw: uaw[1], reverse=True)
        '''for res in mergedRes:
            if(len(res) >= 3):
                mergedRes.remove(res)

        result = []'''
        for i in mergedRes:
            i[0] = 'http://' + i[0]
        return mergedRes
Example #22
 def __init__(self, mainui, AIObject, TSS, gboard, textfield, progressbar,
              pids):
     self.MainUI = mainui
     self.TextField = textfield
     self.progressbar = progressbar
     self.pids = pids
     self.GomokuBoard = gboard
     self.AI = AIObject
     self.AIStoneType = self.AI.AIStoneType
     self.PlayerStoneType = "black" if self.AIStoneType == "white" else "white"
     self.refree = Analyzer.WinChecker(self.AI.Board)
     self.TSS = TSS
Example #23
 def getSentimentsOnlyByTextBlob(self):
     client = Client.Client()
     api = client.getTwitterClientApi()
     tweets = api.twitterClient.user_timeline(screen_name=self.user,
                                              count=self.numberOfTweets)
     tweetAnalyzer = Analyzer.TweetAnalyzer()
     dataFrame = tweetAnalyzer.tweetsToDataFrame(tweets)
     dataFrame['sentiment'] = np.array([
         tweetAnalyzer.analyzeSentimentByTextBlobModel(tweet)
         for tweet in dataFrame['Tweets']
     ])
     print(dataFrame.head(self.numberOfTweets))
Example #24
    def __init__(self):
        super(main, self).__init__()
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.info('initializing Self-Adaptive System')

        KB = KnowledgeBase.KnowledgeBase()
        KB.loadGoalModel('car-wheel.json')

        E = Executor.Executor(KB)
        P = Planner.Planner(KB, E)
        A = Analyzer.Analyzer(KB, P)
        M = Monitor.Monitor(KB)
Example #25
def main():
    '''
    folders = []
    for (dirname, dirs, files) in os.walk("C:\\Users\\anivr\\Desktop\\AutoDownload"):
        folders = dirs
        for x, f in enumerate(folders):
            folders[x] = dirname + "\\" + f
        break
    for folder in folders:
        print("FOLDER : " + str(folder))
        print("===================================")
        # print(str(folder))
        try:
            Analyzer.main(folder)
        except Exception as err:
            print("There was an error : " + "[" + str(folder) + "]" + str(err))
    print(str(Analyzer.S / (Analyzer.S + Analyzer.F)))
    '''
    Analyzer.main(
        "C:\\Users\\anivr\\Desktop\\AutoDownload\\service.subtitles.opensubtitles\\service.py"
    )
Example #26
 def solve(self):
     old_time = datetime.datetime.now()
     print(old_time)
     self.dfsUtil()
     new_time = datetime.datetime.now()
     print(new_time)
     print((new_time - old_time).total_seconds())
     print("********************")
     print(self.nodeExplored)
     return Analyzer(self.matrix, 0, self.nodeExplored, len(self.stack),
                     (new_time - old_time).total_seconds(),
                     self.isMazeSolved)
Example #28
def analyze():
    username = request.args.get('username', '')
    if username != '':
        res = Analyzer.analyze(username)
        # for test
        #res = json.load(open('result.txt'))
        if res:
            res['success'] = True
        else:
            res = {'success': False}
    else:
        res = {'success': False}
    return jsonify(res)
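
The request.args/jsonify calls suggest this route lives in a small Flask app; here is a minimal sketch of the surrounding wiring, with Analyzer.analyze stubbed out since its implementation isn't shown:

from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route('/analyze')
def analyze():
    username = request.args.get('username', '')
    if username:
        res = {'username': username}  # stand-in for Analyzer.analyze(username)
        res['success'] = True
    else:
        res = {'success': False}
    return jsonify(res)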
Example #29
    def solve(self):

        old_time = datetime.datetime.now()
        self.aStarUtil()
        new_time = datetime.datetime.now()
        print((new_time - old_time).total_seconds())
        print(self.nodeExplored)
        print(self.isMazeSolved)
        return Analyzer(self.matrix, self.nodeExplored, self.nodeExplored,
                        self.queue.qsize(),
                        (new_time - old_time).total_seconds(),
                        self.isMazeSolved, 'ASTAR', self.probability,
                        self.size)
Example #30
    def test_trend_forecasting_model(test_years,
                                     min_trend_h,
                                     model_years=None,
                                     model_hist=None,
                                     strict_mode=False,
                                     mode="avg"):

        if not model_years and not model_hist:
            raise Exception(
                "You must provide a model history or year for a model history!"
            )

        if model_years:
            if type(model_years) is not list:
                model_years = list(model_years)

            if len(model_years) > 2:
                model_years = [model_years[0], model_years[-1]]

        if type(test_years) is not list:
            test_years = list(test_years)

        if len(test_years) > 2:
            test_years = [test_years[0], test_years[-1]]

        if model_hist:
            anal = Analyzer(model_hist,
                            min_trend_h=min_trend_h,
                            realistic=False)
        else:
            h = History("GER30", *model_years)
            anal = Analyzer(h, min_trend_h=min_trend_h, realistic=False)

        anal.get_intern_trend_prediction_error(p=True,
                                               use_strict_mode=strict_mode,
                                               mode=mode)
        test_anal = Analyzer(History("GER30", *test_years),
                             min_trend_h=min_trend_h,
                             realistic=False)

        anal.get_extern_trend_prediction_error(test_anal.trend_list,
                                               p=True,
                                               use_strict_mode=strict_mode,
                                               mode=mode)
Example #31
    def __getChiSegMap(self, sourceStr, wordNum, weight):
        #tempList = []
        targetMap = {}
        segList = Analyzer.getChiSegList(sourceStr, self.chiStopWordsList)
        #for word in segList:
        #if(word not in self.chiStopWordsList):
        #tempList.append(word)
        tempC = collections.Counter(segList)

        for word, times in tempC.most_common(wordNum):
            targetMap[word] = times * weight

        return targetMap
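
collections.Counter.most_common(n) returns the n highest-count (word, count) pairs, so the loop above weights the most frequent words. A small worked example of the same scheme:

import collections

segList = ["data", "model", "data", "graph", "data", "model"]
weight = 2
targetMap = {word: times * weight
             for word, times in collections.Counter(segList).most_common(2)}
print(targetMap)  # {'data': 6, 'model': 4}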
Example #32
def wacot_import(args):
    importer = Importer.Importer()
    if args.from_dumps == 'all' or args.from_dumps == 'xml':
        importer.import_xml()
    if args.from_dumps == 'all' or args.from_dumps == 'cat':
        importer.import_categories()
    if not args.only_import:
        analyzer = Analyzer.Analyzer()
        analyzer.compute_article_contributions()
        analyzer.compute_category_contributions()
        analyzer.compute_bot_flags()
        analyzer.count_article_contributions()
        analyzer.count_category_contributions()
Example #33
    def createAnalyzer(self, args=[]):
        # Parse the command line options
        options, args = Options.parseCommandLine(args)

        # Read the project file
        project = Project.Project(options=options, fileName=options.project)

        # Create the interactive analyzer
        analyzer = Analyzer.InteractiveAnalyzer(project, options)

        # Divert the output
        analyzer.reportInfo = self.logInfo
        Task.setMonitor(None)

        return analyzer
Example #34
    def run(self):
        """
        main entry point to run the post-processing of a perfmon job
        """
        msg = self.msg
        self.msg.info("running app...")
        ## check everybody has the same bins
        for i in range(len(self.anaMgrs)):
            ## print "-->",self.anaMgrs[i].name,len(self.anaMgrs[i].bins)
            for j in range(i + 1, len(self.anaMgrs)):
                if len(self.anaMgrs[i].bins) != len(self.anaMgrs[j].bins):
                    self.msg.warning("Not running on same number of evts !")
                    self.msg.warning(" [%s] : %r", self.anaMgrs[i].name,
                                     self.anaMgrs[i].bins)
                    self.msg.warning(" [%s] : %r", self.anaMgrs[j].name,
                                     self.anaMgrs[j].bins)

        self.msg.info("nbr of datasets: %i", len(DataSetMgr.instances.keys()))
        import Analyzer
        self.msg.info("running analyzers...")
        for monComp in self.monComps.values():
            self.msg.debug(" ==> [%s]... (%s)", monComp.name, monComp.type)
            monVars = self.analyzers
            if (monComp.name in ['TopAlg', 'Streams']
                    or self.__filter(monComp)):
                for monVar in monVars:
                    analyzer = Analyzer.getAnalyzer(monVar, monComp.name)
                    analyzer.run(monComp)
                    pass
            else:
                self.msg.debug(" skipped [%s]", monComp.name)
            pass

        self.msg.info("creating summary...")
        ## create the summary
        import SummaryCreator
        self.summary = SummaryCreator.SummaryCreator(fitSlice=self._fitSlice)
        self.summary.process(DataSetMgr.instances,
                             MonitoredComponent.instances)

        self.msg.info("creating output files...")
        self.__writeRootFile()
        self.__writeAsciiFile()
        self.__writePdfFile()

        self.msg.info("running app... [DONE]")
        return ExitCodes.SUCCESS
Example #35
def run_all_algs(stock):
    import sys
    sys.path.append("/Users/carlsoncheng/PycharmProjects/grahamBot")
    import Analyzer
    graham = Analyzer.Analyzer(stock)
    graham.earn_inc_by_33_percent_test()
    graham.positive_earnings_test()
    graham.twenty_year_div_record_test()
    graham.shareholder_equity_to_total_assets()
    graham.long_term_debt_less_than_net_current_assets()
    graham.curr_ratio_greater_than_2()
    graham.long_term_debt_less_than_2x_shareholder_equity()
    graham.ttm_average_pe_less_than_20()
    graham.price_to_seven_year_earnings_ratio_less_than_25()
    graham.price_to_3_year_earnings_less_than_15()
    graham.pb_ratio_less_than_1_point_5()
    graham.graham_number()
Example #36
__author__ = 'ja'
from Analyzer import *

if __name__ == "__main__":
    a = Analyzer('../data/MC.root', "../out/MC_resid.root")
    a.runAnalysis()
Example #37
def parseBooks():
    print("starting to parse books")
    booksInfo = []
    with open(locationOfBooksInfo, encoding="utf8") as iFile:
        booksInfo = iFile.readlines()

    # booksInfo = booksInfo[:30]
    booksParsedCount = 0
    bookLength = len(booksInfo)
    books = []
    for book in booksInfo:
        booksParsedCount += 1
        rowData = book.split("@")
        title = rowData[0].strip()
        author = rowData[1].strip()
        location = rowData[2].strip()
        content = ""
        errorMessage = ""
        print("looking at book" + title + " " + location + " " + author)
        if not os.path.isfile(location):
            zipLocation = location[:-3]
            zipLocation += "zip"
            if os.path.isfile(zipLocation):
                try:
                    zfile = zipfile.ZipFile(zipLocation)
                    zipFolder = zipLocation[: zipLocation.rfind("/")]
                    zfile.extractall(zipFolder)
                except Exception:
                    continue
            if not os.path.isfile(location):
                errorMessage += "location not found"
        if author == "Anonymous" or author == "Various":
            errorMessage += "author unknown"
        if ".txt" not in location:
            errorMessage += "location weird"

        if errorMessage != "":
            print(errorMessage)
            continue

        with open(location) as iFile:
            try:
                content = iFile.read()
                # print("read " + location)
            except UnicodeDecodeError as e:
                print("decode error")
                print(e)
                continue
            except Exception as e:
                print(e)
        data = {}
        try:
            data = Analyzer.analyzeBook(content)

            print("anayzled " + title)
        except Exception as e:
            print(e)
            continue
        newBook = Book(location, author, title, data)
        books.append(newBook)
        print(newBook.title + " was parsed")
        print("percent finished: " + str(booksParsedCount / bookLength))

    with open(locationOfBooks, "w", encoding="utf8") as oFile:
        for book in books:
            print(bookToFileFormat(book))
            oFile.write(bookToFileFormat(book))
Example #38
 def exportTraceHelp(self):
   helpText = ["Output formats and options:"]
   for exporter in self.analyzer.exportPlugins:
     helpText.append("  %10s: %s" % (exporter.formatName, ", ".join(Analyzer.getFunctionArguments(exporter.saveTrace)[3:])))
   return "\n".join(helpText)
Example #39
import os

import PLConstants as CONSTS

import Aggregator
import Analyzer

root = CONSTS.REPO_ROOT
print("Initiating Custom Puppet-Lint Analyzer...")
totalRepos = len(os.listdir(root))
currentItem = 0
for item in os.listdir(root):
    currentFolder = os.path.join(root, item)
    if not os.path.isfile(currentFolder):
        Analyzer.analyze(currentFolder, item)
    currentItem += 1
    print(str("{:.2f}".format(float(currentItem * 100) / float(totalRepos))) + "% analysis done.")
print("Custom Puppet-Lint Analyzer - Done.")

print("Initiating Puppet-Lint aggregator...")
aggregatedFile = open(root + "/" + CONSTS.AGGREGATOR_FILE, "wt")
aggregatedFile.write(CONSTS.HEADER)
for item in os.listdir(root):
    currentFolder = os.path.join(root, item)
    if not os.path.isfile(currentFolder):
        Aggregator.aggregate(currentFolder, item, aggregatedFile)
aggregatedFile.close()
print("Puppet-Lint aggregator - Done.")