Example #1
async def fetch():
    s = smmrpy.SMMRPY('8B1BD1A777')  # Instantiate the SMMRPY instance.
    article = await s.get_smmry(
        'https://edition.cnn.com/2018/05/11/middleeast/iran-israel-syria-intl/index.html'
    )
    print(article.title)  # Print the title of the found article.
    a = article.content
    # Naive sentence split on periods; analyze each sentence separately.
    for sent in a.split("."):
        analyzer.analyze(sent)
Example #2
def compare(testname, olddir, curdir, config_file='perf.conf', output_dir="./result.txt"):
    config = configparser.ConfigParser()
    config.read(config_file)
    result_file_pattern = config.get(testname, "result_file_pattern")

    def search_files(directory):
        # Shell out to find result files for this test matching the configured pattern.
        cmd = 'find %s|grep "%s.*/%s"' % (directory, testname, result_file_pattern)
        print(cmd)
        return subprocess.getoutput(cmd)

    oldlist = search_files(olddir)
    newlist = search_files(curdir)
    if oldlist != "" or newlist != "":
        analyzer.analyze(oldlist, newlist, output_dir)
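
compare() expects the config file to hold one section per test name with a result_file_pattern key; a hypothetical perf.conf entry, with section name and pattern invented for illustration:

[netperf]
result_file_pattern = results.txt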
Example #3
def make_freq(file):
    """Read a text file, run morphological analysis, and build a word-frequency table.

       Returns: a dict mapping each extracted word to its occurrence count.
    """
    print('Reading the text...')

    with open(
            file,  # name of the file to read
            'r',  # open read-only
            encoding='utf_8'  # specify the encoding
    ) as f:
        text = f.read()  # load the whole file into text
    text = re.sub('\n', '', text)  # strip the newline characters
    word_dic = {}  # dict that holds the words

    # (1) Get the morphological-analysis result as a list
    analyze_list = analyzer.analyze(text)

    # (2) Unpack each element of the nested list into two parameters
    for wd, part in analyze_list:
        # If keyword_check() reports this part of speech as a keyword
        if analyzer.keyword_check(part):
            if wd in word_dic:  # does the dict already have this word as a key?
                word_dic[wd] += 1  # add 1 to the key's value
            else:  # no matching key yet
                word_dic[wd] = 1  # use the word as a key with value 1
    return word_dic  # return the dict as a frequency table
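
make_freq() returns the frequency table as a plain dict, so the usual dict tooling applies; a minimal usage sketch, with a hypothetical input file name:

freq = make_freq('sample.txt')
# Print the ten most frequent keywords, highest count first.
for word, count in sorted(freq.items(), key=lambda kv: kv[1], reverse=True)[:10]:
    print(word, count)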
Example #4
def getOutput():
    global filename

    # check if the filename is not empty
    if filename:
        # clear the treeview first before putting any data
        removeAll(lexemeTable)
        removeAll(symbolTable)

        # process the file
        data = analyzer.analyze(filename)

        # display the data in the treeview (lexeme and symbol table)
        printLexeme(data[0])
        printSymbols(data[1])

        # collect the program output of the LOLcode file
        textOut = ""
        for text in data[2]:
            textOut += text

        outputText.config(state=NORMAL)
        outputText.delete(1.0, END)
        outputText.insert(END, textOut)
        outputText.config(state=DISABLED)

    else:
        messagebox.showwarning("Message Box", "No File Selected!")
Example #5
    def SelectClassHeat(self, evt):
        self.Debug('SelectClassHeat')
        if evt is None:
            cls, heat = self.class_heat
        else:
            cls, heat = self.recs[evt.GetSelection()]
            self.class_heat = cls, heat
        self.Info('Selected class %s heat %s' % (cls, heat))

        if self.editwin:
            self.sizer.Remove(self.editwin)
            self.editwin.Destroy()

        #reload(analyzer)

        res = analyzer.analyze(heat, self.record[cls][heat],
                               self.scoringsystem)
        self.editwin = EditWin1(self.record[cls][heat][0],
                                self.record[cls][heat][1],
                                res,
                                self.topparent,
                                self,
                                self.debug + (not not self.debug),
                                zoom=self.zoom,
                                seltime=self.seltime)
        self.sizer.Add(self.editwin, 1, wx.EXPAND)
        self.sizer.Layout()
Example #6
def test():
    logging.info("test: begin ...")

    # test flow order
    flow = [
        'T', 'A', 'T', 'G', 'T', 'C', 'B', 'A', 'G', 'T', 'G', 'C', 'A', 'T',
        'G', 'T', 'C', 'A', 'T', 'G', 'T', 'C', 'A'
    ]
    # test sequence
    sequence = [
        'T', 'C', 'A', 'G', 'G', 'G', 'C', 'A', 'G', 'C', 'G', 'C', 'A', 'A',
        'A', 'A', 'G', 'G', 'G', 'A', 'A', 'G', 'A', 'T', 'A', 'C', 'C', 'A',
        'A', 'G', 'G', 'A', 'G', 'G', 'C', 'A', 'T', 'G', 'G', 'C', 'C', 'T',
        'T', 'T', 'G', 'T', 'C', 'A', 'A', 'G', 'G', 'G', 'C', 'C', 'C', 'C',
        'C', 'C', 'T', 'C', 'T', 'C', 'T', 'G', 'A', 'G', 'C', 'T', 'C', 'T',
        'C', 'A', 'T', 'C', 'A', 'C', 'T', 'T', 'T', 'C', 'C', 'T', 'C', 'C',
        'C', 'C', 'C'
    ]
    # test analysis
    number_of_incorporation = analyzer.analyze(flow, sequence)
    # test incorporation output
    logging.info(number_of_incorporation)
    # test output plotting
    plotter.plot(flow, number_of_incorporation, "Test")
    logging.info("test: complete ...")
Example #7
def intraday_working_hour_only(stock_symbols, crypto_symbols, date):
    start_time = "{} 09:30:00".format(date)
    end_time = "{} 16:00:00".format(date)
    df = load_data(start_time, end_time, crypto_symbols, stock_symbols, True)
    analysis = analyze(df, start_time, end_time,
                       stock_symbols + crypto_symbols)
    print(analysis)
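
A hypothetical invocation; date is spliced directly into the 09:30:00/16:00:00 timestamps, so it should be a date string such as '2021-03-02' (the symbols are invented for illustration):

intraday_working_hour_only(['AAPL', 'MSFT'], ['BTC'], '2021-03-02')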
Example #8
def find_route(source, dest):
    """ Find path from source to dest via Wikipedia links """
    curr = "https://en.wikipedia.org/wiki/" + source
    order = [source]  # links traversed
    print(curr)

    while True:
        unseen_links = []

        # Grab all links in page
        page_links = link_grab.get_links(curr)
        for link in page_links:
            # Ensures no cycles
            if link not in SEEN:
                # only store links that have not been visited
                unseen_links.append(link)
        # Ensures the program does not get stuck
        if not unseen_links:  # all links on page have been visited
            if curr.lower() == source.lower():  # have tracked back to source
                print("Impossible to get there")  # no source to dest path
                sys.exit()
            if curr in order:
                order.remove(curr)
            curr = order[-1]  # this link was a bad choice, move back
            continue
        best_links = analyzer.analyze(list(unseen_links), dest)
        step = best_links[0]
        SEEN.append(step)
        order.append(step)
        curr = "https://en.wikipedia.org/wiki/" + step
        print(curr)
        if step.lower() == dest.lower():  # destination reached
            print(order)
            break
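
find_route() depends on a module-level SEEN list (plus the link_grab and analyzer modules) that this excerpt does not show; a minimal driver under those assumptions, with hypothetical article titles:

SEEN = []  # links already visited, shared with find_route()

if __name__ == '__main__':
    find_route('Banana', 'Potassium')  # hypothetical source and destination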
Example #9
    def run_analyzer(self, timeout=TIMEOUT_DEFAULT):
        import analyzer

        analysis_results = to_unicode(analyzer.analyze(self.testee))
        try:
            self.result.update(json.loads(analysis_results))
        except ValueError:
            self.result["status"] = analysis_results
Example #10
def analyze(request):
    pwd = settings.BASE_DIR
    JSON_FILE = pwd + '/don/don.json'

    params = {
        'error_file': pwd + '/don/static/don.error.txt',
        'test:all': True,
        'test:ping': False,
        'test:ping_count': 1,
        'test:ovs': True,
        'test:report_file': pwd + '/don/static/don.report.html',
    }

    analyzer.analyze(JSON_FILE, params)
    #output = analyzer.analyze(JSON_FILE, params)
    #html = '<html><body>Output: %s</body></html>' % output
    #return HttpResponse(html)
    return HttpResponseRedirect('/static/don.report.html')
Example #13
def run_bot(r):
    for mention in r.inbox.unread(limit=10):
        if (mention.was_comment):
            target = mention.parent().author
            if (target.name != config.username):
                countP = 0
                countN = 0
                ratio = 0

                #summarize target comment history sentiment
                for comment in r.redditor(
                        target.name).comments.new(limit=None):
                    sentiment = analyze(comment.body)
                    if (sentiment == 1):
                        countP += 1
                    elif (sentiment == -1):
                        countN += 1

                if (countN + countP != 0):
                    ratio = countP / (countN + countP)

                print('countP: ' + str(countP))
                print('countN: ' + str(countN))
                print('ratio: ' + str(ratio))

                #Build comment reply
                graph = "##👿 <"
                for i in range(10):
                    if (ratio <= (i + 1) / 10 and ratio >= (i) / 10):
                        graph += " x "
                    else:
                        graph += " - "
                graph += "> 😇  "
                reply = "You have summoned the **Thought-Police-Bot** to investigate /u/" + target.name + "\n\n"
                reply += graph + "\n\n"

                if (ratio > 0.80):
                    reply += "/u/" + target.name + " passes the investigation with an impressively wholesome rating."
                elif (ratio > 0.6):
                    reply += "/u/" + target.name + " passes the investigation respectably."
                elif (ratio > 0.45):
                    reply += "/u/" + target.name + " passes the investigation... but just barely."
                elif (ratio > 0.2):
                    reply += "/u/" + target.name + " has failed the investigation!"
                else:
                    reply += "/u/" + target.name + " has failed the investigation with a highly toxic rating!"
                try:
                    mention.reply(reply)
                except Exception:
                    print("Error replying - PRAWException")
            else:
                print("Skipped")
        mention.mark_read()

    time.sleep(5)
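
run_bot() assumes analyze() maps a comment body to 1 (positive), -1 (negative), or anything else for neutral; a hypothetical stand-in with that contract, useful for exercising the bot without a real sentiment model:

def analyze(text):
    # Hypothetical keyword-based stand-in for a sentiment analyzer.
    lowered = text.lower()
    if any(word in lowered for word in ('good', 'great', 'love')):
        return 1
    if any(word in lowered for word in ('bad', 'awful', 'hate')):
        return -1
    return 0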
Example #14
def analyze(request):
    # pwd = settings.BASE_DIR
    pwd = settings.ROOT_PATH
    JSON_FILE = pwd + '/don/ovs/don.json'

    params = {
        'error_file': pwd + '/don/templates/don/don.error.txt',
        'test:all': True,
        'test:ping': False,
        'test:ping_count': 1,
        'test:ovs': True,
        'test:report_file': pwd + '/don/templates/don/don.report.html',
    }
    print "params ====> ", params
    analyzer.analyze(JSON_FILE, params)
    # output = analyzer.analyze(JSON_FILE, params)
    # html = '<html><body>Output: %s</body></html>' % output
    # return HttpResponse(html)
    # return HttpResponseRedirect('/static/don.report.html')
    return render(request, "don/ovs/analyze.html")
Example #15
def runScrapper(source, e, queue):  # Button press function
    if source in source_functions:
        print("Started ", source, " Extraction")
        fileToAnalyze = source_functions[source]()
        print("Finished extraction")
        e.set()  # Done extraction
        time.sleep(0.4)  # To ensure progress bar shows done
        e.clear()  # reset flag
        print("Calling Analyzer on file ", fileToAnalyze)
        output_file = analyzer.analyze(fileToAnalyze)
        time.sleep(0.2)
        e.set()  # Done analysis
        queue.put(output_file)
        print("Done Analysis")

    else:
        # Custom Headline
        output_file = analyzer.analyze(source)  # Analyze custom headline file
        e.set()  # Done analysis
        queue.put(output_file)
Example #16
def classify_tweet(c, tweet):
    cities, transportations = analyze(tweet['text'])
    if len(cities) > 0 or len(transportations) > 0:
        update(c, tweet['id'], {
            'cities': cities,
            # materialize the mapping so the payload is a plain list under Python 3
            'transportations': [serialize_transport(t) for t in transportations],
            'confirmedAsOffer': True
        })
        print('Updated a tweet.')
    else:
        print('Ignored the update')
Example #17
    def post(self):
        actual_text = request.json['actual_text']
        optimal_text = request.json['optimal_text']
        sentiment_analysis, targetting_analysis = analyze(
            actual_text, optimal_text)
        print(sentiment_analysis)
        print(targetting_analysis)
        return {
            'sentiment_analysis': str(sentiment_analysis),
            'targetting_analysis': str(targetting_analysis),
        }
Example #18
def featuresScreenTimerFired(data):
    # if cannot complete analysis, direct user to load screen
    if data.analysisInProgress:
        try:
            data.results = a.analyze(data.filename, data.features)
            data.analysisInProgress = False
            data.mode = "analyzeScreen"
        except Exception as e:
            print(e)
            data.analysisInProgress = False
            data.mode = "loadDataScreen"
Example #20
def upload_file():
    output = ""
    if request.method == 'POST':
        f = request.files['the_file']

        destination = os.path.join(app.config['UPLOAD'], f.filename)

        f.save(destination)
        # Run the analyzer on the saved upload
        output = a.analyze(destination)
        print(output)

    return output
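
As excerpted, upload_file() is not bound to a URL; a hedged sketch of the Flask wiring it presumes, where the route path, upload directory, and analyzer alias are all assumptions:

import os

from flask import Flask, request
import analyzer as a  # assumed alias for the analyzer module

app = Flask(__name__)
app.config['UPLOAD'] = '/tmp/uploads'  # hypothetical upload directory

# Register the view defined above: POST carries the file, GET returns "".
app.add_url_rule('/upload', view_func=upload_file, methods=['GET', 'POST'])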
Example #21
    def schemeLoad(self, cont, env, args):
        """Special primitive: implements LOAD."""
        symbolicFilename = str(args[0])
        try:
            with open(symbolicFilename) as f:
                text = "(begin \n%s\n 'ok)" % f.read()
            expandedExp = self.get_expander().expand(parser.parse(text))
            analyzedExp = analyzer.analyze(expandedExp)
            return analyzer.texec(analyzedExp, env, cont)
        except IOError as e:
            raise error.SchemeError("LOAD error -- %s" % str(e))
Example #24
    def analyze_result(self, version, spectra_list, fault_list, formula):
        # print('spectra len to analyze : %s' % len(spectra_list))
        spectra_list.sort(reverse=True)
        # print_spectra(version.cal_result_dir(formula), spectra_list)
        # analyzer.count_fault_rank(fault_list, spectra_list)
        self.ranker(fault_list, spectra_list, version)
        fault_result_list = [fault_now.to_dict() for fault_now in fault_list]
        # analyze_judgements = ['EXAM']
        result_dict = {'project': version.name(), 'no': int(version.no())}
        to_exclude = version.return_spectra_exclude() if self.exclude_blank else []
        for analyze_judgement in self.analyze_judgements:
            result_dict[analyze_judgement] = analyzer.analyze(
                analyze_judgement, spectra_list, fault_list, to_exclude)
        # print('finish analyze')
        return fault_result_list, result_dict
Example #25
def main():
    logging.info("main: begin ...")
    # reading the sequence from the fasta file
    sequence = fastareader.read_sequence()
    # reading the flow order
    flow_order_map = h5reader.read_flow_order()
    # for each flow order do the following
    for flow_order in flow_order_map:
        logging.info("For flow order: " + flow_order)
        # getting the list against the key
        flow = flow_order_map.get(flow_order)
        # pass the flow and the sequence to get the number of incorporations for the current flow order
        number_of_incorporation = analyzer.analyze(flow, sequence)
        # plot the output with the current flow order
        plotter.plot(flow, number_of_incorporation, flow_order)
    logging.info("main: complete ...")
Example #26
def detect(data, channel):
    try:
        response = {
            'id': data['id'],
            'status': 'processing',
            'result': {},
            'fullname': data['fullname'],
        }
        channel.basic_publish(exchange='',
                              routing_key='detection-response',
                              body=json.dumps(response))

        print(data)

        file_params = analyzer.analyze(data['fullname'])
        params_array = np.array(file_params, dtype=np.float32).reshape(1, -1)
        transformed_params = sc.transform(params_array)

        # prediction = classifier.predict(transformed_params)

        probs = classifier.predict_proba(transformed_params)
        best_n = np.argsort(probs, axis=1)
        print(probs)
        print(best_n)

        response = {
            'id': data['id'],
            'status': 'finished',
            'result': {
                "probs": probs[0].tolist(),
            },
            'fullname': data['fullname'],
        }
        channel.basic_publish(exchange='',
                              routing_key='detection-response',
                              body=json.dumps(response))
    except Exception:
        response = {
            'id': data['id'],
            'status': 'failed',
            'result': {},
            'fullname': data['fullname'],
        }
        channel.basic_publish(exchange='',
                              routing_key='detection-response',
                              body=json.dumps(response))
Example #27
def analyze_code(pythonfile):
    # transform to cfg
    cfg, g = transform.transformToCFG('./uploads/' + pythonfile)

    start_time = time.time()
    # analyze broken authentication
    vulnerabilities, count_node = analyzer.analyze(cfg, g,
                                                   './uploads/' + pythonfile)
    end_time = time.time()

    # analyze time in second
    exec_time = end_time - start_time

    # visualize output
    visualize.createImage(g, pythonfile)

    return vulnerabilities, exec_time * 1000, count_node
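
analyze_code() returns the vulnerability list, the elapsed analysis time in milliseconds, and the CFG node count; a hypothetical call, with the uploaded file name invented for illustration:

vulns, elapsed_ms, node_count = analyze_code('app.py')
print('%d findings in %.1f ms across %d nodes' % (len(vulns), elapsed_ms, node_count))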
Example #28
def runScrapperDate(source, e, queue, year, month, day):
    name_functions = {
        "guardian": guardianScrapperDate,
        "new-york-times": nyTimesScrapperDate,
        "times-of-india": toiScrapperDate
    }

    if source in name_functions:
        print("Started ", source, " Extraction")
        fileToAnalyze = name_functions[source](str(year), str(month), str(day))
        print("Calling Analyzer on file ", fileToAnalyze)
        output_file = analyzer.analyze(fileToAnalyze)
        e.set()
        queue.put(output_file)
        print("Done Analysis")
    else:
        e.set()
        pass
Example #29
def analyzerhtml(tmpfilename, htmlfilename, execpath, reqspath, aaffiles):

    if not len(aaffiles) > 0:
        print "empty AAF file list, no action taken"
        return

    (files, removedFiles) = analyzer.analyze(tmpfilename, execpath, aaffiles,
                                             reqspath)
    files = ['File Set'] + files

    # Run the analyzer to generate a detailed requirement listing
    reqsDetailCmdArgs = [
        execpath, "-report", "-detail", "-type", "all", "-reqs", reqspath
    ]
    reqsDetailPipe = subprocess.Popen(reqsDetailCmdArgs,
                                      stdout=subprocess.PIPE,
                                      stderr=None)

    # Run the analyzer to generate a list of requirements that are covered
    # by test implementations.
    reqsCovCmdArgs = [execpath, "-report", "-testcoverage", "-reqs", reqspath]
    reqsCovPipe = subprocess.Popen(reqsCovCmdArgs,
                                   stdout=subprocess.PIPE,
                                   stderr=None)

    # Parse the detailed requirement listing and the coverage listing.
    requirements = parse.parse_requirements(reqsDetailPipe.stdout,
                                            reqsCovPipe.stdout)

    parse.parse_file_coverage(tmpfilename, requirements)

    for i in range(0, len(files)):
        if sys.platform == 'win32' or sys.platform == 'cygwin':
            files[i] = files[i].split('\\')[-1]
        else:
            files[i] = files[i].split('/')[-1]

    # Parse the test result data to generate the result table and output
    # the table in html
    testResults = parse.get_result_data(files, requirements)

    parse.write_html(htmlfilename, testResults, requirements, removedFiles)

    print "succesfully wrote html file:", htmlfilename
Example #30
def report(repo_url):
    repo = create_repository(repo_url)
    if repo is None:
        flash('Given repository does not exist or could not be accessed')
        return redirect(url_for('index'))

    if is_cached(repo):
        results = mongo.db.repositories.find_one({'url': repo.url})
    else:
        path = clone(repo)

        results = analyze(path)
        repo.save_analysis_results(results)

        cache(repo)
        clear(path)

        results = repo.to_document()
    return render_template('report/results.html', report=results)
Example #31
def main():
    p = argparse.ArgumentParser()
    p.add_argument('rootdir', metavar='rootdir', nargs='?', default='.', help='the directory to scan (optional: default is the current directory)')
    args = p.parse_args()
    
    root = os.path.abspath(args.rootdir)
    
    sanity.ensure(root)
    
    files = scanner.scan(root)
    defs = list(parser.parse(root, files))
    
    fix_methods = False
    if fix_methods:
        results = analyzer.analyze(root, files, defs)
        fixer.fix_method_declarations(root, defs, results)
    else:
        a2 = analyzer.analyze2(root, files, defs)
        fixer.fix2(root, defs, files, a2)
Example #32
    def dialogue(self, input):
        """
        From the sentences generated by the Markov chain, extract those that
        contain a noun used in the user's utterance and return one as the reply.

        Parameters:
            input(str): String entered by the user.
        Returns:
            str: Reply message.
        """
        # Morphologically analyze the input string
        parts = analyzer.analyze(input)

        m = []  # sentences that match a noun from the input
        # Iterate over the morphemes and parts of speech in the analysis result
        for word, part in parts:
            # If the input contains a noun, search the Markov-chain sentences for it
            if analyzer.keyword_check(part):
                #print('afetr_check_word===',word)
                # Process the Markov-generated sentences one at a time
                for element in self.sentences:
                    # Check whether the morpheme occurs in the Markov-chain sentence.
                    # Ending the pattern with '.*?' would also match the search
                    # word on its own (e.g. just "花"), so append '.*' instead
                    # so that bare matches are excluded.
                    find = '.*?' + word + '.*'
                    # Match against the Markov-chain sentence
                    tmp = re.findall(find, element)
                    if tmp:
                        # Append any matching sentences to the list m
                        m.append(tmp)
        # findall() returns a list, so flatten the nested list
        m = list(chain.from_iterable(m))

        if m:
            # Pick randomly from the Markov-chain sentences that matched a noun in the input
            return random.choice(m)
        else:
            # No Markov-chain sentence matched
            return random.choice(self.sentences)
Example #34
    def main(self, analyzer):

        offset = 6
        isFinished = False
        isNewTime = True
        self.getOptions()
        tools = ["iperf", "scp"]

        if not os.path.exists(self.uriTime):
            print('WARN! Create a timeRead file!')
            sys.exit(0)

        timeRead = self.readTimeRead(self.uriTime)
        timeR = timeRead.split(",")
        timeRNew = timeRead

        while not isFinished:
            fileLines, isFinished = self.readLog(self.uriLog)

            for i in range(len(fileLines) - 1, 0, -1):
                #print(line)
                result = analyzer.analyze(fileLines[i], tools)
                if result[0]:
                    if self.check(result, timeR, offset):
                        isFinished = True
                        break

                    if isNewTime:
                        timestampNew = self.splitStr(result[1], ' ', offset)
                        timeRNew = result[2] + "," + timestampNew
                        isNewTime = False
                    print(self.combi(result[1], result[2]))
                    self.excuteShell(self.combi(result[1], result[2]))
            self.closeFile()

        print(timeRNew)
        with open(self.uriTime, 'w') as timeReadFile:
            timeReadFile.write(timeRNew)
Example #37
    def schemeEval(self, cont, env, args):
        expandedExp = self.get_expander().expand(args[0])
        analyzedExp = analyzer.analyze(expandedExp)
        return analyzer.texec(analyzedExp, env, cont)
Example #38
def handle(filename):
    sdpdata = sdpdatafile.SdpDataFile(filename)

    log = simplelogger.SimpleLogReader(sdpdata.logfilename)
    logw = simplelogger.SimpleLogWriter('sub', sdpdata.logfilename)

    status = log.getstatus()

    if status == 'concluded':
        inlineprint('all done.')
    elif status is None or status == 'tosubmit':
        inlineprint('submitting...')
        submit(sdpdata)
        handle(filename)
    elif status == 'failed':
        if args.force:
            inlineprint('failed; forced resubmission...')
            submit(sdpdata)
            handle(filename)
        else:
            inlineprint('failed.')
    elif status == 'submitted' or status == 'running':
        inlineprint('submitted.')
        if args.reallyrunning:
            inlineprint('checking...')
            submissionid = log.lastbonusexprwith(expr='submissionid')
            if world.isreallyrunning(submissionid):
                inlineprint('is really running.')
            else:
                inlineprint('is NOT really running:')
                logw.setstatus('failed')
                handle(filename)
        if args.pause:
            inlineprint('waiting for completion...')
            submissionid = log.lastbonusexprwith(expr='submissionid')
            world.waitforcompletion(submissionid)
            inlineprint('completed.')
            handle(filename)
    elif status == 'finished':
        tr = log.lastbonusexprwith(expr='terminateReason')
        primopt = log.lastbonusexprwith(expr='primalObjective')
        if tr == 'maxRuntime exceeded' or \
                tr == 'maxIterations exceeded':
            inlineprint('ran out of time.')
            if args.maxsubmissions and \
                    log.numlineswith(expr='status', bonusexpr='submitted') >= \
                    args.maxsubmissions:
                inlineprint('too many submissions to resubmit.')
                logw.write('too many submissions.')
                logw.setstatus('failed')
                handle(filename)
            else:
                inlineprint('resubmitting... ')
                submit(sdpdata)
                handle(filename)
        else:  # i.e. terminateReason is not timed out
            try:
                newfilename = analyzer.analyze(sdpdata, tr, primopt)
            except ValueError:
                logw.write('analyzer failed with ValueError')
                logw.setstatus('failed')
                inlineprint('could not analyze the result.')
                handle(filename)
            else:
                logw.setstatus('concluded')
                if newfilename is None:
                    inlineprint('done.')
                elif newfilename == filename:
                    inlineprint('resubmitting according to analyzer...')
                    logw.setstatus('tosubmit')
                    handle(filename)
                else:
                    print('replaced -->')
                    inlineprint(os.path.basename(newfilename) + ' :')
                    logw.write('replaced with', newfilename)
                    handle(newfilename)
    else:
        # How did you get here?
        print('Unknown status for ' + filename + '!')
Example #39
import downloader
import analyzer
import time

prefix = r'http://www.lingdiankanshu.com/html/0/175/'
url = r'75298.html'
n = 1
while True:
    now = time.time()
    soup = downloader.download(prefix + url)
    if soup is None:
        print('Finished or Error')
        break
    url = analyzer.analyze(soup)
    print('Chapter', n, 'finished in', time.time() - now, 'seconds')
    if url is None:
        print('Next chapter not found!')
        break
    n += 1
Example #40
#! /usr/bin/python
# -*- coding: utf-8 -*-

import sys, getopt, analyzer

# Usage:
#   python runanalyzer.py coverage_file_to_create AAFAnalyzer_executable \
#            file1.aaf file2.aaf file3.aaf ... AAFRequirements.xml 
#
# where,
#  coverage_file_to_create is the coverage file to generate
#  AAFAnalyzer_executable is the location of the AAFAnalyzer executable
#  AAFRequirements.xml is the location of the AAFRequirements.xml document
#  file{1,2,3,...}.aaf are the .aaf files to be processed

analyzer.analyze(sys.argv[1], sys.argv[2], sys.argv[4:-1], sys.argv[3])
Example #41
    def get_pretty_path_list(self):
        return analyzer.analyze(self).get_pretty_path_list()
Example #42
    def get_pretty_path(self):
        if settings.VERBOSE > 2:
            print('File.get_pretty_path()')
            print('Path from analyzer: %s' % analyzer.analyze(self).get_pretty_path())
        return analyzer.analyze(self).get_pretty_path()
Example #43
            doObject = True

    if len(args) != 2:
        usage(stream=stderr)
        return 1
    name, filename, = args

    import parser
    import analyzer
    import log
    import codegen

    log.setStreams(stdout=stdout, stderr=stderr)
    parseTree = parser.parse(filename, debug=doDebug)
    parseTree.name = name
    analyzer.analyze(parseTree)
    if doClient:
        print('Generating client ...', file=stderr)
        codegen.genClient(parseTree, outdir=dirClient)
        if doDBus:
            codegen.genClientDBus(parseTree, outdir=dirClient)
        if doTests:
            codegen.genTests(parseTree, outdir=dirClient)
        if doShell:
            codegen.genShell(parseTree, outdir=dirClient)
        if doObject:
            codegen.genObject(parseTree, outdir=dirClient)
    if doServer:
        print('Generating server ...', file=stderr)
        codegen.genServer(parseTree, outdir=dirServer)
        if doDBus:
Example #44
def do_task_fetch_material(username, force_top):
    user = CurryUser.get_by_key_name(username)
    force_top = bool(int(force_top))

    if not user:
        logging.error("no such user '%s'" % username)
        return 'bad'

    tweet_list = api.GetUserTimeline(screen_name=username, count=config.FETCH_COUNT)

    tweet = None
    material_list = None
    success = False
    if force_top:
        tweet_list = tweet_list[0:1]
    else:
        shuffle(tweet_list)

    #
    # select material
    #
    for tweet in tweet_list:
        # check history
        if not force_top and is_duplicated(tweet):
            continue

        text = tweet.GetText().encode('utf-8')
        material_list = analyze(
                text,
                count=config.TWEET_MATERIAL_MAX
                )

        if len(material_list) > 0:
            # found material
            success = True
            break

    if success:
        # record to history
        # TODO: trim history chronically
        History(
                key_name=str(tweet.id),
                timestamp=datetime.now()
                ).put()
    else:
        logging.info("material not found for user '%s'" % username)
        return 'bad'

    #
    # select receivers
    #
    link_list = (UserLink
            .all()
            .filter('sender = ', user)
            .order('timestamp')
            .fetch(limit=config.RECEIVER_MAX)
            )

    for link in link_list:
        # randomize material per receiver
        shuffle(material_list)
        count = 1 + int(random() * len(material_list))
        receive_material = material_list[:count]

        taskqueue.add(
                queue_name='post-queue',
                url='/task/post_material/%s/%s' % (username, link.receiver.key().name()), 
                params={'material': receive_material}
                )

        link.timestamp = datetime.now()
        logging.debug("sending from user '%s' to '%s' with material '%s'" % 
                (username, link.receiver.key().name(), repr(receive_material)))
    # update timestamp
    db.put(link_list)

    # send to karei_bot if no receivers
    if len(link_list) == 0:
        shuffle(material_list)
        count = 1 + int(random() * len(material_list))
        receive_material = material_list[:count]

        taskqueue.add(
                queue_name='post-queue',
                url='/task/post_material/%s/%s' % (username, config.MY_NAME), 
                params={'material': receive_material}
                )

        logging.debug("sending from user '%s' to '%s' with material '%s'" % 
                (username, config.MY_NAME, repr(receive_material)))

    return 'ok'
Example #45
url = "https://www.nytimes.com/2017/09/29/us/politics/tom-price-trump-hhs.html"

outputFile = "summarized.txt"

freqFile = "wordFreq.txt"

#settings

PERCENT_SUMMARIZED = 15

LENGTH_ADJUSTMENT_FACTOR = 10

OPTIMAL_SENTENCE_LENGTH = 20

SENTENCE_LOCATION_FACTOR = 2

WORD_IN_TITLE_FACTOR = 20

#run

extract = client.Extract({"url": url})

parse(extract.get('article').encode('ascii', 'xmlcharrefreplace'), "temp.txt")
parse(
    extract.get('title').encode('ascii', 'xmlcharrefreplace'), "titleTemp.txt")

analyze(url, "temp.txt", "titleTemp.txt", outputFile, freqFile,
        PERCENT_SUMMARIZED, LENGTH_ADJUSTMENT_FACTOR, OPTIMAL_SENTENCE_LENGTH,
        SENTENCE_LOCATION_FACTOR, WORD_IN_TITLE_FACTOR)
Example #46
    def get_analyzer(self):
        return analyzer.analyze(self)