Example no. 1
def go(program_path, pattern_path):
    if not (program_path.exists() and program_path.is_file()):
        fatal(f'given program file does not exist: {program_path}')

    if not (pattern_path.exists() and pattern_path.is_file()):
        fatal(f'given pattern file does not exist: {pattern_path}')

    output_path = get_out_filepath(program_path.parents[0])

    program = read_json(program_path)
    patterns_json = read_json(pattern_path)
    patterns = []

    for patt in patterns_json:
        patterns.append(Pattern(patt))

    analyser = Analyser(program, patterns)
    vulnerabilities = analyser.run()

    json_vulns = json.dumps([vuln.to_dict() for vuln in vulnerabilities], indent=2)
    debug(json_vulns)
    output_path.write_text(json_vulns)

    debug(f'Found vulnerabilities: {len(vulnerabilities)}')
    debug(vulnerabilities)

    # may be used by tester program
    return vulnerabilities
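
A minimal sketch of how go() might be wired to a command-line entry point; the argparse setup, argument names and help texts are assumptions, not part of the original example.

import argparse
from pathlib import Path

# Hypothetical entry point; flag names are illustrative only.
def main():
    parser = argparse.ArgumentParser(description='Run the pattern analyser')
    parser.add_argument('program', help='path to the program JSON file')
    parser.add_argument('patterns', help='path to the patterns JSON file')
    args = parser.parse_args()
    # go() validates both paths and writes the vulnerability report itself
    go(Path(args.program), Path(args.patterns))

if __name__ == '__main__':
    main()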
Example no. 2
def test_shouldUseAutoCapitalLettersAfterDots():
    sut = Analyser()
    sut.analyse('ustawienia capital Auto')
    assert 'Dzień dobry, nazywam się Czesio. Lubię jeść muchy... Troche to dziwne.' == ''.join(
        sut.analyse(
            "duża dzień dobry przecinek nazywam się Czesio kropka lubię jeść muchy kropka kropka kropka troche to dziwne kropka"
        ))
Example no. 3
def test_lalalalalala2():
    sut = Analyser()
    sut.analyse("ustawienia auto Space")
    result = sut.analyse(
        "alias spacja to do równe cudzysłów vim spacja tylda kropka ukośnik to do cudzysłów"
    )
    assert 'alias todo=\"vim ~./todo\"' == "".join(result)
Example no. 4
def test_lalalala7():
    sut = Analyser()
    sut.analyse('ustawienia capital wyłącz')
    assert 'dzień dobry, nazywam się czesio. lubię jeść muchy' == ''.join(
        sut.analyse(
            "Dzień dobry przecinek nazywam się Czesio kropka lubię jeść muchy")
    )
Example no. 5
    def test_get_daily_avg_sentiment_by_viewpoint(self, mocked_open,
                                                  mocked_smooth,
                                                  mocked_add_to_cache,
                                                  mocked_connect,
                                                  mocked_redis):
        mocked_cursor = mock.MagicMock()
        mocked_timestamp = mock.MagicMock()
        mocked_timestamp.isoformat.side_effect = [1, 2, 3]
        mocked_cursor.fetchall.return_value = [
            self.RawAvgResult(0.4, mocked_timestamp, True),
            self.RawAvgResult(0.5, mocked_timestamp, False),
            self.RawAvgResult(0.6, mocked_timestamp, True)
        ]
        mocked_smooth.side_effect = [[
            self.RawAvgResult(0.44444444, mocked_timestamp, True),
            self.RawAvgResult(0.66666666, mocked_timestamp, True)
        ], [self.RawAvgResult(0.55555555, mocked_timestamp, False)]]
        mocked_connect.return_value = (None, mocked_cursor)

        Analyser().get_daily_avg_sentiment_by_viewpoint()

        mocked_cursor.execute.assert_called_once_with("<SQL here>")
        mocked_smooth.assert_any_call([
            self.RawAvgResult(0.4, mocked_timestamp, True),
            self.RawAvgResult(0.6, mocked_timestamp, True)
        ])
        mocked_smooth.assert_any_call(
            [self.RawAvgResult(0.5, mocked_timestamp, False)])
        mocked_add_to_cache.assert_any_call("vp:senti", {
            1: 0.44444,
            2: 0.66667
        })
        mocked_add_to_cache.assert_any_call("vn:senti", {3: 0.55556})
Example no. 6
 def __init__(self, state):
     super().__init__()
     self.daemon = True
     self.audio_converter = None
     self.analyser = Analyser(state)
     self.fake_keyboard = FakeKeyboard()
     self.responses = None
Example no. 7
def simulation_tails(fname, uranium_type="Natural"):
    """Get the reprocessed and depleted uranium tails as dict"""

    if uranium_type == "Natural":
        sink = "DepletedNaturalUSink"
    elif uranium_type == "Reprocessed":
        sink = "DepletedReprocessedUSink"
    else:
        msg = "'uranium_type' has to be either 'Natural' or 'Reprocessed'."
        raise ValueError(msg)

    a = Analyser(fname)
    sim_end = a.query(selection='EndTime', table='Finish')[0][0]
    results = a.query(selection='NucId, Quantity',
                      table='ExplicitInventory',
                      condition='Time==? AND AgentId==?',
                      vals=(sim_end, a.names[sink]))
    comp = {key: 0 for key in range(232, 239) if key != 237}
    quantity = 0
    for key, value in results:
        # NucId is of the form ZZAAAMMMM (e.g. 922350000 for U-235); integer
        # division strips the metastable digits and subtracting 92 * 1000
        # leaves the mass number (922350000 // 10000 - 92000 == 235).
        key = key // 10000 - 92 * 1000
        comp[key] = value
        quantity += value

    for key, value in comp.items():
        comp[key] = value / quantity

    return comp, quantity
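
A quick sanity check of the NucId arithmetic above, assuming Cyclus-style ids of the form ZZAAAMMMM; note the shortcut only works for uranium (Z = 92), which is all these depleted-uranium sinks contain.

# Illustrative check only; values are well-known uranium NucIds.
def mass_number(nuc_id):
    return nuc_id // 10000 - 92 * 1000

assert mass_number(922350000) == 235  # U-235
assert mass_number(922380000) == 238  # U-238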
Example no. 8
def test_lalalalala3():
    sut = Analyser()
    text = "ustawienia auto Space"
    assert ["Ala", " ", "ma", " ", "kota"] == sut.analyse("Ala ma kota")
    sut.analyse(text)
    assert ["Ala", "ma", "kota"] == sut.analyse("Ala ma kota")
    sut.analyse(text)
    assert ["Ala", " ", "ma", " ", "kota"] == sut.analyse("Ala ma kota")
Example no. 9
def test_shouldUseCapitalLetters():
    sut = Analyser()
    sut.analyse('ustawienia auto space')
    sut.analyse('ustawienia capital włącz')
    assert 'DZIEŃ DOBRY, NAZYWAM SIĘ CZESIO. CHCIAŁEM SPRAWDZIĆ JESZCZE, CZY DZIAŁA "CAPSLOCK" CUDZYSŁÓW.' == ''.join(
        sut.analyse(
            "dzień spacja dobry przecinek spacja nazywam spacja się spacja Czesio kropka spacja chciałem spacja sprawdzić spacja jeszcze przecinek spacja czy spacja działa spacja cudzysłów capslock cudzysłów spacja dosłownie cudzysłów kropka"
        ))
Example no. 10
    def __init__(self, db):
        self.db = db

        self.analyser = Analyser(self.db)
        self.statistics = Statistics(self.db)
        self.sleep_statForMonth = {}
        self.DaysValidity = 0  # number of valid days used when computing the various scores
        self.statMonth_analyser = StatMonthAnalyser(db)
Example no. 11
 def __init__(self):
     self.settings = Settings()
     self.logger = Logger(self.settings.logfile)
     self.reporter = Reporter(self.settings)
     self.setup = Setup(self.settings, self.logger)
     self.grader = Grader(self.settings, self.logger, self.setup)
     self.analyser = Analyser(self.settings, self.reporter, self.logger,
                              self.setup, self.grader)
Example no. 12
def run():
    analyser = Analyser()
    if analyser.valid:
        analyser.print_all()
        analyser.analyse_all()
        if len(analyser.Protocols["All"]):
            wind = ProtsWindow("Protocols", analyser, nb=10)
            wind.show()
Example no. 13
def write_mesh(fronts, name):
    counter = 0
    for front in fronts:
        for indiv in front:
            mesh = Analyser(str(indiv.phenotype))
            mesh.create_graph()
            filename = FRONT_FOLDER + "/" + name + "." + str(counter)
            mesh.create_mesh(filename)
            counter += 1
Example no. 14
 def __init__(self, db):
     self.db = db
     self.stage_analyser = StageAnalyser()
     self.analyser = Analyser(self.db)
     self.stage_regularity_analyser = StageRegularityAnalyser(self.db)
     self.time_array = []  # time slots, split into 30-minute intervals
     self.dict_time_point = {}  # dict keyed by time slot, counting how many time points fall in each
     self.offbed_time_array = []  # all times out of bed during sleep
     self.divide_time()
Example no. 15
    def test_prune_old_tweets(self, mocked_open, mocked_connect, mocked_redis):
        mocked_conn = mock.MagicMock()
        mocked_cursor = mock.MagicMock()
        mocked_connect.return_value = (mocked_conn, mocked_cursor)

        Analyser().prune_old_tweets()

        mocked_cursor.execute.assert_called_once_with("<SQL here>")
        mocked_conn.commit.assert_called_once_with()
Example no. 16
def exercise(data):
    print(data)
    print("Started " + data["name"])
    global exercise_data
    exercise_data = data
    global exercise_in_progress
    exercise_in_progress = True  # Start exercise processing
    global analyser
    analyser = Analyser('references_' + data["name"] + '.json', data,
                        rdr)  # Create analyser for data["name"] exercise
Example no. 17
def analise(request):
    if request.method == 'POST':
        a = Analyser(1, 2)
        content = show_reviews(request)

        a.analyse(content['reviews'])

        return render(request, 'respostas.html', content)
    elif request.method == 'GET':
        return render(request, 'home.html', {})
Example no. 18
 def setup(self, explore, doTrain, impala, calcprobs, minmax, lossf, K,
           dropout, alpha, discount, lambd, lr, name, TopNvalues,
           cutOffdepth, ValueCutOff, ValueDiffCutOff, ProbabilityCutOff,
           historyLength, startAfterNgames, batchSize, sampleLenth, network,
           analyse, montecarlo):
     self.calcprobs, self.newreward, self.all_state, self.all_reward, self.explore, self.doTrain, self.previousState, self.actionState, self.parameters, self.phi, self.rating, self.connection, self.montecarlo = calcprobs, 0, [], [], explore, doTrain, [], None, [], [], 1000, None, montecarlo
     self.ImpaleIsActivated = impala
     if self.ImpaleIsActivated:
         self.historyLength, self.startAfterNgames, self.batchSize, self.sampleLenth = int(
             historyLength), int(startAfterNgames), int(batchSize), int(
                 sampleLenth)
         self.impala = Impala(self.train,
                              self.resettrace,
                              historyLength=self.historyLength,
                              startAfterNgames=self.startAfterNgames,
                              batchSize=self.batchSize,
                              sampleLenth=self.sampleLenth)
     else:
         self.historyLength, self.startAfterNgames, self.batchSize, self.sampleLenth, self.impala = None, None, None, None, None
     self.EloWhileTrain = [self.rating]
     self.name = name
     self.network = network
     self.gameNumber = 1
     self.K, self.dropout, self.alpha, self.discount, self.lambd, self.lr = K, dropout, alpha, discount, lambd, lr
     if not self.explore:
         self.K = None
     self.NextbestAction = []
     self.analyse = analyse
     if self.analyse:
         self.analyser = Analyser()
     else:
         self.analyser = None
     self.lossf = lossf
     self.currentAgent = self
     self.minimaxi = minmax
     if self.minimaxi:
         self.TopNvalues, self.cutOffdepth, self.ValueCutOff, self.ValueDiffCutOff, self.ProbabilityCutOff = int(
             TopNvalues), int(
                 cutOffdepth
             ), ValueCutOff, ValueDiffCutOff, ProbabilityCutOff
         self.minmaxer = MinMaxCalculate(
             self.value,
             TopNvalues=self.TopNvalues,
             cutOffdepth=self.cutOffdepth,
             ValueCutOff=self.ValueCutOff,
             ValueDiffCutOff=self.ValueDiffCutOff,
             ProbabilityCutOff=self.ProbabilityCutOff,
             explore=self.explore,
             K=self.K,
             calcprobs=self.calcprobs,
             montecarlo=self.montecarlo,
             discount=self.discount)
     else:
         self.TopNvalues, self.cutOffdepth, self.ValueCutOff, self.ValueDiffCutOff, self.ProbabilityCutOff = None, None, None, None, None
     self.Features = 'antSituation + [sum(mine)] + [sum(dine)] + mine[1:13] + dine[1:13] + splitDistance + baseDistance + [carryEnimy, carryAlly] + dice + score + flat_list'
Example no. 19
def main():
    """
    Analyse the emails of the user and obtain the style metrics.
    
    Returns
    -------
    None.
    
    """
    if os.getcwd() not in sys.path:
        sys.path.append(os.getcwd())
    
    #Creation of a Gmail resource
    service = build('gmail', 'v1',
                    credentials = auth.get_credentials(config.SCOPES, config.CREDS))
    anls = None
    nextPageToken = None
    
    usu = input('Introduce the user name: ')
    
    if (yes_no_question('Was there a previous execution with the same credentials?')):
        q = int(input('Introduce the remaining quota units: '))
        if (yes_no_question('Was it with the same user?')):
            ext = yes_no_question('Was the previous execution extracting messages?')
            nextPageToken = input('Introduce NextPageToken: ')
            num_res = int(input('How many Gmail resources were extracted? '))
            anls = Analyser(service, usu, q, ext, num_res)
        else:
            anls = Analyser(service, usu, q)
    else:
        anls = Analyser(service, usu)
        
    if (yes_no_question('Has the user an email signature?')):
        print('Introduce the signature and finish it with the word "STOP".\n')
        entr = input()
        sign = ''
        while (entr != 'STOP'):
            sign += entr + '\n'
            entr = input()
        anls.analyse(nextPageToken, sign)
    else:
        anls.analyse(nextPageToken)
Example no. 20
    def process_newest_weibo():
        """
        设置定时器: 每隔一段时间调用自身
        获取新微博: 先爬取 person 的最新一条微博, 若其已存在与数据库中, 则等待下次执行.
                                             若不存在, 则分析其情绪值, 并存入数据库中, 同时更新数据库中的统计数据
        分析情绪值: 若情绪正常, 则等待, 否则, 发送短信
        """

        # 获取新微博
        new_weibo = spider.get_newest_weibo()
        query = Weibo.select().where(
            Weibo.weibo_content == new_weibo["weibo_content"])
        print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        # Process the newly posted weibo
        if not query.exists():
            print('====== New weibo found ======')
            analyser = Analyser(user_id)
            sentiment_value = analyser.get_sentiment_value(
                new_weibo["weibo_content"])
            # Database operations
            db = DB()
            db.person = person
            weibo = db.store_weibo(**new_weibo)
            db.store_sentiment(weibo, sentiment_value)
            analyser.calculate()
            # After updating the database the mean and standard deviation have changed,
            # so refresh them before checking the new value
            MEAN = Person.get(Person.user_id == user_id).mean
            SD = Person.get(Person.user_id == user_id).std
            # Flag sentiment values more than half a standard deviation from the mean
            if not (MEAN - SD / 2 <= sentiment_value <= MEAN + SD / 2):
                message = '[{person_name}] posted a weibo with sentiment value {sentiment_value}, content: {content}'.format(
                    person_name=person.username,
                    sentiment_value=sentiment_value,
                    content=new_weibo['weibo_content'],
                )
                print(message)
                print('============')
                # Send an SMS notification
                send_sms(message)
        else:
            # Update the like, retweet and comment counts
            weibo = query[0]
            if (weibo.up_num != new_weibo['up_num']
                    or weibo.retweet_num != new_weibo['retweet_num']
                    or weibo.comment_num != new_weibo['comment_num']):
                print('Weibo stats changed, updating the database...')
                db = DB()
                db.person = person
                weibo.up_num = new_weibo['up_num']
                weibo.retweet_num = new_weibo['retweet_num']
                weibo.comment_num = new_weibo['comment_num']
                weibo.save()
                print('Database updated')

            print('====== No new weibo; the output above is the latest existing weibo ======')
Example no. 21
 def netstream_loaded(self):
     # start = time()
     analyser = Analyser(self.replay)
     # print("analyser: %f" % (time()-start))
     # start = time()
     self.heatmap_tab.set_analyser(analyser)
     # print("heatmap: %f" % (time()-start))
     # start = time()
     self.distance_tab.set_analyser(analyser)
     # print("distance: %f" % (time()-start))
     logger.info('Netstream Parsed. No Errors found')
Example no. 22
    def test_init(self, mocked_connect, mocked_redis):
        mocked_conn = mock.MagicMock()
        mocked_cursor = mock.MagicMock()
        mocked_redis_conn = mock.MagicMock()
        mocked_connect.return_value = (mocked_conn, mocked_cursor)
        mocked_redis.return_value = mocked_redis_conn

        an = Analyser()
        mocked_redis.assert_called_once_with(host='cache', port=6379, db=0)
        self.assertEqual(mocked_conn, an.db_con)
        self.assertEqual(mocked_cursor, an.db_cursor)
        self.assertEqual(mocked_redis_conn, an.redis)
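
The mocked_connect and mocked_redis parameters in the tests above and below are presumably injected by stacked mock.patch decorators that are not shown in the excerpts. A minimal sketch of that pattern with made-up patch targets, relying on the usual bottom-up ordering of mock.patch (the decorator closest to the function supplies the first mock argument):

import unittest
from unittest import mock

class AnalyserTest(unittest.TestCase):
    # Hypothetical patch targets; the real module paths are not shown in the excerpt.
    @mock.patch('analyser.redis.StrictRedis')  # outermost patch -> last mock parameter
    @mock.patch('analyser.db.connect')         # innermost patch -> first mock parameter
    def test_init(self, mocked_connect, mocked_redis):
        mocked_connect.return_value = (mock.MagicMock(), mock.MagicMock())
        ...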
Example no. 23
def test_basic_data():
    '''
    Makes sure that the analyser can still correctly read a basic file set. 
    '''
    a = Analyser('test-LogFile',
                 parentDir=r'C:\Users\jia335\projects\c_sep\test_dir',
                 runScheme=['SubtractBackground', 'Sum'],
                 runArgs=[{
                     'window': 5,
                     'inverted': False
                 }, {}])
    assert np.all(a.results == (np.zeros_like(a.results) + 10))
Example no. 24
def test_lalalalala2():
    sut = Analyser()
    result = sut.analyse("alias to")
    result += sut.analyse("do")
    result += sut.analyse("równe")
    result += sut.analyse("cudzysłów")
    result += sut.analyse("vim tylda")
    result += sut.analyse("kropka")
    result += sut.analyse("ukośnik")
    result += sut.analyse("to")
    result += sut.analyse("do")
    result += sut.analyse("cudzysłów")

    assert 'alias todo=\"vim ~./todo\"' == "".join(result)
Example no. 25
    def test_add_to_cache_no_value(self, mocked_connect, mocked_redis):
        mocked_connect.return_value = (None, None)
        mocked_redis_conn = mock.MagicMock()
        mocked_redis_transaction = mock.MagicMock()
        mocked_redis.return_value = mocked_redis_conn
        mocked_redis_conn.pipeline.return_value = mocked_redis_transaction

        Analyser().add_to_cache("key", None)

        mocked_redis.assert_called_once_with(host='cache', port=6379, db=0)
        mocked_redis_conn.pipeline.assert_not_called()
        mocked_redis_conn.delete.assert_not_called()
        mocked_redis_conn.hmset.assert_not_called()
        mocked_redis_transaction.execute.assert_not_called()
Example no. 26
    def test_run_infinitely(self, mocked_sleep, mocked_gen_wc, mocked_prune,
                            mocked_daily_senti, mocked_connect, mocked_redis):

        # Custom class used to stop the infinite loop
        class CustomExhausted(Exception):
            pass

        mocked_connect.return_value = (None, None)
        mocked_sleep.side_effect = [None, None, CustomExhausted]

        self.assertRaises(CustomExhausted, Analyser().run_infinitely)

        self.assertEqual(3, mocked_daily_senti.call_count)
        self.assertEqual(3, mocked_prune.call_count)
        self.assertEqual(3, mocked_gen_wc.call_count)
        self.assertEqual(3, mocked_sleep.call_count)
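
The asserted call counts imply a loop that does the three pieces of work before sleeping, so the third sleep raises CustomExhausted only after the third round of work. A minimal sketch of a loop with that shape; the method names mirror the mock names and the interval is a guess, not the project's actual code:

import time

def run_infinitely(analyser):
    while True:
        analyser.get_daily_avg_sentiment_by_viewpoint()
        analyser.prune_old_tweets()
        analyser.generate_word_clouds()  # hypothetical name behind mocked_gen_wc
        time.sleep(60 * 60)              # the real interval is not shown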
Example no. 27
def main():
    """Main entry method to the Kyffin project."""
    (options, args) = parse()

    if options.test_flag:
        from kyffin.test.statistical_test import StatisticalTest
        suite = unittest.TestLoader().loadTestsFromTestCase(StatisticalTest)
        unittest.TextTestRunner(verbosity=2).run(suite)
    else:
#        print "Kyffin ({0})".format(loadVersion())
        if options.csv is not None:
            tech = techniqueFactory.getTechnique(options.technique)
            gui = guiFactory.getGUI(options.gui, options.technique)
            ml = mlFactory.getML(options.ml, tech)
            analyser = Analyser(tech, gui, ml, options.five_flag, options.export_path, options.classify)
            analyser.run(options.csv)
        else:
            raise BaseException('No data file specified')
Example no. 28
    def test_get_word_frequency_for_viewpoint(self, mocked_connect,
                                              mocked_redis):
        Tweet = namedtuple("Tweet", ["tweet_text"])

        mocked_db_cursor = mock.MagicMock()
        mocked_db_cursor.__iter__.return_value = iter(
            [Tweet("hello"), Tweet("hello again")])
        mocked_connect.return_value = (None, mocked_db_cursor)

        word_counts = Analyser().get_word_frequency_for_viewpoint(True)

        mocked_db_cursor.execute.assert_called_once_with(
            "SELECT tweet_text "
            "FROM tweet "
            "WHERE viewpoint = %s", (True, ))
        self.assertEqual(2, word_counts["hello"])
        self.assertEqual(1, word_counts["again"])
        self.assertEqual(0, word_counts["goodbye"])
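
The assertions above ('hello' -> 2, 'again' -> 1, unseen words -> 0) suggest simple whitespace tokenisation into something dict-like with a zero default. A minimal, purely illustrative sketch with collections.Counter; the real query and tokenisation may differ:

from collections import Counter

tweets = ["hello", "hello again"]
word_counts = Counter(word for text in tweets for word in text.split())

assert word_counts["hello"] == 2
assert word_counts["again"] == 1
assert word_counts["goodbye"] == 0  # Counter returns 0 for missing keys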
Example no. 29
    def method(self):
        if not self.check_preprocess():
            p = Preprocess(self.file)
            p.preprocess()
            print('File preprocessed!')
        else:
            print('Already preprocessed file.')

        with open('preprocessed.csv') as csvfile:
            reader = csv.reader(csvfile, delimiter=',', quotechar='"')
            data = []
            data_df = []
            for row in reader:
                value = self.calculate_date(row[7])
                if value:
                    data.append((row[0], self.decay_date(value)))

            for key, group in groupby(data, key=lambda x: x[0]):
                data_df.append([key, sum(j for i, j in group)])

            df = pd.DataFrame(data_df, columns=['establishment', 'decay_date'])

        # Create x, where x is the 'decay_date' column's values as floats
        x = df[['decay_date']].values.astype(float)

        # Create a minimum and maximum processor object
        min_max_scaler = preprocessing.MinMaxScaler(feature_range=(1, 10))

        # Create an object to transform the data to fit minmax processor
        x_scaled = min_max_scaler.fit_transform(x)

        # Run the normalizer on the dataframe
        df_normalized = pd.DataFrame(x_scaled)

        df.drop('decay_date', axis=1, inplace=True)
        df_normalized = df_normalized.rename(columns={0: 'decay_date'})
        df = pd.concat([df, df_normalized], axis=1)

        a = Analyser(df)
        #a.polarity()
        #a.confiability()
        a.viewer('pol', 'conf')
        df = a.mname()
        return df
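
For reference, MinMaxScaler with feature_range=(1, 10) rescales the column linearly so its minimum maps to 1 and its maximum to 10. A small self-contained check with made-up values:

import numpy as np
from sklearn import preprocessing

# Made-up decay values, just to illustrate the (1, 10) rescaling used above.
x = np.array([[0.0], [5.0], [10.0]])
scaler = preprocessing.MinMaxScaler(feature_range=(1, 10))
print(scaler.fit_transform(x))  # [[ 1. ], [ 5.5], [10. ]]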
Example no. 30
def run():
    analyser = Analyser()
    if analyser.valid:
        analyser.print_all()
        analyser.analyse_all()
    if not analyser.valid:
        analyser.arch = idaapi.askstr(
            0, "x86 / x64", "Set architecture manually (x86 or x64)")
        if not (analyser.arch == "x86" or analyser.arch == "x64"):
            return False
        if (analyser.arch == "x86"):
            analyser.BOOT_SERVICES_OFFSET = BOOT_SERVICES_OFFSET_x86
        if (analyser.arch == "x64"):
            analyser.BOOT_SERVICES_OFFSET = BOOT_SERVICES_OFFSET_x64
        analyser.print_all()
        analyser.analyse_all()
    if len(analyser.Protocols["All"]):
        wind = ProtsWindow("Protocols", analyser, nb=10)
        wind.show()
    return True