Example No. 1
class StrategyCalculator:
    def __init__(self, tickerName):
        self.tickerName = tickerName
        self.comparator = Comparator(self.tickerName)
        self.analyser = Analyser(self.tickerName, self.comparator)

    def inform(self, df):
        # print("Calculating Strategy for " + str(self.tickerName) + " at " + str(timeStamp) + "...")
        ### TO-DO: Develop strategies to calculate

        ## Ask the Analyser to run the interval analysis first,
        ## before the pseudo-trades, so the two do not clash.
        self.analyser.intervalAnalysis(df.head(1))

        #1. Calculate ATR for potential trade
        atr = atrcalc.ATRcalc(df)
        indi = ind.Indicator()

        Results = indi.beginCalc(df, self.tickerName, atr)

        for i in Results:
            if Results[i] != 0:
                self.analyser.PseudoTrade(df, i, Results[i], atr)

            ### for testing
            # self.analyser.PseudoTrade(df,i, Results[i], atr)
            ###
        ### END TO-DO
        # print("Calculated Strategy for " + str(self.tickerName) + " at " + str(timeStamp))
        self.comparator.compare(Results, atr)
Example No. 2
 def __init__(self, repo, user, token):
     """
     The ActiveNess init also initialse the Super classes on which it is dependent
     """
     Analyser.__init__(self, repo, user)
     RepoSummariser.__init__(self, token)
     self.initialise_repo(user, repo)
Example No. 3
def test_lalalala7():
    sut = Analyser()
    sut.analyse('ustawienia capital wyłącz')
    assert 'dzień dobry, nazywam się czesio. lubię jeść muchy' == ''.join(
        sut.analyse(
            "Dzień dobry przecinek nazywam się Czesio kropka lubię jeść muchy")
    )
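
For non-Polish readers: the strings in these dictation tests are spoken keywords: przecinek = comma, kropka = period, spacja = space, cudzysłów = quotation mark, tylda = tilde, ukośnik = slash, równe = equals, ustawienia = settings, włącz/wyłącz = on/off, duża = capital. Below is a toy sketch of the keyword-replacement idea, not this project's actual Analyser, which also tracks settings, spacing, and capitalisation state:

# toy keyword-to-symbol mapping illustrating what these tests exercise
REPLACEMENTS = {
    "przecinek": ",", "kropka": ".", "spacja": " ",
    "cudzysłów": '"', "tylda": "~", "ukośnik": "/", "równe": "=",
}

def naive_analyse(text):
    """Replace spoken keywords with symbols; the real state handling is omitted."""
    return [REPLACEMENTS.get(word, word) for word in text.split()]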
Example No. 4
def go(program_path, pattern_path):
    if not (program_path.exists() and program_path.is_file()):
        fatal('given program file does not exist')

    if not (pattern_path.exists() and pattern_path.is_file()):
        fatal('given pattern file does not exist')

    output_path = get_out_filepath(program_path.parents[0])

    program = read_json(program_path)
    patterns_json = read_json(pattern_path)
    patterns = [Pattern(patt) for patt in patterns_json]

    analyser = Analyser(program, patterns)
    vulnerabilities = analyser.run()

    json_vulns = json.dumps([vuln.to_dict() for vuln in vulnerabilities], indent=2)
    debug(json_vulns)
    output_path.write_text(json_vulns)

    debug(f'Found vulnerabilities: {len(vulnerabilities)}')
    debug(vulnerabilities)

    # may be used by tester program
    return vulnerabilities
Example No. 5
class StrategyCalculator:
    def __init__(self, tickerName):
        self.tickerName = tickerName
        self.comparator = Comparator(self.tickerName)
        self.analyser = Analyser(self.tickerName, self.comparator)

    def inform(self, df):
        # print("Calculating Strategy for " + str(self.tickerName) + " at " + str(timeStamp) + "...")
        ### TO-DO: Develop strategies to calculate

        ## Ask the Analyser and Comparator to run the interval analysis first,
        ## before the pseudo-trades, so the two do not clash.
        self.analyser.intervalAnalysis(df.head(1))
        self.comparator.intervalAnalysis(df.head(1))

        indi = ind.Indicator()

        Results = indi.beginCalc(df, self.tickerName)

        for i in Results:
            # print('Indicator: ' + i)
            # print('Position: ' + str((Results[i])['position']))
            if (Results[i])["position"] != 0:
                self.analyser.PseudoTrade(df, i, Results[i])

            ### for testing
            # self.analyser.PseudoTrade(df,i, Results[i], atr)
            ###
        ### END TO-DO
        # print("Calculated Strategy for " + str(self.tickerName) + " at " + str(timeStamp))
        self.comparator.compare(Results, df["date"].values[0])
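
A self-contained aside on the pandas idioms both StrategyCalculator variants rely on: df.head(1) hands the Analyser only the first (newest) row, and df["date"].values[0] pulls the first date out as a plain value.

import pandas as pd

df = pd.DataFrame({"date": ["2021-01-04", "2021-01-03"], "close": [100.9, 99.8]})
print(df.head(1))            # only the first row
print(df["date"].values[0])  # -> '2021-01-04'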
Example No. 6
def test_lalalalalala2():
    sut = Analyser()
    sut.analyse("ustawienia auto Space")
    result = sut.analyse(
        "alias spacja to do równe cudzysłów vim spacja tylda kropka ukośnik to do cudzysłów"
    )
    assert 'alias todo="vim ~./todo"' == "".join(result)
Example No. 7
def simulation_tails(fname, uranium_type="Natural"):
    """Get the reprocessed and depleted uranium tails as dict"""

    if uranium_type == "Natural":
        sink = "DepletedNaturalUSink"
    elif uranium_type == "Reprocessed":
        sink = "DepletedReprocessedUSink"
    else:
        msg = "'uranium_type' has to be either 'Natural' or 'Reprocessed'."
        raise ValueError(msg)

    a = Analyser(fname)
    sim_end = a.query(selection='EndTime', table='Finish')[0][0]
    results = a.query(selection='NucId, Quantity',
                      table='ExplicitInventory',
                      condition='Time==? AND AgentId==?',
                      vals=(sim_end, a.names[sink]))
    comp = {key: 0 for key in range(232, 239) if key != 237}
    quantity = 0
    for key, value in results:
        # ZZAAAM NucId -> mass number A; integer division keeps the keys as ints
        key = key // 10000 - 92 * 1000
        comp[key] = value
        quantity += value

    for key, value in comp.items():
        comp[key] = value / quantity

    return comp, quantity
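
A quick worked example of the key conversion above, assuming the Cyclus-style ZZAAAM NucId encoding (Z * 10**7 + A * 10**4 + M):

# 922350000 encodes U-235 (Z=92, A=235, M=0):
assert 922350000 // 10000 - 92 * 1000 == 235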
Example No. 8
 def __init__(self, state):
     super().__init__()
     self.daemon = True
     self.audio_converter = None
     self.analyser = Analyser(state)
     self.fake_keyboard = FakeKeyboard()
     self.responses = None
Example No. 9
def test_shouldUseAutoCapitalLettersAfterDots():
    sut = Analyser()
    sut.analyse('ustawienia capital Auto')
    assert 'Dzień dobry, nazywam się Czesio. Lubię jeść muchy... Troche to dziwne.' == ''.join(
        sut.analyse(
            "duża dzień dobry przecinek nazywam się Czesio kropka lubię jeść muchy kropka kropka kropka troche to dziwne kropka"
        ))
Example No. 10
    def __init__(self, db):
        self.db = db

        self.analyser = Analyser(self.db)
        self.statistics = Statistics(self.db)
        self.sleep_statForMonth = {}
        self.DaysValidity = 0  # number of valid days used when computing the scores
        self.statMonth_analyser = StatMonthAnalyser(db)
Example No. 11
 def __init__(self):
     self.settings = Settings()
     self.logger = Logger(self.settings.logfile)
     self.reporter = Reporter(self.settings)
     self.setup = Setup(self.settings, self.logger)
     self.grader = Grader(self.settings, self.logger, self.setup)
     self.analyser = Analyser(self.settings, self.reporter, self.logger,
                              self.setup, self.grader)
Example No. 12
 def __init__(self, db):
     self.db = db
     self.stage_analyser = StageAnalyser()
     self.analyser = Analyser(self.db)
     self.stage_regularity_analyser = StageRegularityAnalyser(self.db)
     self.time_array = []  # time buckets split into 30-minute intervals
     self.dict_time_point = {}  # dict keyed by time bucket, counting time points per bucket
     self.offbed_time_array = []  # all out-of-bed periods during sleep
     self.divide_time()
Example No. 13
def analise(request):
    if request.method == 'POST':
        a = Analyser(1, 2)
        content = show_reviews(request)

        a.analyse(content['reviews'])

        return render(request, 'respostas.html', content)
    elif request.method == 'GET':
        return render(request, 'home.html', {})
Example No. 15
 def setup(self, explore, doTrain, impala, calcprobs, minmax, lossf, K,
           dropout, alpha, discount, lambd, lr, name, TopNvalues,
           cutOffdepth, ValueCutOff, ValueDiffCutOff, ProbabilityCutOff,
           historyLength, startAfterNgames, batchSize, sampleLenth, network,
           analyse, montecarlo):
     self.calcprobs = calcprobs
     self.newreward = 0
     self.all_state, self.all_reward = [], []
     self.explore = explore
     self.doTrain = doTrain
     self.previousState = []
     self.actionState = None
     self.parameters, self.phi = [], []
     self.rating = 1000
     self.connection = None
     self.montecarlo = montecarlo
     self.ImpaleIsActivated = impala
     if self.ImpaleIsActivated:
         self.historyLength = int(historyLength)
         self.startAfterNgames = int(startAfterNgames)
         self.batchSize = int(batchSize)
         self.sampleLenth = int(sampleLenth)
         self.impala = Impala(self.train,
                              self.resettrace,
                              historyLength=self.historyLength,
                              startAfterNgames=self.startAfterNgames,
                              batchSize=self.batchSize,
                              sampleLenth=self.sampleLenth)
     else:
         self.historyLength, self.startAfterNgames, self.batchSize, self.sampleLenth, self.impala = None, None, None, None, None
     self.EloWhileTrain = [self.rating]
     self.name = name
     self.network = network
     self.gameNumber = 1
     self.K = K
     self.dropout = dropout
     self.alpha = alpha
     self.discount = discount
     self.lambd = lambd
     self.lr = lr
     if not self.explore:
         self.K = None
     self.NextbestAction = []
     self.analyse = analyse
     if self.analyse:
         self.analyser = Analyser()
     else:
         self.analyser = None
     self.lossf = lossf
     self.currentAgent = self
     self.minimaxi = minmax
     if self.minimaxi:
         self.TopNvalues = int(TopNvalues)
         self.cutOffdepth = int(cutOffdepth)
         self.ValueCutOff = ValueCutOff
         self.ValueDiffCutOff = ValueDiffCutOff
         self.ProbabilityCutOff = ProbabilityCutOff
         self.minmaxer = MinMaxCalculate(
             self.value,
             TopNvalues=self.TopNvalues,
             cutOffdepth=self.cutOffdepth,
             ValueCutOff=self.ValueCutOff,
             ValueDiffCutOff=self.ValueDiffCutOff,
             ProbabilityCutOff=self.ProbabilityCutOff,
             explore=self.explore,
             K=self.K,
             calcprobs=self.calcprobs,
             montecarlo=self.montecarlo,
             discount=self.discount)
     else:
         self.TopNvalues, self.cutOffdepth, self.ValueCutOff, self.ValueDiffCutOff, self.ProbabilityCutOff = None, None, None, None, None
     self.Features = 'antSituation + [sum(mine)] + [sum(dine)] + mine[1:13] + dine[1:13] + splitDistance + baseDistance + [carryEnimy, carryAlly] + dice + score + flat_list'
Example No. 16
    def process_newest_weibo():
        """
        设置定时器: 每隔一段时间调用自身
        获取新微博: 先爬取 person 的最新一条微博, 若其已存在与数据库中, 则等待下次执行.
                                             若不存在, 则分析其情绪值, 并存入数据库中, 同时更新数据库中的统计数据
        分析情绪值: 若情绪正常, 则等待, 否则, 发送短信
        """

        # Fetch the newest Weibo post
        new_weibo = spider.get_newest_weibo()
        query = Weibo.select().where(
            Weibo.weibo_content == new_weibo["weibo_content"])
        print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        # Handle a newly published post
        if not query.exists():
            print('====== New Weibo post found ======')
            analyser = Analyser(user_id)
            sentiment_value = analyser.get_sentiment_value(
                new_weibo["weibo_content"])
            # Database operations
            db = DB()
            db.person = person
            weibo = db.store_weibo(**new_weibo)
            db.store_sentiment(weibo, sentiment_value)
            analyser.calculate()
            # After the database update the mean and SD change, so re-read them
            MEAN = Person.get(Person.user_id == user_id).mean
            SD = Person.get(Person.user_id == user_id).std
            # explicit comparison instead of `in range(...)`, which fails for floats
            if not (MEAN - SD // 2 <= sentiment_value < MEAN + SD // 2):
                message = '[{person_name}] posted a Weibo with a sentiment value of {sentiment_value}; content: {content}'.format(
                    person_name=person.username,
                    sentiment_value=sentiment_value,
                    content=new_weibo['weibo_content'],
                )
                print(message)
                print('============')
                # send the SMS
                send_sms(message)
        else:
            # Update the like, retweet, and comment counts
            weibo = query[0]
            if (weibo.up_num != new_weibo['up_num']
                    or weibo.retweet_num != new_weibo['retweet_num']
                    or weibo.comment_num != new_weibo['comment_num']):
                print('Weibo stats changed, updating the database...')
                db = DB()
                db.person = person
                weibo.up_num = new_weibo['up_num']
                weibo.retweet_num = new_weibo['retweet_num']
                weibo.comment_num = new_weibo['comment_num']
                weibo.save()
                print('Database update complete')

            print('====== No new Weibo; the output above is the latest post ======')
Example No. 17
def test_lalalalala3():
    sut = Analyser()
    text = "ustawienia auto Space"
    assert ["Ala", " ", "ma", " ", "kota"] == sut.analyse("Ala ma kota")
    sut.analyse(text)
    assert ["Ala", "ma", "kota"] == sut.analyse("Ala ma kota")
    sut.analyse(text)
    assert ["Ala", " ", "ma", " ", "kota"] == sut.analyse("Ala ma kota")
Example No. 18
def test_lalalalala2():
    sut = Analyser()
    result = sut.analyse("alias to")
    result += sut.analyse("do")
    result += sut.analyse("równe")
    result += sut.analyse("cudzysłów")
    result += sut.analyse("vim tylda")
    result += sut.analyse("kropka")
    result += sut.analyse("ukośnik")
    result += sut.analyse("to")
    result += sut.analyse("do")
    result += sut.analyse("cudzysłów")

    assert 'alias todo="vim ~./todo"' == "".join(result)
Example No. 19
    def test_smooth_results(self, _):
        smoothed = Analyser.smooth_results([
            self.RawAvgResult(Decimal(0.6), 1, None),
            self.RawAvgResult(Decimal(0.7), 2, None),
            self.RawAvgResult(Decimal(0.8), 3, None),
            self.RawAvgResult(Decimal(0.3), 4, None),
            self.RawAvgResult(Decimal(0.2), 5, None)
        ])
        self.assertEqual(5, len(smoothed))

        self.assertAlmostEqual(
            Decimal((0.6 * 0.6) + (0.5 * 0.3) + (0.5 * 0.1)), smoothed[0].avg)
        self.assertAlmostEqual(
            Decimal((0.7 * 0.6) + (0.56 * 0.3) + (0.5 * 0.1)), smoothed[1].avg)
        self.assertAlmostEqual(
            Decimal((0.8 * 0.6) + (0.638 * 0.3) + (0.56 * 0.1)),
            smoothed[2].avg)
        self.assertAlmostEqual(
            Decimal((0.3 * 0.6) + (0.7274 * 0.3) + (0.638 * 0.1)),
            smoothed[3].avg)
        self.assertAlmostEqual(
            Decimal((0.2 * 0.6) + (0.46202 * 0.3) + (0.7274 * 0.1)),
            smoothed[4].avg)
        self.assertEqual(1, smoothed[0].timestamp)
        self.assertEqual(2, smoothed[1].timestamp)
        self.assertEqual(3, smoothed[2].timestamp)
        self.assertEqual(4, smoothed[3].timestamp)
        self.assertEqual(5, smoothed[4].timestamp)
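
The expected values in this test are consistent with a second-order exponential smoothing, s[i] = 0.6*raw[i] + 0.3*s[i-1] + 0.1*s[i-2], with both lags seeded at 0.5. A minimal sketch of that recurrence, read off the assertions above rather than taken from the Analyser's actual implementation:

def smooth(raw, seed=0.5, weights=(0.6, 0.3, 0.1)):
    """Second-order exponential smoothing with both lags seeded."""
    prev1 = prev2 = seed
    out = []
    for r in raw:
        s = weights[0] * r + weights[1] * prev1 + weights[2] * prev2
        out.append(s)
        prev1, prev2 = s, prev1
    return out

# smooth([0.6, 0.7, 0.8, 0.3, 0.2]) -> [0.56, 0.638, 0.7274, 0.46202, 0.331346]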
Example No. 21
def analyse():
    input_json = request.get_json()
    # print("## INPUT JSON ##\n" + str(input_json))

    if "filters" not in input_json:
        filters_dict = {}
    else:
        filters_dict = input_json["filters"]
        for k, v in filters_dict.items():
            del filters_dict[k]
            filters_dict[int(k)] = v

    category = input_json['category']
    # print("filters_dict:\n" + str(filters_dict))
    # print("category: "  + str(category))
    coefs, intercept, coefs_indexes, null_columns = Analyser.analyse(
        filters_dict, category, ds)

    result = coefs.tolist()
    # result.append(intercept.tolist()) # append intercept

    # print("Coeficientes: " + str(coefs))
    # print("coef type:" + str(type(coefs)))

    output = {
        "coefs": result,
        "intercept": intercept.tolist(),
        "coefs_indexes": coefs_indexes,
        "null_types": null_columns
    }

    return json.dumps(output)
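
A self-contained note on the .tolist() calls above: numpy arrays are not JSON-serializable, so they have to be converted to plain lists before json.dumps.

import json
import numpy as np

coefs = np.array([0.5, -1.2])
# json.dumps({"coefs": coefs})  # TypeError: Object of type ndarray is not JSON serializable
print(json.dumps({"coefs": coefs.tolist()}))  # {"coefs": [0.5, -1.2]}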
Example No. 22
 def test_create_dataframe_dict_from_indicators_dict(
         self, indicators_dict, stocks_list):
     dataframe_dict = Analyser.create_dataframe_dict_from_indicators_dict(
         indicators_dict)
     assert isinstance(dataframe_dict, dict)
     assert isinstance(dataframe_dict['Ação'], list)
     assert len(dataframe_dict['Ação']) == len(stocks_list)
Example No. 23
def qlcs_for_files(file_name_list):
    import json
    analyser = Analyser.full_analysis()
    for file_name in file_name_list:
        print('=== {} ==='.format(file_name))
        analysis = analyser.analyse_source_file(file_name)
        print(json.dumps(generate_qlcs(analysis, 'en'), indent=4))
Example No. 24
    def test_get_daily_avg_sentiment_by_viewpoint(self, mocked_open,
                                                  mocked_smooth,
                                                  mocked_add_to_cache,
                                                  mocked_connect,
                                                  mocked_redis):
        mocked_cursor = mock.MagicMock()
        mocked_timestamp = mock.MagicMock()
        mocked_timestamp.isoformat.side_effect = [1, 2, 3]
        mocked_cursor.fetchall.return_value = [
            self.RawAvgResult(0.4, mocked_timestamp, True),
            self.RawAvgResult(0.5, mocked_timestamp, False),
            self.RawAvgResult(0.6, mocked_timestamp, True)
        ]
        mocked_smooth.side_effect = [[
            self.RawAvgResult(0.44444444, mocked_timestamp, True),
            self.RawAvgResult(0.66666666, mocked_timestamp, True)
        ], [self.RawAvgResult(0.55555555, mocked_timestamp, False)]]
        mocked_connect.return_value = (None, mocked_cursor)

        Analyser().get_daily_avg_sentiment_by_viewpoint()

        mocked_cursor.execute.assert_called_once_with("<SQL here>")
        mocked_smooth.assert_any_call([
            self.RawAvgResult(0.4, mocked_timestamp, True),
            self.RawAvgResult(0.6, mocked_timestamp, True)
        ])
        mocked_smooth.assert_any_call(
            [self.RawAvgResult(0.5, mocked_timestamp, False)])
        mocked_add_to_cache.assert_any_call("vp:senti", {
            1: 0.44444,
            2: 0.66667
        })
        mocked_add_to_cache.assert_any_call("vn:senti", {3: 0.55556})
Example No. 25
    def test_prune_old_tweets(self, mocked_open, mocked_connect, mocked_redis):
        mocked_conn = mock.MagicMock()
        mocked_cursor = mock.MagicMock()
        mocked_connect.return_value = (mocked_conn, mocked_cursor)

        Analyser().prune_old_tweets()

        mocked_cursor.execute.assert_called_once_with("<SQL here>")
        mocked_conn.commit.assert_called_once_with()
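
For readers unfamiliar with the mocking pattern used in these tests, a self-contained illustration with plain unittest.mock (the "<SQL here>" placeholders above stand in for the project's real queries):

from unittest import mock

cursor = mock.MagicMock()
cursor.execute("DELETE FROM tweets WHERE ts < ?")  # what the code under test would do
cursor.execute.assert_called_once_with("DELETE FROM tweets WHERE ts < ?")  # passes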
Example No. 26
def main():
    """Main entry method to the Kyffin project."""
    (options, args) = parse()

    if options.test_flag:
        from kyffin.test.statistical_test import StatisticalTest
        suite = unittest.TestLoader().loadTestsFromTestCase(StatisticalTest)
        unittest.TextTestRunner(verbosity=2).run(suite)
    else:
#        print "Kyffin ({0})".format(loadVersion())
        if options.csv is not None:
            tech = techniqueFactory.getTechnique(options.technique)
            gui = guiFactory.getGUI(options.gui, options.technique)
            ml = mlFactory.getML(options.ml, tech)
            analyser = Analyser(tech, gui, ml, options.five_flag, options.export_path, options.classify)
            analyser.run(options.csv)
        else:
            raise ValueError('No data file specified')
Example No. 27
    def method(self):
        if not self.check_preprocess():
            p = Preprocess(self.file)
            p.preprocess()
            print('File preprocessed!')
        else:
            print('Already preprocessed file.')

        with open('preprocessed.csv') as csvfile:
            reader = csv.reader(csvfile, delimiter=',', quotechar='"')
            data = []
            data_df = []
            for row in reader:
                value = self.calculate_date(row[7])
                if value:
                    data.append((row[0], self.decay_date(value)))

            for key, group in groupby(data, key=lambda x: x[0]):
                data_df.append([key, sum(j for i, j in group)])

            df = pd.DataFrame(data_df, columns=['establishment', 'decay_date'])

        # Create x, where x is the 'decay_date' column's values as floats
        x = df[['decay_date']].values.astype(float)

        # Create a minimum and maximum processor object
        min_max_scaler = preprocessing.MinMaxScaler(feature_range=(1, 10))

        # Create an object to transform the data to fit minmax processor
        x_scaled = min_max_scaler.fit_transform(x)

        # Run the normalizer on the dataframe
        df_normalized = pd.DataFrame(x_scaled)

        df.drop('decay_date', axis=1, inplace=True)
        df_normalized = df_normalized.rename(columns={0: 'decay_date'})
        df = pd.concat([df, df_normalized], axis=1)

        a = Analyser(df)
        #a.polarity()
        #a.confiability()
        a.viewer('pol', 'conf')
        df = a.mname()
        return df
Example No. 28
def test_shoundUseCapitalLetters():
    sut = Analyser()
    sut.analyse('ustawienia auto space')
    sut.analyse('ustawienia capital włącz')
    assert 'DZIEŃ DOBRY, NAZYWAM SIĘ CZESIO. CHCIAŁEM SPRAWDZIĆ JESZCZE, CZY DZIAŁA "CAPSLOCK" CUDZYSŁÓW.' == ''.join(
        sut.analyse(
            "dzień spacja dobry przecinek spacja nazywam spacja się spacja Czesio kropka spacja chciałem spacja sprawdzić spacja jeszcze przecinek spacja czy spacja działa spacja cudzysłów capslock cudzysłów spacja dosłownie cudzysłów kropka"
        ))
Example No. 29
def exercise(data):
    print(data)
    print("Started " + data["name"])
    global exercise_data
    exercise_data = data
    global exercise_in_progress
    exercise_in_progress = True  # Start exercise processing
    global analyser
    analyser = Analyser('references_' + data["name"] + '.json', data,
                        rdr)  # Create analyser for data["name"] exercise
Example No. 30
class Stages:
    def __init__(self, db):
        self.db = db
        self.analyser = Analyser(self.db)

    def on_get(self, req, resp, user_id, date):
        data_from_db = self.db.sleep_phase[user_id].find_one({'_id': date})
        if data_from_db is None or data_from_db['ver'] != arith.SLEEP_STAGE_ALGORI_VERSION:
            # decide whether the cached database result can be served or must be recomputed
            self.analyser.analyse(user_id, date)
            # the analyser runs the analysis, then writes the results to the database
            data_from_db = self.db.sleep_phase[user_id].find_one({'_id': date})
        sleep_stages = data_from_db.get('data', [])
        for item in sleep_stages:
            item['time'] = item['time'].isoformat()
        # get the sleep status; time range from 20:00 on the given date to
        result = {'result': sleep_stages}
        resp.body = json.dumps(result)
        resp.status = falcon.HTTP_200
Example No. 31
def main():
    """
    Analyse the emails of the user and obtain the style metrics.
    
    Returns
    -------
    None.
    
    """
    if os.getcwd() not in sys.path:
        sys.path.append(os.getcwd())
    
    # Create the Gmail API service resource
    service = build('gmail', 'v1',
                    credentials=auth.get_credentials(config.SCOPES, config.CREDS))
    anls = None
    nextPageToken = None
    
    usu = input('Introduce the user name: ')
    
    if yes_no_question('Was there a previous execution with the same credentials?'):
        q = int(input('Introduce the remaining quota units: '))
        if yes_no_question('Was it with the same user?'):
            ext = yes_no_question('Was the previous execution extracting messages?')
            nextPageToken = input('Introduce NextPageToken: ')
            num_res = int(input('How many Gmail resources were extracted? '))
            anls = Analyser(service, usu, q, ext, num_res)
        else:
            anls = Analyser(service, usu, q)
    else:
        anls = Analyser(service, usu)
        
    if yes_no_question('Does the user have an email signature?'):
        print('Introduce the signature and finish it with the word "STOP".\n')
        entr = input()
        sign = ''
        while entr != 'STOP':
            sign += entr + '\n'
            entr = input()
        anls.analyse(nextPageToken, sign)
    else:
        anls.analyse(nextPageToken)
Example No. 32
 def netstream_loaded(self):
     # start = time()
     analyser = Analyser(self.replay)
     # print("analyser: %f" % (time()-start))
     # start = time()
     self.heatmap_tab.set_analyser(analyser)
     # print("heatmap: %f" % (time()-start))
     # start = time()
     self.distance_tab.set_analyser(analyser)
     # print("distance: %f" % (time()-start))
     logger.info('Netstream Parsed. No Errors found')
Example No. 33
class StudentEvaluator:
    """ Contains all the tools to analyse assignments """
    errors = 0

    def __init__(self):
        self.settings = Settings()
        self.logger = Logger(self.settings.logfile)
        self.reporter = Reporter(self.settings)
        self.setup = Setup(self.settings, self.logger)
        self.grader = Grader(self.settings, self.logger, self.setup)
        self.analyser = Analyser(self.settings, self.reporter, self.logger, self.setup, self.grader)


    def run(self):
        """ Run the program (call this from main). """
        self.analyser.run()
        #self.reporter.run()

    def exit_value(self):
        """TODO: Generate the exit value for the application."""
        return 0 if self.errors == 0 else 42
Example No. 34
def compile_chip(chip, g):
    """
        Компилирует данные для микросхемы
    :param g:
    """

    analyser = Analyser(chip.pins, chip.name)
    g.add_chip(chip.name)
    first_command_index = len(g.commands) - 1
    #g.add_command('CMD_RESET_FULL')
    inputs = chip.inputs  # NB: the appends below also mutate chip.inputs (same list object)
    for power in chip.powerPlus:
        inputs.append(power)
    for power in chip.powerMinus:
        inputs.append(power)

    g.add_command_mask_1('CMD_INIT', inputs, chip.pins)
    analyser.set_ddr(inputs)

    # commands
    for cmd in chip.commands:
        if cmd.name == 'set':
            pins0 = cmd.lst0
            for power in chip.powerMinus:
                pins0.append(power)

            pins1 = cmd.lst1
            for power in chip.powerPlus:
                pins1.append(power)
            for pullUp in chip.pullUpOutputs:
                pins1.append(pullUp)
            analyser.set_pins_to_0(pins0)
            analyser.set_pins_to_1(pins1)
            if OPTIMIZE_CMD_ALL:
                g.add_command_mask_1('CMD_SET_ALL', analyser.get_levels_mask(), chip.pins, 1)
            else:
                g.add_command_mask_2('CMD_SET', pins0, pins1, chip.pins)

        elif cmd.name == 'test':
            if OPTIMIZE_CMD_TEST:
                optimized_mask = analyser.get_test_all_mask(cmd.lst0, cmd.lst1)
            else:
                optimized_mask = None

            if optimized_mask is None:
                g.add_command_mask_2('CMD_TEST', cmd.lst0, cmd.lst1, chip.pins)
            else:
                g.add_command_mask_1('CMD_TEST_ALL', optimized_mask, chip.pins, 1)

        elif cmd.name == 'set+test':
            pins0 = cmd.lst0
            for power in chip.powerMinus:
                pins0.append(power)

            pins1 = cmd.lst1
            for power in chip.powerPlus:
                pins1.append(power)
            for pullUp in chip.pullUpOutputs:
                pins1.append(pullUp)

            analyser.set_pins_to_0(pins0)
            analyser.set_pins_to_1(pins1)

            if OPTIMIZE_CMD_ALL:
                g.add_command_mask_1('CMD_SET_ALL', analyser.get_levels_mask(), chip.pins, 1)
            else:
                g.add_command_mask_2('CMD_SET', pins0, pins1, chip.pins)

            if OPTIMIZE_CMD_TEST:
                optimized_mask = analyser.get_test_all_mask(cmd.lst0_2, cmd.lst1_2)
            else:
                optimized_mask = None

            if optimized_mask is None:
                g.add_command_mask_2('CMD_TEST', cmd.lst0_2, cmd.lst1_2, chip.pins)
            else:
                g.add_command_mask_1('CMD_TEST_ALL', optimized_mask, chip.pins, 1)

        elif cmd.name == 'pulse+':
            if OPTIMIZE_LAST_PULSE and analyser.pulse(cmd.pin, '+'):
                g.add_command('CMD_LAST_PULSE')
            else:
                g.add_command('CMD_PULSE_PLUS', convert_pin(cmd.pin, chip.pins, 28))

        elif cmd.name == 'pulse-':
            if OPTIMIZE_LAST_PULSE and analyser.pulse(cmd.pin, '-'):
                g.add_command('CMD_LAST_PULSE')
            else:
                g.add_command('CMD_PULSE_MINUS', convert_pin(cmd.pin, chip.pins, 28))

        elif cmd.name == 'config':
            inputs = cmd.lst0
            for power in chip.powerPlus:
                inputs.append(power)
            for power in chip.powerMinus:
                inputs.append(power)
            chip.inputs = cmd.lst0
            chip.outputs = cmd.lst1
            g.add_command_mask_1('CMD_INIT', inputs, chip.pins)
            analyser.set_ddr(inputs)

        elif cmd.name == 'test-z':
            pins = cmd.lst1
            g.add_command_mask_1('CMD_TEST_Z', pins, chip.pins)

        elif cmd.name == 'test-oc':
            pins = cmd.lst1
            g.add_command_mask_1('CMD_TEST_OC', pins, chip.pins)

        elif cmd.name == 'repeat-pulse':
            g.add_command('CMD_REPEAT_PULSE', cmd.value & 0xff, (cmd.value >> 8) & 0xff)


    g.add_command('CMD_END')
    # walk over all of this chip's commands and apply the peephole optimizations

    while True:
        optimized = False
        for i in range(first_command_index, len(g.commands)):
            cmd = g.commands[i]
            if isinstance(cmd, (list, tuple)):
                cmd_name = cmd[0]
            else:
                continue
            if i+1 < len(g.commands):
                cmd_next = g.commands[i+1]
                cmd_next_name = cmd_next[0]
            else:
                break
            #print cmd_name, cmd_next_name
            if OPTIMIZE_SET_AND_TEST and cmd_name.startswith('CMD_SET_ALL_') and cmd_next_name.startswith('CMD_TEST_ALL_'):
                optimized = True
                #print g.commands[i]
                g.commands[i][0] = 'CMD_SET_ALL_AND_TEST_' + cmd_next_name[len('CMD_TEST_ALL_'):]
                for j in range(1, len(cmd_next)):
                    g.commands[i].append(cmd_next[j])
                #print g.commands[i]
                del g.commands[i+1]
                break
            if OPTIMIZE_LAST_PULSE_AND_TEST and cmd_name == 'CMD_LAST_PULSE' and cmd_next_name.startswith('CMD_TEST_ALL_'):
                g.commands[i+1][0] = 'CMD_LAST_PULSE_AND_TEST_' + cmd_next_name[len('CMD_TEST_ALL_'):]
                #print g.commands[i+1]
                del g.commands[i]
                optimized = True
                break
            # CMD_SET_ALL_16, CMD_TEST_ALL_16  -> CMD_SET_AND_TEST_ALL
            # CMD_LAST_PULSE, CMD_TEST_ALL_16  -> CMD_LAST_PULSE_AND_TEST_ALL
        if not optimized:
            break
Example No. 35
from analyser import Analyser
from datetime import datetime, timedelta
from time import sleep
from random import randint, sample


if __name__ == '__main__':
    while True:
        try:
            x_size, y_size = input("Enter grid size: ").split(',')
            x_size, y_size = int(x_size), int(y_size)
            break
        except Exception:
            pass  # invalid input; prompt again
    chomp = Chomp(x_size, y_size)  # Chomp is assumed to be defined/imported elsewhere in the project
    analyser = Analyser(x_size, y_size)
    while True:
        num_players = input("One or two players? ")
        if num_players == "1" or num_players == "2":
            num_players = int(num_players)
            break
        elif num_players.lower() == "one":
            num_players = 1
            break
        elif num_players.lower() == "two":
            num_players = 2
            break

    player = 'human' if num_players == 2 or randint(0, 1) == 0 else 'computer'
    while True:
        print(chomp)
Example No. 36
	def run_analysis(self, cycle, plotdir, c=None):
		an = Analyser(self.articles, self.average, self.average.views, self.pattern, cycle, plotdir, c)
		plot_path = os.path.dirname(os.path.abspath(__file__)) + "/plots/" + plotdir
		if not os.path.exists(plot_path):
			os.makedirs(plot_path)
		an.run(self.overall, self.boolplot)
		print("analysis performed:", plotdir)
Example No. 37
import openCorporaDictFromPreparedFile
from analyser import Analyser
dictionary = openCorporaDictFromPreparedFile.dictionary
lemmas = openCorporaDictFromPreparedFile.lemmas
myanalyser = Analyser(dictionary, lemmas)

#text = 'Стала стабильнее экономическая и политическая обстановка, предприятия вывели из тени зарплаты сотрудников. Все Гришины одноклассники уже побывали за границей, он был чуть ли не единственным, кого не вывозили никуда дальше Красной Пахры.'
with open('dataset_37845_1.txt', 'r', encoding='utf8') as file2parse, \
        open('result.txt', 'w', encoding='utf8') as result_file:
    for line in file2parse:
        result = myanalyser.analyse(line.strip('\n'))
        result_file.write(result + '\n')

#text = 'виновный посол напомнил после о столе'
#text = 'мой друг говорит с сестрой о море'
#result = myanalyser.analyse(text)
#print(result)