Esempio n. 1
0
    def Run(self):
        """Parse the CLI options, validate them, then fetch and store the data.

        Exits with argparse's usual error behaviour on bad options; raises
        NotGetError when neither the API nor the SVG fallback yields data.
        """
        arg_parser = argparse.ArgumentParser(
            description='GitHub Repository Uploader.',
        )
        # (a positional 'path_dir_pj' argument existed once but is disabled)
        option_specs = (
            (('-n', '--username'), dict(action='append')),
            (('-d', '--path_dir_db'), {}),
            (('-id', '--path_dir_input_db'), {}),
            (('-od', '--path_dir_output_db'), {}),
            (('-u', '--url', '--upload_url'), dict(action='append')),
            (('-y', '--yaml'), {}),
        )
        for flags, extra in option_specs:
            arg_parser.add_argument(*flags, **extra)
        self.__args = arg_parser.parse_args()
        print(self.__args)

        # Validate the start-up arguments and prepare the output directory.
        usernames = self.__GetUsernames()
        Log().debug(f'対象ユーザ: {usernames}')
        path_out = self.__GetDirOutputDb()
        Log().debug(f'出力パス: {path_out}')
        path_out.mkdir(parents=True, exist_ok=True)
        self.__GetYaml()
        self.__GetUrl()

        # Build the contributions DB.

        # Fetch the contribution data: try the API first, fall back to the
        # SVG scrape when the rate limit is hit; fail hard if both yield nothing.
        grass_data = None
        try:
            grass_data = FromApi()
        except RateLimitError:
            grass_data = FromSvg()
        finally:
            if grass_data is None:
                raise NotGetError()
        Insert(grass_data)
Esempio n. 2
0
 def Insert(db, table_name, **kv):
     """Insert one row into *table_name*.

     Args:
         db: database wrapper exposing ``query(sql)``.
         table_name: target table (trusted input -- it is interpolated
             straight into the SQL text, so never pass user-controlled values).
         **kv: column-name -> value pairs for the new row; values are
             rendered by ``GetInsertValues``.
     """
     columns = ','.join(kv)  # iterating a dict yields its keys
     values = ','.join(GetInsertValues(kv.values()))
     sql = f'insert into {table_name} ({columns}) values ({values});'
     Log().debug(sql)
     db.query(sql)
Esempio n. 3
0
def find_all_process_most_frequent(varas_group):
    """Count how often each macro trace occurs across the given varas.

    Args:
        varas_group: iterable of 'case: orgao' values used to slice the
            module-level ``df_log``.

    Returns:
        dict mapping macro trace -> occurrence count, ordered by ascending
        count (dict insertion order).
    """
    macro_trace_processes = {}

    for vara in varas_group:
        df_vara = df_log[df_log['case: orgao'] == vara]

        # Clean this vara's sub-log before trace discovery.
        p = PreProcess(df=df_vara)
        p.select_desired_columns()
        p.filter_outlier_timestamp()
        p.filter_outlier_movements(lower=0.05, upper=0.95)
        p.filter_outlier_trace_time(lower=0.05, upper=0.95)

        l = Log(df_log=p.df_log.sort_values('time:timestamp'))
        all_macro_trace = find_all_macro_trace(l.log, macrosteps)

        for tran in all_macro_trace:
            macro_trace_processes[tran] = macro_trace_processes.get(tran, 0) + 1

    # Re-build the dict sorted by ascending occurrence count.
    macro_trace_processes = dict(
        sorted(macro_trace_processes.items(), key=lambda item: item[1]))

    # BUG FIX: the original ended with a bare expression and implicitly
    # returned None; return the accumulated result instead.
    return macro_trace_processes
Esempio n. 4
0
    def __init__(self, uuid):
        """Initialise the browser-profile-launcher applet.

        Sets up default state, the UI (indicator, popover, stack) and helper
        objects, then triggers an initial full update.

        Args:
            uuid: Budgie applet instance id -- unused in this body;
                TODO confirm whether the base class or loader requires it.
        """
        self.TAG = "BrowserProfileLauncher"
        self.APPINDICATOR_ID = "io_serdarsen_github_budgie_browser_profile_launcher"
        # Absolute directory containing this source file.
        self.dir_path = os.path.dirname(os.path.realpath(__file__))
        self.manager = None
        self.popover = None
        # Popover geometry bounds and current size (pixels).
        self.popoverHeightOffset = 20
        self.popoverMinHeight = 150
        self.popoverMaxHeight = 480
        self.popoverMinWidth = 256
        self.popoverHeight = 0
        self.popoverWidth = 230
        self.launcherButtonHeight = 36
        self.lenProfiles = 0
        self.launcherButtons = []
        self.availableBrowsers = []
        self.currentProfile = None
        self.currentBrowser = None
        self.chromiumBrowser = None
        self.chromeBrowser = None

        Budgie.Applet.__init__(self)

        # Helper/service objects used by the applet.
        self.log = Log("budgie-browser-profile-launcher")
        self.localStateHelper = LocalStateHelper()
        self.popenHelper = PopenHelper()
        self.sortHelper = SortHelper()

        # Build the UI pieces, then perform an initial refresh.
        self.buildIndicator()
        self.buildPopover()
        self.buildStack()

        self.update(True)
Esempio n. 5
0
 def CreateTable(db, table_name, **name_types):
     """Create table *table_name* with the given column definitions.

     Args:
         db: database wrapper exposing ``query(sql)``.
         table_name: name of the table to create (trusted input -- it is
             interpolated into the SQL text unescaped).
         **name_types: column-name -> SQL-type pairs, e.g. ``Id='integer'``.
     """
     columns = ', '.join(k + ' ' + v for k, v in name_types.items())
     sql = f'create table {table_name} ({columns});'
     Log().debug(sql)
     db.query(sql)
Esempio n. 6
0
    def Run(self):
        """Parse the CLI options, validate them and create the output directory."""
        parser = argparse.ArgumentParser(
            description='GitHub Repository Uploader.',
        )
        #parser.add_argument('path_dir_pj')
        parser.add_argument('-n', '--username', action='append')
        parser.add_argument('-i', '--path_dir_input')
        parser.add_argument('-o', '--path_dir_output')
        parser.add_argument('-u', '--url', '--upload_url', action='append')
        parser.add_argument('-y', '--yaml')
        self.__args = parser.parse_args()
        print(self.__args)

        # Check the start-up arguments.
        usernames = self.__GetUsernames()
        Log().debug(f'対象ユーザ: {usernames}')
        path_out = self.__GetDirOutputDb()
        Log().debug(f'出力パス: {path_out}')
        path_out.mkdir(parents=True, exist_ok=True)
        # NOTE(review): a sibling version of this method calls
        # self.__GetYaml() (capital Y); if the helper is spelled __GetYaml,
        # this line raises AttributeError -- confirm the casing.
        self.__Getyaml()
        self.__GetUrl()
Esempio n. 7
0
 def __init__(self, driver, timeout=10):
     """Wrap a Selenium driver with a locator map, a wait timeout and a logger.

     Args:
         driver: Selenium WebDriver instance.
         timeout: default wait timeout in seconds.
     """
     # Short locator-strategy names mapped onto Selenium's By constants.
     self.byDic = {
         'id': By.ID,
         'name': By.NAME,
         'class_name': By.CLASS_NAME,
         'xpath': By.XPATH,
         'link_text': By.LINK_TEXT,
         'css': By.CSS_SELECTOR,
     }
     self.driver = driver
     self.outTime = timeout
     self.loger = Log().get_log()
Esempio n. 8
0
def log(url, method):
    """Append one request record (url, method, timestamp) to the log file.

    Args:
        url: requested URL (stringified before writing).
        method: HTTP method name.
    """
    # Local renamed from 'log' -- it shadowed this function's own name.
    entry = Log()

    entry.time = datetime.now()
    entry.url = url
    entry.method = method

    # Context manager guarantees the handle is closed even if write() fails
    # (the original leaked the file object on error).
    with open(constants.LOG_FILE, "a") as log_file:
        log_file.write(
            str(entry.url) + "\t" + entry.method + "\t" +
            entry.time.strftime('%Y-%m-%d %H:%M:%S') + "\n")
Esempio n. 9
0
def login(driver):
    """Log into the system and navigate to the target page, asserting success."""
    loger = Log().get_log()
    loger.info("登录系统")
    # Local renamed from 'login' -- it shadowed this function's own name.
    login_page = LoginPage(driver)
    driver.get(url)
    driver.maximize_window()
    login_page.login_system(userName, passWord)
    sleep(1)
    user_page = UserPage(driver)
    user_page.go_to_system()
    user_page.switch_tab(2)
    title = user_page.get_title()
    assert "一企一档" in title, "进入一企一档失败,所以case失败"
    def __init__(self):
        """Set up paths and state for reading Chrome/Chromium 'Local State' files."""
        self.TAG = "LocalStateHelper"
        self.log = Log("budgie-browser-profile-launcher")
        self.jsonHelper = JsonHelper()

        # Browser profile bookkeeping.
        self.localStateFileName = "Local State"
        self.lastProfileNum = 0
        self.lastPersonNum = 0

        # Config and cache locations for both browsers, under the user's home.
        self.home_dir = expanduser("~")
        self.chromiumConfigPath = self.home_dir + "/.config/chromium/"
        self.chromeConfigPath = self.home_dir + "/.config/google-chrome/"
        self.chromiumCachePath = self.home_dir + "/.cache/chromium/"
        self.chromeCachePath = self.home_dir + "/.cache/google-chrome/"

        # NOTE(review): attribute name is misspelled ("availbleBrowsers");
        # a sibling class spells it "availableBrowsers". Renaming would change
        # the public interface -- confirm usages before fixing.
        self.availbleBrowsers = []
    def __init__(self):
        """Initialise the helper that tracks spawned subprocesses."""
        self.TAG = "PopenHelper"
        self.log = Log("budgie-browser-profile-launcher")

        # Processes launched through this helper (presumably Popen handles --
        # TODO confirm against the methods that append to it).
        self.procs = []
Esempio n. 12
0
# _*_ coding:utf-8 _*_
# @Author: emily
# @Date  : 2018/3/20 13:52
# @Contact : [email protected]
# @Desc: During loyalty-calculation performance testing in the Anta test
# environment there is no way to tell whether every order has been fully
# processed. To solve this we search the log for the two pieces of key
# information marking the start and the completion of an order's processing:
# if the key information appears exactly twice, the order is considered done.
# Orders whose key information is missing, or does not appear exactly twice,
# are recorded.

from log.Log import Log
import os
import time

# Module-level logger shared by the functions below.
log = Log()


# Build every keyword to analyse and scan the log for each of them.
def getKeyWord():
    """Generate the order-event keywords for orders 0..2 and search each one."""
    for i in range(0, 3):
        orderId = "record:key:orderEventHandler/152885895197" + str(i)
        searchKeyWord(orderId)
        # NOTE: the original also ran "i += 1" here; incrementing the loop
        # variable has no effect inside a for-loop over range(), so it was
        # removed.


#查询关键字是否存在两个
def searchKeyWord(keywords):
    filename = "C:/test/loyalty2-calc.log"
    word = keywords
    count = 0
    try:
        fobj = open(filename, 'r', encoding='UTF-8')
    except IOError as e:
from log.Log import INTENSE_FILTERING
from discovery.DFG import DFG
import visualization.Visualizer as Visualizer
from log.PreProcess import PreProcess

file_path = ('/home/vercosa/Documentos/bases_desafio_cnj/'
             'log_vara_2.csv')


# Clean the raw CSV log: keep the relevant columns and drop outliers
# (timestamps, movement counts and trace durations outside the 1%-99% band).
preprocess = PreProcess(file_location=file_path)
preprocess.select_desired_columns()
preprocess.filter_outlier_timestamp()
preprocess.filter_outlier_movements(lower=0.01, upper=0.99)
preprocess.filter_outlier_trace_time(lower=0.01, upper=0.99)

event_log = Log(df_log=preprocess.df_log.sort_values('time:timestamp'))
# event_log.filter_variants(1.1)

# Discover a frequency DFG using the mean as aggregation measure.
dfg = DFG(event_log.log,
          parameters={parameters.Parameters.AGGREGATION_MEASURE: 'mean'},
          variant=dfg_discovery.Variants.FREQUENCY)

# Keep only the 10 most frequent activities and the strongest 30% of edges.
dfg.filter_activities(number_act=10)
dfg.filter_edges(percentage=0.3)

print(dfg.dfg)

Visualizer.dfg_visualizer(dfg.dfg,
                          event_log.log,
                          variant=dfg_visualization.Variants.FREQUENCY)
Esempio n. 14
0
 def __init__(self, driver):
     """Store the driver plus default wait intervals, and attach a logger."""
     self.driver = driver
     self.timeout = 10   # default explicit-wait timeout (seconds)
     self.t = 0.5        # polling interval (seconds)
     self.log = Log().get_log()
              'Baixa/Arquivamento',  
             ]

# Prepare the full log, then restrict it to a single vara.
preproc = PreProcess(file_location=file_path)
preproc.select_desired_columns()
preproc.filter_outlier_timestamp()
preproc.map_movements(movement_path)

df_vara = preproc.df_log[preproc.df_log['case: orgao'] == vara]

# Drop movement/duration outliers within the selected vara only.
vara_preproc = PreProcess(df=df_vara)
vara_preproc.filter_outlier_movements(lower=0.05, upper=0.95)
vara_preproc.filter_outlier_trace_time(lower=0.05, upper=0.95)

event_log = Log(df_log=vara_preproc.df_log.sort_values('time:timestamp'))

median_case_duration = case_statistics.get_median_caseduration(
    event_log.log,
    parameters={case_statistics.Parameters.TIMESTAMP_KEY: "time:timestamp"})

# Duration comes back in seconds; report it in days.
print('median case duration: ', str(median_case_duration/ (24*60*60)))

macro = MacroSteps(event_log.log, macrosteps)

res = macro.calc_macrosteps()

print(res)

Esempio n. 16
0
# Keep only the duration outliers: cases shorter than lower_bound or longer
# than upper_bound (all three defined earlier, outside this excerpt).
df_time = df_time[(df_time['duration'] < lower_bound) |
                  (df_time['duration'] > upper_bound)]

# NOTE(review): the bare expressions below (count() / nunique()) have no
# effect in a script -- they look like leftovers from an interactive or
# notebook session, kept for inspection.
df_time.count()
df_time.reset_index(level=0, inplace=True)

df['case:concept:name'].nunique()

# Remove from df every case whose id appears in the outlier frame df_time.
key = ['case:concept:name']
i1 = df.set_index(key).index
i2 = df_time.set_index(key).index

df = df[~i1.isin(i2)]

df['case:concept:name'].nunique()

# log = log_converter.apply(df_log)

# Build the event log, filter variants (argument semantics defined by
# Log.filter_variants -- TODO confirm), and discover a performance DFG
# aggregated by mean.
l = Log(df_log=df)
l.filter_variants(1)
dfg = DFG(l.log,
          parameters={parameters.Parameters.AGGREGATION_MEASURE: 'mean'},
          variant=dfg_discovery.Variants.PERFORMANCE)

print(dfg.dfg)

Visualizer.dfg_visualizer(dfg.dfg,
                          l.log,
                          variant=dfg_visualization.Variants.PERFORMANCE)

print('teste')
 def __init__(self):
     """Set the log tag and logger for this JSON helper."""
     self.TAG = "JsonHelper"
     self.log = Log("budgie-browser-profile-launcher")
from log.Log import Log
from pm4py.statistics.traces.log import case_statistics
from pm4py.algo.discovery.dfg import algorithm as dfg_discovery
from discovery.DFG import DFG
import visualization.Visualizer as Visualizer


# Load an example XES event log, filter its variants, and discover a DFG.
# file_path = 'log/examples/running-example.xes'
file_path = 'log/examples/Receipt phase of an environmental permit' + \
			' application process (_WABO_) CoSeLoG project.xes'


l = Log(file_path)

# variants_count = case_statistics.get_variant_statistics(l.log)
# variants_count = \
#     sorted(variants_count, 
#            key=lambda x: x['count'], 
#            reverse=True)

# print('')
# print(variants_count)
# print('')

# Apply the project's preset "intense" variant filtering before discovery.
l.filter_variants(Log.INTENSE_FILTERING)

# dfg_discovery.apply(l.log)

dfg = DFG(l.log)