Example #1
def test_indexing_get_pi_rate():
    """
    Test indexing of database collections for api queries
    to ensure that they run efficiently
    """
    #*** Instantiate classes:
    flow = flows_module.Flow(config)

    #*** Ingest packets older than flow timeout:
    flow.ingest_packet(DPID1, INPORT1, pkts_ARP_2.RAW[0], datetime.datetime.now() -
                       datetime.timedelta(seconds=config.get_value("flow_time_limit") + 1))
    flow.ingest_packet(DPID1, INPORT1, pkts_ARP_2.RAW[1], datetime.datetime.now() -
                       datetime.timedelta(seconds=config.get_value("flow_time_limit") + 1))

    #*** Ingest packets:
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[1], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[2], datetime.datetime.now())

    #*** Test packet_ins collection indexing...
    #*** Should be 5 documents in packet_ins collection:
    assert flow.packet_ins.count() == 5
    #*** Get query execution statistics:
    explain = api.get_pi_rate(test=1)

    #*** Check an index is used:
    assert explain['queryPlanner']['winningPlan']['inputStage']['stage'] == 'IXSCAN'
    #*** Check how query ran:
    assert explain['executionStats']['executionSuccess'] is True
    assert explain['executionStats']['nReturned'] == 3
    assert explain['executionStats']['totalKeysExamined'] == 3
    assert explain['executionStats']['totalDocsExamined'] == 3
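
For context, the IXSCAN winning plan asserted above requires a supporting index on the queried field. A minimal pymongo sketch of how such an index could be declared; the field name here is an assumption, not taken from the snippet:

import pymongo

# Hypothetical supporting index; the real one is created inside the flows module
flow.packet_ins.create_index([('timestamp', pymongo.DESCENDING)])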
Example #2
def get_click_user(request):
    day = request.GET.get("day")
    ad_id = request.GET.get("adid")
    label = request.GET.get("label")
    print("enter get_click_user view, day %s, ad_id %s" % (day, ad_id))
    ad_click_user_map = parse_rtb_log.get_click_user(".", day,
                                                     ["rtb_log_crit"])
    redis_host = config.get_value("redis_server", "host")
    redis_port = config.get_int_value("redis_server", "port")
    redis_password = config.get_value("redis_server", "password")
    label_user_index = config.get_int_value("redis_server", "label_user_index")
    user_label_index = config.get_int_value("redis_server", "user_label_index")

    redis_client_label_user = parse_rtb_log.connect_redis(
        host=redis_host,
        port=redis_port,
        db_index=label_user_index,
        password=redis_password)
    redis_client_user_label = parse_rtb_log.connect_redis(
        host=redis_host,
        port=redis_port,
        db_index=user_label_index,
        password=redis_password)
    user_id_set = ad_click_user_map[ad_id]
    print(",".join(user_id_set))
    for user_id in user_id_set:
        redis_client_label_user.sadd(label, user_id)
        old_label = redis_client_user_label.get(user_id)
        new_label = label
        if old_label is not None:
            old_label_set = set(re.split(",|:|;", old_label))
            old_label_set.add(label)
            new_label = ",".join(old_label_set)
        redis_client_user_label.set(user_id, new_label)
    return HttpResponse("Hello welcome to use get_click_user")
Example #3
def main():
    """Jcli Main Entry."""

    # Parse arguments provided by the user
    parser = parse.create_parser()
    args = parser.parse_args()

    # Set config object that will hold information on the Jenkins server
    run_config = config.read(args.config)

    # Get the url, user and password needed to set up a connection to the server
    url = config.get_value(run_config, 'jenkins', 'url')
    user = config.get_value(run_config, 'jenkins', 'user')
    password = config.get_value(run_config, 'jenkins', 'password')

    # 'job' command
    if args.main_command == 'job':
        job_executor = Job(args.job_command, url, user, password, args)
        job_executor.run()

    # 'view' command
    if args.main_command == 'view':
        view_executor = View(args.view_command, url, user, password, args)
        view_executor.run()

    # 'node' command
    if args.main_command == 'node':
        node_executor = Node(args.node_command, url, user, password, args)
        node_executor.run()

    # 'plugin' command
    if args.main_command == 'plugin':
        plugin_executor = Plugin(args.plugin_command, url, user, password,
                                 args)
        plugin_executor.run()
Example #4
    def startup(self):
        db_uname = config.get_value('Username')
        db_pword = config.get_value('Password')

        import database
        self.db = database.Database(db_uname, db_pword)
        success = self.db.change_user(db_uname, db_pword, 'gnutr_db')

        if success == 0:
            import gnutr
            import sys
            gnutr.Dialog(
                'error', 'Failed to connect to the database.\n\n' +
                'I suggest that you delete the file\n ' +
                '"~/.gnutrition/config" and run "gnutrition" again.')
            gtk.main_quit()
            sys.exit()

        import store
        self.store = store.Store()

        import person
        self.person = person.Person()
        self.person.setup()

        import base_win
        self.base_win = base_win.BaseWin(self)
        self.base_win.show()
Example #5
def show_swarm_distribution():
    Xi = config.get_value("Xi")
    Xj = config.get_value("Xj")
    #Wi = config.get_value("Wi")
    #Wj = config.get_value("Wj")
    plt.scatter(Xi, Xj)
    plt.show()
Example #6
    def startup(self):
        db_uname = config.get_value("Username")
        db_pword = config.get_value("Password")

        import database

        self.db = database.Database(db_uname, db_pword)
        success = self.db.change_user(db_uname, db_pword, "gnutr_db")

        if success == 0:
            import gnutr
            import sys

            gnutr.Dialog(
                "error",
                "Failed to connect to the database.\n\n"
                + "I suggest that you delete the file\n "
                + '"~/.gnutrition/config" and run "gnutrition" again.',
            )
            gtk.main_quit()
            sys.exit()

        import store

        self.store = store.Store()

        import person

        self.person = person.Person()
        self.person.setup()

        import base_win

        self.base_win = base_win.BaseWin(self)
        self.base_win.show()
Example #7
def update_filename_path(key):
    DIR = cfg.get_value('CSV_FILENAME_BAIDU_DIR', 'NULL')
    cfg.set_value('CSV_FILENAME_BAIDU', DIR + key + '.csv')
    DIR = cfg.get_value('CSV_FILENAME_HOTSPOT_DIR', 'NULL')
    cfg.set_value('CSV_FILENAME_HOTSPOT', DIR + key + '_TOP10.csv')
    DIR = cfg.get_value('CSV_FILENAME_WEIBO_DIR', 'NULL')
    cfg.set_value('CSV_FILENAME_WEIBO', DIR + key + '_WEIBO.csv')
Example #8
def get_click_user(log_base_dir, day, filters):
    log_dir_name = os.path.join(log_base_dir, day)
    print(log_dir_name)
    print(os.path.abspath(log_dir_name))
    print(filters)
    print(os.path.curdir)
    file_list = utility.get_file_list(log_dir_name, filters)
    print("number of log file is %d" % len(file_list))
    redis_host = config.get_value("redis_server", "host")
    redis_port = config.get_int_value("redis_server", "port")
    redis_password = config.get_value("redis_server", "password")
    db_index = config.get_int_value("redis_server", "push_id_index")
    user_id_index = config.get_int_value("rtb_log_index", "user_id")
    redis_client = connect_redis(host=redis_host,
                                 port=redis_port,
                                 db_index=db_index,
                                 password=redis_password)
    ad_click_user_map = dict()
    for file_name in file_list:
        parse_file(file_name,
                   redis_client,
                   ad_click_user_map,
                   index=user_id_index)

    return ad_click_user_map
Example #9
def check_version():
    import gnutr_consts
    import install
    this_ver = install.gnutr_version()
    if config.get_value('check_disabled') or not config.get_value('check_version'):
        return 0
    import time
    interval = config.get_value('check_interval')
    last_check = config.get_value('last_check')
    time_now = time.time()
    if time_now - last_check > interval:
        (curr_ver, sr, date, sr_url,
         mesg) = get_latest_version(gnutr_consts.LATEST_VERSION)
        config.set_key_value('sr', sr)
        config.set_key_value('sr_date', date)
        config.set_key_value('sr_url', sr_url)
        update = False
        if this_ver == curr_ver:
            pass  # Nothing to do
        else:
            update = cmp_version_strings(this_ver, curr_ver)
        if update:
            update_version(curr_ver, mesg)
    last_check = config.set_key_value('last_check', time_now)
    return 1
Example #10
def encodeutf(string):
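    # Note: written for Python 2, where iterating a byte string yields
    # one-byte strings that support .decode(); under Python 3, ord(str_i)
    # on the character would be used directly.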
    str_data = []
    str_count = 0
    if config.get_value('XMLFLAG') == 0:
        str_data.append(hex(len(string)))
        str_data.append(hex(0))
        for str_i in string:
            encod_str = hex(ord(str_i.decode("utf-8")))
            str_data.append(encod_str)
            str_data.append(hex(0))
        str_data.append(hex(0))
        str_data.append(hex(0))
    elif config.get_value('XMLFLAG') == 1:
        # This branch handles the XML resource file format
        str_data.append(hex(len(string)))
        str_data.append(hex(len(string)))
        for str_i in string:
            encod_str = hex(ord(str_i.decode("utf-8")))
            str_data.append(encod_str)
        str_data.append(hex(0))
        # Pad to 4-byte alignment, so loop at most 3 times
        for i in range(3):
            if (str_count + 1 + 2) % 4 == 0:
                return str_data
            else:
                str_count += 1
                str_data.append(hex(0))
    # print str_data
    return str_data
Example #11
def test_indexing_get_pi_rate():
    """
    Test indexing of database collections for api queries
    to ensure that they run efficiently
    """
    #*** Instantiate classes:
    flow = flows_module.Flow(config)

    #*** Ingest packets older than flow timeout:
    flow.ingest_packet(DPID1, INPORT1, pkts_ARP_2.RAW[0], datetime.datetime.now() -
                       datetime.timedelta(seconds=config.get_value("flow_time_limit") + 1))
    flow.ingest_packet(DPID1, INPORT1, pkts_ARP_2.RAW[1], datetime.datetime.now() -
                       datetime.timedelta(seconds=config.get_value("flow_time_limit") + 1))

    #*** Ingest packets:
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[1], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[2], datetime.datetime.now())

    #*** Test packet_ins collection indexing...
    #*** Should be 5 documents in packet_ins collection:
    assert flow.packet_ins.count() == 5
    #*** Get query execution statistics:
    explain = api.get_pi_rate(test=1)

    #*** Check an index is used:
    assert explain['queryPlanner']['winningPlan']['inputStage']['stage'] == 'IXSCAN'
    #*** Check how query ran:
    assert explain['executionStats']['executionSuccess'] is True
    assert explain['executionStats']['nReturned'] == 3
    assert explain['executionStats']['totalKeysExamined'] == 3
    assert explain['executionStats']['totalDocsExamined'] == 3
Example #12
def get_traffic_data():
    key = get_key('google_key')
    origin = config.get_value('origin').replace(' ', '+')
    destination = config.get_value('destination').replace(' ', '+')

    pairs = {'origins': origin, 'destinations': destination,
             'departure_time': 'now', 'traffic_model': 'pessimistic', 'key': key}
    result = requests.get('https://maps.googleapis.com/maps/api/distancematrix/json',
                          params=pairs)

    return result.json()
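
A minimal sketch of consuming the result, assuming a successful response; the field names follow the documented Distance Matrix JSON shape, and error handling is omitted:

data = get_traffic_data()
element = data['rows'][0]['elements'][0]
# duration_in_traffic is present because departure_time was set in the request
print(element['duration_in_traffic']['text'])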
Example #13
    def evaluator(self, candidates, args):
        fitness = []
        if config.get_value("DTW_ALGO") == "CUSTOM_DTW":
            fitness = dtw.Costom_Dtw(candidates=candidates,
                                     v=self.v,
                                     dimensions=self.dimensions)
        if config.get_value("DTW_ALGO") == "Pierre_DTW":
            fitness = dtw.Pierre_DTW(candidates=candidates, v=self.v)
        return fitness
Example #14
def set_craw_start():
    DEFAULT = cfg.get_value('DEFAULT_NAME')
    KEYWORDS = cfg.get_value('KEYWORDS', DEFAULT)
    update_filename_path(KEYWORDS)

    print('set_craw_start=', KEYWORDS, cfg._global_dict)
    KEYWORD_LIST = cfg.get_value('KEYWORD_LIST', [DEFAULT])
    if KEYWORDS not in KEYWORD_LIST:
        craw_start(KEYWORDS)
    return "Start Craw OK"
Example #15
def craw_start(key):
    DIR = cfg.get_value('CSV_FILENAME_WEIBO_DIR')
    cfg.set_value('CSV_FILENAME_WEIBO', DIR + key + '_WEIBO.csv')
    print('craw_start: ', cfg._global_dict)
    WeiBo.sina_crawl(key, url_cnt=4)

    DIR = cfg.get_value('CSV_FILENAME_BAIDU_DIR')
    cfg.set_value('CSV_FILENAME_BAIDU', DIR + key + '.csv')
    print('craw_topTen baidu:', cfg._global_dict)
    craw_baidu(key)
Example #16
def lambda_handler(event, context):
    settings = config.load()

    if settings.aws_region:
        storage.set_region(config.get_value("aws_region"))

    if settings.storage:
        storage_options = config.get_value("storage")
        storage.set_options(storage_options)

    enable_vulnerability_alerts()
Example #17
def get_connection():
    """return a working WLAN(STA_IF) instance or None"""

    # First check if there already is any connection:
    if wlan_sta.isconnected():
        return wlan_sta

    try:
        do_connect(config.get_value('wifi_ssid'), config.get_value('wifi_password'))
    except OSError as e:
        print("exception", str(e))
Example #18
def get_configurations():
    # Get applications
    applications = config.get_value('application', 'applications')
    configurations = []
    for item in applications.split(','):
        d = {}
        databases = config.get_value('databases', item).split(',')
        d['name'] = item
        d['databases'] = databases
        configurations.append(d)

    return {'data': configurations}
Example #19
def get_connection(application, database):
    sql_type = config.get_sql_type('database')
    if sql_type == 'sql-server':
        sql_server = config.get_value('server', application)
        user_id = config.get_value(application, 'userid')
        password = config.get_value(application, 'password')
        conn = f"Driver={{ODBC Driver 11 for SQL Server}};Server={sql_server};Database={database};UID={user_id};PWD={password};"
    elif sql_type == 'sqlite':
        database_addr = config.get_value('server', application)
        conn = f"Driver=SQLite3 ODBC Driver;Database={database_addr}"
    else:
        raise ValueError(f"Unsupported sql_type: {sql_type}")

    return conn
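
The returned string is an ODBC connection string; a minimal usage sketch, assuming pyodbc as the consumer (the application and database names are illustrative):

import pyodbc  # assumed consumer, not shown in the original snippet

conn_str = get_connection('my_app', 'my_db')  # hypothetical names
connection = pyodbc.connect(conn_str)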
Example #20
def send_vulnerable_by_severtiy_to_splunk():
    """Send vulnerable_by_severity.json to Splunk"""
    host = config.get_value("splunk_host")
    token = config.get_value("splunk_token")

    if not host or not token:
        raise ValueError("splunk_host and splunk_token must be configured")

    s = Splunk(host, token)

    s.send_vulnerable_by_severtiy(
        storage.read_json(
            f"{datetime.date.today().isoformat()}/data/repositories.json"))
Example #21
def main(data,
         wmin,
         wmax,
         pop_size,
         max_evaluations,
         prng=None,
         display=False):
    if prng is None:
        prng = Random()
        prng.seed(time())

    v = data
    config.set_value("series_length", len(v))
    problem = timeseriesproblem.timeseriesproblem(dimensions=4,
                                                  v=v,
                                                  wmin=wmin,
                                                  wmax=wmax,
                                                  random=prng)

    ea = pso.pso(prng)
    ea.terminator = inspyred.ec.terminators.evaluation_termination
    ea.topology = inspyred.swarm.topologies.ring_topology
    ea.observer = inspyred.ec.observers.default_observer

    seeds = []
    if config.get_value("CHAOS_ALGO") != "None":
        seeds = CHAOS_INIT()

    final_pop = ea.evolve(generator=problem.generator,
                          evaluator=problem.evaluator,
                          pop_size=pop_size,
                          seeds=seeds,
                          bounder=problem.bounder,
                          maximize=problem.maximize,
                          max_evaluations=max_evaluations,
                          neighborhood_size=5)

    if display:
        best = max(final_pop)
        print('Best Solution: \n{0}'.format(str(best)))
        #print('Best Solution: \n{0}'.format(config.get_value("gbestx")))
        if config.get_value("SHOW_MOTIF") == True:
            SHOW_MOTIF(data, best.candidate)
        if gl.get_value("gbest_sum") != "not found":
            gl.set_value("gbest_sum",
                         gl.get_value("gbest_sum") + config.get_value("gbest"))
        else:
            gl.set_value("gbest_sum", config.get_value("gbest"))
        return ea
Example #22
def getData(ball):
    """
    生成训练集(样本、label),生成待预测数据
    :param ball: 哪个球
    :return:
    """
    df = pd.read_csv("powerballData/sampleData-1.csv").sort_values(
        by=['time'], ascending=True)
    data_num = df.shape[0]
    issue_num = conf.get_value("issue_num")
    i_max = data_num - issue_num

    x_train = np.zeros(shape=(i_max, 1 * issue_num))
    y_train = np.zeros(shape=(i_max, 1))

    for i in range(i_max):
        x = df.iloc[i:i + issue_num,
                    ball].values.flatten()  # ndarray, 1*issue_num
        y = df.iloc[i + issue_num, ball]
        x_train[i] = x
        y_train[i] = y

    x_train = x_train.T
    y_train = y_train.T
    x_test = df.iloc[data_num - issue_num:data_num,
                     ball].values.flatten().reshape(-1, 1)

    print("---------训练集输入---------", x_train.shape)
    print("---------训练集输出---------", y_train.shape)
    print("---------待预测集输入---------", x_test.shape)

    return x_train, y_train, x_test
Example #23
def send_config_value(config_key, value):
    query = ('''
    mutation{
    configValue(key: "%s", name: "%s", value: "%s")
    }
    ''' % (config.get_value('key'), config_key, value))
    client.execute(query)
Example #24
def query(name, **kwargs):
    url = "https://api.github.com/graphql"

    api_token = config.get_value("token")

    transport = RequestsHTTPTransport(
        url=url,
        use_json=True,
        headers={
            "Authorization": "Bearer %s" % api_token,
            "Accept": "application/vnd.github.vixen-preview+json",
        },
    )

    log.debug(
        f"Calling query {name}.graphql with token starting {api_token[0:4]}")
    client = Client(transport=transport, fetch_schema_from_transport=False)
    queries = {}
    for filename in os.listdir("query"):
        with open(f"query/{filename}") as query_file:
            queries[filename.split(".")[0]] = query_file.read()
    query_template = Template(queries[name])
    full_query = query_template.render(**kwargs)
    query = gql(full_query)
    return Dict(client.execute(query))
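
A hypothetical call, assuming a query/repositories.graphql template that takes an org variable (both names are illustrative, not from the snippet):

result = query("repositories", org="my-github-org")
print(result.keys())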
Example #25
def check_dn_disks(config):
  """ Check disks given in 'dfs_data_dir_list' """
  LOG.debug('Checking DataNode disks')
  dir_list = get_value(config, DIR_LIST_KEY, 'HDFS', 'DATANODE')
  devices = read_fstab()
  if dir_list:
    LOG.debug('%s: %s', DIR_LIST_KEY, dir_list)
    for disk in dir_list.split(','):
      fstype, options = get_mount(devices, disk)

      if fstype is not None:
        # Check disk has been formatted with ext3, ext4, or xfs
        LOG.info("Disk '%s' formatted with '%s'", disk, fstype)
        if fstype.lower() not in SUPPORTED_FILESYSTEMS:
          LOG.error("Disk '%s' is not mounted or has not been "
            "formatted with ext3, ext4, or xfs" % disk)
      else:
        LOG.error("Unable to determine file system format for disk '%s'. "
          "Check fstab" % disk)

      if options is not None:
        # Check disk has been mounted with 'noatime' option
        LOG.info("Disk '%s' mount options: '%s'", disk, options)
        if 'noatime' not in options:
          LOG.error("Disk '%s' is not mounted or has not been "
            "mounted with 'noatime' option" % disk)
      else:
        LOG.error("Unable to determine mount options for disk '%s'. "
          "Check fstab" % disk)
  else:
    LOG.error("Configuration error: "
      "unable to find property '%s'" % DIR_LIST_KEY)
Example #26
def show_convergence_rate():
    v = config.get_value("CONVERGENCE_RATE_LIST")
    x = []
    for i in range(len(v)):
        x.append(i + 1)
    plt.plot(v)
    plt.show()
Example #27
def route_data_overview_activity(today):
    repositories = storage.read_json(f"{today}/data/repositories.json")
    counts = defaultdict(int)
    repositories_by_activity = defaultdict(list)
    for repo in repositories["active"]:
        if "recentCommitDaysAgo" in repo:
            currency = repo.currencyBand
            counts[currency] += 1
            repositories_by_activity[currency].append(repo)

    bands = ["within a month", "within a quarter", "within a year", "older"]
    template_data = {
        "content": {
            "title": "Overview - Activity",
            "org": config.get_value("github_org"),
            "activity": {
                "bands": bands,
                "counts": counts,
                "repositories": repositories_by_activity,
            },
        },
        "footer": {
            "updated": today
        },
    }

    overview_activity_status = storage.save_json(
        f"{today}/routes/overview_activity.json", template_data)
    return overview_activity_status
Example #28
def global_data_init():
    CWDIR = os.getcwd()

    CSV_FILENAME_BAIDU_DIR = CWDIR + '/topic/'
    CSV_FILENAME_HOTSPOT_DIR = CWDIR + '/topic/'
    CSV_FILENAME_WEIBO_DIR = CWDIR + '/topic/'

    cfg.set_value("CSV_FILENAME_BAIDU_DIR", CSV_FILENAME_BAIDU_DIR)
    cfg.set_value("CSV_FILENAME_HOTSPOT_DIR", CSV_FILENAME_HOTSPOT_DIR)
    cfg.set_value("CSV_FILENAME_WEIBO_DIR", CSV_FILENAME_WEIBO_DIR)
    print(CSV_FILENAME_BAIDU_DIR, CSV_FILENAME_HOTSPOT_DIR, CSV_FILENAME_WEIBO_DIR)

    # Default values
    cfg.set_value('DEFAULT_NAME', '科比')
    DEFAULT_NAME = cfg.get_value('DEFAULT_NAME')
    # ======= Default data ==========
    # KEYWORDS = '元旦'
    # Baidu default data
    # CSV_FILENAME_BAIDU = CSV_FILENAME_BAIDU_DIR + "元旦.csv"
    cfg.set_value("CSV_FILENAME_BAIDU", CSV_FILENAME_BAIDU_DIR + DEFAULT_NAME + ".csv")
    # Hotspot ranking data
    # CSV_FILENAME_HOTSPOT = CSV_FILENAME_HOTSPOT_DIR + "元旦_TOP10.csv"
    cfg.set_value("CSV_FILENAME_HOTSPOT", CSV_FILENAME_HOTSPOT_DIR + DEFAULT_NAME + "_TOP10.csv")
    # Weibo data
    # CSV_FILENAME_WEIBO = CSV_FILENAME_WEIBO_DIR + "元旦.csv"
    cfg.set_value("CSV_FILENAME_WEIBO", CSV_FILENAME_WEIBO_DIR + DEFAULT_NAME + "_WEIBO.csv")

    KEYWORD_LIST = get_keyword_list('data.txt')
    cfg.set_value('KEYWORD_LIST', KEYWORD_LIST)
    print('read KEYWORD_LIST: ', KEYWORD_LIST)
Example #29
def cli_task(task):
    today = datetime.date.today().isoformat()
    org = config.get_value("github_org")
    history = get_history()

    if task == "repository-status":
        get_github_repositories_and_classify_by_status(org, today)
    elif task == "get-activity":
        get_github_activity_refs_audit(org, today)
        get_github_activity_prs_audit(org, today)
    elif task == "dependabot":
        get_dependabot_status(org, today)
    elif task == "advisories":
        if history.current:
            update_github_advisories_status()
        else:
            get_github_resolve_alert_status()
    elif task == "membership":
        analyse_repo_ownership(today)
        analyse_team_membership(today)
    elif task == "analyse-activity":
        analyse_pull_request_status(today)
        analyse_activity_refs(today)
    elif task == "patch":
        analyse_vulnerability_patch_recommendations(today)
    elif task == "routes":
        build_route_data(today)
    else:
        log.error("ERROR: Undefined task")
Example #30
def send_sensor_value(sensor_type, value):
    query = ('''
    mutation{
    sensorValue(key: "%s", sensorType: "%s", value: "%s")
    }
    ''' % (config.get_value('key'), sensor_type, value))
    client.execute(query)
Example #31
def send_controller_value(controller, value):
    query = ('''
    mutation{
    controllerCall(key: "%s", controller: "%s", value: "%s")
    }
    ''' % (config.get_value('key'), controller, value))
    client.execute(query)
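
Both senders share the same mutation pattern; a short usage sketch with illustrative sensor and controller names:

send_sensor_value("temperature", 21.5)
send_controller_value("pump", "on")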
Example #32
        def wb_save_on_click(b):

            # Create the output element in the JSON file
            _dict = self.dump()

            key = list(_dict.keys())[0]
            value = _dict[key]

            config.set_value(key, value)

            # Add an empty key for the marker-aggregator (to be dealt with
            # in a second phase)
            if config.get_value("marker-aggregator") is None:
                config.set_value("marker-aggregator", [{}])

                # Add a default marker-sink (to be dealt with
                # in a second phase)
                config.set_value("marker-sink", [{
                    "output_file": "./marker_output.csv",
                    "include_header": True
                }])
            else:
                config.set_value("marker-sink", [{
                    "output_file": "./marker_output.csv",
                    "include_header": True
                }, {
                    "output_file": "./agg_marker_output.csv",
                    "include_header": True
                }])
Example #33
def parse(masteries):
    champions = []
    for champion in masteries:
        champion_id = champion["championId"]
        champion_name = id_to_name(champion_id)

        version = config.get_value("version")["champion"]
        image_url = IMAGE_API.format(version=version, champion_name=champion_name)

        chest_granted = champion["chestGranted"]

        champion_level = champion["championLevel"]
        if champion_level in (5, 6):
            tokens = champion["tokensEarned"]
            level_progress = (tokens / 3) * 100
        elif champion_level == 7:
            level_progress = 100
        else:
            xp_since = champion["championPointsSinceLastLevel"]
            xp_until = champion["championPointsUntilNextLevel"]
            level_progress = (xp_since / (xp_since + xp_until)) * 100

        info = [champion_name, champion_level, level_progress, chest_granted, image_url]
        champions.append(info)

    # sort champions based on level, progress, chest, name
    champions = sorted(champions,
                       key=lambda c: (c[1], c[2], c[3], c[0]),
                       reverse=True)
    
    return champions
Example #34
    def _swarm_variator(self, random, candidates, args):
        inertia = args.setdefault('inertia', 0.5)
        cognitive_rate = args.setdefault('cognitive_rate', 2.1)
        social_rate = args.setdefault('social_rate', 2.1)
        if len(self.archive) == 0:
            self.archive = self.population[:]
        if len(self._previous_population) == 0:
            self._previous_population = self.population[:]
        neighbors = self.topology(self._random, self.archive, args)
        offspring = []
        for x, xprev, pbest, hood in zip(self.population,
                                         self._previous_population,
                                         self.archive, neighbors):
            nbest = max(hood)
            particle = []
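            # Canonical PSO update: the inertia term keeps the previous
            # direction of travel, the cognitive term pulls toward the
            # particle's personal best, and the social term pulls toward
            # the neighbourhood best; CRAZY_PSO adds a random kick below.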

            for xi, xpi, pbi, nbi in zip(x.candidate, xprev.candidate,
                                         pbest.candidate, nbest.candidate):
                value = (xi + inertia * (xi - xpi) +
                         cognitive_rate * random.random() * (pbi - xi) +
                         social_rate * random.random() * (nbi - xi))
                if (random.random() < config.get_value("CRAZY_PSO")):
                    t = random.random()
                    value += 1 + t
                particle.append(int(value))
            particle = self.bounder(particle, args)
            offspring.append(particle)
        return offspring
Example #35
    def show(self):
        page = config.get_value('Page')
        if page == 'Plan':
            self.on_plan_button_released(None)
        elif page == 'Food':
            self.on_food_button_released(None)
        else:
            self.on_recipe_button_released(None)
Example #36
    def __init__(self):
        if not config.get_value("Name"):
            import druid

            self.druid = druid.Druid(self)
            self.druid.show()
        else:
            self.startup()
Example #37
def track_command_usage(command_name, arguments=None):
    """ List a command as used """

    username = config.get_value(config.VALUE_USER_NAME)

    data = {
        "event": command_name,
        "properties": {
            "command_name": command_name,
            "event_source": "cli",
            "token": get_token(),
            "arguments": arguments,
            "distinct_id": username,
        },
    }

    encoded_data = base64.b64encode(json.dumps(data).encode("utf-8")).decode("ascii")
    url = mixpanel_api_url_template % encoded_data

    requests.get(url)
Example #38
def test_indexing():
    """
    Test indexing of packet_ins and classification database collections

    Packets are ingested from 3 flows.

    Packets from one of the flows are too old to be significant

    The most recent packet is the one that the flow context is in
    and it only has one other packet ingested (i.e. packet_count == 2)
    """
    #*** Initial main_policy won't match as looking for tcp-1234:
    policy = policy_module.Policy(config,
                            pol_dir_default="config/tests/regression",
                            pol_dir_user="******",
                            pol_filename="main_policy_regression_static.yaml")

    #*** Instantiate flow and identities objects:
    flow = flows_module.Flow(config)
    ident = identities_module.Identities(config, policy)

    #*** Ingest packets older than flow timeout:
    flow.ingest_packet(DPID1, INPORT1, pkts_ARP_2.RAW[0], datetime.datetime.now() -
                       datetime.timedelta(seconds=config.get_value("flow_time_limit") + 1))
    flow.ingest_packet(DPID1, INPORT1, pkts_ARP_2.RAW[1], datetime.datetime.now() -
                       datetime.timedelta(seconds=config.get_value("flow_time_limit") + 1))
    #*** Ingest current packets from two different flows:
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[0], datetime.datetime.now())
    #*** Classify the packet:
    policy.check_policy(flow, ident)
    flow.classification.commit()
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[1], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[2], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[1], datetime.datetime.now())
    #*** Classify the packet:
    policy.check_policy(flow, ident)
    flow.classification.commit()
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[3], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[4], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[5], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[6], datetime.datetime.now())
    #*** Classify the packet:
    policy.check_policy(flow, ident)
    flow.classification.commit()
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[7], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[8], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[9], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts2.RAW[10], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT2, pkts2.RAW[11], datetime.datetime.now())
    flow.ingest_packet(DPID1, INPORT1, pkts.RAW[2], datetime.datetime.now())
    #*** Classify the packet:
    policy.check_policy(flow, ident)
    flow.classification.commit()

    #*** Test packet_ins collection indexing...
    #*** Should be 16 documents in packet_ins collection:
    assert flow.packet_ins.count() == 16
    #*** Get query execution statistics:
    explain = flow.packet_count(test=1)
    #*** Check an index is used:
    assert explain['queryPlanner']['winningPlan']['inputStage']['stage'] == 'IXSCAN'
    #*** Check how query ran:
    assert explain['executionStats']['executionSuccess'] is True
    assert explain['executionStats']['nReturned'] == 2
    #*** MongoDB examines 2 or 3 keys for this query (the exact count varies):
    assert explain['executionStats']['totalKeysExamined'] > 1
    assert explain['executionStats']['totalKeysExamined'] < 4
    assert explain['executionStats']['totalDocsExamined'] == 2

    #*** Test classifications collection indexing...
    #*** Should be 4 documents in classifications collection:
    assert flow.classifications.count() == 4
    #*** Get query execution statistics:
    explain2 = flow.classification.test_query()
    #*** Check an index is used:
    assert explain2['queryPlanner']['winningPlan']['inputStage']['stage'] == 'FETCH'
    #*** Check how query ran:
    assert explain2['executionStats']['executionSuccess'] is True
    assert explain2['executionStats']['nReturned'] == 1
    assert explain2['executionStats']['totalKeysExamined'] == 1
    assert explain2['executionStats']['totalDocsExamined'] == 1
Example #39
import json
import config
import weather
import route

c = config.get_value('openweather_key')
print(c)

e = weather.get_current_weather()
print(e)

t = route.get_current_traffic('2 Impala ave Sandton', '39 Rivonia Rd Sandton')
print(t)

Example #40
def get_key():
    return config.get_value('google_key')
Example #41
def get_token():
    return config.get_value(config.VALUE_MIXPANEL_TOKEN)
Example #42
def get_token():
    return get_value(VALUE_HEROKU_TOKEN)