Code example #1
    def load(self, papers):
        ct = CodeTimer("Convert paper to graph", silent=True, unit="s")
        with ct:
            for index, paper in enumerate(papers):
                self.loader.load_json(paper.to_dict(), "Paper")
        log.debug("Convert papers to graph took {}s".format(ct.took))

        try:
            if db_loading_lock is not None:
                db_loading_lock.acquire()
                log.info("{}Acquired DB loading lock.".format(
                    self.name + ": " if self.name else ""))
        except NameError:
            # we are in single-threaded mode; no lock was set
            pass
        log.debug("Load Data to DB...")
        try:
            ct = CodeTimer("Create Indexes", silent=True, unit="s")
            with ct:
                self.loader.create_indexes(graph)
            log.debug("Creating Indexes took {}s".format(ct.took))
            ct = CodeTimer("Load to DB", silent=True, unit="s")
            with ct:
                self.loader.merge(graph)
            log.debug("Loading papers to db took {}s".format(ct.took))
        finally:
            try:
                if db_loading_lock is not None:
                    log.info("{}Release DB loading lock.".format(
                        self.name + ": " if self.name else ""))
                    db_loading_lock.release()
            except NameError:
                # we are in single-threaded mode; no lock was set
                pass
            log.debug("...Loaded Data to DB")
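The NameError handling above implies db_loading_lock is an optional module-level global that only exists in multi-process mode. A minimal sketch of how such a lock might be injected, assuming multiprocessing workers (the init_lock wiring is an illustration, not taken from the source):

import multiprocessing

def init_lock(lock):
    # Expose the shared lock as a module-level global, so worker code can
    # call db_loading_lock.acquire()/release(); single-threaded runs simply
    # never define it, which triggers the NameError path above.
    global db_loading_lock
    db_loading_lock = lock

if __name__ == "__main__":
    lock = multiprocessing.Lock()
    pool = multiprocessing.Pool(processes=4, initializer=init_lock,
                                initargs=(lock,))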
Code example #2
def testing():
    centroids_meters, centroids_feet = open_locations_data()   
    r_s = 4000
    t = 6
    start_time = datetime.datetime(2020,1,1,0, 0, 0)+datetime.timedelta(hours=2*t)
    end_time = datetime.datetime(2020,1,1,2, 0, 0)+datetime.timedelta(hours=2*t)
    print(start_time)
    #counts = open_ride_data(start_time,end_time)
    #with open('../../../Experimental_Data/test_countdata.json', 'w+') as outfile:
    #    json.dump(counts, outfile)
    
    with open('../../../Experimental_Data/test_countdata.json', 'r') as infile:
        counts = json.load(infile)

    problem2 = probalistic_coverage.ProbablisticCoverage_v2(centroids_meters,centroids_meters,counts,r_s)
    problem3 = probalistic_coverage.ProbablisticCoverage_v2(centroids_meters,centroids_meters,counts,r_s,soft_edges = True,soft_edge_eps = 0.05)

    k = 10
    with CodeTimer("general accel"):
        S = problem3.bound_greedy_accel_general(k,lowerbound = True)
    print(problem2.objective(S))
    # with CodeTimer("special accel"):
    #     S2 = problem2.lb_greedy_accel_specialized(k)
    # print(problem2.objective(S2))
    with CodeTimer("basic"): 
        S3 = problem3.lb_greedy(k)
    print(problem2.objective(S3))
    with CodeTimer("basic specialized"):
        S4 = problem2.lb_greedy_2(k)
    print(problem2.objective(S4))
    print(S)
    print(S3)
    print(S4)
    n = 25
  
    S_g = problem2.find_greedy_choices(S)
    S_u = problem2.find_greedy_choices(S,upperbound=True)

    margs = [problem2.objective([S[i]]+S[:i])-problem2.objective(S[:i]) for i in range(len(S))]
    g_margs = [problem2.objective([S_g[i]]+S[:i])-problem2.objective(S[:i]) for i in range(len(S))]
    
    u_margs_true = [problem2.pairwise_upperbound(S[i],S[:i]) for i in range(len(S))]
    u_margs = [problem2.pairwise_upperbound(S_u[i],S[:i]) for i in range(len(S))]

    l_margs_true = [problem2.pairwise_lowerbound(S[i],S[:i]) for i in range(len(S))]
    plt.figure()
    plt.plot(margs)
    plt.plot(g_margs)
    plt.legend(["marginals","best marginals "])
    plt.figure()
    plt.plot(u_margs)
    plt.plot(u_margs_true)
    plt.plot(l_margs_true)
    plt.legend(["Opt Upper bounds","our upperbound","lowerbounds"])
    plt.show()
Code example #3
def test_single_blank():
    from linetimer import CodeTimer
    from time import sleep

    ct = CodeTimer()

    with ct:
        sleep(0.1)

    assert ct.took >= 100.0
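For reference, ct.took holds the elapsed time in the timer's unit (milliseconds by default, which is what the assertion above relies on), and an instance can be reused across with-blocks. A small sketch combining the options seen throughout these examples:

from time import sleep
from linetimer import CodeTimer

# silent=True suppresses the automatic log line; unit="s" reports seconds
ct = CodeTimer("work", silent=True, unit="s")
with ct:
    sleep(0.2)
print(ct.took)  # roughly 0.2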
Code example #4
def analyse(device, config=None, data=None):
    """
    Data sample posted to AWS by device
    Load data from storage using device ID
    Call analytics module passing data in a Python object
    Analytics runs and updates data objects as necessary
    Updates are persisted to storage medium
    """
    with CodeTimer('analyse'):
        _run_analysis(device, config=config, data=data)
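A hypothetical invocation (the device ID and config override are made-up values; the code below shows config being merged over the stored configuration):

# time a full analysis run for one device, un-silencing the per-stage
# timers via the 'codetimer' flag consumed in _run_analysis
analyse("device-1234", config={"codetimer": True})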
Code example #5
def _run_analysis(device, config=None, data=None):
    # If CW_CONFIG is set use it to find configuration
    # Otherwise look for config/default.json or config/configure.py in that order
    if 'CW_CONFIG' in os.environ:
        config_path = os.environ['CW_CONFIG']
        analytics_config = _load_config(config_path)
    else:
        config_path = 'config/default.json'
        if os.path.exists(config_path):
            analytics_config = _load_config(config_path)
        else:
            analytics_config = _load_config('config.configure')
    print('Loaded configuration from', config_path)

    # If supplementary config information is provided merge it
    if config is not None:
        analytics_config = merge(analytics_config, config)

    # Find the analytics module and import it
    if 'analytics' in analytics_config:
        print('Using analytics module:', analytics_config['analytics'])
        analytics = importlib.import_module(analytics_config['analytics'])
    else:
        analytics = importlib.import_module('process.analytics')

    silent = True
    if 'codetimer' in analytics_config:
        silent = not analytics_config['codetimer']
    with CodeTimer('create data store', silent=silent):
        data_store = storage.ConcurrentDataStore(analytics_config, data)
    with CodeTimer('retrieve data', silent=silent):
        device_data = data_store.retrieve(device)
    if 'filter' in analytics_config:
        with CodeTimer('filter data', silent=silent):
            print('Using filter module:', analytics_config['filter'])
            filter = importlib.import_module(analytics_config['filter'])
            device_data = filter.filter(analytics_config, device_data)
    with CodeTimer('process data', silent=silent):
        result = analytics.process(device_data)
    with CodeTimer('store data', silent=silent):
        data_store.store(device, result)
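_load_config itself is not part of this excerpt. A minimal sketch consistent with how it is called above, accepting either a JSON file path or a dotted module path; the CONFIG attribute name is an assumption:

import importlib
import json

def _load_config(path_or_module):
    # hypothetical helper: *.json arguments are read from disk, anything
    # else is treated as an importable module exposing a CONFIG dict
    if path_or_module.endswith('.json'):
        with open(path_or_module) as f:
            return json.load(f)
    return importlib.import_module(path_or_module).CONFIG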
Code example #6
def test_two_named():
    from linetimer import CodeTimer
    from time import sleep

    name1 = "name1"
    name2 = "name2"

    ct1 = CodeTimer(name1)

    with ct1:
        sleep(1)

    ct2 = CodeTimer(name2)

    with ct2:
        sleep(2)

    assert ct1.took >= 1000.0
    assert ct1.name == name1

    assert ct2.took >= 2000.0
    assert ct2.name == name2
Code example #7
def test_single_named():
    from linetimer import CodeTimer
    from time import sleep

    name = "name"

    ct = CodeTimer(name)

    with ct:
        sleep(0.1)

    assert ct.took >= 100.0
    assert ct.name == name
Code example #8
def test_two_nested():
    from linetimer import CodeTimer
    from time import sleep

    name1 = "outer"
    name2 = "inner"

    ct1 = CodeTimer(name1)

    with ct1:
        sleep(0.1)

        ct2 = CodeTimer(name2)

        with ct2:
            sleep(0.2)

    assert ct1.took >= 300.0
    assert ct1.name == name1

    assert ct2.took >= 200.0
    assert ct2.name == name2
Code example #9
def test_time_unit():
    from linetimer import CodeTimer
    from time import sleep

    unit1 = 's'

    ct1 = CodeTimer('ct_' + unit1, unit=unit1)

    with ct1:
        sleep(0.1)

    assert ct1.took >= 0.1
    assert ct1.unit == unit1

    unit2 = 'xyz'

    ct2 = CodeTimer('ct_' + unit2, unit=unit2)

    with ct2:
        sleep(0.1)
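    # an unrecognized unit string falls back to the default of milliseconds,
    # which is why the 0.1 s sleep above still reports took >= 100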

    assert ct2.took >= 100
    assert ct2.unit == unit2
Code example #10
    def greedy_with_timing(self, n):
        # Reuse a single CodeTimer: after each with-block, ct.took holds the
        # duration of that block, which we accumulate into a running total.
        ct = CodeTimer()
        X = list(range(len(self.sensor_sets)))
        S = []
        times = []
        cumulative_time = 0
        for i in range(n):
            with ct:
                x = max(
                    X,
                    key=lambda x: self.objective(S + [x]) - self.objective(S))
                S.append(x)
                X.remove(x)
            cumulative_time += ct.took
            times.append(cumulative_time)
        return S, times
Code example #11
def speed_experiment():
    centroids_meters, centroids_feet = open_locations_data()
    r_s = 3000
    experiment_data = []
    with open('../../../Experimental_Data/countdata.json', 'r') as infile:
        count_sets = json.load(infile)

    speed_data = []
    for t in range(12):
        data = {"t":t}
        print("Time:",t)
        data['g_times'] = []
        data['lb_times'] = []
        data['ub_times'] = []
        counts = count_sets[t]
        timer  = CodeTimer()
        problem = probalistic_coverage.ProbablisticCoverage_v2(centroids_meters,centroids_meters,counts,r_s,soft_edges = True,soft_edge_eps = 0.05)
        n_s = list(range(1,51))
        S, times = problem.greedy_with_timing(50)
        data['g_times'] = times
        for n in n_s:
            print("n = ",n)
            # print("greedy")
            # with timer:
            #     S = problem.greedy(n)
            # data['g_times'].append(timer.took)

            print("greedy")
            with timer:
                S = problem.bound_greedy_accel_general(n,lowerbound = True)
            data['lb_times'].append(timer.took)

            print("greedy")
            with timer:
                S = problem.bound_greedy_accel_general(n,lowerbound = False)
            data['ub_times'].append(timer.took)
        print(data)
        speed_data.append(data)
    now = datetime.datetime.now()
    date_time = now.strftime("%m-%d-%Y_%H-%M")
    with open('../../../Experimental_Data/speed experiment '+date_time+"_soft.json", 'w+') as outfile:
        json.dump(speed_data, outfile)
Code example #12
def Active_Learning_Testing(total_num_plans=240,
                            plans_per_round=30,
                            random_seed=150,
                            noise_value=0.2,
                            random_sampling_enabled=False,
                            include_gain=True,
                            include_discovery_term=True,
                            include_feature_distinguishing=True,
                            include_prob_term=True,
                            include_feature_feedback=True,
                            manager_pickle_file="default_man.p",
                            prob_feat_select=0.2,
                            preference_distribution_string="power_law",
                            repetitions=1):

    print("doing probability per level =", prob_feat_select)
    print(manager_pickle_file)
    ret_struct = []
    for i in range(repetitions):
        with CodeTimer():
            ret_struct.append(
                test_full_cycle_and_accuracy(
                    test_size=1000,
                    num_rounds=int(total_num_plans / plans_per_round),
                    num_plans_per_round=plans_per_round,
                    random_sampling_enabled=random_sampling_enabled,
                    include_gain=include_gain,
                    include_discovery_term=include_discovery_term,
                    include_feature_distinguishing=
                    include_feature_distinguishing,
                    include_prob_term=include_prob_term,
                    include_feature_feedback=include_feature_feedback,
                    random_seed=random_seed,
                    manager_pickle_file=manager_pickle_file,
                    input_rating_noise=noise_value,
                    prob_feat_select=prob_feat_select,
                    preference_distribution_string=
                    preference_distribution_string))
    #end for loop
    return ret_struct
Code example #13
def test_logger_func(capsys):
    import logging.config
    from linetimer import CodeTimer
    from time import sleep

    logger_config = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {
                'format': '[%(levelname)s] - %(message)s'
            },
        },
        'handlers': {
            'default': {
                'level': 'INFO',
                'formatter': 'standard',
                'class': 'logging.StreamHandler',
                'stream': 'ext://sys.stdout',  # Default is stderr
            },
        },
        'loggers': {
            '': {  # root logger
                'handlers': ['default'],
                'level': 'INFO',
                'propagate': True
            }
        }
    }

    logging.config.dictConfig(logger_config)

    logger = logging.getLogger()

    with CodeTimer('ct', unit='s', logger_func=logger.info):
        sleep(0.1)

    captured = capsys.readouterr()
    assert captured.out.startswith("[INFO] - Code block 'ct' took: 0.1")
    assert captured.out.endswith(' s\n')
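The logger_func hook only needs a callable that accepts the formatted message string. A smaller sketch without dictConfig, assuming basicConfig defaults are acceptable:

import logging
from time import sleep
from linetimer import CodeTimer

logging.basicConfig(level=logging.INFO, format='[%(levelname)s] - %(message)s')
log = logging.getLogger(__name__)

with CodeTimer('ct', unit='s', logger_func=log.info):
    sleep(0.1)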
Code example #14
def google_tr(
    text: str,
    from_lang: Optional[str] = "auto",
    to_lang: str = "zh",
    page: Optional[Page] = PAGE,
    verbose: Union[bool, int] = False,
    timeout: float = 5,
):
    # fmt: on
    """Deepl via pyppeteer.

    text = "Test it and more"
    from_lang="auto"
    to_lang="zh"
    # page=PAGE
    page=None
    verbose=True
    """
    #

    try:
        text = str(text).strip()
    except Exception as exc:
        logger.warning("%s, setting to ''", exc)
        text = ""

    if not text:
        return ""

    if to_lang in ["zh", "chinese", "zhong"]:
        to_lang = "zh-CN"
    if from_lang is None:
        from_lang = "auto"
    if from_lang in ["zh", "chinese", "zhong"]:
        from_lang = "zh-CN"
    if to_lang in ["zh", "chinese", "zhong"]:
        to_lang = "zh-CN"

    if from_lang == to_lang:
        logger.info("Nothing to do, man")
        return text

    _ = 5000
    if len(text) > _:  # 5250
        logger.warning("text too long, trimmed to %s", _)
        text = text[:_]
        # return text

    # set verbose=40 to turn most things off
    if isinstance(verbose, bool):
        if verbose:
            logzero.setup_default_logger(level=10)
        else:
            logzero.setup_default_logger(level=20)
    else:  # integer: log_level
        logzero.setup_default_logger(level=verbose)

    logger.debug(" Entry ")

    # if page is not supplied, get one, TODO use get_pwbrowser
    if not isinstance(page, Page):
        page = PAGE

    try:
        google_tr.from_lang
    except AttributeError:
        google_tr.from_lang = "auto"
    try:
        google_tr.to_lang
    except AttributeError:
        google_tr.to_lang = "zh-CN"

    url = ""
    if (from_lang, to_lang) != (google_tr.from_lang, google_tr.to_lang):
        url = f"https://translate.google.cn/?sl={from_lang}&tl={to_lang}&op=translate"

        logger.debug(" lang pair changed")

        try:
            page.goto(url, timeout=45 * 1000)
        except Exception as exc:
            logger.error(exc)
            raise

    try:
        textarea = page.wait_for_selector('//textarea', timeout=45 * 1000)
    except Exception as exc:
        logger.error(exc)
        raise

    if verbose < 11 or verbose is True:
        _ = False  # silent
    else:
        _ = True
    with CodeTimer(name="fetching", unit="s", silent=_):
        _ = """
        # maybe no need to click, save some time
        sel_btn = "#ow42 > div:nth-child(1) > span > button > i"
        try:  # first nothing to click, timeout in 2 s
            btn = await page.wait_for_selector(sel_btn, timeout=5000)
            await btn.click()
        except Exception as exc:
            logger.error(exc)
        # """

        timeout = 30  # defaults to 30 seconds
        n_lines = len(text.splitlines())
        if n_lines > 100:
            timeout = n_lines * 0.4
        timeout = timeout * 1000  # convert to ms
        try:
            textarea.fill('', timeout=timeout)
            sleep(0.1)
            textarea.fill('', timeout=timeout)
            sleep(0.1)
            textarea.fill(text, timeout=timeout)
        except Exception as exc:
            logger.error(exc)
            raise

        idx = 0
        flag = False
        ulimit = 3 / 0.1
        while not flag and idx < ulimit:
            idx += 1
            content = page.content()
            # doc = pq(content)

            # flag = re.findall(r'data-text="[^"]+', doc.html())
            flag = re.findall(r'data-text="[^"]+', content)
            logger.debug(flag)
            if flag:
                break
            sleep(0.1)
        logger.debug("loop: %s", idx)

        sleep(0.1)
        content = page.content()
        # doc = pq(content)

        try:
            # res, = re.search(r'data-text="([^"]+)', doc.html()).groups()
            res, = re.search(r'data-text="([^"]+)', content).groups()
        except Exception as exc:
            logger.error(exc)
            res = str(exc)
        res = html.unescape(res)
        logger.debug(res)

    if (from_lang, to_lang) != (google_tr.from_lang, google_tr.to_lang):
        logger.debug("url=%s", url)
        logger.debug("page.url=%s", page.url)
        logger.debug("%s, %s", url == page.url,
                     re.findall(rf"tl={to_lang}", page.url))
        if not re.findall(rf"tl={to_lang}", page.url):
            logger.warning(
                " target lang [%s] does not appear to be a valid language, fell back to the previous tl [%s]",
                to_lang, google_tr.to_lang)
        else:
            google_tr.from_lang, google_tr.to_lang = from_lang, to_lang

    logger.debug(" Fini ")

    return res
Code example #15
def effectiveness_experiment(n):
    centroids_meters, centroids_feet = open_locations_data()
    r_s = 3000
    experiment_data = []

    #Collect Data for testing
    count_sets = []
    for t in range(12):
        start_time = datetime.datetime(2020,1,1,0, 0, 0)+datetime.timedelta(hours=2*t)
        end_time = datetime.datetime(2020,1,1,2, 0, 0)+datetime.timedelta(hours=2*t)
        print(start_time)
        counts = open_ride_data(start_time,end_time)
        count_sets.append(counts)
    
    # with open('/Users/andrewdownie/Documents/Graduate Studies/Research/Simulations/Experimental_Data/countdata.json', 'w+') as outfile:
    #     json.dump(count_sets, outfile)
    
    #with open('/Users/andrewdownie/Documents/Graduate Studies/Research/Simulations/Experimental_Data/countdata.json', 'r') as infile:
    #    count_sets = json.load(infile)

    for t in range(12):
        estimated_bounds = []
        theoretical_bounds = []
        greedy_values = []
        lb_values = []
        
        start_time = datetime.datetime(2020,1,1,0, 0, 0)+datetime.timedelta(hours=2*t)
        end_time = datetime.datetime(2020,1,1,2, 0, 0)+datetime.timedelta(hours=2*t)
        print(start_time)
        counts = count_sets[t]
        g_timer = CodeTimer("greedy time",unit = "s")
        lb_timer = CodeTimer("lb time", unit = "s")
        ub_timer = CodeTimer("ub time", unit = "s")

        trial_data = {"n":n,"t":t}
        # initialize problem
        problem = probalistic_coverage.ProbablisticCoverage_v2(centroids_meters,centroids_meters,counts,r_s,soft_edges = True, soft_edge_eps = 0.05)
        
        # execute Algorithms
        with g_timer:
            S = problem.greedy(n)
        trial_data["greedy_time"] = g_timer.took
        
        with lb_timer:
            S_lower = problem.bound_greedy_accel_general(n,lowerbound = True)
        trial_data["lb_time"] = lb_timer.took

        with ub_timer:
            S_upper = problem.bound_greedy_accel_general(n,lowerbound = False)
        trial_data["ub_time"] = ub_timer.took


        # Lower Bound Approximation Analysis
        print("computing bounds for lower bound algo")
        S_g_l = problem.find_greedy_choices(S_lower)
        S_u_l = problem.find_greedy_choices(S_lower,upperbound=True)

        # for i in range(n):
        #     print("numer",problem.marginal(S_g_l[i],S_lower[:i]))
        #     print("denominator",max([problem.marginal(S_lower[i],S_lower[:i]),0.00001]))

        

        theoretical_alphas_lb = compute_theoretical_alphas(S_g_l,S_lower,problem)
        #print("Theortical Alphas",theoretical_alphas_lb)
        trial_data["theoretical_bounds_lb"] = [compute_bound_sequential(theoretical_alphas_lb[:i+1])for i in range(len(theoretical_alphas_lb))]

        
        # print("Theortical Alphas",theoretical_alphas_lb)
        # print("Theoretical Bound",trial_data["theoretical_bounds_lb"])

        estimated_alphas_lb = [problem.pairwise_upperbound(S_u_l[i],S_lower[:i])/max([problem.pairwise_lowerbound(S_lower[i],S_lower[:i]),0.0000001] )for i in range(n)]
        trial_data["estimated_bounds_lb"] = [compute_bound_sequential(estimated_alphas_lb[:i+1])for i in range(len(estimated_alphas_lb))]
    

        # print("Estimated Alphas",estimated_alphas_lb)
        # print("Estimated Bound",trial_data["estimated_bounds_lb"])


        print("computing bounds for upper bound algo")
        S_g_u = problem.find_greedy_choices(S_upper)

        theoretical_alphas_ub = compute_theoretical_alphas(S_g_u,S_upper,problem)
        trial_data["theoretical_bounds_ub"] = [compute_bound_sequential(theoretical_alphas_ub[:i+1])for i in range(len(theoretical_alphas_ub))]

        
        # print("Theortical Alphas ub",theoretical_alphas_ub)
        # print("Theoretical Bound ub",trial_data["theoretical_bounds_ub"])

        estimated_alphas_ub = [problem.pairwise_upperbound(S_upper[i],S_upper[:i])/max([problem.pairwise_lowerbound(S_upper[i],S_upper[:i]),0.00001] )for i in range(n)]
        trial_data["estimated_bounds_ub"] = [compute_bound_sequential(estimated_alphas_ub[:i+1])for i in range(len(estimated_alphas_ub))]
    

        # print("Estimated Alphas",estimated_alphas_ub)
        # print("Estimated Bound",trial_data["estimated_bounds_ub"])
        trial_data["greedy_values"] = [problem.objective(S[:i])for i in range(1,len(S)+1)]
        trial_data["lb_values"] = [problem.objective(S_lower[:i])for i in range(1,len(S)+1)]
        trial_data["ub_values"] = [problem.objective(S_upper[:i])for i in range(1,len(S)+1)]
        
        experiment_data.append(trial_data)

    print(experiment_data)
    now = datetime.datetime.now()
    date_time = now.strftime("%m-%d-%Y_%H-%M")
    print("date and time:",date_time)	
    with open('../../../Experimental_Data/experiment '+date_time+"_soft.json", 'w+') as outfile:
        json.dump(experiment_data, outfile)
Code example #16
    f = open(a, "r")
    content = f.read().encode()
    f.close()
    return content


# Read input files.
msg1 = readfile("1kb.txt")


def hashit(content):
    return SHA256.new(content)


print("Hash time")
with CodeTimer():
    digest1 = hashit(msg1)

# Key gen time
print("key gen time")
with CodeTimer():
    key = DSA.generate(1024)
    publickey = key.publickey()

with open("publickey.pem", "w") as f:
    f.write(publickey.export_key().decode())


# Sign the message
def sign(content, key):
    signer = DSS.new(key, 'fips-186-3')
    # assumed completion (the excerpt ends mid-function): sign the digest
    return signer.sign(hashit(content))
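The excerpt stops at signing. A matching verifier and timed round trip, assuming the standard pycryptodome DSS API (not part of the original snippet):

def verify(content, signature, key):
    # DSS verifiers raise ValueError when the signature does not match
    verifier = DSS.new(key, 'fips-186-3')
    try:
        verifier.verify(hashit(content), signature)
        return True
    except ValueError:
        return False

print("sign/verify time")
with CodeTimer():
    sig = sign(msg1, key)
    assert verify(msg1, sig, publickey)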
Code example #17
File: four_sum.py  Project: stevewyl/MyLeetCode
            i += 1
        i += 1
    return result


def fourSum(nums: List[int], start: int, target: int) -> List[List[int]]:
    """O(N^3)"""
    nums.sort()  # O(NlogN)
    n = len(nums)
    result = []
    i = 0
    while i < n:  # O(N)
        three_sum_res = threeSum(nums, i + 1, target - nums[i])  # O(N^2)
        for res in three_sum_res:
            res = [nums[i]] + res
            result.append(res)
        while i < n - 1 and nums[i] == nums[i + 1]:
            i += 1
        i += 1
    return result


if __name__ == "__main__":
    nums = [1, 0, -1, 0, -2, 2]
    target = 0
    with CodeTimer("four_sum"):
        try:
            res = fourSum(nums, 0, target)
            assert res == [[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]]
        except AssertionError:
            print(res)
Code example #18
    offset = 20
    dims_outer = [dims[0] + 2 * offset, dims[1] + 2 * offset]
    alphas = []
    sim = submodular_sim(dims=dims)
    a = 400
    b = 600
    X = [{
        'x': random.random() * dims[0] + offset,
        'y': random.random() * dims[1] + offset,
        'r': random.uniform(math.sqrt(a / math.pi), math.sqrt(b / math.pi))
    } for i in range(m1)]
    m2 = 3
    #Xn = [[{'x':random.random()*dims[0],'y':random.random()*dims[1],'r':random.uniform(math.sqrt(a/math.pi),math.sqrt(b/math.pi))} for i in range(m2)]for i in range(n)]

    print("Computing Greedy Algorithm")
    ct_lb = CodeTimer("lb_greedy", unit="s")
    ct_g = CodeTimer("greedy", unit="s")
    ct_lba = CodeTimer("accel lb greedy", unit="s")

    lb_times = []
    g_times = []
    lba_times = []

    n_s = list(range(0, 50, 10))
    for n_test in n_s:
        with ct_lb:
            S = lb_greedy(X, n_test, sim)
        print(ct_lb.took)
        print(sim.coverage(S))
        lb_times.append(ct_lb.took)
Code example #19
            'Hour', 'T Ground', '31-day Avg lag DB'
        ])  # read the weather data, ignoring the first 25 lines of header

    # filter data to only include the first hour of every day
    First_Hour = WeatherData[WeatherData["Hour"] == 1]
    # set index as zero-based day of year
    First_Hour = First_Hour.set_index([pd.Index(range(365))])
    # Equation 10, ACM, Appendix B: returns the mains water temperature as a
    # function of the ground temperature
    T_Mains = 0.65 * First_Hour['T Ground'] + 0.35 * First_Hour['31-day Avg lag DB']
    Zones_Dict[each] = T_Mains

#%%---------------------------GENERATE AND SAVE REQUESTED DRAW PROFILES---------
Data = pd.read_csv(File_Location)  # read the file to be converted
proper_order = Data.columns.to_list()  # reference correct column order
for each in New_Climate_Zones:  # repeat for each new zone required
    with CodeTimer('converting to climate zone {0} complete, it'.format(each)):
        # we are going to recalculate these columns
        del Data['Fraction Hot Water']
        del Data['Hot Water Volume (gal)']
        del Data['Hot Water Flow Rate (gpm)']
        del Data['Mains Temperature (deg F)']

        # get T_Mains temperature data from the previously created dictionary
        Zone_T_Mains = Zones_Dict[each]
        # use the swifter module to speed up the code on long files; swifter
        # actually slows things down below this threshold
        if len(Data) > 40000:
            # with CodeTimer('swap climate data (swifter)'): #for testing
Code example #20
File: three_sum.py  Project: stevewyl/MyLeetCode

from typing import List

from linetimer import CodeTimer


def threeSum(nums: List[int], start: int, target: int) -> List[List[int]]:
    """
    O(NlogN + N^2) = O(N^2)
    """
    nums.sort()  # O(NlogN)
    n = len(nums)
    result = []
    i = 0
    while i < n:  # O(N)
        two_sum_res = twoSum(nums, i + 1, target - nums[i])  # O(N)
        for res in two_sum_res:
            res = [nums[i]] + res
            result.append(res)
        while i < n - 1 and nums[i] == nums[i + 1]:
            i += 1
        i += 1
    return result


if __name__ == "__main__":
    nums = [-1, 0, 1, 2, -1, -4]
    target = 0
    with CodeTimer("three_sum"):
        try:
            res = threeSum(nums, 0, target)
            assert res == [[-1, -1, 2], [-1, 0, 1]]
        except AssertionError:
            print(res)
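threeSum relies on a twoSum helper that this excerpt omits. A minimal sketch consistent with the call site above (sorted input, unique pairs returned); this is an assumption, not the project's actual code:

def twoSum(nums: List[int], start: int, target: int) -> List[List[int]]:
    """O(N) two-pointer scan over the sorted suffix nums[start:]."""
    lo, hi = start, len(nums) - 1
    result = []
    while lo < hi:
        left, right = nums[lo], nums[hi]
        s = left + right
        if s < target:
            lo += 1
        elif s > target:
            hi -= 1
        else:
            result.append([left, right])
            # skip duplicates on both ends so each pair appears once
            while lo < hi and nums[lo] == left:
                lo += 1
            while lo < hi and nums[hi] == right:
                hi -= 1
    return result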
Code example #21
def getPanphonDistanceSingleton1():
    global _PANPHON_DISTANCE_SINGLETON
    if _PANPHON_DISTANCE_SINGLETON is None:
        _PANPHON_DISTANCE_SINGLETON = panphon.distance.Distance()
    return _PANPHON_DISTANCE_SINGLETON


def getPanphonDistanceSingleton2():
    if not hasattr(getPanphonDistanceSingleton2, "value"):
        setattr(getPanphonDistanceSingleton2, "value",
                panphon.distance.Distance())
    return getPanphonDistanceSingleton2.value


for iters in (1, 1, 10, 100, 1000, 10000):
    with CodeTimer(f"getPanphonDistanceSingleton1() {iters} times"):
        for i in range(iters):
            dst = getPanphonDistanceSingleton1()
    with CodeTimer(f"getPanphonDistanceSingleton2() {iters} times"):
        for i in range(iters):
            dst = getPanphonDistanceSingleton2()

for words in (1, 10):
    with CodeTimer(f"is_panphon() {words} words"):
        string = " ".join(["ei" for i in range(words)])
        is_panphon(string)

    with CodeTimer(f"is_panphon() on 1 word {words} times"):
        string = "ei"
        for i in range(words):
            is_panphon(string)
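For comparison, the same lazy-singleton effect can be had with functools.lru_cache; a sketch, not part of the original benchmark:

from functools import lru_cache

@lru_cache(maxsize=None)
def getPanphonDistanceSingleton3():
    # the first call constructs the Distance object; later calls hit the cache
    return panphon.distance.Distance()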
Code example #22
File: main.py  Project: jexp/cord-19-data-2-graph
import os

from linetimer import CodeTimer
from load_data import load
from download_data import download
from metadata_loader import run_metadata_load

if __name__ == "__main__":
    # with CodeTimer("Downloader", unit="s"):
    # if not os.environ["ENV"] == "DEVELOPMENT":
    # download()
    with CodeTimer("Importer", unit="s"):
        load()

    with CodeTimer("Import Metadata.csv", unit="s"):
        run_metadata_load()
Code example #23
def speed_experiment(n):
    #open geographic information
    centroids_meters, centroids_feet = open_locations_data()
    r_s = 3000
    experiment_data = []

    #get count sets for the observations
    with open('../../Experimental_Data/countdata.json', 'r') as infile:
        count_sets = json.load(infile)
    total_data = []

    #Run the speed test 10 times to take average of later
    for i in range(10):
        speed_data = []
        for t in range(12):
            data = {"t": t}
            print("Time:", t)
            data['g_times'] = []
            data['lb_times'] = []
            data['ub_times'] = []
            counts = count_sets[t]
            timer = CodeTimer()
            problem = probabilistic_coverage.probabilistic_coverage(
                centroids_meters,
                centroids_meters,
                counts,
                r_s,
                soft_edges=True,
                soft_edge_eps=0.005)
            n_s = list(range(1, n + 1, 3))
            X = problem.get_X()
            ap = approximator.approximator(problem.objective, 2)
            #S, times = problem.greedy_with_timing(n)

            #execute the 3 algorithms
            for n_i in n_s:
                print("n = ", n_i)
                print("Classical Greedy")
                with timer:
                    S = ap.greedy_fit(X, n_i)
                data['g_times'].append(timer.took)

                print("Pessimisitic Greedy")
                with timer:
                    S = ap.pairwise_fit(X, n_i, lb=True)
                data['lb_times'].append(timer.took)

                print("Optimistic Greedy")
                with timer:
                    S = ap.pairwise_fit(X, n_i, lb=False)
                data['ub_times'].append(timer.took)
            print(data)
            speed_data.append(data)
        total_data.append(speed_data)

        #save the data
        now = datetime.datetime.now()
        date_time = now.strftime("%m-%d-%Y_%H-%M")
        with open(
                '../../../Experimental_Data/speed experiment ' + date_time +
                "_soft.json", 'w+') as outfile:
            json.dump(speed_data, outfile)
Code example #24
File: martin-bulk-load.py  Project: timgates42/py2neo
from py2neo import Graph
from linetimer import CodeTimer

graph = Graph(scheme="bolt")

graph.run('MATCH (a) DETACH DELETE a')

graph.run("""UNWIND range(0, 250000) as i CREATE (t:Test) 
SET t.a = 'aaaaaaaaa', t.b = 'bbbbbbbbb', t.c = 'ccccccccc'
""")

# large data query
query = 'MATCH (t:Test) RETURN t.a as a, t.b as b, t.c as c'

results = graph.run(query)
with CodeTimer('Iterate over list of results', unit='s'):
    for x in results:
        pass

results = graph.run(query)
with CodeTimer('.to_data_frame()', unit='s'):
    results.to_data_frame()

results = graph.run(query)
with CodeTimer('.data()', unit='s'):
    results.data()
Code example #25
File: n_sum.py  Project: stevewyl/MyLeetCode
                while l < r and nums[l] == left:
                    l += 1
                while l < r and nums[r] == right:
                    r -= 1
    else:
        while i < nl:
            sub_result = nSumTarget(nums, n - 1, i + 1, target - nums[i])
            for res in sub_result:
                res = [nums[i]] + res
                result.append(res)
            while i < nl - 1 and nums[i] == nums[i + 1]:
                i += 1
            i += 1
    print(result)
    return result

def nSum(nums: List[int], n: int, target: int) -> List[List[int]]:
    nums.sort()
    return nSumTarget(nums, n, 0, target)

if __name__ == "__main__":
    nums = [1, 0, -1, 0, -2, 2]
    n = 4
    target = 0
    with CodeTimer("n_sum"):
        try:
            res = nSum(nums, n, target)
            assert res == [[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]]
        except AssertionError:
            print(res)
Code example #26
def effectiveness_experiment(n):
    """
    Runs the performance-effectiveness experiments. First it collects the
    problem data, then executes the greedy and pairwise algorithms on each
    data set and records the results to a JSON file.
    """

    #get york city geo data
    centroids_meters, centroids_feet = open_locations_data()
    r_s = 3000
    experiment_data = []

    #Collect Data for testing
    count_sets = []
    for t in range(12):
        start_time = datetime.datetime(2020, 1, 1, 0, 0,
                                       0) + datetime.timedelta(hours=2 * t)
        end_time = datetime.datetime(2020, 1, 1, 2, 0,
                                     0) + datetime.timedelta(hours=2 * t)
        print(start_time)
        counts = open_ride_data(start_time, end_time)
        count_sets.append(counts)

    # begin processing the collected data
    for t in range(12):
        estimated_bounds = []
        theoretical_bounds = []
        greedy_values = []
        lb_values = []

        #logging
        start_time = datetime.datetime(2020, 1, 1, 0, 0,
                                       0) + datetime.timedelta(hours=2 * t)
        end_time = datetime.datetime(2020, 1, 1, 2, 0,
                                     0) + datetime.timedelta(hours=2 * t)
        print(start_time)
        counts = count_sets[t]

        #set up timers
        g_timer = CodeTimer("greedy time", unit="s")
        lb_timer = CodeTimer("lb time", unit="s")
        ub_timer = CodeTimer("ub time", unit="s")

        trial_data = {"n": n, "t": t}

        # initialize problem
        problem = probabilistic_coverage.probabilistic_coverage(
            centroids_meters,
            centroids_meters,
            counts,
            r_s,
            soft_edges=False,
            soft_edge_eps=0.0005)
        X = problem.get_X()
        ap = approximator.approximator(problem.objective, 2)

        # execute Algorithms
        with g_timer:
            S = ap.greedy_fit(X, n)
        trial_data["greedy_time"] = g_timer.took
        with lb_timer:
            S_lower = ap.pairwise_fit(X, n, lb=True)
        trial_data["lb_time"] = lb_timer.took
        with ub_timer:
            S_upper = ap.pairwise_fit(X, n, lb=False)
        trial_data["ub_time"] = ub_timer.took

        # Lower Bound Approximation Analysis
        print("computing bounds for lower bound algo")
        S_g_l = ap.find_greedy_choices(S_lower, X)
        S_u_l = ap.find_greedy_choices(S_lower, X, upperbound=True)

        theoretical_alphas_lb = compute_theoretical_alphas(
            S_g_l, S_lower, problem)
        trial_data["theoretical_bounds_lb"] = [
            compute_bound_sequential(theoretical_alphas_lb[:i + 1])
            for i in range(len(theoretical_alphas_lb))
        ]

        estimated_alphas_lb = [
            ap.pairwise_upperbound(S_u_l[i], S_lower[:i]) /
            max([ap.pairwise_lowerbound(S_lower[i], S_lower[:i]), 0.0000001])
            for i in range(n)
        ]
        trial_data["estimated_bounds_lb"] = [
            compute_bound_sequential(estimated_alphas_lb[:i + 1])
            for i in range(len(estimated_alphas_lb))
        ]

        print("computing bounds for upper bound algo")
        S_g_u = ap.find_greedy_choices(S_upper, X)

        theoretical_alphas_ub = compute_theoretical_alphas(
            S_g_u, S_upper, problem)
        trial_data["theoretical_bounds_ub"] = [
            compute_bound_sequential(theoretical_alphas_ub[:i + 1])
            for i in range(len(theoretical_alphas_ub))
        ]

        estimated_alphas_ub = [
            ap.pairwise_upperbound(S_upper[i], S_upper[:i]) /
            max([ap.pairwise_lowerbound(S_upper[i], S_upper[:i]), 0.00001])
            for i in range(n)
        ]
        trial_data["estimated_bounds_ub"] = [
            compute_bound_sequential(estimated_alphas_ub[:i + 1])
            for i in range(len(estimated_alphas_ub))
        ]

        trial_data["greedy_values"] = [
            ap.f(S[:i]) for i in range(1,
                                       len(S) + 1)
        ]
        trial_data["lb_values"] = [
            ap.f(S_lower[:i]) for i in range(1,
                                             len(S) + 1)
        ]
        trial_data["ub_values"] = [
            ap.f(S_upper[:i]) for i in range(1,
                                             len(S) + 1)
        ]

        experiment_data.append(trial_data)

    print(experiment_data)
    now = datetime.datetime.now()
    date_time = now.strftime("%m-%d-%Y_%H-%M")
    print("date and time:", date_time)
    with open('../../Experimental_Data/experiment ' + date_time + "_soft.json",
              'w+') as outfile:
        json.dump(experiment_data, outfile)
Code example #27
def set_version():
    centroids_meters, centroids_feet = open_locations_data()   
    r_s = 4000
    t = 6
    start_time = datetime.datetime(2020,1,1,0, 0, 0)+datetime.timedelta(hours=2*t)
    end_time = datetime.datetime(2020,1,1,2, 0, 0)+datetime.timedelta(hours=2*t)
    print(start_time)
    counts = open_ride_data(start_time,end_time)
    problem1 = probalistic_coverage.ProbablisticCoverage.from_events_and_sensors(centroids_meters,centroids_meters,counts,r_s)
    problem2 = probalistic_coverage.ProbablisticCoverage_v2(centroids_meters,centroids_meters,counts,r_s)
    problem3 = probalistic_coverage.ProbablisticCoverage_v2(centroids_meters,centroids_meters,counts,r_s,soft_edges = True,soft_edge_eps = 0.05)
    # with CodeTimer("P1 not soft"):
    #     o1 = problem1.objective(problem1.sensors[:100],soft = False)
    # print("problem 1 Objective",o1)
    # with CodeTimer("P1 soft"):
    #     o2 = problem1.objective(problem1.sensors[:100],soft = True)
    # print("problem 1 Objective",o2)

    # with CodeTimer("P2 not soft"):
    #     o3 = problem2.objective(list(range(100)))
    # print("problem 2 Objective",o3)
    # with CodeTimer("P2 soft"):
    #     o4 = problem3.objective(list(range(100)))
    # print("problem 3 Objective",o4)

    # with CodeTimer("Greedy 1"):
    #     S = problem1.greedy(10,soft = False)
    # print(problem1.objective(S))
    ct = CodeTimer()
    g_times = []
    lb_times = []
    ac_times = []
    acg_times = []
    g_values = []
    lb_values = []
    ac_values = []
    acg_values = []
    n_s = list(range(1,200,5))
    for n_test in n_s:
        with ct:
            S = problem2.greedy(n_test)
        g_times.append(ct.took)
        v = problem2.objective(S)
        print(v)
        g_values.append(v)

        with ct:
            S = problem2.lb_greedy_2(n_test)
        lb_times.append(ct.took)
        v = problem2.objective(S)
        print(v)
        lb_values.append(v)

        with ct:
            S = problem2.lb_greedy_accel(n_test)
        ac_times.append(ct.took)
        v = problem2.objective(S)
        print(v)
        ac_values.append(v)
        with ct:
            S = problem2.lb_greedy_accel_general(n_test)
        acg_times.append(ct.took)
        v = problem2.objective(S)
        print(v)
        acg_values.append(v)

    plt.figure()
    plt.plot(n_s,g_times)
    plt.plot(n_s,lb_times)
    plt.plot(n_s,ac_times)
    plt.plot(n_s,acg_times)
    plt.legend(["g","lb","ac","acg"])

    print(ac_values)
    print(lb_values)
    plt.figure()
    plt.plot(n_s,g_values)
    plt.plot(n_s,lb_values)
    plt.plot(n_s,ac_values)
    plt.plot(n_s,acg_values)
    plt.legend(["g","lb","ac","acg"])
    plt.show()
    print("original",problem2.pairwise_lowerbound(11,list(range(10))))
    print("accelerated",problem2.pairwise_lowerbound_v2(11,list(range(10))))
Code example #28
File: main.py  Project: covidgraph/data_cord19
    SCRIPT_DIR = os.path.dirname(
        os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__)))
    )
    SCRIPT_DIR = os.path.join(SCRIPT_DIR, "..")
    sys.path.append(os.path.normpath(SCRIPT_DIR))


from dataloader.download_data import download
from dataloader.load_data import load_data, load_data_mp


# Adapt to CovidGraph Dataloaders env API


if __name__ == "__main__":
    config = getConfig()
    print(
        "Start with loglevel '{}' and ENV={}".format(
            config.LOG_LEVEL, os.getenv("ENV", "DEVELOPMENT")
        )
    )

    with CodeTimer("Downloader", unit="s"):
        download()
    with CodeTimer("Importer", unit="s"):
        if config.NO_OF_PROCESSES == 1:
            load_data()
        elif config.NO_OF_PROCESSES > 1:
            load_data_mp(config.NO_OF_PROCESSES, config.PAPER_BATCH_SIZE)
        else:
            raise ValueError("NO_OF_PROCESSES must be a positive integer")
Code example #29
File: main.py  Project: covidgraph/data_MaSyMos
    cypher_transaction_block = []
    record = True
    for line in lines:
        stripped_line = line.replace('\r', '').replace('\n', '').strip()
        if stripped_line == ":begin":
            record = True
            continue
        elif stripped_line == ":commit":
            record = False
            commit(
                isolate_single_statements_in_transaction_block(
                    cypher_transaction_block))
            cypher_transaction_block = []
            continue
        if record:
            # ignore empty line. only record lines with content
            if stripped_line:
                cypher_transaction_block.append(stripped_line)


# Delete existing/old source files, to prevent confusion with newer files
clean_data_sources()
# Download source files
download_data()

for filename in os_sorted(os.listdir(DATASOURCE_PATH)):
    if filename.endswith(".cypher"):
        print(f"Start processing '{filename}'")
        with CodeTimer(filename, unit="s"):
            parse_cypher_file(os.path.join(DATASOURCE_PATH, filename))
Code example #30
async def deepl_tr(
    text: str,
    from_lang: str = "auto",
    to_lang: str = "zh",
    page=None,
    verbose: Union[bool, int] = False,
    timeout: float = 5,
):
    # fmt: on
    """Deepl via pyppeteer.

    text = "Test it and more"
    from_lang="auto"
    to_lang="zh"
    page=PAGE
    verbose=True
    """
    #

    # set verbose=40 to turn most things off
    if isinstance(verbose, bool):
        if verbose:
            logzero.setup_default_logger(level=10)
        else:
            logzero.setup_default_logger(level=20)
    else:  # integer: log_level
        logzero.setup_default_logger(level=verbose)

    logger.debug(" Entry ")

    browser = None
    if page is None:
        try:
            # browser = await get_ppbrowser()
            browser = await pyppeteer.launch()
        except Exception as exc:
            logger.error(exc)
            raise

        try:
            page = await browser.newPage()
        except Exception as exc:
            logger.error(exc)
            raise

        url = r"https://www.deepl.com/translator"
        try:
            await page.goto(url, timeout=45 * 1000)
        except Exception as exc:
            logger.error(exc)
            raise

    url0 = f"{URL}#{from_lang}/{to_lang}/"

    url_ = f"{URL}#{from_lang}/{to_lang}/{quote(text)}"

    # selector = ".lmt__language_select--target > button > span"

    if verbose < 11 or verbose is True:
        _ = False  # silent
    else:
        _ = True
    with CodeTimer(name="fetching", unit="s", silent=_):
        _ = """
        await page.goto(url0)

        try:
            await page.waitForSelector(selector, timeout=8000)
        except Exception as exc:
            raise
        # """

        try:
            content = await page.content()
        except Exception as exc:
            logger.error(exc)
            raise

        doc = pq(content)
        text_old = doc('#source-dummydiv').text()
        logger.debug("Old source: %s", text_old)

        try:
            deepl_tr.first_run
        except AttributeError:
            deepl_tr.first_run = 1
            text_old = "_some unlikely random text_"

        # selector = "div.lmt__translations_as_text"
        if text.strip() == text_old.strip():
            logger.debug(" ** early result: ** ")
            logger.debug("%s, %s", text,
                         doc('.lmt__translations_as_text__text_btn').text())
            doc = pq(await page.content())
            content = doc('.lmt__translations_as_text__text_btn').text()
        else:
            # record content
            try:
                # page.goto(url_)
                await page.goto(url0)
            except Exception as exc:
                logger.error(exc)
                raise

            try:
                await page.waitForSelector(".lmt__translations_as_text",
                                           timeout=20000)
            except Exception as exc:
                logger.error(exc)
                raise

            doc = pq(await page.content())
            content_old = doc('.lmt__translations_as_text__text_btn').text()

            # selector = ".lmt__translations_as_text"
            # selector = ".lmt__textarea.lmt__target_textarea.lmt__textarea_base_style"

            # selector = ".lmt__textarea.lmt__target_textarea"
            # selector = '.lmt__translations_as_text__text_btn'
            try:
                await page.goto(url_)
            except Exception as exc:
                logger.error(exc)
                raise

            try:
                await page.waitForSelector(".lmt__translations_as_text",
                                           timeout=20000)
            except Exception as exc:
                logger.error(exc)
                raise

            doc = pq(await page.content())
            content = doc('.lmt__translations_as_text__text_btn').text()

            logger.debug("content_old: [%s], \n\t content: [%s]", content_old,
                         content)

            # loop until content changed
            idx = 0
            # bound = 50  # 5s
            while idx < timeout / 0.1:
                idx += 1
                await asyncio.sleep(.1)
                doc = pq(await page.content())
                content = doc('.lmt__translations_as_text__text_btn').text()
                logger.debug("content_old: (%s), \n\tcontent: (%s)",
                             content_old, content)

                if content_old != content and bool(content):
                    break

            logger.debug(" loop: %s", idx)

    logger.debug(" Fini ")

    if browser is not None:
        # only tear down the page and browser when this call launched them
        await page.close()
        await browser.close()
        browser.process.communicate()

    return content
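A hypothetical invocation of the coroutine above, letting it launch its own browser since no page is passed (requires pyppeteer and network access):

import asyncio

if __name__ == "__main__":
    translated = asyncio.get_event_loop().run_until_complete(
        deepl_tr("Test it and more", from_lang="auto", to_lang="zh"))
    print(translated)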