Example no. 1
def un_pack(dir):
    """unpack all file"""

    num = len(os.listdir(dir))

    count = 1

    for name in os.listdir(dir):

        progressbar.bar(count, num)
        file = os.path.join(dir, name)
        split = os.path.splitext

        name_end = split(file)

        # check the .tar.gz file
        if (name_end[-1] == ".gz" and split(name_end[0])[-1] == '.tar') or name_end[-1] == ".tgz":
            un_tgz(file)

        # check the .tar file
        elif name_end[-1] == '.tar':
            un_tar(file)

        elif name_end[-1] == '.zip':
            un_zip(file)

        count += 1
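The helpers un_tgz, un_tar and un_zip are not included in this snippet. A minimal sketch of what they might look like, assuming each archive is extracted into a directory named after the file and using only the standard library (names and behaviour are assumptions, not the author's code):

import os
import tarfile
import zipfile

def un_tgz(path):
    # hypothetical helper: extract a .tar.gz / .tgz next to the archive
    with tarfile.open(path, "r:gz") as tar:
        tar.extractall(os.path.splitext(path)[0])

def un_tar(path):
    # hypothetical helper: extract a plain .tar archive
    with tarfile.open(path, "r:") as tar:
        tar.extractall(os.path.splitext(path)[0])

def un_zip(path):
    # hypothetical helper: extract a .zip archive
    with zipfile.ZipFile(path) as zf:
        zf.extractall(os.path.splitext(path)[0])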
Example no. 2
def main():
    # setup pycuda and torch
    import pycuda.gl.autoinit
    import pycuda.gl
    assert torch.cuda.is_available()
    print('pytorch using GPU {}'.format(torch.cuda.current_device()))
    img = Image.open('misc/husky.jpg').transpose(Image.FLIP_TOP_BOTTOM)
    width, height = img.size

    state = torch.cuda.FloatTensor(width, height, 4)
    state = state.byte().contiguous()
    tex, cuda_buffer, sz = loadTexture('misc/husky.jpg')

    nbytes = state.numel() * state.element_size()

    for _ in bar(range(20000)):
        with cuda_activate(cuda_buffer) as ary:
            cpy = pycuda.driver.Memcpy2D()
            cpy.set_dst_device(state.data_ptr())
            cpy.set_src_array(ary)
            cpy.width_in_bytes = cpy.src_pitch = cpy.dst_pitch = nbytes // height
            cpy.height = height
            cpy(aligned=False)
            torch.cuda.synchronize()

    img_np = state.data.cpu().numpy().reshape(height, width, 4)
    print(img_np.shape)
    img_np[:, :, 3] = 255
    plt.imshow(img_np)
    plt.show()
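loadTexture and cuda_activate come from the surrounding module and are not shown here. A plausible sketch of cuda_activate as a context manager that maps a pycuda.gl.RegisteredImage and yields its CUDA array (an assumption based on the pycuda OpenGL interop API, not necessarily the author's implementation):

from contextlib import contextmanager

@contextmanager
def cuda_activate(img):
    # map the registered OpenGL image and yield the underlying CUDA array
    mapping = img.map()
    yield mapping.array(0, 0)  # (index, mipmap level)
    mapping.unmap()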
Example no. 3
def download_imgs(pages, cookie, directory):
    """
    Takes the `dict` of pages to download,
    the cookie to use and the directory
    to save to, and then does the magic.
    """
    proxy = urllib.request.ProxyHandler({})
    opener = urllib.request.build_opener(proxy)
    opener.addheaders = [(
        "user-agent",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.1 Safari/603.1.30"
    ), ("cookie", cookie)]
    urllib.request.install_opener(opener)

    for number, url in bar(pages.items()):
        urllib.request.urlretrieve(
            url, os.path.join(directory, f"page{number}.png"))
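A hypothetical call, assuming pages maps page numbers to image URLs (all values below are illustrative only):

import os

pages = {1: "https://example.com/book/page1.png",
         2: "https://example.com/book/page2.png"}
os.makedirs("scans", exist_ok=True)
download_imgs(pages, cookie="sessionid=abc123", directory="scans")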
Example no. 4
	print("IMAGE", imageDesc, "RUN-TIME:", totalTimeMin, "Minutes and", totalTimeSec, "Seconds")
	return totalTime
# *+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*
# IMAGE 1 RUN [custom complex]
# saves the start time of the program
startTime = time.time()

maxIterations = 255
imgx,imgy = 1000,1000
xmin = -0.1151953125
xmax = -0.0962109375
ymin = -0.935302734375
ymax = -0.916318359375

print("\n\n\nIMAGE 1 [custom complex] PROGRESS:")
for y in bar(range(imgy)):
	cy = ((ymax-ymin)/imgy)*y + ymin
	for x in range(imgx):
		cx = ((xmax-xmin)/imgx)*x + xmin
		c = newComplex(cx,cy)
		fractResult = mandelbrotNew(c)

		r = (((fractResult+1)//6)**2)
		g = 0
		b = (fractResult*50)%256

#saves the end time of the program and calculates time to run
endTime = time.time()
img1cust = timeCalc(startTime, endTime, "1 [custom complex]")
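newComplex, mandelbrotNew and timeCalc are defined earlier in the original script, and the snippet is truncated before the computed r, g, b values are written to an image. A self-contained sketch of a typical escape-time function, using Python's built-in complex type in place of the custom class (a stand-in, not the author's mandelbrotNew):

def mandelbrot_escape(c, max_iterations=255):
    # iterate z -> z*z + c and return the iteration count at which |z| exceeds 2
    z = 0j
    for i in range(max_iterations):
        z = z * z + c
        if abs(z) > 2:
            return i
    return max_iterations

print(mandelbrot_escape(complex(-0.105, -0.925)))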
# *+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*
# IMAGE 1 RUN [built-in complex]
Example no. 5
    def __init__(self, params, datasets):
        super(MobileNetv2_DeepLabv3, self).__init__()
        self.params = params
        self.datasets = datasets
        self.pb = bar()  # hand-made progressbar
        self.epoch = 0
        self.init_epoch = 0
        self.ckpt_flag = False
        self.train_loss = []
        self.val_loss = []
        self.summary_writer = SummaryWriter(log_dir=self.params.summary_dir)

        # build network
        block = []

        # conv layer 1
        block.append(nn.Sequential(nn.Conv2d(3, self.params.c[0], 3, stride=self.params.s[0], padding=1, bias=False),
                                   nn.BatchNorm2d(self.params.c[0]),
                                   # nn.Dropout2d(self.params.dropout_prob, inplace=True),
                                   nn.ReLU6()))

        # conv layer 2-7
        for i in range(6):
            block.extend(layers.get_inverted_residual_block_arr(self.params.c[i], self.params.c[i+1],
                                                                t=self.params.t[i+1], s=self.params.s[i+1],
                                                                n=self.params.n[i+1]))

        # dilated conv layer 1-4
        # first dilation=rate, follows dilation=multi_grid*rate
        rate = self.params.down_sample_rate // self.params.output_stride
        block.append(layers.InvertedResidual(self.params.c[6], self.params.c[6],
                                             t=self.params.t[6], s=1, dilation=rate))
        for i in range(3):
            block.append(layers.InvertedResidual(self.params.c[6], self.params.c[6],
                                                 t=self.params.t[6], s=1, dilation=rate*self.params.multi_grid[i]))

        # ASPP layer
        block.append(layers.ASPP_plus(self.params))

        # final conv layer
        block.append(nn.Conv2d(256, self.params.num_class, 1))

        # bilinear upsample
        block.append(nn.Upsample(scale_factor=self.params.output_stride, mode='bilinear', align_corners=False))

        self.network = nn.Sequential(*block).cuda()
        # print(self.network)

        # build loss
        self.loss_fn = nn.CrossEntropyLoss(ignore_index=255)

        # optimizer
        self.opt = torch.optim.RMSprop(self.network.parameters(),
                                       lr=self.params.base_lr,
                                       momentum=self.params.momentum,
                                       weight_decay=self.params.weight_decay)

        # initialize
        self.initialize()

        # load data
        self.load_checkpoint()
        self.load_model()
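As a concrete illustration of the dilation schedule commented above (the first dilated block uses dilation=rate, the following ones dilation=multi_grid*rate), with hypothetical values down_sample_rate=32, output_stride=16 and multi_grid=(1, 2, 4):

down_sample_rate, output_stride = 32, 16  # assumed values
multi_grid = (1, 2, 4)                    # assumed values

rate = down_sample_rate // output_stride  # 2, as computed in the constructor
dilations = [rate] + [rate * m for m in multi_grid]
print(dilations)                          # [2, 2, 4, 8]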
Example no. 6
    print(np.unique(xe))

    START = 0
    END = len(xt) // 2

    est = SGP4Estimator6D(Filter=UKHF)
    zz = np.concatenate((xr[START:END], xv[START:END]), axis=1)
    tt = xt[START:END]

    y = []

    def kf_cb(estimator, i):
        # filter callback: advance the progress bar and record the innovation norm
        # (y is only mutated, never rebound, so no global/nonlocal declaration is needed)
        pb.update(i)
        y.append(np.linalg.norm(estimator.kf.y))

    pb = bar().start(len(tt))
    pb.start()
    est.run(tt, zz, cb=kf_cb)
    pb.finish()

    #    pb = bar().start(len(xt[START:END]))
    #    pb.start()
    #    est.run(tt, zz, cb=kf_cb, flip=True)
    #    pb.finish()

    ye, yr, yv = est.model.sgp4_array(jd, fr)
    print(np.unique(xe))

    plt.plot(xr - yr)
Example no. 7
 def Onclick_bt_rk(self, event):
     import progressbar
     progressbar.bar()
     message3 = "完成入库"  # "stock-in complete"
     wx.MessageBox(message3)
Example no. 8
with open(file_list_file, 'w') as file_list:
    for wrf_dir in wrf_dirs:
        print('processing', wrf_dir)
        
        wrf_dir_date = datetime.strptime(wrf_dir, '%Y%m%d')

        wrf_run_dir = path.join(input_dir, wrf_dir)
        nc_files = [
            path.join(wrf_run_dir, f)
            for f in sorted(listdir(wrf_run_dir))
            if f.startswith('auxhist')
        ]


        for f in bar(nc_files, redirect_stdout=True):
            try:
                ds = xr.open_dataset(f)
                for var_name, var_risico in var_names.items():
                    
                    date_str = ds.Times[0].values.tobytes().decode("utf-8")
                    date = datetime.strptime(date_str, '%Y-%m-%d_%H:%M:%S')
                    date = pd.Timestamp(date).round('60min').to_pydatetime()
                    
                    
                    if wrf_dir_date == date:
                        # skip first interval
                        continue

                    if date.hour%3 != 0:
                        continue
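For reference, the rounding step above snaps each WRF output time to the nearest hour before the 3-hourly filter; a small self-contained illustration of the same calls:

from datetime import datetime
import pandas as pd

date = datetime.strptime('2020-01-15_02:59:30', '%Y-%m-%d_%H:%M:%S')
date = pd.Timestamp(date).round('60min').to_pydatetime()
print(date)                # 2020-01-15 03:00:00
print(date.hour % 3 == 0)  # True: this timestep would be kept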
Example no. 9
    def __init__(self, params):
        super(MobileNetv2, self).__init__()
        self.params = params
        self.pb = bar()  # hand-made progressbar
        self.epoch = 0
        self.test_epoch = 0
        self.train_loss = 0
        self.test_loss = 0
        self.train_acc = 0
        self.test_acc = 0
        self.summary_writer = SummaryWriter(log_dir=self.params.summary_dir)

        # build network
        block = []

        # conv layer 1
        block.append(
            nn.Sequential(
                nn.Conv2d(3,
                          self.params.c[0],
                          3,
                          stride=1,
                          padding=1,
                          bias=False), nn.BatchNorm2d(self.params.c[0]),
                nn.Dropout2d(self.params.dropout_prob, inplace=True),
                nn.ReLU6()))

        # conv layer 2-8
        for i in range(7):
            block.extend(
                layers.get_inverted_residual_block_arr(self.params.c[i],
                                                       self.params.c[i + 1],
                                                       t=self.params.t[i + 1],
                                                       s=self.params.s[i + 1],
                                                       n=self.params.n[i + 1]))

        # conv layer 9
        block.append(
            nn.Sequential(
                nn.Conv2d(self.params.c[-2], self.params.c[-1], 1, bias=False),
                nn.BatchNorm2d(self.params.c[-1]), nn.ReLU6()))

        # pool and fc
        block.append(
            nn.Sequential(
                nn.AvgPool2d(self.params.image_size //
                             self.params.down_sample_rate),
                nn.Dropout2d(self.params.dropout_prob, inplace=True),
                nn.Conv2d(self.params.c[-1],
                          self.params.num_class,
                          1,
                          bias=True)))

        self.network = nn.Sequential(*block).cuda()
        # print(self.network)

        # build loss
        self.loss_fn = nn.CrossEntropyLoss().cuda()

        # optimizer
        self.opt = torch.optim.RMSprop(self.network.parameters(),
                                       lr=self.params.base_lr,
                                       momentum=self.params.momentum,
                                       weight_decay=self.params.weight_decay)

        # initialize
        self.initialize()

        # load data
        self.load_checkpoint()
        self.load_model()
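The constructor assumes a params object carrying the MobileNetV2 hyper-parameters. A hypothetical configuration with field names inferred from the snippet; the values follow the standard MobileNetV2 table and are assumptions, not the author's settings:

class Params:
    t = [1, 1, 6, 6, 6, 6, 6, 6]                   # expansion factors per stage
    c = [32, 16, 24, 32, 64, 96, 160, 320, 1280]   # channel widths
    n = [1, 1, 2, 3, 4, 3, 3, 1]                   # inverted-residual repeats
    s = [2, 1, 2, 2, 2, 1, 2, 1]                   # strides per stage
    num_class = 1000
    image_size = 224
    down_sample_rate = 32
    dropout_prob = 0.2
    base_lr = 0.045
    momentum = 0.9
    weight_decay = 4e-5
    summary_dir = './summaries'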
Example no. 10
from PIL import Image, ImageEnhance
from progressbar import progressbar as bar
import os

os.system('clear')

img = Image.open("tree.jpg").convert('RGB')
#newImg = img.convert('1')

width, height = img.size  # avoid hard-coding the image dimensions
for y in bar(range(height)):
    for x in range(width):
        r, g, b = img.getpixel((x, y))
        img.putpixel((x, y), (r, int(g / 2.25), 0))

img.save('result.png', "PNG")
img.show()
'''

im = Image.open('image.gif')
rgb_im = im.convert('RGB')
r, g, b = rgb_im.getpixel((1, 1))

print(r, g, b)
(65, 100, 137)

'''
Example no. 11
    #   dict(var='FFM', fun=perc25_inv_mean, out_name='FFM_P25'),
    dict(var='FFM', fun=perc50_inv_mean, out_name='FFM_P50')
]

if __name__ == '__main__':
    # risico output folder
    out_folder = sys.argv[1]
    # netcdf output file
    filename = sys.argv[2]
    aggr_filename = sys.argv[3]

    files = os.listdir(out_folder)
    grid = None
    outputs = {}

    for f in bar(files):
        if f.endswith('.zbin'):
            model, model_date, date_ref, variable = f.split('_')
            variable = variable.replace('.zbin', '')

            if variable not in ext_names.keys(): continue

            if variable not in outputs:
                outputs[variable] = []

            if not grid:
                values, grid = read_gzip_binary(filename=out_folder + f,
                                                read_grid=True)
            else:
                values, _ = read_gzip_binary(filename=out_folder + f,
                                             read_grid=False)
Example no. 12
 #l2 = '2 40485  24.3912 120.4159 8777261  17.9050 284.4369  0.28561606 10816'
 #MIN(bstar)
 #l1 = '1 81358U          20028.49779613 -.00005615  00000-0 -72071+0 0  9998'
 #l2 = '2 81358  62.6434  61.1979 0370276 129.5311 233.8804  9.81670356    16'
 #MAX(no_kozai)
 #l1 = '1 44216U 19006CS  20035.07310469  .00413944  15423-5  43386-3 0  9995'
 #l2 = '2 44216  95.9131 264.4538 0065601 211.8276 147.5518 16.05814498 43974'
 xs = Satrec.twoline2rv(l1, l2)
 #
 delta = float(2. * np.pi / (xs.no_kozai * 1440.))/50 #50 points per round
 start = get_satepoch(xs.epochyr, xs.epochdays)      #Start of epoch
 N = int(31./delta)
 xt = np.array([start + delta * k for k in range(N + 1)])
 xfr, xjd = np.modf(xt)
 xe,xr,xv = xs.sgp4_array(xjd, xfr)
 #
 pb = bar().start(len(xt))
 pb.start()
 xmoe, xtt = sgp4_moe_from_state_arrays(xr,xv,xt,cb=pb.update)
 pb.finish()
 #xmoe = norm_moe(xmoe.T).T
 #
 est = SGP4MOERegression(p=0)
 est.fit(xtt, xmoe)
 em = est.predict(xtt)
 plt.plot(xtt-xtt[0], _res_kep(xmoe.T,em.T).T)
 #
 em = est.predict(xt)
 ye,yr,yv = state_from_sgp4_moe(xt, em)
 #plt.plot(xt-xt[0], xr-yr)
 
Example no. 13
def parse_sql(sql,
              user,
              db_name,
              db_host,
              port,
              pwd,
              timeout=False,
              compute_ground_truth=True,
              subset_cache_dir="./subset_cache/"):
    '''
    @sql: sql query string.

    @ret: python dict with the keys:
        sql: original sql string
        join_graph: networkX graph representing query and its
        join_edges. Properties include:
            Nodes:
                - table
                - alias
                # FIXME: matches, or separate it out into ops AND predicates
                - matches
            Edges:
                - join_condition

            Note: This is the only place where these strings will be stored.
            Each of the subqueries will be represented by their nodes within
            the join_graph, and we can use these properties to reconstruct the
            appropriate query for the subsets.

        subset_graph: networkX graph representing each subquery.
        Properties include all the ground truth data that will need to be
        computed:
            - true_count
            - pg_count
            - total_count
    '''
    start = time.time()
    join_graph = extract_join_graph(sql)
    subset_graph = generate_subset_graph(join_graph)

    print("query has", len(join_graph.nodes), "relations,",
          len(join_graph.edges), "joins, and", len(subset_graph),
          " possible subsets.", "took:",
          time.time() - start)

    ret = {}
    ret["sql"] = sql
    ret["join_graph"] = join_graph
    ret["subset_graph"] = subset_graph

    if not compute_ground_truth:
        ret["join_graph"] = nx.adjacency_data(ret["join_graph"])
        ret["subset_graph"] = nx.adjacency_data(ret["subset_graph"])
        return ret

    assert user is not None
    make_dir(subset_cache_dir)
    subset_cache_file = subset_cache_dir + get_subset_cache_name(sql)
    # we should check and see which cardinalities of the subset graph
    # we already know. Note that we have to cache at this level because
    # the maximal matching might make arbitrary choices each time.
    with shelve.open(subset_cache_file) as cache:
        if sql in cache:
            currently_stored = cache[sql]
        else:
            currently_stored = {}

    unknown_subsets = subset_graph.copy()
    unknown_subsets = unknown_subsets.subgraph(subset_graph.nodes -
                                               currently_stored.keys())

    print(len(unknown_subsets.nodes), "/", len(subset_graph.nodes),
          "subsets still unknown (", len(currently_stored), "known )")

    # let us update the ground truth values
    edges = get_optimal_edges(unknown_subsets)
    paths = list(reconstruct_paths(edges))
    for p in paths:
        for el1, el2 in zip(p, p[1:]):
            assert len(el1) > len(el2)

    # ensure the paths we constructed cover every possible path
    sanity_check_unknown_subsets = unknown_subsets.copy()
    for n1, n2 in edges.items():
        if n1 in sanity_check_unknown_subsets.nodes:
            sanity_check_unknown_subsets.remove_node(n1)
        if n2 in sanity_check_unknown_subsets.nodes:
            sanity_check_unknown_subsets.remove_node(n2)

    assert len(sanity_check_unknown_subsets.nodes) == 0

    subset_sqls = []

    for path in paths:
        join_order = [tuple(sorted(x)) for x in path_to_join_order(path)]
        join_order.reverse()
        sql_to_exec = nodes_to_sql(join_order, join_graph)
        if compute_ground_truth:
            prefix = "explain (analyze, timing off, format json) "
        else:
            prefix = "explain (analyze off, timing off, format json) "
        sql_to_exec = prefix + sql_to_exec
        subset_sqls.append(sql_to_exec)

    print("computing all", len(unknown_subsets),
          "unknown subset cardinalities with", len(subset_sqls), "queries")

    pre_exec_sqls = []

    # TODO: if we use the min #queries approach, maybe greedy approach and
    # letting pg choose join order is better?
    pre_exec_sqls.append("set join_collapse_limit to 1")
    pre_exec_sqls.append("set from_collapse_limit to 1")
    if timeout:
        pre_exec_sqls.append("set statement_timeout = {}".format(timeout))

    sanity_check_unknown_subsets = unknown_subsets.copy()
    for idx, path_sql in enumerate(bar(subset_sqls)):
        res = execute_query(path_sql, user, db_host, port, pwd, db_name,
                            pre_exec_sqls)
        if res is None:
            print("Query failed to execute, ignoring.")
            breakpoint()
            continue

        plan = res[0][0][0]
        plan_tree = plan["Plan"]
        results = list(analyze_plan(plan_tree))
        for result in results:
            # this assertion is invalid because PG may choose to use an implicit join predicate,
            # for example, if a.c1 = b.c1 and b.c1 = c.c1, then PG may choose to join on a.c1 = c.c1
            # assert nx.is_connected(join_graph.subgraph(result["aliases"])), (result["aliases"], plan_tree)
            aliases_key = tuple(sorted(result["aliases"]))
            if compute_ground_truth:
                currently_stored[aliases_key] = {
                    "expected": result["expected"],
                    "actual": result["actual"]
                }
            else:
                currently_stored[aliases_key] = {
                    "expected": result["expected"]
                }

            if aliases_key in sanity_check_unknown_subsets.nodes:
                sanity_check_unknown_subsets.remove_node(aliases_key)

        if idx % 5 == 0:
            with shelve.open(subset_cache_file) as cache:
                cache[sql] = currently_stored

    print(len(currently_stored), "total subsets now known")

    assert len(sanity_check_unknown_subsets.nodes) == 0

    with shelve.open(subset_cache_file) as cache:
        cache[sql] = currently_stored

    for node in subset_graph.nodes:
        subset_graph.nodes[node]["cardinality"] = currently_stored[node]

    print("total time:", time.time() - start)

    # json-ify the graphs
    ret["join_graph"] = nx.adjacency_data(ret["join_graph"])
    ret["subset_graph"] = nx.adjacency_data(ret["subset_graph"])

    return ret
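A hypothetical invocation, assuming a reachable Postgres instance holding the target database (connection details and query are illustrative only):

out = parse_sql(
    "SELECT COUNT(*) FROM title t, movie_info mi WHERE t.id = mi.movie_id",
    user="imdb", db_name="imdb", db_host="localhost", port=5432, pwd="",
    timeout=180000, compute_ground_truth=True)

# per-subquery cardinalities live on the json-ified subset graph
for node in out["subset_graph"]["nodes"]:
    print(node["id"], node.get("cardinality"))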