Example #1
def read_tiff(filename, bands=None, xBSize=5000, yBSize=5000):
    '''import'''
    import numpy as np
    from osgeo import gdal  # modern GDAL import; bare `import gdal` is the legacy form
    from tqdm import tqdm_gui
    '''program'''
    gdal.UseExceptions()  # enable exceptions before opening the dataset
    ds = gdal.Open(filename)
    nrow = ds.RasterYSize
    ncol = ds.RasterXSize
    if bands is None:
        bands = range(ds.RasterCount)
    data = np.zeros((nrow, ncol, len(bands)))
    for b in bands:
        band = ds.GetRasterBand(b + 1)  # GDAL bands are 1-indexed
        for i in tqdm_gui(range(0, nrow, yBSize),
                          desc="Channel %d/%d" % (b, len(bands) - 1),
                          leave=False):
            numRows = yBSize if i + yBSize < nrow else nrow - i
            for j in range(0, ncol, xBSize):
                numCols = xBSize if j + xBSize < ncol else ncol - j
                data[i:(i + numRows), j:(j + numCols),
                     b] = band.ReadAsArray(j, i, numCols, numRows)
    return data.astype(np.uint8)
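A usage sketch for the block reader above; the file path and band list are hypothetical:

rgb = read_tiff('scene.tif', bands=[0, 1, 2])  # hypothetical 3-band GeoTIFF
print(rgb.shape)  # (nrow, ncol, 3), dtype uint8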
Example #2
def main():
    activator = stats.Activator(desired_stats)
    progress_bar = None  # defined up front so the except branch can close it safely
    try:
        progress_bar = tqdm_gui(desc='Computing statistics',
                                total=os.path.getsize(dataset_path),
                                position=0,
                                leave=False)
        with open(dataset_path) as dataset:
            order = 1
            line = None
            for row in dataset:
                line = Line(
                    order,
                    row.rstrip('\n').split(
                        maxsplit=-1 if row[0] == 'H' else 2))
                for stat in activator.active_stats[line.cmd]:
                    stat(line)
                execute_line_no_loads(line)
                order += 1
                progress_bar.update(len(row))
            for end_func in activator.end_dataset:
                end_func(line)
        progress_bar.close()
    except OSError:
        print('Could not open dataset.')
        print(dataset_path)
        if progress_bar is not None:
            progress_bar.close()
        return
    for writer in activator.console_output:
        print(f'{writer.__self__.__class__.__name__}\n{writer()}\n')
    for graph in activator.graph_output:
        graph()
Example #3
def main():
    print('Welcome to unofficial dataset generator for key-value 2019 contest.')
    total_lines = valid_input(prompt='Enter dataset size: ', key_formatter=int)
    dataset: data.Dataset = valid_choice(data.datasets, heading='Choose dataset rules')
    data.generate_cmds(dataset)
    data.create_zones(total_lines, dataset)
    progress_bar = tqdm_gui(
        desc='Generating commands',
        total=total_lines,
        position=0,
        leave=False
    )
    order = 1
    for idx, end_of_zone in enumerate(dataset.zones):
        Implementation.cyc_cmd = dataset.cycles_cmds[idx]
        for i in range(order, end_of_zone + 1):
            generated.append(row := add_line())
            execute_line_no_loads(Line(order, row.split(maxsplit=-1 if row[0] == 'H' else 2)))
            progress_bar.update()
            order += 1
    progress_bar.close()
    print(
        f'\n{total_lines} lines successfully generated.', '\nPreview:\n', 
        *list(generated[:PREVIEW]), 
        '...' if total_lines > PREVIEW else '', 
        sep='\n'
    )
    file_dialog(generated, prompt='Do you want to save dataset?', start_dir=start_at, ext='txt')
    print('Program will now exit.')
Example #4
def main():
    file_info = [f'{idx + 1}: {item}' for idx, item in enumerate(sources)]
    print(*file_info, sep='\n', end='\n\n')
    progress_bar = None
    try:
        max_size = max(map(os.path.getsize, sources))
        progress_bar = tqdm_gui(desc='Finding differences in files',
                                total=max_size,
                                position=0,
                                leave=False)
        print(end=(newline := '\n'))
Example #5
    def add_label(self):
        result_down = self.check_files()
        if self.set_quarter == 1:
            self.startday = '%s-04-25' % self.set_year
            self.endday = '%s-07-25' % self.set_year
        elif self.set_quarter == 2:
            self.startday = '%s-07-25' % self.set_year
            self.endday = '%s-10-25' % self.set_year
        elif self.set_quarter == 3:
            self.startday = '%s-10-25' % self.set_year
            self.endday = '%s-01-25' % (self.set_year + 1)
        elif self.set_quarter == 4:
            self.startday = '%s-01-25' % (self.set_year + 1)
            self.endday = '%s-04-25' % (self.set_year + 1)
        else:
            print('Invalid quarter input')

        if result_down:
            data = pd.read_csv(
                '../report/base_information/perfprmance_%s-%s.csv' %
                (self.set_year, self.set_quarter),
                encoding='gbk')
            data.code = data.code.apply(lambda x: '%06d' % x)
            data['earnings'] = NaN
            data['fluctuate'] = NaN

            for item, code_ in tqdm.tqdm_gui(enumerate(data.code), total=len(data)):

                print(item, code_, self.startday, self.endday)
                try:
                    df = ts.get_k_data(code_, self.startday, self.endday)
                    price = df.close
                    price_f = price.iloc[0]
                    price_e = price.iloc[-1]
                    price_max = price.max()
                    price_min = price.min()
                    return_r = (price_e - price_f) / price_f * 100  # return (%)
                    p_range = (price_max - price_min) / price_min * 100  # fluctuation (%)
                    data.loc[item, 'earnings'] = return_r
                    data.loc[item, 'fluctuate'] = p_range
                    print(price_f, price_e, price_max, price_min)
                    print(return_r.round(-1), p_range.round(-1))

                except Exception:
                    pass  # skip codes whose price data cannot be fetched
            data.to_csv(
                '../report/base_information/perfprmance_earn_%s-%s.csv' %
                (self.set_year, self.set_quarter),
                encoding='gbk',
                index=False)

        else:
            pass
Example #6
def concat_csv(path_dir):
    file_list = os.listdir(path_dir)  # list the files in the directory
    file_list.sort()  # sort by file name

    for i in tqdm_gui(range(len(file_list)), desc='Merging progress'):
        file = file_list[i]
        path = os.path.join(path_dir, file)
        df_temp = pd.read_csv(path, sep=',', encoding="euc-kr")
        if i == 0:
            df = df_temp.copy()
        else:
            df = pd.concat([df, df_temp], axis=0, sort=False)

    return df
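A usage sketch, assuming a directory of comma-separated, euc-kr encoded CSV files; the paths are hypothetical:

merged = concat_csv('./data/daily/')  # hypothetical input directory
merged.to_csv('./data/merged.csv', index=False, encoding='euc-kr')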
Example #7
def get_all_magic():
    CodeList = list_input('all')
    i = 2
    df = get_magic(CodeList.code[i * 50])
    df = df.sort_index(ascending=False)
    for code_ in tqdm.tqdm_gui(CodeList.code[i * 50 + 1:i * 50 + 50]):
        time.sleep(3)
        print(code_)

        mg = get_magic(code_)

        mg.name = code_
        df = pd.concat([df, mg], axis=1)
    df.to_csv('..\\report\\magic_%s.csv' % CodeList.code[i * 50])
    return df
Example #8
    def end_of_dataset(self, last_line):
        total_keys, keys_one_mention, keys_set_after_del = 0, 0, 0
        distances_medians, distances_lengths = [], []

        progress_bar = tqdm_gui(desc='Computing medians',
                                total=len(self.records) + 4,
                                position=0,
                                leave=False)

        for key in self.records:
            record = self.records[key]
            total_keys += 1
            distances_lengths.append(len(record.distances) + 1)
            if record.set_after_del:
                keys_set_after_del += 1
            if record.distances:
                distances_medians.append(median(record.distances))
            else:
                keys_one_mention += 1
            progress_bar.update()

        def _median(source: list):
            m = median(source) if source else 0
            progress_bar.update()
            return m

        self.wtab = Ptw(['Subject', 'Value'], aligns='cR')
        self.results = {
            'Total number of keys':
            total_keys,
            'Keys seen once only':
            keys_one_mention,
            'Keys set after removing':
            keys_set_after_del,
            'Median of distances':
            _median(distances_medians),
            'Median of number of cmds per key':
            _median(distances_lengths),
            'Median of number of keys per hashload':
            _median(self.H_list_lengths),
            'Median of number of unique keys per hashload':
            _median(self.H_set_lengths)
        }
        for subject in self.results:
            self.wtab.write_raw([subject, self.results[subject]])
        self.wtab.add_raw_to_table()
        progress_bar.close()
Example #9
def main():
    try:
        progress_bar = tqdm_gui(desc='Solving dataset',
                                total=os.path.getsize(dataset_path),
                                position=0,
                                leave=False)
        with open(dataset_path) as dataset:
            line = None
            for row in dataset:
                line = Line(
                    None,
                    row.rstrip('\n').split(
                        maxsplit=-1 if row[0] == 'H' else 2))
                if (result := execute_line_return_result(line)):
                    generated.append(result)
                progress_bar.update(len(row))
        progress_bar.close()
    except OSError:
        print('Could not open dataset.')
        return
Example #10
    def run(self, graph=False, verbose=True):
        results = [0] * len(self.setup.teams)
        num_mafia = self.setup.countAlignment(self.setup.mafia_type) + 1
        num_town = self.setup.countAlignment(self.setup.town_type) + 1
        town_results_by_day = [[0 for x in range(num_mafia)]
                               for x in range(num_town)]
        maf_results_by_day = [[0 for x in range(num_mafia)]
                              for x in range(num_town)]
        self.combined_results_by_day = [[0 for x in range(num_mafia)]
                                        for x in range(num_town)]
        last_day_count = [0] * (len(self.setup.players) + 1)

        for i in tqdm_gui(range(self.num_iterations)):
            sim = MafiaGame(setup=self.setup, verbose=verbose)
            sim.play()
            for [town, maf] in sim.alignment_numbers_history:
                if (sim.winner.name == "Town"):
                    town_results_by_day[town][maf] += 1
                else:
                    maf_results_by_day[town][maf] += 1
            results[[x.name
                     for x in self.setup.teams].index(sim.winner.name)] += 1
            last_day_count[sum(sim.alignment_numbers_history[-1])] += 1

        # Results
        for i, alignment in enumerate(self.setup.teams):
            print(f"{alignment.name}: {results[i]/self.num_iterations}")
        for i, num in enumerate(last_day_count):
            if (num != 0):
                print(f"{i}: {num/self.num_iterations}")
        for i in range(len(town_results_by_day)):
            for j in range(len(town_results_by_day[i])):
                t = town_results_by_day[i][j]
                m = maf_results_by_day[i][j]
                if t + m == 0:
                    print("----", end="  ")
                    self.combined_results_by_day[i][j] = -1
                else:
                    self.combined_results_by_day[i][j] = t / (t + m)
                    print("%3.2f" % (self.combined_results_by_day[i][j]),
                          end="  ")
            print("")
        if (graph):
            self.drawGraph()
Example #11
def func2():
    input_list = sorted(
        glob('./../datasets/autocolorization/illustrations_resized/*.*'))

    check_folder(
        './../datasets/autocolorization/illustrations_resized_256/original/')
    check_folder(
        './../datasets/autocolorization/illustrations_resized_256/xdog/')

    for e, data in tqdm_gui(enumerate(input_list), total=len(input_list)):
        image = misc.imread(data, mode='RGB')  # note: scipy.misc.imread was removed in SciPy >= 1.2
        image = resize_and_crop(image, 256)
        misc.imsave(
            './../datasets/autocolorization/illustrations_resized_256/original/'
            + str(e) + '.png', np.array(image))
        image = xdog(image, sigma_list=[0.3, 0.4, 0.5])
        misc.imsave(
            './../datasets/autocolorization/illustrations_resized_256/xdog/' +
            str(e) + '.png', np.array(image))
Example #12
def add_google_matrix():
    distance_km_list = []
    time_min_list = []
    id_list = []

    df_ulykke = pd.read_csv('./csv/komplett2010.csv',
                            delimiter=',',
                            encoding='iso-8859-1')
    df_ulykke = df_ulykke[[
        'Ulykkes id', 'latitude', 'longitude', 's_id', 's_latitude',
        's_longitude'
    ]]

    for i in tqdm_gui(range(len(df_ulykke))):
        try:
            acc_id = df_ulykke['Ulykkes id'][i]
            lat = df_ulykke['latitude'][i]
            long = df_ulykke['longitude'][i]
            origin = f'{lat},{long}'
            s_lat = df_ulykke['s_latitude'][i]
            s_long = df_ulykke['s_longitude'][i]
            destination = f'{s_lat},{s_long}'
        except Exception:
            continue  # skip rows with missing coordinates instead of reusing stale values
        try:
            get = get_google_dist_matrix(origin, destination)
        except Exception:
            get = [0, 0]
        distance_km = get[0]
        time_min = get[1]
        distance_km_list.append(distance_km)
        time_min_list.append(time_min)
        id_list.append(acc_id)
        print(f'Added info for ulykke {acc_id}')
    google_distance = pd.DataFrame({
        'sv_acc_id': id_list,
        'road_km': distance_km_list,
        'time_min': time_min_list
    })
    google_distance.to_csv('google_distance_matrix.csv')
    print('I made the file, now let me rest -.- ')
Example #13
def get_progress_bar(progress_bar_type, description, total, unit):
    """Construct a tqdm progress bar object, if tqdm is     ."""
    if tqdm is None:
        if progress_bar_type is not None:
            warnings.warn(_NO_TQDM_ERROR, UserWarning, stacklevel=3)
        return None

    try:
        if progress_bar_type == "tqdm":
            return tqdm.tqdm(desc=description, total=total, unit=unit)
        elif progress_bar_type == "tqdm_notebook":
            return tqdm.tqdm_notebook(desc=description, total=total, unit=unit)
        elif progress_bar_type == "tqdm_gui":
            return tqdm.tqdm_gui(desc=description, total=total, unit=unit)
    except (KeyError, TypeError):
        # Protect ourselves from any tqdm errors. In case of
        # unexpected tqdm behavior, just fall back to showing
        # no progress bar.
        warnings.warn(_NO_TQDM_ERROR, UserWarning, stacklevel=3)
    return None
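A usage sketch for the helper above; the description, total and unit are made-up values:

bar = get_progress_bar('tqdm_gui', 'Downloading rows', 10000, 'rows')  # hypothetical arguments
if bar is not None:  # None means tqdm is unavailable or an unknown type was requested
    bar.update(500)
    bar.close()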
Example #14
    def __download_data(self, show_progress=False):
        url = URL + self.lang + ".tar.gz"
        print("Downloading a new data set in " + VALID_LANGUAGES[self.lang] +
              " to " + self.path)
        print("from " + url + "...")
        try:
            if show_progress:
                from tqdm import tqdm_gui
                with open(self.archive, "wb") as file:
                    for data in tqdm_gui(
                            requests.get(url, stream=True).iter_content()):
                        file.write(data)
            else:
                from tqdm import tqdm
                with open(self.archive, "wb") as file:
                    for data in tqdm(
                            requests.get(url, stream=True).iter_content()):
                        file.write(data)
        except KeyboardInterrupt:
            print("Interrupted by user.")
            self.__del_archive()
            sys.exit(1)
Example #15
def user_similarity_analysis(names, uids, filename):
    doc_path = './data/all_doc_list.pkl'
    if os.path.exists(doc_path):
        with open(doc_path, 'rb') as f:
            all_doc_list = pickle.load(f)
    else:
        all_doc_list = []

    for name, uid in tqdm_gui(list(zip(names, uids))[len(all_doc_list):]):
        print('Fetching danmaku for: {}'.format(name))
        user = User(uid)
        user.get_danmaku()
        print('Danmaku count: {}'.format(len(user.danmaku_list)))
        user.danmaku_list.extract_keywords(500)
        doc = user.danmaku_list.tags
        # doc = [word for sentence in user.danmaku_list for word in jieba.cut(sentence)]
        all_doc_list.append(doc)
        with open(doc_path, 'wb') as f:
            pickle.dump(all_doc_list, f)

    global dictionary, corpus
    dictionary = corpora.Dictionary(all_doc_list)
    corpus = [dictionary.doc2bow(doc) for doc in all_doc_list]

    similarity_matrix = np.zeros((len(names), len(names)))
    dataset = np.zeros((len(names), len(dictionary.keys())))
    for idx, name in enumerate(names):
        doc_test_list = all_doc_list[idx]
        sim_row, data_row = calc_similarities(doc_test_list)
        similarity_matrix[idx, :] = sim_row
        dataset[idx, :] = data_row
    sim_frame = pd.DataFrame(similarity_matrix, index=names, columns=names)
    sim_frame.to_csv('data/{}.csv'.format(filename), encoding='utf8')

    df = pd.DataFrame(dataset, index=names)
    df.to_csv('data/{}_dataset.csv'.format(filename), encoding='utf8')

    return sim_frame, df
Example #16
def _draw_rtree_nodes(graph, tree: RTreeBase, include_images):
    num_plots = len(list(tree.get_nodes())) + len(list(tree.get_leaf_entries()))
    with tqdm_gui(total=num_plots, desc="Drawing R-Tree", unit="node") as pbar:
        for level, nodes in enumerate(tree.get_levels()):
            subgraph = pydot.Subgraph(rank='same')
            graph.add_subgraph(subgraph)
            for node in nodes:
                img = None
                if include_images:
                    img = tempfile.mkstemp(prefix='node_', suffix='.png')[1]
                    highlight_node = node if not node.is_root else None
                    plot_rtree(tree, filename=img, show=False, highlight_node=highlight_node)
                subgraph.add_node(_rtree_node_to_pydot(node, img))
                pbar.update()
        leaf_subgraph = pydot.Subgraph(rank='same')
        graph.add_subgraph(leaf_subgraph)
        for entry in tree.get_leaf_entries():
            img = None
            if include_images:
                img = tempfile.mkstemp(prefix='entry_', suffix='.png')[1]
                plot_rtree(tree, filename=img, show=False, highlight_entry=entry)
            leaf_subgraph.add_node(_rtree_leaf_entry_to_pydot(entry, img))
            pbar.update()
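Using tqdm_gui as a context manager, as above, guarantees the bar is closed even if drawing fails partway. A minimal standalone sketch of the same pattern:

from tqdm import tqdm_gui

with tqdm_gui(total=100, desc='Working', unit='step') as pbar:
    for _ in range(100):
        pbar.update()  # the bar closes automatically on exit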
Example #17
def tag_similarity_analysis(keywords, filename, tids_1=None):
    doc_path = './data/all_doc_list.pkl'
    if os.path.exists(doc_path):
        with open(doc_path, 'rb') as f:
            all_doc_list = pickle.load(f)
    else:
        all_doc_list = []

    for keyword in tqdm_gui(keywords[len(all_doc_list):]):
        danmaku_list = search_and_get_danmaku(keyword, tids_1)
        danmaku_list.extract_keywords(500)
        doc = danmaku_list.tags
        all_doc_list.append(doc)
        with open(doc_path, 'wb') as f:
            pickle.dump(all_doc_list, f)

    global dictionary, corpus
    dictionary = corpora.Dictionary(all_doc_list)
    corpus = [dictionary.doc2bow(doc) for doc in all_doc_list]

    similarity_matrix = np.zeros((len(keywords), len(keywords)))
    dataset = np.zeros((len(keywords), len(dictionary.keys())))
    for idx, name in enumerate(keywords):
        doc_test_list = all_doc_list[idx]
        sim_row, data_row = calc_similarities(doc_test_list)
        similarity_matrix[idx, :] = sim_row
        dataset[idx, :] = data_row
    sim_frame = pd.DataFrame(similarity_matrix,
                             index=keywords,
                             columns=keywords)
    sim_frame.to_csv('data/{}.csv'.format(filename), encoding='utf8')

    df = pd.DataFrame(dataset, index=keywords)
    df.to_csv('data/{}_dataset.csv'.format(filename), encoding='utf8')

    return sim_frame, df
Example #18
    def retrain(self, steps):
        inp, out = self.build_full('QQQ')
        inp = np.reshape(inp[:, :, :, 0], newshape=[-1, 30, 5, 1])
        out = np.reshape(out, [-1, 5, 5, 1])
        meta = '{}/{}_model.ckpt.meta'.format(self.direc, self.symbol.lower())
        ckpt = '{}/{}_model.ckpt'.format(self.direc, self.symbol.lower())
        with tf.Session() as sess:
            saver = tf.train.import_meta_graph(meta)
            saver.restore(sess, ckpt)
            for i in tqdm_gui(range(int(steps))):

                if i % 50 == 0:
                    train_accuracy = sess.run("Mean:0",
                                              feed_dict={
                                                  "input_:0": inp,
                                                  "exp_out:0": out
                                              })
                    ac = sess.run("Sub:0",
                                  feed_dict={
                                      "input_:0": inp,
                                      "exp_out:0": out
                                  })
                    print(
                        '========================SUMMARY REPORT============================='
                    )
                    print('step %d, train loss: %g' % (i, train_accuracy))
                    print('Validation accuracy {}%'.format(str(ac)))
                    # print('Estimated Time Remaining = ' + str(round((20000-i)*(timer/60)/60,2)) + ' Hours')
                    print(
                        '==================================================================='
                    )
                sess.run("Adam", feed_dict={"input_:0": inp, "exp_out:0": out})
            saver.save(sess, ckpt)
            with open(self.direc + "/time.txt", 'w') as f:
                f.write('%s' % (self.get_time()))
            print(self.get_time())
Example #19
from tqdm import tqdm_gui
import time

# tqdm_gui requires matplotlib; it sets gui=True internally, so the flag
# does not need to be passed explicitly.
for i in tqdm_gui(range(1000)):
    time.sleep(0.1)
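tqdm_gui also supports manual updates for work that is not a plain iterable; a minimal sketch:

from tqdm import tqdm_gui
import time

pbar = tqdm_gui(total=50, desc='Manual updates')
for _ in range(50):
    time.sleep(0.05)
    pbar.update(1)  # advance the bar by one step
pbar.close()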
Example #20
def aug_function(inpath, outputpath, incsvfilepath, outcsvfilepath, augtype,
                 t_factor, r_factor, s_factor):
    #print("\n")
    #print(augtype)
    #print("\n")

    with open(incsvfilepath, 'r') as file1:  # 2. open file containing bounding box coordinates
        lines = file1.readlines()
    randlist = lines  # selecting k random images to augment

    for i in tqdm_gui(randlist):

        x = i.strip().split(",")
        mylist = [
            int(float(x[1])),
            int(float(x[2])),
            int(float(x[3])),
            int(float(x[4]))
        ]  # extract coordinates from file
        cord = [int(c) for c in mylist]  # copy coordinates without shadowing the loop variable

        a = x[0]  # image filename

        imageFolderPath = inpath  # 3. input image folder path
        loc = os.path.join(imageFolderPath, a)  #location of image

        # # Brightness and Contrast
        if (augtype == 'Brightness and Contrast'):

            image = cv.imread(loc)

            alpha = random.triangular(1, 2)
            beta = random.randint(20, 50)

            result = cv.addWeighted(image, alpha,
                                    np.zeros(image.shape, image.dtype), 0,
                                    beta)

            oname = "brightness_" + a
            csvr = [oname] + mylist

            with open(outcsvfilepath, 'a',
                      newline='') as file:  # 4. write to  csv file
                writer = csv.writer(file)
                writer.writerow(csvr)
            #print(mylist)

            #print(oname)
            drawRectangle(result, outputpath, cord, output_name=oname)

        else:

            img_class = utils.Image(path=loc)  # Create image class
            img = img_class.getImage()
            coord = cord
            if (augtype == 'Scale'):
                # # Test Scaling
                output = img_class.transform('scale', coord, s_factor)
                oname = "scaled_" + a  # name of output image

                ocord = output[1]  # augmented coordinates
                csvr = [oname
                        ] + output[1]  # line to be written to output csv file

                with open(outcsvfilepath, 'a',
                          newline='') as file:  # 4. write to  csv file
                    writer = csv.writer(file)
                    writer.writerow(csvr)

                drawRectangle(output[0],
                              outputpath,
                              output[1],
                              output_name=oname)  # for saving output image

            elif (augtype == 'Rotate'):
                #print("\nrotate")
                ##Test Rotation
                output = img_class.transform('rotate', coord, r_factor)
                oname = "rotated_" + a
                #   ##print("ONAME",oname)
                #    ##print("New Bounding Boxes rotation: ", output[1])
                ocord = output[1]
                csvr = [oname] + output[1]
                #    ##print("ocord",ocord)
                with open(outcsvfilepath, 'a', newline='') as file:
                    writer = csv.writer(file)
                    writer.writerow(csvr)

                drawRectangle(output[0],
                              outputpath,
                              output[1],
                              output_name=oname)
        #drawRectangle(img,outputpath, coord, output_name = "output_original.jpeg")

            elif (augtype == 'Translate'):
                #print("\ntranslate")
                # # Test Translation - Horizontal
                output = img_class.transform('translate', coord, t_factor)
                oname = "translated_" + a  # name of output image

                ocord = output[1]  # augmented coordinates
                csvr = [oname
                        ] + output[1]  # line to be written to output csv file

                with open(outcsvfilepath, 'a',
                          newline='') as file:  #write to  csv file
                    writer = csv.writer(file)
                    writer.writerow(csvr)

                drawRectangle(output[0],
                              outputpath,
                              output[1],
                              output_name=oname)  # for saving output image

            elif (augtype == 'Flip'):

                #print("\nflip")
                #   # Test Flipping
                output = img_class.transform('flip', coord)
                oname = "flipped_" + a  # name of output image

                ocord = ' '.join(str(e)
                                 for e in output[1])  # augmented coordinates

                csvr = oname + ' ' + ocord  # line to be written to output csv file
                listcsvr = csvr.split(" ")
                #print("\noutcsvfilepath: ")
                #print(outcsvfilepath)
                #print("\ncsvr: ")
                #print(csvr)
                with open(outcsvfilepath, 'a',
                          newline='') as file:  #write to  csv file
                    writer = csv.writer(file)
                    writer.writerow(listcsvr)

                drawRectangle(output[0],
                              outputpath,
                              output[1],
                              output_name=oname)  # for saving output image

            elif (augtype == 'Shear'):
                # # Test Shear
                #print("\nshear")
                output = img_class.transform('shear', coord)
                oname = "shear_" + a  # name of output image

                ocord = output[1]  # augmented coordinates
                csvr = [oname
                        ] + output[1]  # line to be written to output csv file

                with open(outcsvfilepath, 'a',
                          newline='') as file:  #write to  csv file
                    writer = csv.writer(file)
                    writer.writerow(csvr)

                drawRectangle(output[0],
                              outputpath,
                              output[1],
                              output_name=oname)  # for saving output image

            elif (augtype == 'Saturation'):
                #print("\nhsv")
                HSV_output_name = "Saturated_" + a
                img_HSV, bboxes_HSV = utils.RandomHSV(hue=None,
                                                      saturation=100,
                                                      brightness=None)(
                                                          img.copy(),
                                                          cord.copy())
                drawRectangle(
                    img_HSV,
                    outputpath,
                    bboxes_HSV,
                    output_name=HSV_output_name)  # for saving output image

                csvr = [HSV_output_name
                        ] + bboxes_HSV  # line to be written to output csv file
                with open(outcsvfilepath, 'a',
                          newline='') as file:  #write to  csv file
                    writer = csv.writer(file)
                    writer.writerow(csvr)
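A hypothetical invocation of aug_function; every path and factor below is made up for illustration:

aug_function('images/', 'augmented/', 'boxes.csv', 'boxes_aug.csv',
             'Rotate', t_factor=20, r_factor=15, s_factor=1.2)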
Example #21
                        sum(n.performance) / len(n.performance) * 100)
            plt.pause(.05)
            lastUpdate = time.time()
            pass
        pass

    pass

plt.show()

# %%
scorecard = []
testData = pd.read_csv("https://pjreddie.com/media/files/mnist_test.csv",
                       header=None)

ai = tqdm_gui(total=1)

for index, row in tqdm_gui(testData.iterrows(), total=testData[0].size):

    correctLabel = int(row[0])
    inputs = (numpy.asfarray(row[1:]) / 255.0 * 0.99) + 0.01

    outputs = n.query(inputs)
    label = numpy.argmax(outputs)

    if label == correctLabel:
        scorecard.append(1)
    else:
        scorecard.append(0)
Example #22
from tqdm import tqdm_gui
from time import sleep

for i in tqdm_gui(range(0, 50)):
    # tqdm_gui.set_description() could update the label here
    sleep(.03)

# Common tqdm/tqdm_gui keyword arguments include desc, total, leave,
# mininterval, unit, position and disable.
Example #23
para['kindex'] = GPdc.MAT52
para['mprior'] = sp.array([0.]+[-1.]*d)
para['sprior'] = sp.array([1.]*(d+1))
para['s'] = 1e-9
para['ninit'] = 10
#para['maxf'] = 2500
para['volper'] = 1e-6
para['DH_SAMPLES'] = 8
para['DM_SAMPLES'] = 8
para['DM_SUPPORT'] = 1200
para['DM_SLICELCBPARA'] = 1.
para['SUPPORT_MODE'] = [ESutils.SUPPORT_SLICELCB,ESutils.SUPPORT_SLICEPM]

OP = OPTutils.PESFS(ojf,lb,ub,para,initstate=copy.deepcopy(initstate))
#[OP.X,OP.Y,OP.S,OP.D,OP.R,OP.C,OP.T,OP.Tr,OP.Ymin,OP.Xmin,OP.Yreg,OP.Rreg] = copy.deepcopy(initstate)
for i in tqdm_gui(range(runn)):

    state = [OP.X,OP.Y,OP.S,OP.D,OP.R,OP.C,OP.T,OP.Tr,OP.Ymin]
    try:
        pass
        #OP.step()
    except Exception:
        import pickle
        pickle.dump(state,open('state.p','wb'))
        raise
        
    OE.step()
    O.step()
    #OL.step()
    
    plt.close(f)
Example #24
plt.plot(progress_range, durations)
#plt.plot(progress_range)
#plt.plot(durations)
plt.show()

count=0
for i in progress_range:
    # here do something long at each iteration
    time.sleep(durations[count])
    count += 1
    pbar.update(i) #this adds a little symbol at each iteration
pbar.finish()

print()
print("Using tqdm")

from tqdm import tqdm
count=0
for i in tqdm(progress_range):
    time.sleep(durations[count])
    count += 1

print("Using tqdm_gui")

from tqdm import tqdm_gui
count=0
for i in tqdm_gui(progress_range):
    time.sleep(durations[count])
    count += 1
Example #25
print(
    colored(
        'Before using please make sure your csv file has a "Name" Column to get names',
        'red'))
university = input("Enter Your University Name: ")
acronym = input("Enter Your University Acronym: ")
eventname = input("Enter event name: ")
leadname = input("Your Name: ")
currentdate = datetime.date(datetime.now())

fname = 'certificates/'
if os.path.exists(fname):
    shutil.rmtree(fname)
os.mkdir(fname)

for names in tqdm_gui(file['Name']):
    image = Image.new('RGB', (1000, 900), (255, 255, 255))

    draw = ImageDraw.Draw(image)
    font_path = './Almondita.ttf'
    fontdev = ImageFont.truetype('arial.ttf', size=35)
    fontcert = ImageFont.truetype('arial.ttf', size=55)
    fontname = ImageFont.truetype('arial.ttf', size=35)
    signature = ImageFont.truetype(font_path, 150)

    colordev = 'rgb(128, 128, 128)'
    colorcert = 'rgb(89, 89, 89)'
    colorname = 'rgb(77, 148, 255)'

    dsc_logo = Image.open('logo.jpg')
    dsc_logo = dsc_logo.resize((75, 75))
Example #26
img.save('Albedomap_grey_nov.png')
FILENAME = 'Albedomap_grey_nov.png'  #image can be in gif jpeg or png format
im = Image.open(FILENAME).convert('RGB')
pix = im.load()

# Define longitude of ascending node and loop statements
k, k_final, dk = -180, 180, 1
longrun = False
zeroiteration = True
firstiteration = False
seconditeration = False

# loop to obtain all data
while dogleg:
    warnings.simplefilter("ignore")
    for i in tqdm_gui(range(388)):
        time.sleep(0.0001)

        # initial values
        xtab = []
        ytab = []
        x2tab = []
        y2tab = []
        albedotab = []
        ttab = []
        lattab = []
        difftab = []

        #Several runs over range of possible k at different precisions
        if k > k_final and zeroiteration:
            zeroiteration = False
    "2010", "2011", "2012", "2013", "2014", "2015", "2016", "2017", "2018",
    "2019"
]
target_month = [
    "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12"
]
target_date = ["01", "02", "03", "04", "05", "06", "07", "08", "09", "10", \
               "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", \
               "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31"]

# Target url
url_format = "http://www.airportal.go.kr/servlet/aips.mobile.MobileRbHanCTL?cmd=c_getList&index=0&count=500&depArr=D&current_date={search_date}&tm={search_hour}&airport=ICN"
# headers = {'content-type': 'application/json;charset=utf-8'}

# Scraping main loop
for p_year in tqdm_gui(target_year, desc='target_year'):
    for p_month in target_month:
        # stop condition: collect data only through September 2019
        if p_year == "2019" and p_month == "10": break
        last_day = calendar.monthrange(int(p_year), int(p_month))[1]  # e.g. 30
        for p_date in target_date:
            # check against the last day of the month
            exception_tf = False
            if last_day < (int(p_date)): break

            search_date = p_year + p_month + p_date

            if search_date in empty_list:
                print("search_date{} is no data".format(search_date))
                continue
            if search_date in file_list:
Example #28
from tqdm import tqdm_gui
import random
from time import time

L = []  # the list must exist before the first append loop
for _ in tqdm_gui(range(10**8)):
    val = random.randint(0, 100)
    L.append(val)

for _ in tqdm_gui(range(10**6)):
    pos = random.randint(0, len(L))  ## insert before this position
    val = random.randint(0, 100)
    L.insert(pos, val)

## Trick
L = []
debut = time()
for i in range(10**3):
    L.insert(0, i)
fin = time()
duree = fin - debut
print("Duration insert: ", duree)
L = []
debut = time()
for i in range(10**3):
    L.append(i)
L.reverse()
fin = time()
duree = fin - debut
print("Duration append+reverse: ", duree)
Example #29
def train_network(stock, years, steps, direct, scale=True):

    bd = BuildDataset(symbol=stock, years=years, scale=scale)
    inp, out = bd.build_full('QQQ')

    inp = np.reshape(inp[:, :, :, 0], newshape=[-1, 30, 5, 1])

    out = np.reshape(out, [-1, 5, 5, 1])
    print("Input data size:" + str(inp.shape))
    print("Output data size:" + str(out.shape))

    inp, inp_v, y, y_v = train_test_split(inp, out, test_size=.10)

    print(inp_v.shape)
    def next_batch(num, data, labels):
        '''
        Return a total of `num` random samples and labels.
        '''
        idx = np.arange(0, len(data))
        np.random.shuffle(idx)
        idx = idx[:num]
        data_shuffle = [data[i] for i in idx]
        labels_shuffle = [labels[i] for i in idx]

        return np.asarray(data_shuffle), np.asarray(labels_shuffle)

    def weight_var(shape):
        init = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(init)

    def bias_var(shape):
        init = tf.constant(0.0, shape=shape)
        return tf.Variable(init)

    def conv2d(x, W, stride):
        return tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='SAME')

    def max_pool_2x2(x):
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    def Conv_Layer(input_, shape, name, stride=1, pool=False):
        # `stride` was originally named `str`, which shadowed the builtin
        with tf.variable_scope(name):
            with tf.name_scope('Weights'):
                w = weight_var(shape)
            with tf.name_scope('Bias'):
                b = bias_var([shape[3]])

            h_conv = tf.nn.relu(conv2d(input_, w, stride=stride) + b)

            if pool:
                return max_pool_2x2(h_conv)
        return h_conv
    with tf.device("/gpu:0"):
        x_ = tf.placeholder(tf.float32, shape=[None, 30, 5, 1], name='input_')
        y_exp = tf.placeholder(tf.float32, shape=[None, 5, 5, 1], name='exp_out')

        #Conv 1
        cnv1 = Conv_Layer(x_, [3, 3, 1, 6], name="conv1")

        #Conv 2
        cnv2 = Conv_Layer(cnv1, [3, 3, 6, 6], name="conv2")
        with tf.name_scope("max_pool"):
            cnv2 = max_pool_2x2(cnv2)

        cnv22 = Conv_Layer(cnv2, [3, 3, 6, 16], name="conv22")
        cnv23 = Conv_Layer(cnv22, [3, 3, 16, 32], name="conv23")
        #Conv 3
        cnv3 = Conv_Layer(cnv23, [3, 3, 32, 32], name="conv3")

        #Conv 4
        cnv4 = Conv_Layer(cnv3, [3, 3, 32, 32], name="conv4")
        with tf.name_scope("max_pool"):
            cnv4 = max_pool_2x2(cnv4)

        #Fully connected layers

        with tf.name_scope("fully_connected1"):
            w_1 = weight_var([8*2*32, 1000])
            b1 = bias_var([1000])
            c4_flat = tf.reshape(cnv4, [-1, 8*2*32])
            fc1 = tf.nn.relu(tf.matmul(c4_flat, w_1) + b1)

        with tf.name_scope("fully_connected2"):
            w_2 = weight_var([1000, 800])
            b2 = bias_var([800])
            fc2 = tf.nn.relu(tf.matmul(fc1, w_2) + b2)

        with tf.name_scope("fully_connected3"):
            w_3 = weight_var([800, 400])
            b3 = bias_var([400])
            fc3 = tf.nn.relu(tf.matmul(fc2, w_3) + b3)

        with tf.name_scope("Up_conv1"):
            fc3 = tf.reshape(fc3, shape=[-1, 20, 20, 1])
            #d_cnv1 = tf.layers.conv2d_transpose(fc3, filters=16 , kernel_size = 2, strides=2)
            cnvu1 = Conv_Layer(fc3, shape=[3, 3, 1, 6], name="dconv1", pool=True)

        with tf.name_scope("Up_conv2"):
            cnvu2 = Conv_Layer(cnvu1, shape=[3, 3, 6, 16], name="dconv1", pool=False)
            cnvu2 = Conv_Layer(cnvu2, shape=[3, 3, 16, 6], name="dconv2", pool=True)

        with tf.name_scope("Up_conv3"):
            #d_cnv2 = tf.layers.conv2d_transpose(cnvu1, filters=6, kernel_size=1, strides=1)
            logits = Conv_Layer(cnvu2, shape=[3, 3, 6, 1], name="dconv2")
            tf.identity(logits, name="y_out")

        #real_c = tf.summary.image("Exp", y_exp, max_outputs=2)
        #pred_c = tf.summary.image("pred", logits, max_outputs=2)

        loss = tf.reduce_mean(tf.losses.mean_squared_error(labels=y_exp, predictions=logits))
        train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
        acc = tf.reduce_mean(tf.losses.absolute_difference(labels=y_exp,predictions=logits))
        acc = tf.cast(tf.subtract(tf.constant(100, dtype=tf.float32),tf.multiply(acc, tf.constant(100, dtype=tf.float32))), tf.float32)
        train_c = tf.summary.scalar("Train_loss", loss)
        val_c = tf.summary.scalar('Val_loss', loss)
        Accuracy = tf.cast(loss, tf.float32)
        print(acc)
        print(Accuracy)

        saver = tf.train.Saver(tf.global_variables())  # tf.all_variables() is deprecated
        # TF SESSION
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            timer = 0
            train_start = time()
            write = tf.summary.FileWriter('/home/ian/Quant', sess.graph)
            j = 0
            prev_loss = 0  # named so it does not shadow the `loss` tensor above
            patience = 0
            for i in tqdm_gui(range(int(steps))):
                time_start = time()
                #batch = next_batch(len(), inp, y)

                if i % 5 == 0:
                    lo = acc.eval(feed_dict={x_: inp_v, y_exp: y_v})
                    if lo - prev_loss < .0005:
                        patience += 1
                    else:
                        patience = 0
                    prev_loss = lo
                    #print(patience)
                if patience == 5:
                    break

                if i % 50 == 0:

                    train_accuracy = Accuracy.eval(feed_dict={x_: inp, y_exp: y})
                    ac = acc.eval(feed_dict={x_: inp_v, y_exp: y_v})
                    tc = train_c.eval(feed_dict={x_: inp, y_exp: y})
                    vc = val_c.eval(feed_dict={x_: inp_v, y_exp: y_v})
                    #image1, image2 = sess.run([real_c, pred_c],feed_dict={x_: batch[0][:10], y_exp: batch[1][:10]})
                    #p = int(100 * np.random.rand())
                    #print(bd.rescale(logits.eval(feed_dict={x_: inp_v[:100]})[p]).astype(np.int))
                    #print(bd.rescale(y_v[p]).astype(np.int))
                    #write.add_summary(image1, i)
                    #write.add_summary(image2, i)
                    write.add_summary(tc, i)
                    write.add_summary(vc, i)

                    print('========================SUMMARY REPORT=============================')
                    print('step %d, train loss: %g' % (i,train_accuracy))
                    print('Validation accuracy {}%'.format(str(ac)))
                    #print('Estimated Time Remaining = ' + str(round((20000-i)*(timer/60)/60,2)) + ' Hours')
                    print('===================================================================')

                train_step.run(feed_dict={x_: inp, y_exp: y})
                time_stop = time()
                timer = time_stop - time_start

                #print('Step: ' + str(i) + ' Epoch Time: ' + str(round(timer,2)) + ' Secs.' + ' Time elapsed: ' +
                      #str(round((time_stop-train_start)/60, 2)) + ' Mins.' + str(round((i/80000) *100, 1)) + " % complete")

            directory = direct
            if not os.path.exists(directory):
                os.makedirs(directory)

            s_path = saver.save(sess, "{}/{}_model.ckpt".format(directory, stock.lower()))
            print("model saved in {}".format(s_path))

            with open(directory+"/time.txt", 'w') as f:
                f.write('%s' % (bd.get_time()))
            print(bd.get_time())
Example #30
def train(model, train_data, valid_data, optim, device, opt, start_i):
    log_train_file = log_valid_file = None  # so the later check is safe when logging is off
    if opt.log:
        log_train_file = opt.log + '/train.log'
        log_valid_file = opt.log + '/valid.log'
        print(
            '[INFO] Training performance will be written to {} and {}'.format(
                log_train_file, log_valid_file))
        # check log file exists or not
        if not (os.path.exists(opt.log)):
            os.mkdir(opt.log)
        if not (os.path.exists(log_train_file)
                and os.path.exists(log_valid_file)):
            with open(log_train_file,
                      'w') as log_tf, open(log_valid_file, 'w') as log_vf:
                log_tf.write('epoch,loss\n')
                log_vf.write('epoch,loss\n')

    train_loss_list = []
    valid_loss_list = []
    for epoch_i in tqdm_gui(range(start_i, opt.epoch)):
        print('[INFO] Epoch: {}'.format(epoch_i))
        # train process
        start = time.time()
        train_loss = train_epoch(model, train_data, optim, device, opt)
        print('\t- (Training)    loss: {:8.5f}, elapse: {:3.3f}'.format(
            train_loss, (time.time() - start) / 60))
        train_loss_list += [train_loss]  # record each train loss
        # valid process
        start = time.time()
        valid_loss = valid_epoch(model, valid_data, device, opt)
        print('\t- (Validation)    loss: {:8.5f}, elapse: {:3.3f}'.format(
            valid_loss, (time.time() - start) / 60))
        valid_loss_list += [valid_loss]  # record each valid loss

        # record train and valid log files
        if log_train_file and log_valid_file:
            with open(log_train_file,
                      'a') as log_tf, open(log_valid_file, 'a') as log_vf:
                log_tf.write('{},{:8.5f}\n'.format(epoch_i, train_loss))
                log_vf.write('{},{:8.5f}\n'.format(epoch_i, valid_loss))

        # to save trained model
        model_state_dict = model.state_dict()
        checkpoint = {
            'model': model_state_dict,
            'setting': opt,
            'epoch': epoch_i
        }
        if not (os.path.exists(opt.chkpt)):
            os.mkdir(opt.chkpt)
        if opt.save_mode == 'best':
            model_name = '{}/eye_model.chkpt'.format(opt.chkpt)
            if train_loss <= min(train_loss_list):
                torch.save(checkpoint, model_name)
                print('\t[INFO] The checkpoint has been updated ({}).'.format(
                    opt.save_mode))
        elif opt.save_mode == 'interval':
            if (epoch_i % opt.save_interval) == 0 and epoch_i != 0:
                model_name = '{}/{}_{:0.3f}.chkpt'.format(
                    opt.chkpt, epoch_i, train_loss)
                torch.save(checkpoint, model_name)
                print('\t[INFO] The checkpoint has been updated ({}).'.format(
                    opt.save_mode))
        elif opt.save_mode == 'best_and_interval':
            model_name = '{}/eye_model.chkpt'.format(opt.chkpt)
            if train_loss <= min(train_loss_list):
                torch.save(checkpoint, model_name)
                print('\t[INFO] The best has been updated ({}).'.format(
                    opt.save_mode))
            if (epoch_i % opt.save_interval) == 0 and epoch_i != 0:
                model_name = '{}/{}_{:0.3f}.chkpt'.format(
                    opt.chkpt, epoch_i, train_loss)
                torch.save(checkpoint, model_name)
                print('\t[INFO] The checkpoint has been saved ({}).'.format(
                    opt.save_mode))
        # save last trained model
        if epoch_i == (opt.epoch - 1):
            model_name = '{}/{}_{:0.3f}.chkpt'.format(opt.chkpt, epoch_i,
                                                      train_loss)
            torch.save(checkpoint, model_name)
            print('\t[INFO] The last checkpoint has been saved.')
Example #31
    variants = CaptureOne.selected_variants()
    for variant in variants:
        images.append(variant.parent_image.get())
else:
    raise ValueError(
        "Don't know what variants/images to use, please specify --all, --collection or --selected"
    )

if arguments["--progress"]:
    image_iterator = tqdm.tqdm(images,
                               unit="Image",
                               unit_scale=False,
                               leave=True,
                               position=0)
elif arguments["--progress-gui"]:
    image_iterator = tqdm.tqdm_gui(images, unit="Image", unit_scale=False)
else:
    image_iterator = images

for img_ae_obj in image_iterator:
    image = C1Image(img_ae_obj)

    matched_image = None
    if image.photo_name_size_key() in new_location_files:
        matched_image = new_location_files[image.photo_name_size_key()]

    log = None
    if arguments["--progress"]:
        log = tqdm.tqdm.write
    else:
        log = print
Example #32
                                                internationalsatlist, internationalsunlist = [], [], [], [], [],[], []

url = url_format.format(api_key=api_key, page_no=page_no)
response = requests.get(url, headers=headers)
html = response.text
soup = BeautifulSoup(html, 'html.parser')
rescode = response.status_code

# check that the data was received correctly; status code is 200 on success
if rescode == 200:
    soup = BeautifulSoup(html, 'html.parser')
    total_count = int(soup.find("totalcount").text)
else:
    print("search_error")

# for page_no in tqdm_notebook(range(total_count), desc = 'page'):
for page_no in tqdm_gui(range(total_count), desc='page'):

    print(page_no)

    url = url_format.format(api_key=api_key, page_no=page_no+1)
    response = requests.get(url, headers=headers)
    html = response.text
    soup = BeautifulSoup(html, 'html.parser')
    rescode = response.status_code
    # check that the data was received correctly; status code is 200 on success

    if rescode == 200:
        soup = BeautifulSoup(html, 'html.parser')

        airlinekorean = soup.find_all("airlinekorean")
        airport = soup.find_all("airport")