Example #1
def create_lmdb(paths_labels, lmdb_save_path):
    # configurations
    # img_folder = 'D:/dataloader/RSSCN7/train_lr/*'  # glob matching pattern
    # lmdb_save_path = 'D:/dataloader/RSSCN7/rsscn7_bicLRx4.lmdb'  # must end with .lmdb

    dataset = []
    data_size = 0

    print('Read images...')
    pbar = ProgressBar(len(paths_labels))
    for i, (v, l) in enumerate(paths_labels):
        img = cv2.imread(v, cv2.IMREAD_UNCHANGED)
        dataset.append(img)
        data_size += img.nbytes
        pbar.update('Read {}'.format(v))
    env = lmdb.open(lmdb_save_path, map_size=data_size * 10)
    print('Finish reading {} images.\nWrite lmdb...'.format(len(paths_labels)))

    pbar = ProgressBar(len(paths_labels))
    with env.begin(write=True) as txn:  # txn is a Transaction object
        for i, (v, l) in enumerate(paths_labels):
            base_name = os.path.splitext(os.path.basename(v))[0]
            key = base_name.encode('ascii')
            data = dataset[i]
            if dataset[i].ndim == 2:
                H, W = dataset[i].shape
                C = 1
            else:
                H, W, C = dataset[i].shape
            meta_key = (base_name + '.meta').encode('ascii')
            meta = '{:d}, {:d}, {:d}'.format(H, W, C)
            label_key = (base_name + '.label').encode('ascii')
            label = '{:d}'.format(l)
            # The encode is only essential in Python 3
            txn.put(key, data)
            txn.put(meta_key, meta.encode('ascii'))
            txn.put(label_key, label.encode('ascii'))
            pbar.update('Write {}'.format(v))
    print('Finish writing lmdb.')

    # create keys cache
    keys_cache_file = os.path.join(lmdb_save_path, '_keys_cache.p')
    env = lmdb.open(lmdb_save_path,
                    readonly=True,
                    lock=False,
                    readahead=False,
                    meminit=False)
    with env.begin(write=False) as txn:
        print('Create lmdb keys cache: {}'.format(keys_cache_file))
        keys = [key.decode('ascii') for key, _ in txn.cursor()]
        pickle.dump(keys, open(keys_cache_file, "wb"))
    print('Finish creating lmdb keys cache.')
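
For reference, a minimal sketch of reading one entry back from the lmdb written above. It assumes the key layout used by the writer ('<name>', '<name>.meta', '<name>.label') and 8-bit images, matching the cv2.imread output stored above:

import lmdb
import numpy as np

def read_img_lmdb(lmdb_save_path, base_name):
    env = lmdb.open(lmdb_save_path, readonly=True, lock=False,
                    readahead=False, meminit=False)
    with env.begin(write=False) as txn:
        buf = txn.get(base_name.encode('ascii'))
        meta = txn.get((base_name + '.meta').encode('ascii')).decode('ascii')
        label = int(txn.get((base_name + '.label').encode('ascii')))
    H, W, C = [int(s) for s in meta.split(',')]
    # dtype assumption: the writer stored uint8 arrays from cv2.imread
    img = np.frombuffer(buf, dtype=np.uint8).reshape(H, W, C)
    return img, label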
Example #2
 def __remove_colliders(self):
     pbar = ProgressBar(list(self.colliders.values()),
                        title="REMOVE TILE COLLIDERS")
     for guid, collider in self.colliders.items():
         self.objects_xml.remove_object(guid)
         pbar.update("%s removed" % collider.name)
     self.__clean_objects(self.colliders)
Example #3
def query_search(url):

    sublist = []
    engine_urls = [
        'https://www.virustotal.com/en/domain/%s/information/',
        'https://www.google.com/search?q=site:%s&num=100',
        'https://search.yahoo.com/search?p=%s&b=1'
    ]

    progress_bar = ProgressBar()
    for i, engine in enumerate(engine_urls, start=1):
        request_handler = RequestHandler()
        data = request_handler.send(engine % url.replace("http://", ""))
        if data == 'None':
            return None
        soup = BeautifulSoup(data, "lxml")
        results = set(
            re.findall(r"\w+\.{}".format(url.replace("http://", "")),
                       soup.text))
        for subdomain in results:
            if "www." not in subdomain:
                sublist.append(GREEN + "%s\n" % subdomain + RESET)
        progress_bar.progress(i, len(engine_urls), status='Searching')
    return sublist
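One caveat in the pattern above: the dots in the domain are not escaped, so 'example.com' would also match 'exampleXcom'. A hardened variant of the extraction step (a sketch, not the tool's actual code):

import re

def find_subdomains(page_text, domain):
    # re.escape turns each '.' into a literal dot instead of a wildcard
    pattern = r"[\w.-]+\." + re.escape(domain)
    return set(re.findall(pattern, page_text))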
Example #4
 def _test_step(model, optimizer, batch_dim, test_batch, start_time,
                _test_update):
     self.load(directory)
     from_start = timedelta(seconds=int((time.time() - start_time)))
     self.log('End of training ({} epochs) in {}'.format(
         epochs, from_start))
     if test_batch is not None:
         pr = ProgressBar(80, test_batch)
         output = defaultdict(list)
         for i in range(test_batch):
             for k, v in self.session.run(test_fetch_dict(
                     model, optimizer),
                                          feed_dict=test_feed_dict(
                                              model, optimizer,
                                              batch_dim)).items():
                 output[k].append(v)
             pr.update(i + 1)
         self.log(date=False)
         output = {k: np.mean(v) for k, v in output.items()}
     else:
         output = self.session.run(
             test_fetch_dict(model, optimizer),
             feed_dict=test_feed_dict(model, optimizer, batch_dim))
     if _test_update is not None:
         output.update(
             _test_update(model, optimizer, batch_dim, test_batch))
     p = pprint.PrettyPrinter(indent=1, width=80)
     self.log('Test --> {}'.format(p.pformat(output)))
     for k in output:
         self.print['Test ' + k].append(output[k])
     return output
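The defaultdict(list) / np.mean pairing above is a compact way to average per-batch metrics. A self-contained sketch of the same aggregation, with illustrative inputs:

from collections import defaultdict

import numpy as np

def average_metrics(batches):
    # batches: an iterable of dicts such as {'loss': 0.3, 'acc': 0.9}
    output = defaultdict(list)
    for batch in batches:
        for k, v in batch.items():
            output[k].append(v)
    return {k: np.mean(v) for k, v in output.items()}

print(average_metrics([{'loss': 0.4}, {'loss': 0.2}]))  # {'loss': 0.30...}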
Example #5
    def __retrieve_scene_objects(self):
        pbar = ProgressBar(list(
            Path(self.model_lib_folder).rglob(XML_FILE_PATTERN)),
                           title="Retrieve project infos")
        for path in pbar.iterable:
            if not is_octant(path.stem):
                msfs_scene_object = MsfsSceneObject(self.model_lib_folder,
                                                    path.stem, path.name)
                self.objects[msfs_scene_object.xml.guid] = msfs_scene_object
                pbar.update("%s" % path.name)
                continue

            if COLLIDER_SUFFIX in path.stem:
                msfs_collider = MsfsCollider(self.model_lib_folder, path.stem,
                                             path.name)
                self.colliders[msfs_collider.xml.guid] = msfs_collider
                pbar.update("%s" % path.name)
                continue

            msfs_tile = MsfsTile(self.model_lib_folder, path.stem, path.name)
            if not msfs_tile.lods:
                msfs_tile.remove_files()
            else:
                self.tiles[msfs_tile.xml.guid] = msfs_tile
            pbar.update("%s" % path.name)
Example #6
def main(input_folder, save_folder):
    """A multi-thread tool to crop sub-images."""
    input_folder += str(config["upscale_factor"])
    save_folder += str(config["upscale_factor"])
    n_thread = 20
    compression_level = 0  # 3 is the default value in cv2
    # CV_IMWRITE_PNG_COMPRESSION from 0 to 9. A higher value means a smaller size and longer
    # compression time. If read raw images during training, use 0 for faster IO speed.

    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
        print('mkdir [{:s}] ...'.format(save_folder))
    else:
        print('Folder [{:s}] already exists. Exit...'.format(save_folder))
        return

    img_list = []
    for root, _, file_list in sorted(os.walk(input_folder)):
        path = [os.path.join(root, x)
                for x in file_list]  # assume only images in the input_folder
        img_list.extend(path)

    def update(arg):
        pbar.update(arg)

    pbar = ProgressBar(len(img_list))

    print("Start 2 ...")
    pool = Pool(n_thread)
    for path in img_list:
        pool.apply_async(worker,
                         args=(path, save_folder, compression_level),
                         callback=update)
    pool.close()
    pool.join()
    print('All subprocesses done.')
Example #7
def extract_feat_faster_start(args, cfg):
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    num_gpus = len(args.gpu_id.split(','))

    MIN_BOXES = cfg.MODEL.BUA.EXTRACTOR.MIN_BOXES
    MAX_BOXES = cfg.MODEL.BUA.EXTRACTOR.MAX_BOXES
    CONF_THRESH = cfg.MODEL.BUA.EXTRACTOR.CONF_THRESH

    # Extract features.
    imglist = os.listdir(args.image_dir)
    num_images = len(imglist)
    print('Number of images: {}.'.format(num_images))

    if args.num_cpus != 0:
        ray.init(num_cpus=args.num_cpus)
    else:
        ray.init()
    img_lists = [imglist[i::num_gpus] for i in range(num_gpus)]

    pb = ProgressBar(len(imglist))
    actor = pb.actor

    print('Number of GPUs: {}.'.format(num_gpus))
    extract_feat_list = []
    for i in range(num_gpus):
        extract_feat_list.append(
            extract_feat_faster.remote(i, img_lists[i], cfg, args, actor))

    pb.print_until_done()
    ray.get(extract_feat_list)
    ray.get(actor.get_counter.remote())
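
The ProgressBar used here follows the common Ray pattern of a small counter actor that workers update remotely while the driver polls it; the code above passes pb.actor into each remote task and then blocks in pb.print_until_done(). A minimal sketch of what such a helper might look like (an assumption about its shape, not its actual source):

import time

import ray
from tqdm import tqdm

@ray.remote
class ProgressBarActor:
    def __init__(self):
        self.counter = 0

    def update(self, n=1):
        # remote tasks call actor.update.remote() after each finished item
        self.counter += n

    def get_counter(self):
        return self.counter

class ProgressBar:
    def __init__(self, total):
        self.total = total
        self.actor = ProgressBarActor.remote()

    def print_until_done(self):
        # poll the actor from the driver and mirror its count into tqdm
        bar = tqdm(total=self.total)
        while bar.n < self.total:
            bar.n = ray.get(self.actor.get_counter.remote())
            bar.refresh()
            time.sleep(0.1)
        bar.close()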
Example #8
    def split_tiles(self):
        self.__split_tiles(self.__retrieve_tiles_to_process())
        previous_tiles = {guid: tile for guid, tile in self.tiles.items()}
        new_tiles = {}

        # reload the project to retrieve the new tiles
        self.__retrieve_scene_objects()

        # create the matching dictionary between the previous tiles and the corresponding split tiles
        for previous_guid, previous_tile in previous_tiles.items():
            new_tiles[previous_tile] = [
                tile for tile in self.tiles.values()
                if previous_tile.name in tile.name
                and previous_tile.name != tile.name
            ]

        pbar = ProgressBar(
            new_tiles.items(),
            title="REPLACE THE OLD TILES WITH THE NEW SPLIT TILES "
            "IN THE SCENE DEFINITION FILE")
        for previous_tile, split_tile_list in new_tiles.items():
            self.__replace_tiles_in_objects_xml(previous_tile, split_tile_list)

            pbar.update(
                "split tiles added, replacing the previous %s tile" %
                previous_tile.name)
Example #9
    def __convert_dds_texture_files(self):
        ON_POSIX = 'posix' in sys.builtin_module_names

        self.converted_data, data = itertools.tee(self.converted_data)
        pbar = ProgressBar(list(data), title="convert " + self.DDS_FORMAT + " textures to " + self.BMP_FORMAT)
        for chunk in self.converted_data:
            # create a pipe to get data
            input_fd, output_fd = os.pipe()

            for obj in chunk:
                print("-------------------------------------------------------------------------------")
                print("prepare command line: ", self.path, obj['file_name'], obj['dest_file_name'])

            processes = [subprocess.Popen([self.path, obj['file_name'], obj['dest_file_name']], stdout=output_fd, close_fds=ON_POSIX) for obj in chunk]

            os.close(output_fd)  # close unused end of the pipe

            # read output line by line as soon as it is available
            with io.open(input_fd, "r") as file:
                for line in file:
                    print(line, end=str())

            for p in processes:
                p.wait()

            pbar.update("%s converted to %s" % (os.path.basename(obj['file_name']), os.path.basename(obj['dest_file_name'])))
Example #10
    def run(self, data):
        results_file_name = "results/" + re.sub(".*/", "",
                                                data) + "_email_found.txt"
        data = pd.read_csv(data, sep="\t")
        # columns appended: email, author info, author affiliation, article title, lookup method
        colnames = list(data.columns) + ["邮箱", "作者信息", "作者机构", "文章标题", "查找方式"]
        data = data.values
        try:
            print("results stored in {}".format(results_file_name))
            done = self.__net.get_file_column(results_file_name)
            done = [list(i) for i in done]
        except Exception as e:
            print(e)
            print("did not found results table, create a new one")
            done = []

        bar = ProgressBar(data.shape[0])
        with open(results_file_name, mode='a', encoding='utf-8') as results:
            if not len(done):
                results.write('\t'.join(colnames) + '\n')
            for row in data:
                row = [str(i) for i in row]
                author, organ = (row[0].strip(), row[1].strip())
                if not self.array_in([author, organ], done):
                    email = self.iter_find_author_email(author, organ)
                    results.write('\t'.join(list(row) + email) + '\n')
                    done.append([author, organ])
                    results.flush()
                bar.move()
Example #11
 def fix_tiles_lightning_issues(self, settings):
     isolated_print(EOL)
     lods = [lod for tile in self.tiles.values() for lod in tile.lods]
     pbar = ProgressBar(list(lods), title="FIX TILES LIGHTNING ISSUES")
     for lod in lods:
         lod.optimization_in_progress = False
         lod.prepare_for_msfs()
         pbar.update("%s lightning issues fixed" % lod.name)
Example #12
 def __backup_objects(objects: dict,
                      backup_path,
                      pbar_title="backup files"):
     pbar = ProgressBar(list())
     for guid, obj in objects.items():
         obj.backup_files(backup_path, dry_mode=True, pbar=pbar)
     if pbar.range > 0:
         pbar.display_title(pbar_title)
         for guid, obj in objects.items():
             obj.backup_files(backup_path, pbar=pbar)
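
The two-pass shape above (a dry run first, then the real pass) sizes the progress bar before any file is copied. A generic sketch of the idea, assuming do_work grows pbar.range during the dry run, as backup_files appears to do here:

def run_with_progress(items, do_work, pbar):
    # pass 1: dry run, only counts the work units into pbar.range
    for item in items:
        do_work(item, dry_mode=True, pbar=pbar)
    # pass 2: do the work now that the bar total is known
    if pbar.range > 0:
        for item in items:
            do_work(item, pbar=pbar)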
Example #13
 def update_min_size_values(self, settings):
     pbar = ProgressBar(list())
     pbar.range = len(self.tiles) + len(self.colliders)
     pbar.display_title("Update lod values")
     for tile in self.tiles.values():
         tile.update_min_size_values(settings.target_min_size_values,
                                     pbar=pbar)
     for collider in self.colliders.values():
         collider.update_min_size_values(settings.target_min_size_values,
                                         pbar=pbar)
Example #14
def fix_object_bounding_box(resize_box=True):
    if not bpy.context.scene: return

    create_collection = bpy.data.collections.new(name=COPY_COLLECTION_NAME)
    bpy.context.scene.collection.children.link(create_collection)
    assert (create_collection is not bpy.context.scene.collection)

    # copy objects
    copy_objects(bpy.context.scene.collection, create_collection, False)

    obs = []
    for obj in create_collection.objects:
        if obj.type == MESH_OBJECT_TYPE:
            obs.append(obj)

    ctx = bpy.context.copy()

    if len(obs) < 1:
        return None

    ctx[ACTIVE_OBJ] = obs[0]

    ctx[SELECTED_OBJ] = obs

    # In Blender 2.8x this needs to be the following instead:
    ctx[SELECTED_EDITABLE_OBJ] = obs

    # join copied objects
    bpy.ops.object.join(ctx)

    bpy.ops.object.select_all(action=SELECT_ACTION)

    objects = bpy.context.scene.objects

    # fix objects origin: this also fixes the bounding box for the whole tile
    pbar = ProgressBar(objects)
    for obj in objects:
        center_origin(obj)
        pbar.update("bounded box updated for %s" % obj.name)

    bpy.ops.object.select_all(action=DESELECT_ACTION)

    # remove the joined copy: select every object first, then delete once,
    # instead of deleting while iterating over the collection
    for obj in create_collection.objects:
        obj.select_set(True)
    bpy.ops.object.delete()

    bpy.ops.object.select_all(action=SELECT_ACTION)

    for c in bpy.context.scene.collection.children:
        bpy.context.scene.collection.children.unlink(c)

    if resize_box:
        # resize objects to fix spacing between tiles
        bpy.ops.transform.resize(value=(1.0045, 1.0045, 1))
Example #15
    def __multithread_process_data(processed_data, script_name, title,
                                   update_msg):
        ON_POSIX = 'posix' in sys.builtin_module_names

        processed_data, data = itertools.tee(processed_data)
        pbar = ProgressBar(list(data), title=title)

        try:
            for chunk in processed_data:
                # create a pipe to get data
                input_fd, output_fd = os.pipe()
                params = [
                    str(bpy.app.binary_path), "--background", "--python",
                    os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                 script_name), "--"
                ]

                for obj in chunk:
                    print(
                        "-------------------------------------------------------------------------------"
                    )
                    print(
                        "prepare command line: ",
                        "\"" + str(bpy.app.binary_path) +
                        "\" --background --python \"" + os.path.join(
                            os.path.dirname(os.path.dirname(__file__)),
                            script_name) + "\" -- " +
                        str(" ").join(obj["params"]))

                si = subprocess.STARTUPINFO()
                si.dwFlags = subprocess.STARTF_USESTDHANDLES | subprocess.HIGH_PRIORITY_CLASS

                processes = [
                    subprocess.Popen(params + obj["params"],
                                     stdout=output_fd,
                                     stderr=subprocess.DEVNULL,
                                     close_fds=ON_POSIX,
                                     startupinfo=si,
                                     encoding=ENCODING) for obj in chunk
                ]

                os.close(output_fd)  # close unused end of the pipe

                # read output line by line as soon as it is available
                with io.open(input_fd, "r", buffering=1) as file:
                    for line in file:
                        print(line, end=str())

                for p in processes:
                    p.wait()

                pbar.update("%s %s" % (obj["name"], update_msg))
        except Exception:
            # swallow any error and leave the remaining chunks unprocessed
            pass
Example #16
 def __retrieve_shapes(self):
     pbar = ProgressBar(list(
         Path(self.scene_folder).rglob(DBF_FILE_PATTERN)),
                        title="Retrieve shapes")
     for path in pbar.iterable:
         self.shapes[path.stem] = MsfsShape(self.scene_folder, path.stem,
                                            path.stem + XML_FILE_EXT,
                                            path.name,
                                            path.stem + SHP_FILE_EXT,
                                            path.stem + SHX_FILE_EXT)
         pbar.update("%s" % path.name)
Example #17
    def run(self, journal_list_file):
        base_path = os.path.dirname(os.path.abspath(__file__)) + '/'
        results_file_name = base_path + "results/" + \
            os.path.splitext(os.path.basename(journal_list_file))[
                0] + "_article_information.txt"
        done_journal_list_file_name = base_path + "data/done_journals.txt"
        with open(journal_list_file) as jl:
            journal_list = [
                jn.strip() for jn in jl.read().split('\n') if jn.strip()
            ]

        # columns: article ID, journal, year, issue, title, authors, affiliation,
        # keywords, author bio, then name/email/phone for the corresponding
        # author and for the first author
        colnames = [
            '文章ID', '期刊名', '年份', '期', '论文题目', '作者', '机构', '关键词', '作者简介',
            '通讯作者姓名', '通讯作者邮箱', '通讯作者电话', '第一作者姓名', '第一作者邮箱', '第一作者电话'
        ]

        try:
            print("results stored in {}".format(results_file_name))
            done = self.__net.get_file_column(results_file_name, number=1)
            done = done.tolist()
        except Exception:
            # print(e)
            print("did not found results table, create a new one")
            done = []

        try:
            with open(done_journal_list_file_name) as djl:
                done_journal_list = [
                    jn.strip() for jn in djl.read().split('\n') if jn.strip()
                ]
        except Exception:
            done_journal_list = []

        bar = ProgressBar(len(journal_list))
        with open(results_file_name, mode='a',
                  encoding='utf-8') as results, open(
                      done_journal_list_file_name, mode='a',
                      encoding='utf-8') as done_journal_file:
            if not len(done):
                results.write('\t'.join(colnames) + '\n')
            for journal in journal_list:
                if journal not in done_journal_list:
                    for paper_id in self.all_journal_ids(journal):
                        if paper_id not in done:
                            found_info = self.find_article_info(paper_id)
                            results.write('\t'.join(found_info) + '\n')
                            done.append(paper_id)
                            # results.flush()
                    done_journal_file.write(journal + '\n')
                    done_journal_list.append(journal)
                    # done_journal_file.flush()
                    bar.move()
                else:
                    bar.move()
Example #18
    def add_tile_colliders(self):
        # clean previous colliders
        self.__remove_colliders()

        lods = [lod for tile in self.tiles.values() for lod in tile.lods]
        pbar = ProgressBar(
            list(lods),
            title="REMOVE ROAD AND COLLISION TAGS IN THE TILE LODS")
        for lod in lods:
            lod.optimization_in_progress = False
            lod.remove_road_and_collision_tags()
            pbar.update("road and collision tags removed from %s" % lod.name)

        pbar = ProgressBar(list(self.tiles.values()),
                           title="ADD TILE COLLIDERS")
        for tile in self.tiles.values():
            tile_guid = tile.xml.guid
            new_collider = tile.add_collider()
            self.__add_object_in_objects_xml(tile_guid, new_collider)
            pbar.update("collider added for %s tile" % tile.name)
Example #19
    def remove_colliders(self):
        # clean previous colliders
        self.__remove_colliders()

        lods = [lod for tile in self.tiles.values() for lod in tile.lods]
        pbar = ProgressBar(
            list(lods), title="ADD ROAD AND COLLISION TAGS IN THE TILE LODS")
        for lod in lods:
            lod.optimization_in_progress = False
            lod.prepare_for_msfs()
            pbar.update("road and collision tags added from %s" % lod.name)
Example #20
 def backup_files(self, backup_subfolder):
     backup_path = os.path.join(self.backup_folder, backup_subfolder)
     if not os.path.isfile(
             get_backup_file_path(backup_path, self.scene_folder,
                                  self.SCENE_OBJECTS_FILE)):
         pbar = ProgressBar([self.SCENE_OBJECTS_FILE],
                            title="backup " + self.SCENE_OBJECTS_FILE)
         backup_file(backup_path,
                     self.scene_folder,
                     self.SCENE_OBJECTS_FILE,
                     pbar=pbar)
Example #21
    def _generate_AX(self):
        self.log('Creating features and adjacency matrices..')
        pr = ProgressBar(60, len(self.data))

        data = []
        smiles = []
        data_S = []
        data_A = []
        data_X = []
        data_D = []
        data_F = []
        data_Le = []
        data_Lv = []

        max_length = max(mol.GetNumAtoms() for mol in self.data)
        max_length_s = max(len(Chem.MolToSmiles(mol)) for mol in self.data)

        for i, mol in enumerate(self.data):
            A = self._genA(mol, connected=True, max_length=max_length)
            if A is not None:
                D = np.count_nonzero(A, -1)
                data.append(mol)
                smiles.append(Chem.MolToSmiles(mol))
                data_S.append(self._genS(mol, max_length=max_length_s))
                data_A.append(A)
                data_X.append(self._genX(mol, max_length=max_length))
                data_D.append(D)
                data_F.append(self._genF(mol, max_length=max_length))

                L = D - A
                Le, Lv = np.linalg.eigh(L)

                data_Le.append(Le)
                data_Lv.append(Lv)

            pr.update(i + 1)

        self.log(date=False)
        self.log(
            'Created {} features and adjacency matrices out of {} molecules!'.
            format(len(data), len(self.data)))

        self.data = data
        self.smiles = smiles
        self.data_S = data_S
        self.data_A = data_A
        self.data_X = data_X
        self.data_D = data_D
        self.data_F = data_F
        self.data_Le = data_Le
        self.data_Lv = data_Lv
        self.__len = len(self.data)
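
For reference, L = D - A above is the graph Laplacian step and np.linalg.eigh returns its eigendecomposition. A tiny self-contained example on a 3-node path graph (note it uses the textbook diag(D) - A form, whereas the code above broadcasts the degree vector directly):

import numpy as np

# adjacency matrix of a 3-node path graph: 0 - 1 - 2
A = np.array([[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]])
D = np.count_nonzero(A, -1)     # node degrees: [1, 2, 1]
L = np.diag(D) - A              # graph Laplacian
Le, Lv = np.linalg.eigh(L)      # eigenvalues (ascending) and orthonormal eigenvectors
print(Le)                       # [0., 1., 3.]: a connected graph has eigenvalue 0 once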
Example #22
def main():
    parser = argparse.ArgumentParser(
        'Extract sub images from Hyperspectral images.')
    parser.add_argument('-p', '--path', type=str, default='E:/HSI/CAVE/CAVE/')
    parser.add_argument('-o', '--out-path', type=str, default='')
    parser.add_argument('-t', '--threads', type=int, default=1)
    parser.add_argument('-c', '--crop-size', type=int, default=256)
    parser.add_argument('-s', '--stride', type=int, default=96)

    opt = parser.parse_args()
    in_path = opt.path
    out_path = opt.out_path
    n_threads = opt.threads
    crop_sz = opt.crop_size
    stride = opt.stride
    thres_sz = 30
    compression_level = 3

    if out_path == '':
        dataset_name = in_path.split('/')[-2].split('\\')[-1]
        if in_path[-1] == '/':
            out_path = in_path.replace(dataset_name + '/',
                                       dataset_name + '_sub.h5')
        else:
            out_path = in_path.replace(dataset_name, dataset_name + '_sub.h5')

    # if not os.path.exists(out_path):
    #     os.makedirs(out_path)
    #     print('mkdir [{:s}] ...'.format(out_path))
    # else:
    #     print('[*] Folder [{:s}] already exists.'.format(out_path))
    #     # return

    img_list = []
    for root, _, file_list in sorted(os.walk(in_path)):
        path = [os.path.join(root, x) for x in file_list]
        img_list.extend(path)

    def update(arg):
        pbar.update(arg)

    pbar = ProgressBar(len(img_list))

    pool = Pool(n_threads)
    for path in img_list:
        pool.apply_async(worker,
                         args=(path, out_path, crop_sz, stride, thres_sz,
                               compression_level),
                         callback=update)
    pool.close()
    pool.join()
    print("-----------Generation Finish-------------")
Example #23
 def __create_optimization_folders(self):
     pbar = ProgressBar(list())
     link_tiles_by_position = self.__link_tiles_by_position()
     for parent_tile, tiles in link_tiles_by_position.items():
         parent_tile.create_optimization_folders(tiles,
                                                 dry_mode=True,
                                                 pbar=pbar)
     if pbar.range > 0:
         pbar.display_title("Create optimization folders")
         for parent_tile, tiles in link_tiles_by_position.items():
             parent_tile.create_optimization_folders(tiles,
                                                     dry_mode=False,
                                                     pbar=pbar)
Example #24
def drawResultDirectory(filePath, resultPath, *args):
    fileList = sorted(glob(filePath))
    pbar = ProgressBar(len(fileList))

    for value in fileList:
        pbar.update("Read {}".format(value))
        # Read image
        img = Image.open(value, "r")

        img = drawResultFile(img, args)
        img.save(resultPath + os.path.basename(value))

    print("Completed!")
Example #25
            def _eval_step(epoch, epochs, min_epochs, model, optimizer,
                           batch_dim, eval_batch, start_time,
                           last_epoch_start_time, _eval_update):
                from_start = timedelta(seconds=int((time.time() - start_time)))
                last_epoch = timedelta(seconds=int((time.time() -
                                                    last_epoch_start_time)))
                eta = timedelta(
                    seconds=int((time.time() - start_time) * (epochs - epoch) /
                                epoch)) if (time.time() -
                                            start_time) > 1 else '-:--:-'

                self.log(
                    'Epochs {:10}/{} in {} (last epoch in {}), ETA: {}'.format(
                        epoch, epochs, from_start, last_epoch, eta))

                if eval_batch is not None:
                    pr = ProgressBar(80, eval_batch)
                    output = defaultdict(list)

                    for i in range(eval_batch):
                        for k, v in self.session.run(
                                eval_fetch_dict(epoch, epochs, min_epochs,
                                                model, optimizer),
                                feed_dict=eval_feed_dict(
                                    epoch, epochs, min_epochs, model,
                                    optimizer, batch_dim)).items():
                            output[k].append(v)
                        pr.update(i + 1)

                    self.log(date=False)
                    output = {k: np.mean(v) for k, v in output.items()}
                else:
                    output = self.session.run(
                        eval_fetch_dict(epoch, epochs, min_epochs, model,
                                        optimizer),
                        feed_dict=eval_feed_dict(epoch, epochs, min_epochs,
                                                 model, optimizer, batch_dim))

                if _eval_update is not None:
                    output.update(
                        _eval_update(epoch, epochs, min_epochs, model,
                                     optimizer, batch_dim, eval_batch))

                p = pprint.PrettyPrinter(indent=1, width=80)
                self.log('Validation --> {}'.format(p.pformat(output)))

                for k in output:
                    self.print[k].append(output[k])

                return output
Example #26
    def __find_different_tiles(self, tiles, project_to_compare_name,
                               tiles_to_compare, objects_xml_to_compare):
        different_tiles = []
        pbar = ProgressBar(tiles.items(), title="FIND THE DIFFERENT TILES")
        for guid, tile in pbar.iterable:
            found_tile = self.__find_by_tile_name(tile, tiles_to_compare)
            if not found_tile:
                different_tiles.append(tile)
            elif len(tile.lods) != len(found_tile.lods):
                different_tiles.append(tile)

            pbar.update("%s checked" % tile.name)

        return different_tiles
Example #27
    def _genReward(self, metric, batch_size=15):
        self.log('Calculating molecule rewards..')

        pr = ProgressBar(60, len(self.data))

        i = 0
        self.data_rwd = []
        while i < len(self.data):
            mols = self.data[i:i + batch_size]
            rwds = reward(mols, metric, self).reshape(-1)
            self.data_rwd.append(rwds)
            i += batch_size
            pr.update(min(i, len(self.data)))
        self.data_rwd = np.concatenate(self.data_rwd, -1)
Example #28
def download_file(v_data, directory):
    if not v_data:
        return False

    file_path = directory + v_data["name"]
    if Path(file_path).exists():
        print(f"Already exists: ", end="")
        return True

    r = requests.get(v_data["url"], stream=True)
    indicator = ProgressBar(int(r.headers.get('content-length', 0)))
    with open(file_path, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024 * 1024):
            if chunk:
                f.write(chunk)
                indicator.progress += len(chunk)
    return True
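
A usage sketch, assuming only the two keys the function actually reads ('name' and 'url'); both values here are illustrative:

v_data = {
    "name": "video.mp4",                     # illustrative file name
    "url": "https://example.com/video.mp4",  # illustrative URL
}
# note: directory is joined by plain string concatenation,
# so keep the trailing separator
download_file(v_data, "downloads/")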
Example #29
def val(model, criterion, dataloader, epoch=None, val_writer=None, lr=None, msglogger=None):
    with t.no_grad():
        """
        计算模型在验证集上的准确率等信息
        """
        model.eval()
        val_losses = AverageMeter()
        val_top1 = AverageMeter()
        val_top5 = AverageMeter()
        val_progressor = None
        if not msglogger:
            val_progressor = ProgressBar(mode="Val  ", epoch=epoch, total_epoch=opt.max_epoch, model_name=opt.model,
                                         lr=lr,
                                         total=len(dataloader))
        for ii, (data, labels, img_path, tag) in enumerate(dataloader):
            if not check_date(img_path, tag, msglogger): return
            input = data.to(opt.device)
            labels = labels.to(opt.device)
            score = model(input)
            loss = criterion(score, labels)
            precision1, precision5 = accuracy(score, labels, topk=(1, 5))  # top-1 and top-5 accuracy
            val_losses.update(loss.item(), input.size(0))
            val_top1.update(precision1[0].item(), input.size(0))
            val_top5.update(precision5[0].item(), input.size(0))
            if val_progressor:
                val_progressor.current = ii + 1
                val_progressor.current_loss = val_losses.avg
                val_progressor.current_top1 = val_top1.avg
                val_progressor.current_top5 = val_top5.avg
                val_progressor()
                if ii % opt.print_freq == 0:
                    if val_writer:
                        grid = make_grid((input.data.cpu() * 0.225 + 0.45).clamp(min=0, max=1))
                        val_writer.add_image('val_images', grid, ii * (epoch + 1))  # validation images
                        val_writer.add_scalar('loss', val_losses.avg, ii * (epoch + 1))  # validation loss
                        val_writer.add_text('top1', 'val accuracy top1 %.2f%%' % val_top1.avg,
                                            ii * (epoch + 1))  # top-1 accuracy text
                        val_writer.add_scalars('accuracy', {'top1': val_top1.avg,
                                                            'top5': val_top5.avg,
                                                            'loss': val_losses.avg}, ii * (epoch + 1))

        if msglogger:
            msglogger.info('==> Top1: %.3f    Top5: %.3f    Loss: %.3f\n',
                           val_top1.avg, val_top5.avg, val_losses.avg)
        return [val_losses.avg, val_top1.avg, val_top5.avg]
Example #30
def main():
    """A multi-thread tool to crop sub-images."""
    if gethostname() == 'ybahat-System-Product-Name':
        dataset_root_path = '/home/ybahat/Datasets'
    elif 'tiras' in os.getcwd():
        dataset_root_path = '/home/tiras/datasets'
    else:
        dataset_root_path = '/media/ybahat/data/Datasets'
    input_folder = os.path.join(dataset_root_path,
                                'DIV2K_train/DIV2K_train_HR')
    save_folder = os.path.join(dataset_root_path,
                               'DIV2K_train/DIV2K_train_sub_HR')
    n_thread = 20
    crop_sz = 256  #480
    step = 30  #240
    thres_sz = 48
    compression_level = 3  # 3 is the default value in cv2
    multi_scale = False
    # CV_IMWRITE_PNG_COMPRESSION from 0 to 9. A higher value means a smaller size and longer
    # compression time. If read raw images during training, use 0 for faster IO speed.

    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
        print('mkdir [{:s}] ...'.format(save_folder))
    else:
        print('Folder [{:s}] already exists. Exit...'.format(save_folder))
        sys.exit(1)

    img_list = []
    for root, _, file_list in sorted(os.walk(input_folder)):
        path = [os.path.join(root, x)
                for x in file_list]  # assume only images in the input_folder
        img_list.extend(path)

    def update(arg):
        pbar.update(arg)

    pbar = ProgressBar(len(img_list))

    pool = Pool(n_thread)
    for path in img_list:
        pool.apply_async(worker,
                         args=(path, save_folder, crop_sz, step, thres_sz,
                               compression_level, multi_scale),
                         callback=update)
    pool.close()
    pool.join()
    print('All subprocesses done.')