Example #1
    def split_tiles(self):
        self.__split_tiles(self.__retrieve_tiles_to_process())
        previous_tiles = dict(self.tiles)
        new_tiles = {}

        # reload the project to retrieve the new tiles
        self.__retrieve_scene_objects()

        # map each previous tile to its corresponding split tiles
        for previous_tile in previous_tiles.values():
            new_tiles[previous_tile] = [
                tile for tile in self.tiles.values()
                if previous_tile.name in tile.name
                and previous_tile.name != tile.name
            ]

        pbar = ProgressBar(
            new_tiles.items(),
            title="REPLACE THE OLD TILES WITH THE NEW SPLIT TILES IN THE SCENE DEFINITION FILE"
        )
        # use a distinct loop variable so the dict being iterated is not rebound
        for previous_tile, split_tiles in new_tiles.items():
            self.__replace_tiles_in_objects_xml(previous_tile, split_tiles)

            pbar.update(
                "split tiles added, replacing the previous %s tile" %
                previous_tile.name)
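These examples come from several different projects, so the ProgressBar they import is not a single class. The MSFS scenery snippets (this one, and Examples #3 through #9 among others) treat it as a wrapper around an iterable with an optional title, exposing an .iterable attribute, a .display_title() method, and an .update(message) call. A minimal sketch of that assumed interface, not the project's actual implementation:

import sys

class ProgressBar:
    # hypothetical stand-in for the MSFS tools' utils ProgressBar
    def __init__(self, iterable, title=None, sleep=0.0):
        self.iterable = list(iterable)  # callers loop over pbar.iterable
        self.total = len(self.iterable)
        self.count = 0
        self.sleep = sleep  # optional delay between refreshes
        if title:
            self.display_title(title)

    def display_title(self, title):
        print(title)

    def update(self, msg=""):
        self.count += 1
        pct = 100.0 * self.count / max(self.total, 1)
        sys.stdout.write("\r[%3.0f%%] %s" % (pct, msg))
        sys.stdout.flush()
        if self.count == self.total:
            sys.stdout.write("\n")

A third variant appears in Examples #11, #26, and #27: the bar is constructed from a task count but update() still takes a message, which matches the mmsr/BasicSR-style ProgressBar(task_num).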
Example #2
 def _test_step(model, optimizer, batch_dim, test_batch, start_time,
                _test_update):
     self.load(directory)
     from_start = timedelta(seconds=int((time.time() - start_time)))
     self.log('End of training ({} epochs) in {}'.format(
         epochs, from_start))
     if test_batch is not None:
         pr = ProgressBar(80, test_batch)
         output = defaultdict(list)
         for i in range(test_batch):
             for k, v in self.session.run(test_fetch_dict(
                     model, optimizer),
                                          feed_dict=test_feed_dict(
                                              model, optimizer,
                                              batch_dim)).items():
                 output[k].append(v)
             pr.update(i + 1)
         self.log(date=False)
         output = {k: np.mean(v) for k, v in output.items()}
     else:
         output = self.session.run(
             test_fetch_dict(model, optimizer),
             feed_dict=test_feed_dict(model, optimizer, batch_dim))
     if _test_update is not None:
         output.update(
             _test_update(model, optimizer, batch_dim, test_batch))
     p = pprint.PrettyPrinter(indent=1, width=80)
     self.log('Test --> {}'.format(p.pformat(output)))
     for k in output:
         self.print['Test ' + k].append(output[k])
     return output
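The MolGAN-style snippets (this one, #10, #12, #13, #24, #25) use yet another, count-based bar: ProgressBar(width, total) plus update(current_step). A rough sketch of that assumed interface, given a hypothetical name to keep it distinct from the iterable version above:

import sys

class CountProgressBar:
    # hypothetical stand-in for MolGAN's utils ProgressBar(80, n)
    def __init__(self, width, total):
        self.width, self.total = width, total

    def update(self, step):
        filled = int(self.width * step / max(self.total, 1))
        bar = "=" * filled + " " * (self.width - filled)
        sys.stderr.write("\r[%s] %d/%d" % (bar, step, self.total))
        sys.stderr.flush()
        if step >= self.total:
            sys.stderr.write("\n")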
Example #3
    def __convert_dds_texture_files(self):
        ON_POSIX = 'posix' in sys.builtin_module_names

        self.converted_data, data = itertools.tee(self.converted_data)
        pbar = ProgressBar(list(data), title="convert " + self.DDS_FORMAT + " textures to " + self.BMP_FORMAT)
        for chunk in self.converted_data:
            # create a pipe to collect the output of the child processes
            input_fd, output_fd = os.pipe()

            for obj in chunk:
                print("-------------------------------------------------------------------------------")
                print("prepare command line: ", self.path, obj['file_name'], obj['dest_file_name'])

            processes = [subprocess.Popen([self.path, obj['file_name'], obj['dest_file_name']], stdout=output_fd, close_fds=ON_POSIX) for obj in chunk]

            os.close(output_fd)  # close the unused write end so the reader sees EOF

            # read output line by line as soon as it is available
            with io.open(input_fd, "r") as file:
                for line in file:
                    print(line, end="")

            for p in processes:
                p.wait()

            # obj is the last item of the chunk; the bar advances once per chunk
            pbar.update("%s converted to %s" % (os.path.basename(obj['file_name']), os.path.basename(obj['dest_file_name'])))
Example #4
 def __remove_colliders(self):
     pbar = ProgressBar(list(self.colliders.values()),
                        title="REMOVE TILE COLLIDERS")
     for guid, collider in self.colliders.items():
         self.objects_xml.remove_object(guid)
         pbar.update("%s removed" % collider.name)
     self.__clean_objects(self.colliders)
Example #5
 def fix_tiles_lightning_issues(self, settings):
     isolated_print(EOL)
     lods = [lod for tile in self.tiles.values() for lod in tile.lods]
     pbar = ProgressBar(list(lods), title="FIX TILES LIGHTNING ISSUES")
     for lod in lods:
         lod.optimization_in_progress = False
         lod.prepare_for_msfs()
         pbar.update("%s lightning issues fixed" % lod.name)
Example #6
def fix_object_bounding_box(resize_box=True):
    if not bpy.context.scene: return

    create_collection = bpy.data.collections.new(name=COPY_COLLECTION_NAME)
    bpy.context.scene.collection.children.link(create_collection)
    assert (create_collection is not bpy.context.scene.collection)

    # copy objects
    copy_objects(bpy.context.scene.collection, create_collection, False)

    obs = []
    for obj in create_collection.objects:
        if obj.type == MESH_OBJECT_TYPE:
            obs.append(obj)

    ctx = bpy.context.copy()

    if len(obs) < 1:
        return None

    ctx[ACTIVE_OBJ] = obs[0]

    ctx[SELECTED_OBJ] = obs

    # Blender 2.8x reads the join selection from selected_editable_objects,
    # so set that key as well
    ctx[SELECTED_EDITABLE_OBJ] = obs

    # join copied objects
    bpy.ops.object.join(ctx)

    bpy.ops.object.select_all(action=SELECT_ACTION)

    objects = bpy.context.scene.objects

    # fix objects origin: this also fixes the bounding box for the whole tile
    pbar = ProgressBar(objects)
    for obj in objects:
        center_origin(obj)
        pbar.update("bounded box updated for %s" % obj.name)

    bpy.ops.object.select_all(action=DESELECT_ACTION)

    # remove the joined copies: select them all, then delete once
    for obj in create_collection.objects:
        obj.select_set(True)
    bpy.ops.object.delete()

    bpy.ops.object.select_all(action=SELECT_ACTION)

    for c in bpy.context.scene.collection.children:
        bpy.context.scene.collection.children.unlink(c)

    if resize_box:
        # resize objects to fix spacing between tiles
        bpy.ops.transform.resize(value=(1.0045, 1.0045, 1))
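A portability note: bpy.ops.object.join(ctx) relies on passing a context-override dict positionally, which Blender removed in 3.2. Under current Blender the equivalent call goes through temp_override; a hedged sketch (the key names are standard context members, but this is untested against the script above):

    # Blender 3.2+ replacement for bpy.ops.object.join(ctx)
    with bpy.context.temp_override(active_object=obs[0],
                                   selected_objects=obs,
                                   selected_editable_objects=obs):
        bpy.ops.object.join()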
Example #7
    def __multithread_process_data(processed_data, script_name, title,
                                   update_msg):
        ON_POSIX = 'posix' in sys.builtin_module_names

        processed_data, data = itertools.tee(processed_data)
        pbar = ProgressBar(list(data), title=title)

        try:
            for chunk in processed_data:
                # create a pipe to get data
                input_fd, output_fd = os.pipe()
                params = [
                    str(bpy.app.binary_path), "--background", "--python",
                    os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                 script_name), "--"
                ]

                for obj in chunk:
                    print(
                        "-------------------------------------------------------------------------------"
                    )
                    print(
                        "prepare command line: ",
                        "\"" + str(bpy.app.binary_path) +
                        "\" --background --python \"" + os.path.join(
                            os.path.dirname(os.path.dirname(__file__)),
                            script_name) + "\" -- " +
                        str(" ").join(obj["params"]))

                # STARTUPINFO exists only on Windows; guard it so the POSIX
                # branch does not raise AttributeError. HIGH_PRIORITY_CLASS is
                # a creationflags value, not a STARTF_* flag, so pass it there.
                si = None
                creation_flags = 0
                if os.name == "nt":
                    si = subprocess.STARTUPINFO()
                    si.dwFlags = subprocess.STARTF_USESTDHANDLES
                    creation_flags = subprocess.HIGH_PRIORITY_CLASS

                processes = [
                    subprocess.Popen(params + obj["params"],
                                     stdout=output_fd,
                                     stderr=subprocess.DEVNULL,
                                     close_fds=ON_POSIX,
                                     startupinfo=si,
                                     creationflags=creation_flags,
                                     encoding=ENCODING) for obj in chunk
                ]

                os.close(output_fd)  # close unused end of the pipe

                # read output line by line as soon as it is available
                with io.open(input_fd, "r", buffering=1) as file:
                    for line in file:
                        print(line, end="")

                for p in processes:
                    p.wait()

                pbar.update("%s %s" % (obj["name"], update_msg))
        except:
            pass
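Both pipe-based converters (this one and Example #3) iterate their data in chunks, spawning one subprocess per item and advancing the bar once per chunk. The chunking itself is not shown; a plausible grouping helper (an assumption, not the project's code) would be:

import itertools

def chunks(items, size):
    # yield successive lists of at most `size` items
    it = iter(items)
    while True:
        chunk = list(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk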
Example #8
    def remove_colliders(self):
        # clean previous colliders
        self.__remove_colliders()

        lods = [lod for tile in self.tiles.values() for lod in tile.lods]
        pbar = ProgressBar(
            list(lods), title="ADD ROAD AND COLLISION TAGS IN THE TILE LODS")
        for lod in lods:
            lod.optimization_in_progress = False
            lod.prepare_for_msfs()
            pbar.update("road and collision tags added from %s" % lod.name)
Example #9
 def __retrieve_shapes(self):
     pbar = ProgressBar(list(
         Path(self.scene_folder).rglob(DBF_FILE_PATTERN)),
                        title="Retrieve shapes")
     for path in pbar.iterable:
         self.shapes[path.stem] = MsfsShape(self.scene_folder, path.stem,
                                            path.stem + XML_FILE_EXT,
                                            path.name,
                                            path.stem + SHP_FILE_EXT,
                                            path.stem + SHX_FILE_EXT)
         pbar.update("%s" % path.name)
Example #10
    def _generate_AX(self):
        self.log('Creating features and adjacency matrices..')
        pr = ProgressBar(60, len(self.data))

        data = []
        smiles = []
        data_S = []
        data_A = []
        data_X = []
        data_D = []
        data_F = []
        data_Le = []
        data_Lv = []

        max_length = max(mol.GetNumAtoms() for mol in self.data)
        max_length_s = max(len(Chem.MolToSmiles(mol)) for mol in self.data)

        for i, mol in enumerate(self.data):
            A = self._genA(mol, connected=True, max_length=max_length)
            if A is not None:
                # degree vector, computed only once the adjacency exists
                D = np.count_nonzero(A, -1)
                data.append(mol)
                smiles.append(Chem.MolToSmiles(mol))
                data_S.append(self._genS(mol, max_length=max_length_s))
                data_A.append(A)
                data_X.append(self._genX(mol, max_length=max_length))
                data_D.append(D)
                data_F.append(self._genF(mol, max_length=max_length))

                L = D - A
                Le, Lv = np.linalg.eigh(L)

                data_Le.append(Le)
                data_Lv.append(Lv)

            pr.update(i + 1)

        self.log(date=False)
        self.log(
            'Created {} features and adjacency matrices out of {} molecules!'.
            format(len(data), len(self.data)))

        self.data = data
        self.smiles = smiles
        self.data_S = data_S
        self.data_A = data_A
        self.data_X = data_X
        self.data_D = data_D
        self.data_F = data_F
        self.data_Le = data_Le
        self.data_Lv = data_Lv
        self.__len = len(self.data)
Example #11
def drawResultDirectory(filePath, resultPath, *args):
    fileList = sorted(glob(filePath))
    pbar = ProgressBar(len(fileList))

    for index, value in enumerate(fileList):
        pbar.update("Read {}".format(value))
        # Read image
        img = Image.open(value, "r")

        img = drawResultFile(img, args)
        img.save(resultPath + os.path.basename(value))

    print("Completed!")
Example #12
File: trainer.py Project: agitter/MolGAN
            def _eval_step(epoch, epochs, min_epochs, model, optimizer,
                           batch_dim, eval_batch, start_time,
                           last_epoch_start_time, _eval_update):
                from_start = timedelta(seconds=int((time.time() - start_time)))
                last_epoch = timedelta(seconds=int((time.time() -
                                                    last_epoch_start_time)))
                eta = timedelta(
                    seconds=int((time.time() - start_time) * (epochs - epoch) /
                                epoch)) if (time.time() -
                                            start_time) > 1 else '-:--:-'

                self.log(
                    'Epochs {:10}/{} in {} (last epoch in {}), ETA: {}'.format(
                        epoch, epochs, from_start, last_epoch, eta))

                if eval_batch is not None:
                    pr = ProgressBar(80, eval_batch)
                    output = defaultdict(list)

                    for i in range(eval_batch):
                        for k, v in self.session.run(
                                eval_fetch_dict(epoch, epochs, min_epochs,
                                                model, optimizer),
                                feed_dict=eval_feed_dict(
                                    epoch, epochs, min_epochs, model,
                                    optimizer, batch_dim)).items():
                            output[k].append(v)
                        pr.update(i + 1)

                    self.log(date=False)
                    output = {k: np.mean(v) for k, v in output.items()}
                else:
                    output = self.session.run(
                        eval_fetch_dict(epoch, epochs, min_epochs, model,
                                        optimizer),
                        feed_dict=eval_feed_dict(epoch, epochs, min_epochs,
                                                 model, optimizer, batch_dim))

                if _eval_update is not None:
                    output.update(
                        _eval_update(epoch, epochs, min_epochs, model,
                                     optimizer, batch_dim, eval_batch))

                p = pprint.PrettyPrinter(indent=1, width=80)
                self.log('Validation --> {}'.format(p.pformat(output)))

                for k in output:
                    self.print[k].append(output[k])

                return output
Example #13
    def _genReward(self, metric, batch_size=15):
        self.log('Calculating molecule rewards..')

        pr = ProgressBar(60, len(self.data))

        i = 0
        self.data_rwd = []
        while i < len(self.data):
            mols = self.data[i:i + batch_size]
            rwds = reward(mols, metric, self).reshape(-1)
            self.data_rwd.append(rwds)
            i += batch_size
            pr.update(min(i, len(self.data)))
        self.data_rwd = np.concatenate(self.data_rwd, -1)
Example #14
    def __find_different_tiles(self, tiles, project_to_compare_name,
                               tiles_to_compare, objects_xml_to_compare):
        different_tiles = []
        pbar = ProgressBar(tiles.items(), title="FIND THE DIFFERENT TILES")
        for guid, tile in pbar.iterable:
            found_tile = self.__find_by_tile_name(tile, tiles_to_compare)
            if not found_tile:
                different_tiles.append(tile)
            elif len(tile.lods) != len(found_tile.lods):
                different_tiles.append(tile)

            pbar.update("%s checked" % tile.name)

        return different_tiles
Example #15
    def __update_tiles_pos(self, msfs_project, settings):
        if not msfs_project.tiles.items():
            return

        pbar = ProgressBar(msfs_project.tiles.items(),
                           title="update tiles positions",
                           sleep=0.000001)
        for guid, tile in pbar.iterable:
            self.__update_scenery_object_pos(tile,
                                             self.find_scenery_objects(guid),
                                             settings)
            self.__update_scenery_object_pos(
                tile, self.find_scenery_objects_in_group(guid), settings)

            pbar.update("%s" % tile.name + " : new lat: " +
                        str(tile.pos.lat + float(settings.lat_correction)) +
                        " : new lon: " +
                        str(tile.pos.lon + float(settings.lon_correction)))
Example #16
    def __update_colliders_pos(self, msfs_project, settings):
        if not msfs_project.colliders.items():
            return

        pbar = ProgressBar(msfs_project.colliders.items(),
                           title="update colliders positions",
                           sleep=0.000001)
        for guid, collider in pbar.iterable:
            self.__update_scenery_object_pos(collider,
                                             self.find_scenery_objects(guid),
                                             settings)
            self.__update_scenery_object_pos(
                collider, self.find_scenery_objects_in_group(guid), settings)

            pbar.update("%s" % collider.name + " : new lat: " +
                        str(collider.pos.lat +
                            float(settings.lat_correction)) + " : new lon: " +
                        str(collider.pos.lon + float(settings.lon_correction)))
Example #17
    def add_tile_colliders(self):
        # clean previous colliders
        self.__remove_colliders()

        lods = [lod for tile in self.tiles.values() for lod in tile.lods]
        pbar = ProgressBar(
            list(lods),
            title="REMOVE ROAD AND COLLISION TAGS IN THE TILE LODS")
        for lod in lods:
            lod.optimization_in_progress = False
            lod.remove_road_and_collision_tags()
            pbar.update("road and collision tags removed from %s" % lod.name)

        pbar = ProgressBar(list(self.tiles.values()),
                           title="ADD TILE COLLIDERS")
        for tile in self.tiles.values():
            tile_guid = tile.xml.guid
            new_collider = tile.add_collider()
            self.__add_object_in_objects_xml(tile_guid, new_collider)
            pbar.update("collider added for %s tile" % tile.name)
Example #18
 def __merge_shapes(self, shapes, shapes_to_merge):
     pbar = ProgressBar(shapes_to_merge.items())
     for name, shape in pbar.iterable:
         if not os.path.isfile(
                 os.path.join(self.scene_folder, shape.definition_file)):
             pbar.display_title("MERGE THE SHAPES")
             shutil.copyfile(
                 os.path.join(shape.folder, shape.definition_file),
                 os.path.join(self.scene_folder, shape.definition_file))
             shutil.copyfile(
                 os.path.join(shape.folder, shape.dbf_file_name),
                 os.path.join(self.scene_folder, shape.dbf_file_name))
             shutil.copyfile(
                 os.path.join(shape.folder, shape.shp_file_name),
                 os.path.join(self.scene_folder, shape.shp_file_name))
             shutil.copyfile(
                 os.path.join(shape.folder, shape.shx_file_name),
                 os.path.join(self.scene_folder, shape.shx_file_name))
             shapes[name] = shape
             pbar.update("%s merged" % shape.name)
Example #19
    def optimize(self, settings):
        isolated_print(EOL)
        dest_format = settings.output_texture_format
        src_format = JPG_TEXTURE_FORMAT if dest_format == PNG_TEXTURE_FORMAT else PNG_TEXTURE_FORMAT
        lods = [lod for tile in self.tiles.values() for lod in tile.lods]
        self.__convert_tiles_textures(src_format, dest_format)
        self.update_min_size_values(settings)
        # some tile lods are not optimized
        if self.__optimization_needed():
            self.__create_optimization_folders()
            self.__optimize_tile_lods(self.__retrieve_lods_to_process())

        pbar = ProgressBar(list(lods), title="PREPARE THE TILES FOR MSFS")
        for lod in lods:
            lod.folder = os.path.dirname(
                lod.folder) if self.__optimization_needed() else lod.folder
            lod.optimization_in_progress = False
            lod.prepare_for_msfs()
            pbar.update("%s prepared for msfs" % lod.name)

        self.objects_xml.update_objects_position(self, settings)
Example #20
    def __retrieve_scene_objects(self):
        pbar = ProgressBar(list(
            Path(self.model_lib_folder).rglob(XML_FILE_PATTERN)),
                           title="Retrieve project infos")
        for i, path in enumerate(pbar.iterable):
            if not is_octant(path.stem):
                msfs_scene_object = MsfsSceneObject(self.model_lib_folder,
                                                    path.stem, path.name)
                self.objects[msfs_scene_object.xml.guid] = msfs_scene_object
                pbar.update("%s" % path.name)
                continue

            if COLLIDER_SUFFIX in path.stem:
                msfs_collider = MsfsCollider(self.model_lib_folder, path.stem,
                                             path.name)
                self.colliders[msfs_collider.xml.guid] = msfs_collider
                pbar.update("%s" % path.name)
                continue

            msfs_tile = MsfsTile(self.model_lib_folder, path.stem, path.name)
            if not msfs_tile.lods:
                msfs_tile.remove_files()
            else:
                self.tiles[msfs_tile.xml.guid] = msfs_tile
            pbar.update("%s" % path.name)
Example #21
def link_materials_to_packed_texture(objects, folder, file_name):
    img = bpy.data.images.load(os.path.join(folder, file_name))

    pbar = ProgressBar(bpy.context.scene.objects)
    for obj in pbar.iterable:
        material = obj.material_slots[0].material

        try:
            material.msfs_show_road_material = True
            material.msfs_show_collision_material = True
            material.msfs_show_day_night_cycle = True
            material.msfs_road_material = True
            material.msfs_collision_material = True
            material.msfs_day_night_cycle = True
        except AttributeError:
            pass

        # image nodes of the source objects passed to the function
        source_image_nodes = [get_image_node(o) for o in objects]
        # Update image in materials
        for node in source_image_nodes:
            node.image = img
            node.image.name = file_name
        pbar.update("link packed texture to %s" % obj.name)
Example #22
    def __convert_tiles_textures(self, src_format, dest_format):
        textures = self.__retrieve_tiles_textures(src_format)

        if textures:
            isolated_print(
                src_format +
                " texture files detected in the tiles of the project! Installing Pillow, then converting them"
            )
            print_title("INSTALL PILLOW")
            install_python_lib("Pillow")

            pbar = ProgressBar(textures,
                               title="CONVERT " + src_format.upper() +
                               " TEXTURE FILES TO " + dest_format.upper())
            for texture in textures:
                file = texture.file
                if not texture.convert_format(src_format, dest_format):
                    raise ScriptError(
                        "An error was detected while converting texture files in "
                        + self.texture_folder + " ! Please convert them to " +
                        dest_format +
                        " format prior to launching the script, or remove them")
                else:
                    pbar.update("%s converted to %s" % (file, dest_format))
Example #23
                # (fragment: the tail of the bicubic() resampling loops)
                # Print progress
                inc = inc + 1
                sys.stderr.write("\r\033[K" +
                                 get_progressbar_str(inc / (C * dH * dW)))
                sys.stderr.flush()
    sys.stderr.write("\n")
    sys.stderr.flush()
    return dst


# LR, HR path
hr_path = "C:/Users/LTT/OneDrive - K3v/Documents/LVTN/dataset/demo/HR/*"
lr_path = "C:/Users/LTT/OneDrive - K3v/Documents/LVTN/Dataset/demo/LRx4/"
hr_list = sorted(glob(hr_path))

# Scale factor
ratio = 1 / 4
# Coefficient
a = -1 / 2
pbar = ProgressBar(len(hr_list))

for index, value in enumerate(hr_list):
    pbar.update("Read {}".format(value))
    # Read image
    img = cv2.imread(value)
    dst = bicubic(img, ratio, a)
    cv2.imwrite(lr_path + os.path.basename(value), dst)

print("Completed!")
Example #24
File: trainer.py Project: YilongJu/MolGAN
    def train(self,
              batch_dim,
              epochs,
              steps,
              train_fetch_dict,
              train_feed_dict,
              eval_fetch_dict,
              eval_feed_dict,
              test_fetch_dict,
              test_feed_dict,
              _train_step=None,
              _eval_step=None,
              _test_step=None,
              _train_update=None,
              _eval_update=None,
              _test_update=None,
              eval_batch=None,
              test_batch=None,
              best_fn=None,
              min_epochs=None,
              look_ahead=None,
              save_every=None,
              directory=None,
              skip_first_eval=False,
              skip_training=False):

        if not skip_training:

            if _train_step is None:

                def _train_step(step, steps, epoch, epochs, min_epochs, model,
                                optimizer, batch_dim):
                    model.is_training = True
                    if not model.latent_opt:
                        model.is_training = False
                    print(
                        f"_train_step, batch_dim: {batch_dim}, is_training: {model.is_training}"
                    )
                    embeddings = model.sample_z(batch_dim)
                    # embeddings = model.z
                    # print(f"embeddings assigned: {embeddings}")
                    assign_op = model.embeddings_LO.assign(embeddings)

                    # a, b, c, _ = self.session.run([train_fetch_dict(step, steps, epoch, epochs, min_epochs, model, optimizer), assign_op], feed_dict=train_feed_dict(step, steps, epoch, epochs, min_epochs, model, optimizer, batch_dim))
                    # a, _ = self.session.run([train_fetch_dict(step, steps, epoch, epochs, min_epochs, model, optimizer), assign_op], feed_dict=train_feed_dict(step, steps, epoch, epochs, min_epochs, model, optimizer, batch_dim))
                    if model.latent_opt:
                        z_up = self.session.run(optimizer.train_step_z,
                                                feed_dict=train_feed_dict(
                                                    step, steps, epoch, epochs,
                                                    min_epochs, model,
                                                    optimizer, batch_dim))
                    z_updated_val = self.session.run(model.embeddings_LO)
                    # print(f"embeddings updated: {z_updated_val}")

                    a = self.session.run(
                        train_fetch_dict(step, steps, epoch, epochs,
                                         min_epochs, model, optimizer),
                        feed_dict=train_feed_dict(step, steps, epoch, epochs,
                                                  min_epochs, model, optimizer,
                                                  batch_dim))

                    #print("!!!!!!!!!!!!!!!!!!updates", b)
                    #print("###################",c)
                    return a
                    # return self.session.run(train_fetch_dict(step, steps, epoch, epochs, min_epochs, model, optimizer), feed_dict=train_feed_dict(step, steps, epoch, epochs, min_epochs, model, optimizer, batch_dim))

            if _eval_step is None:

                def _eval_step(epoch, epochs, min_epochs, model, optimizer,
                               batch_dim, eval_batch, start_time,
                               last_epoch_start_time, _eval_update):
                    model.is_training = False
                    print(
                        f"_eval_step, batch_dim: {batch_dim}, is_training: {model.is_training}"
                    )

                    self.log(">>> 0 <<<")
                    from_start = timedelta(seconds=int((time.time() -
                                                        start_time)))
                    last_epoch = timedelta(
                        seconds=int((time.time() - last_epoch_start_time)))
                    eta = timedelta(
                        seconds=int((time.time() - start_time) *
                                    (epochs - epoch) /
                                    epoch)) if (time.time() -
                                                start_time) > 1 else '-:--:-'
                    self.log(">>> 1 <<<")

                    self.log(
                        'Epochs {:10}/{} in {} (last epoch in {}), ETA: {}'.
                        format(epoch, epochs, from_start, last_epoch, eta))

                    if eval_batch is not None:
                        self.log(">>> 1a <<<")
                        pr = ProgressBar(80, eval_batch)
                        output = defaultdict(list)

                        for i in range(eval_batch):
                            for k, v in self.session.run(
                                    eval_fetch_dict(epoch, epochs, min_epochs,
                                                    model, optimizer),
                                    feed_dict=eval_feed_dict(
                                        epoch, epochs, min_epochs, model,
                                        optimizer, batch_dim)).items():
                                output[k].append(v)
                            pr.update(i + 1)

                        self.log(date=False)
                        output = {k: np.mean(v) for k, v in output.items()}
                    else:
                        self.log(">>> 1b <<<")
                        # print(eval_fetch_dict(epoch, epochs, min_epochs, model, optimizer))
                        # print(eval_feed_dict(epoch, epochs, min_epochs, model, optimizer, batch_dim))
                        output = self.session.run(
                            eval_fetch_dict(epoch, epochs, min_epochs, model,
                                            optimizer),
                            feed_dict=eval_feed_dict(epoch, epochs, min_epochs,
                                                     model, optimizer,
                                                     batch_dim))
                        self.log(">>> 1b2 <<<")

                    self.log(">>> 2 <<<")

                    if _eval_update is not None:
                        output.update(
                            _eval_update(epoch, epochs, min_epochs, model,
                                         optimizer, batch_dim, eval_batch))

                    self.log(">>> 3 <<<")

                    p = pprint.PrettyPrinter(indent=1, width=80)
                    self.log('Validation --> {}'.format(p.pformat(output)))

                    for k in output:
                        self.print[k].append(output[k])

                    self.log(">>> 4 <<<")
                    return output

            # ========================================================================

            best_model_value = None
            no_improvements = 0
            start_time = time.time()
            last_epoch_start_time = time.time()

            for epoch in range(epochs + 1):
                print(f">>>>>>>>> epoch {epoch} <<<<<<<<<")

                early_stop = False

                if not (skip_first_eval and epoch == 0):

                    result = _eval_step(epoch, epochs, min_epochs, self.model,
                                        self.optimizer, batch_dim, eval_batch,
                                        start_time, last_epoch_start_time,
                                        _eval_update)

                    if best_fn is not None and (
                            True if best_model_value is None else
                            best_fn(result) > best_model_value):
                        self.save(directory)
                        best_model_value = best_fn(result)
                        no_improvements = 0
                    elif look_ahead is not None and no_improvements < look_ahead:
                        no_improvements += 1
                        self.load(directory)
                    elif min_epochs is not None and epoch >= min_epochs:
                        self.log('No improvements after {} epochs!'.format(
                            no_improvements))
                        break

                    if save_every is not None and epoch % save_every == 0:
                        self.save(directory, epoch)

                    print(f"result['valid score']: {result['valid score']}")
                    print(f"result['unique score']: {result['unique score']}")
                    print(f"result['novel score']: {result['novel score']}")

                    # if result['valid score'] > 85 and result['novel score'] > 85 and result['unique score'] > 15:
                    #     print("early stop!")
                    #     early_stop = True

                if epoch < epochs and (not early_stop):
                    last_epoch_start_time = time.time()
                    pr = ProgressBar(80, steps)
                    for step in range(steps):
                        _train_step(steps * epoch + step, steps, epoch, epochs,
                                    min_epochs, self.model, self.optimizer,
                                    batch_dim)
                        pr.update(step + 1)

                    self.log(date=False)

                if early_stop:
                    print(f">>>> early stop at {epoch}! <<<<")
                    break
            """
            self.model = GraphGANModel ...
            self.optimizer = GraphGANOptimizer ...
            batch_dim = batch_dim ...
            eval_batch =
            """
        else:
            start_time = time.time()

        if _test_step is None:

            def _test_step(model, optimizer, batch_dim, test_batch, start_time,
                           _test_update):
                model.is_training = False
                print(
                    f"_test_step, batch_dim: {batch_dim}, is_training: {model.is_training}"
                )
                self.load(directory, model.test_epoch)
                from_start = timedelta(seconds=int((time.time() - start_time)))
                self.log('End of training ({} epochs) in {}'.format(
                    epochs, from_start))

                if test_batch is not None:
                    pr = ProgressBar(80, test_batch)
                    output = defaultdict(list)

                    for i in range(test_batch):
                        for k, v in self.session.run(test_fetch_dict(
                                model, optimizer),
                                                     feed_dict=test_feed_dict(
                                                         model, optimizer,
                                                         batch_dim)).items():
                            output[k].append(v)
                        pr.update(i + 1)

                    self.log(date=False)
                    output = {k: np.mean(v) for k, v in output.items()}
                else:
                    output = self.session.run(
                        test_fetch_dict(model, optimizer),
                        feed_dict=test_feed_dict(model, optimizer, batch_dim))

                if _test_update is not None:
                    output.update(
                        _test_update(model, optimizer, batch_dim, test_batch))

                p = pprint.PrettyPrinter(indent=1, width=80)
                self.log('Test --> {}'.format(p.pformat(output)))

                with open(log_filename, 'a') as f:
                    f.write('Test --> {}'.format(p.pformat(output)))

                for k in output:
                    self.print['Test ' + k].append(output[k])

                return output

        _test_step(self.model, self.optimizer, batch_dim, eval_batch,
                   start_time, _test_update)
Example #25
    def train(self,
              batch_dim,
              epochs,
              steps,
              train_fetch_dict,
              train_feed_dict,
              eval_fetch_dict,
              eval_feed_dict,
              test_fetch_dict,
              test_feed_dict,
              _train_step=None,
              _eval_step=None,
              _test_step=None,
              _train_update=None,
              _eval_update=None,
              _test_update=None,
              eval_batch=None,
              test_batch=None,
              best_fn=None,
              min_epochs=None,
              look_ahead=None,
              save_every=None,
              directory=None,
              skip_first_eval=False):

        if _train_step is None:

            def _train_step(step, steps, epoch, epochs, min_epochs, model,
                            optimizer, batch_dim):
                return self.session.run(
                    train_fetch_dict(step, steps, epoch, epochs, min_epochs,
                                     model, optimizer),
                    feed_dict=train_feed_dict(step, steps, epoch, epochs,
                                              min_epochs, model, optimizer,
                                              batch_dim))

        if _eval_step is None:

            def _eval_step(epoch, epochs, min_epochs, model, optimizer,
                           batch_dim, eval_batch, start_time,
                           last_epoch_start_time, _eval_update):
                from_start = timedelta(seconds=int((time.time() - start_time)))
                last_epoch = timedelta(seconds=int((time.time() -
                                                    last_epoch_start_time)))
                eta = timedelta(
                    seconds=int((time.time() - start_time) * (epochs - epoch) /
                                epoch)) if (time.time() -
                                            start_time) > 1 else '-:--:-'
                self.log(
                    'Epochs {:10}/{} in {} (last epoch in {}), ETA: {}'.format(
                        epoch, epochs, from_start, last_epoch, eta))
                if eval_batch is not None:
                    pr = ProgressBar(80, eval_batch)
                    output = defaultdict(list)
                    for i in range(eval_batch):
                        for k, v in self.session.run(
                                eval_fetch_dict(epoch, epochs, min_epochs,
                                                model, optimizer),
                                feed_dict=eval_feed_dict(
                                    epoch, epochs, min_epochs, model,
                                    optimizer, batch_dim)).items():
                            output[k].append(v)
                        pr.update(i + 1)
                    self.log(date=False)
                    output = {k: np.mean(v) for k, v in output.items()}
                else:
                    output = self.session.run(
                        eval_fetch_dict(epoch, epochs, min_epochs, model,
                                        optimizer),
                        feed_dict=eval_feed_dict(epoch, epochs, min_epochs,
                                                 model, optimizer, batch_dim))

                if _eval_update is not None:
                    output.update(
                        _eval_update(epoch, epochs, min_epochs, model,
                                     optimizer, batch_dim, eval_batch))
                p = pprint.PrettyPrinter(indent=1, width=80)
                self.log('Validation --> {}'.format(p.pformat(output)))
                for k in output:
                    self.print[k].append(output[k])
                return output

        if _test_step is None:

            def _test_step(model, optimizer, batch_dim, test_batch, start_time,
                           _test_update):
                self.load(directory)
                from_start = timedelta(seconds=int((time.time() - start_time)))
                self.log('End of training ({} epochs) in {}'.format(
                    epochs, from_start))
                if test_batch is not None:
                    pr = ProgressBar(80, test_batch)
                    output = defaultdict(list)
                    for i in range(test_batch):
                        for k, v in self.session.run(test_fetch_dict(
                                model, optimizer),
                                                     feed_dict=test_feed_dict(
                                                         model, optimizer,
                                                         batch_dim)).items():
                            output[k].append(v)
                        pr.update(i + 1)
                    self.log(date=False)
                    output = {k: np.mean(v) for k, v in output.items()}
                else:
                    output = self.session.run(
                        test_fetch_dict(model, optimizer),
                        feed_dict=test_feed_dict(model, optimizer, batch_dim))
                if _test_update is not None:
                    output.update(
                        _test_update(model, optimizer, batch_dim, test_batch))
                p = pprint.PrettyPrinter(indent=1, width=80)
                self.log('Test --> {}'.format(p.pformat(output)))
                for k in output:
                    self.print['Test ' + k].append(output[k])
                return output

        best_model_value = None
        no_improvements = 0
        start_time = time.time()
        last_epoch_start_time = time.time()

        for epoch in range(epochs + 1):
            if not (skip_first_eval and epoch == 0):
                result = _eval_step(epoch, epochs, min_epochs, self.model,
                                    self.optimizer, batch_dim, eval_batch,
                                    start_time, last_epoch_start_time,
                                    _eval_update)
                if best_fn is not None and (
                        True if best_model_value is None else
                        best_fn(result) > best_model_value):
                    self.save(directory)
                    best_model_value = best_fn(result)
                    no_improvements = 0
                elif look_ahead is not None and no_improvements < look_ahead:
                    no_improvements += 1
                    self.load(directory)
                elif min_epochs is not None and epoch >= min_epochs:
                    self.log('No improvements after {} epochs!'.format(
                        no_improvements))
                    break
                if save_every is not None and epoch % save_every == 0:
                    self.save(directory)
            if epoch < epochs:
                last_epoch_start_time = time.time()
                pr = ProgressBar(80, steps)
                for step in range(steps):
                    _train_step(steps * epoch + step, steps, epoch, epochs,
                                min_epochs, self.model, self.optimizer,
                                batch_dim)
                    pr.update(step + 1)
                self.log(date=False)
        _test_step(self.model, self.optimizer, batch_dim, eval_batch,
                   start_time, _test_update)
Example #26
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.progress_bar import ProgressBar

# configurations
img_folder = '/mnt/lustre21/qiuhaonan/srdata/DIV2K_valid_HR_20small_bicLRx4/*'  # glob matching pattern
lmdb_save_path = '/mnt/lustre21/qiuhaonan/srdata/DIV2K_valid_HR_20small_bicLRx4.lmdb'  # must end with .lmdb

img_list = sorted(glob.glob(img_folder))
dataset = []
data_size = 0

print('Read images...')
pbar = ProgressBar(len(img_list))
for i, v in enumerate(img_list):
    pbar.update('Read {}'.format(v))
    img = cv2.imread(v, cv2.IMREAD_UNCHANGED)
    dataset.append(img)
    data_size += img.nbytes
env = lmdb.open(lmdb_save_path, map_size=data_size * 10)
print('Finish reading {} images.\nWrite lmdb...'.format(len(img_list)))

pbar = ProgressBar(len(img_list))
with env.begin(write=True) as txn:  # txn is a Transaction object
    for i, v in enumerate(img_list):
        pbar.update('Write {}'.format(v))
        base_name = os.path.splitext(os.path.basename(v))[0]
        key = base_name.encode('ascii')
        data = dataset[i]
        if dataset[i].ndim == 2:
            H, W = dataset[i].shape
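            # the example is truncated here; the remainder is a hedged
            # completion modeled on the write pattern in Example #27
            C = 1
        else:
            H, W, C = dataset[i].shape
        txn.put(key, data)
print('Finish writing lmdb.')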
Example #27
def create_lmdb_from_imgs(data_path,
                          lmdb_path,
                          img_path_list,
                          keys,
                          batch=5000,
                          compress_level=1,
                          map_size=None):
    """Make lmdb from images.
    Contents of lmdb. The file structure is:
    example.lmdb
    ├── data.mdb
    ├── lock.mdb
    ├── meta_info.txt
    The data.mdb and lock.mdb are standard lmdb files and you can refer to
    https://lmdb.readthedocs.io/en/release/ for more details.
    The meta_info.txt is a specified txt file to record the meta information
    of the datasets. Each line in the txt file records 1) image name
    (with extension), 2) image shape, and 3) compression level, separated
    by a space.
    For example, the meta information could be:
    `000_00000000.png (720,1280,3) 1`, which means:
    1) image name (with extension): 000_00000000.png;
    2) image shape: (720,1280,3);
    3) compression level: 1
    The image name is used without extension as the lmdb key.
    Args:
        data_path (str): Data path for reading images.
        lmdb_path (str): Lmdb save path.
        img_path_list (str): Image path list.
        keys (str): Used for lmdb keys.
        batch (int): After processing 'batch' number of images, lmdb commits.
            Default: 5000.
        compress_level (int): Compress level when encoding images. Default: 1.
        map_size (int | None): Map size for lmdb env. If None, use the
            estimated size from images. Default: None
    """

    assert len(img_path_list) == len(keys), (
        'img_path_list and keys should have the same length, '
        f'but got {len(img_path_list)} and {len(keys)}')
    print(f'Create lmdb for {data_path}, save to {lmdb_path}...')
    print(f'Total images: {len(img_path_list)}')
    if not lmdb_path.endswith('.lmdb'):
        raise ValueError("lmdb_path must end with '.lmdb'.")
    # check if the lmdb file already exists
    if os.path.exists(lmdb_path):
        print('Folder [{:s}] already exists. Exit.'.format(lmdb_path))
        sys.exit(1)

    # create lmdb environment
    if map_size is None:
        # obtain data size for one image
        img = cv2.imread(os.path.join(data_path, img_path_list[0]),
                         cv2.IMREAD_UNCHANGED)
        _, img_byte = cv2.imencode(
            '.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level])
        data_size_per_img = img_byte.nbytes
        print('Data size per image is: ', data_size_per_img)
        data_size = data_size_per_img * len(img_path_list)
        map_size = data_size * 10

    env = lmdb.open(lmdb_path, map_size=map_size)

    # write data to lmdb
    pbar = ProgressBar(
        len(img_path_list))  #tqdm(total=len(img_path_list), unit='chunk')
    txn = env.begin(write=True)  # txn is a Transaction object
    txt_file = open(os.path.join(lmdb_path, 'meta_info.txt'), 'w')
    for idx, (path, key) in enumerate(zip(img_path_list, keys)):
        # pbar.update(1)
        # pbar.set_description(f'Write {key}')
        pbar.update('Write {}'.format(key))
        key_byte = key.encode('ascii')
        _, img_byte, img_shape = read_img_worker(os.path.join(data_path, path),
                                                 key, compress_level)
        h, w, c = img_shape

        txn.put(key_byte, img_byte)
        # write meta information
        txt_file.write(f'{key}.png ({h},{w},{c}) {compress_level}\n')
        if idx % batch == 0:
            txn.commit()
            txn = env.begin(write=True)
    # pbar.close()
    txn.commit()
    env.close()
    txt_file.close()
    print('\nFinish writing lmdb.')
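Hypothetical usage of create_lmdb_from_imgs (the directory layout and names are illustrative, not from the original project):

import os

img_dir = 'datasets/DIV2K/HR'
img_names = sorted(os.listdir(img_dir))
keys = [os.path.splitext(n)[0] for n in img_names]
create_lmdb_from_imgs(img_dir, 'datasets/DIV2K_HR.lmdb', img_names, keys)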
Example #28
def objective(arguments):
    """
    Main Pipeline for training and cross-validation. ToDo - Testing will be done separately in test.py.
    """
    """ Setup result directory and enable logging to file in it """
    outdir = make_results_dir(arguments)
    logger.init(outdir, logging.INFO)
    logger.info('Arguments:\n{}'.format(pformat(arguments)))
    """ Initialize Tensorboard """
    tensorboard_writer = initialize_tensorboard(outdir)
    """ Set random seed throughout python, pytorch and numpy """
    logger.info('Using Random Seed value as: %d' % arguments['random_seed'])
    torch.manual_seed(
        arguments['random_seed'])  # Set for pytorch, used for cuda as well.
    random.seed(arguments['random_seed'])  # Set for python
    np.random.seed(arguments['random_seed'])  # Set for numpy
    """ Set device - cpu or gpu """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    logger.info(f'Using device - {device}')
    """ Load Model with weights(if available) """
    model: torch.nn.Module = get_model(arguments.get('model_args')).to(device)
    """ Create loss function """
    criterion = create_loss(arguments['loss_args'])
    """ Create optimizer """
    optimizer = create_optimizer(model.parameters(),
                                 arguments['optimizer_args'])
    """ Load parameters for the Dataset """
    dataset: BaseDataset = create_dataset(arguments['dataset_args'],
                                          arguments['train_data_args'],
                                          arguments['val_data_args'])
    """ Generate all callbacks """
    callbacks: List[Callbacks] = generate_callbacks(arguments, dataset, device,
                                                    outdir)
    """ Debug the inputs to model and save graph to tensorboard """
    dataset.debug()
    dummy_input = (torch.rand(
        1,
        arguments['dataset_args']['name'].value['channels'],
        *arguments['dataset_args']['name'].value['image_size'],
    )).to(device)
    tensorboard_writer.save_graph(model, dummy_input)
    """ Pipeline - loop over the dataset multiple times """
    max_validation_accuracy = 0
    itr = 0

    best_model_path = None
    delete_old_models = True

    run_callbacks(callbacks,
                  model=model,
                  optimizer=optimizer,
                  mode=CallbackMode.ON_TRAIN_BEGIN)
    for epoch in range(arguments['nb_epochs']):
        """ Train the model """
        train_data_args = arguments['train_data_args']
        if train_data_args['to_train']:
            train_dataloader = dataset.train_dataloader
            progress_bar = ProgressBar(
                target=len(train_dataloader),
                clear=True,
                description=f"Training {epoch + 1}/{arguments['nb_epochs']}: ")
            loss_running_average = RunningAverage()

            run_callbacks(callbacks,
                          model=model,
                          optimizer=optimizer,
                          mode=CallbackMode.ON_EPOCH_BEGIN,
                          epoch=epoch)
            model.train()
            for i, data in enumerate(train_dataloader, 0):
                # get the inputs
                inputs, labels = data
                inputs = inputs.to(device)
                labels = labels.to(device)

                # zero the parameter gradients
                optimizer.zero_grad()

                # Forward Pass
                outputs = model(inputs)

                classification_loss = criterion(outputs, labels)
                tensorboard_writer.save_scalar('Classification_Loss',
                                               classification_loss.item(), itr)
                classification_loss.backward()
                optimizer.step()

                # Compute running loss. Not exact but efficient.
                running_loss = loss_running_average.add_new_sample(
                    classification_loss.item())
                progress_bar.update(i + 1, [
                    ('current loss', classification_loss.item()),
                    ('running loss', running_loss),
                ])
                tensorboard_writer.save_scalar('Training_Loss',
                                               classification_loss, itr)
                itr += 1

            # Callbacks ON_EPOCH_END should be run only when training is enabled. Thus call here.
            run_callbacks(callbacks,
                          model=model,
                          optimizer=optimizer,
                          mode=CallbackMode.ON_EPOCH_END,
                          epoch=epoch)
        """ Validate the model """
        val_data_args = arguments['val_data_args']
        if val_data_args['validate_step_size'] > 0 and \
                epoch % val_data_args['validate_step_size'] == 0:
            correct, total = 0, 0
            validation_dataloader = dataset.validation_dataloader
            progress_bar = ProgressBar(
                target=len(validation_dataloader),
                clear=True,
                description=f"Validating {epoch + 1}/{arguments['nb_epochs']}: "
            )
            model.eval()
            with torch.no_grad():
                for i, data in enumerate(validation_dataloader, 0):
                    inputs, labels = data
                    inputs = inputs.to(device)
                    labels = labels.to(device)

                    outputs = model(inputs)
                    _, predicted = torch.max(outputs.data, 1)
                    total += labels.size(0)
                    correct += (predicted == labels).sum().item()

                    progress_bar.update(i + 1, [
                        ('Batch Accuracy', 100 * correct / total),
                    ])

            val_accuracy = 100 * correct / total
            tensorboard_writer.save_scalar('Validation_Accuracy', val_accuracy,
                                           itr)
            logger.info(
                f'Accuracy of the network on the {dataset.get_val_dataset_size} validation images: {val_accuracy}%'
            )
            """ Save Model """
            if val_accuracy > max_validation_accuracy:
                if delete_old_models and best_model_path:
                    delete_old_file(best_model_path)
                best_model_path = os.path.join(
                    outdir,
                    f'epoch_{epoch:04}-model-val_accuracy_{val_accuracy}.pth')
                torch.save(model.state_dict(), best_model_path)
                max_validation_accuracy = val_accuracy

        tensorboard_writer.flush()

        # Exit loop if training not needed
        if not train_data_args['to_train']:
            break

    run_callbacks(callbacks,
                  model=model,
                  optimizer=optimizer,
                  mode=CallbackMode.ON_TRAIN_END)

    logger.info('Finished Training')
    close_tensorboard()
    logger.info(f'Max Validation accuracy is {max_validation_accuracy}')
    return max_validation_accuracy  # Return in case you later want to add hyperopt.
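Example #28 shows a third calling convention: ProgressBar(target=..., clear=..., description=...) with update(step, list_of_name_value_pairs), in the spirit of Keras progress bars. A rough sketch of that assumed interface, under a hypothetical name:

class KeywordProgressBar:
    # hypothetical stand-in for the trainer's keyword-style ProgressBar
    def __init__(self, target, clear=True, description=""):
        self.target = target
        self.clear = clear          # whether to erase the bar when done
        self.description = description

    def update(self, step, values=()):
        metrics = " - ".join("%s: %.4f" % (k, v) for k, v in values)
        print("\r%s%d/%d %s" % (self.description, step, self.target, metrics),
              end="", flush=True)
        if step >= self.target:
            if self.clear:
                print("\r" + " " * 79, end="\r", flush=True)  # erase the bar
            else:
                print()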