Example #1
    def __init__(self, exec_conf: ExecutionConfig, mesh_conf, fragmentation_conf: FragmentationConfig):
        self._profiler = Profiler(enable_profiler)
        _logger.info("Solver: {}".format(self.openfoam_solver))
        self.exec_config = exec_conf
        self.result_dir = exec_conf.output_dir
        files.create_directory(self.result_dir)
        self.results = {}
        result_file_geom_prefix = None
        if type(mesh_conf) is SimpleBlockMeshConfig:
            result_file_geom_prefix = "gw{}_gh{}_gl{}".format(
                mesh_conf.width_mm,
                mesh_conf.height_mm,
                mesh_conf.length_mm)
            self.geom_values = "{}\t{}\t{}".format(
                mesh_conf.width_mm,
                mesh_conf.height_mm,
                mesh_conf.length_mm)
            self.geom_titles = "Geometry width\tGeometry height\tGeometry length"
        else:
            result_file_geom_prefix = ""
            self.geom_values = ""
            self.geom_titles = ""

            for line_idx in range(len(mesh_conf.width_lines)):
                result_file_geom_prefix += "w{}={}_".format(line_idx, mesh_conf.width_lines[line_idx])
                self.geom_titles += "{} {}\t".format("Width line", line_idx)
                self.geom_values += "{}\t".format(mesh_conf.width_lines[line_idx])

            for line_idx in range(len(mesh_conf.height_distance)):
                result_file_geom_prefix += "h{}={}_".format(line_idx, mesh_conf.height_distance[line_idx])
                self.geom_titles += "{} {}\t".format("Height line", line_idx)
                self.geom_values += "{}\t".format(mesh_conf.height_distance[line_idx])

            result_file_geom_prefix += "l={}".format(mesh_conf.length)
            self.geom_titles += "{}".format("Length")
            self.geom_values += "{}".format(mesh_conf.length)

        _logger.debug(result_file_geom_prefix)
        _logger.debug(self.geom_values)

        fragmentation_options_line = "fw{}_fh{}_fl{}".format(
            fragmentation_conf.width,
            fragmentation_conf.height,
            fragmentation_conf.length)
        self.fragmentation_values = "{}\t{}\t{}".format(
            fragmentation_conf.width,
            fragmentation_conf.height,
            fragmentation_conf.length)

        result_file_name = "result_{}_{}.txt".format(result_file_geom_prefix, fragmentation_options_line)
        self.result_file = os.path.join(self.result_dir, result_file_name)
        self.parsed_name = datetime.datetime.now().strftime("%Y-%m-%d-%H.txt")
Example #2
    def __init__(self,
                 mesh_config: SimpleBlockMeshConfig,
                 fragmentation_config: FragmentationConfig,
                 execution_config: ExecutionConfig = ExecutionConfig()):
        self.mesh_config = mesh_config
        self.fragmentation_config = fragmentation_config
        self.exec_config = execution_config
        self.out_file = "system/blockMeshDict"

        if self.exec_config is not None:
            self.out_file = os.path.join(execution_config.execution_folder,
                                         self.out_file)

        self._profiler = Profiler(enable_profiler)
Example #3
def start_traverse(
    game: GameInterface,
    player_to_train: int,
    regretModels: List[Optional[RegretMatching]],
    strategyModels: List[Optional[RegretMatching]],
) -> Tuple[int, ExpandableTensorSet, ExpandableTensorSet, Counter]:
    lowpriority()
    NUM_INNER_GAME_ITERATIONS = 100
    with torch.no_grad():
        playerRegret = ExpandableTensorSet(
            16 * 1024,
            (game.feature_dim(), game.action_dim(), game.action_dim()))
        strategyData = ExpandableTensorSet(
            16 * 1024,
            (game.feature_dim(), game.action_dim(), game.action_dim()))
        metrics: Counter = Counter()
        for _ in range(NUM_INNER_GAME_ITERATIONS):
            ng = game.clone()
            ng.reset()
            with Profiler(False):
                traverse(
                    ng,
                    player_to_train,
                    regretModels,
                    playerRegret,
                    strategyModels,
                    strategyData,
                    metrics,
                    0,
                    True,
                    1,
                )
        # print(metrics)

    return player_to_train, playerRegret, strategyData, metrics
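Example #3 (and #6 and #15 below) uses Profiler as a context manager whose constructor flag switches timing on or off. The following is not the project's Profiler implementation, only a minimal sketch of that pattern, assuming a perf_counter-based timer; the class name TimerBlock is illustrative:

import time

class TimerBlock:
    """Rough stand-in for a boolean-gated profiling context manager."""

    def __init__(self, enabled: bool = True, label: str = "block"):
        self.enabled = enabled
        self.label = label

    def __enter__(self):
        if self.enabled:
            self._start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc, tb):
        if self.enabled:
            print("{}: {:.4f} s".format(self.label, time.perf_counter() - self._start))
        return False  # never suppress exceptions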
Example #4
def main(p: Parameters, o: Options):
    profiler = Profiler()
    profiler.event("start")
    if o.verbose:
        print(f"Experimenting with parameters: {p}")
    accuracy_results = experiment(p, o)
    profiler.event("end")
    print(profiler.summary(human=True))
    config.save_accuracy(accuracy_results)
    def do_train():
        model, optimizer = p.model.make_model_and_optimizer(
            dataset.input_shape, dataset.num_classes, o.use_cuda)

        def generate_epochs_callbacks():
            epochs_callbacks = []
            for epoch in p.savepoints:

                def callback(epoch=epoch):
                    scores = training.eval_scores(
                        model, dataset, p.transformations,
                        TransformationStrategy.random_sample,
                        o.get_eval_options())
                    if o.verbose_general:
                        print(
                            f"Saving model {model.name} at epoch {epoch}/{p.epochs}."
                        )
                    training.save_model(p, o, model, scores,
                                        experiment.model_path(p, epoch))

                epochs_callbacks.append((epoch, callback))

            return dict(epochs_callbacks)

        epochs_callbacks = generate_epochs_callbacks()

        # TRAINING
        if 0 in p.savepoints:
            scores = training.eval_scores(model, dataset, p.transformations,
                                          TransformationStrategy.random_sample,
                                          o.get_eval_options())
            print(f"Saving model {model.name} at epoch {0} (before training).")
            training.save_model(p, o, model, scores,
                                experiment.model_path(p, 0))
        pr = Profiler()
        pr.event("start")
        scores, history = training.run(p,
                                       o,
                                       model,
                                       optimizer,
                                       dataset,
                                       epochs_callbacks=epochs_callbacks)
        pr.event("end")
        print(pr.summary(human=True))

        training.print_scores(scores)
        return model, history, scores
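Example #4 records named events ("start", "end") and prints profiler.summary(human=True). A minimal sketch of such an event-based profiler, assuming it only stores (label, timestamp) pairs and reports the span between the first and last event; the class name EventProfiler is an assumption, not the project's API:

import time

class EventProfiler:
    def __init__(self):
        self.events = []  # list of (label, timestamp) pairs

    def event(self, label: str):
        self.events.append((label, time.perf_counter()))

    def summary(self, human: bool = False):
        if len(self.events) < 2:
            return "not enough events"
        total = self.events[-1][1] - self.events[0][1]
        return "total: {:.2f} s".format(total) if human else total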
Example #6
    def start_game(self):
        if self.on_iter == self.max_games:
            return
        self.on_game += 1
        # print("Starting", self.on_game)
        ng = self.game.clone()
        ng.reset()
        with Profiler(False):
            metrics = Counter()
            policy_network = random.choice(self.policy_networks)
            if policy_network.num_steps >= 100:
                eval_net = copy.deepcopy(policy_network).cpu().eval()
            else:
                eval_net = None
            self.pool.apply_async(
                start_traverse,
                args=(ng, eval_net, metrics, 0,),
                callback=self.finish_game,
            )
Example #7
def evaluate(data, model, scale, device):
    X, y, _, _ = data.split(1, shuffle=False)
    X, y = prep_data(X, y, device)

    X = scale(X)

    model.eval()

    profiler = Profiler("INFERENCE TIME")
    profiler.tick()
    pred = model(X)
    profiler.tock()
    print(profiler, end='\n\n')

    criterion = get_loss('bce')
    error = criterion(pred, y)
    print("loss: {:.4f}".format(error))

    acc = accuracy(torch.argmin(pred, dim=1), y[:, 0])
    print("accuracy: {:.4f}".format(acc))
Example #8
def train(run_id: str, data_dir: str, validate_data_dir: str, models_dir: Path,
          umap_every: int, save_every: int, backup_every: int, vis_every: int,
          validate_every: int, force_restart: bool, visdom_server: str,
          port: str, no_visdom: bool):
    # Create a dataset and a dataloader
    train_dataset = LandmarkDataset(data_dir, img_per_cls, train=True)
    train_loader = LandmarkDataLoader(
        train_dataset,
        cls_per_batch,
        img_per_cls,
        num_workers=6,
    )

    validate_dataset = LandmarkDataset(validate_data_dir,
                                       v_img_per_cls,
                                       train=False)
    validate_loader = LandmarkDataLoader(
        validate_dataset,
        v_cls_per_batch,
        v_img_per_cls,
        num_workers=4,
    )

    validate_iter = iter(validate_loader)

    criterion = torch.nn.CrossEntropyLoss()

    # Setup the device on which to run the forward pass and the loss. These can be different,
    # because the forward pass is faster on the GPU whereas the loss is often (depending on your
    # hyperparameters) faster on the CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # FIXME: currently, the gradient is None if loss_device is cuda
    # loss_device = torch.device("cpu")
    # fixed by https://github.com/CorentinJ/Real-Time-Voice-Cloning/issues/237
    loss_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Create the model and the optimizer
    model = Encoder(device, loss_device)
    arc_face = ArcFace(model_embedding_size,
                       num_class,
                       scale=30,
                       m=0.35,
                       device=device)

    multi_gpu = False
    # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if torch.cuda.device_count() > 1:
        multi_gpu = True
        model = torch.nn.DataParallel(model)
        arc_face = torch.nn.DataParallel(arc_face)
    model.to(device)
    arc_face.to(device)

    optimizer = torch.optim.SGD([{
        'params': model.parameters()
    }, {
        'params': arc_face.parameters()
    }],
                                lr=learning_rate_init,
                                momentum=0.9)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer,
                                                step_size=25000,
                                                gamma=0.5)

    init_step = 1

    # Configure file path for the model
    state_fpath = models_dir.joinpath(run_id + ".pt")
    pretrained_path = state_fpath

    backup_dir = models_dir.joinpath(run_id + "_backups")

    # Load any existing model
    if not force_restart:
        if state_fpath.exists():
            print(
                "Found existing model \"%s\", loading it and resuming training."
                % run_id)
            checkpoint = torch.load(pretrained_path)
            init_step = checkpoint["step"]
            model.load_state_dict(checkpoint["model_state"])
            optimizer.load_state_dict(checkpoint["optimizer_state"])
            optimizer.param_groups[0]["lr"] = learning_rate_init
        else:
            print("No model \"%s\" found, starting training from scratch." %
                  run_id)
    else:
        print("Starting the training from scratch.")
    model.train()

    # Initialize the visualization environment
    vis = Visualizations(run_id,
                         vis_every,
                         server=visdom_server,
                         port=port,
                         disabled=no_visdom)
    vis.log_dataset(train_dataset)
    vis.log_params()
    device_name = str(
        torch.cuda.get_device_name(0) if torch.cuda.is_available() else "CPU")
    vis.log_implementation({"Device": device_name})

    # Training loop
    profiler = Profiler(summarize_every=500, disabled=False)
    for step, cls_batch in enumerate(train_loader, init_step):
        profiler.tick("Blocking, waiting for batch (threaded)")

        # Forward pass
        inputs = torch.from_numpy(cls_batch.data).float().to(device)
        labels = torch.from_numpy(cls_batch.labels).long().to(device)
        sync(device)
        profiler.tick("Data to %s" % device)

        embeds = model(inputs)
        sync(device)
        profiler.tick("Forward pass")

        output = arc_face(embeds, labels)
        loss = criterion(output, labels)
        sync(device)
        profiler.tick("Loss")

        # Backward pass
        optimizer.zero_grad()
        loss.backward()
        profiler.tick("Backward pass")

        optimizer.step()
        scheduler.step()
        profiler.tick("Parameter update")

        acc = get_acc(output, labels)
        # Update visualizations
        # learning_rate = optimizer.param_groups[0]["lr"]
        vis.update(loss.item(), acc, step)

        print("step {}, loss: {}, acc: {}".format(step, loss.item(), acc))

        # Draw projections and save them to the backup folder
        if umap_every != 0 and step % umap_every == 0:
            print("Drawing and saving projections (step %d)" % step)
            projection_dir = backup_dir / 'projections'
            projection_dir.mkdir(exist_ok=True, parents=True)
            projection_fpath = projection_dir.joinpath("%s_umap_%d.png" %
                                                       (run_id, step))
            embeds = embeds.detach()
            embeds = (embeds /
                      torch.norm(embeds, dim=1, keepdim=True)).cpu().numpy()
            vis.draw_projections(embeds, img_per_cls, step, projection_fpath)
            vis.save()

        # Overwrite the latest version of the model
        if save_every != 0 and step % save_every == 0:
            print("Saving the model (step %d)" % step)
            torch.save(
                {
                    "step": step + 1,
                    "model_state": model.state_dict(),
                    "optimizer_state": optimizer.state_dict(),
                }, state_fpath)

        # Make a backup
        if backup_every != 0 and step % backup_every == 0:
            if step > 4000:  # don't save until 4k steps
                print("Making a backup (step %d)" % step)

                ckpt_dir = backup_dir / 'ckpt'
                ckpt_dir.mkdir(exist_ok=True, parents=True)
                backup_fpath = ckpt_dir.joinpath("%s_%d.pt" % (run_id, step))
                torch.save(
                    {
                        "step": step + 1,
                        "model_state": model.state_dict(),
                        "optimizer_state": optimizer.state_dict(),
                    }, backup_fpath)

        # Do validation
        if validate_every != 0 and step % validate_every == 0:
            # validation loss, acc
            model.eval()
            for i in range(num_validate):
                with torch.no_grad():
                    validate_cls_batch = next(validate_iter)
                    validate_inputs = torch.from_numpy(
                        validate_cls_batch.data).float().to(device)
                    validate_labels = torch.from_numpy(
                        validate_cls_batch.labels).long().to(device)
                    validate_embeds = model(validate_inputs)
                    validate_output = arc_face(validate_embeds, validate_labels)
                    validate_loss = criterion(validate_output, validate_labels)
                    validate_acc = get_acc(validate_output, validate_labels)

                vis.update_validate(validate_loss.item(), validate_acc, step,
                                    num_validate)

            # take the last one for drawing projection
            projection_dir = backup_dir / 'v_projections'
            projection_dir.mkdir(exist_ok=True, parents=True)
            projection_fpath = projection_dir.joinpath("%s_umap_%d.png" %
                                                       (run_id, step))
            validate_embeds = validate_embeds.detach()
            validate_embeds = (validate_embeds / torch.norm(
                validate_embeds, dim=1, keepdim=True)).cpu().numpy()
            vis.draw_projections(validate_embeds,
                                 v_img_per_cls,
                                 step,
                                 projection_fpath,
                                 is_validate=True)
            vis.save()

            model.train()

        profiler.tick("Extras (visualizations, saving)")
Example #9
def train(run_id: str, clean_data_root: Path, models_dir: Path,
          umap_every: int, save_every: int, backup_every: int, vis_every: int,
          force_restart: bool, visdom_server: str, no_visdom: bool):

    dataset = SpeakerVerificationDataset(clean_data_root)
    loader = SpeakerVerificationDataLoader(
        dataset,
        speakers_per_batch,
        utterances_per_speaker,
        num_workers=8,
    )

    # cuda
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    loss_device = torch.device("cpu")

    # Create the model and the optimizer
    model = SpeakerEncoder(device, loss_device)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate_init)
    init_step = 1

    # Configure the file path for the model
    state_fpath = models_dir.joinpath(run_id + ".pt")
    backup_dir = models_dir.joinpath(run_id + "_backups")

    model.train()

    # Initialize the visualization environment (visdom)
    vis = Visualizations(run_id,
                         vis_every,
                         server=visdom_server,
                         disabled=no_visdom)
    device_name = str(
        torch.cuda.get_device_name(0) if torch.cuda.is_available() else "CPU")

    # Start training
    profiler = Profiler(summarize_every=10, disabled=False)
    for step, speaker_batch in enumerate(loader, init_step):
        profiler.tick("Blocking, waiting for batch (threaded)")

        # Forward pass
        inputs = torch.from_numpy(speaker_batch.data).to(device)
        sync(device)
        profiler.tick("Data to %s" % device)
        embeds = model(inputs)
        sync(device)
        profiler.tick("Forward pass")
        embeds_loss = embeds.view(
            (speakers_per_batch, utterances_per_speaker, -1)).to(loss_device)
        loss, eer = model.loss(embeds_loss)
        sync(loss_device)
        profiler.tick("Loss")

        # Backward pass
        model.zero_grad()
        loss.backward()
        profiler.tick("Backward pass")
        model.do_gradient_ops()
        optimizer.step()
        profiler.tick("Parameter update")
        vis.update(loss.item(), eer, step)

        # Draw a UMAP projection visualization and save the image
        if umap_every != 0 and step % umap_every == 0:
            # print("Drawing and saving projections (step %d)" % step)
            backup_dir.mkdir(exist_ok=True)
            projection_fpath = backup_dir.joinpath("%s_umap_%06d.png" %
                                                   (run_id, step))
            embeds = embeds.detach().cpu().numpy()
            vis.draw_projections(embeds, utterances_per_speaker, step,
                                 projection_fpath)
            vis.save()

        # Update the model
        if save_every != 0 and step % save_every == 0:
            # print("Saving the model (step %d)" % step)
            torch.save(
                {
                    "step": step + 1,
                    "model_state": model.state_dict(),
                    "optimizer_state": optimizer.state_dict(),
                }, state_fpath)

        # Make a backup
        if backup_every != 0 and step % backup_every == 0:
            # print("Making a backup (step %d)" % step)
            backup_dir.mkdir(exist_ok=True)
            backup_fpath = backup_dir.joinpath("%s_bak_%06d.pt" %
                                               (run_id, step))
            torch.save(
                {
                    "step": step + 1,
                    "model_state": model.state_dict(),
                    "optimizer_state": optimizer.state_dict(),
                }, backup_fpath)

        profiler.tick("Extras (visualizations, saving)")
Example #10
def train(run_id: str, clean_data_root: Path, models_dir: Path, umap_every: int, save_every: int,
          backup_every: int, vis_every: int, force_restart: bool, visdom_server: str,
          no_visdom: bool):
    # Create a dataset and a dataloader
    dataset = SpeakerVerificationDataset(clean_data_root)
    loader = SpeakerVerificationDataLoader(
        dataset,
        speakers_per_batch,
        utterances_per_speaker,
        num_workers=8,
    )
    
    # Setup the device on which to run the forward pass and the loss. These can be different, 
    # because the forward pass is faster on the GPU whereas the loss is often (depending on your
    # hyperparameters) faster on the CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # FIXME: currently, the gradient is None if loss_device is cuda
    loss_device = torch.device("cpu")
    
    # Create the model and the optimizer
    model = SpeakerEncoder(device, loss_device)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate_init)
    init_step = 1
    
    # Configure file path for the model
    state_fpath = models_dir.joinpath(run_id + ".pt")
    backup_dir = models_dir.joinpath(run_id + "_backups")

    # Load any existing model
    if not force_restart:
        if state_fpath.exists():
            print("Found existing model \"%s\", loading it and resuming training." % run_id)
            checkpoint = torch.load(state_fpath)
            init_step = checkpoint["step"]
            model.load_state_dict(checkpoint["model_state"])
            optimizer.load_state_dict(checkpoint["optimizer_state"])
            optimizer.param_groups[0]["lr"] = learning_rate_init
        else:
            print("No model \"%s\" found, starting training from scratch." % run_id)
    else:
        print("Starting the training from scratch.")
    model.train()
    
    # Initialize the visualization environment
    vis = Visualizations(run_id, vis_every, server=visdom_server, disabled=no_visdom)
    vis.log_dataset(dataset)
    vis.log_params()
    device_name = str(torch.cuda.get_device_name(0) if torch.cuda.is_available() else "CPU")
    vis.log_implementation({"Device": device_name})
    
    # Training loop
    profiler = Profiler(summarize_every=10, disabled=False)
    for step, speaker_batch in enumerate(loader, init_step):
        profiler.tick("Blocking, waiting for batch (threaded)")
        
        # Forward pass
        inputs = torch.from_numpy(speaker_batch.data).to(device)
        sync(device)
        profiler.tick("Data to %s" % device)
        embeds = model(inputs)
        sync(device)
        profiler.tick("Forward pass")
        embeds_loss = embeds.view((speakers_per_batch, utterances_per_speaker, -1)).to(loss_device)
        loss, eer = model.loss(embeds_loss)
        sync(loss_device)
        profiler.tick("Loss")

        # Backward pass
        model.zero_grad()
        loss.backward()
        profiler.tick("Backward pass")
        model.do_gradient_ops()
        optimizer.step()
        profiler.tick("Parameter update")
        
        # Update visualizations
        # learning_rate = optimizer.param_groups[0]["lr"]
        vis.update(loss.item(), eer, step)
        
        # Draw projections and save them to the backup folder
        if umap_every != 0 and step % umap_every == 0:
            print("Drawing and saving projections (step %d)" % step)
            backup_dir.mkdir(exist_ok=True)
            projection_fpath = backup_dir.joinpath("%s_umap_%06d.png" % (run_id, step))
            embeds = embeds.detach().cpu().numpy()
            vis.draw_projections(embeds, utterances_per_speaker, step, projection_fpath)
            vis.save()

        # Overwrite the latest version of the model
        if save_every != 0 and step % save_every == 0:
            print("Saving the model (step %d)" % step)
            torch.save({
                "step": step + 1,
                "model_state": model.state_dict(),
                "optimizer_state": optimizer.state_dict(),
            }, state_fpath)
            
        # Make a backup
        if backup_every != 0 and step % backup_every == 0:
            print("Making a backup (step %d)" % step)
            backup_dir.mkdir(exist_ok=True)
            backup_fpath = backup_dir.joinpath("%s_bak_%06d.pt" % (run_id, step))
            torch.save({
                "step": step + 1,
                "model_state": model.state_dict(),
                "optimizer_state": optimizer.state_dict(),
            }, backup_fpath)
            
        profiler.tick("Extras (visualizations, saving)")
Example #11
class SimpleBlockMeshGenerator:
    def __init__(self,
                 mesh_config: SimpleBlockMeshConfig,
                 fragmentation_config: FragmentationConfig,
                 execution_config: ExecutionConfig = ExecutionConfig()):
        self.mesh_config = mesh_config
        self.fragmentation_config = fragmentation_config
        self.exec_config = execution_config
        self.out_file = "system/blockMeshDict"

        if self.exec_config is not None:
            self.out_file = os.path.join(execution_config.execution_folder,
                                         self.out_file)

        self._profiler = Profiler(enable_profiler)

    def __del__(self):
        self._profiler.print_report()

    def create(self, custom_out_file=None):
        _logger.info("\n\n===== Run geometry generating")
        self._profiler.start("Geometry creating")
        self._print_configuration()

        self._calculate_points()
        self._calculate_fragmentation()
        self._calculate_boundary()

        text = self._format_text()

        file_to_write = self.out_file
        if custom_out_file != 0 and (custom_out_file
                                     is not None) and (len(custom_out_file)):
            file_to_write = custom_out_file
        if custom_out_file == 0:
            file_to_write = 0

        self.save_geometry(text, file_to_write)
        self._profiler.stop("Geometry creating")
        _logger.info("===== End geometry generating\n\n")

    def generate(self):
        _logger.info("\n\n===== Run block mesh generating")
        self._profiler.start("Mesh generating")
        self.generate_mesh()
        self._profiler.stop("Mesh generating")
        # Just because there are lots of logs from the blockMesh command
        _logger.info("===== End block mesh generating\n\n")

    def _print_configuration(self):
        _logger.log(LogLvl.LOG_INFO, "Generate mesh with size:")
        _logger.log(
            LogLvl.LOG_INFO,
            "width_mm: {}\theight_mm: {}\tlength_mm: {}".format(
                self.mesh_config.width_mm, self.mesh_config.height_mm,
                self.mesh_config.length_mm))

        _logger.log(LogLvl.LOG_INFO, "Generate mesh with fragmentation:")
        _logger.log(
            LogLvl.LOG_INFO, "{:>25}{:>25}{:>25}\n".format(
                "width_fragmentation: " + str(self.fragmentation_config.width),
                "height_fragmentation: " +
                str(self.fragmentation_config.height),
                "length_fragmentation: " +
                str(self.fragmentation_config.length)))

    def _calculate_points(self):
        # TODO: add a check that convertToMeters is 0.001 (m to mm)
        width_mm = self.mesh_config.width_mm
        height_mm = self.mesh_config.height_mm
        length_mm = self.mesh_config.length_mm

        p1 = [0, 0, 0]
        p2 = [width_mm, 0, 0]
        p3 = [width_mm, height_mm, 0]
        p4 = [0, height_mm, 0]
        p5 = [0, 0, length_mm]
        p6 = [width_mm, 0, length_mm]
        p7 = [width_mm, height_mm, length_mm]
        p8 = [0, height_mm, length_mm]

        arr = [p1, p2, p3, p4, p5, p6, p7, p8]
        self.points = ""
        for i in range(len(arr)):
            self.points += "    ({} {} {})".format(arr[i][0], arr[i][1],
                                                   arr[i][2])
            if i + 1 != len(arr):
                self.points += "\n"

    def _calculate_fragmentation(self):
        mesh_elem_size_mm = self.fragmentation_config.elem_size_mm
        assert (self.mesh_config.width_mm >= mesh_elem_size_mm)
        assert (self.mesh_config.height_mm >= mesh_elem_size_mm)
        assert (self.mesh_config.length_mm >= mesh_elem_size_mm)

        # TODO enable dynamic fragmentation
        # length_fragmentation = int(float(length_mm) / mesh_elem_size_mm)
        # height_fragmentation = int(float(height_mm) / mesh_elem_size_mm)
        # width_fragmentation = int(float(width_mm) / mesh_elem_size_mm)
        # TODO try to use 6 2 1 (default in tutorial)

        # x - width, y - height, z - length
        self.fragmentation = "    hex (0 1 2 3 4 5 6 7) ({} {} {}) simpleGrading (1.0 1.0 1.0)".format(
            self.fragmentation_config.width, self.fragmentation_config.height,
            self.fragmentation_config.length)

    def _calculate_boundary(self):
        _logger.error("Not implemented")

    def _format_text(self):
        t = Template(MESH_FILE_TEMPLATE)
        boundary = """    topSurface
    {
        type patch;
        faces
        (
            (2 3 7 6)
        );
    }

    bottomSurface
    {
        type patch;
        faces
        (
            (0 1 5 4)
        );
    }

    rearFixedEnd
    {
        type patch;
        faces
        (
            (4 5 6 7)
        );
    }

    frontTractionEnd
    {
        type patch;
        faces
        (
            (0 1 2 3)
        );
    }

    leftSurface
    {
        type patch;
        faces
        (
            (0 3 7 4)
        );
    }
    
    rightSurface
    {
        type patch;
        faces
        (
            (1 2 6 5)
        );
    }"""
        return t.substitute(points=self.points,
                            fragmentation=self.fragmentation,
                            boundary=boundary)

    @staticmethod
    def save_geometry(text, filename):
        if filename != 0:
            _logger.debug("Save file to: {}".format(filename))
            f = open(filename, "w+")
            f.writelines(text)
            f.close()
        else:
            print(text)

    def generate_mesh(self):
        # FIXME: temporary solution
        # FIXME: does not check that it is not None
        openfoam_folder = "OPENFOAM FOLDER NOT SPECIFIED"
        if self.exec_config is not None:
            if self.exec_config.openfoam_folder is not None:
                openfoam_folder = self.exec_config.openfoam_folder

        env_script = "ENV SCRIPT NOT SPECIFIED"
        if self.exec_config is not None:
            if self.exec_config.prepare_env_script is not None:
                env_script = self.exec_config.prepare_env_script

        # Make sure the script has the lines where FOAM_INST_DIR is set commented out
        prepare_call = "export FOAM_INST_DIR=" + openfoam_folder
        prepare_call += "; "
        prepare_call += ". " + env_script
        prepare_call += "; "
        prepare_call += "cd " + self.exec_config.execution_folder
        try:
            command = "{}; {}".format(prepare_call, "blockMesh")
            _logger.info(command)
            if _logger.log_lvl == LogLvl.LOG_DEBUG or print_mesh_stats:
                subprocess.call("{}".format(command), shell=True)
            else:
                subprocess.call("{} > /dev/null".format(command), shell=True)
        except OSError:
            raise OSError(
                "blockMesh not found. Please check that you have prepared OpenFOAM environment"
            )
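A possible call sequence for SimpleBlockMeshGenerator, inferred from the methods shown above. The keyword arguments to the three config classes are guesses and may not match their real constructors; only create() and generate() come from the class itself:

# hypothetical driver script for the class above
mesh_conf = SimpleBlockMeshConfig(width_mm=10, height_mm=2, length_mm=100)  # assumed signature
frag_conf = FragmentationConfig(width=10, height=2, length=100)             # assumed signature
exec_conf = ExecutionConfig()  # assumed to carry execution_folder and the OpenFOAM paths

generator = SimpleBlockMeshGenerator(mesh_conf, frag_conf, exec_conf)
generator.create()    # writes system/blockMeshDict under the execution folder
generator.generate()  # runs blockMesh in the prepared OpenFOAM environment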
Example #12
def train(run_id: str, train_data_root: Path, test_data_root: Path,
          models_dir: Path, save_every: int, backup_every: int, vis_every: int,
          force_restart: bool, visdom_server: str, no_visdom: bool):
    # Create a dataset and a dataloader
    dataset = SpeakerVerificationDataset(train_data_root)
    loader = SpeakerVerificationDataLoader(
        dataset,
        speakers_per_batch,
        utterances_per_speaker,
        num_workers=dataloader_workers,
        # pin_memory=True,
    )
    test_dataset = SpeakerVerificationDataset(test_data_root)
    testdata_loader = SpeakerVerificationDataLoader(
        test_dataset,
        speakers_per_batch,
        utterances_per_speaker,
        num_workers=dataloader_workers,
        # pin_memory=True,
    )

    # Setup the device on which to run the forward pass and the loss. These can be different,
    # because the forward pass is faster on the GPU whereas the loss is often (depending on your
    # hyperparameters) faster on the CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Create the model and the optimizer
    model = SpeakerEncoder(device)
    raw_model = model
    if torch.cuda.device_count() > 1:
        print("Use", torch.cuda.device_count(), "GPUs.")
        # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
        model = torch.nn.DataParallel(model)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate_init)
    init_step = 1

    # Configure file path for the model
    state_fpath = models_dir.joinpath(run_id + ".pt")
    backup_dir = models_dir.joinpath(run_id + "_backups")

    # Load any existing model
    if not force_restart:
        if state_fpath.exists():
            print(
                "Found existing model \"%s\", loading it and resuming training."
                % run_id)
            checkpoint = torch.load(str(state_fpath))
            init_step = checkpoint["step"]
            raw_model.load_state_dict(checkpoint["model_state"])
            optimizer.load_state_dict(checkpoint["optimizer_state"])
            optimizer.param_groups[0]["lr"] = learning_rate_init
        else:
            print("No model \"%s\" found, starting training from scratch." %
                  run_id)
    else:
        print("Starting the training from scratch.")
    model.train()

    save_interval_s_time = time.time()
    prt_interval_s_time = time.time()
    total_loss, total_eer = 0, 0
    # Training loop
    profiler = Profiler(summarize_every=1, disabled=True)
    for step, speaker_batch in enumerate(loader, init_step):
        # step_s_time = time.time()
        sync(device)
        profiler.tick("Blocking, waiting for batch (threaded)")

        # Forward pass
        inputs = torch.from_numpy(speaker_batch.data).to(device)
        sync(device)
        profiler.tick("Data to %s" % device)
        embeds = model(inputs)
        sync(device)
        profiler.tick("Forward pass")
        embeds_loss = embeds.view(
            (speakers_per_batch, utterances_per_speaker, -1))
        loss, eer = raw_model.loss(embeds_loss)
        # print(loss.item(), flush=True)
        total_loss += loss.item()
        total_eer += eer
        sync(device)
        profiler.tick("Loss")

        # Backward pass
        model.zero_grad()
        loss.backward()
        profiler.tick("Backward pass")
        raw_model.do_gradient_ops()
        optimizer.step()
        sync(device)
        profiler.tick("Parameter update")

        if step % vis_every == 0:
            learning_rate = optimizer.param_groups[0]["lr"]
            prt_interval_e_time = time.time()
            cost_time = prt_interval_e_time - prt_interval_s_time
            prt_interval_s_time = prt_interval_e_time
            print(
                "    Step %06d> %d step cost %d seconds, lr:%.4f, Avg_loss:%.4f, Avg_eer:%.4f."
                % (
                    #   step, save_every, cost_time, loss.detach().numpy(), eer), flush=True)
                    step,
                    vis_every,
                    cost_time,
                    learning_rate,
                    total_loss / vis_every,
                    total_eer / vis_every),
                flush=True)
            total_loss, total_eer = 0, 0

        # Overwrite the latest version of the model && test model
        # save_every = 20
        if save_every != 0 and step % save_every == 0:
            # save
            torch.save(
                {
                    "step": step + 1,
                    "model_state": model.state_dict(),
                    "optimizer_state": optimizer.state_dict(),
                }, str(state_fpath))

            # test
            test_total_loss, test_total_eer = 0.0, 0.0
            for test_step, test_batch in enumerate(testdata_loader, 1):
                testinputs = torch.from_numpy(test_batch.data).to(device)
                with torch.no_grad():
                    test_embeds = model(testinputs)
                    test_embeds_loss = test_embeds.view(
                        (speakers_per_batch, utterances_per_speaker, -1))
                    test_loss, test_eer = raw_model.loss(test_embeds_loss)
                # print(loss.item(), flush=True)
                test_total_loss += test_loss.item()
                test_total_eer += test_eer
                test_prt_interval = 10
                if test_step % test_prt_interval == 0:
                    print(
                        "    |--Test Step %06d> Avg_loss:%.4f, Avg_eer:%.4f." %
                        (test_step, test_total_loss / test_step,
                         test_total_eer / test_step),
                        flush=True)
                if test_step == 50:
                    break

            # print log
            save_interval_e_time = time.time()
            cost_time = save_interval_e_time - save_interval_s_time
            print(
                "\n"
                "++++Step %06d> Saving the model, %d step cost %d seconds." % (
                    #   step, save_every, cost_time, loss.detach().numpy(), eer), flush=True)
                    step,
                    save_every,
                    cost_time),
                flush=True)
            save_interval_s_time = save_interval_e_time

        # Make a backup
        if backup_every != 0 and step % backup_every == 0:
            print("Making a backup (step %d)" % step)
            backup_dir.mkdir(exist_ok=True)
            backup_fpath = str(
                backup_dir.joinpath("%s_bak_%06d.pt" % (run_id, step)))
            torch.save(
                {
                    "step": step + 1,
                    "model_state": model.state_dict(),
                    "optimizer_state": optimizer.state_dict(),
                }, backup_fpath)
        sync(device)
        profiler.tick("Extras (visualizations, saving)")
Example #13
def train(data, model, scale, config, device):
    from nn.optim.sgd import SGD
    from utils.train_history import TrainHistory

    X_train, y_train, X_test, y_test = data.split(config.train_part,
                                                  shuffle=True)

    assert len(X_train) > 0, "Wrong number of train examples"
    assert len(X_test) > 0, "Wrong number of test examples"

    X_train, y_train = prep_data(X_train, y_train, device)
    X_test, y_test = prep_data(X_test, y_test, device)

    print("first label in train part: {:.2f}%".format(
        y_train[:, 0].sum().float() / y_train.shape[0] * 100))
    print("first label in test part: {:.2f}%".format(
        y_test[:, 0].sum().float() / y_test.shape[0] * 100))

    scale.fit(torch.cat([X_train, X_test], dim=0))
    X_train = scale(X_train)
    X_test = scale(X_test)

    criterion = get_loss(config.loss)
    optimizer = SGD(model, **config.sgd_params)

    profiler = Profiler("TRAIN TIME")
    history = TrainHistory(config.epochs,
                           ["loss", "val_loss", "acc", "val_acc"])

    for i in range(1, config.epochs + 1):
        profiler.tick()
        losses = []
        for batch_X, batch_y in batch_iterator(X_train,
                                               y_train,
                                               config.batch_size,
                                               permute=True):
            output = model(batch_X)

            losses.append(criterion(output, batch_y).to("cpu"))
            grad = criterion.backward(output, batch_y)

            model.backward(grad)

            optimizer.optimise()

        test_pred = model(X_test)
        test_loss = criterion(test_pred, y_test)
        test_acc = accuracy(torch.argmax(test_pred, dim=1),
                            torch.argmax(y_test, dim=1))

        pred = model(X_train)
        acc = accuracy(torch.argmax(pred, dim=1), torch.argmax(y_train, dim=1))

        history.update(i, np.mean(losses), test_loss, acc, test_acc)
        history.print_progress()

        if config.cross_validation:
            idxs = np.random.permutation(
                np.arange(X_train.shape[0] + X_test.shape[0]))

            X = torch.cat([X_train, X_test], dim=0)
            y = torch.cat([y_train, y_test], dim=0)
            train_num = int(X.shape[0] * config.train_part)

            # keep config.train_part of the shuffled data for training, the rest for testing
            X_train = X[idxs[:train_num]]
            X_test = X[idxs[train_num:]]
            y_train = y[idxs[:train_num]]
            y_test = y[idxs[train_num:]]

        profiler.tock()
    history.visualize()
    print('\n', profiler, sep='', end='\n\n')
Example #14
class Executor:
    # openfoam_solver = "solidEquilibriumDisplacementFoamMod"
    openfoam_solver = "solidDisplacementFoamMod"

    def __init__(self, exec_conf: ExecutionConfig, mesh_conf, fragmentation_conf: FragmentationConfig):
        self._profiler = Profiler(enable_profiler)
        _logger.info("Solver: {}".format(self.openfoam_solver))
        self.exec_config = exec_conf
        self.result_dir = exec_conf.output_dir
        files.create_directory(self.result_dir)
        self.results = {}
        result_file_geom_prefix = None
        if type(mesh_conf) is SimpleBlockMeshConfig:
            result_file_geom_prefix = "gw{}_gh{}_gl{}".format(
                mesh_conf.width_mm,
                mesh_conf.height_mm,
                mesh_conf.length_mm)
            self.geom_values = "{}\t{}\t{}".format(
                mesh_conf.width_mm,
                mesh_conf.height_mm,
                mesh_conf.length_mm)
            self.geom_titles = "Geometry width\tGeometry height\tGeometry length"
        else:
            result_file_geom_prefix = ""
            self.geom_values = ""
            self.geom_titles = ""

            for line_idx in range(len(mesh_conf.width_lines)):
                result_file_geom_prefix += "w{}={}_".format(line_idx, mesh_conf.width_lines[line_idx])
                self.geom_titles += "{} {}\t".format("Width line", line_idx)
                self.geom_values += "{}\t".format(mesh_conf.width_lines[line_idx])

            for line_idx in range(len(mesh_conf.height_distance)):
                result_file_geom_prefix += "h{}={}_".format(line_idx, mesh_conf.height_distance[line_idx])
                self.geom_titles += "{} {}\t".format("Height line", line_idx)
                self.geom_values += "{}\t".format(mesh_conf.height_distance[line_idx])

            result_file_geom_prefix += "l={}".format(mesh_conf.length)
            self.geom_titles += "{}".format("Length")
            self.geom_values += "{}".format(mesh_conf.length)

        _logger.debug(result_file_geom_prefix)
        _logger.debug(self.geom_values)

        fragmentation_options_line = "fw{}_fh{}_fl{}".format(
            fragmentation_conf.width,
            fragmentation_conf.height,
            fragmentation_conf.length)
        self.fragmentation_values = "{}\t{}\t{}".format(
            fragmentation_conf.width,
            fragmentation_conf.height,
            fragmentation_conf.length)

        result_file_name = "result_{}_{}.txt".format(result_file_geom_prefix, fragmentation_options_line)
        self.result_file = os.path.join(self.result_dir, result_file_name)
        self.parsed_name = datetime.datetime.now().strftime("%Y-%m-%d-%H.txt")

    def __del__(self):
        self._profiler.print_report()

    def run(self):
        _logger.info("\n\n===== Run calculation")
        self.__run_execution()
        _logger.info("===== End calculation\n\n")

        _logger.info("\n\n===== Run result parsing")
        self._profiler.start("Parse results")
        self.__parse_output_from_file("sigmaEq")
        self.__parse_output_from_file("D")
        self.__parse_output_from_file("Time")
        self._profiler.stop("Parse results")
        _logger.info("===== End result parsing\n\n")

    def __run_execution(self):
        self._profiler.start("Run solver")
        # FIXME: no check if None
        prepare_call = "export FOAM_INST_DIR=" + self.exec_config.openfoam_folder
        prepare_call += "; "
        prepare_call += ". " + "$HOME/prog/OpenFOAM/OpenFOAM-dev/etc/bashrc_modified"
        prepare_call += "; "
        prepare_call += "cd " + self.exec_config.execution_folder
        try:
            subprocess.call(["{}; {} > {}".format(prepare_call, self.openfoam_solver, self.result_file)], shell=True)
        except OSError:
            _logger.error("{} not found.".format(self.openfoam_solver))
            _logger.error("Please make sure you are using modified version of OpenFOAM and env is prepared")
        self._profiler.stop("Run solver")

    def __parse_time(self, text):
        found_exec = re.findall(r'ExecutionTime = (\d+.?\d*) s', text)[-1]
        found_clock = re.findall(r'ClockTime = (\d+.?\d*) s', text)[-1]
        exec_time = float(found_exec)
        clock_time = float(found_clock)

        # Save result to file
        file_parsed_name = "{}-{}".format("time", self.parsed_name)
        file_parsed_result = os.path.join(self.result_dir, file_parsed_name)
        formatted_result = "{geometry}\t{fragmentation}\t{exec_time}\t{clock_time}\n".format(
            geometry=self.geom_values,
            fragmentation=self.fragmentation_values,
            exec_time=exec_time, clock_time=clock_time)

        with open(file_parsed_result, "a") as log_file:
            if os.stat(file_parsed_result).st_size == 0:
                log_file.write("{}"
                               "\tFragmentation width\tFragmentation height\tFragmentation length"
                               "\t{}\t{}\n".format(self.geom_titles, "Execution time", "Clock time"))
            log_file.write(formatted_result)

    def __parse_output(self, param_to_parse, text):
        _logger.debug("Parse: {}".format(param_to_parse))

        start_index = text.rfind("Max {} = ".format(param_to_parse))
        line_len = text[start_index:].find('\n')

        _logger.debug(text[start_index:start_index + line_len])

        # (.123 250000 1.00009e+06 160325 7.29635e-10 2.36271e+06 0 -2.40131e-45 5.12455e-06 2.01673e-06 1.18136e-05)
        p = re.compile(r'[-+]?[0-9]*\.?[0-9]+[eE]?[-+]?[0-9]*')
        values = p.findall(text[start_index:start_index + line_len])

        _logger.debug("Found values: {}".format(values))

        float_val = []
        for val in values:
            float_val.append(float(val))

        _logger.debug("Values as float: {}".format(float_val))

        max_value = -1.
        # FIXME Workaround for cantilever beam to use D which is with -D flag
        if param_to_parse == "sigmaEq":
            max_value = max(float_val)
        elif param_to_parse == "D":
            max_value = abs(max(float_val, key=abs))

        _logger.info("Max (Min) {}: {}".format(param_to_parse, max_value))
        # Save to map
        self.results[param_to_parse] = max_value

        # Save result to file
        file_parsed_name = "{}-{}".format(param_to_parse, self.parsed_name)
        file_parsed_result = os.path.join(self.result_dir, file_parsed_name)
        formatted_result = "{geometry}\t{fragmentation}\t{value}\n".format(geometry=self.geom_values,
                                                                           fragmentation=self.fragmentation_values,
                                                                           value=max_value)

        with open(file_parsed_result, "a") as log_file:
            if os.stat(file_parsed_result).st_size == 0:
                log_file.write("{}"
                               "\tFragmentation width\tFragmentation height\tFragmentation length"
                               "\t{}\n".format(self.geom_titles, param_to_parse))
            log_file.write(formatted_result)

    def __parse_output_from_file(self, param_to_parse):
        with open(self.result_file, 'rt') as file:
            contents = file.read()

        # TODO yeaah, custom call for "time"!
        if param_to_parse == "Time":
            self.__parse_time(contents)
        else:
            self.__parse_output(param_to_parse, contents)

    def get_results(self):
        return self.results
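A possible driver for the Executor class above, again with the config construction left as an assumption; run() and get_results() are the methods defined in the example:

# hypothetical usage of Executor (configs built as in the sketch after Example #11)
executor = Executor(exec_conf, mesh_conf, frag_conf)
executor.run()                    # runs the solver, then parses sigmaEq, D and Time
results = executor.get_results()  # e.g. {"sigmaEq": ..., "D": ...}
print(results)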
Example #15
import random

from engine.game import GamePhase, GameState
from utils.profiler import Profiler
import torch

PROFILE = True
if __name__ == "__main__":
    dummyState = GameState(4)
    payoff_sum = torch.zeros((4, ), dtype=torch.float)
    action_hist = torch.zeros((
        4,
        dummyState.action_dim(),
    ),
                              dtype=torch.float)
    with Profiler(PROFILE):
        random.seed(1)
        for x in range(1000):
            gameState = GameState(4)
            first = True
            while not gameState.terminal():
                seatToAct = gameState.get_players_to_act()[0]
                if not PROFILE: gameState.print()
                if False and seatToAct == 0:
                    possible_actions = gameState.getPossibleActions(seatToAct)
                    print("Possible Actions: " + str(possible_actions))
                    action = input("Please give an action for seat " +
                                   str(seatToAct) + ": ")
                    action = int(action)
                    gameState.playerAction(seatToAct, action)
                else: