Example #1
    def load(
        self,
        training=True,
    ):
        rank = self._config.get('distributed_rank')

        if self._load_dir:
            if os.path.isfile(self._load_dir + "/model_{}.pt".format(rank)):
                Log.out("Loading th2vec models", {
                    'save_dir': self._load_dir,
                })
                self._inner_model.load_state_dict(
                    torch.load(
                        self._load_dir + "/model_{}.pt".format(rank),
                        map_location=self._device,
                    ), )
                if training:
                    self._optimizer.load_state_dict(
                        torch.load(
                            self._load_dir + "/optimizer_{}.pt".format(rank),
                            map_location=self._device,
                        ), )
                    # self._scheduler.load_state_dict(
                    #     torch.load(
                    #         self._load_dir +
                    #         "/scheduler_{}.pt".format(rank),
                    #         map_location=self._device,
                    #     ),
                    # )

        return self
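Since `load` returns `self`, it chains naturally after construction. A minimal usage sketch; `Trainer` is a hypothetical stand-in for whatever class owns this method:

# Hedged sketch: restore model (and optimizer) state right after init.
# `Trainer` and `config` are assumptions, not names from the source.
trainer = Trainer(config).load(training=True)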
Example #2
    def __init__(
        self,
        config: Config,
        train_dataset: ProofTraceRLLDataset,
    ):
        self._config = config

        self._action_coeff = config.get('prooftrace_search_action_coeff')

        self._device = torch.device(config.get('device'))
        self._type = config.get('prooftrace_search_model_type')

        self._model = SearchModel(config)

        self._ack = IOTAAck(
            config.get('prooftrace_search_iota_sync_dir'),
            self._model.modules(),
        )

        self._nll_loss = nn.NLLLoss()
        self._mse_loss = nn.MSELoss()

        self._train_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=self._config.get('prooftrace_search_batch_size'),
            shuffle=True,
            collate_fn=lm_collate,
        )

        Log.out('ACK initialization', {
            "batch_size": self._config.get('prooftrace_search_batch_size'),
        })

        self._train_batch = 0
Example #3
    def __init__(
        self,
        config: Config,
    ):
        self._config = config

        self._device = torch.device(config.get('device'))

        self._save_dir = config.get('sat_solver_save_dir')
        self._load_dir = config.get('sat_solver_load_dir')

        self._tb_writer = None
        if self._config.get('tensorboard_log_dir'):
            if self._config.get('distributed_rank') == 0:
                self._tb_writer = SummaryWriter(
                    self._config.get('tensorboard_log_dir'),
                )

        self._inner_model = S(self._config).to(self._device)

        Log.out(
            "Initializing solver",
            {'parameter_count': self._inner_model.parameters_count()},
        )

        self._model = self._inner_model
Example #4
    def load(
        self,
        training=True,
    ):

        if self._load_dir:
            Log.out("Loading prooftrace search models", {
                'load_dir': self._load_dir,
            })

            self._model.load()

            if training and \
                    os.path.isfile(self._load_dir + "/value_optimizer.pt"):
                self._value_optimizer.load_state_dict(
                    torch.load(
                        self._load_dir + "/value_optimizer.pt",
                        map_location=self._device,
                    ),
                )
                self._policy_optimizer.load_state_dict(
                    torch.load(
                        self._load_dir + "/policy_optimizer.pt",
                        map_location=self._device,
                    ),
                )

        self._syn.broadcast({'config': self._config})

        return self
Example #5
    def run(self) -> bool:
        lanes = self._detector.detect(self._image)

        dump = {
            'detected': [dict(lane) for lane in lanes],
        }

        assert len(lanes) > 1

        # TODO(stan): test criteria

        dump_path = os.path.join(self.dump_dir(), "dump.json")
        image_path = os.path.join(self.dump_dir(), "image.png")

        Log.out("Dumping detection", {
            'path': dump_path,
        })

        os.makedirs(self.dump_dir())
        with open(dump_path, 'w') as out:
            json.dump(dump, out, indent=2)

        cv2.imwrite(image_path, self._image.data())

        self._detector.close()

        return True
Example #6
    def __init__(
        self,
        config: Config,
        spec: ScenarioSpec,
    ) -> None:
        super(LaneScenario, self).__init__(
            config,
            spec,
        )

        Log.out("Initializing detector", {
            'detector': spec.data()['detector'],
        })

        if spec.data()['detector'] == 'lanenet':
            self._detector = LaneNet(config)
        else:
            raise ValueError(
                "Unsupported detector: {}".format(spec.data()['detector']),
            )

        camera = Camera.from_dict(spec.data()['camera'])

        self._image = CameraImage.from_path_and_camera(
            os.path.join(
                os.path.dirname(spec.path()),
                spec.data()['image'],
            ),
            camera,
        )
Example #7
    def __init__(
        self,
        config: Config,
    ):
        self._config = config
        self._accumulation_step_count = \
            config.get('th2vec_accumulation_step_count')

        self._device = torch.device(config.get('device'))

        self._save_dir = config.get('th2vec_save_dir')
        self._load_dir = config.get('th2vec_load_dir')

        self._tb_writer = None
        if self._config.get('tensorboard_log_dir'):
            if self._config.get('distributed_rank') == 0:
                self._tb_writer = SummaryWriter(
                    self._config.get('tensorboard_log_dir'),
                )

        self._inner_model = DP(self._config).to(self._device)

        Log.out(
            "Initializing th2vec",
            {'parameter_count': self._inner_model.parameters_count()},
        )

        self._model = self._inner_model
        self._train_batch = 0
Example #8
def dump_trace(args):
    config, tokenizer, tr, idx, total = args
    ptra = tr.actions(tokenizer)

    test = False
    for nm in TEST_FILTER:
        if re.search(nm, tr.name()) is not None:
            test = True

    if test:
        path = os.path.join(
            os.path.expanduser(config.get('prooftrace_dataset_dir')),
            config.get('prooftrace_dataset_size'),
            "test_traces",
        )
    else:
        path = os.path.join(
            os.path.expanduser(config.get('prooftrace_dataset_dir')),
            config.get('prooftrace_dataset_size'),
            "train_traces",
        )

    ptra_path = os.path.join(path, ptra.path())
    Log.out("Writing ProofTraceActions", {
        'path': ptra_path,
        'index': idx,
        'total': total,
    })
    ptra.dump(ptra_path)

    length = ptra.len()
    del ptra

    return length
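The single-tuple `args` signature, and the `del ptra` that frees the trace inside the worker, suggest `dump_trace` is meant to be fanned out with `multiprocessing.Pool`. A minimal driver sketch, assuming hypothetical `config`, `tokenizer`, and `traces` objects already in scope:

from multiprocessing import Pool

# Hypothetical driver: each worker gets one (config, tokenizer, trace,
# index, total) tuple and returns the dumped trace's length.
jobs = [
    (config, tokenizer, tr, idx, len(traces))
    for idx, tr in enumerate(traces)
]
with Pool() as pool:
    lengths = pool.map(dump_trace, jobs)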
Example #9
    def __init__(
        self,
        config: Config,
        train_dataset: ProofTraceLMDataset,
    ):
        self._config = config

        self._action_coeff = config.get('prooftrace_lm_action_coeff')
        self._grad_norm_max = config.get('prooftrace_lm_grad_norm_max')

        self._device = torch.device(config.get('device'))

        self._sequence_length = config.get('prooftrace_sequence_length')

        self._model = LModel(config)
        self._ack = IOTAAck(
            config.get('prooftrace_lm_iota_sync_dir'),
            self._model.modules(),
        )

        self._nll_loss = nn.NLLLoss()

        self._train_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=self._config.get('prooftrace_lm_batch_size'),
            shuffle=True,
            collate_fn=lm_collate,
        )

        Log.out('ACK initialization', {
            "batch_size": self._config.get('prooftrace_lm_batch_size'),
        })

        self._train_batch = 0
Example #10
    def __init__(
            self,
            config: Config,
            kernel: HolStepKernel,
    ):
        self._config = config
        self._kernel = kernel

        self._device = torch.device(config.get('device'))

        self._save_dir = config.get('th2vec_save_dir')
        self._load_dir = config.get('th2vec_load_dir')

        self._tb_writer = None
        if self._config.get('tensorboard_log_dir'):
            if self._config.get('distributed_rank') == 0:
                self._tb_writer = SummaryWriter(
                    self._config.get('tensorboard_log_dir'),
                )

        self._inner_model_G = G(self._config).to(self._device)
        self._inner_model_D = D(self._config).to(self._device)

        Log.out(
            "Initializing th2vec", {
                'G_parameter_count': self._inner_model_G.parameters_count(),
                'D_parameter_count': self._inner_model_D.parameters_count(),
            },
        )

        self._model_G = self._inner_model_G
        self._model_D = self._inner_model_D
        self._loss = nn.NLLLoss()
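The paired `G`/`D` models kept side by side suggest an adversarial generator/discriminator setup, with `nn.NLLLoss` presumably driving the discriminator's classification objective; this reading is inferred from the names alone and is not confirmed by the source.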
Example #11
    def save(self):
        rank = self._config.get('distributed_rank')

        if self._save_dir:
            Log.out(
                "Saving th2vec models", {
                    'save_dir': self._save_dir,
                })

            torch.save(
                self._inner_model_G.state_dict(),
                self._save_dir + "/model_G_{}.pt".format(rank),
            )
            torch.save(
                self._optimizer_G.state_dict(),
                self._save_dir + "/optimizer_G_{}.pt".format(rank),
            )
            torch.save(
                self._inner_model_D.state_dict(),
                self._save_dir + "/model_D_{}.pt".format(rank),
            )
            torch.save(
                self._optimizer_D.state_dict(),
                self._save_dir + "/optimizer_D_{}.pt".format(rank),
            )
Example #12
    def save(self):
        rank = self._config.get('distributed_rank')

        if self._save_dir:
            Log.out("Saving prooftrace models", {
                'save_dir': self._save_dir,
            })

            torch.save(
                self._inner_model_E.state_dict(),
                self._save_dir + "/model_E_{}.pt".format(rank),
            )
            torch.save(
                self._inner_model_H.state_dict(),
                self._save_dir + "/model_H_{}.pt".format(rank),
            )
            torch.save(
                self._inner_model_PH.state_dict(),
                self._save_dir + "/model_PH_{}.pt".format(rank),
            )
            torch.save(
                self._inner_model_VH.state_dict(),
                self._save_dir + "/model_VH_{}.pt".format(rank),
            )
            torch.save(
                self._optimizer.state_dict(),
                self._save_dir + "/optimizer_{}.pt".format(rank),
            )
Example #13
    def __init__(
            self,
            config: Config,
    ):
        self._config = config

        self._device = torch.device(config.get('device'))

        self._save_dir = config.get('th2vec_save_dir')
        self._load_dir = config.get('th2vec_load_dir')
        self._embedder_load_dir = \
            config.get('th2vec_premiser_embedder_load_dir')

        self._tb_writer = None
        if self._config.get('tensorboard_log_dir'):
            if self._config.get('distributed_rank') == 0:
                self._tb_writer = SummaryWriter(
                    self._config.get('tensorboard_log_dir'),
                )

        self._inner_model = P(self._config).to(self._device)
        self._embedder = AE(self._config).to(self._device)

        Log.out(
            "Initializing th2vec", {
                'model_parameter_count': self._inner_model.parameters_count(),
                'embedder_parameter_count': self._embedder.parameters_count(),
            },
        )

        self._model = self._inner_model
Example #14
    def __init__(
        self,
        config: Config,
    ):
        self._config = config

        self._device = torch.device(config.get('device'))

        self._model = SearchModel(config)

        self._rollout_dir = os.path.join(
            os.path.expanduser(config.get('prooftrace_search_rollout_dir')),
            config.get('prooftrace_dataset_size'),
        )
        with gzip.open(
                os.path.join(
                    os.path.expanduser(config.get('prooftrace_dataset_dir')),
                    config.get('prooftrace_dataset_size'),
                    'traces.tokenizer',
                ), 'rb') as f:
            self._tokenizer = pickle.load(f)

        self._wrk = IOTAWrk(
            config.get('prooftrace_search_iota_sync_dir'),
            'rollout',
            self._model.modules(),
        )

        self._type = config.get('prooftrace_search_type')
        self._depth = config.get('prooftrace_search_depth')

        Log.out('WRK initialization', {})
Example #15
    def save(self):
        if self._save_dir:
            Log.out("Saving prooftrace models", {
                'save_dir': self._save_dir,
            })

            torch.save(
                self._modules['E'].state_dict(),
                self._save_dir + "/model_E.pt",
            )
            torch.save(
                self._modules['T'].state_dict(),
                self._save_dir + "/model_T.pt",
            )
            torch.save(
                self._modules['PH'].state_dict(),
                self._save_dir + "/model_PH.pt",
            )
            torch.save(
                self._modules['VH'].state_dict(),
                self._save_dir + "/model_VH.pt",
            )
            torch.save(
                self._optimizer.state_dict(),
                self._save_dir + "/optimizer.pt",
            )
Example #16
    def __init__(
        self,
        config: Config,
        test_dataset: ProofTraceLMDataset,
    ):
        self._config = config

        self._device = torch.device(config.get('device'))

        self._model = VModel(config)
        self._ack = IOTAAck(
            config.get('prooftrace_v_iota_sync_dir'),
            self._model.modules(),
        )

        self._mse_loss = nn.MSELoss()

        self._test_loader = torch.utils.data.DataLoader(
            test_dataset,
            batch_size=self._config.get('prooftrace_v_batch_size'),
            shuffle=True,
            collate_fn=lm_collate,
        )

        Log.out('TST initialization', {
            "batch_size": self._config.get('prooftrace_v_batch_size'),
        })

        self._train_batch = 0
Example #17
def translate(args):
    config, path, idx = args

    with gzip.open(path, 'rb') as f:
        ptra = pickle.load(f)

    rollout = Rollout(ptra.name(), [ptra], [])

    rollout_dir = os.path.join(
        os.path.expanduser(config.get('prooftrace_search_rollout_dir')),
        config.get('prooftrace_dataset_size'),
    )

    rdir = os.path.join(rollout_dir, rollout.name())
    os.makedirs(rdir, exist_ok=True)

    now = datetime.datetime.now().strftime("%Y%m%d_%H%M_%S.%f")
    rnd = random.randint(0, int(10e9))

    tmp_path = os.path.join(rdir, "{}_{}.tmp".format(now, rnd))
    fnl_path = os.path.join(rdir, "{}_{}.rollout".format(now, rnd))

    with gzip.open(tmp_path, 'wb') as f:
        pickle.dump(rollout, f, protocol=pickle.HIGHEST_PROTOCOL)
    os.rename(tmp_path, fnl_path)

    Log.out("Writing Rollout", {
        'path': fnl_path,
        'index': idx,
    })
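Writing the pickle to a `.tmp` file first and then `os.rename`-ing it to its final `.rollout` name makes the publish step atomic on POSIX filesystems: concurrent readers scanning the rollout directory never observe a partially written file.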
Example #18
File: mcts.py Project: spolu/z3ta
    def next(
        self,
        offset,
        step: int,
    ):
        assert len(self._children) > 0

        total = sum(n._N for n in self._children)

        # Select the most visited child; `max` always returns a node,
        # even when every child still has zero visits.
        child = max(self._children, key=lambda n: n._N)

        Log.out(
            "NEXT", {
                'step': step,
                'q': "{:.3f}".format(child._Q),
                'p': "{:.3f}".format(child._P),
                'n': "{:.3f}".format(child._N),
                'summary': child._ptra.summary(offset),
            })

        return child
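Selecting the child with the highest visit count `_N`, rather than the highest value `_Q`, is the standard final-move rule in MCTS: once search has run, visit counts are the more robust statistic.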
Example #19
    def update(self) -> None:
        update = self._config.update()
        if update:
            if 'prooftrace_lm_learning_rate' in update:
                lr = self._config.get('prooftrace_lm_learning_rate')
                if lr != self._learning_rate:
                    self._learning_rate = lr
                    for group in self._policy_optimizer.param_groups:
                        group['lr'] = lr
                    Log.out("Updated", {
                        "prooftrace_lm_learning_rate": lr,
                    })
            if 'prooftrace_lm_iota_min_update_count' in update:
                cnt = \
                    self._config.get('prooftrace_lm_iota_min_update_count')
                if cnt != self._min_update_count:
                    self._min_update_count = cnt
                    Log.out("Updated", {
                        "prooftrace_lm_iota_min_update_count": cnt,
                    })

            if self._tb_writer is not None:
                for k in update:
                    if k in [
                            'prooftrace_lm_learning_rate',
                            'prooftrace_lm_iota_min_update_count',
                            'prooftrace_lm_action_coeff',
                    ]:
                        self._tb_writer.add_scalar(
                            "prooftrace_lm_train_run/{}".format(k),
                            update[k],
                            self._epoch,
                        )
Example #20
    def update(self) -> None:
        update = self._config.update()
        if update:
            if 'prooftrace_lm_learning_rate' in update:
                lr = self._config.get('prooftrace_lm_learning_rate')
                if lr != self._learning_rate:
                    self._learning_rate = lr
                    for group in self._optimizer.param_groups:
                        group['lr'] = lr
                    Log.out("Updated", {
                        "prooftrace_lm_learning_rate": lr,
                    })
            if 'prooftrace_lm_value_coeff' in update:
                coeff = self._config.get('prooftrace_lm_value_coeff')
                if coeff != self._value_coeff:
                    self._value_coeff = coeff
                    Log.out("Updated", {
                        "prooftrace_lm_value_coeff": coeff,
                    })

            if self._tb_writer is not None:
                for k in update:
                    if k in [
                            'prooftrace_lm_learning_rate',
                            'prooftrace_lm_value_coeff',
                    ]:
                        self._tb_writer.add_scalar(
                            "prooftrace_lm_train_run/{}".format(k),
                            update[k],
                            self._train_batch,
                        )
Example #21
def run():
    parser = argparse.ArgumentParser(description="")

    parser.add_argument(
        'config_path',
        type=str,
        help="path to the config file",
    )
    parser.add_argument(
        'spec_path',
        type=str,
        help="path to the spec file",
    )

    args = parser.parse_args()

    config = Config.from_file(args.config_path)
    spec = ScenarioSpec.from_file(args.spec_path)

    scenario = spec.scenario(config)

    Log.out("Starting scenario", {
        'id': scenario.id(),
        'dump_dir': scenario.dump_dir(),
    })

    scenario.run()

    Log.out("Finished scenario", {
        'id': scenario.id(),
        'viewer_url': scenario.view(),
    })
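A hedged invocation sketch, assuming `run()` is exposed as a CLI entry point; the module name and both file paths are hypothetical:

# python -m scenarios.run configs/dev.json specs/lane_scenario.json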
Example #22
def load_all():
    parser = argparse.ArgumentParser(description="")

    parser.add_argument(
        'config_path',
        type=str,
        help="path to the config file",
    )
    parser.add_argument(
        '--dataset_size',
        type=str,
        help="config override",
    )

    args = parser.parse_args()

    config = Config.from_file(args.config_path)

    if args.dataset_size is not None:
        config.override(
            'prooftrace_dataset_size',
            args.dataset_size,
        )

    dataset_dir = os.path.join(
        os.path.expanduser(config.get('prooftrace_dataset_dir')),
        config.get('prooftrace_dataset_size'), 'train_traces')

    assert os.path.isdir(dataset_dir)
    files = [
        os.path.join(dataset_dir, f) for f in os.listdir(dataset_dir)
        if os.path.isfile(os.path.join(dataset_dir, f))
    ]

    ptras = []

    processed = 0
    for p in files:
        match = re.search("_(\\d+)_(\\d+)\\.actions$", p)
        if match is None:
            continue
        ptra_len = int(match.group(1))
        prepare_len = int(match.group(2))

        with gzip.open(p, 'rb') as f:
            ptra = pickle.load(f)
        ptras.append(ptra)
        Log.out(
            "Loaded ProofTrace", {
                'name': ptra.name(),
                'prepare_length': prepare_len,
                'length': ptra_len,
                'processed': processed,
                'all': len(files),
            })
        processed += 1

    Log.out("Loaded extracted ProofTraces LM Dataset", {
        'processed': processed,
    })
Example #23
    def run_once(self):
        run_start = time.time()

        infos = self._ctl.aggregate()

        if len(infos) == 0:
            time.sleep(10)
            return

        rll_cnt_meter = Meter()
        pos_cnt_meter = Meter()
        neg_cnt_meter = Meter()
        demo_len_meter = Meter()

        for info in infos:
            rll_cnt_meter.update(info['rll_cnt'])
            pos_cnt_meter.update(info['pos_cnt'])
            neg_cnt_meter.update(info['neg_cnt'])
            if 'demo_len' in info:
                demo_len_meter.update(info['demo_len'])

        Log.out(
            "PROOFTRACE BEAM ROLLOUT CTL RUN", {
                'epoch': self._epoch,
                'run_time': "{:.2f}".format(time.time() - run_start),
                'update_count': len(infos),
                'rll_cnt': "{:.4f}".format(rll_cnt_meter.sum or 0.0),
                'pos_cnt': "{:.4f}".format(pos_cnt_meter.avg or 0.0),
                'neg_cnt': "{:.4f}".format(neg_cnt_meter.avg or 0.0),
                'demo_len': "{:.4f}".format(demo_len_meter.avg or 0.0),
            })

        if self._tb_writer is not None:
            if rll_cnt_meter.avg is not None:
                self._tb_writer.add_scalar(
                    "prooftrace_search_rollout/rll_cnt",
                    rll_cnt_meter.sum,
                    self._epoch,
                )
            if pos_cnt_meter.avg is not None:
                self._tb_writer.add_scalar(
                    "prooftrace_search_rollout/pos_cnt",
                    pos_cnt_meter.avg,
                    self._epoch,
                )
            if neg_cnt_meter.avg is not None:
                self._tb_writer.add_scalar(
                    "prooftrace_search_rollout/neg_cnt",
                    neg_cnt_meter.avg,
                    self._epoch,
                )
            if demo_len_meter.avg is not None:
                self._tb_writer.add_scalar(
                    "prooftrace_search_rollout/demo_len",
                    demo_len_meter.avg,
                    self._epoch,
                )

        self._epoch += 1
Example #24
    @staticmethod
    def from_file(
        path: str,
    ):
        Log.out("Loading map", {
            'path': path,
        })
        with open(path) as f:
            return SyntheticMap.from_dict(json.load(f))
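A one-line usage sketch; the path is hypothetical:

# Hedged example; "maps/synthetic_0.json" is an assumed path.
synthetic_map = SyntheticMap.from_file("maps/synthetic_0.json")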
Example #25
    def init_training(
        self,
        train_dataset,
    ):
        if self._config.get('distributed_training'):
            self._model = torch.nn.parallel.DistributedDataParallel(
                self._inner_model,
                device_ids=[self._device],
            )

        self._optimizer = optim.Adam(
            self._model.parameters(),
            lr=self._config.get('th2vec_learning_rate'),
        )
        # self._scheduler = RampUpCosineLR(
        #     self._optimizer,
        #     self._config.get('th2vec_learning_rate_ramp_up'),
        #     self._config.get('th2vec_learning_rate_period'),
        #     self._config.get('th2vec_learning_rate_annealing'),
        # )

        self._train_sampler = None
        if self._config.get('distributed_training'):
            self._train_sampler = \
                torch.utils.data.distributed.DistributedSampler(
                    train_dataset,
                )

        pin_memory = False
        if self._config.get('device') != 'cpu':
            pin_memory = True

        batch_size = self._config.get('th2vec_batch_size') // \
            self._accumulation_step_count

        self._train_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=batch_size,
            shuffle=(self._train_sampler is None),
            pin_memory=pin_memory,
            num_workers=8,
            sampler=self._train_sampler,
        )

        Log.out(
            'Training initialization', {
                "accumulation_step_count":
                self._accumulation_step_count,
                "world_size":
                self._config.get('distributed_world_size'),
                "batch_size":
                self._config.get('th2vec_batch_size'),
                "dataloader_batch_size":
                batch_size,
                "effective_batch_size":
                (self._config.get('th2vec_batch_size') *
                 self._config.get('distributed_world_size')),
            })
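For example, with `th2vec_batch_size` 128, `th2vec_accumulation_step_count` 4, and `distributed_world_size` 2, each DataLoader step yields 32 samples, four accumulation steps rebuild the per-process batch of 128, and the effective batch across the two processes is 256, matching the values logged above.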
Example #26
    def __init__(
        self,
        config: Config,
    ):
        self._config = config

        self._learning_rate = config.get('prooftrace_lm_learning_rate')
        self._min_update_count = \
            config.get('prooftrace_lm_iota_min_update_count')

        self._device = torch.device(config.get('device'))

        self._save_dir = config.get('prooftrace_save_dir')
        self._load_dir = config.get('prooftrace_load_dir')

        self._epoch = 0
        self._last_update = None

        self._tb_writer = None
        if self._config.get('tensorboard_log_dir'):
            self._tb_writer = SummaryWriter(
                self._config.get('tensorboard_log_dir'),
            )

        self._model = LModel(config)

        Log.out(
            "SYN Initializing",
            {
                'parameters_count_pE':
                self._model.modules()['pE'].parameters_count(),
                'parameters_count_pT':
                self._model.modules()['pT'].parameters_count(),
                'parameters_count_pH':
                self._model.modules()['pH'].parameters_count(),
            },
        )

        self._syn = IOTASyn(
            config.get('prooftrace_lm_iota_sync_dir'),
            self._model.modules(),
        )

        self._policy_optimizer = optim.Adam(
            [
                {
                    'params': self._model.modules()['pE'].parameters()
                },
                {
                    'params': self._model.modules()['pT'].parameters()
                },
                {
                    'params': self._model.modules()['pH'].parameters()
                },
            ],
            lr=self._learning_rate,
        )

        self._syn.broadcast({'config': self._config})
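The class names suggest an IOTA synchronization topology: this SYN process owns the optimizer and pushes weights and config through the sync directory, while IOTAAck trainers (example #9) and IOTAWrk rollout workers (example #14) consume them. That reading is inferred from the names and the `broadcast` call, not stated in the source.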
Example #27
def setup():
    global _config

    if _config is None:
        Log.out("Defaulting config", {
            'path': "configs/dev.json",
        })

        _config = Config.from_file("configs/dev.json")
Example #28
        def process(worker):
            total = 0
            total_sat = 0
            total_unsat = 0

            generated = 0
            sat = 0
            unsat = 0

            while generated < chunk:
                cnf = self.produce()
                success, _ = minisat.solve(cnf)

                store = False
                header = ""
                total += 1
                if success:
                    total_sat += 1
                    if sat <= unsat:
                        header = "c SAT\n"
                        generated += 1
                        sat += 1
                        store = True
                if not success:
                    total_unsat += 1
                    if unsat <= sat:
                        header = "c UNSAT\n"
                        generated += 1
                        unsat += 1
                        store = True

                if store:
                    with open(
                            os.path.join(
                                dataset_dir, "{}_{}.cnf".format(
                                    prefix,
                                    worker * chunk + generated,
                                )), 'w') as f:
                        f.write(header)
                        f.write(cnf)
                        f.flush()

                if total % 100 == 0:
                    Log.out(
                        "Generating samples", {
                            'generator':
                            self.name(),
                            'total':
                            total,
                            'sat_ratio':
                            "{:.3f}".format(total_sat /
                                            (total_sat + total_unsat)),
                            'worker':
                            worker,
                            'generated':
                            generated,
                        })
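The `sat <= unsat` and `unsat <= sat` gates only store a formula when its class is not already over-represented, so each worker's emitted dataset stays balanced between SAT and UNSAT regardless of the generator's raw `sat_ratio`.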
Example #29
File: viewer.py Project: spolu/z3ta
def run_server():
    global _app

    Log.out("Starting embeds viewer server", {
        'port': 5001,
    })
    address = ('0.0.0.0', 5001)
    try:
        eventlet.wsgi.server(eventlet.listen(address), _app)
    except KeyboardInterrupt:
        Log.out("Stopping viewer server", {})
Example #30
    def update(
        self,
        config: Config,
    ) -> None:
        self._config = config

        coeff = self._config.get('prooftrace_lm_action_coeff')
        if coeff != self._action_coeff:
            self._action_coeff = coeff
            Log.out("Updated", {
                "prooftrace_lm_action_coeff": coeff,
            })