Example #1
0
import itertools

# Path2: the project's pathlib-compatible path type (used like pathlib.Path here).
def file_chunks(filepath: Path2, chunk_size: int):
    """Yield the file's lines in chunks of at most chunk_size lines."""
    with filepath.open(mode='r') as f:
        while True:
            chunk = list(itertools.islice(f, chunk_size))
            if len(chunk) == 0:
                return
            yield chunk
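A minimal usage sketch for file_chunks, assuming a plain pathlib.Path can stand in for Path2 and that a hypothetical lines.txt input file exists; it just iterates the generator and reports each chunk's size.

from pathlib import Path

# Hypothetical input file; any text file works for this sketch.
for i, chunk in enumerate(file_chunks(Path("lines.txt"), chunk_size=100)):
    print(f"chunk {i}: {len(chunk)} lines")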
Example #2
0
import json
import random
from typing import List, Tuple

import torch

# Relies on the module-level `args` namespace and on the project's Path2,
# eprint, LabeledTransition, Job, ReinforceGraph, and QEstimator definitions.
def resume(resume_file: Path2, weights: Path2,
           q_estimator: QEstimator) -> \
        Tuple[List[LabeledTransition],
              List[Job],
              List[Tuple[str, ReinforceGraph]]]:
    eprint("Looks like there was a session in progress for these weights! "
           "Resuming")
    q_estimator_name, *saved = torch.load(str(weights))
    q_estimator.load_saved_state(*saved)
    replay_memory = []
    with resume_file.open('r') as f:
        num_samples = sum(1 for _ in f)
    if num_samples > args.buffer_max_size:
        # Sample 1-based line numbers so they match the enumerate() below,
        # and use a set for fast membership tests.
        samples_to_use = set(random.sample(range(1, num_samples + 1),
                                           args.buffer_max_size))
    else:
        samples_to_use = None
    with resume_file.open('r') as f:
        for (idx, line) in enumerate(f, start=1):
            if samples_to_use is not None and idx not in samples_to_use:
                continue
            try:
                replay_memory.append(LabeledTransition.from_dict(
                    json.loads(line)))
            except json.decoder.JSONDecodeError:
                eprint(f"Problem loading line {idx}: {line}")
                raise
    already_done = []
    graphs_done = []
    with weights.with_suffix('.done').open('r') as f:
        for line in f:
            next_done = json.loads(line)
            already_done.append((Path2(next_done[0]), next_done[1],
                                 next_done[2]))
            graphpath = (args.graphs_dir / next_done[1]).with_suffix(".png")
            # Convert to str before appending ".json": Path objects don't
            # support "+" concatenation.
            graph = ReinforceGraph.load(str(graphpath) + ".json")
            graphs_done.append((graphpath, graph))
    return replay_memory, already_done, graphs_done
Example #3
0
import itertools

def file_chunks(filepath: Path2, chunk_size: int):
    """Yield chunks of lines, extending each full chunk until it ends on a
    "-----" separator line (or the file ends)."""
    with filepath.open(mode='r') as f:
        while True:
            chunk = list(itertools.islice(f, chunk_size))
            if len(chunk) == chunk_size:
                # Keep reading past the chunk boundary until the record
                # separator is reached, but never more than double the size.
                while chunk[-1] != "-----\n":
                    nextline = f.readline()
                    if not nextline:
                        break
                    chunk += [nextline]
                    assert len(chunk) < chunk_size * 2
            elif len(chunk) == 0:
                return
            yield chunk
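A minimal sketch of this variant's record-boundary behaviour, using a throwaway temporary file and a plain pathlib.Path in place of Path2; the record contents are illustrative only.

import tempfile
from pathlib import Path

# Two "-----"-delimited records, the first one longer than chunk_size.
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write("a\nb\nc\nd\n-----\ne\n-----\n")

for chunk in file_chunks(Path(tmp.name), chunk_size=3):
    # Every full chunk is extended until it ends on the separator line.
    print(len(chunk), repr(chunk[-1]))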
def save_weights(self, filename: Path2, args: argparse.Namespace) -> None:
    # Save the evaluator's metadata, the tactic and token maps, and the
    # model parameters as a single pickled tuple.
    with cast(BinaryIO, filename.open('wb')) as f:
        torch.save(
            ("features evaluator", args, sys.argv,
             (self.tactic_map, self.token_map), self.model.state_dict()),
            f)
def save_weights(self, filename: Path2, args: argparse.Namespace) -> None:
    # Same pattern for the polyarg evaluator, with an extra boolean in the
    # saved tuple.
    with cast(BinaryIO, filename.open('wb')) as f:
        torch.save(("polyarg evaluator", args, sys.argv, True,
                    self.model.state_dict()), f)
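These save calls pair with the unpacking pattern used by resume in Example #2. A minimal, hypothetical loading sketch follows: weights_path and evaluator are placeholder names, and load_saved_state is assumed to accept the remaining tuple elements.

import torch

# The first element is the evaluator tag ("features evaluator" or
# "polyarg evaluator"); the rest is handed back to the evaluator object.
evaluator_name, *saved_state = torch.load(str(weights_path))
evaluator.load_saved_state(*saved_state)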
def write_summary_html(filename: Path2,
                       options: Sequence[Tuple[str, str]],
                       unparsed_args: List[str],
                       cur_commit: str, cur_date: datetime.datetime,
                       weights_hash: str,
                       individual_stats: List[ReportStats],
                       combined_stats: ReportStats) -> None:
    """Write the top-level HTML report summarizing per-file and combined stats."""
    def report_header(tag: Any, doc: Doc, text: Text) -> None:
        html_header(tag, doc, text, index_css, index_js,
                    "Proverbot Report")
    doc, tag, text, line = Doc().ttl()
    with tag('html'):
        report_header(tag, doc, text)
        with tag('body'):
            with tag('h4'):
                text("{} files processed".format(len(individual_stats)))
            with tag('h5'):
                text("Commit: {}".format(cur_commit))
            with tag('h5'):
                text("Run on {}".format(cur_date.strftime("%Y-%m-%d %H:%M:%S.%f")))
            with tag('img',
                     ('src', 'logo.png'),
                     ('id', 'logo')):
                pass
            with tag('h2'):
                text("Proofs Completed: {}% ({}/{})"
                     .format(stringified_percent(combined_stats.num_proofs_completed,
                                                 combined_stats.num_proofs),
                             combined_stats.num_proofs_completed,
                             combined_stats.num_proofs))
            with tag('ul'):
                for k, v in options:
                    if k == 'filenames':
                        continue
                    elif not v:
                        continue
                    with tag('li'):
                        text("{}: {}".format(k, v))

            with tag('table'):
                with tag('tr', klass="header"):
                    line('th', 'Filename')
                    line('th', 'Number of Proofs in File')
                    line('th', '% Proofs Completed')
                    line('th', '% Proofs Incomplete')
                    line('th', '% Proofs Failed')
                    line('th', 'Details')
                sorted_rows = sorted(individual_stats,
                                     key=lambda fresult: fresult.num_proofs,
                                     reverse=True)
                for fresult in sorted_rows:
                    if fresult.num_proofs == 0:
                        continue
                    with tag('tr'):
                        line('td', fresult.filename)
                        line('td', str(fresult.num_proofs))
                        line('td', stringified_percent(fresult.num_proofs_completed,
                                                       fresult.num_proofs))
                        line('td', stringified_percent(fresult.num_proofs -
                                                       (fresult.num_proofs_completed +
                                                        fresult.num_proofs_failed),
                                                       fresult.num_proofs))
                        line('td', stringified_percent(fresult.num_proofs_failed,
                                                       fresult.num_proofs))
                        with tag('td'):
                            with tag('a',
                                     href=escape_filename(fresult.filename) + ".html"):
                                text("Details")
                with tag('tr'):
                    line('td', "Total")
                    line('td', str(combined_stats.num_proofs))
                    line('td', stringified_percent(combined_stats.num_proofs_completed,
                                                   combined_stats.num_proofs))
                    line('td', stringified_percent(combined_stats.num_proofs -
                                                   (combined_stats.num_proofs_completed +
                                                    combined_stats.num_proofs_failed),
                                                   combined_stats.num_proofs))
                    line('td', stringified_percent(combined_stats.num_proofs_failed,
                                                   combined_stats.num_proofs))
            text(f'Trained as: {unparsed_args}')
            doc.stag('br')
            text(f"Reported as: {sys.argv}")
            doc.stag('br')
            text(f"Weights hash: {weights_hash}")

    with filename.open("w") as fout:
        fout.write(doc.getvalue())