Example #1
def start_server_factory():
    def dump():
        print "$ python server.py -c <game> <generation_name>"
        print "$ python server.py <config_file>"
        sys.exit(1)

    if len(sys.argv) != 2 and len(sys.argv) != 4:
        dump()

    if len(sys.argv) == 4:
        if sys.argv[1] != "-c":
            dump()

        game, generation_prefix = sys.argv[2], sys.argv[3]
        conf = templates.server_config_template(game, generation_prefix, 1)
        print attrutil.attr_to_json(conf, pretty=True)

    else:
        if sys.argv[1] == "-c":
            dump()

        from ggplib.util.init import setup_once
        setup_once("server")

        from ggpzero.util.keras import init
        init()

        filename = sys.argv[1]
        assert os.path.exists(filename)
        ServerBroker(filename)
        reactor.run()
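In practice the two invocation forms above look something like this (the game and generation names are made up). The -c form prints a template config to stdout, which can be redirected to a file and then passed back in to start the server:

$ python server.py -c breakthrough run1 > server.conf
$ python server.py server.conf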
Example #2
    def save_sample_data(self):
        if self.training_in_progress:
            log.warning("skip writing json (gzipped): %s" %
                        self.sample_data_filename)
            return

        gen_samples = datadesc.GenerationSamples()
        gen_samples.game = self.conf.game
        gen_samples.date_created = get_date_string()

        gen_samples.with_generation = self.get_generation_name(
            self.conf.current_step)

        # only save the minimal number for this run
        gen_samples.num_samples = min(len(self.accumulated_samples),
                                      self.conf.num_samples_to_train)
        gen_samples.samples = self.accumulated_samples[:gen_samples.num_samples]

        # write json file
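        # (FLOAT_REPR is a Python 2 json-encoder hook; overriding it caps
        # floats at 5 decimal places in the encoded output)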
        json.encoder.FLOAT_REPR = lambda f: ("%.5f" % f)

        log.info("writing json (gzipped): %s" % self.sample_data_filename)
        with gzip.open(self.sample_data_filename, 'w') as f:
            f.write(attrutil.attr_to_json(gen_samples, pretty=False))

        return gen_samples
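Reading such a file back is the mirror image: gzip plus json_to_attr, exactly the pattern the sync() examples below use. A minimal sketch (the filename and the attrutil import path are assumptions):

import gzip

from ggpzero.util import attrutil  # import path assumed

# load the gzipped JSON back into a GenerationSamples object,
# mirroring the pattern sync() uses below
data = attrutil.json_to_attr(gzip.open("gen_samples.json.gz").read())
assert data.num_samples == len(data.samples)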
Example #3
def start_worker_factory():

    if len(sys.argv) != 2:
        print "$ python worker.py -c"
        print "$ python server.py <config_file>"
        sys.exit(1)

    if sys.argv[1] == "-c":
        print attrutil.attr_to_json(default_conf(), pretty=True)
        return

    from ggplib.util.init import setup_once
    setup_once("worker")

    from ggpzero.util.keras import init
    init()

    broker = Worker(sys.argv[1])
    broker.start()
Example #4
    def save_our_config(self, rolled=False):
        if os.path.exists(self.conf_filename):
            if rolled:
                shutil.copy(
                    self.conf_filename, self.conf_filename + "-%00d" %
                    (self.conf.current_step - 1))
            else:
                shutil.copy(self.conf_filename, self.conf_filename + "-bak")

        with open(self.conf_filename, 'w') as open_file:
            open_file.write(attrutil.attr_to_json(self.conf, pretty=True))
Example #5
def update_match_info(game, match_info):
    # overwrites match info file
    path_to_matches = matches_path(game)
    match_info_path = os.path.join(path_to_matches,
                                   match_info.randomToken + ".json")
    with open(match_info_path, 'w') as f:
        f.write(attrutil.attr_to_json(match_info))

    the_summaries = update_summaries(game, match_info)

    # overwrites summaries file
    save_summaries(the_summaries)
Example #6
    def format_msg(self, payload):
        assert attr.has(payload)
        name = clz_to_name(payload.__class__)

        msg = Message(name, payload)

        data = attrutil.attr_to_json(msg)
        compressed_data = zlib.compress(data)

        preamble = self.header.pack(len(compressed_data))
        assert len(preamble) == self.header.size
        return preamble + compressed_data
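format_msg produces a fixed-size length preamble followed by zlib-compressed JSON, so the receive side presumably reverses those steps. A minimal sketch of that inverse (parse_msg is a hypothetical name; it assumes self.header is a struct.Struct with a single length field, as the pack/size usage above implies):

    def parse_msg(self, buf):
        # split off the fixed-size length preamble
        preamble = buf[:self.header.size]
        (length,) = self.header.unpack(preamble)

        # decompress exactly that many bytes, then rebuild the attr object
        data = zlib.decompress(buf[self.header.size:self.header.size + length])
        return attrutil.json_to_attr(data)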
Example #7
def elo_dump_and_save(filename, ratings, verbose=False):
    if verbose:
        print "ELO DUMP:"
        print "========="

    # sort in place, so also benefit by saving in this order
    ratings.players.sort(reverse=True, key=operator.attrgetter("elo"))
    if verbose:
        for p in ratings.players:
            print p.name, p.played, p.elo

    with open(filename, "w") as f:
        contents = at.attr_to_json(ratings, pretty=True)
        f.write(contents)
Example #8
    def save_network(self, nn, generation_name=None):
        game = nn.generation_descr.game
        if generation_name is None:
            generation_name = nn.generation_descr.name
        else:
            nn.generation_descr.name = generation_name

        # save model / weights
        with open(self.model_path(game, generation_name), "w") as f:
            f.write(nn.get_model().to_json())

        nn.get_model().save_weights(self.weights_path(game, generation_name),
                                    overwrite=True)

        with open(self.generation_path(game, generation_name), "w") as f:
            f.write(attrutil.attr_to_json(nn.generation_descr, pretty=True))
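save_network writes three artefacts: the model architecture as JSON, the weights, and the generation description. A plausible load-side counterpart (load_network is a hypothetical name, but model_from_json and load_weights are standard Keras calls, and the path helpers mirror those used above):

    def load_network(self, game, generation_name):
        from keras.models import model_from_json

        # rebuild the architecture from the saved model JSON
        with open(self.model_path(game, generation_name)) as f:
            model = model_from_json(f.read())

        # restore the trained weights into it
        model.load_weights(self.weights_path(game, generation_name))

        # and reload the generation description saved alongside
        with open(self.generation_path(game, generation_name)) as f:
            generation_descr = attrutil.json_to_attr(f.read())

        return model, generation_descr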
Example #9
def test_attrs_recursive():
    print 'test_attrs_recursive.1'

    c = Container(DummyMsg('a'), DummyMsg('b'), DummyMsg('c'))

    m = Container(DummyMsg('o'), DummyMsg('p'), DummyMsg(c))

    d = attrutil.asdict_plus(m)
    pprint(d)

    r = attrutil.fromdict_plus(d)
    assert isinstance(r, Container)

    assert r.x.what == 'o'
    assert r.z.what.x.what == 'a'

    json_str = attrutil.attr_to_json(m, indent=4)
    print json_str

    k = attrutil.json_to_attr(json_str)
    assert k.x.what == 'o'
    assert k.z.what.x.what == 'a'
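For this round trip to work, Container and DummyMsg need to be attr-decorated classes along these lines, inferred from the attribute accesses in the asserts (the real definitions are not shown here, and attrutil may additionally require registering the classes for serialisation):

import attr

@attr.s
class DummyMsg(object):
    what = attr.ib()

@attr.s
class Container(object):
    # three positional fields; x and z come from the asserts above,
    # the middle field name is a guess
    x = attr.ib()
    y = attr.ib()
    z = attr.ib()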
Example #10
    def save_summary_file(self):
        with open(self.summary_path, 'w') as open_file:
            open_file.write(attrutil.attr_to_json(self.summary, pretty=True))
Example #11
    def sync(self):
        # check summary matches current set of files
        if not self.check_summary() or not self.verify_db():
            self.get_summary(create=True)
            self.create_db()

        for step, file_path, md5sum in self.files_to_process():
            # let's delete any spurious memory
            gc.collect()

            log.debug("Processing %s" % file_path)
            data = attrutil.json_to_attr(gzip.open(file_path).read())

            if len(data.samples) != data.num_samples:
                # pretty inconsequential, but we should at least notify
                msg = "num_samples (%d) versus actual samples (%s) differ... trimming"
                log.warning(msg % (data.num_samples, len(data.samples)))

                data.num_samples = min(len(data.samples), data.num_samples)
                data.samples = data.samples[:data.num_samples]

            log.debug("Game %s, with gen: %s and sample count %s" %
                      (data.game, data.with_generation, data.num_samples))

            indx = self.db.size
            stats = StatsAccumulator()
            t = self.transformer

            # ZZZ really slow
            # ZZZ profile/gather times in loop... (guessing the time is in decoding state)
            time_check = 0
            time_stats = 0
            time_decode = 0
            time_decode_prevs = 0
            time_channels = 0
            time_outputs = 0
            time_db_resize = 0
            time_db_insert = 0

            cur_size = indx

            for sample in self.augment_data(data.samples):
                et = ElaspedTime()
                #t.check_sample(sample)
                time_check += et.update()

                stats.add(sample)
                time_stats += et.update()

                # add channels

                # only decode if not already decoded (as in the case of augmentation)
                state = fast_decode_state(sample.state)

                time_decode += et.update()

                prev_states = [fast_decode_state(s) for s in sample.prev_states]

                time_decode_prevs += et.update()

                cols = [t.state_to_channels(state, prev_states)]
                time_channels += et.update()

                for ri, policy in enumerate(sample.policies):
                    cols.append(t.policy_to_array(policy, ri))
                time_outputs += et.update()

                cols.append(t.value_to_array(sample.final_score))

                # is this an efficient way to do things?
                if indx >= cur_size:
                    cur_size += 20
                    self.db.resize(cur_size)
                time_db_resize += et.update()
                for ii, name in enumerate(self.db.names):
                    self.db[name][indx] = cols[ii]
                indx += 1
                time_db_insert += et.update()

            print "time_check: %.2f" % time_check
            print "time_stats: %.2f" % time_stats
            print "time_decode: %.2f" % time_decode
            print "time_decode_prevs: %.2f" % time_decode_prevs
            print "time_channels: %.2f" % time_channels
            print "time_outputs: %.2f" % time_outputs
            print "time_db_resize: %.2f" % time_db_resize
            print "time_db_insert: %.2f" % time_db_insert

            if indx != cur_size:
                cur_size = indx
                self.db.resize(indx)

            self.db.flush()
            log.debug("Added %d samples to db" % stats.num_samples)

            # add to the summary and save it
            step_sum = datadesc.StepSummary(
                step=step,
                filename=file_path,
                with_generation=data.with_generation,
                num_samples=stats.num_samples,
                md5sum=md5sum,
                stats_unique_matches=stats.unique_matches,
                stats_draw_ratio=stats.draw_ratio,
                stats_bare_policies_ratio=stats.bare_policies_ratio,
                stats_av_starting_depth=stats.av_starting_depth,
                stats_av_ending_depth=stats.av_ending_depth,
                stats_av_resigns=stats.av_resigns,
                stats_av_resign_false_positive=stats.av_resign_false_positive,
                stats_av_puct_visits=stats.av_puct_visits,
                stats_ratio_of_roles=stats.ratio_of_roles,
                stats_av_final_scores=stats.av_final_scores,
                stats_av_puct_score_dist=stats.av_puct_score_dist)

            print attrutil.attr_to_json(step_sum, pretty=True)

            self.summary.last_updated = timestamp()
            self.summary.total_samples = self.db.size
            self.summary.step_summaries.append(step_sum)

            self.save_summary_file()
            log.debug("Saved summary file")

        # let's delete any spurious memory
        gc.collect()
        self.save_summary_file()
        log.info("Data cache synced, saved summary file.")
Example #12
    def save_our_config(self):
        if os.path.exists(self.conf_filename):
            shutil.copy(self.conf_filename, self.conf_filename + "-bak")

        with open(self.conf_filename, 'w') as open_file:
            open_file.write(attrutil.attr_to_json(self.conf, indent=4))
Example #13
    def sync(self):
        # check summary matches current set of files
        if not self.check_summary() or not self.verify_db():
            self.get_summary(create=True)
            self.create_db()

        for step, file_path, md5sum in self.files_to_process():
            # let's delete any spurious memory
            gc.collect()

            log.debug("Processing %s" % file_path)
            data = attrutil.json_to_attr(gzip.open(file_path).read())

            if len(data.samples) != data.num_samples:
                # pretty inconsequential, but we should at least notify
                msg = "num_samples (%d) versus actual samples (%s) differ... trimming"
                log.warning(msg % (data.num_samples, len(data.samples)))

                data.num_samples = min(len(data.samples), data.num_samples)
                data.samples = data.samples[:data.num_samples]

            log.debug("Game %s, with gen: %s and sample count %s" % (data.game,
                                                                     data.with_generation,
                                                                     data.num_samples))

            indx = self.db.size
            stats = StatsAccumulator()
            t = self.transformer

            # ZZZ really slow
            # ZZZ profile/gather times in loop... (guessing the time is in decoding state)
            time_check = 0
            time_stats = 0
            time_decode = 0
            time_decode_prevs = 0
            time_channels = 0
            time_outputs = 0
            time_db_resize = 0
            time_db_insert = 0

            cur_size = indx

            for sample in self.augment_data(data.samples):
                # ensure that final scores are clamped before adding to db
                sample.final_score = [min(1.0, v) for v in sample.final_score]
                sample.final_score = [max(0.0, v) for v in sample.final_score]

                sample_is_draw = False
                if abs(sample.final_score[0] - 0.5) < 0.01:
                    assert abs(sample.final_score[1] - 0.5) < 0.01
                    sample_is_draw = True

                # XXX highly experimental
                if sample_is_draw and self.score_draw_as_random_hack:
                    # the idea is just to randomly assign a win or loss to train on.  Then the
                    # network can average out over a 'bazillion' draw samples and determine that
                    # the value should be 0.5.  In theory.  XXX Who knows?

                    if random.random() > 0.5:
                        sample.final_score = [1.0, 0]
                    else:
                        sample.final_score = [0, 1.0]

                et = ElaspedTime()

                # XXX too slow, and only useful for debugging serious bugs - disable
                # t.check_sample(sample)
                time_check += et.update()

                stats.add(sample, was_draw=sample_is_draw)
                time_stats += et.update()

                # add channels

                # only decode if not already decoded (as in the case of augmentation)
                state = fast_decode_state(sample.state)

                time_decode += et.update()

                prev_states = [fast_decode_state(s) for s in sample.prev_states]

                time_decode_prevs += et.update()

                cols = [t.state_to_channels(state, prev_states)]
                time_channels += et.update()

                for ri, policy in enumerate(sample.policies):
                    cols.append(t.policy_to_array(policy, ri))

                time_outputs += et.update()

                cols.append(t.value_to_array(sample.final_score))

                # is this an efficient way to do things?
                if indx >= cur_size:
                    cur_size += 20
                    self.db.resize(cur_size)

                time_db_resize += et.update()
                for ii, name in enumerate(self.db.names):
                    self.db[name][indx] = cols[ii]

                indx += 1
                time_db_insert += et.update()

            print "time_check: %.2f" % time_check
            print "time_stats: %.2f" % time_stats
            print "time_decode: %.2f" % time_decode
            print "time_decode_prevs: %.2f" % time_decode_prevs
            print "time_channels: %.2f" % time_channels
            print "time_outputs: %.2f" % time_outputs
            print "time_db_resize: %.2f" % time_db_resize
            print "time_db_insert: %.2f" % time_db_insert

            if indx != cur_size:
                cur_size = indx
                self.db.resize(indx)

            self.db.flush()
            log.debug("Added %d samples to db" % stats.num_samples)

            # add to the summary and save it
            step_sum = datadesc.StepSummary(step=step,
                                            filename=file_path,
                                            with_generation=data.with_generation,
                                            num_samples=stats.num_samples,
                                            md5sum=md5sum,
                                            stats_unique_matches=stats.unique_matches,
                                            stats_draw_ratio=stats.draw_ratio,
                                            stats_bare_policies_ratio=stats.bare_policies_ratio,
                                            stats_av_starting_depth=stats.av_starting_depth,
                                            stats_av_ending_depth=stats.av_ending_depth,
                                            stats_av_resigns=stats.av_resigns,
                                            stats_av_resign_false_positive=stats.av_resign_false_positive,
                                            stats_av_puct_visits=stats.av_puct_visits,
                                            stats_ratio_of_roles=stats.ratio_of_roles,
                                            stats_av_final_scores=stats.av_final_scores,
                                            stats_av_puct_score_dist=stats.av_puct_score_dist)

            print attrutil.attr_to_json(step_sum, pretty=True)

            self.summary.last_updated = timestamp()
            self.summary.total_samples = self.db.size
            self.summary.step_summaries.append(step_sum)

            self.save_summary_file()
            log.debug("Saved summary file")

        # let's delete any spurious memory
        gc.collect()
        self.save_summary_file()
        log.info("Data cache synced, saved summary file.")
Example #14
def save_summaries(the_summaries):
    with open(summary_path(), 'w') as f:
        f.write(attrutil.attr_to_json(the_summaries,
                                      sort_keys=True,
                                      separators=(',', ': '),
                                      indent=4))
Example #15
def setup():
    # (def line reconstructed; the original snippet begins mid-function,
    # and setup() is called from __main__ below)
    setup_once()

    from ggpzero.util.keras import init
    init()

    import tensorflow as tf
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    tf.logging.set_verbosity(tf.logging.ERROR)


if __name__ == "__main__":
    ''' provide a config filename.  if the file does not exist, a template
        config is created and saved there before running. '''

    setup()

    conf_filename = sys.argv[1]
    if os.path.exists(conf_filename):
        config = at.json_to_attr(open(conf_filename).read())
    else:
        print "Creating config"
        config = template_config()

    # save it - pick up new features
    with open(conf_filename, "w") as f:
        contents = at.attr_to_json(config, pretty=True)
        f.write(contents)

    lg = LittleGolemConnection(config)
    lg.loop_forever()