Example #1
    def __init__(
            self,
            dct: DNA = None,  # initial params dictionary
            dna_TFD: str = None,  # dna top-folder
            dna_SFD: str = None,  # dna sub-folder
            fn_pfx: str = FN_PREFIX,  # dna filename prefix
            verb=0):

        super().__init__()
        self.verb = verb

        if not dct: dct = {}
        self.update(dct)

        if verb > 0: print('\n*** ParaDict *** initialized')

        self.dna_FD = None
        if dna_TFD:

            if not dna_SFD and 'name' in self: dna_SFD = self['name']
            assert dna_SFD, 'ERR: dna subfolder must be given!'

            prep_folder(dna_TFD)
            self.dna_FD = f'{dna_TFD}/{dna_SFD}'
            self._fpx = fn_pfx

            self.add_new(self.__load())  # add new from save
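A minimal usage sketch, assuming the class above is ParaDict (as its init print suggests) and that FN_PREFIX and prep_folder come from the same module; the folder and key names below are hypothetical:

params = ParaDict(
    dct={'name': 'my_model', 'lr': 1e-3},  # initial params dictionary
    dna_TFD='_dna',  # top folder; sub-folder defaults to dct['name']
    verb=1)
params['batch_size'] = 256  # behaves like a regular dict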
Example #2
    def save(
            self,
            saver=None,  # saver name
            step: int = None,  # for None uses self step
            session=None):

        assert saver in self.savers, 'ERR: unknown saver'

        prep_folder(self.save_FD)
        sv_name = f' {saver}' if saver else ''
        if self.verb > 0: print(f'MultiSaver{sv_name} saves variables...')

        if not session: session = self.session

        for var in self.vars:
            ckpt_path = f'{self.save_FD}/{var}/{self.model_name}'
            if saver: ckpt_path += f'_{saver}'
            ckpt_path += '.ckpt'

            latest_filename = 'checkpoint'
            if saver: latest_filename += f'_{saver}'

            self.savers[saver][var].save(
                sess=session,
                save_path=ckpt_path,
                global_step=step if step is not None else self.s_step[saver][var],
                latest_filename=latest_filename,
                write_meta_graph=False,
                write_state=True)
            self.s_step[saver][var] += 1
            if self.verb > 1: print(f' > saved variables {var}')
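A hedged usage sketch; msaver stands for an already-constructed MultiSaver (its constructor is not shown), here assumed to hold a saver named 'opt':

msaver.save(saver='opt')  # writes <save_FD>/<var>/<model_name>_opt.ckpt-<step> per variable
msaver.save(saver='opt', step=100)  # uses an explicit global_step instead of the tracked s_step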
Example #3
    def __init__(self,
                 dmk_dna: dict,
                 use_pretrained_cn=True,
                 acc_won_iv=(100000, 200000),
                 verb=1):

        self.verb = verb
        if self.verb > 0: print('\n *** GamesManager *** starts...')

        prep_folder(MODELS_FD)
        prep_folder(DMK_MODELS_FD)

        self.use_pretrained_cn = use_pretrained_cn

        self.in_que = Queue()  # queue receiving data from DMKs and tables

        self.gx_iv = acc_won_iv[-2]

        # create DMK dictionary
        self.dmkD = {}
        for name in dmk_dna:
            dmk_type = dmk_dna[name].pop('dmk_type')
            self.dmkD[name] = dmk_type(gm_que=self.in_que,
                                       name=name,
                                       acc_won_iv=acc_won_iv,
                                       **dmk_dna[name])

        assert sum([self.dmkD[name].n_players
                    for name in self.dmkD]) % N_TABLE_PLAYERS == 0

        self.families = set([self.dmkD[name].family for name in self.dmkD])

        self.tables = []  # list of tables
Example #4
    def save(self, save_old=True):

        prep_folder(self.dna_FD)

        if save_old:
            if os.path.isfile(self.__obj_FN()):
                shutil.copy(self.__obj_FN(), f'{self.__obj_FN()}_OLD')
            if os.path.isfile(self.__txt_FN()):
                shutil.copy(self.__txt_FN(), f'{self.__txt_FN()}_OLD')

        w_pickle(self, self.__obj_FN())
        with open(self.__txt_FN(), 'w') as file:
            file.write(str(self))
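A short usage sketch, assuming this save belongs to the same ParaDict as in Example #1 and that w_pickle writes a pickle to the given path:

pd = ParaDict(dct={'name': 'my_model'}, dna_TFD='_dna')
pd['lr'] = 1e-4
pd.save()  # writes the obj and txt files, copying any existing ones to *_OLD first
pd.save(save_old=False)  # overwrites without keeping *_OLD backups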
Example #5
def set_logger(log_folder: str,
               custom_name: Optional[str] = None,
               add_stamp=True,
               verb=1):

    prep_folder(log_folder)

    file_name = custom_name if custom_name else 'run'
    if add_stamp: file_name += f'_{stamp()}'
    file_name += '.log'

    sys.stdout = Logger(f'{log_folder}/{file_name}')

    path = sys.argv[0]
    if verb > 0:
        print(f'\nLogger started for {path} with file {file_name}')
        print(f' > in folder: {log_folder}')
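A usage sketch; it assumes the Logger class (not shown here) tees writes to both the original stdout and the log file:

set_logger(log_folder='_logs', custom_name='train', add_stamp=True, verb=1)
print('this goes to the console and to _logs/train_<stamp>.log')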
Example #6
def hpmser_GX(
        func: Callable,  # function whose parameters need to be optimized
        psd: dict,  # function params space for search
        func_defaults: dict = None,  # function defaults
        name: str = None,  # for None stamp will be used
        add_stamp=True,  # adds short stamp to name, when name given
        # sampling process parameters
        use_GX=True,  # uses genetic crossing (GX) while sampling top points
        np_smooth: int = 3,  # number of points used for smoothing
        prob_opt=0.3,  # probability of sampling from estimated space (optimized)
        n_opt=30,  # number of points taken from estimated space
        prob_top=0.3,  # probability of sampling from area of top points
        n_top=20,  # number of points taken from area of top points
        use_config=True,  # uses config file to set/log settings
        distance_L2=True,  # True: uses L2 for distance calculation, False: L1
        stochastic_est: int = 3,  # number of samples used for stochastic estimation, 0 disables it
        # process / environment options
        devices=None,  # devices to use for search
        use_all_cores=True,  # True: when devices is None >> uses all cores, otherwise as set by devices
        subprocess=True,  # True: runs func in subprocesses, otherwise in this process
        n_loops=None,  # limit for number of search loops
        hpmser_FD: str or bool = True,  # folder where search results and html are saved; None does not save, True uses the default
        do_TB=True,  # plots with TB
        preferred_axes: list = None,  # preferred axes for plot
        top_show_freq=20,
        verb=1) -> SRL:

    # some defaults
    if not name: name = stamp()
    elif add_stamp: name = f'{stamp(letters=0)}_{name}'
    if hpmser_FD is True: hpmser_FD = 'hpmser_runs'

    prep_folder(hpmser_FD)  # create folder if needed

    # check for continuation
    srl = None
    results_FDL = sorted(os.listdir(hpmser_FD))
    if len(results_FDL):
        print(
            f'\nThere are {len(results_FDL)} searches in hpmser_FD, do you want to continue with the last one ({results_FDL[-1]})? ..waiting 10 sec (y/n, default: n)'
        )
        i, o, e = select.select([sys.stdin], [], [], 10)
        if i and sys.stdin.readline().strip() == 'y':
            name = results_FDL[-1]  # take last
            srl = SRL(name=name)
            srl.load(f'{hpmser_FD}/{name}')
            srl.smooth_and_sort()

    set_logger(log_folder=f'{hpmser_FD}/{name}',
               custom_name=name,
               add_stamp=False,
               verb=verb)  # set logger

    tbwr = TBPoltter(f'{hpmser_FD}/{name}') if do_TB else None

    if verb > 0:
        print(
            f'\n*** hpmser *** {name} started for: {func.__name__}, conf: {np_smooth} {prob_opt:.1f}-{n_opt} {prob_top:.1f}-{n_top}'
        )
        if srl: print(f' search will continue with {len(srl)} results...')

    if not srl:
        srl = SRL(paspa=PaSpa(psdd=psd, distance_L2=distance_L2,
                              verb=verb - 1),
                  name=name)
    srl.plot_axes = preferred_axes

    if verb > 0: print(f'\n{srl.paspa}\n')

    # manage devices
    if not subprocess: use_all_cores = False
    if devices is None:
        devices = [None] * (cpu_count() if use_all_cores else 1)  # manage case of None for devices
    assert subprocess or len(devices) == 1, 'ERR: cannot use many devices without subprocess'

    loop_func = ifunc_wrap_MP if subprocess else ifunc_wrap

    # manage func_defaults, remove psd keys from func_defaults
    if not func_defaults: func_defaults = {}
    for k in psd:
        if k in func_defaults: func_defaults.pop(k)

    # prepare special (corner) points
    cpa, cpb = srl.paspa.sample_corners()
    special_points = {0: cpa, 1: cpb}
    stochastic_indexes = [2 + ix for ix in range(stochastic_est)]
    sp = srl.paspa.sample_point()
    for ix in stochastic_indexes:
        special_points[ix] = sp
    stochastic_results = []

    avg_dst = srl.get_avg_dst()
    cr_ID = len(srl)
    sample_num = cr_ID

    max_run_ID = None if not len(srl) else srl.get_top_SR().id
    prev_max_run_ID = None
    scores_all = []
    try:
        while True:

            if use_config:
                config = update_config(hpmser_FD, name, np_smooth, prob_opt,
                                       n_opt, prob_top, n_top)
                np_smooth = config['np_smooth']
                prob_opt = config['prob_opt']
                n_opt = config['n_opt']
                prob_top = config['prob_top']
                n_top = config['n_top']
                srl.set_np_smooth(np_smooth)

            # use all available devices
            while devices:
                if verb > 1:
                    print(
                        f' >> got {len(devices)} devices at {cr_ID} loop start'
                    )
                if sample_num in special_points:
                    spoint = special_points[sample_num]
                    est_score = 0
                else:
                    params = {
                        'prob_opt': prob_opt,
                        'n_opt': n_opt,
                        'prob_top': prob_top,
                        'n_top': n_top,
                        'avg_dst': avg_dst
                    }
                    spoint, est_score = srl.get_opt_sample_GX(
                        **params) if use_GX else srl.get_opt_sample(**params)

                device = devices.pop(0)
                if verb > 1:
                    print(
                        f' >> starting run #{sample_num} on device {device} ..'
                    )
                loop_func(func=func,
                          device=device,
                          spoint=spoint,
                          est_score=est_score,
                          s_time=time.time(),
                          sample_num=sample_num,
                          func_defaults=func_defaults)

                sample_num += 1

            # get result from que
            res = hpmser_que.get()
            devices.append(res['device'])  # return device
            if verb > 1: print(f' >> got result from device {res["device"]}')
            sr = srl.add_result(point=res['spoint'], score=res['score'])

            if res['sample_num'] in stochastic_indexes:
                stochastic_results.append(res['score'])
                if len(stochastic_results) == stochastic_est and verb:
                    print(
                        f'\n*** stochastic estimation with {stochastic_est} points:'
                    )
                    print(
                        f'  > std_dev: {msmx(stochastic_results)["std"]:.3f}\n'
                    )

            avg_dst = srl.get_avg_dst()
            srl.save(folder=f'{hpmser_FD}/{name}')

            top_SR = srl.get_top_SR()

            # got a new MAX
            if top_SR.id != max_run_ID:
                prev_max_run_ID = max_run_ID
                max_run_ID = top_SR.id

            # current sr report
            if verb > 0:
                dif = sr.smooth_score - res['est_score']
                difs = f'{"+" if dif>0 else "-"}{abs(dif):.4f}'

                dist_to_max = srl.paspa.distance(top_SR.point, sr.point)
                time_passed = int(time.time() - res["s_time"])

                srp = f'{sr.id} {sr.smooth_score:.4f} [{sr.score:.4f} {difs}] {top_SR.id}:{dist_to_max:.3f}'
                srp += f'  avg_dst:{avg_dst:.3f}'
                srp += f'  conf: {np_smooth} {prob_opt:.1f}-{n_opt} {prob_top:.1f}-{n_top} {time_passed}s'
                print(srp)

                # new MAX report (last search is a new MAX, sr == top_SR)
                if max_run_ID == cr_ID:

                    msr = f'{srl.paspa.point_2str(sr.point)}\n'

                    prev_sr = srl.get_SR(prev_max_run_ID)
                    dp = srl.paspa.distance(prev_sr.point,
                                            sr.point) if prev_sr else 0

                    msr += f' dst_prev:{dp:.3f}\n'
                    for nps in NP_SMOOTH:
                        ss_np, avd, all_sc = srl.smooth_point(sr.point, nps)
                        msr += f'  NPS:{nps} {ss_np:.4f} [{max(all_sc):.4f}-{min(all_sc):.4f}] {avd:.3f}\n'
                    print(msr)

                if top_show_freq and len(srl) % top_show_freq == 0:
                    print(srl.nice_str(n_top=5, all_nps=None))

            if tbwr:
                scores_all.append(sr.score)
                score_diff = sr.score - res['est_score']
                score_avg = sum(scores_all) / len(scores_all)
                tbwr.log(avg_dst, 'hpmser/avg_dst', cr_ID)
                tbwr.log(score_avg, 'hpmser/score_avg', cr_ID)
                tbwr.log(sr.score, 'hpmser/score_current', cr_ID)
                tbwr.log(score_diff, 'hpmser/space_estimation_error', cr_ID)
                tbwr.log(abs(score_diff), 'hpmser/space_estimation_error_abs',
                         cr_ID)

            if len(srl) == n_loops:
                if verb > 0: print(f'...n_loops ({n_loops}) done!')
                break

            cr_ID += 1

    except KeyboardInterrupt:
        if verb > 0: print('...interrupted')

    srl.save(folder=f'{hpmser_FD}/{name}')

    results = srl.nice_str()
    if hpmser_FD:
        with open(f'{hpmser_FD}/{name}/{name}_results.txt', 'w') as file:
            file.write(results)

    if verb > 0: print(results)

    return srl
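A hedged usage sketch of a search run; the psd format (min/max range per axis) is assumed from PaSpa, and it is assumed func accepts a device keyword and returns a float score:

def my_func(a: float, b: float, device=None) -> float:
    # toy objective with a single maximum at a=1.0, b=-2.0
    return -(a - 1.0) ** 2 - (b + 2.0) ** 2

psd = {'a': [-5.0, 5.0], 'b': [-5.0, 5.0]}
srl = hpmser_GX(func=my_func, psd=psd, n_loops=200, do_TB=False, verb=1)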
Example #7
    def __init__(self, logdir: str, flush_secs=10):
        prep_folder(logdir)
        self.sw = tf.summary.FileWriter(logdir=logdir, flush_secs=flush_secs)
Example #8
def train_cn(
        cn_dict: dict,
        device=-1,
        n_batches=50000,
        tr_SM=(1000, 10),  # train (size, Monte Carlo samples)
        ts_SM=(2000, 100000),  # test  (size, Monte Carlo samples)
        do_test=True,
        rq_trg=200,
        rep_freq=100,
        his_freq=500,
        verb=0):

    prep_folder(MODELS_FD)
    prep_folder(CN_MODELS_FD)

    test_batch, c_tuples = None, None
    if do_test: test_batch, c_tuples = get_test_batch(ts_SM[0], ts_SM[1])

    iPF = partial(prep2X7Batch, bs=tr_SM[0], n_monte=tr_SM[1])
    iPF.__name__ = 'prep2X7Batch'
    dqmp = DeQueMP(func=iPF, rq_trg=rq_trg, verb=verb)

    cnet = NEModel(  # model
        fwd_func=card_net,
        mdict=cn_dict,
        devices=device,
        save_TFD=CN_MODELS_FD,
        verb=verb)

    ze_pro = ZeroesProcessor(intervals=(50, 500, 5000),
                             tag_pfx='7_zeroes',
                             summ_writer=cnet.summ_writer)

    nHighAcc = 0
    sTime = time.time()
    for b in range(1, n_batches):

        # feed loop for towers
        batches = [dqmp.get_result() for _ in cnet.gFWD]
        feed = {}
        for ix in range(len(cnet.gFWD)):
            batch = batches[ix]
            tNet = cnet.gFWD[ix]
            feed.update({
                tNet['train_PH']: True,
                tNet['inA_PH']: batch['cA'],
                tNet['inB_PH']: batch['cB'],
                tNet['won_PH']: batch['wins'],
                tNet['rnkA_PH']: batch['rA'],
                tNet['rnkB_PH']: batch['rB'],
                tNet['mcA_PH']: batch['mAWP']
            })
        batch = batches[0]

        fetches = [
            cnet['optimizer'], cnet['loss'], cnet['loss_W'], cnet['loss_R'],
            cnet['loss_AWP'], cnet['acc_W'], cnet['acc_WC'],
            cnet['predictions_W'], cnet['oh_notcorrect_W'], cnet['acc_R'],
            cnet['acc_RC'], cnet['predictions_R'], cnet['oh_notcorrect_R'],
            cnet['gg_norm'], cnet['avt_gg_norm'], cnet['scaled_LR'],
            cnet['zeroes']
        ]
        lenNH = len(fetches)
        if his_freq and b % his_freq == 0: fetches.append(cnet['hist_summ'])

        out = cnet.session.run(fetches, feed_dict=feed)
        if len(out) == lenNH: out.append(None)
        _, loss, loss_W, loss_R, loss_AWP, acc_W, acc_WC, pred_W, ohnc_W, acc_R, acc_RC, pred_R, ohnc_R, gn, agn, lRs, zs, hist_summ = out

        if hist_summ: cnet.summ_writer.add_summary(hist_summ, b)

        ze_pro.process(zs, b)

        if b % rep_freq == 0:
            """ prints stats of rank @batch
            if verbLev > 2:
                rStats = batch['numRanks']
                nHands = 2*len(batch['crd7AB'])
                for ix in range(len(rStats)):
                    rStats[ix] /= nHands
                    print('%.5f '%rStats[ix], end='')
                print()
                cum = 0
                for ix in range(len(rStats)):
                    cum += rStats[ix]
                    print('%.5f ' %cum, end='')
                print()

                wStats = batch['numWins']
                nWins = nHands / 2
                for ix in range(len(wStats)):
                    wStats[ix] /= nWins
                    print('%.3f ' % wStats[ix], end='')
                print()
            #"""

            print('%6d, loss: %.6f, accW: %.6f, gn: %.6f, (%d/s)' %
                  (b, loss, acc_W, gn, rep_freq * tr_SM[0] /
                   (time.time() - sTime)))
            sTime = time.time()

            accsum = tf.Summary(value=[
                tf.Summary.Value(tag='1_crdN/0_iacW', simple_value=1 - acc_W)
            ])
            accRsum = tf.Summary(value=[
                tf.Summary.Value(tag='1_crdN/1_iacR', simple_value=1 - acc_R)
            ])
            losssum = tf.Summary(value=[
                tf.Summary.Value(tag='1_crdN/2_loss', simple_value=loss)
            ])
            lossWsum = tf.Summary(value=[
                tf.Summary.Value(tag='1_crdN/3_lossW', simple_value=loss_W)
            ])
            lossRsum = tf.Summary(value=[
                tf.Summary.Value(tag='1_crdN/4_lossR', simple_value=loss_R)
            ])
            lossAWPsum = tf.Summary(value=[
                tf.Summary.Value(tag='1_crdN/5_lossAWP', simple_value=loss_AWP)
            ])
            gNsum = tf.Summary(
                value=[tf.Summary.Value(tag='1_crdN/6_gn', simple_value=gn)])
            agNsum = tf.Summary(
                value=[tf.Summary.Value(tag='1_crdN/7_agn', simple_value=agn)])
            lRssum = tf.Summary(
                value=[tf.Summary.Value(tag='1_crdN/8_lRs', simple_value=lRs)])
            cnet.summ_writer.add_summary(accsum, b)
            cnet.summ_writer.add_summary(accRsum, b)
            cnet.summ_writer.add_summary(losssum, b)
            cnet.summ_writer.add_summary(lossWsum, b)
            cnet.summ_writer.add_summary(lossRsum, b)
            cnet.summ_writer.add_summary(lossAWPsum, b)
            cnet.summ_writer.add_summary(gNsum, b)
            cnet.summ_writer.add_summary(agNsum, b)
            cnet.summ_writer.add_summary(lRssum, b)

            acc_RC = acc_RC.tolist()
            for cx in range(len(acc_RC)):
                csum = tf.Summary(value=[
                    tf.Summary.Value(tag=f'3_Rcia/{cx}ica',
                                     simple_value=1 - acc_RC[cx])
                ])
                cnet.summ_writer.add_summary(csum, b)

            acc_WC = acc_WC.tolist()
            accC01 = (acc_WC[0] + acc_WC[1]) / 2
            accC2 = acc_WC[2]
            c01sum = tf.Summary(value=[
                tf.Summary.Value(tag='5_Wcia/01cia', simple_value=1 - accC01)
            ])
            c2sum = tf.Summary(value=[
                tf.Summary.Value(tag='5_Wcia/2cia', simple_value=1 - accC2)
            ])
            cnet.summ_writer.add_summary(c01sum, b)
            cnet.summ_writer.add_summary(c2sum, b)

            #""" reporting of almost correct cases in late training
            if acc_W > 0.99: nHighAcc += 1
            if nHighAcc > 10 and acc_W < 1:
                nS = pred_R.shape[0]  # batch size (num samples)
                nBS = 0
                for sx in range(nS):
                    ncRsl = ohnc_R[sx].tolist()  # OH sample
                    if max(ncRsl):  # there is 1 >> not correct
                        nBS += 1
                        if nBS < 3:  # print max 2
                            cards = sorted(batch['cB'][sx])
                            cS7 = ''
                            for c in cards:
                                cS7 += ' %s' % PDeck.cts(c)
                            cr = PDeck.cards_rank(cards)
                            print(pred_R[sx], ncRsl.index(1), cS7, cr[-1])
                if nBS: print(nBS)
                nBS = 0
                for sx in range(nS):
                    ncWsl = ohnc_W[sx].tolist()
                    if max(ncWsl):
                        nBS += 1
                        if nBS < 3:
                            cardsA = batch['cA'][sx]
                            cardsB = batch['cB'][sx]
                            cS7A = ''
                            for c in cardsA:
                                cS7A += ' %s' % PDeck.cts(c)
                            cS7A = cS7A[1:]
                            cS7B = ''
                            for c in cardsB:
                                cS7B += ' %s' % PDeck.cts(c)
                            cS7B = cS7B[1:]
                            crA = PDeck.cards_rank(cardsA)
                            crB = PDeck.cards_rank(cardsB)
                            print(
                                pred_W[sx], ncWsl.index(1), crA[-1][:2],
                                crB[-1][:2], '(%s - %s = %s - %s)' %
                                (cS7A, cS7B, crA[-1][3:], crB[-1][3:]))
                if nBS: print(nBS)
            #"""

        # test
        if b % 1000 == 0 and test_batch is not None:

            batch = test_batch
            feed = {
                cnet['inA_PH']: batch['cA'],
                cnet['inB_PH']: batch['cB'],
                cnet['won_PH']: batch['wins'],
                cnet['rnkA_PH']: batch['rA'],
                cnet['rnkB_PH']: batch['rB'],
                cnet['mcA_PH']: batch['mAWP']
            }

            fetches = [
                cnet['loss'], cnet['loss_W'], cnet['loss_R'], cnet['loss_AWP'],
                cnet['diff_AWP_mn'], cnet['diff_AWP_mx'], cnet['acc_W'],
                cnet['acc_WC'], cnet['predictions_W'], cnet['oh_notcorrect_W'],
                cnet['acc_R'], cnet['acc_RC'], cnet['predictions_R'],
                cnet['oh_notcorrect_R']
            ]

            out = cnet.session.run(fetches, feed_dict=feed)
            loss, loss_W, loss_R, loss_AWP, dAWPmn, dAWPmx, acc_W, acc_WC, pred_W, ohnc_W, acc_R, acc_RC, pred_R, ohnc_R = out

            print('%6dT loss: %.7f accW: %.7f' % (b, loss, acc_W))

            accsum = tf.Summary(value=[
                tf.Summary.Value(tag='2_crdNT/0_iacW', simple_value=1 - acc_W)
            ])
            accRsum = tf.Summary(value=[
                tf.Summary.Value(tag='2_crdNT/1_iacR', simple_value=1 - acc_R)
            ])
            losssum = tf.Summary(value=[
                tf.Summary.Value(tag='2_crdNT/2_loss', simple_value=loss)
            ])
            lossWsum = tf.Summary(value=[
                tf.Summary.Value(tag='2_crdNT/3_lossW', simple_value=loss_W)
            ])
            lossRsum = tf.Summary(value=[
                tf.Summary.Value(tag='2_crdNT/4_lossR', simple_value=loss_R)
            ])
            lossAWPsum = tf.Summary(value=[
                tf.Summary.Value(tag='2_crdNT/5_lossAWP',
                                 simple_value=loss_AWP)
            ])
            dAWPmnsum = tf.Summary(value=[
                tf.Summary.Value(tag='2_crdNT/6_dAWPmn', simple_value=dAWPmn)
            ])
            dAWPmxsum = tf.Summary(value=[
                tf.Summary.Value(tag='2_crdNT/7_dAWPmx', simple_value=dAWPmx)
            ])
            cnet.summ_writer.add_summary(accsum, b)
            cnet.summ_writer.add_summary(accRsum, b)
            cnet.summ_writer.add_summary(losssum, b)
            cnet.summ_writer.add_summary(lossWsum, b)
            cnet.summ_writer.add_summary(lossRsum, b)
            cnet.summ_writer.add_summary(lossAWPsum, b)
            cnet.summ_writer.add_summary(dAWPmnsum, b)
            cnet.summ_writer.add_summary(dAWPmxsum, b)

            acc_RC = acc_RC.tolist()
            for cx in range(len(acc_RC)):
                csum = tf.Summary(value=[
                    tf.Summary.Value(tag=f'4_RciaT/{cx}ca',
                                     simple_value=1 - acc_RC[cx])
                ])  # cia stands for "classification inverted accuracy"
                cnet.summ_writer.add_summary(csum, b)

            acc_WC = acc_WC.tolist()
            accC01 = (acc_WC[0] + acc_WC[1]) / 2
            accC2 = acc_WC[2]
            c01sum = tf.Summary(value=[
                tf.Summary.Value(tag='6_WciaT/01cia', simple_value=1 - accC01)
            ])
            c2sum = tf.Summary(value=[
                tf.Summary.Value(tag='6_WciaT/2cia', simple_value=1 - accC2)
            ])
            cnet.summ_writer.add_summary(c01sum, b)
            cnet.summ_writer.add_summary(c2sum, b)

    cnet.saver.save(step=cnet['g_step'])
    dqmp.close()
    if verb > 0: print('%s done' % cnet['name'])
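A hypothetical smoke-test call; the keys expected in cn_dict depend on card_net's mdict and are not shown above, so only a model name is filled in:

cn_dict = {'name': 'cnet_smoke'}  # assumed minimal mdict for card_net
train_cn(cn_dict=cn_dict, device=-1, n_batches=2000, do_test=False, verb=1)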