Example #1
def validate(working_dir, model_num=None, validate_name=None):
    """ Runs validate on the directories up to the most recent model, or up to
    (but not including) the model specified by `model_num`
    """
    if model_num is None:
        model_num, model_name = fsdb.get_latest_model()
    else:
        model_num = int(model_num)
        model_name = fsdb.get_model(model_num)

    # Model N was trained on games up through model N-2, so the validation set
    # should only be for models through N-2 as well, thus the (model_num - 1)
    # term.
    models = list(
        filter(lambda num_name: num_name[0] < (model_num - 1),
               fsdb.get_models()))
    # Run on the most recent 50 generations,
    # TODO(brianklee): make this hyperparameter dependency explicit/not hardcoded
    holdout_dirs = [
        os.path.join(fsdb.holdout_dir(), pair[1]) for pair in models[-50:]
    ]

    main.validate(working_dir,
                  *holdout_dirs,
                  checkpoint_name=os.path.join(fsdb.models_dir(), model_name),
                  validate_name=validate_name)
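
The (model_num - 1) cutoff above is easy to sanity-check with a toy model list; the (num, name) pairs here are made up for illustration:

# With model_num = 5, `num < model_num - 1` keeps models 0..3,
# i.e. everything up through N-2, matching the comment above.
models = [(n, '%06d-dummy' % n) for n in range(6)]
model_num = 5
kept = [pair for pair in models if pair[0] < model_num - 1]
assert [pair[0] for pair in kept] == [0, 1, 2, 3]
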
Example #2
    def build_ar_node(self, lec, left_lec, right_lec, top_element, top_obj):
        left_element = None
        object_builder = ObjectBuilder()
        if left_lec:
            left_lec = validate(left_lec)
            left_index = self.infl_point(left_lec)
            left_element = left_lec[left_index]
        right_lec = validate(right_lec)

        right_index = self.infl_point(right_lec)
        right_element = right_lec[right_index]
        trio = object_builder.trio_builder(lec, left_lec, right_lec,
                                           top_element, left_element,
                                           right_element, top_obj)

        return trio
Example #3
def test_validate(USER_EMAIL):
    '''A valid data obj is validated'''
    data = {
        'emailAddress': USER_EMAIL,
        'historyId': '123456789',
    }
    assert validate(data)
Example #4
def test_validate_no_emailAddress():
    '''A data obj with no emailAddress key is nixed'''
    data = {
        'butter': 'yes',
        'historyId': '123456789',
    }
    assert not validate(data)
Example #5
def test_validate_no_historyId():
    '''A data obj with no historyId key is nixed'''
    data = {
        'emailAddress': USER_EMAIL,
        'butter': 'no',
    }
    assert not validate(data)
Example #6
def test_validate_wrong_emailAddress(USER_EMAIL):
    '''A data obj with a wrong emailAddress value is nixed'''
    data = {
        'emailAddress': '*****@*****.**',
        'historyId': '123456789',
    }
    assert not validate(data)
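
Examples #3 through #6 together pin down this validate(): it is truthy only when the payload carries the expected emailAddress value and a historyId key. A minimal implementation consistent with those four tests, assuming a module-level USER_EMAIL mirroring the test fixture, could be:

def validate(data):
    # truthy only for the expected address plus a historyId key
    return data.get('emailAddress') == USER_EMAIL and 'historyId' in data
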
Example #7
    def test_validation(self):
        """
        Test that the password generated is always secure
        """
        password = generate_password()
        result = validate(password)
        self.assertTrue(result)
Example #8
def validate(logdir=None, model_num=None):
    """ Runs validate on the directories up to the most recent model, or up to
    (but not including) the model specified by `model_num`
    """
    if model_num is None:
        model_num, model_name = get_latest_model()
    else:
        model_num = int(model_num)
        model_name = get_model(model_num)

    models = list(
        filter(lambda num_name: num_name[0] < model_num, get_models()))
    # Run on the most recent 20 generations,
    holdout_dirs = [os.path.join(HOLDOUT_DIR, pair[1])
                    for pair in models[-20:]]

    main.validate(*holdout_dirs,
                  load_file=os.path.join(MODELS_DIR, model_name),
                  logdir=logdir)
Example #9
def evaluate(cf_test, cf_loss, batch_size, snapshot_name, gpu):
    net = main.load_snapshot(snapshot_name).to(device(gpu))
    loss_f = getattr(main, cf_loss['call'])(**dict_drop(cf_loss, 'call'))

    Data = getattr(main, cf_test['call'])
    test_iter = Data(split='test', batch_size=batch_size, gpu=gpu,
                     **dict_drop(cf_test, 'call'))

    print('\t'.join(loss_f.metric_names))
    val_metrics = main.validate(loss_f, net, test_iter, gpu)
    print(utils.tab_str(*val_metrics))
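
dict_drop itself is not shown in Examples #9 and #12. A minimal helper consistent with how it is called there (a copy of the config dict minus the given keys, so 'call' is stripped before the rest is passed as keyword arguments) might look like this; it is an assumption, not the project's actual code:

def dict_drop(d, *keys):
    """Return a shallow copy of `d` without `keys`."""
    return {k: v for k, v in d.items() if k not in keys}
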
Example #10
File: event.py  Project: jugovich/bmanemail
def _example_run(args):
    """Command to run example for ECT-584. First runs the extract on the raw emails to produce a raw dataset.
    This dataset is then cleaned and the training data is staged to produce a pre_processed dataset test dataset
    with predicted event values appended. This is then processed to create a test and training arff dataset.
    The training dataset is used to create a classification model using weka's J48 implementation of the C4.5 
    classification algorithm. Once the model has been created the test dataset has events predicated and these 
    predicated values are appended to the test dataset and saved to an excel file. Then this file is loaded and
    compared against the validation dataset to access the accuracy of the model.
    """
    #calls main.py run)extract with relative file paths
    #run_extract(os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'raw')),
    #            os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'stage')), 
    #            'extract', 'text', False)
    #calls main.py run_clean with relative file paths
    run_clean(os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'stage')),
              os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'stage')),
              'extract', 'text')
    #calls main.py stage_train with relative file paths
    stage_train(os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'stage')),
                'extract_cleaned',
                'text',
                os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'train')),
                't_data',
                'excel',
                os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'stage')))
    #calls main.py run_process with relative file paths
    run_process(os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'stage')),
                os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'process')), 
                'pre_processed', 
                'text')
    #calls main.py build_model with relative file paths
    build_model(os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'process', 'train.arff')), 
                os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'process', 'model.model')))        
    #calls main.py classify with relative file paths
    classify(os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'process', 'test')), 
             os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'process', 'model.model')), 
             os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'results', 'output.xls')), 
             'event')
    #calls main.py validate with relative file paths
    validate(os.path.normpath(os.path.join(__file__, '..', '..', 'data', 'results')), 
             'output')
Example #11
    def test_that_always_return_secure_password(self):
        """
        Test that ALL the passwords generated are always secure
        """
        passwords = []
        for i in range(5000):
            pwd = generate_password()
            result = validate(pwd)
            passwords.append(result)

        print(passwords)
        validation = set(passwords) == {True}  # every result must be True, not merely identical
        self.assertTrue(validation)
Example #12
def train(cf_trn, cf_val, cf_opt, cf_loss,
          cf_scheduler, batch_size, val_batch_size, gpu, n_epochs,
          parent_snapshot, snapshot_name, save_interval):

    torch.backends.cudnn.benchmark = True
    main_gpu = gpu if isinstance(gpu, int) else gpu[0]
    dvc = device(main_gpu)
    
    net = get_net()
    loss_f = getattr(main, cf_loss['call'])(**dict_drop(cf_loss, 'call'))
    
    Opt = getattr(torch.optim, cf_opt['call'])
    opt = Opt(net.parameters(), **dict_drop(cf_opt, 'call'))

    ep = int(parent_snapshot.split('_')[-1][2:]) if parent_snapshot else 0
    Scheduler = getattr(torch.optim.lr_scheduler, cf_scheduler['call'])
    scheduler = Scheduler(opt, last_epoch=-1,
                          **dict_drop(cf_scheduler, 'call'))
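    # step the scheduler `ep` times so the learning-rate schedule resumes
    # from the epoch encoded in the parent snapshot's name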
    for i in range(ep):
        scheduler.step()

    Data = getattr(main, cf_trn['call'])
    trn_iter = Data(split='train', batch_size=batch_size, gpu=main_gpu,
                    **dict_drop(cf_trn, 'call'))
    Data = getattr(main, cf_val['call'])
    val_iter = Data(split='val', batch_size=val_batch_size, gpu=main_gpu,
                    **dict_drop(cf_val, 'call'))
    
    if save_interval:
        saver = utils.IntervalSaver(snapshot_name, ep, save_interval)
    else:
        saver = utils.RecordSaver(snapshot_name, ep)
    print('\t'.join(['ep', 'loss'] + loss_f.metric_names))
    
    while ep < n_epochs:
        scheduler.step()
        for trn_tuple in trn_iter:
            trn_tuple = [t.to(dvc) for t in trn_tuple]
            opt.zero_grad()
            loss = loss_f(net.train(True), *trn_tuple, ep)
            loss.backward()
            opt.step()
            
        ep += 1
        trn_metrics = loss_f.trn_metrics()
        val_metrics = main.validate(loss_f, net, val_iter, main_gpu)
        print(utils.tab_str(ep, loss, *trn_metrics))
        print(utils.tab_str('', 0.0, *val_metrics))

        saver.save(val_metrics[0], net, main_gpu, ep)

    del net, opt, trn_iter, val_iter
Example #13
def validate(model_num=None, validate_name=None):
    """ Runs validate on the directories up to the most recent model, or up to
    (but not including) the model specified by `model_num`
    """
    if model_num is None:
        model_num, model_name = get_latest_model()
    else:
        model_num = int(model_num)
        model_name = get_model(model_num)

    # Model N was trained on games up through model N-2, so the validation set
    # should only be for models through N-2 as well, thus the (model_num - 1)
    # term.
    models = list(
        filter(lambda num_name: num_name[0] < (model_num - 1), get_models()))
    # Run on the most recent 50 generations,
    # TODO(brianklee): make this hyperparameter dependency explicit/not hardcoded
    holdout_dirs = [os.path.join(HOLDOUT_DIR, pair[1])
                    for pair in models[-50:]]

    main.validate(ESTIMATOR_WORKING_DIR, *holdout_dirs,
                  checkpoint_name=os.path.join(MODELS_DIR, model_name),
                  validate_name=validate_name)
Example #14
def validate(logdir=None, model_num=None):
    """ Runs validate on the directories up to the most recent model, or up to
    (but not including) the model specified by `model_num`
    """
    if model_num is None:
        model_num, model_name = get_latest_model()
    else:
        model_num = int(model_num)
        model_name = get_model(model_num)

    # Model N was trained on games up through model N-2, so the validation set
    # should only be for models through N-2 as well, thus the (model_num - 1)
    # term.
    models = list(
        filter(lambda num_name: num_name[0] < (model_num - 1), get_models()))
    # Run on the most recent 50 generations,
    holdout_dirs = [
        os.path.join(HOLDOUT_DIR, pair[1]) for pair in models[-50:]
    ]

    main.validate(*holdout_dirs,
                  load_file=os.path.join(MODELS_DIR, model_name),
                  logdir=logdir)
Example #15
def best_parameters(T, K, alpha, lambda_u, rampup_length, weight_decay,
                    ema_decay):
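    # Relies on names defined outside this function (args, datasetX,
    # datasetU, model, ema_model, optimizer, val_dataset); note that
    # ema_decay is printed but never written back into args.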
    print(T, K, alpha, lambda_u, rampup_length, weight_decay, ema_decay)
    args['T'] = T
    args['K'] = K
    args['alpha'] = alpha
    args['lambda_u'] = lambda_u
    args['rampup_length'] = rampup_length
    args['weight_decay'] = weight_decay
    train(datasetX, datasetU, model, ema_model, optimizer, 1, args)
    val_xe_loss, val_accuracy = validate(val_dataset,
                                         ema_model,
                                         1,
                                         args,
                                         split='Validation')
    return val_accuracy
Example #16
File: tune.py  Project: floringogianu/neac
    def train(self, train_round):
        env, policy, policy_evaluation = self.env, self.pi, self.pi_evaluation
        _, end = train_round

        # learn for a number of steps
        learn(env, policy, policy_evaluation, train_round)

        # validate
        results = validate(policy, self.cfg, end)

        # save the agent
        if hasattr(self.cfg, "save_agent") and self.cfg.save_agent:
            torch.save(
                {
                    "step": end,
                    "policy": policy.estimator_state(),
                    "R/ep": results["R_ep"],
                },
                f"{self.cfg.out_dir}/policy_step_{end:07d}.pth",
            )
        return results
Example #17
    def testValidateFail5(self):
        with self.assertRaises(ValueError):
            main.validate('qwerty', '')
Example #18
    def testValidate(self):
        self.assertEqual(main.validate('qwerty12', 'qwerty12'), 0)
Example #19
    def test_invalid_too_short(self):
        """Invalid case: the string is too short."""
        assert not validate("")
Example #20
    def test_valid(self):
        """Valid cases."""
        assert validate("a")
        assert validate("a" * 100)
Example #21
    def testValidateFail3(self):
        with self.assertRaises(TypeError):
            main.validate('123456', '123456')
Example #22
def rl_loop():
    """Run the reinforcement learning loop

    This is meant to be more of an integration test than a realistic way to run
    the reinforcement learning.
    """
    # TODO(brilee): move these all into appropriate local_flags file.
    # monkeypatch the hyperparams so that we get a quickly executing network.
    dual_net.get_default_hyperparams = lambda **kwargs: {
        'k': 8,
        'fc_width': 16,
        'num_shared_layers': 1,
        'l2_strength': 1e-4,
        'momentum': 0.9
    }

    dual_net.TRAIN_BATCH_SIZE = 16
    dual_net.EXAMPLES_PER_GENERATION = 64

    # monkeypatch the shuffle buffer size so we don't spin forever shuffling up positions.
    preprocessing.SHUFFLE_BUFFER_SIZE = 1000

    flags.FLAGS.num_readouts = 10

    with tempfile.TemporaryDirectory() as base_dir:
        working_dir = os.path.join(base_dir, 'models_in_training')
        model_save_path = os.path.join(base_dir, 'models', '000000-bootstrap')
        local_eb_dir = os.path.join(base_dir, 'scratch')
        next_model_save_file = os.path.join(base_dir, 'models',
                                            '000001-nextmodel')
        selfplay_dir = os.path.join(base_dir, 'data', 'selfplay')
        model_selfplay_dir = os.path.join(selfplay_dir, '000000-bootstrap')
        gather_dir = os.path.join(base_dir, 'data', 'training_chunks')
        holdout_dir = os.path.join(base_dir, 'data', 'holdout',
                                   '000000-bootstrap')
        sgf_dir = os.path.join(base_dir, 'sgf', '000000-bootstrap')
        os.makedirs(os.path.join(base_dir, 'data'), exist_ok=True)

        print("Creating random initial weights...")
        main.bootstrap(working_dir, model_save_path)
        print("Playing some games...")
        # Do two selfplay runs to test gather functionality
        main.selfplay(load_file=model_save_path,
                      output_dir=model_selfplay_dir,
                      output_sgf=sgf_dir,
                      holdout_pct=0)
        main.selfplay(load_file=model_save_path,
                      output_dir=model_selfplay_dir,
                      output_sgf=sgf_dir,
                      holdout_pct=0)
        # Do one holdout run to test validation
        main.selfplay(load_file=model_save_path,
                      holdout_dir=holdout_dir,
                      output_dir=model_selfplay_dir,
                      output_sgf=sgf_dir,
                      holdout_pct=100)

        print("See sgf files here?")
        sgf_listing = subprocess.check_output(["ls", "-l", sgf_dir + "/full"])
        print(sgf_listing.decode("utf-8"))

        print("Gathering game output...")
        eb.make_chunk_for(output_dir=gather_dir,
                          local_dir=local_eb_dir,
                          game_dir=selfplay_dir,
                          model_num=1,
                          positions=dual_net.EXAMPLES_PER_GENERATION,
                          threads=8,
                          samples_per_game=200)

        print("Training on gathered game data...")
        main.train_dir(working_dir,
                       gather_dir,
                       next_model_save_file,
                       generation_num=1)
        print("Trying validate on 'holdout' game...")
        main.validate(working_dir, holdout_dir)
        print("Verifying that new checkpoint is playable...")
        main.selfplay(load_file=next_model_save_file,
                      holdout_dir=holdout_dir,
                      output_dir=model_selfplay_dir,
                      output_sgf=sgf_dir)
Example #23
    def testValidate(self):
        self.assertEqual(main.validate('qwerty12', 'qwerty12'), 0)
Example #24
File: run.py  Project: gaojy98/nezha
import main as mn

if __name__ == "__main__":
    layer = mn.ConvLayer(4, 64, 64, 64, 64, 3, 3)
    arch = mn.Arch([1024 * 1024 * 1024], [1])
    layer1 = [[1, 1, 1, 1, 1, 1, 1], [4, 64, 64, 64, 64, 3, 3],
              [0, 1, 2, 3, 4, 5, 6]]
    schedule = mn.Schedule([layer1])
    print(mn.validate(layer, schedule, arch))
    print(mn.get_energy(layer, schedule, arch))
Example #25
def test_validate():
    examples = EXAMPLES.split("\n\n")
    assert len(examples) == 4
    valid = [p for p in examples if validate(p)]
    assert len(valid) == 2
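
The test in Example #25 only pins down that EXAMPLES holds four blank-line-separated records, two of which validate. The pattern matches Advent of Code 2020 day 4, where each record is a set of key:value fields checked against a required-field set; a sketch under that assumption (the REQUIRED set is a guess, not confirmed by the snippet):

REQUIRED = {'byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid'}  # assumed field set

def validate(record):
    # collect the field names present in one blank-line-separated record
    fields = {pair.split(':')[0] for pair in record.split()}
    return REQUIRED <= fields
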
Example #26
                              0.001,
                              momentum=0.9,
                              weight_decay=decay)

        checkpoint_path = "saved_models/CIFAR10/WD" + str(decay) + "/N" + str(
            n) + "/E500/checkpoint.pth.tar"

        print("Loading checkpoint for model: 2^" + str(n) + " at WD: " +
              str(decay))

        checkpoint = torch.load(checkpoint_path)
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        init_model.load_state_dict(checkpoint['init'])

        tr_err, tr_loss, tr_margin = main.validate(model, init_model, device,
                                                   train_loader)
        val_err, val_loss, val_margin = main.validate(model, init_model,
                                                      device, val_loader)
        margin[i].append(tr_margin)

plt.plot(hidden_units,
         np.array(margin[0]),
         marker="+",
         label="WD: 0",
         color="blue")
plt.plot(hidden_units,
         np.array(margin[1]),
         marker="+",
         label="WD: 0.001",
         color="black")
plt.plot(hidden_units,
Example #27
    def testValidateFail4(self):
        with self.assertRaises(Exception):
            main.validate('12qw', '12qw')
Example #28
    def test_validate(self):
        self.assertTrue(main.validate((2, 4, 3)))
        self.assertFalse(main.validate((5, 10, 25)))
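
The two assertions in Example #28 are consistent with a triangle-inequality check: (2, 4, 3) can form a triangle, while (5, 10, 25) cannot, since 5 + 10 < 25. A minimal validate matching both assertions (an inference, not the project's documented intent):

def validate(sides):
    a, b, c = sorted(sides)
    return a + b > c  # strict triangle inequality on the two shorter sides
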
Example #29
def rl_loop():
    """Run the reinforcement learning loop

    This is meant to be more of an integration test than a realistic way to run
    the reinforcement learning.
    """
    # monkeypatch the hyperparams so that we get a quickly executing network.
    dual_net.get_default_hyperparams = lambda **kwargs: {
        'k': 8, 'fc_width': 16, 'num_shared_layers': 1, 'l2_strength': 1e-4, 'momentum': 0.9}

    dual_net.TRAIN_BATCH_SIZE = 16
    dual_net.EXAMPLES_PER_GENERATION = 64

    #monkeypatch the shuffle buffer size so we don't spin forever shuffling up positions.
    preprocessing.SHUFFLE_BUFFER_SIZE = 1000

    with tempfile.TemporaryDirectory() as base_dir:
        working_dir = os.path.join(base_dir, 'models_in_training')
        model_save_path = os.path.join(base_dir, 'models', '000000-bootstrap')
        next_model_save_file = os.path.join(base_dir, 'models', '000001-nextmodel')
        selfplay_dir = os.path.join(base_dir, 'data', 'selfplay')
        model_selfplay_dir = os.path.join(selfplay_dir, '000000-bootstrap')
        gather_dir = os.path.join(base_dir, 'data', 'training_chunks')
        holdout_dir = os.path.join(
            base_dir, 'data', 'holdout', '000000-bootstrap')
        sgf_dir = os.path.join(base_dir, 'sgf', '000000-bootstrap')
        os.makedirs(os.path.join(base_dir, 'data'), exist_ok=True)

        print("Creating random initial weights...")
        main.bootstrap(working_dir, model_save_path)
        print("Playing some games...")
        # Do two selfplay runs to test gather functionality
        main.selfplay(
            load_file=model_save_path,
            output_dir=model_selfplay_dir,
            output_sgf=sgf_dir,
            holdout_pct=0,
            readouts=10)
        main.selfplay(
            load_file=model_save_path,
            output_dir=model_selfplay_dir,
            output_sgf=sgf_dir,
            holdout_pct=0,
            readouts=10)
        # Do one holdout run to test validation
        main.selfplay(
            load_file=model_save_path,
            holdout_dir=holdout_dir,
            output_dir=model_selfplay_dir,
            output_sgf=sgf_dir,
            holdout_pct=100,
            readouts=10)

        print("See sgf files here?")
        sgf_listing = subprocess.check_output(["ls", "-l", sgf_dir + "/full"])
        print(sgf_listing.decode("utf-8"))

        print("Gathering game output...")
        main.gather(input_directory=selfplay_dir, output_directory=gather_dir)
        print("Training on gathered game data...")
        main.train(working_dir, gather_dir, next_model_save_file, generation_num=1)
        print("Trying validate on 'holdout' game...")
        main.validate(working_dir, holdout_dir)
        print("Verifying that new checkpoint is playable...")
        main.selfplay(
            load_file=next_model_save_file,
            holdout_dir=holdout_dir,
            output_dir=model_selfplay_dir,
            output_sgf=sgf_dir,
            readouts=10)
Example #30
    def test_validate(self, expected, name, checksum):
        self.assertEqual(expected, main.validate(name, checksum))
Example #31
def rl_loop():
    """Run the reinforcement learning loop

    This is meant to be more of an integration test than a realistic way to run
    the reinforcement learning.
    """
    # monkeypatch the hyperparams so that we get a quickly executing network.
    dual_net.get_default_hyperparams = lambda **kwargs: {
        'k': 8,
        'fc_width': 16,
        'num_shared_layers': 1,
        'l2_strength': 1e-4,
        'momentum': 0.9
    }

    dual_net.TRAIN_BATCH_SIZE = 16

    #monkeypatch the shuffle buffer size so we don't spin forever shuffling up positions.
    preprocessing.SHUFFLE_BUFFER_SIZE = 10000

    with tempfile.TemporaryDirectory() as base_dir:
        model_save_file = os.path.join(base_dir, 'models', '000000-bootstrap')
        selfplay_dir = os.path.join(base_dir, 'data', 'selfplay')
        model_selfplay_dir = os.path.join(selfplay_dir, '000000-bootstrap')
        gather_dir = os.path.join(base_dir, 'data', 'training_chunks')
        holdout_dir = os.path.join(base_dir, 'data', 'holdout',
                                   '000000-bootstrap')
        sgf_dir = os.path.join(base_dir, 'sgf', '000000-bootstrap')
        os.mkdir(os.path.join(base_dir, 'data'))

        print("Creating random initial weights...")
        dual_net.DualNetworkTrainer(model_save_file).bootstrap()
        print("Playing some games...")
        # Do two selfplay runs to test gather functionality
        main.selfplay(load_file=model_save_file,
                      output_dir=model_selfplay_dir,
                      output_sgf=sgf_dir,
                      holdout_pct=0,
                      readouts=10)
        main.selfplay(load_file=model_save_file,
                      output_dir=model_selfplay_dir,
                      output_sgf=sgf_dir,
                      holdout_pct=0,
                      readouts=10)
        # Do one holdout run to test validation
        main.selfplay(load_file=model_save_file,
                      holdout_dir=holdout_dir,
                      output_dir=model_selfplay_dir,
                      output_sgf=sgf_dir,
                      holdout_pct=100,
                      readouts=10)

        print("See sgf files here?")
        sgf_listing = subprocess.check_output(
            "ls -l {}/full".format(sgf_dir).split())
        print(sgf_listing.decode("utf-8"))

        print("Gathering game output...")
        main.gather(input_directory=selfplay_dir, output_directory=gather_dir)
        print("Training on gathered game data... (ctrl+C to quit)")
        # increase num_steps to 1k or 10k to confirm overfitting.
        main.train(gather_dir,
                   save_file=model_save_file,
                   num_steps=200,
                   logdir="logs",
                   verbosity=2)
        print("Trying validate on 'holdout' game")
        main.validate(holdout_dir, load_file=model_save_file, logdir="logs")
Example #32
    def testValidateFail8(self):
        with self.assertRaises(ValueError):
            main.validate('qwerty12', 'QWERTY12')
Example #33
def rl_loop():
    """Run the reinforcement learning loop

    This is meant to be more of an integration test than a realistic way to run
    the reinforcement learning.
    """
    # TODO(brilee): move these all into appropriate local_flags file.
    # monkeypatch the hyperparams so that we get a quickly executing network.
    flags.FLAGS.conv_width = 8
    flags.FLAGS.fc_width = 16
    flags.FLAGS.trunk_layers = 1
    flags.FLAGS.train_batch_size = 16
    flags.FLAGS.shuffle_buffer_size = 1000
    dual_net.EXAMPLES_PER_GENERATION = 64

    flags.FLAGS.num_readouts = 10

    with tempfile.TemporaryDirectory() as base_dir:
        flags.FLAGS.base_dir = base_dir
        working_dir = os.path.join(base_dir, 'models_in_training')
        flags.FLAGS.model_dir = working_dir
        model_save_path = os.path.join(base_dir, 'models', '000000-bootstrap')
        local_eb_dir = os.path.join(base_dir, 'scratch')
        next_model_save_file = os.path.join(base_dir, 'models',
                                            '000001-nextmodel')
        selfplay_dir = os.path.join(base_dir, 'data', 'selfplay')
        model_selfplay_dir = os.path.join(selfplay_dir, '000000-bootstrap')
        gather_dir = os.path.join(base_dir, 'data', 'training_chunks')
        holdout_dir = os.path.join(base_dir, 'data', 'holdout',
                                   '000000-bootstrap')
        sgf_dir = os.path.join(base_dir, 'sgf', '000000-bootstrap')
        os.makedirs(os.path.join(base_dir, 'data'), exist_ok=True)

        print("Creating random initial weights...")
        main.bootstrap(working_dir, model_save_path)
        print("Playing some games...")
        # Do two selfplay runs to test gather functionality
        main.selfplay(load_file=model_save_path,
                      output_dir=model_selfplay_dir,
                      output_sgf=sgf_dir,
                      holdout_pct=0)
        main.selfplay(load_file=model_save_path,
                      output_dir=model_selfplay_dir,
                      output_sgf=sgf_dir,
                      holdout_pct=0)
        # Do one holdout run to test validation
        main.selfplay(load_file=model_save_path,
                      holdout_dir=holdout_dir,
                      output_dir=model_selfplay_dir,
                      output_sgf=sgf_dir,
                      holdout_pct=100)

        print("See sgf files here?")
        sgf_listing = subprocess.check_output(["ls", "-l", sgf_dir + "/full"])
        print(sgf_listing.decode("utf-8"))

        print("Gathering game output...")
        eb.make_chunk_for(output_dir=gather_dir,
                          local_dir=local_eb_dir,
                          game_dir=selfplay_dir,
                          model_num=1,
                          positions=dual_net.EXAMPLES_PER_GENERATION,
                          threads=8,
                          samples_per_game=200)

        print("Training on gathered game data...")
        main.train_dir(gather_dir, next_model_save_file)
        print("Trying validate on 'holdout' game...")
        main.validate(working_dir, holdout_dir)
        print("Verifying that new checkpoint is playable...")
        main.selfplay(load_file=next_model_save_file,
                      holdout_dir=holdout_dir,
                      output_dir=model_selfplay_dir,
                      output_sgf=sgf_dir)
Example #34
                              weight_decay=decay)

        checkpoint_path = "saved_models/CIFAR10/SQUARE/WD" + str(
            decay) + "/N" + str(n) + "/E500/checkpoint.pth.tar"

        print("Loading checkpoint for model: 2^" + str(n) + " at WD: " +
              str(decay))

        checkpoint = torch.load(checkpoint_path)
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        init_model.load_state_dict(checkpoint['init'])

        tr_err, tr_loss, tr_margin = main.validate(model,
                                                   init_model,
                                                   device,
                                                   train_loader,
                                                   ir_strength=0,
                                                   square_loss=True)
        val_err, val_loss, val_margin = main.validate(model,
                                                      init_model,
                                                      device,
                                                      val_loader,
                                                      ir_strength=0,
                                                      square_loss=True)
        err[i].append(val_err)

plt.plot(hidden_units,
         np.array(err[0]),
         marker="+",
         label="WD: 0",
         color="blue")
Example #35
def rl_loop():
    """Run the reinforcement learning loop

    This is meant to be more of an integration test than a realistic way to run
    the reinforcement learning.
    """
    # monkeypatch the hyperparams so that we get a quickly executing network.
    dual_net.get_default_hyperparams = lambda **kwargs: {
        'k': 8,
        'fc_width': 16,
        'num_shared_layers': 1,
        'l2_strength': 1e-4,
        'momentum': 0.9
    }

    dual_net.TRAIN_BATCH_SIZE = 16
    dual_net.EXAMPLES_PER_GENERATION = 64

    #monkeypatch the shuffle buffer size so we don't spin forever shuffling up positions.
    preprocessing.SHUFFLE_BUFFER_SIZE = 1000

    # with tempfile.TemporaryDirectory() as base_dir:
    base_dir = "/tmp/minigo"
    with open('/tmp/foo', 'w') as fff:
        working_dir = os.path.join(base_dir, 'models_in_training')
        model_save_path = os.path.join(base_dir, 'models', '000000-bootstrap')
        next_model_save_file = os.path.join(base_dir, 'models',
                                            '000001-nextmodel')
        selfplay_dir = os.path.join(base_dir, 'data', 'selfplay')
        model_selfplay_dir = os.path.join(selfplay_dir, '000000-bootstrap')
        gather_dir = os.path.join(base_dir, 'data', 'training_chunks')
        holdout_dir = os.path.join(base_dir, 'data', 'holdout',
                                   '000000-bootstrap')
        sgf_dir = os.path.join(base_dir, 'sgf', '000000-bootstrap')
        os.makedirs(os.path.join(base_dir, 'data'), exist_ok=True)

        print("Creating random initial weights...")
        main.bootstrap(working_dir, model_save_path)
        for i in range(100):
            qmeas.start_time('main-loop')
            print("Playing some games...")
            # Do two selfplay runs to test gather functionality
            qmeas.start_time('main-loop-self-play')
            for j in range(2):
                main.selfplay(load_file=model_save_path,
                              output_dir=model_selfplay_dir,
                              output_sgf=sgf_dir,
                              holdout_pct=0,
                              readouts=10)
            qmeas.stop_time('main-loop-self-play')
            # Do one holdout run to test validation
            qmeas.start_time('main-loop-self-play-holdout')
            main.selfplay(load_file=model_save_path,
                          holdout_dir=holdout_dir,
                          output_dir=model_selfplay_dir,
                          output_sgf=sgf_dir,
                          holdout_pct=100,
                          readouts=10)
            qmeas.stop_time('main-loop-self-play-holdout')

            print("See sgf files here?")
            sgf_listing = subprocess.check_output(
                ["ls", "-l", sgf_dir + "/full"])
            print(sgf_listing.decode("utf-8"))

            print("Gathering game output...")
            qmeas.start_time('main-loop-gather')
            main.gather(input_directory=selfplay_dir,
                        output_directory=gather_dir)
            qmeas.stop_time('main-loop-gather')
            print("Training on gathered game data...")
            qmeas.start_time('main-loop-train')
            main.train(working_dir,
                       gather_dir,
                       next_model_save_file,
                       generation_num=1)
            qmeas.stop_time('main-loop-train')
            print("Trying validate on 'holdout' game...")
            qmeas.start_time('main-loop-validate')
            main.validate(working_dir, holdout_dir)
            qmeas.stop_time('main-loop-validate')
            print("Verifying that new checkpoint is playable...")
            main.selfplay(load_file=next_model_save_file,
                          holdout_dir=holdout_dir,
                          output_dir=model_selfplay_dir,
                          output_sgf=sgf_dir,
                          readouts=10)
            qmeas.stop_time('main-loop')
            qmeas._flush()
Example #36
    def testValidateFail7(self):
        with self.assertRaises(TypeError):
            main.validate('qwerty12_$', 'qwerty12_$')
Example #37
    def test_invalid_too_long(self):
        """Invalid case: the string is too long."""
        assert not validate("a" * 101)
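
Examples #19, #20, and #37 together pin this validate() down to a pure length check: falsy for the empty string and for 101 characters, truthy for 1 and for 100. A one-liner consistent with all four assertions:

def validate(s):
    # valid iff the string is 1..100 characters long
    return 1 <= len(s) <= 100
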
Example #38
    def testValidateFail6(self):
        with self.assertRaises(TypeError):
            main.validate('', '')
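
The testValidate* cases in Examples #17, #18, #21, #23, #27, #32, #36, and #38 jointly sketch main.validate's contract: return 0 for a matching alphanumeric password mixing letters and digits, raise ValueError when the two arguments differ (including by case), raise TypeError for empty, digit-only, or special-character input, and raise some exception for a too-short password like '12qw'. A minimal implementation consistent with those assertions; the check order, length threshold, and messages are inferred, not taken from the project:

def validate(password, confirm):
    if password != confirm:
        raise ValueError('passwords do not match')
    if not password or not password.isalnum():
        raise TypeError('password must be non-empty and alphanumeric')
    if password.isdigit() or password.isalpha():
        raise TypeError('password must mix letters and digits')
    if len(password) < 8:
        raise ValueError("password too short")  # '12qw' only needs to raise
    return 0
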
Example #39
from main import node, add_deck, add_cards, validate, init_p2thkeys
import pypeerassets as pa  # assumed import: `pa` is used below but missing from this snippet
from utils.state import init_state
from sys import stdout
from conf import subscribed

init_p2thkeys()
accounts = node.listaccounts()
total = sum(1 for deck in pa.find_all_valid_decks(node, 1, 1))


def message(n):
    print('{} of {} Decks Loaded'.format(n + 1, total))
    stdout.flush()


for n, deck in enumerate(pa.find_all_valid_decks(node, 1, 1)):

    if not any(deck_id in subscribed for deck_id in ('*', deck.id)):
        continue
    else:
        add_deck(deck)
        message(n)
        if deck.id not in accounts:
            validate(deck)

        try:
            add_cards(deck)
            init_state(deck.id)
        except IndexError:
            continue
Example #40
def test_validate():
    assert validate(parse("1-3 a: abcde"))
    assert not validate(parse("1-3 b: cdefg"))
    assert validate(parse("2-9 c: ccccccccc"))
    assert not validate(parse("2-9 c: cccccccccccc"))
    assert not validate(parse("8-9 f: ffqfffflf"))
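
The assertions in Example #40 match the Advent of Code 2020 day 2 password policy: "1-3 a: abcde" is valid because 'a' appears between 1 and 3 times, while "8-9 f: ffqfffflf" fails with only seven 'f's. A parse/validate pair consistent with all five assertions (a reconstruction; the real helpers are not shown):

import re

def parse(line):
    # "1-3 a: abcde" -> (1, 3, 'a', 'abcde')
    lo, hi, ch, pwd = re.fullmatch(r'(\d+)-(\d+) (\w): (\w+)', line).groups()
    return int(lo), int(hi), ch, pwd

def validate(entry):
    lo, hi, ch, pwd = entry
    return lo <= pwd.count(ch) <= hi
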