Example #1
    def __init__(self):
        """ Initializes the game controller so it is ready to start a new game. """
        parser = ArgumentParser(
            description='A simple console-based rogue-like game.')
        parser.add_argument('map_path',
                            type=str,
                            nargs='?',
                            help='path to map file to load')
        parser.add_argument('--new_game',
                            nargs='?',
                            dest='new_game_demanded',
                            const=True,
                            default=False)

        args = parser.parse_args()

        no_save_file = not os.path.isfile(SAVE_FILE_NAME)

        if args.new_game_demanded or no_save_file:
            if args.map_path is not None:
                game_map = FileWorldMapSource(args.map_path).get()
            else:
                game_map = RandomV1WorldMapSource(
                    Controller._DEFAULT_MAP_HEIGHT,
                    Controller._DEFAULT_MAP_WIDTH).get()

            mobs_count = Controller._MOB_COUNT
            positions = game_map.get_random_empty_positions(mobs_count + 1)
            player = src.fighter.Player(positions[0], [
                WeaponBuilder().with_name('SABER').with_attack(2).with_defence(
                    2).with_confusion_prob(0.2),
                WeaponBuilder().with_name('SPEAR').with_attack(4).with_defence(
                    1).with_confusion_prob(0.1),
                WeaponBuilder().with_name('SWORD').with_attack(1).with_defence(
                    3).with_confusion_prob(0.7)
            ])
            mobs = [
                src.fighter.Mob(
                    positions[i],
                    random.choice([
                        src.strategies.AggressiveStrategy(),
                        src.strategies.PassiveStrategy(),
                        src.strategies.CowardlyStrategy()
                    ])) for i in range(1, mobs_count + 1)
            ]

            self.model = model.Model(game_map, player, mobs)
        else:
            with open(SAVE_FILE_NAME, 'r') as file:
                self.model = model.Model(None, None, None)
                self.model.set_snapshot(file.read())

        self.program_is_running = True
        self.view = None
        self.player_died = False
        self.fighting_system = CoolFightingSystem()
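
The `--new_game` flag above relies on a subtle argparse pattern: with `nargs='?'`, `const=True`, and `default=False`, the option acts as a boolean switch that can still accept an explicit value. A minimal, self-contained sketch of just that pattern:

from argparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument('--new_game',
                    nargs='?',
                    dest='new_game_demanded',
                    const=True,     # value when the flag appears with no argument
                    default=False)  # value when the flag is absent

print(parser.parse_args([]).new_game_demanded)                     # False
print(parser.parse_args(['--new_game']).new_game_demanded)         # True
print(parser.parse_args(['--new_game', 'yes']).new_game_demanded)  # 'yes'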
Example #2
    def test_train_one_epoch_split_3(self):
        num_cells = 4
        train_dataset, test_dataset = model_lib.make_datasets(
            'src/testutils/corpus/split-3/0-*.png', num_cells)

        model = model_lib.Model(train_dataset,
                                test_dataset,
                                epochs=1,
                                num_cells=num_cells)
        model.fit()

        self.assertEqual(model.generator.input_shape, (None, 256, 256, 3))
        self.assertEqual(model.generator.output_shape, (None, 256, 256, 1))
        self.assertFalse(model.generator.run_eagerly)

        with tempfile.NamedTemporaryFile(mode='w+', suffix=".png") as tmp_file:
            # We can't assert anything about the generated image, but we just
            # want to see that imageutils.predict(generator, ...) doesn't fail.
            imageutils.predict(model.generator, _PATH_TO_FONT, [
                '市',
                '口',
                '來',
            ], tmp_file.name)

        # We want to see that model_lib.generate_images(generator, ...) doesn't
        # fail.
        for items in test_dataset.take(1):
            inputs = items[:-1]
            target = items[-1]
            model_lib.generate_images(model.generator, inputs, target)
Example #3
    def setUp(self):
        rating_file = "./fakedataset/dw_rating.dat"
        id_map_file = "./fakedataset/dw_id_map.mat"
        active_user_file = "./fakedataset/dw_active_users_all.mat"
        dst_filename = "./fakedataset/dw_user_item_matrix.mat"
        self.mat = dw.get_orignal_data(rating_file, id_map_file,
                                       active_user_file, dst_filename)
        self.model = model.Model(self.mat)
Example #4
    def __init__(self, grammar, checkpointed=False, threshold=0):
        self.checkpointed = checkpointed
        self.language = grammar.split("/")[-1][8:-4]
        self.generator = generator.Generator(self.language)
        self.parser = subparser.Parser(self.language)

        if checkpointed:
            self.model = model_checkpointed.ModelCheckpointed(
                self.language, threshold)
        else:
            self.model = model.Model(self.language)
Example #5
"""Start function for the entire model.

This module simply imports all the functions needed to apply the model on
the data and calls them one by one.

"""

import pickle

from src import config
from src import model

if __name__ == '__main__':
    influenza_estimator = model.Model()
    influenza_estimator.train()
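
`pickle` is imported above but never used in the snippet; the surrounding project presumably persists the trained estimator. A hedged sketch of what that step might look like (the file name `estimator.pkl` is an assumption, not taken from the project):

# Hypothetical persistence step; 'estimator.pkl' is an assumed file name.
with open('estimator.pkl', 'wb') as f:
    pickle.dump(influenza_estimator, f)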
Example #6
def main():
    # PARAMETERS
    # training params
    num_training_epochs = 1000  # 3000
    stopping_criterion = 0.0  # 0.0
    train_batch_size = 1  # 1

    # optimization params
    learning_rate = 0.1  # 0.01

    # model params
    representation_nonlinearity = "sigmoid"  # "sigmoid"
    hidden_nonlinearity = "sigmoid"  # "sigmoid"

    # initialization params
    weight_initialization_range = [-0.1, 0.1]  # [-0.1, 0.1]
    output_layer_bias_val = -2.  # -2.
    freeze_output_layer_biases = True  # True

    # set up result dirs
    t_m.make_dir("logdirs")
    save_dir = t_m.get_logdir()
    print('Results will be saved in: {}'.format(save_dir))

    # create dataset
    data_fname = "data/data_01/data.csv"

    new_items = [[], [], []]
    representation_units_to_add = [0, 7]

    for ix, items_to_consider in enumerate(
        [["plant", "tree", "flower", "oak", "pine", "daisy", "rose"],
         ["animal", "birds", "fish", "robin", "canary", "sunfish", "salmon"]]):
        num_training_epochs = [2000, 4000][ix]
        dataset = d_m.Dataset(data_fname=data_fname,
                              items_to_consider=items_to_consider)

        # create model
        # some model and train params are locked to dataset params
        item_input_size = len(dataset.item_names_to_inds)
        relation_input_size = len(dataset.relation_names_to_inds)
        representation_size = 7 + sum(representation_units_to_add[:ix])
        hidden_size = 15  # 15
        output_size = len(dataset.attribute_names_to_inds)

        model = m_m.Model(
            item_input_size=item_input_size,
            relation_input_size=relation_input_size,
            representation_size=representation_size,
            hidden_size=hidden_size,
            output_size=output_size,
            representation_nonlinearity=representation_nonlinearity,
            hidden_nonlinearity=hidden_nonlinearity,
            wrange=weight_initialization_range,
            output_layer_bias_val=output_layer_bias_val,
            freeze_output_layer_biases=freeze_output_layer_biases)

        # create trainer
        trainer = t_m.Trainer(
            dataset,
            model,
            name="{}".format(0),
            train_batch_size=train_batch_size,
            num_training_epochs=num_training_epochs,
            stopping_criterion=stopping_criterion,
            learning_rate=learning_rate,
            save_dir=save_dir,
            test_freq=50,
            print_freq=100,
            show_plot=True,
            new_items=new_items[ix],
            representation_units_to_add=representation_units_to_add[ix])
        trainer.train()
Example #7
print("load dataset")
use_supplement = tf.placeholder_with_default(False, [])
few_real_images, few_real_labels = MNIST.MNIST(batch_size)
supplementary_real_images, supplementary_real_labels = MNIST.MNIST(
    large_batch_size - batch_size)
many_real_images = tf.concat([few_real_images, supplementary_real_images],
                             axis=0)
many_real_labels = tf.concat([few_real_labels, supplementary_real_labels],
                             axis=0)
minibatch_real_images, minibatch_real_labels = tf.cond(
    use_supplement, lambda: (many_real_images, many_real_labels), lambda:
    (few_real_images, few_real_labels))

print("build model")
model = Model.Model(num_channel=num_channel, num_class=num_class)
minibatch_batch_size = tf.shape(minibatch_real_images)[0]
noise_batch_size = tf.placeholder_with_default(minibatch_batch_size, [])
default_fake_labels = minibatch_real_labels if conditioned_model and same_labels else None  # None means random-labeled or non-conditioned.
noise = model._random_noise(noise_batch_size,
                            default_labels=default_fake_labels)
real_labels = minibatch_real_labels if conditioned_model else None
model.build_network_outputs(noise,
                            minibatch_real_images,
                            fake_labels=default_fake_labels,
                            real_labels=real_labels)
model.build_losses()

print("build opt")
opt = tf.train.AdamOptimizer(learning_rate=2.0e-4)
with tf.control_dependencies(
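
The snippet breaks off inside the `tf.control_dependencies(` call. In TF1-style code with batch normalization, this context manager conventionally wraps the optimizer step so the moving-average update ops run first. A sketch of that conventional continuation (an assumption, since the original code is cut off; `model.loss` is a guessed attribute name):

# Assumed continuation: run batch-norm UPDATE_OPS before each train step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = opt.minimize(model.loss)  # model.loss is an assumption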
Example #8
import chess.engine
from src import model

engine = chess.engine.SimpleEngine.popen_uci('/usr/games/stockfish')

board = chess.Board()
info = engine.analyse(board, chess.engine.Limit(time=0.100))
print(info)
print("score= ", info['score'])
print(type(info['score']))

# engine.quit()

if __name__ == '__main__':
    model = model.Model()
    model.play()
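
`info['score']` is a `chess.engine.PovScore` rather than a plain number, which is why the snippet prints its type. To reduce it to centipawns from a fixed point of view:

# Convert the PovScore to centipawns from White's perspective;
# mate_score maps forced-mate scores onto the centipawn scale.
centipawns = info['score'].white().score(mate_score=100000)
print("centipawns for White:", centipawns)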
Example #9
# Parse the arguments
parser = argparse.ArgumentParser(description="This script trains my best model")
parser.add_argument("-modelName", dest="model_name", required=True, type=str, help="name of the model")
parser.add_argument("-data", dest="train_data_path", required=True, type=str, help="location of the training data")
parser.add_argument("-target", dest="train_labels_path", required=True, type=str, help="location of the training labels")
args = parser.parse_args()


# I'm using Windows, so since default of long in Windows is 4 bytes, need to force long as 8 bytes.
X_train = torch.tensor(torchfile.load(args.train_data_path, force_8bytes_long=True), dtype=torch.float64).reshape(-1, 108*108).to(device)
y_train = torch.tensor(torchfile.load(args.train_labels_path, force_8bytes_long=True), dtype=torch.float64).reshape(-1).long().to(device)


# Model definition
net = model.Model()
net.add_layer(layers.Conv2d((108, 108), 1, 16, kernel_size=18, stride=2))
net.add_layer(layers.ReLU())
net.add_layer(layers.MaxPool2d(2))
net.add_layer(layers.BatchNorm2d(16))
net.add_layer(layers.Conv2d((23, 23), 16, 32, kernel_size=5, stride=2))
net.add_layer(layers.ReLU())
net.add_layer(layers.MaxPool2d(2))
net.add_layer(layers.BatchNorm2d(32))
net.add_layer(layers.Flatten())
net.add_layer(layers.Linear(5 * 5 * 32, 256))
net.add_layer(layers.ReLU())
net.add_layer(layers.BatchNorm1d(256))
net.add_layer(layers.Linear(256, 128))
net.add_layer(layers.ReLU())
net.add_layer(layers.BatchNorm1d(128))  # must match the 128 features produced by the preceding Linear layer
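
The hard-coded shapes `(23, 23)` and `5 * 5 * 32` follow from standard no-padding conv/pool arithmetic, which is worth verifying when editing layer sizes:

def conv_out(size, kernel, stride):
    # Output size of an unpadded ('valid') convolution.
    return (size - kernel) // stride + 1

s = conv_out(108, 18, 2) // 2  # conv -> 46, maxpool -> 23
s = conv_out(s, 5, 2) // 2     # conv -> 10, maxpool -> 5
assert s == 5                  # hence Flatten feeds Linear(5 * 5 * 32, 256)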
Example #10
def get_trainer(config):

    vocab_config = {
        'pathes': [config['formulas_train_path'],
                   config['formulas_validation_path']],
        'unk_token_threshold': config['unk_token_threshold'],
    }
    # Builds and stores the formula token table; formula2tensor and
    # tensor2formula convert between tensors and formula strings.
    vocab = utils.Vocab(vocab_config)
    vocab_size = len(vocab.token2idx.keys())  # number of tokens
    # Build the CNN model; it extracts the features and returns x.
    cnn_model = cnn.CNN(config['cnn_params']).to(config['device'])
    # Build the encoder model; it consumes x (running in both directions
    # when configured as bidirectional).
    encoder_model = encoder.Encoder(config['cnn_params']['conv6_c'],
                                    config['encoder_hidden_size'],
                                    config['bidirectional'],
                                    config['device']).to(config['device'])
    train_loader_config = {
        'batch_size': config['batch_size'],
        'images_path': config['images_train_path'],
        'formulas_path': config['formulas_train_path'],
        'sort_by_formulas_len': True,
        'shuffle': False,
    }

    # Load the data, tracking end_of_batch as batches are consumed.
    train_loader = utils.data_loader(vocab, train_loader_config)

    # Embed the tokens from the token table.
    embedding_model = embedding.Embedding(vocab_size, config['embedding_size'],
                                          vocab.pad_token).to(config['device'])

    # Build the RNN decoder, which returns the scores.
    decoder_model = decoder.DecoderRNN(
        config['embedding_size'], config['decoder_hidden_size'],
        config['encoder_hidden_size'] * (2 if config['bidirectional'] else 1),
        vocab_size, config['device']).to(config['device'])

    # Assemble the full model, adding the positional embedding and calling
    # the generator model.
    _model = model.Model(cnn_model, encoder_model, embedding_model,
                         decoder_model, config['device'])

    trainer_config = {
        'device': config['device'],
        'checkpoints_dir': config['checkpoints_dir'],
        'log_dir': config['log_dir'],
        'print_every_batch': config['print_every_batch'],
        'clip': config['clip'],
        'learning_rate': config['learning_rate'],
        'learning_rate_decay': config['learning_rate_decay'],
        'learning_rate_decay_step': config['learning_rate_decay_step'],
        'learning_rate_min': config['learning_rate_min'],
        'teacher_forcing_ratio': config['teacher_forcing_ratio'],
        'teacher_forcing_ratio_decay': config['teacher_forcing_ratio_decay'],
        'teacher_forcing_ratio_decay_step':
            config['teacher_forcing_ratio_decay_step'],
        'teacher_forcing_ratio_min': config['teacher_forcing_ratio_min'],
    }

    # Trains the model via train_one_epoch.
    _trainer = trainer.Trainer(_model, train_loader, trainer_config)
    return _trainer
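
The `*_decay`, `*_decay_step`, and `*_min` keys suggest step-wise schedules for the learning rate and the teacher-forcing ratio. A hedged sketch of the schedule the `Trainer` presumably applies (the exact formula is an assumption):

def stepwise_decay(initial, decay, decay_step, minimum, epoch):
    # Assumed schedule: multiply by `decay` every `decay_step` epochs,
    # clamped from below at `minimum`.
    return max(minimum, initial * decay ** (epoch // decay_step))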
Example #11
# collect the raw data :
bamber = DataFactory.get_bamber(thklim=thklim)

# define the meshes :
mesh = Mesh('meshes/greenland_3D_5H.xml')
mesh.coordinates()[:, 2] /= 100000.0

# create data objects to use with varglas :
dbm = DataInput(None, bamber, mesh=mesh)

# get the expressions used by varglas :
Surface = dbm.get_spline_expression('h')
Bed = dbm.get_spline_expression('b')

model = model.Model(out_dir=out_dir)
model.set_geometry(Surface, Bed)
model.set_mesh(mesh, deform=True)
model.set_parameters(pc.IceParameters())
model.initialize_variables()
parameters['form_compiler']['quadrature_degree'] = 2

File(in_dir + 'u.xml') >> model.u
File(in_dir + 'v.xml') >> model.v
File(in_dir + 'w.xml') >> model.w
File(in_dir + 'beta2.xml') >> model.beta2
File(in_dir + 'eta.xml') >> model.eta

config = {'output_path': out_dir}

F = solvers.StokesBalanceSolver(model, config)
Example #12
    def test_get_item_ids(self):
        i_ids1 = model.Model(self.mat).get_item_ids()
        i_ids2 = model.Model(self.mat).get_item_ids()
        self.assertSequenceEqual(i_ids1, i_ids2)
        self.assertEqual(len(i_ids1), 10)
Example #13
    def test_get_user_ids(self):
        u_ids = model.Model(self.mat).get_user_ids()
        self.assertEqual(len(u_ids), 9)
        u_ids2 = model.Model(self.mat).get_user_ids()
        self.assertSequenceEqual(u_ids, u_ids2)
Example #14
def get_trainer(config):

    vocab_config = {
        'pathes': [config['formulas_train_path'],
                   config['formulas_validation_path']],
        'unk_token_threshold': config['unk_token_threshold'],
    }
    vocab = utils.Vocab(vocab_config)
    vocab_size = len(vocab.token2idx.keys())

    cnn_model = cnn.CNN(config['cnn_params']).to(config['device'])

    encoder_model = encoder.Encoder(config['cnn_params']['conv6_c'],
                                    config['encoder_hidden_size'],
                                    config['bidirectional'],
                                    config['device']).to(config['device'])

    train_loader_config = {
        'batch_size': config['batch_size'],
        'images_path': config['images_train_path'],
        'formulas_path': config['formulas_train_path'],
        'sort_by_formulas_len': True,
        'shuffle': False,
    }
    train_loader = utils.data_loader(vocab, train_loader_config)

    embedding_model = embedding.Embedding(vocab_size, config['embedding_size'],
                                          vocab.pad_token).to(config['device'])

    decoder_model = decoder.AttnDecoder(
        config['embedding_size'], config['decoder_hidden_size'],
        config['encoder_hidden_size'] * (2 if config['bidirectional'] else 1),
        vocab_size, config['device']).to(config['device'])

    _model = model.Model(cnn_model, encoder_model, embedding_model,
                         decoder_model, config['device'])

    trainer_config = {
        'device': config['device'],
        'checkpoints_dir': config['checkpoints_dir'],
        'log_dir': config['log_dir'],
        'print_every_batch': config['print_every_batch'],
        'clip': config['clip'],
        'learning_rate': config['learning_rate'],
        'learning_rate_decay': config['learning_rate_decay'],
        'learning_rate_decay_step': config['learning_rate_decay_step'],
        'learning_rate_min': config['learning_rate_min'],
        'teacher_forcing_ratio': config['teacher_forcing_ratio'],
        'teacher_forcing_ratio_decay': config['teacher_forcing_ratio_decay'],
        'teacher_forcing_ratio_decay_step':
            config['teacher_forcing_ratio_decay_step'],
        'teacher_forcing_ratio_min': config['teacher_forcing_ratio_min'],
    }

    _trainer = trainer.Trainer(_model, train_loader, trainer_config)

    return _trainer