Example #1
def resume(self):
    # Rebuild the full game state from the most recent save
    self.resume_check = True
    self.main.activeObj = set()
    print("LOADING THE GAME")
    load_game = Load(self.main)
    load_game.load()
    # Restore saved settings before recreating the board, game, and deck
    self.main.color = load_game.getColor()
    self.main.numPlayers = load_game.getNumPlayers()
    self.main.board = Board(self.main)
    self.main.game = Game(self.main)
    self.main.deck = Deck(self.main)
    self.main.deck.start_deck()
    load_game.set_values()
    self.main.game.playing.playerInfoList = self.main.game.playing.getPlayerInfoList(
        self.main.game.playerNum)
    self.main.game.playing.relaxedButton.visible = False
    print("DONE LOADING")
    self.main.gameStarted = True
Example #2
def etl(game, extract_date, data_dir=DATA_DIR, db=load.DB_FILENAME):
    logger.info('Start ETL for game {0}'.format(game))
    load_date = datetime.today()
    data_dir = os.path.join(data_dir, game)
    if game == 'hb':
        trans_fun = Transform.hb_transform
    elif game == 'wwc':
        trans_fun = Transform.wwc_transform
    else:
        # Fail fast on an unknown game code instead of hitting a
        # NameError on trans_fun further down
        raise ValueError('Unknown game: {0}'.format(game))

    data = Extract.extract(data_dir, extract_date)
    data = Transform.transform(data, trans_fun, extract_date, load_date)
    Load.load(data, game, db=db)
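
A hedged usage sketch: assuming DATA_DIR and the Extract, Transform, and Load modules above are importable, a single run of the pipeline could be driven like this (the game code and date string are illustrative values, not from the original):

# Illustrative only: 'hb' and the extract date are assumed example values
etl('hb', extract_date='2018-06-28')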
Example #3
    def test_load(self):
        data = [{'accountid':'1', 'gender':'female', 'age':42, 'country':'Germany', 
                 'extract_date':'2018-06-28', 'load_date':'2019-03-07', 'game':'foo'},
                {'accountid':'2', 'gender':'male', 'age':38, 'country':'United States', 
                 'extract_date':'2018-06-28', 'load_date':'2019-03-07', 'game':'bar'}]
        Load.load(data, 'test', TEST_DB)

        expected = [('foo', '1', 'female', 42, 'Germany', '2018-06-28', '2019-03-07'), 
                    ('bar', '2', 'male', 38, 'United States', '2018-06-28', '2019-03-07')]
        with sqlite3.connect(TEST_DB) as conn:
            c = conn.cursor()
            c.execute('SELECT * FROM test_accounts')
            result = c.fetchall()
            self.assertEqual(result, expected)
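
test_load reads as a unittest.TestCase method; a minimal, hypothetical harness around it (the class name and TEST_DB path are assumptions, not from the excerpt) would be:

import sqlite3  # used by the test body
import unittest

TEST_DB = 'test.db'  # assumed path; the excerpt does not define it

class TestLoad(unittest.TestCase):  # hypothetical class name
    def test_load(self):
        ...  # body as in Example #3 above

if __name__ == '__main__':
    unittest.main()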
Example #4
def main():

    if len(sys.argv) != 3:
        print('Error: usage -> python3 main.py <url> <name_database>')
        sys.exit(1)

    url = sys.argv[1]
    name_db = sys.argv[2]

    transformation = Transformation(url=url,
                                    output_path='databases/',
                                    name_db=name_db)
    transformation.transformation()

    load = Load(transformation.new_engine)
    load.load(output_path='excel/')
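
The excerpt stops at load.load(); presumably the script finishes with the usual entry-point guard (assumed here, not shown in the original):

if __name__ == '__main__':
    main()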
Example #5
File: eval.py  Project: KNakane/tensorflow
def main(args):
    print("------------Start Evaluation-----------")
    print("CheckPoint : {}".format(FLAGS.ckpt_dir))
    print("Network : {}".format(FLAGS.network))
    print("data : {}".format(FLAGS.data))
    print("---------------------------------------")

    # load dataset
    data = Load(FLAGS.data)
    batch_size = 100
    dataset = data.load(data.x_train,
                        data.y_train,
                        batch_size=batch_size,
                        is_training=True)
    iterator = dataset.make_initializable_iterator()
    inputs, labels = iterator.get_next()
    test_inputs = tf.random.uniform([batch_size * 3, FLAGS.z_dim], -1, +1)
    index = tile_index(batch_size * 3)

    # eval() maps the --network flag to a class object; the named class
    # must already be imported into this module's namespace
    model = eval(FLAGS.network)(z_dim=FLAGS.z_dim,
                                size=data.size,
                                channel=data.channel,
                                lr=0.0,
                                class_num=data.output_dim,
                                conditional=FLAGS.conditional,
                                opt=None,
                                trainable=False)

    D_logits, D_logits_ = model.inference(inputs, batch_size, labels)
    G = model.predict(test_inputs, batch_size * 3, index)

    tf.train.Saver()  # return value unused; the restore is handled inside Utils.restore_model
    with tf.Session() as sess:
        utils = Utils(sess=sess)
        utils.initial()
        if utils.restore_model(FLAGS.ckpt_dir):
            image = sess.run(G)
            utils.gan_plot(image)
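
The FLAGS object is not defined in the excerpt; in a TF1 project it is typically built with tf.app.flags, roughly as below. The flag names come from eval.py above; every default value here is an assumption.

import tensorflow as tf

# Flag names are taken from eval.py above; the defaults are assumptions.
tf.app.flags.DEFINE_string('ckpt_dir', './checkpoints', 'checkpoint directory to restore from')
tf.app.flags.DEFINE_string('network', 'GAN', 'network class name resolved via eval()')
tf.app.flags.DEFINE_string('data', 'mnist', 'dataset name passed to Load')
tf.app.flags.DEFINE_integer('z_dim', 100, 'latent vector dimension')
tf.app.flags.DEFINE_bool('conditional', False, 'use a class-conditional GAN')
FLAGS = tf.app.flags.FLAGS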
Example #6
File: train.py  Project: KNakane/tensorflow
def main(args):
    message = OrderedDict({
        "Network": FLAGS.network,
        "Conditional": FLAGS.conditional,
        "data": FLAGS.data,
        "z_dim": FLAGS.z_dim,
        "epoch": FLAGS.n_epoch,
        "batch_size": FLAGS.batch_size,
        "Optimizer": FLAGS.opt,
        "learning_rate": FLAGS.lr,
        "n_disc_update": FLAGS.n_disc_update,
        "l2_norm": FLAGS.l2_norm,
        "Augmentation": FLAGS.aug
    })

    # Setting
    checkpoints_to_keep = FLAGS.checkpoints_to_keep
    keep_checkpoint_every_n_hours = FLAGS.keep_checkpoint_every_n_hours
    max_steps = FLAGS.n_epoch
    save_checkpoint_steps = FLAGS.save_checkpoint_steps
    batch_size = FLAGS.batch_size
    n_disc_update = FLAGS.n_disc_update
    restore_dir = FLAGS.init_model
    global_step = tf.train.create_global_step()

    # load dataset
    data = Load(FLAGS.data)
    dataset = data.load(data.x_train,
                        data.y_train,
                        batch_size=batch_size,
                        is_training=True)
    iterator = dataset.make_initializable_iterator()
    inputs, labels = iterator.get_next()
    test_inputs = tf.random.uniform([batch_size * 3, FLAGS.z_dim], -1, +1)

    # As in eval.py, eval() maps the --network flag to a class object
    model = eval(FLAGS.network)(z_dim=FLAGS.z_dim,
                                size=data.size,
                                channel=data.channel,
                                lr=FLAGS.lr,
                                class_num=data.output_dim,
                                l2_reg=FLAGS.l2_norm,
                                conditional=FLAGS.conditional,
                                opt=FLAGS.opt,
                                trainable=True)

    D_logits, D_logits_ = model.inference(inputs, batch_size, labels)
    if FLAGS.network in ('WGAN', 'WGAN-GP'):
        # WGAN variants update the critic several times per generator step
        n_disc_update = 5

    if FLAGS.network == 'ACGAN':
        dis_loss, gen_loss = model.loss(D_logits, D_logits_, labels)
    else:
        dis_loss, gen_loss = model.loss(D_logits, D_logits_)

    d_op, g_op = model.optimize(d_loss=dis_loss,
                                g_loss=gen_loss,
                                global_step=global_step)
    train_accuracy = model.evaluate(D_logits, D_logits_)
    G = model.predict(test_inputs, batch_size * 3)

    # logging for tensorboard
    util = Utils(prefix=FLAGS.network)
    util.conf_log()
    tf.summary.scalar('global_step', global_step)
    tf.summary.scalar('discriminator_loss', dis_loss)
    tf.summary.scalar('generator_loss', gen_loss)
    tf.summary.histogram('generator_output', G)
    tf.summary.histogram('True_image', inputs)
    tf.summary.image('image', inputs)
    tf.summary.image('fake_image', G)

    def init_fn(scaffold, session):
        session.run(iterator.initializer,
                    feed_dict={
                        data.features_placeholder: data.x_train,
                        data.labels_placeholder: data.y_train
                    })

    # create saver (kept in a local so the restore branch below can use it)
    saver = tf.train.Saver(
        max_to_keep=checkpoints_to_keep,
        keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
    scaffold = tf.train.Scaffold(init_fn=init_fn, saver=saver)

    # create hooks
    hooks = []
    tf.logging.set_verbosity(tf.logging.INFO)
    metrics = {
        "global_step": global_step,
        "discriminator_loss": dis_loss,
        "generator_loss": gen_loss,
        "train_accuracy": train_accuracy
    }

    hooks.append(MyLoggerHook(message, util.log_dir, metrics,
                              every_n_iter=100))
    hooks.append(GanHook(G, util.log_dir, every_n_iter=10000))
    hooks.append(tf.train.NanTensorHook(dis_loss))
    hooks.append(tf.train.NanTensorHook(gen_loss))

    # training
    session = tf.train.MonitoredTrainingSession(
        checkpoint_dir=util.model_path,
        hooks=hooks,
        scaffold=scaffold,
        save_summaries_steps=1,
        save_checkpoint_steps=save_checkpoint_steps,
        summary_dir=util.tf_board)

    def _train(session, d_op, g_op, n_disc_update):
        # Run n_disc_update discriminator steps per generator step
        for _ in range(max_steps):
            for _ in range(n_disc_update):
                d_op_vals = session.run(d_op)

            g_op_vals = session.run(g_op)
        return d_op_vals, g_op_vals

    with session:
        if restore_dir is not None:
            ckpt = tf.train.get_checkpoint_state(restore_dir)
            if ckpt and ckpt.model_checkpoint_path:
                # Restores from checkpoint
                saver.restore(session, ckpt.model_checkpoint_path)
        _train(session, d_op, g_op, n_disc_update)
    return
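
The excerpt ends inside main(); a TF1 training script of this shape is usually launched through tf.app.run(), which parses the flags and then calls main (a standard pattern, assumed rather than shown in the source):

if __name__ == '__main__':
    tf.app.run(main)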