Example #1
    def __init__(self, session=None):
        self.game = None
        self.separated = False
        self.model = Model()
        self.training = False

        self.inputs = tf.placeholder(dtype=tf.float32, shape=[None, 16, 16, 3], name='inputs')
        self.target_actions = tf.placeholder(dtype=tf.float32, shape=[None, 4], name='target_actions')
        self.rewards = tf.placeholder(dtype=tf.float32, shape=[None], name='rewards')

        self.logits = self.model.inference(self.inputs)
        self.probs = tf.nn.softmax(self.logits)
        self.prediction = tf.argmax(self.probs, axis=1)
        self.loss = self.model.loss(probs=self.probs, target_actions=self.target_actions, rewards=self.rewards)
        self.algorithm = tf.train.AdamOptimizer(learning_rate=1e-4)
        self.optimizer = self.algorithm.minimize(self.loss)
        self.reward = 0
        self.saver = Saver()
        init = tf.global_variables_initializer()
        if session is None:
            session = tf.Session()
        self.session = session
        self.session.run(init)
        try:
            self.saver.restore(self.session, '/home/burger/projects/lightrider_burger/checkpoints/model/nn_model.ckpt')
        except NotFoundError:
            print('Not Found')
Example #2
def save_state(fname, sess=None):
    from baselines import logger
    logger.warn('save_state method is deprecated, please use save_variables instead')
    sess = sess or get_session()
    dirname = os.path.dirname(fname)
    if any(dirname):
        os.makedirs(dirname, exist_ok=True)
    saver = Saver()
    saver.save(sess, fname)
Example #3
    def __init__(self, session=None):
        self.game = None
        self.separated = False
        self.model = Model2()
        self.model_T = Model3()
        self.training = False

        if session is None:
            session = tf.Session()

        self.inputs = tf.placeholder(dtype=tf.float32,
                                     shape=[None, 16, 16, 3],
                                     name='inputs2')
        self.target_actions = tf.placeholder(dtype=tf.float32,
                                             shape=[None, 16, 16],
                                             name='target_actions2')
        self.rewards = tf.placeholder(dtype=tf.float32,
                                      shape=[None],
                                      name='rewards2')

        self.logits = self.model.inference(self.inputs)
        flat = tf.reshape(self.logits, shape=(-1, 256))
        self.probs = tf.reshape(tf.nn.softmax(flat), shape=(-1, 16, 16))
        # self.prediction = tf.argmax(self.probs)
        self.loss = self.model.loss(probs=self.probs,
                                    target_actions=self.target_actions,
                                    rewards=self.rewards)
        self.algorithm = tf.train.AdamOptimizer(learning_rate=1e-5)
        self.optimizer = self.algorithm.minimize(self.loss)

        self.logits_T = self.model_T.inference(self.inputs)
        flat_T = tf.reshape(self.logits_T, shape=(-1, 256))  # target-network logits
        self.probs_T = tf.reshape(tf.nn.softmax(flat_T), shape=(-1, 16, 16))

        self.assign_T_operation = [
            t.assign(o)
            for o, t in zip(self.model.parameters, self.model_T.parameters)
        ]

        self.reward = 0
        self.saver = Saver()
        init = tf.global_variables_initializer()

        self.session = session
        self.session.run(init)
        try:
            self.saver.restore(
                self.session,
                '/home/burger/projects/lightrider_burger/checkpoints/model2/nn_model2.ckpt'
            )
        except NotFoundError:
            print('Not Found')

        self.session.run(self.assign_T_operation)
Example #4
    def eval(self):

        with open("digits.json") as f:
            digits_data = json.load(f)['digits']

        self.create_model(trainable=False)

        saver = Saver(var_list=self.var_list)

        with tf.Session() as self.sess:
            saver.restore(self.sess, CHECKPOINT_FILE_NAME)

            print('Test results', self.sess.run(self.y_prob, feed_dict={self.x: digits_data}))
Example #5
def predict(file_name):
    model = build_model()
    saver = Saver()

    x = read_image(file_name)
    x = x.reshape((1, 28, 28))
    x = x.astype(numpy.float32) / 255.

    with tf.Session() as sess:
        saver.restore(sess, CHECKPOINT_FILE_NAME)

        y_pred = sess.run([model['y_pred']], feed_dict={model['x']: x})

        print(y_pred)
Example #6
def tf_freeze_graph(sess: Session,
                    saver: Saver,
                    model_name: str,
                    logdir: str,
                    model_suffix: str = "pb",
                    conf_suffix: str = "pbtxt",
                    checkpoint_suffix: str = "ckpt"):
    """
    python freeze_graph.py --output_graph=./pbs/frozenGraph.pb --output_node_names=genderOut,ageOut --input_binary=true
    """
    input_conf = write_graph(
        sess.graph_def, logdir,
        extsep.join(('_'.join((model_name, "graphDef")), conf_suffix)))

    input_binary = True
    input_graph = write_graph(sess.graph_def,
                              logdir,
                              extsep.join(('_'.join((model_name, "graphDef")),
                                           model_suffix)),
                              as_text=False)

    input_checkpoint = saver.save(
        sess, path_join(logdir, extsep.join((model_name, checkpoint_suffix))))

    output_node_names = ""

    output_graph = path_join(
        logdir,
        extsep.join(('_'.join((model_name, "frozenGraphDef")), model_suffix)))
    freeze_graph(input_graph, "", input_binary, input_checkpoint,
                 output_node_names, "save/restore_all", "save/Const:0",
                 output_graph, True, "")
    return output_graph
Example #7
def read_net_from_tf(model_name: str, logdir: str, frozen_graph: str = None):
    if frozen_graph is None:
        with Session() as sess:
            frozen_graph = tf_freeze_graph(sess, Saver(), model_name, logdir)

    debug(frozen_graph)

    graph_path = path_join(
        logdir, frozen_graph)  # optimize_frozen_graph(logdir, frozen_graph)

    net: dnn_Net = readNetFromTensorflow(graph_path)
    net.setPreferableBackend(DNN_BACKEND_HALIDE)
    net.setPreferableTarget(DNN_TARGET_OPENCL)
    return net
Example #8
def train(log):
    mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

    model = build_model()
    saver = Saver()

    train_losses = []
    train_accuracies = []

    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for batch_idx in range(1000000):
            batch_x, batch_y = mnist.train.next_batch(MB_SIZE)
            batch_x = transform_batch(batch_x)
            # batch_x = augment(batch_x)
            # batch_x = batch_x.reshape((-1, 28, 28))

            output = sess.run(
                [model['step'], model['loss'], model['accuracy']],
                feed_dict={
                    model['x']: batch_x,
                    model['y_true']: batch_y
                })
            train_losses.append(output[1])
            train_accuracies.append(output[2])

            if (batch_idx + 1) % REPORT_INTERVAL == 0:
                log("train loss",
                    numpy.average(train_losses[-REPORT_INTERVAL:]), "accuracy",
                    numpy.average(train_accuracies[-REPORT_INTERVAL:]))

            if (batch_idx + 1) % TEST_INTERVAL == 0:
                test(log, sess, model, mnist)
                with contextlib.suppress(FileExistsError):
                    os.makedirs(CHECKPOINT_DIR)
                saver.save(sess, CHECKPOINT_FILE_NAME)
Example #9
    def train(self):

        self.create_model(trainable=False)

        saver = Saver(var_list=self.var_list)

        with tf.Session() as self.sess:
            tf.global_variables_initializer().run()  # initialize variables
            saver.restore(self.sess, CHECKPOINT_FILE_NAME)

            batches_n = 10000

            losses = []
            try:
                for batch_idx in range(batches_n):
                    vloss = self.train_on_batch()

                    losses.append(vloss)

                    if batch_idx % 100 == 0:
                        print(
                            'Batch {batch_idx}: mean_loss {mean_loss}'.format(
                                batch_idx=batch_idx,
                                mean_loss=np.mean(losses[-200:], axis=0)))
                        print('Test results',
                              self.sess.run([self.loss, self.accuracy]))

            except KeyboardInterrupt:
                print('Stopping training!')
                pass

            # Test trained model
            print('Test results', self.sess.run([self.loss, self.accuracy]))

            data = self.sess.run(self.x)
            with open("digits.json", "w") as f:
                json.dump({"digits": data.tolist()}, f)
Example #10
    def train(self):

        self.create_model(trainable=True)
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

        saver = Saver(var_list=self.var_list)

        with tf.Session() as self.sess:
            tf.global_variables_initializer().run()  # initialize variables

            if os.path.exists(CHECKPOINT_FILE_NAME + ".meta"):
                print("Restoring existing weights")
                saver.restore(self.sess, CHECKPOINT_FILE_NAME)
            else:
                print("Training a new model")

            batches_n = 100000
            mb_size = 128

            losses = []
            try:
                for batch_idx in range(batches_n):
                    batch_xs, batch_ys = mnist.train.next_batch(mb_size)

                    vloss = self.train_on_batch(batch_xs, batch_ys)

                    losses.append(vloss)

                    if batch_idx % 100 == 0:
                        print(
                            'Batch {batch_idx}: mean_loss {mean_loss}'.format(
                                batch_idx=batch_idx,
                                mean_loss=np.mean(losses[-200:], axis=0)))
                        print(
                            'Test results',
                            self.sess.run(
                                [self.loss, self.accuracy],
                                feed_dict={
                                    self.x: mnist.test.images,
                                    self.y_target: mnist.test.labels
                                }))

                        saver.save(self.sess, CHECKPOINT_FILE_NAME)

            except KeyboardInterrupt:
                print('Stopping training!')
                pass

            # Test trained model
            print(
                'Test results',
                self.sess.run([self.loss, self.accuracy],
                              feed_dict={
                                  self.x: mnist.test.images,
                                  self.y_target: mnist.test.labels
                              }))
Example #11
def load_state(fname, sess=None):
    from baselines import logger
    logger.warn('load_state method is deprecated, please use load_variables instead')
    sess = sess or get_session()
    saver = Saver()
    saver.restore(sess, fname)
Example #12
File: Base.py Project: xeren/OpenKE
    def __init__(self, baseshape, batchshape=None, optimizer=None, norm=None):
        '''Creates a new model.

        baseshape
            A pair of numbers giving the number of entities and relations.

        batchshape
            A pair of numbers giving the number of training statements per
            iteration and the number of variants per statement. The first
            variant is considered true while the rest are considered false.
            default: model not intended for training.

        optimizer
            The optimization algorithm used to approximate the optimal model
            in each iteration.
            default: stochastic gradient descent with a learning rate of 0.01.

        norm
            The vector norm used to compute a scalar score from the model's
            prediction.
            default: L1 norm (sum of absolute features).'''

        from tensorflow import name_scope, transpose, reshape, placeholder, int64, float32, Session, Graph, global_variables_initializer, variable_scope, nn, AUTO_REUSE
        from tensorflow.contrib.layers import xavier_initializer
        from tensorflow.python.training.saver import Saver
        self.base = baseshape
        if batchshape is None:
            batchshape = 0, 0
        self.batchsize = batchshape[0]
        self.negatives = batchshape[1] - 1
        self.__parameters = dict()
        if optimizer is None:
            import tensorflow
            optimizer = tensorflow.train.GradientDescentOptimizer(.01)
        if norm is None:
            from .norm import l1
            norm = l1
        self._norm = norm
        B, N = self.batchsize, self.negatives
        S = B * (N + 1)
        self.__graph = Graph()
        with self.__graph.as_default():
            self.__session = Session()
            with self.__session.as_default():
                initializer = xavier_initializer(uniform=True)
                with variable_scope('model',
                                    reuse=AUTO_REUSE,
                                    initializer=initializer):
                    with name_scope('input'):
                        self.batch_h = placeholder(int64, [S])
                        self.batch_t = placeholder(int64, [S])
                        self.batch_l = placeholder(int64, [S])
                        self.batch_y = placeholder(float32, [S])
                        self.all_h = transpose(
                            reshape(self.batch_h, [1 + N, -1]), [1, 0])
                        self.all_t = transpose(
                            reshape(self.batch_t, [1 + N, -1]), [1, 0])
                        self.all_l = transpose(
                            reshape(self.batch_l, [1 + N, -1]), [1, 0])
                        self.all_y = transpose(
                            reshape(self.batch_y, [1 + N, -1]), [1, 0])
                        self.postive_h = transpose(
                            reshape(self.batch_h[:B], [1, -1]), [1, 0])
                        self.postive_t = transpose(
                            reshape(self.batch_t[:B], [1, -1]), [1, 0])
                        self.postive_l = transpose(
                            reshape(self.batch_l[:B], [1, -1]), [1, 0])
                        self.negative_h = transpose(
                            reshape(self.batch_h[B:], [N, -1]), [1, 0])
                        self.negative_t = transpose(
                            reshape(self.batch_t[B:], [N, -1]), [1, 0])
                        self.negative_l = transpose(
                            reshape(self.batch_l[B:], [N, -1]), [1, 0])
                        self.predict_h = placeholder(int64, [None])
                        self.predict_t = placeholder(int64, [None])
                        self.predict_l = placeholder(int64, [None])
                    with name_scope('embedding'):
                        for k, v in self._embedding_def():
                            self.__parameters[k] = v
                    with name_scope('loss'):
                        self.__loss = self._loss_def()
                    with name_scope('predict'):
                        self.__prediction = self._predict_def()
                    grads_and_vars = optimizer.compute_gradients(self.__loss)
                    self.__training = optimizer.apply_gradients(grads_and_vars)
                self.__saver = Saver()
                self.__session.run(global_variables_initializer())
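
The docstring above describes how the constructor arguments map onto the training setup. A minimal usage sketch, assuming a hypothetical concrete subclass MyModel that implements _embedding_def, _loss_def and _predict_def (the names and numbers below are illustrative, not from the OpenKE project):

import tensorflow as tf

# 10,000 entities and 1,000 relations; each training iteration feeds 200
# statements, each with 1 true variant and 5 corrupted (false) variants.
model = MyModel(baseshape=(10000, 1000),
                batchshape=(200, 6),
                optimizer=tf.train.GradientDescentOptimizer(0.01))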
Example #13
class BotNN2(object):
    def __init__(self, session=None):
        self.game = None
        self.separated = False
        self.model = Model2()
        self.model_T = Model3()
        self.training = False

        if session is None:
            session = tf.Session()

        self.inputs = tf.placeholder(dtype=tf.float32,
                                     shape=[None, 16, 16, 3],
                                     name='inputs2')
        self.target_actions = tf.placeholder(dtype=tf.float32,
                                             shape=[None, 16, 16],
                                             name='target_actions2')
        self.rewards = tf.placeholder(dtype=tf.float32,
                                      shape=[None],
                                      name='rewards2')

        self.logits = self.model.inference(self.inputs)
        flat = tf.reshape(self.logits, shape=(-1, 256))
        self.probs = tf.reshape(tf.nn.softmax(flat), shape=(-1, 16, 16))
        # self.prediction = tf.argmax(self.probs)
        self.loss = self.model.loss(probs=self.probs,
                                    target_actions=self.target_actions,
                                    rewards=self.rewards)
        self.algorithm = tf.train.AdamOptimizer(learning_rate=1e-5)
        self.optimizer = self.algorithm.minimize(self.loss)

        self.logits_T = self.model_T.inference(self.inputs)
        flat_T = tf.reshape(self.logits_T, shape=(-1, 256))  # target-network logits
        self.probs_T = tf.reshape(tf.nn.softmax(flat_T), shape=(-1, 16, 16))

        self.assign_T_operation = [
            t.assign(o)
            for o, t in zip(self.model.parameters, self.model_T.parameters)
        ]

        self.reward = 0
        self.saver = Saver()
        init = tf.global_variables_initializer()

        self.session = session
        self.session.run(init)
        try:
            self.saver.restore(
                self.session,
                '/home/burger/projects/lightrider_burger/checkpoints/model2/nn_model2.ckpt'
            )
        except NotFoundError:
            print('Not Found')

        self.session.run(self.assign_T_operation)

    def setup(self, game):
        self.game = game

    def give_reward(self, reward):
        self.reward = reward

    def sample_move(self):
        legal = self.game.field.legal_moves(self.game.my_botid)
        if len(legal) == 0:
            return 0
        else:
            (_, chosen) = random.choice(legal)
            for i, dir in enumerate(DIRS):
                if chosen == dir[1]:
                    return i
            return 0

    def do_turn(self):
        self.game.last_order = None
        self.reward = 0

        if not self.training:
            # probs = self.session.run(self.probs, feed_dict={self.inputs: self.get_cell_tensor()})
            # action = np.random.choice(np.arange(4), p=probs[0])
            # string_move = DIRS[action][1]
            # self.game.issue_order(string_move)
            current_state = self.get_cell_tensor(
                reverse_players=self.game.my_botid)
            rlogits, probs = self.session.run(
                [self.logits, self.probs],
                feed_dict={self.inputs: current_state})

            # prediction = np.random.choice(np.arange(256), p=np.ravel(probs[0]))
            # prediction_coords = int(np.floor(prediction / 16)), prediction % 16

            # if np.isnan(probs[0, 0, 0]):
            #     print('NAAAAN')

            string_move = 'pass'
            legal = self.game.field.legal_moves(self.game.my_botid)
            if len(legal) > 0:
                legal_probs = []
                legal_logits = []
                for move in legal:
                    legal_probs.append(probs[0][move[0]])
                    legal_logits.append(rlogits[0, :, :, 0][move[0]])
                    # if prediction_coords == move[0]:
                    #     string_move = move[1]
                    #     break

                original_legal_probs = np.array(legal_probs)
                legal_logits = np.array(legal_logits)
                legal_probs = softmax(legal_logits)
                prediction_id = np.random.choice(np.arange(len(legal_probs)),
                                                 p=legal_probs)
                prediction_coords = legal[prediction_id][0]
                string_move = legal[prediction_id][1]
            self.game.issue_order(string_move)

        return 0

    def get_cell_tensor(self, reverse_players=False):
        p1 = self.game.field.players[0]
        p2 = self.game.field.players[1]
        cell_tensor = np.array(self.game.field.cell)
        cell_tensor = np.reshape(
            cell_tensor, (self.game.field.height, self.game.field.width))
        cell_tensor = cell_tensor[np.newaxis, :, :, np.newaxis]
        empty_tensor = np.zeros(
            (1, cell_tensor.shape[1], cell_tensor.shape[2], 2))
        cell_tensor = np.concatenate((cell_tensor, empty_tensor), axis=3)
        cell_tensor[cell_tensor < 3] = 0
        cell_tensor[cell_tensor == 3] = 1
        cell_tensor[0, p1.row, p1.col, 1] = 1
        cell_tensor[0, p2.row, p2.col, 2] = 1

        if reverse_players:
            cell_tensor[0, p1.row, p1.col, 1] = 0
            cell_tensor[0, p1.row, p1.col, 2] = 1
            cell_tensor[0, p2.row, p2.col, 1] = 1
            cell_tensor[0, p2.row, p2.col, 2] = 0

        return cell_tensor
Example #14
                                         num_threads = 1,
                                         capacity = 200 * batch_size)
 
 batch_queue = slim.prefetch_queue.prefetch_queue([batch_images, batch_labels], capacity=50*batch_size)
 img, lb = batch_queue.dequeue()
 ## Load Model
 network_fn = nets_factory.get_network_fn(model_name)
 end_points = network_fn(img, is_training=False)
 print (end_points)
 task1 = tf.to_int32(tf.argmax(end_points['Logits'], 1))
 
 training_accuracy1 = slim.metrics.accuracy(task1, tf.to_int32(lb))
 
 variables_to_restore = slim.get_variables_to_restore()
 checkpoint_path = latest_checkpoint(train_dir)
 saver = Saver(variables_to_restore)
 config = ConfigProto()
 config.gpu_options.allow_growth=True
 sess = Session(config=config)
 sv = supervisor.Supervisor(logdir=checkpoint_path,
                            summary_op=None,
                            summary_writer=None,
                            global_step=None,
                            saver=None)
 correct = 0
 predict = 0
 with sv.managed_session(master='', start_standard_services=False, config=config) as sess:
     saver.restore(sess, checkpoint_path)
     optim_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
     layer = {}
     name = ['conv1w','conv1b',
Example #15
class BotNN(object):
    def __init__(self, session=None):
        self.game = None
        self.separated = False
        self.model = Model()
        self.training = False

        self.inputs = tf.placeholder(dtype=tf.float32, shape=[None, 16, 16, 3], name='inputs')
        self.target_actions = tf.placeholder(dtype=tf.float32, shape=[None, 4], name='target_actions')
        self.rewards = tf.placeholder(dtype=tf.float32, shape=[None], name='rewards')

        self.logits = self.model.inference(self.inputs)
        self.probs = tf.nn.softmax(self.logits)
        self.prediction = tf.argmax(self.probs, axis=1)
        self.loss = self.model.loss(probs=self.probs, target_actions=self.target_actions, rewards=self.rewards)
        self.algorithm = tf.train.AdamOptimizer(learning_rate=1e-4)
        self.optimizer = self.algorithm.minimize(self.loss)
        self.reward = 0
        self.saver = Saver()
        init = tf.global_variables_initializer()
        if session is None:
            session = tf.Session()
        self.session = session
        self.session.run(init)
        try:
            self.saver.restore(self.session, '/home/burger/projects/lightrider_burger/checkpoints/model/nn_model.ckpt')
        except NotFoundError:
            print('Not Found')

    def setup(self, game):
        self.game = game

    def give_reward(self, reward):
        self.reward = reward

    def sample_move(self):
        legal = self.game.field.legal_moves(self.game.my_botid)
        if len(legal) == 0:
            return 0
        else:
            (_, chosen) = random.choice(legal)
            for i, dir in enumerate(DIRS):
                if chosen == dir[1]:
                    return i
            return 0

    def do_turn(self):
        self.game.last_order = None
        self.reward = 0

        if not self.training:
            probs = self.session.run(self.probs, feed_dict={self.inputs: self.get_cell_tensor()})
            action = np.random.choice(np.arange(4), p=probs[0])
            string_move = DIRS[action][1]
            self.game.issue_order(string_move)

        return 0

    def get_cell_tensor(self, reverse_players=False):
        p1 = self.game.field.players[0]
        p2 = self.game.field.players[1]
        cell_tensor = np.array(self.game.field.cell)
        cell_tensor = cell_tensor[np.newaxis, :, :, np.newaxis]
        empty_tensor = np.zeros((1, cell_tensor.shape[1], cell_tensor.shape[2], 2))
        cell_tensor = np.concatenate((cell_tensor, empty_tensor), axis=3)
        cell_tensor[cell_tensor < 3] = 0
        cell_tensor[cell_tensor == 3] = 1
        cell_tensor[0, p1.row, p1.col, 1] = 1
        cell_tensor[0, p2.row, p2.col, 2] = 1

        if reverse_players:
            cell_tensor[0, p1.row, p1.col, 1] = 0
            cell_tensor[0, p1.row, p1.col, 2] = 1
            cell_tensor[0, p2.row, p2.col, 1] = 1
            cell_tensor[0, p2.row, p2.col, 2] = 0

        return cell_tensor
Example #16
 def __init__(self):
     self.saver = TFSaver(max_to_keep=None)
Example #17
class Saver:
    """Manages saving and loading model variables and model settings.
    """
    def __init__(self):
        self.saver = TFSaver(max_to_keep=None)

    def restore_model(self, sess, model_filename):
        """Restores the model trainable variables from a checkpoint file.
        """
        self.saver.restore(sess, model_filename)

    def save_model(self, sess, directory, filename):
        """Save the model trainable variables as a checkpoint file.
        """
        save_path = self.saver.save(sess,
                                    '{}/{}.ckpt'.format(directory, filename))
        return save_path

    def save_hparam(self, directory, filename, current_global_step,
                    current_learning_rate, hparam):
        """Save the model hyperparameter as a text file.
        """
        text_filename = '{}/{}'.format(directory, filename)

        with open(text_filename, 'w') as text_file:
            print('Percent Train: {}'.format(hparam['percent train']),
                  file=text_file)
            print('\n', end='', file=text_file)

            print('Number of Units: {}'.format(hparam['number of units']),
                  file=text_file)
            print('Number of Layers: {}'.format(hparam['number of layers']),
                  file=text_file)
            print('Attention Units: {}'.format(hparam['attention units']),
                  file=text_file)
            print('\n', end='', file=text_file)

            print('Input Sequence Length: {}'.format(
                hparam['input sequence length']),
                  file=text_file)
            print('Target Sequence Length: {}'.format(
                hparam['target sequence length']),
                  file=text_file)
            print('\n', end='', file=text_file)

            print('Maximum Gradient Norm: {}'.format(
                hparam['maximum gradient norm']),
                  file=text_file)
            print('Initial Learning Rate: {}'.format(
                hparam['initial learning rate']),
                  file=text_file)
            print('Learning Rate Decay Steps: {}'.format(
                hparam['learning rate decay steps']),
                  file=text_file)
            print('Learning Rate Decay Rate: {}'.format(
                hparam['learning rate decay rate']),
                  file=text_file)
            print('\n', end='', file=text_file)

            print('Dropout Keep Probability: {}'.format(
                hparam['dropout keep probability']),
                  file=text_file)
            print('Decoder Sampling Probability: {}'.format(
                hparam['decoder sampling probability']),
                  file=text_file)
            print('\n', end='', file=text_file)

            print('Batch Size: {}'.format(hparam['batch size']),
                  file=text_file)
            print('Number of Training Batches: {}'.format(
                hparam['number of training batches']),
                  file=text_file)
            print('\n', end='', file=text_file)

            print('Global Step: {}'.format(current_global_step),
                  file=text_file)
            print('Last Learning Rate: {}'.format(current_learning_rate),
                  file=text_file)
Example #18
    def create_model(self):
        self.stft_input = tf.placeholder(
            dtype=tf.float32,
            shape=[None, self.stft_shape[1], self.stft_shape[2]])
        self.model = StftClassifierModel(1)
        self.model.inference(self.stft_input, training=False)
        self.session.run(tf.global_variables_initializer())
        self.style_models = [
            self.model.layers['x'],
            self.model.layers['conv1_1'],
            self.model.layers['conv1_2'],
            self.model.layers['conv1_3'],
            self.model.layers['conv1_4'],
            self.model.layers['conv_merge'],
        ]

        self.content_features_graph = self.model.layers['conv_merge']

        saver = Saver()
        # try:
        #     saver.restore(session, 'weights/stft_classifier/stft_classifier')
        # except NotFoundError:
        #     print('Not Found')

        style_loss = 0
        model_factors = [1.0] * len(self.style_models)
        sum_model_factor = np.sum(model_factors)
        style_losses = []
        self.style_features_list = []
        for style_model, model_factor in zip(self.style_models, model_factors):
            style_features = tf.placeholder(
                dtype=tf.float32,
                shape=[
                    None,
                    style_model.get_shape()[1].value,
                    style_model.get_shape()[2].value
                ])
            self.style_features_list.append(style_features)
            xs_gram = 0
            xs = style_features[0]
            xs = tf.reshape(xs, [-1, xs.get_shape()[-1].value])
            xs_gram += 1.0 * tf.matmul(tf.transpose(xs),
                                       xs) / xs.get_shape()[-1].value

            xg = style_model[0]
            xg = tf.reshape(xg, [-1, xg.get_shape()[-1].value])
            xg_gram = 1.0 * tf.matmul(tf.transpose(xg),
                                      xg) / xs.get_shape()[-1].value
            normalizer = tf.reduce_mean(
                tf.square(xs_gram)) * xs.get_shape()[0].value**2
            style_loss = model_factor * tf.nn.l2_loss(xs_gram -
                                                      xg_gram) / normalizer
            style_losses.append(params.style_factor * style_loss)

        self.style_losses = tf.stack(style_losses)
        self.style_loss = tf.reduce_mean(style_losses)
        self.const_zero = tf.constant(0)

        self.content_features = tf.placeholder(
            dtype=tf.float32,
            shape=[
                None, self.content_features_graph.shape[1],
                self.content_features_graph.shape[2]
            ])
        xc = self.content_features[0]
        xg = self.content_features_graph[0]
        self.content_loss = params.content_factor * tf.nn.l2_loss(
            xg - xc) / tf.reduce_mean(xc**2) / xc.get_shape()[0].value

        self.loss = 0
        if params.content_factor > 0:
            self.loss += self.content_loss
        if params.style_factor > 0:
            self.loss += self.style_loss
        self.loss *= 1e4
        self.grad = tf.gradients(self.loss, [self.stft_input])
        self.grad = tf.reshape(self.grad, [-1])
Example #19
tf.summary.scalar('loss', loss)

accuracy = tf.reduce_mean(
    tf.cast(tf.equal(tf.greater(logits, 0), tf.cast(labels, tf.bool)),
            tf.float32))
tf.summary.scalar('accuracy', accuracy)

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    th = sess.run(trn_itr.string_handle())
    vh = sess.run(vld_itr.string_handle())

    merged = tf.summary.merge_all()
    trn_writer = FileWriter(os.path.join(model_folder, 'train'), sess.graph)
    vld_writer = FileWriter(os.path.join(model_folder, 'validation'))
    saver = Saver()
    profiler = Profiler(sess.graph)
    opts = (option_builder.ProfileOptionBuilder(
        option_builder.ProfileOptionBuilder.trainable_variables_parameter()).
            with_file_output(os.path.join(model_folder,
                                          'profile_model.txt')).build())
    profiler.profile_name_scope(options=opts)

    value_lv = None
    lv = tf.Summary()
    lv.value.add(tag='loss', simple_value=value_lv)
    value_av = None
    av = tf.Summary()
    av.value.add(tag='accuracy', simple_value=value_av)

    for n in range(N_STEPS):
Example #20
    img, lb = batch_queue.dequeue()
#    img = tf.placeholder(tf.float32, shape=(None, 32,32,3))
    network_fn = nets_factory.get_network_fn(model_name)
    end_points = network_fn(img, is_training=False)
    output = end_points['Logits']
    
#    task1 = tf.to_int32(tf.argmax(end_points['Logits'], 1))
#    training_accuracy1 = slim.metrics.accuracy(task1, tf.to_int32(lb))
    
    def _get_init_fn(checkpoint_path, ignore_missing_vars=False):
        return slim.assign_from_checkpoint_fn(checkpoint_path,
                                              slim.get_variables_to_restore(),
                                              ignore_missing_vars = ignore_missing_vars)    
    variables_to_restore = slim.get_variables_to_restore()
    checkpoint_path = latest_checkpoint(train_dir)
    saver = Saver(variables_to_restore)
    config = ConfigProto()
    config.gpu_options.allow_growth=True
    sess = Session(config=config)
    sv = supervisor.Supervisor(logdir=checkpoint_path,
                               init_fn=_get_init_fn(checkpoint_path, ignore_missing_vars=True),
                               summary_op=None,
                               summary_writer=None,
                               global_step=None,
                               saver=None)
    correct = 0
    predict = 0
    with sv.managed_session(master='', start_standard_services=False, config=config) as sess:
#        saver.restore(sess, checkpoint_path)
        optim_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)