from typing import Callable, List

import numpy as np
import tensorflow as tf

# compute_feature_representations, gram_matrix, compute_grads and _logger are assumed
# to be defined elsewhere in the module this snippet comes from.


def _st(model: tf.keras.Model,
        gen_img: tf.Variable,
        content_path: str,
        style_path: str,
        content_layers: List[str],
        style_layers: List[str],
        lpi: Callable,
        opt: tf.train.AdamOptimizer,
        content_weight=1e3,
        style_weight=1e-2,
        num_iterations=100) -> None:
    """
    Neural style transfer: paint the style of a style image onto a content image with a given pre-trained network
    :param model: The model to use for the style transfer
    :param gen_img: The generated image, modified IN PLACE
    :param content_path: The path to the content (source) image
    :param style_path: The path to the style image
    :param content_layers: The list of content layers to use
    :param style_layers: The list of style layers to use
    :param lpi: The function used to load and preprocess an image
    :param opt: The Adam optimizer to use
    :param content_weight: The weight for the content loss
    :param style_weight: The weight for the style loss
    :param num_iterations: The number of optimization iterations to run
    :return: None; gen_img is updated in place with the stylized result
    """
    # Get the style and content feature representations (from our specified intermediate layers)
    style_features, content_features = compute_feature_representations(
        model, lpi, content_path, style_path, len(style_layers))
    gram_style_features = [
        gram_matrix(style_feature) for style_feature in style_features
    ]
    loss_weights = (style_weight, content_weight)
    cfg = {
        'model': model,
        'loss_weights': loss_weights,
        'gen_img': gen_img,
        'gram_style_features': gram_style_features,
        'content_features': content_features,
        'num_style_layers': len(style_layers),
        'num_content_layers': len(content_layers)
    }
    # ImageNet channel means used by the VGG preprocessing; clip so the
    # de-processed image stays within [0, 255]
    norm_means = np.array([103.939, 116.779, 123.68])
    min_vals = -norm_means
    max_vals = 255 - norm_means
    for i in range(num_iterations):
        grads, all_loss = compute_grads(cfg)
        loss, style_score, content_score = all_loss
        opt.apply_gradients([(grads, gen_img)])
        clipped = tf.clip_by_value(gen_img, min_vals, max_vals)
        gen_img.assign(clipped)
        _logger.info(
            f"Iteration n°{i} | loss : {loss} | style_score : {style_score} | content_score : {content_score}"
        )
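
A minimal sketch of how _st might be driven end to end; the layer names, the model builder
build_feature_model and the image loader load_and_process_img below are assumptions, not
part of the snippet above.

# Hypothetical driver for _st; the helper names are assumptions.
content_layers = ['block5_conv2']
style_layers = ['block1_conv1', 'block2_conv1', 'block3_conv1',
                'block4_conv1', 'block5_conv1']

model = build_feature_model(style_layers + content_layers)  # assumed to return a tf.keras.Model
gen_img = tf.Variable(load_and_process_img('content.jpg'), dtype=tf.float32)
opt = tf.train.AdamOptimizer(learning_rate=5.0, beta1=0.99, epsilon=1e-1)

_st(model, gen_img, 'content.jpg', 'style.jpg',
    content_layers, style_layers, load_and_process_img, opt,
    num_iterations=1000)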
Example #2
    def __init__(self, config: Config, gpu_index, opt: tf.train.AdamOptimizer):
        self.config = config
        with tf.device('/gpu:%d' % gpu_index):
            self.x = tf.placeholder(tf.int32, [None, config.num_step1], 'x')
            self.y = tf.placeholder(tf.int32, [None, config.num_step2], 'y')

            losses, self.y_predict = self.encode_decode()
            self.loss = tf.reduce_mean(losses)
            self.grad = opt.compute_gradients(self.loss)
Example #3
    def __init__(self, config: Config, gpu_index, opt: tf.train.AdamOptimizer):
        self.config = config
        with tf.device('/gpu:%d' % gpu_index):
            self.x = tf.placeholder(tf.int32, [None, config.num_step1], 'x')
            self.y = tf.placeholder(tf.int32, [None, config.num_step2], 'y')
            with tf.variable_scope('encode'):
                state, attention = self.encode()
            with tf.variable_scope('decode'):
                loss_list, self.y_predict = self.decode(state, attention)
            self.loss = tf.reduce_mean(loss_list)
            self.grad = opt.compute_gradients(self.loss)
Example #4
    def __init__(self, config: Config, gpu_index, opt: tf.train.AdamOptimizer):
        with tf.device('/gpu:%d' % gpu_index):
            self.x = tf.placeholder(tf.int32, [None, config.num_step], 'x')
            char_dict = tf.get_variable('char_dict', [config.ch_size, config.num_units], tf.float32)
            x = tf.nn.embedding_lookup(char_dict, self.x)  # -1, 32, num_units

            self.y = tf.placeholder(tf.int32, [None, config.num_step], 'y')
            y_onehot = tf.one_hot(self.y, config.classes)

            loss_list, self.predict_list = self.bi_rnn(x, y_onehot, config)
            self.loss = tf.reduce_mean(loss_list)
            self.grad = opt.compute_gradients(self.loss)
Example #5
    def __init__(self, gpu_index, config: Config, opt: tf.train.AdamOptimizer):
        self.config = config
        with tf.device('/gpu:%d' % gpu_index):
            self.articles = tf.placeholder(tf.int32, [None, config.num_step1], 'articles')
            self.questions = tf.placeholder(tf.int32, [None, config.num_step2], 'questions')
            self.answers = tf.placeholder(tf.int32, [None, config.num_step3], 'answers')

            article_state = self.get_state(None, self.articles, config.num_step1, 'articles')
            question_state = self.get_state(article_state, self.questions, config.num_step2, 'questions')

            loss_list, self.y_predict = self.get_answer(question_state, self.answers, config.num_step3, 'answer')

            self.loss = tf.reduce_mean(loss_list)
            self.grad = opt.compute_gradients(self.loss)
Example #6
    def __init__(self, config: Config, gpu_index, opt: tf.train.AdamOptimizer,
                 char_size, training):
        self.config = config
        self.training = training
        with tf.device('/gpu:%d' % gpu_index):
            self.x = tf.placeholder(
                tf.float32,
                [None, config.picture_size, config.picture_size, 3], 'x')
            with tf.variable_scope('resnet50'):
                x = self.resnet50(self.x)

            lstm = tf.nn.rnn_cell.BasicLSTMCell(config.num_units, name='lstm')
            cell = tf.nn.rnn_cell.MultiRNNCell([lstm] * 2)

            batch_size_tf = tf.shape(self.x)[0]
            state = cell.zero_state(batch_size_tf, tf.float32)

            self.y = tf.placeholder(tf.int32, [None, config.num_step], 'y')
            y_onehot = tf.one_hot(self.y, char_size)  #-1, num_step, char_size

            loss_list = []
            precise_list = []

            with tf.variable_scope('rnn'):
                for i in range(config.num_step):
                    y_pred, state = cell(x, state)
                    y_pred = tf.layers.dense(y_pred,
                                             char_size,
                                             name='y_predict')
                    tf.get_variable_scope().reuse_variables()

                    y = y_onehot[:, i]
                    loss = tf.nn.softmax_cross_entropy_with_logits_v2(
                        logits=y_pred, labels=y)
                    loss_list.append(loss)

                    y_predict_id = tf.math.argmax(y_pred,
                                                  axis=1,
                                                  output_type=tf.int32)
                    y_id = self.y[:, i]
                    precise = tf.equal(y_predict_id, y_id)
                    precise = tf.cast(precise, tf.float32)
                    precise = tf.reduce_mean(precise)
                    precise_list.append(precise)

            self.loss = tf.reduce_mean(loss_list)
            self.precise = tf.reduce_mean(precise_list)
            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                self.grad = opt.compute_gradients(self.loss)
Example #7
    def __init__(self, gpu_index, config: Config, opt: tf.train.AdamOptimizer):
        with tf.device('/gpu:%d' % gpu_index):
            self.x = tf.placeholder(tf.float32, [config.num_step, None],
                                    'x')  # num_step, stock_num
            self.y = tf.placeholder(tf.float32, [None], 'y')  # stock_num

            cell = MyCell(config.hidden_size, config.state_size)
            # state = cell.zero_state(config.stock_num)
            state = cell.zero_state(tf.shape(self.x)[1])

            with tf.variable_scope('rnn'):
                for i in range(config.num_step):
                    xi = self.x[i]  # [stock_num]
                    xi = tf.reshape(xi, [-1, 1])  # [stock_num, 1]
                    state = cell(xi, state, 'my_cell')
                    tf.get_variable_scope().reuse_variables()

            y_predict = tf.layers.dense(state, 1, name='dense')  # stock_num, 1
            self.y_predict = tf.reshape(y_predict, [-1])  # stock_num

            self.loss = tf.reduce_mean(tf.abs(self.y - self.y_predict))
            # grad is a list of (gradient, variable) pairs: [(g, v), (g, v), ...]
            self.grad = opt.compute_gradients(self.loss)
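
The comment above describes what opt.compute_gradients returns; in a multi-tower setup these
per-GPU lists are usually averaged before a single apply_gradients call. A minimal sketch,
assuming tower_grads holds one compute_gradients() result per GPU:

def average_gradients(tower_grads):
    # tower_grads: one [(g, v), ...] list per tower; average g across towers for each variable v
    averaged = []
    for grads_and_vars in zip(*tower_grads):
        grads = [g for g, _ in grads_and_vars if g is not None]
        var = grads_and_vars[0][1]
        averaged.append((tf.reduce_mean(tf.stack(grads), axis=0), var))
    return averaged

train_op = opt.apply_gradients(average_gradients(tower_grads))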
Example #8
    def __init__(self, config: Config, gpu_index, opt: tf.train.AdamOptimizer,
                 char_size):
        with tf.device('/gpu:%d' % gpu_index):
            self.x = tf.placeholder(tf.int32, [None, config.num_step],
                                    'x')  #-1, 32
            char_dict = tf.get_variable('char_dict',
                                        [char_size, config.num_units],
                                        tf.float32)  #4000, 200
            x = tf.nn.embedding_lookup(char_dict, self.x)  #-1, 32, 200

            cell1 = tf.nn.rnn_cell.BasicLSTMCell(config.num_units,
                                                 state_is_tuple=True,
                                                 name='lstm1')
            cell2 = tf.nn.rnn_cell.BasicLSTMCell(config.num_units,
                                                 state_is_tuple=True,
                                                 name='lstm2')
            cell = tf.nn.rnn_cell.MultiRNNCell([cell1, cell2])

            # cell1 = MyLSTM(config.num_units, name = 'lstm1')
            # cell2 = MyLSTM(config.num_units, name='lstm2')
            # cell = MyMultiLSTM([cell1, cell2])

            batch_size_tf = tf.shape(self.x)[0]
            state = cell.zero_state(batch_size_tf, tf.float32)

            y = tf.concat(
                [self.x[:, 1:],
                 tf.zeros([batch_size_tf, 1], tf.int32)],
                axis=1)  #-1, 32
            y_onehot = tf.one_hot(y, char_size)  #-1, 32, 4000

            loss_list = []
            precise_list = []
            with tf.variable_scope('rnn'):
                for i in range(config.num_step):
                    xi = x[:, i]  #-1, 200
                    yi_pred, state = cell(xi, state)
                    yi_pred = tf.layers.dense(
                        yi_pred, char_size, name='yi_predict')  #-1, char_size
                    yi_onehot = y_onehot[:, i]  #-1, 4000

                    loss = tf.nn.softmax_cross_entropy_with_logits_v2(
                        logits=yi_pred, labels=yi_onehot)
                    loss_list.append(loss)

                    yi_predict_id = tf.math.argmax(yi_pred,
                                                   axis=1,
                                                   output_type=tf.int32)
                    yi_label_id = y[:, i]
                    precise = tf.equal(yi_label_id, yi_predict_id)
                    precise = tf.cast(precise, tf.float32)
                    precise = tf.reduce_mean(precise)
                    precise_list.append(precise)
                    tf.get_variable_scope().reuse_variables()
            self.loss = tf.reduce_mean(loss_list)
            self.precise = tf.reduce_mean(precise_list)
            self.grad = opt.compute_gradients(self.loss)

            self.batch_size = tf.placeholder(tf.int32, [], 'batch_size')
            self.xi = tf.placeholder(tf.int32, [None], 'xi')
            self.inputstate = cell.zero_state(self.batch_size, tf.float32)
            with tf.variable_scope('rnn', reuse=True):
                xi = tf.nn.embedding_lookup(char_dict, self.xi)
                y_predict, self.state = cell(xi, self.inputstate)
                y_predict = tf.layers.dense(y_predict,
                                            char_size,
                                            name='yi_predict')
                self.y_predict = tf.math.argmax(y_predict, axis=1)
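
Because this last block also builds a single-step inference graph (self.xi, self.inputstate,
self.state), character-by-character sampling could look roughly like the sketch below; sess,
model, start_id, id_to_char and num_chars are assumptions, not part of the snippet.

# Sampling sketch: feed the previous character and state back in at every step.
state = sess.run(model.inputstate, feed_dict={model.batch_size: 1})  # initial zero state
xi = [start_id]
chars = []
for _ in range(num_chars):
    y_id, state = sess.run([model.y_predict, model.state],
                           feed_dict={model.xi: xi, model.inputstate: state})
    xi = y_id
    chars.append(id_to_char[int(y_id[0])])
print(''.join(chars))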