Example #1
    def _build_train_fn(self):
        """
        Adapted from https://gist.github.com/kkweon/c8d1caabaf7b43317bc8825c226045d2
        """

        action_prob_placeholder = self.model.output
        action_onehot_placeholder = K.placeholder(shape=(None, self.n_a),
                                                  name="action_onehot")
        adv_placeholder = K.placeholder(shape=(None, ), name="advantages")
        old_action_probs_placeholder = K.placeholder(shape=(None, ),
                                                     name="pi_old")

        action_prob = K.sum(action_prob_placeholder *
                            action_onehot_placeholder,
                            axis=1)

        r = action_prob / (old_action_probs_placeholder + 1e-10)

        clip_loss = K.minimum(
            r * adv_placeholder,
            K.clip(r, 1 - self.epsilon, 1 + self.epsilon) * adv_placeholder)
        loss = -K.mean(clip_loss + self.entropy_coeff * -action_prob *
                       K.log(action_prob + 1e-10))

        adam = Adam(lr=self.actor_learning_rate)

        updates = adam.get_updates(params=self.model.trainable_weights,
                                   loss=loss)

        self.train_fn = K.function(inputs=[
            self.model.input, action_onehot_placeholder, adv_placeholder,
            old_action_probs_placeholder
        ],
                                   outputs=[loss],
                                   updates=updates)
Example #2
    def make_critic_train_fn(self):
        action_oh_pl = kb.placeholder(shape=(None, self.env.action_space.n))
        newaction_oh_pl = kb.placeholder(shape=(None, self.env.action_space.n))
        rewards_pl = kb.placeholder(shape=(None, ))
        dones_pl = kb.placeholder(shape=(None, ))

        critic_results = self.critic_model.output
        q_hat, new_q_hat = critic_results[0], critic_results[1]

        # TD loss
        val = rewards_pl + (1.0 - dones_pl) * self.gamma * new_q_hat - q_hat

        # Mean squared error of the prediction
        loss = kb.mean(val**2)

        adam = Adam(lr=self.learning_rate)

        update_op = adam.get_updates(
            loss=loss, params=self.critic_model.trainable_weights)

        train_fn = kb.function(inputs=[
            self.critic_model.input, action_oh_pl, newaction_oh_pl, rewards_pl,
            dones_pl
        ],
                               outputs=[self.critic_model.output, val, loss],
                               updates=update_op)
        return train_fn
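The critic update above minimizes the squared one-step TD error. Written out (a sketch using the same quantities as the code, with d the `dones_pl` termination flag):

\[
\delta = r + (1 - d)\,\gamma\,\hat{Q}(s', a') - \hat{Q}(s, a),
\qquad
L_{\text{critic}} = \mathbb{E}\big[\delta^{2}\big]
\]

so bootstrapping through the next-state value is switched off on terminal transitions.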
Example #3
def _build_tf_l2_similarity(max_rank=0, offset=1):
    # We build the graph (See utils.generic_utils.tf_recall_at_k for original implementation):
    tf_db = K.placeholder(ndim=2, dtype=K.floatx())  # Where to find
    tf_labels = K.placeholder(ndim=1, dtype=K.floatx())  # and their labels

    tf_batch_query = K.placeholder(
        ndim=2, dtype=K.floatx())  # Used in case of memory issues
    batch_labels = K.placeholder(ndim=2, dtype=K.floatx())  # and their labels

    all_representations_T = K.expand_dims(tf_db, axis=0)  # 1 x D x N
    batch_representations = K.expand_dims(tf_batch_query, axis=0)  # 1 x n x D
    dist = -2. * K.batch_dot(batch_representations,
                             all_representations_T)  # 1 x n x N
    dist = K.squeeze(dist, axis=0)  # n x N
    dist += K.sum(tf_batch_query * tf_batch_query, axis=1, keepdims=True)
    dist += K.sum(tf_db * tf_db, axis=0, keepdims=True)

    if max_rank > 0:  # computing r@K or mAP@K
        # top_k finds the k greatest entries and we want the lowest. Note that distance with itself will be last ranked
        dist = -dist
        index_ranking = tf.nn.top_k(dist, k=max_rank + offset).indices
    else:
        index_ranking = tf.contrib.framework.argsort(dist,
                                                     axis=-1,
                                                     direction='ASCENDING',
                                                     stable=True)

    index_ranking = index_ranking[:, offset:]

    tf_ranking = tf.gather(tf_labels, index_ranking)

    return tf_db, tf_labels, tf_batch_query, batch_labels, tf_ranking
Example #4
    def make_train_fn(self):
        action_oh_pl = kb.placeholder(shape=(None, self.env.action_space.n))
        discounted_rw_pl = kb.placeholder(shape=(None,))

        action_prob = kb.sum(action_oh_pl * self.train_model.output, axis=-1)
        log_action_prob = kb.log(action_prob)

        loss = kb.mean(- log_action_prob * discounted_rw_pl)

        adam = Adam(lr=self.learning_rate)

        update_op = adam.get_updates(
            loss=loss,
            params=self.train_model.trainable_weights
        )

        train_fn = kb.function(
            inputs=[
                self.train_model.input,
                action_oh_pl,
                discounted_rw_pl
            ],
            outputs=[self.train_model.output, loss],
            updates=update_op
        )
        return train_fn
Example #5
    def __init__(self, inp_shape):
        self.inp_shape = inp_shape
        self.model = Sequential()
        self.model.add(
            Dense(128,
                  activation='relu',
                  input_shape=inp_shape,
                  kernel_initializer='he_normal'))
        self.model.add(Dense(3, activation='softmax'))

        action_prob_placeholder = self.model.output
        action_onehot_placeholder = K.placeholder(
            shape=(None, 3), name="action_onehot_placeholder")
        discount_reward_placeholder = K.placeholder(shape=(None, ),
                                                    name="discount_reward")

        action_prob = K.sum(action_prob_placeholder *
                            action_onehot_placeholder,
                            axis=1)
        log_action_prob = K.log(action_prob)

        loss = -log_action_prob * discount_reward_placeholder
        loss = K.mean(loss)

        adam = optimizers.Adam(lr=1e-4)

        updates = adam.get_updates(params=self.model.trainable_weights,
                                   loss=loss)

        self.train_fn = K.function(inputs=[
            self.model.input, action_onehot_placeholder,
            discount_reward_placeholder
        ],
                                   outputs=[],
                                   updates=updates)
Example #6
    def _build_train_fn(self):

        """
        Adapted from https://gist.github.com/kkweon/c8d1caabaf7b43317bc8825c226045d2
        """

        action_prob_placeholder = self.model.output
        action_onehot_placeholder = K.placeholder(shape=(None, self.n_a), name="action_onehot")
        adv_placeholder = K.placeholder(shape=(None,), name="advantages")

        action_prob = K.sum(action_prob_placeholder * action_onehot_placeholder, axis=1)
        log_action_prob = K.log(action_prob)

        loss = - log_action_prob * adv_placeholder
        loss = K.mean(loss)

        adam = Adam(lr=self.actor_learning_rate)

        updates = adam.get_updates(params=self.model.trainable_weights,
                                   loss=loss)

        self.train_fn = K.function(inputs=[self.model.input,
                                           action_onehot_placeholder,
                                           adv_placeholder],
                                   outputs=[loss],
                                   updates=updates)
Example #7
    def optimizer(self):
        """ Actor Optimization: Advantages + Entropy term to encourage exploration
        (Cf. https://arxiv.org/abs/1602.01783)
        """
        actor, critic = self.actor_critic(self.actor_critic.input)

        action = K.placeholder(shape=(None, self.out_dim))
        advantages = K.placeholder(shape=(None, ))
        weighted_actions = K.sum(action * actor, axis=1)
        eligibility = K.log(weighted_actions +
                            1e-10) * K.stop_gradient(advantages)
        entropy = K.sum(actor * K.log(actor + 1e-10), axis=1)
        entropy = K.mean(entropy)
        actor_loss = 1.0e-3 * entropy - K.mean(eligibility)
        # actor_loss = 1.0e-4 * entropy - K.cast(K.sum(eligibility), 'float32')

        discounted_reward = K.placeholder(shape=(None, 1))
        critic_loss = K.mean(K.square(discounted_reward - critic))
        # loss = actor_loss + 0.5 * critic_loss
        # updates = self.adam_optimizer.get_updates(loss=loss, params=self.actor_critic.trainable_weights)
        # return K.function(inputs=[self.actor_critic.input, action, advantages, discounted_reward], \
        #                     outputs=loss, updates=updates)
        updates = self.adam_optimizer.get_updates(
            loss=[actor_loss, critic_loss],
            params=self.actor_critic.trainable_weights)
        return K.function(inputs=[self.actor_critic.input, action, advantages, discounted_reward], \
                            outputs=[actor_loss, critic_loss], updates=updates)
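Written out, the two objectives minimized above are (a sketch following the code's sign conventions; the advantage A_t is held constant through `K.stop_gradient`):

\[
L_{\text{actor}} = 10^{-3}\,\mathbb{E}\Big[\textstyle\sum_a \pi(a \mid s)\log\pi(a \mid s)\Big]
\;-\; \mathbb{E}\big[\log\pi(a_t \mid s_t)\,A_t\big],
\qquad
L_{\text{critic}} = \mathbb{E}\big[(R_t - V(s_t))^{2}\big]
\]

The first term is a small multiple of the negative policy entropy, so minimizing it raises the entropy and encourages exploration.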
Example #8
def _build_tf_cosine_similarity(max_rank=0, offset=1, eps=1e-12):
    # We build the graph (See utils.generic_utils.tf_recall_at_k for original implementation):
    tf_db = K.placeholder(ndim=2, dtype=K.floatx())  # Where to find
    tf_labels = K.placeholder(ndim=1, dtype=K.floatx())  # and their labels

    tf_batch_query = K.placeholder(
        ndim=2, dtype=K.floatx())  # Used in case of memory issues
    batch_labels = K.placeholder(ndim=2, dtype=K.floatx())  # and their labels

    all_representations_T = K.expand_dims(tf_db, axis=0)  # 1 x D x N
    batch_representations = K.expand_dims(tf_batch_query, axis=0)  # 1 x n x D
    sim = K.batch_dot(batch_representations,
                      all_representations_T)  # 1 x n x N
    sim = K.squeeze(sim, axis=0)  # n x N
    sim /= tf.linalg.norm(tf_batch_query, axis=1, keepdims=True) + eps
    sim /= tf.linalg.norm(tf_db, axis=0, keepdims=True) + eps

    if max_rank > 0:  # computing r@K or mAP@K
        index_ranking = tf.nn.top_k(sim, k=max_rank + offset).indices
    else:
        index_ranking = tf.contrib.framework.argsort(sim,
                                                     axis=-1,
                                                     direction='DESCENDING',
                                                     stable=True)

    top_k = index_ranking[:, offset:]
    tf_ranking = tf.gather(tf_labels, top_k)

    return tf_db, tf_labels, tf_batch_query, batch_labels, tf_ranking
Example #9
def get_metrics_tf(gt: np.ndarray, pred: np.ndarray,
                   metric_fns: List[Callable]) -> Dict:
    """
    Calculates TensorFlow evaluation metrics for a given image predictions.

    :param gt: image ground truth.
    :param pred: image predictions.
    :param metric_fns: list of metric functions.
    :return: evaluation metrics.
    """
    gt_ph = K.placeholder(ndim=4)
    pred_ph = K.placeholder(ndim=4)
    metrics = {}
    for metric_fn in metric_fns:
        if type(metric_fn) is str:
            metric_name = metric_fn
            metric_fn = getattr(cloud_detection.losses, metric_fn)
        else:
            metric_name = metric_fn.__name__
        loss = K.mean(metric_fn(gt_ph, pred_ph))
        metrics[f"{metric_name}"] = loss.eval(session=K.get_session(),
                                              feed_dict={
                                                  gt_ph: gt,
                                                  pred_ph: pred
                                              })
    return metrics
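A minimal usage sketch (assuming the TF1-style graph backend that `K.placeholder` and `K.get_session` require; the metric below is only an illustration, any `(y_true, y_pred)` Keras-backend callable works):

import numpy as np
from tensorflow.keras import backend as K

def mean_absolute_error(y_true, y_pred):
    # Any metric with a (y_true, y_pred) signature returning a tensor works here.
    return K.mean(K.abs(y_true - y_pred))

# Fake 4-D NHWC ground truth and predictions, matching the ndim=4 placeholders.
gt = np.random.randint(0, 2, size=(2, 64, 64, 1)).astype("float32")
pred = np.random.rand(2, 64, 64, 1).astype("float32")

metrics = get_metrics_tf(gt, pred, [mean_absolute_error])
print(metrics)  # {'mean_absolute_error': <scalar>}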
Example #10
def test_relu(x):
    K.clear_session()
    means, covariances, mode = x
    means_tensor = K.placeholder(means.shape, dtype=means.dtype)
    covariances_tensor = K.placeholder(
        covariances.shape, dtype=covariances.dtype
    )
    f = K.function(
        [means_tensor, covariances_tensor],
        kerasadf.activations.relu(
            [means_tensor, covariances_tensor], mode=mode
        ),
    )
    means_out, covariances_out = f([means, covariances])
    assert means.shape == means_out.shape
    assert covariances.shape == covariances_out.shape
    assert means.dtype.name == means_out.dtype.name
    assert covariances.dtype.name == covariances_out.dtype.name
    assert_leq(np.zeros_like(means_out), means_out)
    assert_leq(means, means_out)
    if mode == "diag":
        variances_out = covariances_out
    elif mode == "half":
        cov_shape = covariances_out.shape
        variances_out = np.reshape(
            np.sum(
                np.square(
                    np.reshape(
                        covariances_out,
                        (cov_shape[0], cov_shape[1], np.prod(cov_shape[2:])),
                    )
                ),
                axis=1,
            ),
            means_out.shape,
        )
    elif mode == "full":
        cov_shape = covariances_out.shape
        cov_rank = len(cov_shape) - 1
        variances_out = np.reshape(
            np.diagonal(
                np.reshape(
                    covariances_out,
                    (
                        cov_shape[0],
                        np.prod(cov_shape[1 : cov_rank // 2 + 1]),
                        np.prod(cov_shape[cov_rank // 2 + 1 :]),
                    ),
                ),
                axis1=-2,
                axis2=-1,
            ),
            means_out.shape,
        )
    assert means_out.shape == variances_out.shape
    assert_leq(np.zeros_like(variances_out), variances_out)
Example #11
    def get_distance_matrix(self, X, Y):
        import tensorflow.keras.backend as K
        x_input = K.placeholder(X.shape)
        y_input = K.placeholder(Y.shape)
        dot = K.dot(x_input, K.transpose(y_input))
        x_norm = K.reshape(K.sum(K.pow(x_input, 2), axis=1), (-1, 1))
        y_norm = K.reshape(K.sum(K.pow(y_input, 2), axis=1), (1, -1))
        dist_mat = x_norm + y_norm - 2.0 * dot
        sqrt_dist_mat = K.sqrt(K.clip(dist_mat, min_value=0, max_value=10000))
        dist_func = K.function([x_input, y_input], [sqrt_dist_mat])

        return dist_func([X, Y])[0]
Example #12
    def __build_train_fn(self):
        """Create a train function
        It replaces `model.fit(X, y)` because we use the output of model and use it for training.
        """
        action_prob_placeholder = self.model.model.outputs
        advantage_placeholder = K.placeholder(shape=(None, ), name="advantage")

        action_placeholder = []
        old_mu_placeholder = []
        action_prob_old = []
        loss = []
        for i in range(len(self.output_dim)):
            # Old-policy distribution over the i-th action head; it is multiplied
            # with the one-hot mask below and fed to the KL term, so it needs a full row.
            o_mu_pl = K.placeholder(shape=(None, self.output_dim[i]),
                                    name="old_mu_placeholder" + str(i))
            old_mu_placeholder.append(o_mu_pl)

            act_pl = K.placeholder(shape=(None, ),
                                   name="action_placeholder" + str(i),
                                   dtype='int32')
            action_placeholder.append(act_pl)

            act_prob = K.sum(K.one_hot(act_pl, self.output_dim[i]) *
                             action_prob_placeholder[i],
                             axis=1)

            act_prob_old = K.sum(K.one_hot(act_pl, self.output_dim[i]) *
                                 o_mu_pl,
                                 axis=1)
            action_prob_old.append(K.mean(-K.log(act_prob_old)))

            logp = K.log(act_prob)
            old_logp = K.log(act_prob_old)
            kl = losses.kullback_leibler_divergence(old_mu_placeholder[i],
                                                    action_prob_placeholder[i])

            l = (act_prob - act_prob_old) * advantage_placeholder - kl
            loss.append(-K.mean(l))

        entropy = K.sum(action_prob_old)
        loss = K.stack(loss)
        loss_p = K.sum(loss)

        adam = optimizers.Adam(lr=self.pi_lr)
        updates = adam.get_updates(loss=loss,
                                   params=self.model.trainable_weights)

        self.train_fn = K.function(inputs=[
            *self.model.model.inputs, *old_mu_placeholder, *action_placeholder,
            advantage_placeholder
        ],
                                   outputs=[loss_p, entropy],
                                   updates=updates)
Example #13
    def __build_train_fn(self):
        """Create a train function
        It replaces `model.fit(X, y)` because we use the output of model and use it for training.
        For example, we need action placeholder
        called `action_one_hot` that stores, which action we took at state `s`.
        Hence, we can update the same action.
        This function will create
        `self.train_fn([state, action_one_hot, discount_reward])`
        which would train the model.
        """
        # """ Placeholders """
        # input_placeholder = K.placeholder(shape=(None, *self.input_dim), name='model_inputs')
        # actions_placeholder = K.placeholder(shape=(None,), dtype='uint8', name="selected_actions")
        # rewards_placeholder = K.placeholder(shape=(None,), name="discount_reward")
        #
        # """ Internal operations """
        # output_tensor = self.model(input_placeholder)
        # action_onehot_placeholder = K.one_hot(indices=actions_placeholder, num_classes=self.output_dim)
        #
        # action_prob = K.sum(output_tensor * action_onehot_placeholder, axis=1)
        # log_action_prob = K.log(action_prob + K.epsilon())
        #
        # loss = - log_action_prob * rewards_placeholder
        # total_loss = K.mean(loss)
        #
        # nn_train = K.function(inputs=[input_placeholder, actions_placeholder, rewards_placeholder],
        #                       outputs=[total_loss],
        #                       updates=Adam(lr=self.learning_rate).get_updates(total_loss, self.model.trainable_weights))
        # #                       updates=Adam(lr=5e-6, beta_1=0.5, beta_2=0.999).get_updates(total_loss, self.model.trainable_weights))
        #
        # self.train_fn = nn_train
        # ------------------------------------------------------------------
        """ Placeholders """
        observations_placeholder = K.placeholder(shape=(None, *self.input_dim), name='model_inputs')
        actions_placeholder = K.placeholder(shape=(None,), dtype='uint8', name="selected_actions")
        rewards_placeholder = K.placeholder(shape=(None,), name="discounted_rewards")

        """ Internal operations """
        Ylogits = self.model(observations_placeholder)

        cross_entropies = K.categorical_crossentropy(target=K.one_hot(indices=actions_placeholder, num_classes=self.output_dim),
                                                     output=Ylogits,
                                                     from_logits=True)  # from_logits=False
        loss = K.mean(rewards_placeholder * cross_entropies)

        nn_train = K.function(inputs=[observations_placeholder, actions_placeholder, rewards_placeholder],
                              outputs=[loss],
                              updates=Adam(lr=self.learning_rate).get_updates(loss, self.model.trainable_weights))  # RMSprop().get_updates(loss=loss, params=self.model.trainable_weights)

        self.train_fn = nn_train
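A hypothetical call of the resulting train function (assuming `agent` is an instance of the class above with `input_dim=(4,)` and `output_dim=2`; shapes are illustrative only):

import numpy as np

# One rollout of three steps: states, the integer actions taken, and their discounted returns.
states = np.random.rand(3, 4).astype("float32")
actions = np.array([0, 1, 1], dtype="uint8")
discounted_rewards = np.array([1.0, 0.5, 0.25], dtype="float32")

# A single optimizer step on the policy network; the function returns the scalar loss.
loss, = agent.train_fn([states, actions, discounted_rewards])
print(loss)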
Example #14
def styleTransfer(cData, sData, tData):
    print("   Building transfer model.")
    contentTensor = K.variable(cData)
    styleTensor = K.variable(sData)
    genTensor = K.placeholder((1, CONTENT_IMG_H, CONTENT_IMG_W, 3))
    inputTensor = K.concatenate([contentTensor, styleTensor, genTensor], axis=0)
    
    model = vgg19.VGG19(include_top=False, weights="imagenet", input_tensor=inputTensor)
    outputDict = dict([(layer.name, layer.output) for layer in model.layers])
    print("   VGG19 model loaded.")
    
    styleLayerNames = ["block1_conv1", "block2_conv1", "block3_conv1", "block4_conv1", "block5_conv1"]
    contentLayerName = "block5_conv2"
    loss = compute_loss(genTensor, outputDict, styleLayerNames, contentLayerName)
    
    # Set up gradients with K.gradients().
    grads = K.gradients(loss, genTensor)
    kFunction = K.function([genTensor], [loss] + grads)
    evaluator = Evaluator(kFunction)
    print("   Beginning transfer.")
    x = tData
    for i in range(TRANSFER_ROUNDS):
        print("   Step %d." % i)
        # perform gradient descent using fmin_l_bfgs_b.
        x, tLoss, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(), fprime=evaluator.grads, maxiter=20)
        print("      Loss: %f." % tLoss)
        img = deprocessImage(x.copy())
        saveFile = CONTENT_IMG_PATH[:-4] + STYLE_IMG_PATH[:-4] + str(i) + ".jpg"
        imageio.imwrite(saveFile, img)
        print("      Image saved to \"%s\"." % saveFile)
    print("   Transfer complete.")
Example #15
def styleTransfer(cData, sData, tData):
    print("   Building transfer model.")
    contentTensor = K.variable(cData)
    styleTensor = K.variable(sData)
    genTensor = K.placeholder((1, CONTENT_IMG_H, CONTENT_IMG_W, 3))
    inputTensor = K.concatenate([contentTensor, styleTensor, genTensor], axis=0)
    model = None   #TODO: implement.
    outputDict = dict([(layer.name, layer.output) for layer in model.layers])
    print("   VGG19 model loaded.")
    loss = 0.0
    styleLayerNames = ["block1_conv1", "block2_conv1", "block3_conv1", "block4_conv1", "block5_conv1"]
    contentLayerName = "block5_conv2"
    print("   Calculating content loss.")
    contentLayer = outputDict[contentLayerName]
    contentOutput = contentLayer[0, :, :, :]
    genOutput = contentLayer[2, :, :, :]
    loss += None   #TODO: implement.
    print("   Calculating style loss.")
    for layerName in styleLayerNames:
        loss += None   #TODO: implement.
    loss += None   #TODO: implement.
    # TODO: Setup gradients or use K.gradients().
    print("   Beginning transfer.")
    for i in range(TRANSFER_ROUNDS):
        print("   Step %d." % i)
        #TODO: perform gradient descent using fmin_l_bfgs_b.
        print("      Loss: %f." % tLoss)
        img = deprocessImage(x)
        saveFile = None   #TODO: Implement.
        #imsave(saveFile, img)   #Uncomment when everything is working right.
        print("      Image saved to \"%s\"." % saveFile)
    print("   Transfer complete.")
Example #16
    def generate(self):
        model_path = os.path.expanduser(self.model_path)
        assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'

        self.yolo_model = load_model(model_path, compile=False)
        print('{} model, anchors, and classes loaded.'.format(model_path))

        # Generate colors for drawing bounding boxes.
        hsv_tuples = [(x / len(self.class_names), 1., 1.)
                      for x in range(len(self.class_names))]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(
            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                self.colors))
        random.seed(10101)  # Fixed seed for consistent colors across runs.
        random.shuffle(
            self.colors)  # Shuffle colors to decorrelate adjacent classes.
        random.seed(None)  # Reset seed to default.

        # Generate output tensor targets for filtered bounding boxes.
        self.input_image_shape = K.placeholder(shape=(2, ))
        boxes, scores, classes = yolo_eval(self.yolo_model.output,
                                           self.anchors,
                                           len(self.class_names),
                                           self.input_image_shape,
                                           score_threshold=self.score,
                                           iou_threshold=self.iou)
        return boxes, scores, classes
Example #17
def test_stochastic_binary_inference_mode(alpha, test_values, expected_values):
    K.set_learning_phase(0)
    x = K.placeholder(ndim=2)
    q = stochastic_binary(alpha)
    f = K.function([x], [q(x)])
    result = f([test_values])[0]
    assert_allclose(result, expected_values, rtol=1e-05)
Example #18
def test_quantized_bits(bits, integer, symmetric, keep_negative, test_values,
                        expected_values):
    x = K.placeholder(ndim=2)
    f = K.function(
        [x], [quantized_bits(bits, integer, symmetric, keep_negative)(x)])
    result = f([test_values])[0]
    assert_allclose(result, expected_values, rtol=1e-05)
Example #19
def test_quantized_relu(bits, integer, use_sigmoid, test_values,
                        expected_values):
    """Test quantized_relu function."""
    x = K.placeholder(ndim=2)
    f = K.function([x], [quantized_relu(bits, integer, use_sigmoid)(x)])
    result = f([test_values])[0]
    assert_allclose(result, expected_values, rtol=1e-05)
Example #20
def test_quantized_relu_po2(bits, negative_slope, test_values,
                            expected_values):
    x = K.placeholder(ndim=2)
    f = K.function(
        [x], [quantized_relu_po2(bits, negative_slope=negative_slope)(x)])
    result = f([test_values])[0]
    assert_allclose(result, expected_values, rtol=1e-05)
Example #21
    def _build_model(self, content: np.ndarray, style: np.ndarray) -> tuple:
        """
        Build a synthesis model with the given content and style.

        Args:
            content: the content to fuse the artwork into
            style: the artwork to get the style from

        Returns:
            a tuple of:
            -   the constructed VGG19 model from the input images
            -   the canvas tensor for the synthesized image

        """
        # load the content image into Keras as a constant, it never changes
        content_tensor = K.constant(content, name='Content')
        # load the style image into Keras as a constant, it never changes
        style_tensor = K.constant(style, name='Style')
        # create a placeholder for the trained image, this variable changes
        canvas = K.placeholder(content.shape, name='Canvas')
        # combine the content, style, and canvas tensors along the frame
        # axis (0) into a 4D tensor of shape [3, height, width, channels]
        tensor = K.concatenate([content_tensor, style_tensor, canvas], axis=0)
        # build the model with the input tensor of content, style, and canvas
        model = VGG19(include_top=False, input_tensor=tensor, pooling='avg')

        return model, canvas
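A sketch of how the returned `model` and `canvas` would typically be consumed, mirroring the gradient/`K.function` pattern of the style-transfer examples above (`build_total_loss` is a hypothetical helper, not part of this class):

model, canvas = self._build_model(content, style)

# Map layer names to their outputs so content/style losses can pick specific layers.
layer_outputs = {layer.name: layer.output for layer in model.layers}

# Hypothetical: combine content, style, and total-variation terms into one scalar loss.
loss = build_total_loss(layer_outputs, canvas)

# Gradients of the loss with respect to the canvas drive the optimization.
grads = K.gradients(loss, canvas)
step_fn = K.function([canvas], [loss] + grads)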
Example #22
    def get_interpolated(self, real_img, fake_img):
        alpha = K.placeholder(shape=(None, 1, 1, 1))
        interpolated_img = Input(shape=self.img_shape,
                                 tensor=alpha * real_img +
                                 (1 - alpha) * fake_img)

        return interpolated_img, alpha
Example #23
def styleTransfer(cData, sData, tData):
    print("   Building transfer model.")
    contentTensor = K.variable(cData)
    styleTensor = K.variable(sData)
    genTensor = K.placeholder((1, CONTENT_IMG_H, CONTENT_IMG_W, 3))
    inputTensor = K.concatenate([contentTensor, styleTensor, genTensor],
                                axis=0)
    model = vgg19.VGG19(include_top=False,
                        weights="imagenet",
                        input_tensor=inputTensor)
    outputDict = dict([(layer.name, layer.output) for layer in model.layers])
    print("   VGG19 model loaded.")
    loss = 0.0
    styleLayerNames = [
        "block1_conv1", "block2_conv1", "block3_conv1", "block4_conv1",
        "block5_conv1"
    ]
    contentLayerName = "block5_conv2"
    print("   Calculating content loss.")
    contentLayer = outputDict[contentLayerName]
    contentOutput = contentLayer[0, :, :, :]
    genOutput = contentLayer[2, :, :, :]
    loss += CONTENT_WEIGHT * contentLoss(contentOutput, genOutput)
    print("   Calculating style loss.")
    for layerName in styleLayerNames:
        styleLayer = outputDict[layerName]
        styleOutput = styleLayer[1, :, :, :]
        genOutput = styleLayer[2, :, :, :]
        loss += (STYLE_WEIGHT / len(styleLayerNames)) * styleLoss(
            styleOutput, genOutput)
    loss += TOTAL_WEIGHT * totalLoss(genTensor)
    # Set up gradients of the total loss w.r.t. the generated image.
    gradient = K.gradients(loss, genTensor)
    # Create a K.function that outputs the loss and the gradients.
    print(type(gradient))
    outputs = [loss]
    outputs += gradient
    global f_outputs
    global x
    f_outputs = K.function([genTensor], outputs)
    x = cData
    print("   Beginning transfer.")
    for i in range(TRANSFER_ROUNDS):
        print("   Step %d." % i)
        # Perform one round of L-BFGS-B optimization on the flattened image.
        start_time = time.time()
        x, tLoss, ph = fmin_l_bfgs_b(evaluator.loss,
                                     x.flatten(),
                                     fprime=evaluator.grads,
                                     maxfun=20)

        print("      Loss: %f." % tLoss)
        img = deprocessImage(x.copy())
        filename = 'hello' + str(i)
        saveFile = filename + '.jpg'
        imsave(saveFile, img)
        end_time = time.time()
        print("      Image saved to \"%s\"." % saveFile)
        print('Iteration %d completed in %ds' % (i, end_time - start_time))
Example #24
	def _generate(self):
		model_path = os.path.expanduser(self._model_path)
		assert model_path.endswith(
			'.h5'), 'Keras model or weights must be a .h5 file'

		# load model, or construct model and load weights
		num_anchors = len(self._anchors)
		num_classes = len(self._class_names)
		try:
			self.yolo_model = load_model(model_path, compile=False)
		except:
			# make sure model, anchors and classes match
			self.yolo_model.load_weights(self._model_path)
		else:
			assert self.yolo_model.layers[-1].output_shape[-1] == \
				   num_anchors / len(self.yolo_model.output) * (
						   num_classes + 5), \
				'Mismatch between model and given anchor and class sizes'
		print(
			'*** {} model, anchors, and classes loaded.'.format(model_path))

		# generate output tensor targets for filtered bounding boxes.
		self.input_image_shape = K.placeholder(shape=(2,))
		boxes, scores, classes = eval(self.yolo_model.output, self._anchors,
									  len(self._class_names),
									  self.input_image_shape,
									  score_threshold=self._min_score,
									  iou_threshold=self._iou)
		return boxes, scores, classes
Example #25
    def generate(self):
        model_path = os.path.expanduser(self.model_path)
        assert model_path.endswith(
            '.h5'), 'Keras model or weights must be a .h5 file.'

        # Load model, or construct model and load weights.
        num_anchors = len(self.anchors)
        num_classes = len(self.class_names)

        my_input = Input(shape=[], dtype=tf.string)
        self.base_input = my_input
        my_input = Lambda(get_inputs, output_shape=(416, 416, 3))(my_input)
        my_input = Input(tensor=my_input)
        self.yolo_model = yolo_body(my_input, num_anchors // 3, num_classes)
        self.yolo_model.load_weights(
            self.model_path)  # make sure model, anchors and classes match

        print(
            'Detection model {} with {} anchors and {} classes loaded successfully.'
            .format(model_path, num_anchors, num_classes))

        self.input_image_shape = K.placeholder(shape=(2, ))
        boxes, scores, classes = yolo_eval(self.yolo_model.output,
                                           self.anchors,
                                           len(self.class_names),
                                           self.input_image_shape,
                                           score_threshold=self.score,
                                           iou_threshold=self.iou)
        return boxes, scores, classes
Example #26
    def discriminator_with_own_loss(self):
        z = tfk.Input(shape=(self.dim, ))

        e = K.placeholder(shape=(None, 1, 1, 1))
        f_img = self.generator(z)
        r_img = tfk.Input(shape=(self.img_shape))
        a_img = tfk.Input(shape=(self.img_shape),
                          tensor=e * r_img + (1 - e) * f_img)

        f_out = self.discriminator(f_img)
        r_out = self.discriminator(r_img)
        a_out = self.discriminator(a_img)

        loss_real = K.mean(r_out) / self.batch_size
        loss_fake = K.mean(f_out) / self.batch_size

        grad_mixed = K.gradients(a_out, [a_img])[0]
        norm_grad_mixed = K.sqrt(K.sum(K.square(grad_mixed), axis=[1, 2, 3]))
        grad_penalty = K.mean(K.square(norm_grad_mixed - 1))

        loss = loss_fake - loss_real + self.gp_weight * grad_penalty

        training_updates = tfk.optimizers.Adam(
            lr=1e-4, beta_1=0.5,
            beta_2=0.9).get_updates(loss, self.discriminator.trainable_weights)

        d_train = K.function([r_img, z, e], [loss_real, loss_fake],
                             training_updates)
        return d_train
Example #27
    def ctc_beam_decoder_loss(self, labels, logits):
        logits = tf.transpose(logits, (1, 0, 2))
        print(labels.shape, 'decode')
        if labels.shape[1] is None:
            print('init')
            return k.placeholder(shape=(1,), dtype=tf.float32)
            # labels = k.placeholder(shape=(self.batch_size, self.max_len+1), dtype=tf.int32)
        y_true, length = tf.split(labels, [(labels.shape[1] - 1), 1], 1)
        print(y_true.shape, length.shape)
        length = tf.squeeze(length, axis=1)
        predict, logprob = tf.nn.ctc_beam_search_decoder(
            logits,
            tf.fill([self.batch_size], self.frames - self.cutoff),
            beam_width=100)
        dict = {}
        for i in range(y_true.shape[0]):
            print(length.shape, 'length')
            dict[i], dontcare = y_true[i][0:length[i]]
            print(dict[i].shape, 'dict')
        sparse_true = sparse_tuple_from(dict)

        # dense_decoded = tf.sparse.to_dense(
        #         predict[0], name="dense_decoded"
        #     )
        # print(y_true.shape, dense_decoded)
        # return levenshtein(tf.cast(predict[0], tf.int32), y_true)
        return tf.reduce_mean(
            tf.edit_distance(tf.cast(predict[0], tf.int32), sparse_true))


Example #28
    def generate(self):
        model_path = os.path.expanduser(self.model_path)
        assert model_path.endswith(
            '.h5'), 'Keras model or weights must be a .h5 file.'

        # Load model, or construct model and load weights.
        num_anchors = len(self.anchors)
        num_classes = len(self.class_names)
        # is_tiny_version = num_anchors == 6  # default setting

        self.yolo_model = custom_yolo3_spp_body(Input(shape=(None, None, 3)),
                                                num_anchors // 3, num_classes)
        self.yolo_model.load_weights(
            self.model_path)  # make sure model, anchors and classes match

        print('{} model, anchors, and classes loaded.'.format(model_path))

        np.random.seed(10101)  # Fixed seed for consistent colors across runs.
        np.random.seed(None)  # Reset seed to default.

        # Generate output tensor targets for filtered bounding boxes.
        self.input_image_shape = K.placeholder(shape=(2, ))
        boxes, scores, classes = yolo_eval(self.yolo_model.output,
                                           self.anchors,
                                           len(self.class_names),
                                           self.input_image_shape,
                                           score_threshold=self.score,
                                           iou_threshold=self.iou)
        return boxes, scores, classes
Example #29
    def generate(self):
        self.score = 0.01
        self.iou = 0.5
        model_path = os.path.expanduser(self.model_path)
        assert model_path.endswith(
            '.h5'), 'Keras model or weights must be a .h5 file.'

        # Compute the number of anchors
        num_anchors = len(self.anchors)
        num_classes = len(self.class_names)

        # Load the model. If the saved model already contains the architecture,
        # load it directly; otherwise build the model first and then load the weights.
        self.yolo_model = yolo_body(Input(shape=(None, None, 3)),
                                    num_anchors // 2, num_classes)
        self.yolo_model.load_weights(self.model_path, by_name=True)
        print('{} model, anchors, and classes loaded.'.format(model_path))

        # Assign a different color to each class for drawing boxes
        hsv_tuples = [(x / len(self.class_names), 1., 1.)
                      for x in range(len(self.class_names))]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(
            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                self.colors))

        # Shuffle the colors
        np.random.seed(10101)
        np.random.shuffle(self.colors)
        np.random.seed(None)

        if self.eager:
            self.input_image_shape = Input([
                2,
            ], batch_size=1)
            inputs = [*self.yolo_model.output, self.input_image_shape]
            outputs = Lambda(yolo_eval,
                             output_shape=(1, ),
                             name='yolo_eval',
                             arguments={
                                 'anchors': self.anchors,
                                 'num_classes': len(self.class_names),
                                 'image_shape': self.model_image_size,
                                 'score_threshold': self.score,
                                 'eager': True,
                                 'max_boxes': self.max_boxes
                             })(inputs)
            self.yolo_model = Model(
                [self.yolo_model.input, self.input_image_shape], outputs)
        else:
            self.input_image_shape = K.placeholder(shape=(2, ))

            self.boxes, self.scores, self.classes = yolo_eval(
                self.yolo_model.output,
                self.anchors,
                num_classes,
                self.input_image_shape,
                max_boxes=self.max_boxes,
                score_threshold=self.score,
                iou_threshold=self.iou)
Example #30
def get_placeholder(dtype, shape, name=None):
    """
    Returns a placeholder.

    Used to abstract underlying implementation (tf or K)
    """
    return K.placeholder(dtype=dtype, shape=shape, name=name)
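A one-line usage sketch: the result behaves exactly like `K.placeholder`, so it can feed a `K.function` as in the examples above (TF1-style graph mode assumed).

import numpy as np
from tensorflow.keras import backend as K

x = get_placeholder(dtype="float32", shape=(None, 10), name="x")
double = K.function([x], [2.0 * x])
print(double([np.ones((3, 10), dtype="float32")])[0].shape)  # (3, 10)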