Example #1
    def graph_(self, x_input, y_input):
        # sqrt_m
        sqrt_m = tf.sqrt(tf.to_float(x_input.get_shape().as_list()[1]))
        # boundary
        scaled_max_extended = tf.maximum(
            tf.multiply(self.scaled_clip_max,
                        tf.to_float(self.insertion_perm_array))
            +  # upper bound for positions allowing perturbations
            tf.multiply(self.scaled_clip_min,
                        1. - tf.to_float(self.insertion_perm_array)
                        ),  # may be useful to reset the lower bound
            x_input  # upper bound for positions where no perturbation is allowed
        )
        scaled_min_extended = tf.minimum(
            tf.multiply(self.scaled_clip_min,
                        tf.to_float(self.removal_perm_array)) +
            tf.multiply(self.scaled_clip_max,
                        1. - tf.to_float(self.removal_perm_array)), x_input)

        def _cond(i, _):
            return tf.less(i, self.iterations)

        def _body(i, x_adv_tmp):
            loss = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=self.model.get_logits(x_adv_tmp), labels=y_input))
            grad = tf.gradients(loss, x_adv_tmp)[0]

            grad_l2norm = tf.sqrt(
                tf.reduce_sum(tf.square(grad), axis=-1, keepdims=True))

            perturbations = tf.cast(
                (tf.greater(sqrt_m *
                            (1. - 2 * x_adv_tmp) * grad, grad_l2norm)),
                tf.float32)
            x_adv_tmp = x_adv_tmp + perturbations
            x_adv_tmp = tf.clip_by_value(x_adv_tmp,
                                         clip_value_min=scaled_min_extended,
                                         clip_value_max=scaled_max_extended)

            return i + 1, x_adv_tmp

        _, adv_x_batch = tf.while_loop(_cond,
                                       _body, (tf.zeros([]), x_input),
                                       maximum_iterations=self.iterations,
                                       back_prop=False)

        # map to discrete domain
        if self.normalizer is not None:
            # projection in the discrete domain with the threshold: 0.5
            x_adv = tf.rint(
                tf.divide(adv_x_batch - self.normalizer.min_,
                          self.normalizer.scale_))
            # re-project back
            x_adv_normalized = tf.multiply(
                x_adv, self.normalizer.scale_) + self.normalizer.min_
        else:
            x_adv_normalized = tf.rint(adv_x_batch)

        return x_adv_normalized
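The trailing block above snaps the continuous adversarial batch back onto the discrete feature grid by rounding in the original (pre-normalization) units. A minimal NumPy sketch of that projection, using hypothetical MinMaxScaler-style `scale_`/`min_` values rather than the author's actual normalizer:

import numpy as np

scale_, min_ = 0.5, 0.1                             # hypothetical normalizer attributes
adv_scaled = np.array([0.10, 0.38, 0.62])           # continuous adversarial values in scaled space
orig_units = np.rint((adv_scaled - min_) / scale_)  # back to original units, rounded at 0.5
snapped = orig_units * scale_ + min_                # re-projected onto the discrete grid
print(orig_units)  # [0. 1. 1.]
print(snapped)     # [0.1 0.6 0.6]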
Example #2
def _smallest_size_at_least(height, width, smallest_side):
    """Computes new shape with the smallest side equal to `smallest_side`.

  Computes new shape with the smallest side equal to `smallest_side` while
  preserving the original aspect ratio.

  Args:
    height: an int32 scalar tensor indicating the current height.
    width: an int32 scalar tensor indicating the current width.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.

  Returns:
    new_height: an int32 scalar tensor indicating the new height.
    new_width: an int32 scalar tensor indicating the new width.
  """
    smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)

    height = tf.to_float(height)
    width = tf.to_float(width)
    smallest_side = tf.to_float(smallest_side)

    scale = tf.cond(tf.greater(height, width), lambda: smallest_side / width,
                    lambda: smallest_side / height)
    new_height = tf.to_int32(tf.rint(height * scale))
    new_width = tf.to_int32(tf.rint(width * scale))
    return new_height, new_width
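A quick usage sketch of the helper above, assuming TensorFlow 1.x (the snippet already relies on tf.to_float / session-era APIs):

import tensorflow as tf

h, w = tf.constant(480), tf.constant(640)
new_h, new_w = _smallest_size_at_least(h, w, smallest_side=256)
with tf.Session() as sess:
    print(sess.run([new_h, new_w]))  # [256, 341]: the shorter side becomes 256, aspect ratio preserved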
Example #3
def age_group_accuracy(y_true, y_pred):
    array = np.array([0] * 13 + [1] * 2 + [2] * 13000000)

    age_to_group = K.variable(value=array, dtype='int32', name='age_to_group')
    ages_true = tf.gather(age_to_group, tf.cast(tf.rint(y_true), tf.int32))
    ages_pred = tf.gather(age_to_group, tf.cast(tf.rint(y_pred), tf.int32))
    return K.mean(K.equal(ages_true, ages_pred), axis=-1)
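The lookup array maps a rounded age to one of three coarse groups (0-12, 13-14, 15 and up). A truncated NumPy version of the same gather-on-rounded-ages idea (the shortened table is just for illustration):

import numpy as np

age_to_group = np.array([0] * 13 + [1] * 2 + [2] * 10)  # shortened lookup table
ages = np.rint([4.3, 13.6, 20.0]).astype(np.int32)      # [4, 14, 20]
print(age_to_group[ages])                               # [0 1 2]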
Example #4
    def graph(self, x_input, y_input):
        # sqrt_m
        sqrt_m = tf.sqrt(tf.to_float(x_input.get_shape().as_list()[1]))

        loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=self.model.get_logits(x_input), labels=y_input))
        grad = tf.gradients(loss, x_input)[0]

        grad_l2norm = tf.sqrt(
            tf.reduce_sum(tf.square(grad), axis=-1, keepdims=True))

        perturbations = tf.cast(
            (tf.greater(sqrt_m * (1. - 2 * x_input) * grad, grad_l2norm)),
            tf.float32)
        x_adv_tmp = x_input + perturbations
        x_adv_tmp = tf.clip_by_value(x_adv_tmp,
                                     clip_value_min=self.scaled_min_extended,
                                     clip_value_max=self.scaled_max_extended)

        # map to discrete domain
        if self.normalizer is not None:
            # projection in the discrete domain with the threshold: 0.5
            x_adv = tf.rint(
                tf.divide(x_adv_tmp - self.normalizer.min_,
                          self.normalizer.scale_))
            # re-project back
            x_adv_normalized = tf.multiply(
                x_adv, self.normalizer.scale_) + self.normalizer.min_
        else:
            x_adv_normalized = tf.rint(x_adv_tmp)

        return x_adv_normalized
Example #5
def _smallest_size_at_least(height, width, smallest_side):
  """Computes new shape with the smallest side equal to `smallest_side`.

  Computes new shape with the smallest side equal to `smallest_side` while
  preserving the original aspect ratio.

  Args:
    height: an int32 scalar tensor indicating the current height.
    width: an int32 scalar tensor indicating the current width.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.

  Returns:
    new_height: an int32 scalar tensor indicating the new height.
    new_width: an int32 scalar tensor indicating the new width.
  """
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)

  height = tf.to_float(height)
  width = tf.to_float(width)
  smallest_side = tf.to_float(smallest_side)

  scale = tf.cond(tf.greater(height, width),
                  lambda: smallest_side / width,
                  lambda: smallest_side / height)
  new_height = tf.to_int32(tf.rint(height * scale))
  new_width = tf.to_int32(tf.rint(width * scale))
  return new_height, new_width
Example #6
def aspect_preserve_resize(image, resize_side_min=256, resize_side_max=512, is_training=False):
    """

    :param image_tensor:
    :param output_height:
    :param output_width:
    :param resize_side_min:
    :param resize_side_max:
    :return:
    """
    if is_training:
        smaller_side = tf.random_uniform([], minval=resize_side_min, maxval=resize_side_max, dtype=tf.float32)
    else:
        smaller_side = resize_side_min

    shape = tf.shape(image)

    height, width = tf.cast(shape[0], dtype=tf.float32), tf.cast(shape[1], dtype=tf.float32)

    resize_scale = tf.cond(pred=tf.greater(height, width),
                           true_fn=lambda : smaller_side / width,
                           false_fn=lambda : smaller_side / height)

    new_height = tf.cast(tf.rint(height * resize_scale), dtype=tf.int32)
    new_width = tf.cast(tf.rint(width * resize_scale), dtype=tf.int32)

    resize_image = tf.image.resize(image, size=(new_height, new_width))

    return tf.cast(resize_image, dtype=image.dtype)
Example #7
def _random_scale(image_list):
    """Computes new shape with the smallest side equal to `smallest_side`.

  Computes new shape with the smallest side equal to `smallest_side` while
  preserving the original aspect ratio.

  Args:
    height: an int32 scalar tensor indicating the current height.
    width: an int32 scalar tensor indicating the current width.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.

  Returns:
    new_height: an int32 scalar tensor indicating the new height.
    new_width: and int32 scalar tensor indicating the new width.
  """

    outputs = []
    for image in image_list:
        ratio = tf.random_uniform([], minval=0.25, maxval=1, dtype=tf.float32)
        square_720 = 720
        rescale_height = tf.to_int32(tf.rint(square_720 * ratio))
        rescale_width = tf.to_int32(tf.rint(square_720 * ratio))

        image = tf.expand_dims(image, 0)
        resized_image = tf.image.resize_bilinear(
            image, [rescale_height, rescale_width], align_corners=False)
        resized_image = tf.squeeze(resized_image, axis=0)

        # paddings = [[pad_height, pad_height], [pad_width, pad_width]]
        outputs.append(resized_image)

    return outputs
Example #8
def _smallest_size_at_least(height, width, smallest_side):
    smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
    height = tf.to_float(height)
    width = tf.to_float(width)
    smallest_side = tf.to_float(smallest_side)
    scale = tf.cond(tf.greater(height, width), lambda: smallest_side / width, lambda: smallest_side / height)
    new_height = tf.to_int32(tf.rint(height * scale))
    new_width = tf.to_int32(tf.rint(width * scale))
    return new_height, new_width
Example #9
def compute_new_shape_bilinear(height, width, resolution):
    height = tf.to_float(height)
    width = tf.to_float(width)
    resolution = tf.to_float(resolution)

    scale = [resolution / height, resolution / width]
    new_height = tf.to_int32(tf.rint(height * scale[0]))
    new_width = tf.to_int32(tf.rint(width * scale[1]))

    translation = [tf.to_float(0), tf.to_float(0)]
    return new_height, new_width, scale, translation
Example #10
def eval_haps(hap_pred, hap_real, hap_len, batch_size=100):
    hap_pred = tf.reshape(hap_pred, [batch_size, hap_len])
    hap_accuracy = tf.reduce_mean(
        tf.reduce_sum(
            tf.cast(tf.equal(tf.rint(hap_pred), hap_real), dtype=tf.float32),
            -1), -1) * tf.divide(100, hap_len)
    _, auc = tf.metrics.auc(labels=hap_real, predictions=hap_pred)
    full_hap_accuracy = tf.reduce_mean(
        tf.cast(tf.reduce_all((tf.equal(tf.rint(hap_pred), hap_real)), -1),
                dtype=tf.float32), -1) * 100
    return 100 - hap_accuracy, auc, 100 - full_hap_accuracy
Example #11
def lin_8b_quant(w, min_rng=-0.5, max_rng=0.5):
    min_clip = tf.rint(min_rng * 256 / (max_rng - min_rng))
    max_clip = tf.rint(max_rng * 256 / (max_rng - min_rng) - 1)

    wq = 256.0 * w / (max_rng - min_rng)  # to expand [min, max] to [-128, 128]
    wq = tf.rint(wq)  # integer (quantization)
    wq = tf.clip_by_value(wq, min_clip,
                          max_clip)  # fit into 256 linear quantization
    wq = wq / 256.0 * (max_rng - min_rng
                       )  # back to quantized real number, not integer
    wclip = tf.clip_by_value(w, min_rng, max_rng)  # linear value w/ clipping
    return wclip + tf.stop_gradient(wq - wclip)
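A NumPy sketch of the forward quantization path above, with hypothetical weight values. The final `wclip + tf.stop_gradient(wq - wclip)` line is a straight-through estimator: the forward pass evaluates to the quantized `wq`, while gradients flow through the clipped real-valued `wclip`.

import numpy as np

min_rng, max_rng = -0.5, 0.5
w = np.array([-0.49, 0.123, 0.6])                            # 0.6 falls outside the clip range
min_clip = np.rint(min_rng * 256 / (max_rng - min_rng))      # -128
max_clip = np.rint(max_rng * 256 / (max_rng - min_rng) - 1)  #  127
wq = np.clip(np.rint(256.0 * w / (max_rng - min_rng)), min_clip, max_clip)
wq = wq / 256.0 * (max_rng - min_rng)                        # back onto a 1/256 grid
print(wq)  # [-0.48828125  0.12109375  0.49609375]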
Example #12
def _smallest_size_at_least(height, width, smallest_side):
    """Computes new shape with the smallest side equal to `smallest_side`."""
    smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)

    height = tf.to_float(height)
    width = tf.to_float(width)
    smallest_side = tf.to_float(smallest_side)

    scale = tf.cond(tf.greater(height, width), lambda: smallest_side / width,
                    lambda: smallest_side / height)
    new_height = tf.to_int32(tf.rint(height * scale))
    new_width = tf.to_int32(tf.rint(width * scale))
    return new_height, new_width
Example #13
def preprocess_for_eval(image,
                        scale=1.0,
                        out_shape=None,
                        data_format='NHWC',
                        scope='preprocess_eval'):
    """Preprocess an image for evaluation.

    Args:
        image: A `Tensor` representing an image of arbitrary size.
        out_shape: Output shape after pre-processing (if resize != None)
        resize: Resize strategy.

    Returns:
        A preprocessed image.
    """
    with tf.name_scope(scope):
        if image.get_shape().ndims != 3:
            raise ValueError('Input must be of size [height, width, C>0]')

        image = tf.to_float(image)
        image = tf_image_whitened(image, [_R_MEAN, _G_MEAN, _B_MEAN])

        if out_shape is None:
            i_shape = tf.to_float(tf.shape(image))
            shape = [
                tf.cast(i_shape[0] * scale, tf.int32),
                tf.cast(i_shape[1] * scale, tf.int32)
            ]
            image = resize_image(image,
                                 shape,
                                 method=tf.image.ResizeMethod.BILINEAR,
                                 align_corners=False)
            image_shape = tf.shape(image)
            image_h, image_w = image_shape[0], image_shape[1]
            image_h = tf.cast(tf.rint(image_h / 32) * 32, tf.int32)
            image_w = tf.cast(tf.rint(image_w / 32) * 32, tf.int32)
            image = resize_image(image, [image_h, image_w],
                                 method=tf.image.ResizeMethod.BILINEAR,
                                 align_corners=False)
        else:
            image = resize_image(image,
                                 out_shape,
                                 method=tf.image.ResizeMethod.BILINEAR,
                                 align_corners=False)

        # Image data format.
        if data_format == 'NCHW':
            image = tf.transpose(image, perm=(2, 0, 1))
        return image
Example #14
def compute_new_shape(height, width, resolution):
    height = tf.to_float(height)
    width = tf.to_float(width)
    resolution = tf.to_float(resolution)

    scale = tf.cond(tf.greater(height, width), lambda: resolution / height,
                    lambda: resolution / width)
    scale = [scale, scale]
    new_height = tf.to_int32(tf.rint(height * scale[0]))
    new_width = tf.to_int32(tf.rint(width * scale[1]))

    translation = (resolution -
                   [tf.to_float(new_height),
                    tf.to_float(new_width)]) / 2.0
    return new_height, new_width, scale, translation
Example #15
    def attack_graph(self, x_input, y_input, using_normalizer=True):
        def _cond(i, *_):
            return tf.less(i, self.iterations)

        init_state = self.optimizer.init_state(
            [tf.zeros_like(x_input, dtype=tf.float32)])
        nest = tf.contrib.framework.nest

        def _body(i, x_adv_tmp, flat_optim_state):
            curr_state = nest.pack_sequence_as(structure=init_state,
                                               flat_sequence=flat_optim_state)

            def _loss_fn_wrapper(x_):
                return -1 * tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=self.model.get_logits(x_), labels=y_input)

            x_adv_tmp_list, new_optim_state = self.optimizer.minimize(
                _loss_fn_wrapper, [x_adv_tmp], curr_state)

            x_adv_tmp_clip = tf.clip_by_value(
                x_adv_tmp_list[0],
                clip_value_min=self.scaled_clip_min,
                clip_value_max=self.scaled_clip_max)

            return i + 1, x_adv_tmp_clip, nest.flatten(new_optim_state)

        flat_init_state = nest.flatten(init_state)
        _, adv_x_batch, _ = tf.while_loop(
            _cond,
            _body,
            (tf.zeros([]), x_input, flat_init_state),
            maximum_iterations=self.iterations,
            back_prop=False
        )

        # map to discrete domain
        if using_normalizer:
            x_adv = tf.rint(
                tf.divide(adv_x_batch - self.normalizer.min_,
                          self.normalizer.scale_)
            )  # projection into the discrete domain with the deterministic threshold 0.5
            # project back
            x_adv_normalized = tf.multiply(
                x_adv, self.normalizer.scale_) + self.normalizer.min_
        else:
            x_adv_normalized = tf.rint(adv_x_batch)

        return x_adv_normalized
Example #16
def make_eval_op(enc, dec):
	# MNIST is black-on-white; 0 is white background and 255 is black foreground.
	x_ph = tf.placeholder(tf.uint8, [batch_size, img_size])
	x    = tf.cast(tf.rint(x_ph / 255), tf.float32)

	zs       = [batch_size, latent_code_count, latent_classes_per_code]
	z_logits = tf.reshape(enc(x), zs)

	"""
	For the ELB we actually want to minimize on the validation set, both the approximate
	posterior and the prior are given by categorical distributions.
	"""
	q = tf.contrib.distributions.OneHotCategorical(logits=z_logits)
	z = tf.cast(q.sample(), tf.float32)

	x_logits = dec(tf.reshape(z, [batch_size, -1]))
	logprobs = -tf.nn.sigmoid_cross_entropy_with_logits(labels=x, logits=x_logits)
	assert logprobs.get_shape().as_list() == [batch_size, img_size]
	llp = tf.reduce_sum(logprobs, axis=1)

	probs = tf.nn.softmax(z_logits, axis=2)
	kl_qp = -tf.nn.softmax_cross_entropy_with_logits_v2(labels=probs, logits=z_logits)
	assert kl_qp.get_shape().as_list() == [batch_size, latent_code_count]
	kl_qp = kl_qp + np.log(latent_classes_per_code)
	kl_qp = assert_non_negative(kl_qp)

	kl_qp = tf.reduce_sum(kl_qp, axis=1)
	elb   = llp - kl_qp
	return x_ph, elb
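The `kl_qp + np.log(latent_classes_per_code)` step effectively uses the identity KL(q ‖ Uniform(K)) = log K - H(q), which is why the result can be asserted non-negative. A quick NumPy check of that identity with an illustrative distribution:

import numpy as np

K_classes = 4
q = np.array([0.7, 0.1, 0.1, 0.1])
kl_direct = np.sum(q * np.log(q * K_classes))               # KL(q || uniform over K_classes)
kl_via_entropy = np.log(K_classes) + np.sum(q * np.log(q))  # log K - H(q)
print(kl_direct, kl_via_entropy)                            # both ~0.446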
Example #17
def quantize(x):
    abs_value = tf.abs(x)
    vmax = tf.reduce_max(abs_value)
    s = tf.divide(vmax, 127.)
    x = tf.divide(x, s)
    x = tf.rint(x)
    return x, s
Example #18
def quant_add(i1, i1_s, i1_z, i2, i2_s, i2_z, i3_s, i3_z):
    '''
    i1: input_1, uint8
    i1_s: input_1_scale, scale factor of input 1, float32
    i1_z: input_1_zero, zero point of input 1, uint8

    i2: input_2, uint8
    i2_s: input_2_scale, scale factor of input 2, float32
    i2_z: input_2_zero, zero point of input 2, uint8

    i3_s: input_3_scale, scale factor of the output, float32
    i3_z: input_3_zero, zero point of the output, uint8

    The result is returned as uint8.
    '''

    r_1 = (i1 - i1_z) * i1_s / i3_s
    #    r_1 = tf.rint(r_1)

    r_2 = (i2 - i2_z) * i2_s / i3_s
    #    r_2 = tf.rint(r_2)

    temp = r_1 + r_2
    temp = tf.rint(temp)

    temp = temp + i3_z

    temp = tf.clip_by_value(temp, 0, 255)
    #    temp = tf.rint(temp)

    return temp
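To make the scale/zero-point arithmetic concrete, here is a small worked example with hypothetical quantization parameters (plain NumPy standing in for the TF ops; the numbers are illustrative, not from the original code):

import numpy as np

i1, i1_s, i1_z = 130, 0.02, 128   # real value: (130 - 128) * 0.02 = 0.04
i2, i2_s, i2_z = 150, 0.01, 100   # real value: (150 - 100) * 0.01 = 0.50
i3_s, i3_z = 0.05, 10             # output scale and zero point

r = (i1 - i1_z) * i1_s / i3_s + (i2 - i2_z) * i2_s / i3_s
q = np.clip(np.rint(r) + i3_z, 0, 255)
print(q, (q - i3_z) * i3_s)       # 21.0 0.55 -- close to the true sum 0.04 + 0.50 = 0.54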
Example #19
def matrix_abs():
    isess = tf.InteractiveSession()
    X = tf.Variable(tf.eye(3))
    W = tf.Variable(tf.random_normal(shape=(3, 3)))

    X.initializer.run()
    W.initializer.run()
    logger.info("X\n%s" % X.eval())
    logger.info("W\n%s" % W.eval())
    # negation
    logger.info("tf.negative(X)\n%s" % tf.negative(X).eval())
    # sign of each element
    logger.info("tf.sign(W)\n%s" % tf.sign(W).eval())
    logger.info("tf.reciprocal(W)\n%s" % tf.reciprocal(W).eval())
    logger.info("tf.abs(W)\n%s" % tf.abs(W).eval())
    logger.info("tf.round(W)\n%s" % tf.round(W).eval())

    # round up (ceiling)
    logger.info("tf.ceil(W)\n%s" % tf.ceil(W).eval())

    # round down (floor)
    logger.info("tf.floor(W)\n%s" % tf.floor(W).eval())

    # round to the nearest integer
    logger.info("tf.rint(W)\n%s" % tf.rint(W).eval())
    logger.info("tf.maximum(W, X)\n%s" % tf.maximum(W, X).eval())
    logger.info("tf.minimum(W, X)\n%s" % tf.minimum(W, X).eval())
    isess.close()
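One detail worth noting when reading the logged output: `tf.rint`, like `np.rint` and C's `rint`, rounds halfway cases to the nearest even integer rather than away from zero.

import numpy as np

print(np.rint([0.5, 1.5, 2.5, -0.5]))  # [ 0.  2.  2. -0.]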
Example #20
def quant_depthwise_conv(i, w, b, padding, stride, i_z, w_z, r_z, m):
    '''
    i: input, uint8
    w: weight, uint8
    b: bias added after the convolution
    padding: border padding mode
    stride: stride
    i_z: input_zero, integer value corresponding to a floating-point input of 0, uint8
    w_z: weight_zero, integer value corresponding to a floating-point weight of 0, uint8
    r_z: result_zero, integer value corresponding to a floating-point output of 0, uint8
    m: rescaling factor; the convolution result is multiplied by m, float32

    The result is returned as uint8.
    '''

    #    m = round(m,7)
    #    print(m)
    #    m = tf.cast(m, tf.float32)

    temp = tf.nn.depthwise_conv2d(i - i_z, w - w_z, [1, stride, stride, 1],
                                  padding) + b

    #    temp = tf.rint(temp)

    temp = tf.multiply(temp, m)

    temp = tf.rint(temp)

    temp = temp + r_z

    temp = tf.clip_by_value(temp, 0, 255)

    #    temp = tf.rint(temp)

    return temp
Example #21
def compute_accuracy(label, logits):
    temp_sim = tf.subtract(tf.ones_like(logits),
                           tf.rint(logits),
                           name="temp_sim")  # auto threshold 0.5
    correct_predictions = tf.equal(temp_sim, label)
    return tf.reduce_mean(tf.cast(correct_predictions, "float"),
                          name="accuracy")
Example #22
def create_class_loss(gene_output, true_output):
    #import classifier
    #hook up graph
    #saver = tf.train.import_meta_graph('checkpoint_64/model.ckpt.meta')
    #class_graph=tf.get_default_graph()
    f = gfile.FastGFile("checkpoint_64/frozen_graph.pb", 'rb')
    class_graph = tf.GraphDef()
    class_graph.ParseFromString(f.read())
    y_pred, x = tf.import_graph_def(class_graph,
                                    return_elements=['y_pred:0', 'x:0'],
                                    name='')
    #y_pred = class_graph.get_tensor_by_name("y_pred:0")
    #x = class_graph.get_tensor_by_name("x:0")
    true_output = tf.reshape(true_output, [FLAGS.batch_size, -1])
    #calculate first labels with true_output (all in tensors)
    original_labels = tf.contrib.graph_editor.graph_replace(
        y_pred, {x: true_output})
    gene_output = tf.reshape(gene_output, [FLAGS.batch_size, -1])
    #calculate labels of generated output
    gene_labels = tf.contrib.graph_editor.graph_replace(
        y_pred, {x: gene_output})
    #round to nearest integer to get labels (i.e. [.3,.7] -> [0,1])
    class_labels = tf.rint(original_labels)
    #calculate softmax cross entropy
    loss = tf.nn.softmax_cross_entropy_with_logits(logits=gene_labels,
                                                   labels=class_labels)
    loss = tf.reduce_mean(loss)
    return loss
Example #23
    def __init__(self, max_sequence_len, vocabulary_size, main_cfg, model_cfg, loss_function):
        self.x1 = tf.placeholder(dtype=tf.int32, shape=[None, max_sequence_len])
        self.x2 = tf.placeholder(dtype=tf.int32, shape=[None, max_sequence_len])
        self.is_training = tf.placeholder(dtype=tf.bool)
        self.labels = tf.placeholder(dtype=tf.int32, shape=[None, 1])
        self.sentences_lengths = tf.placeholder(dtype=tf.int32, shape=[None])

        self.debug = None
        self.debug_vars = dict()

        self.embedding_size = main_cfg['PARAMS'].getint('embedding_size')
        self.learning_rate = main_cfg['TRAINING'].getfloat('learning_rate')

        with tf.variable_scope('embeddings'):
            word_embeddings = tf.get_variable('word_embeddings', [vocabulary_size, self.embedding_size])
            self.embedded_x1 = tf.gather(word_embeddings, self.x1)
            self.embedded_x2 = tf.gather(word_embeddings, self.x2)

        with tf.variable_scope('siamese'):
            self.predictions = self.siamese_layer(max_sequence_len, model_cfg)

        with tf.variable_scope('loss'):
            self.loss = loss_function(self.labels, self.predictions)
            self.opt = optimize(self.loss, self.learning_rate)

        with tf.variable_scope('metrics'):
            self.temp_sim = tf.rint(self.predictions)
            self.correct_predictions = tf.equal(self.temp_sim, tf.to_float(self.labels))
            self.accuracy = tf.reduce_mean(tf.to_float(self.correct_predictions))

        with tf.variable_scope('summary'):
            tf.summary.scalar("loss", self.loss)
            tf.summary.scalar("accuracy", self.accuracy)
            self.summary_op = tf.summary.merge_all()
Example #24
    def __init__(
        self, sequence_length, vocab_size, embedding_size, hidden_units, l2_reg_lambda, batch_size, trainableEmbeddings):

        # Placeholders for input, output and dropout
        self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
        self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
        self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0, name="l2_loss")
          
        # Embedding layer
        with tf.name_scope("embedding"):
            self.W = tf.Variable(
                tf.constant(0.0, shape=[vocab_size, embedding_size]),
                trainable=trainableEmbeddings,name="W")
            self.embedded_words1 = tf.nn.embedding_lookup(self.W, self.input_x1)
            self.embedded_words2 = tf.nn.embedding_lookup(self.W, self.input_x2)
        print(self.embedded_words1)
        # Build the stacked-RNN representations for both inputs
        with tf.name_scope("output"):
            self.out1=self.stackedRNN(self.embedded_words1, self.dropout_keep_prob, "side1", embedding_size, sequence_length, hidden_units)
            self.out2=self.stackedRNN(self.embedded_words2, self.dropout_keep_prob, "side2", embedding_size, sequence_length, hidden_units)
            self.distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.out1,self.out2)),1,keep_dims=True))
            self.distance = tf.div(self.distance, tf.add(tf.sqrt(tf.reduce_sum(tf.square(self.out1),1,keep_dims=True)),tf.sqrt(tf.reduce_sum(tf.square(self.out2),1,keep_dims=True))))
            self.distance = tf.reshape(self.distance, [-1], name="distance")
        with tf.name_scope("loss"):
            self.loss = self.contrastive_loss(self.input_y,self.distance, batch_size)
        #### Accuracy computation is outside of this class.
        with tf.name_scope("accuracy"):
            self.temp_sim = tf.subtract(tf.ones_like(self.distance),tf.rint(self.distance), name="temp_sim") #auto threshold 0.5
            correct_predictions = tf.equal(self.temp_sim, self.input_y)
            self.accuracy=tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
Example #25
def reduce_precision_tf(x, npp):
    """
    Reduce the precision of image, the tensorflow version.
    """
    npp_int = npp - 1
    x_int = tf.rint(tf.multiply(x, npp_int))
    x_float = tf.div(x_int, npp_int)
    return x_float
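A quick numeric check of the precision-reduction formula (NumPy mirror of the two TF ops; values chosen for illustration):

import numpy as np

npp = 4
x = np.array([0.10, 0.40, 0.80])
print(np.rint(x * (npp - 1)) / (npp - 1))  # [0.         0.33333333 0.66666667] -- 4 levels in [0, 1]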
Example #26
def build_accuracy(logits, labels, mask, loss_type):
    mask = tf.cast(mask, tf.float32)
    if loss_type == 'contrastive_loss':
        temp_sim = tf.subtract(tf.ones_like(logits),
                               tf.rint(logits),
                               name="temp_sim")  #auto threshold 0.5
        correct = tf.equal(tf.cast(temp_sim, tf.float32),
                           tf.cast(labels, tf.float32))
        accuracy = tf.reduce_sum(tf.cast(correct, tf.float32) *
                                 mask) / (1e-10 + tf.reduce_sum(mask))
    elif loss_type == 'exponent_neg_manhattan_distance_mse':
        temp_sim = tf.rint(logits)
        correct = tf.equal(tf.cast(temp_sim, tf.float32),
                           tf.cast(labels, tf.float32))
        accuracy = tf.reduce_sum(tf.cast(correct, tf.float32) *
                                 mask) / (1e-10 + tf.reduce_sum(mask))
    else:
        raise ValueError('unsupported loss_type: %s' % loss_type)
    return accuracy
Example #27
    def _train_trans_adj_loss(self):
        """MSE loss for adjacency matrix generator
        Use:
            self.pred_trans_adjs: predicted adjacency matrices # [batch_size, max_seq_len, max_seq_len]
            self.adjs:      target adjacency matrices    # [batch_size, max_seq_len, max_seq_len]
        Create:
            self.loss_trans_adj, self.accu_trans_adj
        """
        pred_trans_adjs_sigmoid = tf.math.sigmoid(self.pred_trans_adjs)
        self.loss_trans_adj = tf.losses.mean_squared_error(
            labels=self.adjs, predictions=pred_trans_adjs_sigmoid)

        self.accu_trans_adj = tx.evals.accuracy(
            labels=self.adjs[:, 1:, 1:],
            preds=tf.rint(
                pred_trans_adjs_sigmoid)  # round the sigmoid outputs to 0/1
        )
        self.pred_trans_adjs_binary = tf.rint(pred_trans_adjs_sigmoid)
Example #28
def prepareImage(image, config_dict):
    # get preprocess info
    image_size = config_dict['DATASET']['IMAGE_SIZE']

    # get dataset mean std info
    output_paras = config_dict['OUTPUT']
    experiment_base_dir = os.path.join(output_paras['OUTPUT_SAVE_DIR'],
                                       output_paras['EXPERIMENT_NAME'])
    model_save_dir = os.path.join(experiment_base_dir, 'weights')
    mean_std_file = os.path.join(model_save_dir, 'dataset_mean_var.txt')
    dataset_rgb_mean, dataset_rgb_std = dataset_utils.load_dataset_mean_std_file(
        mean_std_file)
    r_mean, g_mean, b_mean = dataset_rgb_mean
    r_std, g_std, b_std = dataset_rgb_std

    # Define the preprocessing pipeline.
    # After deployment, a base64-encoded string can be passed in directly to get the result.
    img_decoded = tf.image.decode_png(image, channels=3)
    #img_decoded = tf.image.decode_jpeg(image, channels=3)

    # Tensor-based preprocessing consistent with this demo: keep the aspect ratio, resize the longer side to 224, then pad to 224*224.
    shape = tf.shape(img_decoded)
    height = tf.to_float(shape[0])
    width = tf.to_float(shape[1])
    scale = tf.cond(tf.greater(height, width), lambda: image_size / height,
                    lambda: image_size / width)
    new_height = tf.to_int32(tf.rint(height * scale))
    new_width = tf.to_int32(tf.rint(width * scale))
    resized_image = tf.image.resize_images(
        img_decoded, [new_height, new_width],
        method=tf.image.ResizeMethod.BILINEAR)

    # normalization
    R = tf.ones([new_height, new_width, 1], dtype=tf.float32) * r_mean
    G = tf.ones([new_height, new_width, 1], dtype=tf.float32) * g_mean
    B = tf.ones([new_height, new_width, 1], dtype=tf.float32) * b_mean
    rgb_img_mean = tf.concat([R, G, B], axis=2)
    img_centered = tf.subtract(resized_image, rgb_img_mean)
    img_normalize = tf.divide(img_centered, [r_std, g_std, b_std])
    #img_normalize = tf.cast(img_normalize, dtype=tf.float32)

    padd_image = tf.image.resize_image_with_crop_or_pad(
        img_normalize, image_size, image_size)
    return padd_image
Example #29
    def __init__(self, params, word2vec, features, labels, training=False):

        len1, len2, s1, s2 = features
        embed_dim = params['embed_dim']
        hidden_size = params['hidden_size']
        dropout = params['dropout']
        learning_rate = params['learning_rate']

        K.set_learning_phase(training)

        embedding = tf.get_variable("word2vec",
                                    initializer=word2vec,
                                    trainable=False)
        with tf.device('/cpu:0'):
            s1 = tf.nn.embedding_lookup(embedding, s1)
            s2 = tf.nn.embedding_lookup(embedding, s2)

        s1 = TimeDistributed(Dense(embed_dim, activation='relu'))(s1)
        s1 = Lambda(lambda x: K.max(x, axis=1), output_shape=(embed_dim, ))(s1)

        s2 = TimeDistributed(Dense(embed_dim, activation='relu'))(s2)
        s2 = Lambda(lambda x: K.max(x, axis=1), output_shape=(embed_dim, ))(s2)

        merged = concatenate([s1, s2])
        merged = Dense(hidden_size, activation='relu')(merged)
        merged = Dropout(dropout)(merged)
        merged = BatchNormalization()(merged)
        merged = Dense(hidden_size, activation='relu')(merged)
        merged = Dropout(dropout)(merged)
        merged = BatchNormalization()(merged)
        merged = Dense(hidden_size, activation='relu')(merged)
        merged = Dropout(dropout)(merged)
        merged = BatchNormalization()(merged)
        merged = Dense(hidden_size, activation='relu')(merged)
        merged = Dropout(dropout)(merged)
        merged = BatchNormalization()(merged)

        logits = tf.squeeze(Dense(1)(merged))

        self.prob = tf.sigmoid(logits)
        self.pred = tf.rint(self.prob)
        self.acc = tf.metrics.accuracy(labels=labels, predictions=self.pred)

        self.loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.to_float(labels),
                                                    logits=logits))

        if training:
            self.global_step = tf.train.get_or_create_global_step()
            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)

            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                # Ensures that we execute the update_ops before performing the train_step
                self.train_op = optimizer.minimize(
                    self.loss, global_step=self.global_step)
Example #30
    def __init__(self, sequence_length, vocab_size, embedding_size,
                 hidden_units, batch_size):

        # Placeholders for input, output and dropout
        self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length],
                                       name='input_x1')
        self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length],
                                       name='input_x2')
        self.input_y = tf.placeholder(tf.float32, [None], name='input_y')
        self.dropout_keep_prob = tf.placeholder(tf.float32,
                                                name='dropout_keep_prob')

        # Embedding layer
        with tf.name_scope('embedding'):
            self.W = tf.Variable(tf.random_uniform(
                [vocab_size, embedding_size], -1.0, 1.0),
                                 trainable=True,
                                 name='W')
            self.embedded_chars1 = tf.nn.embedding_lookup(
                self.W, self.input_x1)

            self.embedded_chars2 = tf.nn.embedding_lookup(
                self.W, self.input_x2)

        # Build the BiRNN representations for both inputs
        with tf.name_scope('output'):
            self.out1 = self.BiRNN(self.embedded_chars1,
                                   self.dropout_keep_prob, 'side1',
                                   hidden_units)
            self.out2 = self.BiRNN(self.embedded_chars2,
                                   self.dropout_keep_prob, 'side2',
                                   hidden_units)
            self.distance = \
                tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.out1, self.out2)), 1, keepdims=True))
            self.distance = tf.div(
                self.distance,
                tf.add(
                    tf.sqrt(
                        tf.reduce_sum(tf.square(self.out1), 1, keepdims=True)),
                    tf.sqrt(
                        tf.reduce_sum(tf.square(self.out2), 1,
                                      keepdims=True))))
            self.distance = tf.reshape(self.distance, [-1], name='distance')
        with tf.name_scope('loss'):
            self.loss = self.contrastive_loss(self.input_y, self.distance,
                                              batch_size)

        # Accuracy computation is outside of this class.
        with tf.name_scope('accuracy'):
            self.temp_sim = tf.subtract(tf.ones_like(self.distance),
                                        tf.rint(self.distance),
                                        name='temp_sim')  # auto threshold 0.5
            correct_predictions = tf.equal(self.temp_sim, self.input_y)
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions,
                                                   'float'),
                                           name='accuracy')
Example #31
def handle_for_six(logit, types):
    logits = tf.dynamic_partition(logit, types, 6)
    answers = []
    for i in range(6):
        res = tf.rint(logits[i])
        res = tf.maximum(res, 0)
        res = tf.minimum(res, 1)
        res = tf.reshape(res, [501, 501])
        answers.append(res)
    return answers
Example #32
      def blackbox(data):
        trX, trY, teX, teY = data
        trY = tf.to_int32(tf.rint(trY))
        teY = tf.to_int32(tf.rint(teY))
        tf_fn = build_fit(
            self._local_device,
            self._get_model,
            num_classes=num_classes,
            probs=self.probs)
        if self.probs:
          trP, teP, teP_probs = tf_fn(trX, trY, teX)
        else:
          trP, teP = tf_fn(trX, trY, teX)

        teY.set_shape(teY_shape)
        if self.probs:
          onehot = tf.one_hot(teY, num_classes)
          crossent = -tf.reduce_sum(onehot * teP_probs, [1])
          return tf.reduce_mean(crossent)
        else:
          # use error rate as the loss if no surrogate is avalible.
          return 1 - tf.reduce_mean(
              tf.to_float(tf.equal(teY, tf.to_int32(teP))))
Example #33
# Fragment: assumes tensors b, i, w, scalars b_fl, i_fl, o_fl, pad, stride, p_size, p_stride, and a tf.Session `sess` defined earlier in the script.
ofst = np.power(float(2), 0 - b_fl)
dmax = (np.power(float(2), 26) - 1)
dmin = -(np.power(float(2), 26))
#b = tf.divide(b, ofst)
#b = tf.rint(b)
b = tf.maximum(tf.minimum(b, dmax.astype(np.float32)), dmin.astype(np.float32))
b = tf.multiply(b, ofst)
ob = sess.run(b)
ob.tofile("bias_qnt.txt", '\n')

ofst = np.power(float(2), 0 - i_fl)
dmax = (np.power(float(2), 7) - 1)
dmin = -(np.power(float(2), 7))

i = tf.divide(i, np.power(float(2), 0 - i_fl))
i = tf.rint(i)
i = tf.maximum(tf.minimum(i, dmax.astype(np.float32)), dmin.astype(np.float32))
# input save
# ini = sess.run(i)
# ini = ini.astype(np.int8)
# ini.tofile("i.bin")
i = tf.multiply(i, np.power(float(2), 0 - i_fl))
i = tf.pad(i, [[0,0], [pad,pad],[pad,pad], [0,0]], "CONSTANT")

out = tf.nn.max_pool(i, [1,p_size,p_size,1], [1,p_stride,p_stride,1], padding = 'SAME')
out = tf.nn.conv2d(out, w, strides = [1,stride,stride,1], padding = 'VALID')
# out = tf.nn.conv2d(i, w, strides = [1,stride,stride,1], padding = 'VALID')
out = tf.add(out, b)
out = tf.divide(out, np.power(float(2), 0 - o_fl))
out = tf.rint(out)
out = tf.maximum(tf.minimum(out, dmax.astype(np.float32)), dmin.astype(np.float32))