Example #1
def net_atten(input_data, input_pos, reuse=False):
    with tf.variable_scope('net', reuse=reuse):
        # embed input_data
        x = embed(input_data, cfg.vocab_size, cfg.vocab_dim)
        # embed the input positions input_pos
        pos = embed(input_pos, 800, cfg.vocab_dim)
        # add the two embeddings together, then apply layer norm
        x = x + pos
        x = normalize(x, 'norm_' + get_uid())
        # once this loop runs more than 2 times, convergence becomes very slow
        for _ in range(3):
            # keep a short_cut for the residual connection
            short_cut = x
            # one multi-head attention output
            x = mha_block2(x, 64, 8)
            # project to 512 with a matmul, no activation
            x = linear(x, 512, dummy_func)
            # layer-normalize the residual connection x + short_cut
            x = normalize(x + short_cut, 'norm_' + get_uid())
            # two more matmuls; note the first has a ReLU activation
            ix = linear(x, 512, tf.nn.relu)
            lx = linear(ix, 512, dummy_func)
            # finally, layer-normalize the residual connection lx + x
            x = normalize(lx + x, 'norm_' + get_uid())
        # I test on audio features, so the output is 27-dimensional; add one last output matmul
        x = linear(x, 27, dummy_func)
        output = tf.transpose(x)
        return output
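mha_block2(x, 64, 8) is not defined in this snippet; presumably it is multi-head attention with 8 heads of size 64. For reference, a minimal NumPy sketch of the scaled dot-product attention at its core (names and shapes here are illustrative assumptions, not the author's code):

import numpy as np

def scaled_dot_product_attention(q, k, v):
    # q, k, v: [heads, seq_len, head_dim]; softmax over the key axis
    scores = q @ k.transpose(0, 2, 1) / np.sqrt(q.shape[-1])
    weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
    weights /= weights.sum(axis=-1, keepdims=True)
    return weights @ v

q = k = v = np.random.randn(8, 10, 64)
out = scaled_dot_product_attention(q, k, v)  # shape (8, 10, 64)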
Example #2
 def __transform(self, input_data, masks):
     """Apply transformer encoder
     """
     config = self.config
     transformed_output = tf.identity(input_data)
     if config.tf_used:
         tf_keep_prob = tf.cond(self.is_train, lambda: config.tf_keep_prob,
                                lambda: 1.0)
         tf_mh_keep_prob = tf.cond(self.is_train,
                                   lambda: config.tf_mh_keep_prob,
                                   lambda: 1.0)
         tf_ffn_keep_prob = tf.cond(self.is_train,
                                    lambda: config.tf_ffn_keep_prob,
                                    lambda: 1.0)
         # last dimension must be equal to model_dim because we use a residual connection.
         model_dim = transformed_output.get_shape().as_list()[-1]
         # sinusoidal positional signal
         signal = positional_encoding(self.sentence_lengths,
                                      self.sentence_length,
                                      model_dim,
                                      zero_pad=False,
                                      scale=False,
                                      scope='positional-encoding',
                                      reuse=None)
         transformed_output += signal
         # block
         for i in range(config.tf_num_layers):
             x = transformed_output
             # layer norm
             x_norm = normalize(x, scope='layer-norm-sa-%s' % i, reuse=None)
             # multi-head attention
             y = self.__self_attention(x_norm,
                                       masks,
                                       model_dim=model_dim,
                                       keep_prob=tf_mh_keep_prob,
                                       scope='self-attention-%s' % i)
             # residual and dropout
             x = tf.nn.dropout(x_norm + y, keep_prob=tf_keep_prob)
             # layer norm
             x_norm = normalize(x,
                                scope='layer-norm-ffn-%s' % i,
                                reuse=None)
             # position-wise feed forward net
             y = self.__feedforward(x_norm,
                                    masks,
                                    model_dim=model_dim,
                                    kernel_size=config.tf_ffn_kernel_size,
                                    keep_prob=tf_ffn_keep_prob,
                                    scope='feed-forward-%s' % i)
             # residual and dropout
             x = tf.nn.dropout(x_norm + y, keep_prob=tf_keep_prob)
             transformed_output = x
         # final layer norm
         transformed_output = normalize(transformed_output,
                                        scope='layer-norm',
                                        reuse=None)
     return transformed_output
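The positional_encoding helper used above is not shown. A minimal NumPy sketch of the standard sinusoidal table it presumably builds (the zero_pad/scale options are omitted; this is an assumption, not the project's implementation):

import numpy as np

def sinusoidal_encoding(seq_len, model_dim):
    # PE[pos, 2i] = sin(pos / 10000**(2i/d)); PE[pos, 2i+1] = cos(...)
    pos = np.arange(seq_len)[:, None].astype(np.float64)
    i = np.arange(model_dim)[None, :]
    angles = pos / np.power(10000.0, (2 * (i // 2)) / float(model_dim))
    return np.where(i % 2 == 0, np.sin(angles), np.cos(angles))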
Example #3
 def intersect(self, rayO, rayD):
     N = -1.0 * ops.normalize(ops.einsum_cross(self.c - self.a, self.b - self.a))
     d = np.dot(self.normal, self.a)
     t_num = d - (np.dot(self.normal, rayO))
     t_dem = np.dot(self.normal, rayD)
     t = t_num / t_dem
     P = rayO + t * rayD
     d = self.intersect_plane(rayO, rayD, P, self.normal)
     if d == np.inf:
         return np.inf
     # check edge 1
     e1 = self.b - self.a
     vp1 = P - self.a
     c1 = ops.einsum_cross(e1, vp1)
     if np.dot(N, c1) < 0:
         return np.inf
     # check edge 2
     e2 = self.c - self.b
     vp2 = P - self.b
     c2 = ops.einsum_cross(e2, vp2)
     if np.dot(N, c2) < 0:
         return np.inf
     # check edge 3
     e3 = self.a - self.c
     vp3 = P - self.c
     c3 = ops.einsum_cross(e3, vp3)
     if np.dot(N, c3) < 0:
         return np.inf
     return d
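ops.normalize and ops.einsum_cross are used throughout these ray-tracing snippets but never shown. A plausible minimal version of each (assumptions, not the project's actual ops module):

import numpy as np

def normalize(v):
    # scale a vector to unit length; assumes a nonzero norm
    return v / np.linalg.norm(v)

def einsum_cross(u, v):
    # 3-D cross product (u x v)_i = eps_ijk u_j v_k via the Levi-Civita tensor
    eijk = np.zeros((3, 3, 3))
    eijk[0, 1, 2] = eijk[1, 2, 0] = eijk[2, 0, 1] = 1.0
    eijk[0, 2, 1] = eijk[2, 1, 0] = eijk[1, 0, 2] = -1.0
    return np.einsum('ijk,j,k->i', eijk, u, v)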
Example #4
def simple_save_result_from_test_lb_whole(batch_img_list, i, result, labels, stage, config):
  """
  """
  parent_path = os.path.join(config.PARENT_PATH, stage)
  if not os.path.isdir(parent_path):
    os.mkdir(parent_path)
  # save labels
  labels_str = ''
  for l in labels:
    labels_str += (l + '\n')
  with open(parent_path + '/label.txt', 'w') as f:
    f.write(labels_str)

  parent_path = os.path.join(parent_path, 'whole_output')
  if not os.path.isdir(parent_path):
    os.mkdir(parent_path)

  # save the ori
  for idx in range(i.shape[0]):
    skimage.io.imsave(parent_path + '/ori_' + str(idx) + '.bmp', i[idx])
  # save the result
  with open(parent_path+'/result.txt', 'w') as f:
    f.write(result)

  for img in batch_img_list:
    tmp_img = img['img']
    tmp_label = img['label'].replace('/', '-')
    skimage.io.imsave(parent_path+'/'+tmp_label+'.bmp', normalize(tmp_img))
Example #5
def reflect_transmit_rays(og_rayO, og_rayD, col, reflection,
                          transmission_depth, max_transmission_depth):
    traced = trace_ray(og_rayO, og_rayD)
    if not traced:
        return False
    # reflection: create a new ray
    obj, M, N, col_ray = traced
    rf_rayO = M + N * 0.001
    rf_rayD = ops.normalize(og_rayD - 2 * np.dot(og_rayD, N) * N)
    col_rf = reflection * col_ray
    transmission_depth += 1
    opacity = obj.get_opacity()
    transparency = 1.0 - opacity
    if transmission_depth <= max_transmission_depth:
        # transmission of ray through transparent object
        tr_rayO = M - N * 0.001
        tr_rayD = og_rayD
        transmitted = reflect_transmit_rays(
            tr_rayO,
            tr_rayD,
            transparency * col,
            reflection,
            transmission_depth,
            max_transmission_depth,
        )
        if transmitted:
            _, _, col_tr, _, _ = transmitted
            col += transparency * col_tr
    col += opacity * col_rf
    reflection *= obj.get_reflection()
    col = np.clip(col, 0.0, 1.0)
    return rf_rayO, rf_rayD, col, reflection, transmission_depth
Example #6
def trace_ray(rayO, rayD):
    t = np.inf
    for group in scene:
        # print("[debug] Testing intersection with", group)
        intersected = False
        bounds = scene[group]["bounds"]
        t_obj = bounds.intersect(rayO, rayD)
        if t_obj < t:
            # print("[debug] Intersected with", group)
            objects = scene[group]["objects"]
            for obj in objects:
                # print("[debug] Testing intersection with", obj.type)
                t_obj = obj.intersect(rayO, rayD)
                if t_obj < t:
                    # print("[debug] Intersected with", obj.type)
                    t = t_obj
                    intersected_object = obj
                    intersected = True
                    break
        if intersected:
            break
    if t == np.inf:
        return None
    obj = intersected_object
    # print("[debug]", obj.type)
    M = rayO + rayD * t
    # get properties of object
    N = obj.get_normal(M)
    color = obj.get_color(M)
    toL = ops.normalize(L - M)
    toO = ops.normalize(O - M)
    # compute color
    col_ray = ambient_c
    col_ray += obj.get_diffuse_c() * max(np.dot(N, toL), 0) * color
    col_ray += (obj.get_specular_c() * max(np.dot(
        N, ops.normalize(toL + toO)), 0)**obj.get_specular_k() * color_light)
    return obj, M, N, col_ray
Example #7
def shade_pixel(x, y, q_z, depth_max):
    col = np.zeros(3)
    Q = np.array([x, y, q_z])
    D = ops.normalize(Q - O)
    depth = 0
    rayO, rayD = O, D
    reflection = 1.0
    transmission_depth = 0
    while depth < depth_max:
        traced = reflect_transmit_rays(rayO, rayD, col, reflection,
                                       transmission_depth,
                                       args.transmission_depth)
        if traced:
            rayO, rayD, col, reflection, transmission_depth = traced
        else:
            break
        depth += 1
    return col
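A hypothetical driver loop for shade_pixel, assuming the globals used above (O, the camera origin; L; scene; args) are defined elsewhere in the script; the resolution and screen bounds below are illustrative choices:

import numpy as np

w, h = 320, 240
r = float(w) / h
img = np.zeros((h, w, 3))
S = (-1.0, -1.0 / r, 1.0, 1.0 / r)  # screen: left, bottom, right, top
for j, y in enumerate(np.linspace(S[1], S[3], h)):
    for i, x in enumerate(np.linspace(S[0], S[2], w)):
        img[h - j - 1, i] = shade_pixel(x, y, 0.0, depth_max=5)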
Example #8
    def net(self, input, is_training=True, reuse=False, scope='Encoder'):
        with tf.variable_scope(scope, 'Encoder', [input], reuse=reuse) as net_scope:
            with slim.arg_scope(model_arg_scope(is_training=is_training, ac_fn=_leaky_relu)):
                if reuse:
                    net_scope.reuse_variables()
                # b x 32 x 32
                net = slim.conv2d(input, self.nef, [5, 5], stride=2, normalizer_fn=None, scope='e_conv_1')
                # b x 16 x 16
                net = slim.conv2d(net, self.nef*2, [5, 5], stride=2, scope='e_conv_2')
                # b x 8 x 8
                net = slim.conv2d(net, self.nef*4, [5, 5], stride=2, scope='e_conv_3')
                # b x 4 x 4
                net = slim.conv2d(net, self.nef*8, [5, 5], stride=2, scope='e_conv_4')
                # b x 1 x 1
                net = slim.conv2d(net, self.nz, [4, 4], stride=1, padding='VALID', normalizer_fn=None, activation_fn=None, scope='e_conv_5')

                if self.noise == 'sphere':
                    net = normalize(net)

                return net
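With self.noise == 'sphere', normalize presumably projects each latent code onto the unit sphere. A minimal sketch of that projection (an assumption, not the project's actual helper):

import tensorflow as tf

def normalize(net):
    # rescale each latent vector to unit L2 norm along the channel axis
    return tf.nn.l2_normalize(net, axis=-1)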
Example #9
    def getBatch(batch_paths):

        batch_images_x = np.empty((len(batch_paths), 256, 256, 3), dtype=np.float32)
        batch_images_y = np.empty((len(batch_paths), 256, 256, 3), dtype=np.float32)

        for i,img_path in enumerate(batch_paths):

            img_xy = cv2.imread(img_path)
            img_xy = cv2.resize(img_xy, (512, 256)).astype(np.float32)
            img_xy = ops.normalize(img_xy)

            img_x = img_xy[:,:256,:]
            img_y = img_xy[:,256:,:]

            if args.direction == 'ytox':
                img_x, img_y = img_y, img_x

            batch_images_x[i, ...] = img_x
            batch_images_y[i, ...] = img_y

        return tf.convert_to_tensor(batch_images_x), tf.convert_to_tensor(batch_images_y)
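ops.normalize here most likely rescales the raw pixel values before training; a plausible pix2pix-style mapping to [-1, 1] (an assumption, not the project's actual helper):

def normalize(img):
    # map [0, 255] pixel values into [-1, 1]
    return img / 127.5 - 1.0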
Example #10
def simple_save_result_from_test_lb(batch_img_list, result, labels, stage, config, output_layer):
  """Simple save the result.
  """
  if '/' in output_layer:
    raise ValueError('output_layer must not contain "/", got %s' % output_layer)
  parent_path = os.path.join(config.PARENT_PATH, stage)
  if not os.path.isdir(parent_path):
    os.mkdir(parent_path)

  # save labels
  labels_str = ''
  for l in labels:
    labels_str += (l + '\n')
  with open(parent_path + '/label.txt', 'w') as f:
    f.write(labels_str)

  with open(parent_path + '/result.txt', 'w') as f:
    f.write(result)

  parent_path = os.path.join(parent_path, output_layer)
  if not os.path.isdir(parent_path):
    os.mkdir(parent_path)

  tmp_count = 0
  for single_batch in batch_img_list:
    tmp_count += 1
    tmp_path = os.path.join(parent_path, str(tmp_count))
    if not os.path.isdir(tmp_path):
      os.mkdir(tmp_path)
    ori_img = single_batch[1]
    skimage.io.imsave(tmp_path + '/ori.bmp', ori_img)
    tmp_count_inner = 0
    for single_kernel_lb in single_batch[0]:
      tmp_count_inner += 1
      skimage.io.imsave(tmp_path + '/' + single_kernel_lb['label'].replace('/', '-') + str(tmp_count_inner) + '.bmp',
                        normalize(single_kernel_lb['img']))
Example #11
 def assertNormalizes(self, src, dst, ntype):
     value = ops.normalize(src, type=ntype)
     message = '%s != %s' % (type(value).__name__, type(dst).__name__)
     self.assertTrue(isinstance(value, type(dst)), message)
     self.assertEqual(value, dst)
Example #12
'''
  h_pool1 = nn_ops.conv2d_transpose(h_conv2 + b_conv2,W_conv2,
      [class_size,14,14,conv1_size],[1,1,1,1])
'''
index = 0
dir = "l1f/"
for t in W_conv1_t.eval():
    with open(dir + 'filter' + str(index) + '.png', "wb") as file:
        t = tf.constant(t)
        #t = tf.expand_dims(tf.constant(t), 0)
        #t_n = tf.squeeze(nn_ops.conv2d_transpose(t, W_conv1, [1,5,5,1],[1,1,1,1]), [0])
        t = tf.image.resize_images(t,
                                   50,
                                   50,
                                   method=tf.image.ResizeMethod.BICUBIC)
        t = tf.constant(ops.normalize(t.eval(), 0, 255))
        file.write(tf.image.encode_png(t).eval())
    index += 1

W_conv2_t = tf.transpose(W_conv2, perm=[3, 0, 1, 2])

index = 0
dir = "l2f/"
#for t in W_conv1_t.eval():
for t in W_conv2_t.eval():
    with open(dir + 'filter' + str(index) + '.png', "wb") as file:
        t = tf.expand_dims(tf.constant(t), 0)
        t_n = tf.squeeze(
            nn_ops.conv2d_transpose(t, W_conv1, [1, 5, 5, 1], [1, 1, 1, 1]),
            [0])
        t_n = tf.image.resize_images(t_n,
                                     50,
                                     50,
                                     method=tf.image.ResizeMethod.BICUBIC)
        t_n = tf.constant(ops.normalize(t_n.eval(), 0, 255))
        file.write(tf.image.encode_png(t_n).eval())
    index += 1
Example #13
 def compute_normal(self):
     return ops.normalize(ops.einsum_cross(self.c - self.a, self.b - self.a))
Example #14
def simple_gray_nor_imshow(img):
  plt.imshow(normalize(img), cmap='gray')
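For display, normalize presumably rescales intensities into a plottable range; a minimal min-max sketch (an assumption, not the project's actual helper):

import numpy as np

def normalize(img):
    # rescale intensities to [0, 1]; assumes img is not constant
    img = img.astype(np.float64)
    return (img - img.min()) / (img.max() - img.min())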
Example #16
def initialize(m_opts):
    m_vars = {}

    np.random.seed(m_opts['random_state'])

    data = loadmat(m_opts['dataset'])
    print "Dataset loaded: ", m_opts['dataset']

    m_vars['Y_train'] = sparsify(data['Y_tr'])
    m_vars['X_train'] = sparsify(data['X_tr'])
    m_vars['Y_test'] = sparsify(data['Y_te'])
    m_vars['X_test'] = sparsify(data['X_te'])

    print "Training data --  Y:", m_vars['Y_train'].shape, " X:", m_vars[
        'X_train'].shape
    print "Testing data -- Y:", m_vars['Y_test'].shape, "X: ", m_vars[
        'X_test'].shape

    m_vars['n_users'], m_vars['n_labels'] = m_vars['Y_train'].shape
    m_vars['n_features'] = m_vars['X_train'].shape[1]

    if m_opts['label_normalize']:
        normalize(m_vars['Y_train'], norm='l2', axis=1, copy=False)

    if not m_opts['no_feat_normalize']:
        normalize(m_vars['X_train'], norm='l2', axis=1, copy=False)
        normalize(m_vars['X_test'], norm='l2', axis=1, copy=False)

    # m_vars['U'] = m_opts['init_std']*np.random.randn(m_vars['n_users'], m_opts['n_components']).astype(floatX)
    m_vars['U_batch'] = np.zeros(
        (m_opts['batch_size'], m_opts['n_components'])).astype(floatX)
    m_vars['V'] = m_opts['init_std'] * np.random.randn(
        m_vars['n_labels'], m_opts['n_components']).astype(floatX)
    m_vars['W'] = m_opts['init_w'] * np.random.randn(
        m_opts['n_components'], m_vars['n_features']).astype(floatX)

    # accumulator of sufficient statistics of label factors
    m_vars['sigma_v'] = [None] * m_vars['n_labels']
    for i in range(m_vars['n_labels']):
        m_vars['sigma_v'][i] = m_opts['lam_v'] * ssp.eye(
            m_opts['n_components'], format="csr")
    m_vars['x_v'] = np.zeros((m_vars['n_labels'], m_opts['n_components']))

    if not m_opts['use_grad']:
        # accumulator of sufficient statistics of W matrix
        m_vars['sigma_W'] = m_opts['lam_w'] * ssp.eye(
            m_vars['n_features'], m_vars['n_features'], format="csr")
        m_vars['x_W'] = np.zeros(
            (m_vars['n_features'], m_opts['n_components']))

    if m_opts['observance']:
        m_vars['a'], m_vars['b'] = m_opts['init_mu_a'], m_opts['init_mu_b']
        # Beta random initialization
        m_vars['mu'] = np.random.beta(m_vars['a'],
                                      m_vars['b'],
                                      size=(m_vars['n_labels']))
        # constant initialization
        # m_vars['mu'] = m_opts['init_mu']*np.ones(m_vars['n_labels']).astype(floatX)
    else:
        m_vars['mu'] = np.ones(m_vars['n_labels']).astype(floatX)

    return m_vars
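In this example normalize appears to be sklearn.preprocessing.normalize; with copy=False on a CSR matrix it rescales the rows in place. A small illustrative check:

import numpy as np
from scipy import sparse
from sklearn.preprocessing import normalize

X = sparse.csr_matrix(np.array([[3.0, 4.0], [1.0, 0.0]]))
normalize(X, norm='l2', axis=1, copy=False)  # rows rescaled in place
print(X.toarray())  # [[0.6 0.8] [1. 0.]]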
Example #17
 def get_normal(self, coords):
     return -1.0 * ops.normalize(coords - self.center)
Example #18
  train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

'''
def t_map(func, tensor):
  flat = tf.reshape(tensor, [-1]).eval()
  flat_mapped = list(map(func, flat))
  return tf.reshape(tf.constant(flat_mapped), tensor.get_shape())

def normalize(tensor, lower, upper):
  minVal = tf.reduce_min(tensor).eval()
  maxVal = tf.reduce_max(tensor).eval()
  translated = t_map(lambda x: x - minVal, tensor)
  scaled = t_map(lambda x: numpy.uint8(x * (upper - lower) / (maxVal - minVal) + lower), translated)
  #print(tf.reduce_min(scaled).eval(),tf.reduce_max(scaled).eval())
  return scaled
'''

W_conv1_t = tf.transpose(W_conv1, perm=[3,0,1,2])
index = 0
dir = "filters/"
for t in W_conv1_t.eval():
  with open(dir+'filter'+str(index)+'.png', "wb") as file:
    t = tf.constant(t)
    t_n = tf.image.resize_images(t, 50, 50, method=tf.image.ResizeMethod.BICUBIC)
    t_n = tf.constant(ops.normalize(t_n.eval(), 0, 255))
    #t_n = t_map(lambda x: numpy.uint8(x), t_n)
    file.write(tf.image.encode_png(t_n).eval())
  index += 1
#for t in W_conv1.eval():
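Judging by the disabled normalize above, ops.normalize(x, 0, 255) plausibly performs a min-max rescale into [0, 255] for PNG encoding; a corrected NumPy sketch of that idea (an assumption, not the project's ops module):

import numpy as np

def normalize(arr, lower, upper):
    # min-max rescale into [lower, upper], cast to uint8 for PNG encoding
    arr = arr.astype(np.float64)
    arr = (arr - arr.min()) / (arr.max() - arr.min())
    return np.uint8(arr * (upper - lower) + lower)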

Example #19
 def extract_vgg_face(self, inputs):
     inputs = normalize((F.hardtanh(inputs) * 0.5 + 0.5) * 255,
                        [129.1863, 104.7624, 93.5940], [1.0, 1.0, 1.0])
     return self.vgg_face(inputs)
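Here normalize appears to subtract the per-channel VGG-Face BGR means with unit std on NCHW tensors; a minimal PyTorch sketch (an assumption, not the project's actual helper):

import torch

def normalize(t, mean, std):
    # per-channel (x - mean) / std for NCHW batches
    mean = torch.tensor(mean, dtype=t.dtype, device=t.device).view(1, -1, 1, 1)
    std = torch.tensor(std, dtype=t.dtype, device=t.device).view(1, -1, 1, 1)
    return (t - mean) / std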