import numpy as np
import tensorflow as tf

# Note: these snippets use the pre-1.0 TensorFlow API (tf.pack/tf.unpack were
# renamed tf.stack/tf.unstack in TF 1.0, and tf.concat took the axis first).


def mask_for_lengths(lengths, batch_size=None, max_length=None,
                     mask_right=True, value=-1000.0):
    """
    Creates a [batch_size x max_length] mask.

    :param lengths: int64 1-dim tensor of batch_size lengths
    :param batch_size: int32 0-dim tensor or python int
    :param max_length: int32 0-dim tensor or python int
    :param mask_right: if True, everything before "lengths" becomes zero and
        the rest "value", else vice versa
    :param value: value for the mask
    :return: [batch_size x max_length] mask of zeros and "value"s
    """
    if max_length is None:
        max_length = tf.cast(tf.reduce_max(lengths), tf.int32)
    if batch_size is None:
        batch_size = tf.shape(lengths)[0]
    # [batch_size x max_length] grid of positions 0 .. max_length-1
    mask = tf.reshape(tf.tile(tf.range(0, max_length), [batch_size]),
                      tf.pack([batch_size, -1]))
    if mask_right:
        mask = tf.greater_equal(tf.cast(mask, tf.int64),
                                tf.expand_dims(lengths, 1))
    else:
        mask = tf.less(tf.cast(mask, tf.int64), tf.expand_dims(lengths, 1))
    mask = tf.cast(mask, tf.float32) * value
    return mask
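# A minimal usage sketch (the values below are assumptions for illustration,
# not from the original code): add the mask to attention scores so positions
# at or beyond each sequence length are pushed to -1000 before a softmax.
lengths = tf.constant([2, 4], dtype=tf.int64)
scores = tf.zeros([2, 4])  # e.g. unnormalized attention scores
masked = scores + mask_for_lengths(lengths)
# Evaluates to:
# [[0., 0., -1000., -1000.],
#  [0., 0.,     0.,     0.]]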
def generator(self, z_enc, train):
    with tf.variable_scope('gan'):
        base_filters = self.d_size
        # Project the latent code (all but the last dimension, which encodes
        # the viewpoint) into a 4x4x4 volume and upsample it with a stack of
        # 3-D deconvolutions up to a 64^3 occupancy grid.
        h0 = ops.linear(z_enc[:, 0:(self.z_size - 1)], self.z_size - 1,
                        4 * 4 * 4 * base_filters, scope='g_f0')
        h0 = tf.reshape(h0, [self.batch_size, 4, 4, 4, base_filters])
        h0 = tf.nn.relu(self.g_bn0(h0, train))
        h1 = ops.deconv3d(h0, [self.batch_size, 8, 8, 8, base_filters // 2],
                          name='g_h1')
        h1 = tf.nn.relu(self.g_bn1(h1, train))
        h2 = ops.deconv3d(h1, [self.batch_size, 16, 16, 16, base_filters // 4],
                          name='g_h2')
        h2 = tf.nn.relu(self.g_bn2(h2, train))
        h3 = ops.deconv3d(h2, [self.batch_size, 32, 32, 32, 1], name='g_h3')
        h3 = tf.nn.relu(self.g_bn3(h3, train))
        h4 = ops.deconv3d(h3, [self.batch_size, 64, 64, 64, 1], name='g_h4')
        # Sigmoid occupancy scaled by 1/tau for the soft projection below.
        h4 = tf.nn.sigmoid(h4) * (1.0 / self.tau)
        self.voxels = tf.reshape(h4, [self.batch_size, 64, 64, 64])

        # The last latent dimension is the camera viewpoint: rotate each
        # volume accordingly and project it to a 2-D image.
        v = z_enc[:, self.z_size - 1]
        rendered_imgs = []
        for i in range(self.batch_size):
            img = ops.project(
                ops.transform_volume(self.voxels[i], ops.rot_matrix(v[i])),
                self.tau)
            rendered_imgs.append(img)
        self.final_imgs = tf.reshape(tf.pack(rendered_imgs),
                                     [self.batch_size, 64, 64, 1])
        return self.final_imgs
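# Shape sketch (the concrete sizes here are assumptions for illustration):
# the first z_size - 1 latent dimensions drive the voxel decoder, and the
# final dimension is the viewpoint passed to ops.rot_matrix.
z_enc = tf.random_normal([4, 201])   # batch_size = 4, z_size = 201
shape_code = z_enc[:, 0:200]         # decoded to a 64^3 occupancy volume
viewpoint = z_enc[:, 200]            # one rotation parameter per example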
def transform_volume(v, t):
    height = int(v.get_shape()[0])
    width = int(v.get_shape()[1])
    depth = int(v.get_shape()[2])
    grid = grid_coord(height, width, depth)

    xs = grid[0, :]
    ys = grid[1, :]
    zs = grid[2, :]

    # Apply the 3x3 transform t to the normalized grid coordinates.
    idxs_f = tf.transpose(tf.pack([xs, ys, zs]))
    idxs_f = tf.matmul(idxs_f, t)

    # Map the transformed [-1, 1] coordinates back to voxel indices.
    xs_t = (idxs_f[:, 0] + 1.0) * float(width) / 2.0
    ys_t = (idxs_f[:, 1] + 1.0) * float(height) / 2.0
    zs_t = (idxs_f[:, 2] + 1.0) * float(depth) / 2.0

    return tf.reshape(resample_voxels(v, xs_t, ys_t, zs_t, method='trilinear'),
                      v.get_shape())
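# `grid_coord` and `resample_voxels` are helpers defined elsewhere. As a rough
# sketch, a grid_coord returning a 3 x (height*width*depth) tensor of
# normalized [-1, 1] coordinates might look like the following; this is an
# assumption about its contract, not the original implementation:
def grid_coord_sketch(height, width, depth):
    xs = tf.linspace(-1.0, 1.0, width)
    ys = tf.linspace(-1.0, 1.0, height)
    zs = tf.linspace(-1.0, 1.0, depth)
    gx, gy, gz = tf.meshgrid(xs, ys, zs, indexing='ij')
    return tf.pack([tf.reshape(gx, [-1]),
                    tf.reshape(gy, [-1]),
                    tf.reshape(gz, [-1])])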
def hacked_tf_one_hot(indices, depth, on_value, off_value, name=None):
    '''Emulates new tf.one_hot in master.

    # Real signature: tf.one_hot(indices, depth, on_value, off_value, axis=None, name=None)
    # Assumed signature: tf.one_hot(indices, depth, on_value, off_value, axis=-1, name=None)

    Not needed if using newer versions of TensorFlow.
    '''
    N = tf.shape(indices)[0]
    range_Nx1 = tf.expand_dims(tf.to_int64(tf.range(N)), 1)
    indices_Nx1 = tf.expand_dims(indices, 1)
    concat = tf.concat(1, [range_Nx1, indices_Nx1])
    as_dense = tf.sparse_to_dense(concat,
                                  tf.to_int64(tf.pack([N, depth])),  # Assumption: axis=-1
                                  on_value, off_value)
    one_hot = tf.reshape(as_dense, (-1, depth), name=name)
    return one_hot
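# A minimal usage sketch (the inputs are assumptions for illustration):
# one-hot encode three int64 labels over a depth of 3.
labels = tf.constant([0, 2, 1], dtype=tf.int64)
oh = hacked_tf_one_hot(labels, depth=3, on_value=1.0, off_value=0.0)
# Evaluates to:
# [[1., 0., 0.],
#  [0., 0., 1.],
#  [0., 1., 0.]]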
def one_hot_encoding(labels, num_classes, scope=None):
    """Transforms numeric labels into one-hot labels.

    Args:
        labels: [batch_size] target labels.
        num_classes: total number of classes.
        scope: Optional scope for name_scope.
    Returns:
        One-hot encoding of the labels.
    """
    with tf.name_scope(scope, 'OneHotEncoding', [labels]):
        batch_size = labels.get_shape()[0]
        indices = tf.expand_dims(tf.range(0, batch_size), 1)
        labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
        # Scatter 1.0 at each (row, label) position and 0.0 elsewhere.
        concated = tf.concat(1, [indices, labels])
        onehot_labels = tf.sparse_to_dense(concated,
                                           tf.pack([batch_size, num_classes]),
                                           1.0, 0.0)
        onehot_labels.set_shape([batch_size, num_classes])
        return onehot_labels
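# Usage sketch (assumed inputs): the batch size must be statically known,
# because tf.range is driven by labels.get_shape()[0].
labels = tf.constant([1, 0, 3])
onehot = one_hot_encoding(labels, num_classes=4)
# Evaluates to:
# [[0., 1., 0., 0.],
#  [1., 0., 0., 0.],
#  [0., 0., 0., 1.]]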
def get_voxel_values(v, xs, ys, zs):
    idxs = tf.cast(tf.pack([xs, ys, zs], axis=1), 'int32')
    # Clamp indices into the valid range [0, size - 1]; assumes a cubic volume.
    idxs = tf.clip_by_value(idxs, 0, int(v.get_shape()[0]) - 1)
    idxs = tf.expand_dims(idxs, 0)
    return gather_nd(v, idxs)
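# Usage sketch (assumed inputs): look up voxel values at given coordinates.
# `gather_nd` is assumed to be a local helper predating the built-in
# tf.gather_nd; the volume below is illustrative only.
v = tf.reshape(tf.cast(tf.range(0, 8), tf.float32), [2, 2, 2])
xs = tf.constant([0.0, 1.0])
ys = tf.constant([0.0, 1.0])
zs = tf.constant([0.0, 1.0])
vals = get_voxel_values(v, xs, ys, zs)  # values at (0,0,0) and (1,1,1)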
def __init__(self, inputs, config_reader=None):
    # Row-wise dot product of self.x1 and self.x2 (set elsewhere): stack the
    # two [batch x dim] tensors, take the element-wise product across the
    # pair, then sum over the feature dimension.
    pack = tf.pack([self.x1, self.x2])
    self.y = tf.reduce_sum(tf.reduce_prod(pack, [0]), [1], keep_dims=True)
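# Equivalent formulation sketch (assumed [batch x dim] inputs): the
# pack/reduce_prod/reduce_sum chain above computes a batched dot product,
# the same as multiplying element-wise and summing.
x1 = tf.constant([[1.0, 2.0], [3.0, 4.0]])
x2 = tf.constant([[5.0, 6.0], [7.0, 8.0]])
y = tf.reduce_sum(x1 * x2, [1], keep_dims=True)
# Evaluates to [[17.], [53.]]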
def Linear(name, input_dim, output_dim, inputs, biases=True,
           initialization=None, weightnorm=None, spectralnorm=None, gain=1.,
           weight_init=weight_init, weight_regularizer=weight_regularizer,
           update_sn=None):
    """
    initialization: None, 'lecun', 'glorot', 'he', 'glorot_he', 'orthogonal',
        or ("uniform", range)
    """
    def uniform(stdev, size):
        if _weights_stdev is not None:
            stdev = _weights_stdev
        return np.random.uniform(low=-stdev * np.sqrt(3),
                                 high=stdev * np.sqrt(3),
                                 size=size).astype(dtype)

    if initialization == 'lecun':
        weight_values = uniform(np.sqrt(1. / input_dim),
                                (input_dim, output_dim))
    elif initialization == 'glorot' or initialization is None:
        # None falls through to glorot here; orthogonal init for square
        # matrices is disabled for now because it is too slow.
        weight_values = uniform(np.sqrt(2. / (input_dim + output_dim)),
                                (input_dim, output_dim))
    elif initialization == 'he':
        weight_values = uniform(np.sqrt(2. / input_dim),
                                (input_dim, output_dim))
    elif initialization == 'glorot_he':
        weight_values = uniform(np.sqrt(4. / (input_dim + output_dim)),
                                (input_dim, output_dim))
    elif initialization == 'orthogonal':
        # From lasagne.
        def sample(shape):
            if len(shape) < 2:
                raise RuntimeError("Only shapes of length 2 or more are "
                                   "supported.")
            flat_shape = (shape[0], np.prod(shape[1:]))
            # TODO: why normal and not uniform?
            a = np.random.normal(0.0, 1.0, flat_shape)
            u, _, v = np.linalg.svd(a, full_matrices=False)
            # Pick the factor with the correct shape.
            q = u if u.shape == flat_shape else v
            q = q.reshape(shape)
            return q.astype(dtype)
        weight_values = sample((input_dim, output_dim))
    elif initialization[0] == 'uniform':
        weight_values = np.random.uniform(
            low=-initialization[1], high=initialization[1],
            size=(input_dim, output_dim)).astype(dtype)
    else:
        raise Exception('Invalid initialization!')

    weight_values *= gain
    weight = lib.get_param(name + '.W', weight_values.shape, dtype,
                           weight_init, weight_regularizer)

    if weightnorm is None:
        weightnorm = _default_weightnorm
    if weightnorm:
        # Rescale each column of the weight matrix to a learned target norm.
        norm_values = np.sqrt(np.sum(np.square(weight_values), axis=0))
        target_norms = lib.param(name + '.g', norm_values)
        with tf.name_scope('weightnorm') as scope:
            norms = tf.sqrt(tf.reduce_sum(tf.square(weight),
                                          reduction_indices=[0]))
            weight = weight * (target_norms / (norms + 1e-12))

    # Spectral normalization.
    if spectralnorm is None:
        spectralnorm = _default_spectralnorm
    if spectralnorm:
        weight = spectral_norm(weight, update_sn=update_sn)

    if inputs.get_shape().ndims == 2:
        result = tf.matmul(inputs, weight)
    else:
        # Flatten all leading dimensions, multiply, then restore them.
        reshaped_inputs = tf.reshape(inputs, [-1, input_dim])
        result = tf.matmul(reshaped_inputs, weight)
        result = tf.reshape(
            result,
            tf.pack(tf.unpack(tf.shape(inputs))[:-1] + [output_dim]))

    if 'Generator' in name:
        # Auxiliary reconstruction loss that pushes W toward orthogonality:
        # penalize || stop_grad(x) - x W W^T ||^2.
        rec = tf.matmul(result, tf.transpose(weight))
        assert inputs.shape == rec.shape
        tf.add_to_collection(
            'REC_LOSS',
            tf.reduce_mean((tf.stop_gradient(inputs) - rec)**2))

    if biases:
        result = tf.nn.bias_add(
            result,
            lib.param(name + '.b', np.zeros((output_dim,), dtype=dtype)))
    return result
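# A minimal usage sketch (assumed setup: lib, dtype, _weights_stdev,
# _default_weightnorm and _default_spectralnorm are module globals configured
# elsewhere; the layer name is hypothetical):
x = tf.placeholder(tf.float32, [None, 128])
h = tf.nn.relu(Linear('Discriminator.Hidden', 128, 256, x,
                      initialization='he'))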
def unravel_argmax(argmax, shape):
    output_list = [
        argmax // (shape[2] * shape[3]),
        argmax % (shape[2] * shape[3]) // shape[3],
    ]
    return tf.pack(output_list)
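# Usage sketch (assumed inputs): recover (y, x) pooling coordinates from the
# flattened indices produced by tf.nn.max_pool_with_argmax, whose flat index
# is ((b * height + y) * width + x) * channels + c.
x = tf.placeholder(tf.float32, [1, 4, 4, 1])
pooled, argmax = tf.nn.max_pool_with_argmax(x, ksize=[1, 2, 2, 1],
                                            strides=[1, 2, 2, 1],
                                            padding='SAME')
coords = unravel_argmax(argmax, x.get_shape().as_list())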