Example #1
    def generate(self, z=None):

        with tf.name_scope('Generate'):
            if self.prior == 'gaussian':
                if z is None:
                    z = np.random.normal(size=(self.batch_size, self.latent_dim))
                return self.sess.run(self.x_mean, feed_dict={self.z: z})

            elif self.prior == 'gmm':
                if z is None:
                    targets = (self.gmm_pi, self.gmm_mu, self.gmm_log_var)
                    pis, means, log_vars = self.sess.run(targets)
                    pis /= pis.sum()
                    cluster = np.random.choice(range(self.num_clusters), p=pis)
                    mean = means[cluster]
                    std = np.sqrt(np.exp(log_vars[cluster]))
                    # Reparameterize: draw standard-normal noise, then scale and shift it
                    eps = np.random.normal(size=std.shape)
                    z = mean + std * eps
                return self.sess.run(self.x_mean, feed_dict={self.z: z})
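
For reference, the GMM branch above is ordinary ancestral sampling from a Gaussian mixture: pick a component according to the mixture weights, then draw from that component with the reparameterization mean + std * eps. A minimal standalone sketch in NumPy (the parameter values, dimensions, and the helper name sample_gmm are illustrative assumptions, not taken from the class above):

import numpy as np

# Illustrative mixture: 2 components in a 3-dimensional latent space
pis = np.array([0.3, 0.7])                         # mixture weights (sum to 1)
means = np.array([[0.0, 0.0, 0.0],
                  [2.0, -1.0, 0.5]])               # per-component means
log_vars = np.log([[1.0, 1.0, 1.0],
                   [0.25, 0.25, 0.25]])            # per-component log-variances

def sample_gmm(pis, means, log_vars):
    cluster = np.random.choice(len(pis), p=pis)    # pick a component
    std = np.sqrt(np.exp(log_vars[cluster]))       # log-variance -> std
    eps = np.random.normal(size=std.shape)         # standard-normal noise
    return means[cluster] + std * eps              # reparameterized sample

z = sample_gmm(pis, means, log_vars)               # z.shape == (3,)
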
Example #2
 def init_params():
     w = nd.random_normal(scale=1, shape=(num_inputs, 1))
     b = nd.zeros(shape=(1, ))
     params = [w, b]
     vs = []
     sqrs = []
     for param in params:
         param.attach_grad()
         # Initialize the exponentially weighted moving-average state
         # variables to zero tensors with the same shape as the parameters.
         vs.append(param.zeros_like())
         sqrs.append(param.zeros_like())
     return params, vs, sqrs
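
The vs and sqrs buffers created by init_params hold the exponentially weighted moving averages that an Adam-style update consumes; Example #9 below shows the tail end of such an update (the bias-corrected step). A minimal sketch of the full step under the standard Adam formulas (the function name adam_step and the default hyperparameters are assumptions, not taken from the source):

from mxnet import nd

def adam_step(params, vs, sqrs, lr, batch_size, t,
              beta1=0.9, beta2=0.999, eps_stable=1e-8):
    for param, v, sqr in zip(params, vs, sqrs):
        g = param.grad / batch_size                         # averaged gradient
        v[:] = beta1 * v + (1. - beta1) * g                 # 1st-moment EMA
        sqr[:] = beta2 * sqr + (1. - beta2) * nd.square(g)  # 2nd-moment EMA
        v_bias_corr = v / (1. - beta1 ** t)                 # bias corrections
        sqr_bias_corr = sqr / (1. - beta2 ** t)
        div = lr * v_bias_corr / (nd.sqrt(sqr_bias_corr) + eps_stable)
        param[:] = param - div                              # in-place update
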
Example #3
	def __init__(self, M1 = 0, M2 = 0, f = tf.nn.tanh, use_bias = True, zeros = False, copy_from_ESwNN = False, W_E = 0, b_E = 0):
		self.use_bias = use_bias
		if not copy_from_ESwNN:
			if zeros:
				W = np.zeros((M1, M2)).astype(np.float32)
			else:
				W = np.random.normal(size=(M1, M2)).astype(np.float32)
			# b must exist in every branch because tf.Variable(b) below is unconditional;
			# when use_bias is False it simply stays at zero.
			b = np.zeros(M2).astype(np.float32)
		else:
			W = np.array(W_E).astype(np.float32)
			b = np.array(b_E).astype(np.float32)
			print(W)
			print("\n\n")
			print(b)
			print("\n\n\n\n")

		self.W = tf.Variable(W)
		self.b = tf.Variable(b)
		self.params = [self.W, self.b]

		self.f = f
Example #4
    def __getitem__(self, idx):
        # 1. Random select a video
        # 2. slice the video frames into batches and select one batch([l_bound, r_bound])
        selected_video_num = np.random.randint(0, len(self.annotations))
        num_frames = sum(
            1 for line in open(self.annotations[selected_video_num]))
        num_batch = num_frames // self.config['BATCH_SIZE']
        l_bound = (idx % num_batch) * self.config['BATCH_SIZE']
        r_bound = (idx % num_batch + 1) * self.config['BATCH_SIZE']
        if r_bound - 1 + self.config['TIME_STEP'] > num_frames:
            r_bound = num_frames - self.config['TIME_STEP'] + 1
            l_bound = r_bound - self.config['BATCH_SIZE']
            if l_bound < 0:
                raise Exception(
                    "Number of frames in every video must be more than batch size ( > %d )"
                    % self.config['BATCH_SIZE'])
        if (l_bound + self.config['BATCH_SIZE'] - 1 +
                self.config['TIME_STEP'] - 1) > num_frames - 1:
            l_bound = num_frames - 1 - (self.config['BATCH_SIZE'] - 1 +
                                        self.config['TIME_STEP'] - 1)
            r_bound = l_bound + self.config['BATCH_SIZE']

        labels = np.loadtxt(self.annotations[selected_video_num],
                            delimiter=',')

        ##########################################################################
        # Make sure labels are not NAN
        ##########################################################################

        while isNAN(labels[l_bound:(r_bound - 1 + self.config['TIME_STEP']),
                           ...]):
            # print("\nGround truth is Nan, choose another batch")
            l_bound = (np.random.randint(0, 1001) %
                       num_batch) * self.config['BATCH_SIZE']
            r_bound = l_bound + self.config['BATCH_SIZE']
            if r_bound - 1 + self.config['TIME_STEP'] > num_frames:
                r_bound = num_frames - self.config['TIME_STEP'] + 1
                l_bound = r_bound - self.config['BATCH_SIZE']
                if l_bound < 0:
                    raise Exception(
                        "Number of frames in every video must be more than batch size ( > %d )"
                        % self.config['BATCH_SIZE'])
            # print("l_bound:", l_bound)
            # print("r_bound:", r_bound)
            # print("#frame:", num_frames)
            # print('#batch:', num_batch)

        detections = np.load(self.detected_label_namelist[selected_video_num])
        features = np.load(self.features_namelist[selected_video_num])

        # TODO
        # x_center, y_center, w, h (normalized by image width 1280 and height 720)
        labels[:, 0] = (labels[:, 0] + labels[:, 2] / 2.0) / 1280
        labels[:, 1] = (labels[:, 1] + labels[:, 3] / 2.0) / 720
        labels[:, 2] = labels[:, 2] / 1280
        labels[:, 3] = labels[:, 3] / 720

        # Make input data
        if isinstance(self.config['INPUT_SIZE'], list):
            x_batch = np.zeros(
                (self.config['BATCH_SIZE'], self.config['TIME_STEP'],
                 self.config['INPUT_SIZE'][0], self.config['INPUT_SIZE'][1],
                 self.config['INPUT_SIZE'][2]))
            bbox_batch = np.zeros(
                (self.config['BATCH_SIZE'], self.config['TIME_STEP'], 4))
            # detection_batch = np.zeros((self.config['BATCH_SIZE'], self.config['GRID_H'],  self.config['GRID_W'], self.config['BOX'], 4+1+self.config['CLASS']+1))
            # b_batch = np.zeros((self.config['BATCH_SIZE'], 1, 1, 1, 1, 4))   # list of self.config['TRUE_self.config['BOX']_BUFFER'] GT boxes

            # feat_batch = np.zeros((self.config['BATCH_SIZE'], self.config['INPUT_SIZE'][0], self.config['INPUT_SIZE'][1], self.config['INPUT_SIZE'][2]))
            y_batch = np.zeros(
                (self.config['BATCH_SIZE'], self.config['TIME_STEP'], 4))
            # y_batch = np.zeros((self.config['BATCH_SIZE'], self.config['GRID_H'],  self.config['GRID_W'], self.config['BOX'], 4+1+self.config['CLASS']+1))
            # bbox_score_batch = np.zeros((self.config['BATCH_SIZE'], self.config['TIME_STEP'], self.config['INPUT_SIZE'][0], self.config['INPUT_SIZE'][1], 1))
        else:
            x_batch = np.zeros(
                (self.config['BATCH_SIZE'], self.config['TIME_STEP'],
                 self.config['INPUT_SIZE']))
            bbox_batch = np.zeros(
                (self.config['BATCH_SIZE'], self.config['TIME_STEP'], 4))
            y_batch = np.zeros(
                (self.config['BATCH_SIZE'], self.config['TIME_STEP'], 4))

        instance_count = 0
        for i in range(l_bound, r_bound):
            # Every instance in every batch contains #time_step images
            for j in range(self.config['TIME_STEP']):
                detection = detections[i + j, ...]
                feature = features[i + j, ...]
                label = labels[i + j, ...]
                # print("Detection shape:", detection.shape)
                # print("Feature shape:", feature.shape)
                # print("Detection:", detection)
                # inputs = detection
                # inputs = np.concatenate((feature.flatten(), detection))
                # inputs = feature
                # print("Input shape:", inputs.shape)

                x_batch[instance_count, j, ...] = feature

                if isNAN(detection):
                    # When a frame has no detection result, fall back to the first
                    # frame's box perturbed with Gaussian noise, kept inside [0, 1]
                    print("No detection results, use auxiliary bbox.")
                    detection = detections[0, ...].copy()
                    for k, value in enumerate(detection):
                        tmp = value + np.random.normal(0, 0.5)
                        while tmp < 0 or tmp > 1:
                            tmp = value + np.random.normal(0, 0.5)
                        detection[k] = tmp
                bbox_batch[instance_count, j, :] = detection
                y_batch[instance_count, j, :] = label

                if j == self.config['TIME_STEP'] - 1:
                    # detection = detections[i+j, ...]
                    # if isNAN(detection):
                    #     detection = detections[0, ...]
                    #     for value in detection:
                    #         tmp = value + np.random_normal(0, 0.5)
                    #         while tmp < 0 or tmp > 1:
                    #             tmp = value + np.random_normal(0, 0.5)
                    #         value = tmp
                    # print(detection)

                    # bbox_batch[instance_count, :] = detection
                    # label = labels[i+j, ...]

                    if isNAN(label):
                        raise ValueError("Label is nan!")
                    for value in label:
                        if value < 0:
                            print(label)
                            raise ValueError("Label value should > 0")
                    # feat_batch[instance_count, ...] = feature
                    # y_batch[instance_count, :] = label

                    # find the anchor that best predicts this box

                # print("Label:", label)

            instance_count += 1

        if isNAN(x_batch):
            raise ValueError("X batch NAN!!!!!!!!!!!!!1")

        # if isNAN(bbox_batch):
        # raise ValueError("BBOX batch NAN!!!!!!!!!!!!!1")

        # print ' new batch created', idx
        # print(feat_batch.shape)
        return [x_batch, bbox_batch], y_batch
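
The isNAN helper called throughout this example is not shown in the snippet; a minimal implementation consistent with how it is used here (on scalars, rows, and whole batches) might look like the following, though the original helper is an assumption:

import numpy as np

def isNAN(x):
    # True if any element of x (scalar or array) is NaN
    return bool(np.isnan(np.asarray(x, dtype=np.float64)).any())
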
Example #5
	def generate(self, hidden = None):
		if hidden is None:
			# Draw a random code with the same shape as the hidden-layer bias b1
			hidden = np.random.normal(size=self.weights["b1"].shape)
		return self.sess.run(self.reconstruct, feed_dict = {self.hidden: hidden})
Example #6
n_epochs = 2
batch_size = 150


def shuffle_batch(X, y, batch_size):
    rnd_idx = np.random.permutation(len(X))
    n_batches = len(X) // batch_size
    for batch_idx in np.array_split(rnd_idx, n_batches):
        X_batch, y_batch = X[batch_idx], y[batch_idx]
        yield X_batch, y_batch


def plot_image(image, shape=[28, 28]):
    plt.imshow(image.reshape(shape), cmap="Greys", interpolation="nearest")
    plt.axis("off")


with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        print(epoch)
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch})
    codings_rnd = np.random.normal(size=[n_digits, n_hidden3])
    outputs_val = outputs.eval(feed_dict={hidden3: codings_rnd})
    for iteration in range(n_digits):
        plt.subplot(n_digits, 10, iteration + 1)
        plot_image(outputs_val[iteration])
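
This excerpt depends on a graph and data defined elsewhere (X, hidden3, outputs, training_op, init, X_train, n_digits, n_hidden3). Below is a minimal sketch of definitions consistent with how those names are used above; the layer sizes and the plain reconstruction loss are illustrative assumptions, not the original model:

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

n_inputs = 28 * 28    # e.g. flattened MNIST images
n_hidden3 = 20        # size of the coding layer fed with codings_rnd
n_digits = 60         # number of images to generate

X = tf.placeholder(tf.float32, [None, n_inputs])
hidden3 = tf.layers.dense(X, n_hidden3, activation=tf.nn.relu)
outputs = tf.layers.dense(hidden3, n_inputs, activation=tf.nn.sigmoid)

loss = tf.reduce_mean(tf.square(outputs - X))              # reconstruction loss
training_op = tf.train.AdamOptimizer(0.001).minimize(loss)
init = tf.global_variables_initializer()

Feeding hidden3 directly during generation works because TensorFlow lets feed_dict supply values for intermediate tensors, bypassing the encoder entirely.
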
Example #7
 def generate_data(self, z_mu=None):
     if z_mu is None:
         z_mu = np.random.normal(size=self.network_arch["n_z"])
     return self.sess.run(self.x_reconstruction_mean, feed_dict={self.z_mean: z_mu})
Example #8
# Activation functions handle problems that a plain linear mapping y = Wx cannot.
# y = AF(Wx): the activation function "bends" the line; it must be differentiable.
import tensorflow as tf
import numpy as np

# in_size is the number of input units (rows of the weight matrix); out_size is the number of output units (columns)
def add_layer(inputs, in_size, out_size, activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases 

    if activation_function is None:
        outputs = Wx_plus_b 
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs

x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise 

xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
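
The snippet stops after the placeholder definitions; a minimal sketch of how add_layer might be wired into the rest of this curve-fitting example (the hidden-layer size, loss, learning rate, and step count are illustrative assumptions):

# Hidden layer with 10 units and ReLU, then a linear output layer
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
prediction = add_layer(l1, 10, 1, activation_function=None)

# Mean squared error and a plain gradient-descent training step
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), axis=1))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
        if step % 100 == 0:
            print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
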
Example #9
        sqr_bias_corr = sqr / (1. - beta2**t)
        div = lr * v_bias_corr / (nd.sqrt(sqr_bias_corr) + eps_stable)
        param[:] = param - div


if __name__ == '__main__':
    import random
    from mxnet import nd, gluon
    random.seed(1)

    # Generate the dataset.
    num_inputs = 2
    num_examples = 1000
    true_w = [2, -3.4]
    true_b = 4.2
    X = nd.random_normal(scale=1, shape=(num_examples, num_inputs))
    y = true_w[0] * X[:, 0] + true_w[1] * X[:, 1] + true_b
    y += .01 * nd.random_normal(scale=1, shape=y.shape)
    dataset = gluon.data.ArrayDataset(X, y)

    # Construct the data iterator.

    def data_iter(batch_size):
        idx = list(range(num_examples))
        random.shuffle(idx)
        for batch_i, i in enumerate(range(0, num_examples, batch_size)):
            j = nd.array(idx[i:min(i + batch_size, num_examples)])
            yield batch_i, X.take(j), y.take(j)

    # Initialize the model parameters.
def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))
import tensorflow as tf
import numpy as np

x_zero = np.random.multivariate_normal(mean=(-1, -1),
                                       cov=.1 * np.eye(2),
                                       size=(50, ))
y_zero = np.zeros(50)
x_one = np.random.multivariate_normal(mean=(1, 1),
                                      cov=.1 * np.eye(2),
                                      size=(50, ))
y_one = np.ones(50)
x = np.concatenate([x_zero, x_one])
y = np.concatenate([y_zero, y_one])

x_input = tf.placeholder(dtype=tf.float32, shape=(100, 2))
y_input = tf.placeholder(dtype=tf.float32, shape=(100, ))
w = tf.Variable(np.random.normal(size=(2, 1)).astype(np.float32))
b = tf.Variable(tf.zeros((1, )))
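
The fragment ends after defining the weight and bias variables; a minimal sketch of how this two-class setup might be completed (the sigmoid cross-entropy loss, optimizer, learning rate, and step count are illustrative assumptions):

# Logits of a linear classifier on the 2-D points
logits = tf.squeeze(tf.matmul(x_input, w) + b)    # shape (100,)
loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=y_input, logits=logits))
train_op = tf.train.GradientDescentOptimizer(0.05).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(200):
        _, loss_val = sess.run([train_op, loss],
                               feed_dict={x_input: x, y_input: y})
    preds = sess.run(tf.sigmoid(logits), feed_dict={x_input: x}) > 0.5
    print("final loss:", loss_val)
    print("accuracy:", (preds == y).mean())
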