def prediction(self, x, z):
    """Adaptive test-time forward pass with an e-mod output-layer update.

    Runs the forward pass (no dropout, hidden-layer caches discarded) and
    then adapts the output-layer weights with the e-modification rule.
    NOTE: unlike the plain `prediction`, this method has a side effect —
    it mutates self.params["w_out"] and self.params["b_out"] on every call.

    Input:
        x: A numpy array of input data, shape (N, D)
        z: Diff between y - y_des for the func approx, shape (N, M)
           ((1, 2) in this use case)
    Return:
        output: Prediction computed with the pre-update weights, shape (N, M)
    """
    h = x  # Input into the next layer or previous hidden activation
    for l in range(self.n_hidden):
        l = str(l)
        w = self.params["w" + l]
        b = self.params["b" + l]
        h, _ = affine(h, w, b)  # Affine layer
        h, _ = relu(h)  # Activation (ReLU)

    # Output layer, simply an affine; keep its cache for the e-mod update.
    output, cache = affine(h, self.params["w_out"], self.params["b_out"])

    # Technically this is not the real z, but the 1/N term only scales z
    # (equivalent to scaling beta by 1/N). This matches how dout works in
    # NeuralNetOffline.
    N = x.shape[0]
    z = z / N

    # The only trainable parameters in the adaptive case are the last-layer
    # weights, so only the output layer is updated (using e-mod).
    _, dw, db = self.w_hat_dot_e_mod(z, cache)

    # Gradient-style update scaled by the adaptation gain beta.
    self.params["w_out"] -= self.beta * dw
    self.params["b_out"] -= self.beta * db
    return output
def prediction_baysian_dropout(self, x, k=10):
    """Run test-time prediction k times and report per-output mean/variance.

    Essentially Bayesian dropout: an ensemble of k forward passes.

    Input:
        x: A numpy array of input data, shape (N, D)
        k: Number of forward passes in the ensemble (default 10)
    Return:
        mean: The mean prediction from the ensemble, shape (N, M)
        var: The variance on the prediction from the ensemble, shape (N, M)
    """
    N, _ = x.shape
    outputs = np.zeros((k, N, self.M))
    for i in range(k):
        h = x  # Input into the next layer or previous hidden activation
        for l in range(self.n_hidden):
            w, b = self.params["w" + str(l)], self.params["b" + str(l)]
            h, _ = affine(h, w, b)  # Affine layer
            h, _ = relu(h)  # Activation (ReLU)
            # NOTE(review): no dropout layer is applied here, so all k
            # passes are identical unless affine/relu are stochastic —
            # confirm whether a dropout call was intended (cf.
            # prediction_save_cache, which applies dropout per layer).
        # Output layer, simply an affine
        outputs[i], _ = affine(h, self.params["w_out"], self.params["b_out"])
    # Reduce across the ensemble axis only (axis=0) so the documented
    # (N, M) shapes are returned; the original reduced to scalars and
    # referenced an undefined name (`output`) in np.var.
    mean = np.mean(outputs, axis=0)
    var = np.var(outputs, axis=0)
    return mean, var
def prediction_save_cache(self, x):
    """Forward pass that also records per-layer caches for backprop.

    N samples, D dims per sample (one sample per row); M is the dimension
    of y/prediction. Each hidden layer is affine -> ReLU -> dropout
    (train-time dropout), followed by a final affine output layer.

    Input:
        x: A numpy array of input data, shape (N, D)
    Return:
        output: Output prediction/prediction of label, shape (N, M)
        caches: Dict of saved intermediate activations keyed by layer
            ("affine<l>", "relu<l>", "dropout<l>", "affine_out")
    """
    caches = {}
    act = x  # running activation fed into the next layer
    for tag in map(str, range(self.n_hidden)):
        act, caches["affine" + tag] = affine(
            act, self.params["w" + tag], self.params["b" + tag])
        act, caches["relu" + tag] = relu(act)
        act, caches["dropout" + tag] = dropout(act, self.dropout)
    # Final layer is a plain affine with no activation.
    output, caches["affine_out"] = affine(
        act, self.params["w_out"], self.params["b_out"])
    return output, caches
def landmarksBuilding(devide, _x1, _x2, _y1, _y2, _z1, _z2):
    """Scatter `devide` randomly chosen landmark primitives in a box.

    For each landmark a shape type (line/star/triangle/cube) is picked at
    random, scaled, rotated by random Euler angles, translated to a random
    point inside [_x1,_x2] x [_y1,_y2] x [_z1,_z2], and appended to the
    matching global collection.

    Input:
        devide: how many landmarks to generate
        _x1.._z2: translation range on each axis
    """
    global lines, stars, triangles, cubes
    # Base primitive per random choice; all four share the same
    # scale -> rotate -> translate pipeline below, so the original
    # four-way branch duplication is collapsed into one path.
    bases = {1: l0, 2: s0, 3: t0, 4: c0}
    for _ in range(devide):
        # Draw the random numbers in exactly the original order so a
        # seeded RNG reproduces identical scenes.
        choice = random.randint(1, 4)
        angle1 = random.uniform(-PI, PI)
        angle2 = random.uniform(-PI, PI)
        angle3 = random.uniform(-PI, PI)
        trans1 = random.uniform(_x1, _x2)
        trans2 = random.uniform(_y1, _y2)
        trans3 = random.uniform(_z1, _z2)
        shape = utils.affine(0.6, 0.7, bases[choice])
        shape = utils.multi(
            utils.EulerRotate(angle1, angle2, angle3), shape)
        shape = utils.translate(trans1, trans2, trans3, shape)
        # randint(1, 4) is inclusive on both ends, so every value is
        # handled here (the original's error-print `else` was unreachable).
        if choice == 1:
            lines = np.append(lines, shape)
        elif choice == 2:
            stars = np.append(stars, shape)
        elif choice == 3:
            triangles = np.append(triangles, shape)
        else:
            cubes = np.append(cubes, shape)
def prediction(self, x, z=None):
    """Plain test-time forward pass (no cache saved, no dropout).

    Input:
        x: A numpy array of input data, shape (N, D)
        z: Unused; accepted only for signature compatibility with the
           adaptive network's `prediction(x, z)`.
    Return:
        output: Output prediction/prediction of label, shape (N, M)
    """
    act = x  # running activation fed into the next layer
    for tag in map(str, range(self.n_hidden)):
        act, _ = affine(act, self.params["w" + tag], self.params["b" + tag])
        act, _ = relu(act)  # ReLU activation
    # Output layer is a plain affine with no activation.
    output, _ = affine(act, self.params["w_out"], self.params["b_out"])
    return output
model.summary() flags = [0] * 10 index = [0] * 10 digits = np.where(y_test == 1)[1] for i, num in enumerate(digits): num = int(num) if flags[num]: continue else: flags[num] = 1 index[num] = i if np.all(flags): break x_deform_test = np.array([affine(x) for x in x_test]) print(index) print(x_test[index].shape) input_img = np.concatenate([x_test[index], x_deform_test[index]]) input_img = combine_images(input_img, height=2, width=10) input_img = input_img * 255 Image.fromarray(input_img.astype(np.uint8)).save(args.save_dir + '/input.png') model.load_weights(args.weights1) _, x_recon = eval_model.predict(x_test, batch_size=100) _, x_deform_recon = eval_model.predict(x_deform_test, batch_size=100) recon_img = np.concatenate([x_recon[index], x_deform_recon[index]]) recon_img = combine_images(recon_img, height=2, width=10) recon_img = recon_img * 255 Image.fromarray(recon_img.astype(np.uint8)).save(args.save_dir +