def search(self):
    game = self.game
    parent = self.root
    # Selection: descend the tree, always following the child with the
    # highest PUCT score, mirroring each move on the game state.
    while not parent.is_leaf_node():
        best_puct = None
        best_child = None
        for child in parent.children:
            curr_puct = self.PUCT(parent, child)
            if best_puct is None or curr_puct >= best_puct:
                best_child = child
                best_puct = curr_puct
        self.level += 1
        parent = best_child
        game.execute_move(best_child.last_action)

    # Evaluation: get a (policy, value) prediction from the network and
    # renormalize the policy over the legal moves.
    raw_pred = self.agent.predict(np.array([game.get_board()]))
    result = loss.softmax(np.array(game.get_legal_NN_output()), raw_pred[0])

    # Expansion: unless the game is over, add one child per legal move,
    # seeded with its prior probability from the policy head.
    if not game.is_final():
        for move in game.get_moves():
            Node(game, parent, move, result[0][move])

    # Backpropagation: propagate the value estimate up to the root.
    self.back_propagate(parent, raw_pred[1][0][0])
    self.level = 0
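# The selection loop above depends on self.PUCT, which is not shown in this
# snippet. Below is a minimal sketch of the AlphaZero-style score it
# presumably computes; the constant c_puct and the node attributes visits,
# value_sum, and prior are assumptions, not taken from this code.
import math

def puct_score(parent, child, c_puct=1.0):
    # Mean value plus an exploration bonus that favors high-prior,
    # rarely visited children.
    q = child.value_sum / child.visits if child.visits > 0 else 0.0
    u = c_puct * child.prior * math.sqrt(parent.visits) / (1 + child.visits)
    return q + u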
def _evaluate(self, result, epsilon=0.000001):
    policy, value = result
    policy = policy.flatten()
    legal_moves = np.array(self.game.get_legal_NN_output())
    num_legal_moves = np.sum(legal_moves)
    # Renormalize the raw policy over the legal moves only; the small
    # epsilon keeps every legal move's prior strictly positive.
    policy_norm = loss.softmax(legal_moves, policy)
    policy_norm = (policy_norm + epsilon) * legal_moves
    outp = self.NN_output_to_moves_func(policy_norm)
    policy_norm = policy_norm[policy_norm > 0]
    if len(self.state_visits) == 0 and self.dirichlet_noise:
        # At the root, mix Dirichlet noise into the priors to encourage
        # exploration (AlphaZero-style); self.epsilon is the mixing weight.
        noise = np.random.dirichlet([self.alpha] * num_legal_moves)
        return value, {str(act): (1 - self.epsilon) * policy_norm[num] + self.epsilon * noise[num]
                       for num, act in enumerate(outp)}
    return value, {str(act): policy_norm[num] for num, act in enumerate(outp)}
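# Both search and _evaluate call loss.softmax(legal_mask, logits) with a
# legality mask as the first argument, which suggests a softmax restricted to
# legal moves. A sketch of what such a masked softmax typically looks like
# (an assumption about loss.softmax, shown for a single 1-D policy vector):
import numpy as np

def masked_softmax(mask, logits):
    # Illegal entries are sent to -inf so they get probability exactly 0.
    masked = np.where(mask == 1, logits, -np.inf)
    e = np.exp(masked - np.max(masked))
    return e / e.sum()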
def perplexity(input_data):
    # NOTE: this follows the original computation, which takes an elementwise
    # product of (T, V) softmax matrices and therefore returns a (T, V) array
    # rather than a scalar; see the standard-definition sketch below.
    from loss import softmax

    N, T, V = input_data.shape
    p = 1
    for i in range(N):
        p = p * softmax(input_data[i])  # elementwise product of (T, V) softmax outputs
    return pow(1 / p, 1 / N) / N
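# For reference, the textbook perplexity is the exponential of the average
# negative log-probability of the observed tokens. A minimal sketch under the
# assumption that token_probs holds the probabilities the model assigned to
# the tokens that actually occurred (hypothetical input, for illustration):
import numpy as np

def perplexity_from_token_probs(token_probs):
    nll = -np.log(np.asarray(token_probs) + 1e-12)
    return float(np.exp(nll.mean()))

# A model that assigns probability 0.25 to every token has perplexity ~4.
assert abs(perplexity_from_token_probs([0.25, 0.25, 0.25]) - 4.0) < 1e-6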
def train(game, config, num_filters, num_res_blocks, num_sim=125, epochs=50,
          games_each_epoch=10, batch_size=32, num_train_epochs=10):
    h, w, d = config.board_dims[1:]
    agent = ResNet.ResNet.build(h, w, d, num_filters, config.policy_output_dim,
                                num_res_blocks=num_res_blocks)
    agent.compile(loss=[softmax_cross_entropy_with_logits, 'mean_squared_error'],
                  optimizer=SGD(lr=0.001, momentum=0.9))
    agent.summary()
    for epoch in range(epochs):
        # Self-play: generate fresh training data with the current agent.
        x, y_pol, y_val = generate_data(game, agent, config, num_sim=num_sim,
                                        games=games_each_epoch)
        print("Epoch", epoch)
        print(x.shape)
        raw = agent.predict(x)
        for num in range(len(x)):
            print("targets-predictions")
            print(y_pol[num], y_val[num])
            print(softmax(y_pol[num], raw[0][num]), raw[1][num])
        agent.fit(x=x, y=[y_pol, y_val], batch_size=min(batch_size, len(x)),
                  epochs=num_train_epochs, callbacks=[])
        agent.save_weights("Models/" + Config.name + "/" + str(epoch) + ".h5")
    return agent
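# A hedged usage sketch for train(); TicTacToe and Config stand in for
# whatever game and config objects the project defines, and every
# hyperparameter value below is illustrative rather than recommended.
game = TicTacToe()      # hypothetical game implementing the expected interface
config = Config()       # must provide board_dims and policy_output_dim
agent = train(game, config, num_filters=64, num_res_blocks=5,
              num_sim=100, epochs=10, games_each_epoch=8)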
def get_prior_probabilities(self, board_state):
    # Run the network and renormalize the policy head over the legal moves.
    pred = self.agent.predict(board_state)
    return loss.softmax(np.array(self.game.get_legal_NN_output()), pred[0]), pred[1]
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()
    gpu0 = args.gpu

    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    # One output directory per artifact type.
    save_part_dir = os.path.join(args.save_dir, 'part_map')
    save_overlay_dir = os.path.join(args.save_dir, 'part_overlay')
    save_part_dcrf_dir = os.path.join(args.save_dir, 'part_map_dcrf')
    save_dcrf_overlay_dir = os.path.join(args.save_dir, 'part_dcrf_overlay')
    save_lm_dir = os.path.join(args.save_dir, 'landmarks')
    save_seg_dir = os.path.join(args.save_dir, 'seg')
    save_prob_dir = os.path.join(args.save_dir, 'prob')
    save_dcrf_prob_dir = os.path.join(args.save_dir, 'dcrf_prob')
    for d in (args.save_dir, save_part_dir, save_overlay_dir, save_part_dcrf_dir,
              save_dcrf_overlay_dir, save_lm_dir, save_seg_dir, save_prob_dir,
              save_dcrf_prob_dir):
        os.makedirs(d, exist_ok=True)

    def prep(path):
        """Ensure the parent directory of ``path`` exists, then return it."""
        os.makedirs(os.path.dirname(path), exist_ok=True)
        return path

    # Create the network and restore the checkpoint.
    model = model_generator(args)
    model.load_state_dict(torch.load(args.restore_from))
    model.eval()
    model.cuda(gpu0)

    if args.dataset == 'CelebAWild':
        from dataset.celeba_wild_dataset import CelebAWildDataset
        dataset = CelebAWildDataset
        testloader = data.DataLoader(dataset(args.data_dir, args.data_list,
                                             crop_size=input_size, scale=False,
                                             mirror=False, mean=IMG_MEAN,
                                             center_crop=False,
                                             ignore_saliency_fg=False,
                                             iou_threshold=0.3),
                                     batch_size=1, shuffle=False, pin_memory=True)
    elif args.dataset == 'cub':
        from dataset import cub
        args.batch_size = 1
        testloader = cub.data_loader(args)
    elif args.dataset == 'ImageNet':
        from dataset import imagenet as imnet_data
        args.batch_size = 1
        testloader = imnet_data.imnet_dataloader(args)
    elif args.dataset == 'p3d':
        from dataset import p3d as p3d_data
        args.batch_size = 1
        testloader = p3d_data.p3d_dataloader(args)
    else:
        print('Not supported dataset {}'.format(args.dataset))

    interp = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)
    colorize = utils.Colorize(args.num_parts + 1)

    N = len(testloader)
    landmarks = np.zeros((N, args.num_parts, 2))

    with torch.no_grad():
        for index, batch in enumerate(testloader):
            if index % 100 == 0:
                path_split = args.save_dir.split('/')
                print('{} processed: {}/{}'.format(index, path_split[-4],
                                                   path_split[-3]))

            image = batch['img']
            img_path = batch['img_path']
            tmp = img_path[0].split("/")
            img_folder = tmp[4]
            img_nm = tmp[5].replace(".jpg", ".png")
            size = input_size

            output = model(image.cuda(gpu0))
            output_raw = interp(output[2])

            # Part-response maxima give the predicted landmark locations.
            lms = Batch_Get_Centers(output_raw)
            landmarks[index, :, :] = lms

            if args.save_viz:
                mean_tensor = torch.tensor(IMG_MEAN).float().expand(
                    int(size[1]), int(size[0]), 3).transpose(0, 2)
                imgs_viz = torch.clamp(image + mean_tensor, 0.0, 255.0)

                # Landmark visualization.
                lms_viz = Batch_Draw_GT_Landmarks(imgs_viz, output_raw, lms)

                output = softmax(output_raw)
                # Normalize each part channel and damp the background channel.
                output /= output.max(dim=3, keepdim=True)[0].max(dim=2, keepdim=True)[0]
                output[:, 0, :, :] = 0.1
                output = output.cpu().data[0].numpy()

                output_np = output.transpose(1, 2, 0)
                output_np = np.asarray(np.argmax(output_np, axis=2), dtype=int)

                # Save the raw (pre-softmax) part responses.
                torch.save(output_raw.cpu(),
                           prep(os.path.join(save_prob_dir, '{}/{}'.format(
                               img_folder, img_nm.replace("png", "pth")))))

                # Save the hard segmentation.
                filename = prep(os.path.join(save_seg_dir,
                                             '{}/{}'.format(img_folder, img_nm)))
                Image.fromarray(output_np.astype(dtype=np.uint8)).save(filename, 'PNG')

                # Save the colorized part map and its overlay on the image.
                seg_viz = colorize(output_np)
                filename = prep(os.path.join(save_part_dir,
                                             '{}/{}'.format(img_folder, img_nm)))
                Image.fromarray(seg_viz.squeeze().transpose(1, 2, 0),
                                'RGB').save(filename)

                seg_overlay_viz = (imgs_viz.numpy() * 0.8 +
                                   seg_viz * 0.7).clip(0, 255.0).astype(np.uint8)
                filename = prep(os.path.join(save_overlay_dir,
                                             '{}/{}'.format(img_folder, img_nm)))
                Image.fromarray(seg_overlay_viz.squeeze().transpose(1, 2, 0),
                                'RGB').save(filename)

                if args.crf:
                    # Refine the part probabilities with a dense CRF.
                    output_dcrf_prob = utils.denseCRF(
                        imgs_viz.numpy().squeeze().transpose(1, 2, 0).astype(
                            np.uint8).copy(), output)
                    filename = prep(os.path.join(save_dcrf_prob_dir, '{}/{}'.format(
                        img_folder,
                        img_nm.replace(".png", ".npy").replace(".JPEG", ".npy"))))
                    np.save(filename, output_dcrf_prob)

                    output_dcrf = np.asarray(np.argmax(output_dcrf_prob, axis=2),
                                             dtype=int)
                    seg_dcrf_viz = colorize(output_dcrf)
                    filename = prep(os.path.join(save_part_dcrf_dir,
                                                 '{}/{}'.format(img_folder, img_nm)))
                    Image.fromarray(seg_dcrf_viz.squeeze().transpose(1, 2, 0),
                                    'RGB').save(filename)

                    seg_dcrf_overlay_viz = (imgs_viz.numpy() * 0.8 +
                                            seg_dcrf_viz * 0.7).clip(
                                                0, 255.0).astype(np.uint8)
                    filename = prep(os.path.join(save_dcrf_overlay_dir,
                                                 '{}/{}'.format(img_folder, img_nm)))
                    Image.fromarray(seg_dcrf_overlay_viz.squeeze().transpose(1, 2, 0),
                                    'RGB').save(filename)

                filename_lm = prep(os.path.join(save_lm_dir,
                                                '{}/{}'.format(img_folder, img_nm)))
                Image.fromarray(lms_viz[0, :, :, :].transpose(1, 2, 0),
                                'RGB').save(filename_lm)
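# Both evaluation scripts rely on Batch_Get_Centers to turn part-response
# maps into landmark coordinates. Its implementation is not shown here; a
# common choice is a spatial soft-argmax (center of mass of each softmaxed
# response map), sketched below as an assumption about what it does.
import torch

def batch_get_centers_sketch(response):
    # response: (B, K, H, W) part-response maps.
    B, K, H, W = response.shape
    prob = torch.softmax(response.view(B, K, -1), dim=-1).view(B, K, H, W)
    ys = torch.arange(H, dtype=response.dtype, device=response.device)
    xs = torch.arange(W, dtype=response.dtype, device=response.device)
    cy = (prob.sum(dim=3) * ys).sum(dim=2)  # expected row index, (B, K)
    cx = (prob.sum(dim=2) * xs).sum(dim=2)  # expected column index, (B, K)
    return torch.stack([cx, cy], dim=-1)    # (B, K, 2) landmark coordinates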
def predict(self, X):
    X = self.forward(X)
    return np.argmax(softmax(X), axis=1)
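# Since softmax is strictly increasing in each logit, the softmax call in
# predict does not change the argmax; it could be dropped without affecting
# the returned labels, and only matters when the probabilities themselves
# are needed. A quick self-contained check:
import numpy as np

logits = np.array([[2.0, 0.5, -1.0],
                   [0.1, 0.3, 0.2]])
probs = np.exp(logits - logits.max(axis=1, keepdims=True))
probs /= probs.sum(axis=1, keepdims=True)
assert (np.argmax(probs, axis=1) == np.argmax(logits, axis=1)).all()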
# Forward pass through three ReLU layers and a softmax output layer.
w_out = np.ones((784, 10))  # placeholder output weights

z_1 = np.dot(layer_img.T, w_1).T + b_1
a_1 = activation.relu(z_1)
z_2 = np.dot(a_1.T, w_2).T + b_2
a_2 = activation.relu(z_2)
z_3 = np.dot(a_2.T, w_3).T + b_3
a_3 = activation.relu(z_3)
z_out = np.dot(a_3.T, w_out).T

output = loss.softmax(z_out)
t = loss.crossEntropy(output, label)

# NOTE: this gradient applies the softmax/cross-entropy shortcut to a hidden
# ReLU layer; the (prediction - label) form is only valid at the softmax
# output layer (see the sketch below).
dw_3 = np.dot(a_2, (a_3 - label).T)
print(dw_3)

# # show image debugging
# img_load = np.reshape(unpack(len(bin_img) * 'B', bin_img), (28, 28))
# label_load = int.from_bytes(bin_label, byteorder='big', signed=False)
# plt.imshow(img_load, cmap=cm.binary)
# plt.show()
# print(img_load)
# print(label_load)
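# At the output layer, combining softmax with cross-entropy gives the
# well-known gradient dL/dz_out = y_hat - y. A minimal self-contained sketch;
# the variable names and toy shapes are illustrative, not from the original.
import numpy as np

def softmax_cols(z):
    e = np.exp(z - z.max(axis=0, keepdims=True))
    return e / e.sum(axis=0, keepdims=True)

# Toy shapes: 4 hidden units, 3 classes, 1 sample (column vectors).
a_prev = np.array([[0.5], [1.0], [0.0], [2.0]])  # activations into the output layer
w = np.random.randn(4, 3) * 0.1                  # output weights, (hidden, classes)
y = np.array([[0.0], [1.0], [0.0]])              # one-hot label

z = np.dot(a_prev.T, w).T                        # logits, shape (3, 1)
y_hat = softmax_cols(z)

# Softmax + cross-entropy shortcut: dL/dz = y_hat - y,
# so dL/dw = a_prev @ (y_hat - y).T, shape (4, 3).
dw = np.dot(a_prev, (y_hat - y).T)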
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()
    gpu0 = args.gpu

    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    # One output directory per artifact type.
    save_part_dir = os.path.join(args.save_dir, 'part_map')
    save_overlay_dir = os.path.join(args.save_dir, 'part_overlay')
    save_part_dcrf_dir = os.path.join(args.save_dir, 'part_map_dcrf')
    save_dcrf_overlay_dir = os.path.join(args.save_dir, 'part_dcrf_overlay')
    save_lm_dir = os.path.join(args.save_dir, 'landmarks')
    save_seg_dir = os.path.join(args.save_dir, 'seg')
    for d in (args.save_dir, save_part_dir, save_overlay_dir, save_part_dcrf_dir,
              save_dcrf_overlay_dir, save_lm_dir, save_seg_dir):
        os.makedirs(d, exist_ok=True)

    def prep(path):
        """Ensure the parent directory of ``path`` exists, then return it."""
        os.makedirs(os.path.dirname(path), exist_ok=True)
        return path

    # Create the network.
    model = model_generator(args)
    model.eval()
    model.cuda(gpu0)

    if args.dataset == 'CelebAWild':
        from dataset.celeba_wild_dataset import CelebAWildDataset
        dataset = CelebAWildDataset
        testloader = data.DataLoader(dataset(args.data_dir, args.data_list,
                                             crop_size=input_size, scale=False,
                                             mirror=False, mean=IMG_MEAN,
                                             center_crop=False,
                                             ignore_saliency_fg=False,
                                             iou_threshold=0.3),
                                     batch_size=1, shuffle=False, pin_memory=True)
    else:
        print('Not supported dataset {}'.format(args.dataset))

    interp = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)
    colorize = utils.Colorize(args.num_parts + 1)

    N = len(testloader)
    landmarks = np.zeros((N, args.num_parts, 2))
    landmarks_gt = np.zeros((N, args.lm_count, 2))

    with torch.no_grad():
        for index, batch in enumerate(testloader):
            if index % 100 == 0:
                path_split = args.save_dir.split('/')
                print('{} processed: {}/{}'.format(index, path_split[-4],
                                                   path_split[-3]))

            image = batch['img']
            label = batch['saliency']
            size_org = batch['size']
            name = batch['name']
            landmarks_gt[index, :, :] = batch['landmarks']
            size = input_size

            output = model(image.cuda(gpu0))
            output = interp(output[2])

            # Part-response maxima give the predicted landmark locations.
            lms = Batch_Get_Centers(output)
            landmarks[index, :, :] = lms

            if args.save_viz:
                mean_tensor = torch.tensor(IMG_MEAN).float().expand(
                    int(size[1]), int(size[0]), 3).transpose(0, 2)
                imgs_viz = torch.clamp(image + mean_tensor, 0.0, 255.0)

                # Landmark visualization.
                lms_viz = Batch_Draw_GT_Landmarks(imgs_viz, output, lms)

                output = softmax(output)
                # Normalize each part channel and damp the background channel.
                output /= output.max(dim=3, keepdim=True)[0].max(dim=2, keepdim=True)[0]
                output[:, 0, :, :] = 0.1
                output = output.cpu().data[0].numpy()
                output = output[:, :size[0], :size[1]]
                gt = np.asarray(label[0].numpy()[:size[0], :size[1]], dtype=int)

                output_np = output.transpose(1, 2, 0)
                output_np = np.asarray(np.argmax(output_np, axis=2), dtype=int)

                # Save the hard segmentation.
                filename = prep(os.path.join(save_seg_dir,
                                             '{}.png'.format(name[0][:-4])))
                Image.fromarray(output_np, 'P').save(filename)

                # Save the colorized part map and its overlay on the image.
                seg_viz = colorize(output_np)
                filename = prep(os.path.join(save_part_dir,
                                             '{}.png'.format(name[0][:-4])))
                Image.fromarray(seg_viz.squeeze().transpose(1, 2, 0),
                                'RGB').save(filename)

                seg_overlay_viz = (imgs_viz.numpy() * 0.8 +
                                   seg_viz * 0.7).clip(0, 255.0).astype(np.uint8)
                filename = prep(os.path.join(save_overlay_dir,
                                             '{}.png'.format(name[0][:-4])))
                Image.fromarray(seg_overlay_viz.squeeze().transpose(1, 2, 0),
                                'RGB').save(filename)

                if args.crf:
                    # Refine the part probabilities with a dense CRF.
                    output_dcrf = utils.denseCRF(
                        imgs_viz.numpy().squeeze().transpose(1, 2, 0).astype(
                            np.uint8).copy(), output)
                    output_dcrf = np.asarray(np.argmax(output_dcrf, axis=2),
                                             dtype=int)
                    seg_dcrf_viz = colorize(output_dcrf)
                    filename = prep(os.path.join(save_part_dcrf_dir,
                                                 '{}.png'.format(name[0][:-4])))
                    Image.fromarray(seg_dcrf_viz.squeeze().transpose(1, 2, 0),
                                    'RGB').save(filename)

                    seg_dcrf_overlay_viz = (imgs_viz.numpy() * 0.8 +
                                            seg_dcrf_viz * 0.7).clip(
                                                0, 255.0).astype(np.uint8)
                    filename = prep(os.path.join(save_dcrf_overlay_dir,
                                                 '{}.png'.format(name[0][:-4])))
                    Image.fromarray(seg_dcrf_overlay_viz.squeeze().transpose(1, 2, 0),
                                    'RGB').save(filename)

                filename_lm = prep(os.path.join(save_lm_dir,
                                                '{}.png'.format(name[0][:-4])))
                Image.fromarray(lms_viz[0, :, :, :].transpose(1, 2, 0),
                                'RGB').save(filename_lm)

    # Persist predicted and ground-truth keypoints for later evaluation.
    np.save(os.path.join(args.save_dir, 'pred_kp.npy'), landmarks)
    np.save(os.path.join(args.save_dir, 'gt_kp.npy'), landmarks_gt)
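# The saved pred_kp.npy / gt_kp.npy pairs are usually consumed by a separate
# evaluation step. A common protocol (an assumption here, not part of this
# script) fits a linear regression from predicted part centers to annotated
# landmarks and reports the regression error:
import numpy as np
from sklearn.linear_model import LinearRegression

pred = np.load('pred_kp.npy')            # (N, num_parts, 2)
gt = np.load('gt_kp.npy')                # (N, lm_count, 2)
pred = pred.reshape(len(pred), -1)
gt = gt.reshape(len(gt), -1)
reg = LinearRegression().fit(pred, gt)   # no train/test split in this sketch
print('mean squared error:', np.mean((reg.predict(pred) - gt) ** 2))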
def predict_proba(self, X):
    score, _ = self.forward(X, False)
    return loss_func.softmax(score)
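# Hypothetical usage, assuming clf is an instance of the classifier above;
# hard class predictions are just the argmax of these probabilities.
proba = clf.predict_proba(X_test)    # (n_samples, n_classes)
preds = np.argmax(proba, axis=1)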