Example No. 1
  def process(self,state):
    # send http header
    print "Content-Type: text/html; charset=utf-8" 
    print                               # blank line, end of headers
    
    print '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
    print '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">'
    print '<html xmlns="http://www.w3.org/1999/xhtml">'
    
    HtmlHeader().render(state)
    util.opentag('body')
    util.opentag('div', id='content')

    locale.setlocale(locale.LC_TIME, "de_DE")
    
    for day in range(1, 31):
      state.day = str(day)
      renderDate = 1
      for eventid in range(0, 10):
        state.eventid = str(eventid)
        name = util.eventFileName(state, state.lang, state.eventid)
        if not os.path.exists(name):
          continue
        if renderDate:
          print "<h1>"+state.day+". "+state.month+". "+state.year+"</h1>"
          renderDate = 0
        xml_file = file(name)
        xslt_file = file(Request.templatedir+'/event.xsl')
        util.transform(state, xslt_file, xml_file)


    util.closetag('div')
    util.closetag('body')
    util.closetag('html')
Example No. 2
 def eval_handler(self):
   # Evaluate the probability distribution.
   mcts = MCTS([self.state], self.evaluator, None)
   pi = mcts.tree_search()[0]
   # Get the predicted action sequence.
   a = mcts.get_action_sequence()
     
   # Evaluate the value.
   model = Model(enhance = True, imitate = True, optimize = 0)
   with tf.Session() as sess:
     saver = tf.train.Saver()
     # Restore the player.
     if self.evaluator == BEST_PLAYER:
       file_name = "BestPlayer"
     else:
       file_name = "Player_" + format(self.evaluator, "05d")
     saver.restore(sess, PLAYER_DIR + file_name)
     
     # Transform the action sequence into state sequence.
     state_sequence, sequence_length, sequence_index = get_state_sequence([self.state], a)
     
     # Randomly transform the state.
     n = np.random.choice(SYMMETRY)
     transformed_state = copy.deepcopy(self.state)
     transformed_state = transform(transformed_state, n)
     # Update the state sequence.
     state_sequence = [transform(x, n) for x in state_sequence]
     # Extract inputs from state.
     inputs = extract_inputs([transformed_state])
     inputs_sequence = extract_inputs(state_sequence)
     
     # Compute the feature sequence.
     feature_sequence = sess.run(model.x, feed_dict = {model.Inputs: inputs_sequence})
     feature_sequence = reshape_feature_sequence(feature_sequence, sequence_length, sequence_index)
     
     # Compute the value and policy.
     feed_dict = {model.Inputs: inputs, model.Feature_Sequence: feature_sequence, model.Sequence_Length: sequence_length}
     target = [model.v, model.p, model.v_prime, model.p_prime, model.v_hat, model.p_hat]
     v, p, v_prime, p_prime, v_hat, p_hat = sess.run(target, feed_dict = feed_dict)
     v = v[0, 0]
     p = normalize_probability(p, [transformed_state.legal_moves])
     p = p[0]
     v_prime = v_prime[0, 0]
     p_prime = normalize_probability(p_prime, [transformed_state.legal_moves])
     p_prime = p_prime[0]
     v_hat = v_hat[0, 0]
     p_hat = normalize_probability(p_hat, [transformed_state.legal_moves])
     p_hat = p_hat[0]
     
     print("V     = " + str(v))
     print("V'    = " + str(v_prime))
     print("V_hat = " + str(v_hat))
     print("P     = " + str(p))
     print("P'    = " + str(p_prime))
     print("P_hat = " + str(p_hat))
     print("Pi    = " + str(pi))
     print("===================")
   tf.contrib.keras.backend.clear_session()
   self.game_canvas.draw_gameboard(pi, v)
Example No. 3
	def render(self,state):	
		""" trys to access current days event files and transforms them
		"""
		# try to access default language event
		name = util.monthFileName(state, 'de')
		if not os.path.exists(name):
			return
		xml_file = file(name)
		xslt_file = file(Request.templatedir+'/monthEvent.xsl')
		util.transform(state, xslt_file, xml_file)
Example No. 4
	def render(self,state):	
		""" trys to access current days event files and transforms them
		"""
		# try to access special language event
		name = util.eventFileName(state, state.lang, state.eventid)
		if not os.path.exists(name):
			name = util.eventFileName(state, 'de', state.eventid)
		if not os.path.exists(name):
			name = (Request.templatedir+'/emptyevent.xml')
		xml_file = file(name)
		xslt_file = file(Request.templatedir+'/admin.xsl')
		state.edit='s' # set edit type to event save
		util.transform(state, xslt_file, xml_file)
Example No. 5
	def render(self,state):	
		""" trys to access current days event files and transforms them
		"""
		# try to access current dates event files and render them
		for i in range(0,10):
			state.eventid=str(i)
			name = util.eventFileName(state, state.lang, state.eventid)
			if not os.path.exists(name): # fallback to default lang
				name = util.eventFileName(state, 'de', state.eventid)
			if not os.path.exists(name):
				continue
			xml_file = file(name)
			xslt_file = file(Request.templatedir+'/eventShort.xsl')
			util.transform(state, xslt_file, xml_file)
Example No. 6
	def render(self,state):	
		""" trys to access current days event files and transforms them
		"""
		# try to access requested month xml and render it
		name=util.monthFileName(state, state.lang)
		if not os.path.exists(name): # fallback to default lang
			name=util.monthFileName(state, 'de')
		if not os.path.exists(name):
			name = (Request.templatedir+'/emptyevent.xml')
		if not os.path.exists(name):
			return
		xml_file = file(name)
		xslt_file = file(Request.templatedir+'/monthEvent.xsl')
		util.transform(state, xslt_file, xml_file)
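Every render() above funnels into util.transform(state, xslt_file, xml_file), which the listing never shows. A minimal sketch of what an XSLT-based implementation could look like, assuming lxml is available (state is accepted only for interface parity here):

from lxml import etree

def transform(state, xslt_file, xml_file):
    # Hypothetical: compile the stylesheet, run the transformation, and
    # print the rendered fragment to the CGI output stream.
    stylesheet = etree.XSLT(etree.parse(xslt_file))
    result = stylesheet(etree.parse(xml_file))
    print(str(result))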
Example No. 7
def prepare_training_data_process(player, num_minibatches, id):
  state = [[None for _ in range(MINIBATCH_SIZE)] for _ in range(num_minibatches)]
  pi = [[None for _ in range(MINIBATCH_SIZE)] for _ in range(num_minibatches)]
  z = [[None for _ in range(MINIBATCH_SIZE)] for _ in range(num_minibatches)]
  a = [[None for _ in range(MINIBATCH_SIZE)] for _ in range(num_minibatches)]
  inputs = [None for _ in range(num_minibatches)]
  state_sequence = [None for _ in range(num_minibatches)]
  sequence_length = [None for _ in range(num_minibatches)]
  sequence_index = [None for _ in range(num_minibatches)]
  inputs_sequence = [None for _ in range(num_minibatches)]
  np.random.seed()
  
  for i in range(num_minibatches):
    # Randomly select training data from the most recent games.
    length = np.minimum(SELFPLAY_GAMES * (player+1), RECENT_GAMES)
    game_id = np.random.randint(length, size = MINIBATCH_SIZE) + SELFPLAY_GAMES * (player+1) - length
    for j in range(MINIBATCH_SIZE):
      file_name = "TrainingData_" + format(game_id[j], "07d") + ".pkl"
      with open(DATA_DIR + file_name, "rb") as fp:
        data = pickle.load(fp)
        k = np.random.randint(len(data))
        state[i][j] = data[k][0]
        pi[i][j] = data[k][1]
        z[i][j] = [data[k][2]]
        a[i][j] = data[k][3]
    
    # Transform the action sequence into state sequence.
    state_sequence[i], sequence_length[i], sequence_index[i] = get_state_sequence(state[i], a[i])
    
    index = 0
    for j in range(MINIBATCH_SIZE):
      # Randomly transform the state.
      n = np.random.choice(SYMMETRY)
      state[i][j] = transform(state[i][j], n)
      # Update the probability distribution.
      pi_temp = np.zeros(BOARD_SIZE_X*BOARD_SIZE_Y+1, np.float32)
      pi_temp[state[i][j].legal_moves] = pi[i][j]
      pi[i][j] = pi_temp
      # Update the state sequence.
      state_sequence[i][index:index+sequence_length[i][j]] = [transform(state_sequence[i][index+k], n) for k in range(sequence_length[i][j])]
      index += sequence_length[i][j]
    # Extract inputs from state.
    inputs[i] = extract_inputs(state[i])
    inputs_sequence[i] = extract_inputs(state_sequence[i])
  
  # Save the training data to file.
  data = [inputs, pi, z, sequence_length, sequence_index, inputs_sequence]
  file_name = "TempData_" + format(id, "04d") + ".pkl"
  with open(TEMPDATA_DIR + file_name, "wb") as fp:
    pickle.dump(data, fp)
Example No. 8
 def eval(self):
     sys.stderr.write('Evaluating\n')
     folds = StratifiedKFold(y=self.y_train, n_folds=self.folds, shuffle=True, random_state=1337)
     scores = []
     for train_index, test_index in folds:
         self.fit(train_index)
         predicted, y_test = self.predict(test_index)
         k = kappa(y_test, transform(predicted), weights='quadratic')
         print(k)
         scores.append(k)
     print(scores)
     print(np.mean(scores))
     print(np.std(scores))
Example No. 9
    def __getitem__(self, idx):
        '''
        d: disk name
        l, h: Two input imgs for training
        m: label img
        '''
        # d, l_id, h_id, m_id = self.mapping(idx)
        try:
            # Interpolation l=1, h=3, m=2, predict m
            # Extrapolation l=1, m=3, h=2, predict m
            d, l_id, h_id, m_id = self.triplets[idx]  # interpolation
            # d, l_id, m_id, h_id = self.triplets[idx] # extrapolation
        except IndexError:
            raise IndexError("Maximum index supported is {}".format(
                self.__len__()))
        # print(d, l, h, m)
        with h5py.File(self.data_dir, "r") as data:
            data_d = data[d]
            l = np.array(data_d[str(l_id)])
            h = np.array(data_d[str(h_id)])
            m = np.array(data_d[str(m_id)])
            log_grid = np.asarray(data_d["log_grid"])

        # Apply transforms
        if self.transforms:
            for t_i, transform in enumerate(self.transforms):
                # the second transform is a group operation
                # if t_i in self.group_trans_id:
                if hasattr(transform, 'group_tran'):
                    l, h, m = transform(l, h, m)
                    continue
                # Check if the transform requires log_grid
                # if 'log_grid' in transform.__call__.__code__.co_varnames:
                if hasattr(transform, 'require_grid'):
                    l = transform(log_grid, l)
                    h = transform(log_grid, h)
                    m = transform(log_grid, m)
                    continue

                l = transform(l)
                h = transform(h)
                m = transform(m)

        ret = [l, h, m]
        # Verbose mode, return disk name, img indexes
        if self.verbose:
            ret.append({
                "disk_name": d,
                "img1_idx": l_id,
                "img2_idx": h_id,
                "label_idx": m_id
            })
        return ret
Example No. 10
def test_find_rbm_procrustes():
    for it in xrange(100):
        R = np.linalg.qr(np.random.uniform(-1, 1, size=(3, 3)))[0]
        if np.linalg.det(R) < 0:
            R *= -1
        t = np.random.uniform(-2, 2, size=3)
        M = np.eye(4)
        M[:3, :3] = R
        M[:3,  3] = t
        N = np.random.randint(3, 1000)
        frompts = np.random.random((N, 3))
        topts = transform(frompts, M)
        M_pro = find_rbm_procrustes(frompts, topts)
        np.testing.assert_almost_equal(M_pro, M)
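This test passes only if transform(frompts, M) applies the 4x4 homogeneous matrix M to an (N, 3) point array. A minimal sketch under that assumption:

import numpy as np

def transform(pts, M):
    # Hypothetical: append a homogeneous coordinate, apply M (row-vector
    # convention), and divide by w to return Cartesian points.
    pts_h = np.column_stack([pts, np.ones(len(pts))])
    out = pts_h @ M.T
    return out[:, :3] / out[:, 3:4]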
Example No. 11
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        img, label = self.img[index], self.label[index]

        # img = transforms.ToTensor()(img)
        # img = img.transpose(1,0)
        img = transform(img)

        return img, label
Example No. 12
def test_find_rbm_procrustes():
    for it in xrange(100):
        R = np.linalg.qr(np.random.uniform(-1, 1, size=(3, 3)))[0]
        if np.linalg.det(R) < 0:
            R *= -1
        t = np.random.uniform(-2, 2, size=3)
        M = np.eye(4)
        M[:3, :3] = R
        M[:3,  3] = t
        N = np.random.randint(3, 1000)
        frompts = np.random.random((N, 3))
        topts = transform(frompts, M)
        M_pro = find_rbm_procrustes(frompts, topts)
        np.testing.assert_almost_equal(M_pro, M)
Example No. 13
def main(input_hdf5_file, output_hdf5_file, rest_shape='first'):
    data = h5py.File(input_hdf5_file, 'r')
    verts = data['verts'].value
    tris = data['tris'].value

    v0 = verts[0]
    verts_new = []
    for i, v in enumerate(verts):
        print "frame %d/%d" % (i+1, len(verts))
        M = find_rbm_procrustes(v, v0)
        verts_new.append(transform(v, M))
    verts = np.array(verts_new, np.float32)

    with h5py.File(output_hdf5_file, 'w') as f:
        f.create_dataset('verts', data=verts, compression='gzip')
        f['tris'] = tris
Example No. 14
def main(input_hdf5_file, output_hdf5_file):
    data = h5py.File(input_hdf5_file, 'r')
    verts = data['verts'].value
    tris = data['tris'].value

    v0 = verts[0]
    verts_new = []
    for i, v in enumerate(verts):
        print "frame %d/%d" % (i+1, len(verts))
        M = find_rbm_procrustes(v, v0)
        verts_new.append(transform(v, M))
    verts = np.array(verts_new, np.float32)

    with h5py.File(output_hdf5_file, 'w') as f:
        f.create_dataset('verts', data=verts, compression='gzip')
        f['tris'] = tris
Example No. 15
def mind(user_input):
    tar = transform(user_input, tokenizer)
    states_values = enc_model.predict(tar)
    empty_target_seq = np.zeros((1, 1))
    empty_target_seq[0, 0] = tokenizer.word_index['start']
    stop_condition = False
    decoded_translation = ''
    while not stop_condition:
        dec_outputs, h, c = dec_model.predict([empty_target_seq] +
                                              states_values)
        sampled_word_index = np.argmax(dec_outputs[0, -1, :])
        sampled_word = None  # guard: defined even if no vocab index matches
        for word, index in tokenizer.word_index.items():
            if sampled_word_index == index:
                decoded_translation += ' {}'.format(word)
                sampled_word = word
        if sampled_word == 'end':
            stop_condition = True
        empty_target_seq = np.zeros((1, 1))
        empty_target_seq[0, 0] = sampled_word_index
        states_values = [h, c]
    out = decoded_translation.split()[:-1]
    decoded_translation = ' '.join(out)
    return decoded_translation
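mind() performs a greedy decode: each step feeds the previous token and the carried LSTM states back into the decoder until the 'end' token is produced. A usage sketch, assuming tokenizer, enc_model, and dec_model are loaded elsewhere:

# Hypothetical REPL around mind(); names follow the snippet above.
while True:
    user_input = input("you> ")
    if not user_input:
        break
    print("bot>", mind(user_input))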
Example No. 16
    def viewpoint(self,u,v,d,mat_c):
        # target point in camera frame at pixel u, v
        # print("current", mat)
        pc = self.position3d(u,v)
        if pc[2] < 0: # depth
            print("invalid depth image")
            return None

        # target point normal in camera frame
        nc = self.normal3d(u,v)
        pv = pc-d*nc
        # rotate angles in robot frame
        a = asin(-nc[1]) # about x
        b = asin(nc[0]/cos(a)) # about y
        c = 0 # about z
        mat_v = util.transform(np.array([a,b,c]), pv)
        #print("in camera, normal, position ", nc, pv, [a,b,c], mat_v)

        # mat_c : camera pose in robot frame
        # mat_v : next viewpoint in camera system
        # mat_e: next camera pose in robot frame
        mat_e = np.dot(mat_c,mat_v)
        vp = self.transform_to_cartesian(mat_e)
        return vp
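Here util.transform(np.array([a, b, c]), pv) evidently builds a 4x4 camera pose from Euler angles and a translation. A sketch consistent with that usage; the axis order and composition are assumptions:

import numpy as np

def transform(angles, t):
    # Hypothetical: compose rotations about x, y, z and attach translation t.
    a, b, c = angles
    Rx = np.array([[1, 0, 0], [0, np.cos(a), -np.sin(a)], [0, np.sin(a), np.cos(a)]])
    Ry = np.array([[np.cos(b), 0, np.sin(b)], [0, 1, 0], [-np.sin(b), 0, np.cos(b)]])
    Rz = np.array([[np.cos(c), -np.sin(c), 0], [np.sin(c), np.cos(c), 0], [0, 0, 1]])
    M = np.eye(4)
    M[:3, :3] = Rz @ Ry @ Rx   # composition order is an assumption
    M[:3, 3] = t
    return M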
Example No. 17
 def solution(self):
     predicts = self.pool.map(solution, self.models)
     predicted = [elem / len(self.models) for elem in sum(predicts)]
     return transform(predicted)
Example No. 18
def get_map(model, confidence, iou_threshold, coco_version, subset=1):

    if type(model) is nn.DataParallel:
        inp_dim = model.module.inp_dim
        pw_ph = model.module.pw_ph
        cx_cy = model.module.cx_cy
        stride = model.module.stride
    else:
        inp_dim = model.inp_dim
        pw_ph = model.pw_ph
        cx_cy = model.cx_cy
        stride = model.stride

    pw_ph = pw_ph.cuda()
    cx_cy = cx_cy.cuda()
    stride = stride.cuda()

    model.eval()

    max_detections = 100
    transformed_dataset = Coco(partition='val',
                               coco_version=coco_version,
                               subset=subset,
                               transform=transforms.Compose(
                                   [ResizeToTensor(inp_dim)]))

    dataset_len = (len(transformed_dataset))
    #     print('Length of dataset is '+ str(dataset_len)+'\n')
    batch_size = 8

    dataloader = DataLoader(transformed_dataset,
                            batch_size=batch_size,
                            shuffle=False,
                            collate_fn=helper.my_collate,
                            num_workers=4)

    # assume my_collate also yields image names; write_pred below needs img_name
    for images, targets, img_name in dataloader:
        inp = images.cuda()
        raw_pred = model(inp, torch.cuda.is_available())
        true_pred = util.transform(raw_pred.clone().detach(), pw_ph, cx_cy,
                                   stride)

        sorted_pred = torch.sort(true_pred[:, :, 4] *
                                 (true_pred[:, :, 5:].max(axis=2)[0]),
                                 descending=True)

        pred_mask = sorted_pred[0] > confidence
        indices = [(sorted_pred[1][e, :][pred_mask[e, :]])
                   for e in range(pred_mask.shape[0])]
        pred_final = [true_pred[i, indices[i], :] for i in range(len(indices))]

        pred_final_coord = [
            util.get_abs_coord(pred_final[i].unsqueeze(-2))
            for i in range(len(pred_final))
        ]

        indices = [
            nms_box.nms(pred_final_coord[i][0], pred_final[i][:, 4],
                        iou_threshold) for i in range(len(pred_final))
        ]

        pred_final = [
            pred_final[i][indices[i], :] for i in range(len(pred_final))
        ]

        #     pred_final[:,0:4]=pred_final[:,0:4]/inp_dim
        helper.write_pred(img_name, pred_final, inp_dim, max_detections,
                          coco_version)

    boundingboxes = helper.getBoundingBoxes(coco_version)

    evaluator = Evaluator()

    metricsPerClass = evaluator.GetPascalVOCMetrics(boundingboxes,
                                                    IOUThreshold=0.75)
    # Loop through classes to obtain their metrics
    mAP = 0
    counter = 0
    for mc in metricsPerClass:
        # Get metric values per each class
        c = mc['class']
        precision = mc['precision']
        recall = mc['recall']
        average_precision = mc['AP']
        ipre = mc['interpolated precision']
        irec = mc['interpolated recall']
        # Print AP per class
        mAP = average_precision + mAP


#         print('%s: %f' % (c, average_precision))

#     print('map is:',mAP/80)
    return mAP / 80
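In this snippet util.transform decodes raw network outputs into absolute boxes before confidence filtering and NMS. A sketch of the usual YOLO decode it presumably performs; the (x, y, w, h, objectness, classes) layout and the broadcasting of pw_ph/cx_cy are assumptions:

import torch

def transform(raw_pred, pw_ph, cx_cy, stride):
    # Hypothetical YOLO decode: sigmoid the center offsets and add the grid
    # offsets, exponentiate the sizes against the anchor priors, then scale
    # back to input-image pixels via the stride.
    pred = raw_pred.clone()
    pred[:, :, 0:2] = (torch.sigmoid(pred[:, :, 0:2]) + cx_cy) * stride
    pred[:, :, 2:4] = torch.exp(pred[:, :, 2:4]) * pw_ph * stride
    pred[:, :, 4:] = torch.sigmoid(pred[:, :, 4:])
    return pred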
Example No. 19
 def test_transform(self):
     self.assertEqual(5764801, util.transform(1, 7, 8))
     self.assertEqual(17807724, util.transform(1, 7, 11))
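The expected values pin this down: 7^8 = 5764801 and 7^11 mod 20201227 = 17807724, i.e. repeated modular multiplication with modulus 20201227 (the Advent of Code 2020 day 25 handshake). A sketch consistent with the test:

def transform(value, subject, loop_size):
    # Repeated modular multiplication; the modulus is inferred from the
    # expected values in test_transform above.
    for _ in range(loop_size):
        value = value * subject % 20201227
    return value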
Example No. 20
    def eval_handler(self):
        # Evaluate the probability distribution.
        mcts = MCTS([self.state], self.evaluator, None)
        pi = mcts.tree_search()[0]
        # Get the predicted action sequence.
        a = mcts.get_action_sequence()

        # Evaluate the value.
        model = Model(enhance=True, imitate=True, optimize=0)
        with tf.Session() as sess:
            saver = tf.train.Saver()
            # Restore the player.
            if self.evaluator == BEST_PLAYER:
                file_name = "BestPlayer"
            else:
                file_name = "Player_" + format(self.evaluator, "05d")
            saver.restore(sess, PLAYER_DIR + file_name)

            # Transform the action sequence into state sequence.
            state_sequence, sequence_length, sequence_index = get_state_sequence(
                [self.state], a)

            # Randomly transform the state.
            n = np.random.choice(SYMMETRY)
            transformed_state = copy.deepcopy(self.state)
            transformed_state = transform(transformed_state, n)
            # Update the state sequence.
            state_sequence = [transform(x, n) for x in state_sequence]
            # Extract inputs from state.
            inputs = extract_inputs([transformed_state])
            inputs_sequence = extract_inputs(state_sequence)

            # Compute the feature sequence.
            feature_sequence = sess.run(
                model.x, feed_dict={model.Inputs: inputs_sequence})
            feature_sequence = reshape_feature_sequence(
                feature_sequence, sequence_length, sequence_index)

            # Compute the value and policy.
            feed_dict = {
                model.Inputs: inputs,
                model.Feature_Sequence: feature_sequence,
                model.Sequence_Length: sequence_length
            }
            target = [
                model.v, model.p, model.v_prime, model.p_prime, model.v_hat,
                model.p_hat
            ]
            v, p, v_prime, p_prime, v_hat, p_hat = sess.run(
                target, feed_dict=feed_dict)
            v = v[0, 0]
            p = normalize_probability(p, [transformed_state.legal_moves])
            p = p[0]
            v_prime = v_prime[0, 0]
            p_prime = normalize_probability(p_prime,
                                            [transformed_state.legal_moves])
            p_prime = p_prime[0]
            v_hat = v_hat[0, 0]
            p_hat = normalize_probability(p_hat,
                                          [transformed_state.legal_moves])
            p_hat = p_hat[0]

            print("V     = " + str(v))
            print("V'    = " + str(v_prime))
            print("V_hat = " + str(v_hat))
            print("P     = " + str(p))
            print("P'    = " + str(p_prime))
            print("P_hat = " + str(p_hat))
            print("Pi    = " + str(pi))
            print("===================")
        tf.contrib.keras.backend.clear_session()
        self.game_canvas.draw_gameboard(pi, v)
Example No. 21
    def _aux_generator(self, batch_size=16, sample_set='train', datatype = None, depthres = 256, seg_joint_res = 64):
        """ Auxiliary Generator
        Args:
            See Args section in self._generator
        """
        generated_batch = {}
        random.seed(time.time())
        generated_batch['train_img'] = np.zeros((batch_size, 256, 256, 3), dtype=np.float32)
        generated_batch['train_gtseg'] = np.zeros([batch_size, seg_joint_res, seg_joint_res], dtype = np.int8)
        generated_batch['train_gt2dheat'] = np.zeros([batch_size, seg_joint_res, seg_joint_res, self.joints_num], dtype = np.float32)
        generated_batch['train_gtjoints'] = np.zeros((batch_size, 64, 64, self.joints_num * self.Zres_joint), dtype=np.float32)
        generated_batch['train_gtdepthre'] =  np.zeros((batch_size, depthres, depthres), dtype= np.float32)
        generated_batch['train_mask'] = np.zeros([batch_size, depthres, depthres],dtype = np.bool)
        generated_batch['train_2djoints'] = np.zeros([batch_size, 2, self.joints_num ],dtype= np.float32)
        generated_batch['train_3djoints'] = np.zeros([batch_size, 3, self.joints_num ],dtype= np.float32)

        i=0
        if datatype == 'normal_dataset':
            generated_batch['normal_train_img'] = np.zeros((batch_size, self.normalres[0], self.normalres[1], 3), dtype=np.float32)
            generated_batch['normal_train_gtnormal'] = np.zeros([batch_size, self.normalres[0], self.normalres[1], 3],
                                                                dtype=np.float32)
            generated_batch['normal_train_gtdepthre'] = np.zeros((batch_size, self.normalres[0], self.normalres[1]),
                                                                 dtype=np.float32)
            generated_batch['normal_train_mask'] = np.zeros([batch_size, self.normalres[0], self.normalres[1]],
                                                            dtype=np.bool)
            while i < batch_size:
                img_name = self.filelist[self.currentindex]
                type_dir = os.path.join(self.test_dir, img_name.split('/')[-4])#random.sample(getsubfolders(self.train_dir), 1)[0])
                depth_dir = type_dir + '/depth_maps'
                normal_dir = type_dir + '/normals'

                view_type = img_name.split('/')[-2]

                depth_dir = os.path.join(depth_dir, view_type)
                normal_dir = os.path.join(normal_dir, view_type)

                index = img_name[-9:-5]
                depth_name = depth_dir + '/depth_' + index + '.npz'
                normal_name = normal_dir + '/normals_' + index + '.npz'


                bg_name = os.path.join(self.bg_dir, random.sample(os.listdir(self.bg_dir), 1)[0])
                bg_name = os.path.join(bg_name, random.sample(os.listdir(bg_name), 1)[0])

                try:
                    bg_img = io.imread(bg_name)
                except:
                    self.currentindex +=1
                    continue
                bg_img = scipy.misc.imresize(bg_img, [self.normalres[0], self.normalres[1]], interp='bilinear')
                img = io.imread(img_name)
                nmap = np.load(normal_name)['normals']
                dmap = np.load(depth_name)['depth']
                mask = dmap > 1e-4

                generated_mask = np.zeros([self.normalres[0], self.normalres[1]], dtype=np.bool)
                generated_mask[15:239, 15:239] = mask
                generated_batch['normal_train_mask'][i] = generated_mask
                img_pad = np.zeros((self.normalres[0], self.normalres[1], 3), dtype=np.uint8)
                img_pad[15: 239, 15: 239, :] = img.astype(np.float32)
                bg_img[generated_mask] = img_pad[generated_mask]

                # plt.figure()
                # plt.imshow(bg_img, aspect='auto',
                #            cmap=plt.get_cmap('jet'))
                # plt.show()

                bg_img = bg_img.astype(np.float32)
                # color augmentation
                if sample_set == 'train':
                    for j in range(3):
                        bg_img[:, :, j] = np.clip(
                            bg_img[:, :, j].astype(np.float32) / 255 * np.random.uniform(0.6, 1.4), 0.0,
                            1.0)
                else:
                    for j in range(3):
                        bg_img[:, :, j] = np.clip(bg_img[:, :, j].astype(np.float32) / 255, 0.0, 1.0)
                # print('color augmentation done!')

                # whitening rgb image
                meanstd = load_lua(self.meanRgb_dir)
                for j in range(3):
                    bg_img[:, :, j] = bg_img[:, :, j] - meanstd['mean'][j]
                    bg_img[:, :, j] = bg_img[:, :, j] / meanstd['std'][j]
                generated_batch['normal_train_img'][i,:,:,:] = bg_img

                generated_batch['normal_train_gtnormal'][i, 15:239, 15:239, :] = nmap


                if self.show:
                    plt.figure()
                    plt.imshow(generated_batch['normal_train_gtnormal'][i, :, :, 0], aspect='auto', cmap=plt.get_cmap('jet'))
                    plt.show()
                #
                # plt.figure()
                # plt.imshow(generated_batch['normal_train_gtnormal'][i, :, :, 1], aspect='auto', cmap=plt.get_cmap('jet'))
                # plt.show()
                #
                # plt.figure()
                # plt.imshow(generated_batch['normal_train_gtnormal'][i, :, :, 2], aspect='auto', cmap=plt.get_cmap('jet'))
                # plt.show()
                # print(generated_batch['normal_train_mask'].shape)
                # plt.figure()
                # plt.imshow(generated_batch['normal_train_mask'][i, :, :, 0], aspect='auto', cmap=plt.get_cmap('jet'))
                # plt.show()

                generated_batch['normal_train_gtdepthre'][i, 15:239, 15:239] = dmap

                i = i + 1

                self.currentindex+=1
                if(self.currentindex == self.datanum-1):
                    self._reset_filelist(datatype,sample_set)
            return  generated_batch


        if datatype == 'realtest':
            while i < batch_size:
                #name = random.sample(glob.glob(self.test_dir + "/*.jpg"), 1)[0]
                name = self.filelist[self.currentindex]
                testimg = io.imread(name)
                testimg = scipy.misc.imresize(testimg, [self.insize[1], self.insize[1]], interp='bilinear').astype(np.float32)
                meanstd = load_lua(self.meanRgb_dir)
                for j in range(3):
                    testimg[:, :, j] = np.clip(testimg[:, :, j].astype(np.float32) / 255.0, 0.0, 1.0)
                    testimg[:, :, j] = testimg[:, :, j] - meanstd['mean'][j]
                    testimg[:, :, j] = testimg[:, :, j] / meanstd['std'][j]
                generated_batch['train_img'][i] = cv2.resize(testimg, (self.insize[0], self.insize[1]), interpolation=cv2.INTER_NEAREST)
                i += 1
                self.currentindex += 1

                if self.show:
                    plt.figure()
                    plt.imshow(generated_batch['train_img'][0], aspect='auto', cmap=plt.get_cmap('jet'))
                    plt.show()


                if(self.currentindex == self.datanum-1):
                    self._reset_filelist('realtest','test')
            return generated_batch

        while i < batch_size:
            if datatype != 'detail_data' and datatype != 'up-3d':
                name = self.filelist[self.currentindex]
                #name = '/home/sicong/surreal/data/SURREAL/data/cmu/train/run1/ung_91_33/ung_91_33_c0001.mp4'
                cap = cv2.VideoCapture(name)
                length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                frameindex = random.randint(1, length)
                #frameindex = 82
                if(sample_set == 'test'):
                    print('test file: ',name, 'frameindex: ', frameindex)
                cap.set(1, frameindex - 1)
                _, img_full = cap.read()
                try:
                    img_full = cv2.cvtColor(img_full, cv2.COLOR_BGR2RGB)
                except:
                    continue
                bodyinfo = sio.loadmat(name[0:-4] + '_info.mat')

            elif datatype == 'detail_data':
                name = self.filelist[self.currentindex]
                frameindex = name[-12:-8]
                # name = '/home/sicong/detail_data/data/3/0235_rgb.png'
                try:
                    img_full = io.imread(name)
                except:
                    self.currentindex += 1
                    continue

            elif datatype == 'up-3d':
                if sample_set == 'train':
                    info_dir = self.train_dir + '/pose_prepared/91/500/up-p91/'
                    seg_dir = self.train_dir + '/segment/up-s31/s31/'
                elif sample_set == 'valid':
                    info_dir = self.valid_dir + '/pose_prepared/91/500/up-p91/'
                    seg_dir = self.valid_dir + '/segment/up-s31/s31/'
                elif sample_set == 'test':
                    info_dir = self.test_dir + '/pose_prepared/91/500/up-p91/'
                    seg_dir = self.test_dir + '/segment/up-s31/s31/'

                name = self.filelist[self.currentindex]
                if(sample_set == 'test'):
                    print('test file: ',name)
                # name = '/media/sicong/a86d93af-1a2e-469b-972c-f819c47cd5ee/datasets/pose_prepared/91/500/up-p91/04877_image.png'
                frameindex = name[-15:-10]

                try:
                    img_full = io.imread(name)
                except:
                    self.currentindex +=1
                    continue
                try:
                    bodyinfo = sio.loadmat(info_dir + frameindex+ '_info.mat')
                except:
                    self.currentindex += 1
                    continue
            if self.show:
                img = Image.fromarray(img_full, 'RGB')
                img.show()

            if datatype != 'detail_data':
                # load 2d joints to determine the bounding box
                # [2 x njoints]
                if datatype != 'up-3d':
                    if bodyinfo is None:
                        self.currentindex += 1
                        continue
                    joints2dfull = bodyinfo['joints2D']
                    if joints2dfull is None:
                        self.currentindex += 1
                        continue
                    if len(joints2dfull.shape) < 3:
                        self.currentindex += 1
                        continue
                    if frameindex - 1 >= joints2dfull.shape[2]:
                        self.currentindex += 1
                        continue
                    joints2d = joints2dfull[:, self.joints_subset, frameindex - 1].astype(np.int64)

                    joints3dfull = bodyinfo['joints3D']
                    if joints3dfull is None:
                        self.currentindex += 1
                        continue
                    if frameindex - 1 >= joints2dfull.shape[2]:
                        self.currentindex += 1
                        continue
                    joints3d = joints3dfull[:, self.joints_subset, frameindex - 1]

                    generated_batch['train_2djoints'][i,:] = joints2d
                    generated_batch['train_3djoints'][i,:] = joints3d

                    depth_full = sio.loadmat(name[0:-4] + '_depth.mat')['depth_' + str(frameindex)]
                elif datatype == 'up-3d':
                    if bodyinfo is None:
                        self.currentindex += 1
                        continue
                    joints2dfull = bodyinfo['joints2D']
                    if joints2dfull is None:
                        self.currentindex += 1
                        continue
                    if len(joints2dfull.shape) < 2:
                        self.currentindex += 1
                        continue
                    joints2d = joints2dfull[:, self.joints_subset].astype(np.int64)
                    joints3dfull = np.transpose(bodyinfo['joints3D'])
                    if joints3dfull is None:
                        self.currentindex += 1
                        continue
                    joints3d = joints3dfull[:, self.joints_subset]

                    depth_full = sio.loadmat(info_dir + frameindex+ '_depth.mat')['depth']

                #set pelvis as the original point
                camLoc = bodyinfo['camLoc'][0]
                if datatype == 'up-3d':
                    # camlocation = camLoc[2]
                    # joints3d[2, :] = camlocation - joints3d[2, :]
                    dPelvis = joints3d[2, 0]
                else:
                    camlocation = camLoc
                    joints3d[0, :] = camlocation - joints3d[0, :]
                    dPelvis = joints3d[0, 0]

                if datatype != 'up-3d':
                    segm_raw = sio.loadmat(name[0:-4] + '_segm.mat')['segm_'+str(frameindex)]

                    segm_full = util.changeSegmIx(segm_raw,
                                                  [2, 12, 9, 2, 13, 10, 2, 14, 11, 2, 14, 11, 2, 2, 2, 1, 6, 3, 7, 4, 8,
                                                   5, 8,
                                                   5]).astype(np.int8)

                else:
                    segm_raw = cv2.imread(seg_dir+ frameindex + '_ann_vis.png')
                    segm_full = util.up3dtosurreal(segm_raw)

                if self.show:
                    plt.figure()
                    plt.imshow(segm_full, aspect='auto', cmap=plt.get_cmap('jet'))
                    plt.show()

                if datatype == 'up-3d':
                    quantized_joints3d, _ = util.quantize(joints3d[2, :], dPelvis, self.stp_joint, self.Zres_joint)
                    quantized_joints3d = quantized_joints3d * -1
                    relative_depth, _ = util.relative_up3d(depth_full, dPelvis, self.stp, self.Zres)  # self.halfrange
                elif datatype != 'detail_data':
                    quantized_joints3d, _ = util.quantize(joints3d[0, :], dPelvis, self.stp_joint, self.Zres_joint)
                    quantized_joints3d = quantized_joints3d * -1
                    relative_depth, _ = util.relative(depth_full,dPelvis, self.stp, self.Zres) #self.halfrange

                # TODO: 1. resize quantized_depth 2. output dense continuous relative depth in util.quantize
                if self.show:
                    plt.figure()
                    plt.imshow(depth_full, aspect='auto', cmap=plt.get_cmap('jet'))
                    plt.show()
                if self.show:
                    plt.figure()
                    plt.imshow(relative_depth, aspect='auto', cmap=plt.get_cmap('jet'))
                    plt.show()
            else:
                depth_full = io.imread(name[0:-8] + '_depth.png')
                depthcount = np.sum(depth_full > 100)
                if depthcount < 100 * 100:
                    self.currentindex += 1
                    continue

            if datatype != 'detail_data':
                # crop, scale
                rot = 0
                scale = util.getScale(joints2d)
                center = util.getCenter(joints2d)
            else:
                # crop, scale
                rot = 0
                scale = util.getScale_detail(depth_full)
                center = util.getCenter_detail(depth_full)

            if (center[0] < 1 or center[1] < 1 or center[1] > img_full.shape[0] or center[0] > img_full.shape[1]):
                self.currentindex +=1
                continue


## for rgb image
            if datatype != 'up-3d' and datatype!= 'detail_data':
                img = util.cropfor3d(img_full, center, scale, rot, self.insize[1], 'bilinear')
            elif datatype == 'detail_data':
                img = util_detail.cropfor3d(img_full, center, scale, rot, self.insize[1], 'bilinear')
            elif datatype == 'up-3d':
                norm_factor = np.array([self.insize[1]/img_full.shape[1], self.insize[1]/img_full.shape[0]], dtype=np.float32)
                img = scipy.misc.imresize(img_full, [self.insize[1], self.insize[1]], interp= 'bilinear')
                badexample = False
                for j in range(joints2d.shape[1]):
                    joints2d_rescaled = np.multiply(joints2d[:,j],norm_factor).astype(np.int64)
                    if joints2d_rescaled[0] < 0 or joints2d_rescaled[0] > 256 or joints2d_rescaled[1] < 0 or joints2d_rescaled[1] > 256:
                        badexample = True
                if badexample:
                    self.currentindex += 1
                    continue

            if img is None:
                self.currentindex+=1
                continue
            if (img.shape[0] == 0 or img.shape[1] == 0):
                self.currentindex+=1
                continue

            if self.show:
                imgnew = Image.fromarray(img, 'RGB')
                imgnew.show()

            # color augmentation
            img_bak = img
            img = img.astype(np.float32)
            if sample_set == 'train':
                for j in range(3):
                    img[:, :, j] = np.clip(img[:, :, j].astype(np.float32) / 255 * np.random.uniform(0.6, 1.4), 0.0,
                                           1.0)
            else:
                for j in range(3):
                    img[:, :, j] = np.clip(img[:, :, j].astype(np.float32) / 255, 0.0, 1.0)
            # print('color augmentation done!')

            # whitening rgb image
            meanstd = load_lua(self.meanRgb_dir)
            for j in range(3):
                img[:, :, j] = img[:, :, j] - meanstd['mean'][j]
                img[:, :, j] = img[:, :, j] / meanstd['std'][j]

            generated_batch['train_img'][i] = img

## for depth
            if datatype == 'detail_data':
                depm_continue = util_detail.cropfor3d(depth_full,center,scale,rot,self.insize[1],'bilinear')

                if self.show:
                    plt.figure()
                    plt.imshow(depth_full, aspect='auto', cmap=plt.get_cmap('jet'))
                    plt.show()

                if self.show:
                    plt.figure()
                    plt.imshow(depm_continue, aspect='auto', cmap=plt.get_cmap('jet'))
                    plt.show()
                mask =depm_continue>100
                depm_continue[depm_continue < 100] = 15 * 1000.0
                final_depth = depm_continue/1000.0
                median_value =np.median(final_depth[final_depth<5])
                final_depth = final_depth - median_value + 0.10
                final_depth[final_depth>5] = 0.60
                generated_batch['train_gtdepthre'][i, :, :] = final_depth

                mask = ndimage.binary_erosion(mask).astype(mask.dtype)
                generated_batch['train_mask'][i,:,:] = mask

            elif datatype == 'up-3d':
                depm_continue = cv2.resize(relative_depth.astype(np.float32), (depthres, depthres), interpolation=cv2.INTER_NEAREST)
                generated_batch['train_gtdepthre'][i, :, :] = depm_continue
                mask = depm_continue<0.59

                mask = ndimage.binary_erosion(mask).astype(mask.dtype)
                generated_batch['train_mask'][i,:,:] = mask

            else:
                depm_continue = util.cropdepth(relative_depth,center,scale,rot,self.insize[1],0.60)
                generated_batch['train_gtdepthre'][i, :, :] = cv2.resize(depm_continue,(depthres, depthres),interpolation=cv2.INTER_NEAREST)
                mask = depm_continue<0.59

                mask = ndimage.binary_erosion(mask).astype(mask.dtype)
                generated_batch['train_mask'][i,:,:] = mask




            if self.show:
                plt.figure()
                plt.imshow(generated_batch['train_gtdepthre'][i, :, :], aspect='auto', cmap=plt.get_cmap('jet'))
                plt.show()

            # if self.show:
            #     plt.figure()
            #     plt.imshow(mask, aspect='auto', cmap=plt.get_cmap('jet'))
            #     plt.show()

## for 2d segmentation

            if datatype == 'up-3d':
                segm = cv2.resize(segm_full, (seg_joint_res, seg_joint_res),
                                                            interpolation=cv2.INTER_NEAREST)
                generated_batch['train_gtseg'][i,:,:] = segm

            elif datatype != 'detail_data':
                segm = util.cropfor3d(segm_full, center, scale, rot, self.insize[1],'nearest')
                generated_batch['train_gtseg'][i,:,:] = cv2.resize(segm, (seg_joint_res, seg_joint_res),
                                                            interpolation=cv2.INTER_NEAREST)
                if self.show:
                    plt.figure()
                    plt.imshow(segm, aspect='auto', cmap=plt.get_cmap('jet'))
                    plt.show()

## for 2d joints

            if datatype != 'detail_data':
                # TODO: create 2d heatmaps
                sigma_2d_inscale = math.floor(2 * self.insize[0]/self.outsize[0])
                out_2d = np.zeros([self.insize[0], self.insize[1], self.joints_num])

                for j in range(self.joints_num):
                    if datatype == 'up-3d':
                        #pt = util.transform(joints2d[:, j], center, scale, 0, self.insize[0], False)
                        pt = np.multiply(joints2d[:, j], norm_factor).astype(np.int64)
                        # print('joints: ', joints2d[:, j], 'pt: ', pt)
                    else:
                        pt = util.transform(joints2d[:, j], center, scale, 0, self.insize[0], False)
                    heat_slice = util.Drawgaussian2D(img,pt,sigma_2d_inscale)
                    # if np.sum(heat_slice) > 1e-2:
                    #     heat_slice /= np.sum(heat_slice)
                    # else:
                    #     heat_slice *= 0
                    #print('heat_slice.shape',heat_slice.shape)

                    out_2d[:, :, j] = heat_slice
                    # if self.show:
                    #     plt.figure()
                    #     plt.imshow(heat_slice, aspect='auto', cmap=plt.get_cmap('jet'))
                    #     plt.show()

                out_2d = cv2.resize(out_2d,(seg_joint_res,seg_joint_res),interpolation=cv2.INTER_NEAREST)
                generated_batch['train_gt2dheat'][i] = out_2d
                if self.show:
                    # img4show = img
                    # for j in range(3):
                    #     img4show[:, :, j] = img4show[:, :, j] - meanstd['mean'][j]
                    #     img4show[:, :, j] = img4show[:, :, j] / meanstd['std'][j]
                    # img4show = img4show * 255.0
                    visualizer.draw2dskeleton(img_bak.astype(np.uint8), out_2d)

            
# for 3d joints

            #print('draw3d---------------------------------------------------')
            if datatype != 'detail_data':
                out = np.zeros([self.outsize[0], self.outsize[1], self.joints_num * self.Zres_joint])
                sigma_2d = 2
                size_z = 2 * math.floor((6* sigma_2d * self.Zres_joint / self.outsize[0] +1) / 2) + 1
                for j in range(self.joints_num):
                    #if joints2d[1,j] >= img_full.shape[0] or joints2d[0,j] >=img_full.shape[1] or joints2d[1,j]<0 or joints2d[0,j]<0:
                        #continue
                    z = quantized_joints3d[j]
                    if datatype == 'up-3d':
                        pt = np.multiply(joints2d[:, j], norm_factor/4).astype(np.int64)
                    else:
                        pt = util.transform(joints2d[:, j], center, scale, 0, self.outsize[0], False)
                    out[:,:,j * self.Zres_joint : (j+1) * self.Zres_joint] = util.Drawguassian3D(out[:,:,j * self.Zres_joint : (j+1) * self.Zres_joint], pt, z , sigma_2d, size_z)

                generated_batch['train_gtjoints'][i] = out
                if self.show:
                    visualizer.draw3dskeleton(self.joints_num,self.Zres_joint,out)
            i = i+1
            self.currentindex +=1
            if(self.currentindex==self.datanum-1):
                self._reset_filelist(datatype,sample_set)


        return generated_batch
Example No. 22
	def render(self,state):
		util.transform(state, EditLinks.xslt_file, EditLinks.xml_file)
Example No. 23
	def render(self,state):
		util.transform(state, Header.xslt_file, Header.xml_file)
Example No. 24
	def render(self,state):
		util.transform(state, Month.xslt_file, Month.xml_file)
Example No. 25
    misses = 0
    avg_iou = 0
    prg_counter = 0
    train_counter = 0
    avg_conf = 0
    avg_no_conf = 0
    avg_pos = 0
    avg_neg = 0
    for images, targets, img_names in dataloader:
        train_counter = train_counter + 1
        optimizer.zero_grad()
        #         targets,anchors,offset,strd,mask=helper.collapse_boxes(targets,pw_ph,cx_cy,stride)
        images = images.cuda()
        raw_pred = model(images, torch.cuda.is_available())
        #         raw_pred=helper.expand_predictions(raw_pred,mask)
        true_pred = util.transform(raw_pred.clone().detach(), pw_ph, cx_cy,
                                   stride)

        targets, anchors, offset, strd, mask = helper.collapse_boxes(
            targets, pw_ph, cx_cy, stride)
        targets = targets.cuda()
        fall_into_mask = util.get_fall_into_mask(targets, offset, strd,
                                                 inp_dim)
        resp_raw_pred, resp_true_pred, resp_anchors, resp_offset, resp_strd = util.build_tensors(
            raw_pred, true_pred, anchors, offset, strd, fall_into_mask, mask)

        iou, iou_mask = util.get_iou_mask(targets, resp_true_pred, inp_dim,
                                          hyperparameters)
        iou = iou.T.max(dim=1)[0].mean().item()
        no_obj_mask = util.get_noobj(true_pred, targets, fall_into_mask, mask,
                                     hyperparameters, inp_dim)
        k = 0
Example No. 26
def get_dets(training_manager,
             detector,
             image,
             resize_ratio,
             num_rois=64,
             stride=16,
             det_threshold=DEFAULT_DET_THRESHOLD):
    conv_out, rois = training_manager.get_det_inputs(image)
    class_mapping = training_manager.class_mapping
    rev_class_mapping = dict((v, k) for k, v in class_mapping.items())

    num_boxes = rois.shape[0]
    print("num rois: {}".format(num_boxes))
    # remove this later
    # num_boxes = 64

    num_batches = num_boxes // num_rois + 1 if num_boxes % num_rois != 0 else num_boxes // num_rois
    bg_idx = class_mapping['bg']

    bboxes_by_cls = {}
    probs_by_cls = {}

    for batch_num in range(num_batches):
        start_idx = batch_num * num_rois
        end_idx = start_idx + num_rois
        batch_rois = rois[start_idx:end_idx, :]

        if batch_num == num_batches - 1:
            # add repeat ROIs because the network expects exactly 64 in a batch
            num_to_add = num_rois - batch_rois.shape[0]
            extra_rois = np.tile(batch_rois[0], (num_to_add, 1))
            batch_rois = np.concatenate([batch_rois, extra_rois])

        batch_rois = np.expand_dims(batch_rois, axis=0)
        out_cls, out_reg = detector.predict([conv_out, batch_rois])

        for roi_idx in range(num_rois):
            cls_idx = np.argmax(out_cls[0, roi_idx])
            confidence = out_cls[0, roi_idx, cls_idx]
            # print(batch_rois[0, roi_idx])
            # print(cls_idx)
            # print(confidence)
            if cls_idx == bg_idx or confidence < det_threshold:
                continue

            cls_name = rev_class_mapping[cls_idx]
            if cls_name not in bboxes_by_cls:
                bboxes_by_cls[cls_name] = []
                probs_by_cls[cls_name] = []

            x1, y1, x2, y2 = batch_rois[0, roi_idx]
            tx, ty, tw, th = out_reg[0, roi_idx, cls_idx * 4:(cls_idx + 1) *
                                     4] / BBREG_MULTIPLIERS

            px1, py1, px2, py2 = transform([x1, y1, x2, y2], [tx, ty, tw, th])
            bboxes_by_cls[cls_name].append(
                [stride * px1, stride * py1, stride * px2, stride * py2])
            probs_by_cls[cls_name].append(out_cls[0, roi_idx, cls_idx])

    dets = []
    for cls_name in bboxes_by_cls:
        bboxes = np.array(bboxes_by_cls[cls_name])
        probs = np.array(probs_by_cls[cls_name])
        new_boxes, new_probs = nms(bboxes,
                                   probs,
                                   overlap_thresh=0.5,
                                   max_boxes=2000)
        for box_idx in range(new_boxes.shape[0]):
            x1, y1, x2, y2 = new_boxes[box_idx, :]
            real_x1 = int(round(x1 / resize_ratio))
            real_y1 = int(round(y1 / resize_ratio))
            real_x2 = int(round(x2 / resize_ratio))
            real_y2 = int(round(y2 / resize_ratio))

            det_obj = {
                'bbox': np.array([real_x1, real_y1, real_x2, real_y2]),
                'cls_name': cls_name,
                'prob': new_probs[box_idx]
            }
            dets.append(det_obj)

    return dets
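The call transform([x1, y1, x2, y2], [tx, ty, tw, th]) applies class-specific regression deltas to an ROI. A sketch assuming the standard Faster R-CNN box parameterization (corners in, corners out):

import numpy as np

def transform(box, deltas):
    # Hypothetical: shift the box center by (tx, ty) in units of its size,
    # rescale width/height by exp(tw), exp(th), and return corner coords.
    x1, y1, x2, y2 = box
    tx, ty, tw, th = deltas
    w, h = x2 - x1, y2 - y1
    cx, cy = x1 + w / 2.0 + tx * w, y1 + h / 2.0 + ty * h
    w, h = w * np.exp(tw), h * np.exp(th)
    return cx - w / 2.0, cy - h / 2.0, cx + w / 2.0, cy + h / 2.0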
Example No. 27
def train_one_epoch(model, optimizer, dataloader, hyperparameters, mode):
    model.train()

    if (mode['show_temp_summary'] == True):
        writer = SummaryWriter('../tensorboard/test_vis/')
    epoch = hyperparameters['resume_from']

    if type(model) is nn.DataParallel:
        inp_dim = model.module.inp_dim
        pw_ph = model.module.pw_ph
        cx_cy = model.module.cx_cy
        stride = model.module.stride
    else:
        inp_dim = model.inp_dim
        pw_ph = model.pw_ph
        cx_cy = model.cx_cy
        stride = model.stride

    coco_version = hyperparameters['coco_version']

    pw_ph = pw_ph.cuda()
    cx_cy = cx_cy.cuda()
    stride = stride.cuda()

    break_flag = 0
    dataset_len = len(dataloader.dataset)
    batch_size = dataloader.batch_size
    total_loss = 0
    avg_iou = 0
    prg_counter = 0
    train_counter = 0
    avg_conf = 0
    avg_no_conf = 0
    avg_pos = 0
    avg_neg = 0
    for images, targets in dataloader:
        train_counter = train_counter + 1
        prg_counter = prg_counter + 1
        optimizer.zero_grad()
        images = images.cuda()

        if mode['debugging'] == True:
            with autograd.detect_anomaly():
                raw_pred = model(images, torch.cuda.is_available())
        else:
            raw_pred = model(images, torch.cuda.is_available())
            if (torch.isinf(raw_pred).sum() > 0):
                break_flag = 1
                break

        true_pred = util.transform(raw_pred.clone().detach(), pw_ph, cx_cy,
                                   stride)
        iou_list = util.get_iou_list(true_pred, targets, hyperparameters,
                                     inp_dim)

        resp_raw_pred, resp_cx_cy, resp_pw_ph, resp_stride, no_obj = util.build_tensors(
            raw_pred, iou_list, pw_ph, cx_cy, stride, hyperparameters)

        stats = helper.get_progress_stats(true_pred, no_obj, iou_list, targets)

        if hyperparameters['wasserstein'] == True:
            no_obj = util.get_wasserstein_matrices(raw_pred, iou_list, inp_dim)

        if mode['debugging'] == True:
            with autograd.detect_anomaly():
                loss = util.yolo_loss(resp_raw_pred, targets, no_obj,
                                      resp_pw_ph, resp_cx_cy, resp_stride,
                                      inp_dim, hyperparameters)
        elif mode['bayes_opt'] == True:
            try:
                loss = util.yolo_loss(resp_raw_pred, targets, no_obj,
                                      resp_pw_ph, resp_cx_cy, resp_stride,
                                      inp_dim, hyperparameters)
            except RuntimeError:
                #                 print('bayes opt failed')
                break_flag = 1
                break
        else:
            loss = util.yolo_loss(resp_raw_pred, targets, no_obj, resp_pw_ph,
                                  resp_cx_cy, resp_stride, inp_dim,
                                  hyperparameters)
        loss.backward()
        optimizer.step()

        avg_conf = avg_conf + stats['pos_conf']
        avg_no_conf = avg_no_conf + stats['neg_conf']
        avg_pos = avg_pos + stats['pos_class']
        avg_neg = avg_neg + stats['neg_class']
        total_loss = total_loss + loss.item()
        avg_iou = avg_iou + stats['iou']

        if mode['show_output'] == True:
            sys.stdout.write('\rPgr:' + str(prg_counter / dataset_len * 100 *
                                            batch_size) + '%'
                             ' L:' + str(loss.item()))
            sys.stdout.write(' IoU:' + str(stats['iou']) + ' pob:' +
                             str(stats['pos_conf']) + ' nob:' +
                             str(stats['neg_conf']))
            sys.stdout.write(' PCls:' + str(stats['pos_class']) + ' ncls:' +
                             str(stats['neg_class']))
            sys.stdout.flush()

        if (mode['show_temp_summary'] == True):
            writer.add_scalar('AvLoss/train', total_loss / train_counter,
                              train_counter)
            writer.add_scalar('AvIoU/train', avg_iou / train_counter,
                              train_counter)
            writer.add_scalar('AvPConf/train', avg_conf / train_counter,
                              train_counter)
            writer.add_scalar('AvNConf/train', avg_no_conf / train_counter,
                              train_counter)
            writer.add_scalar('AvClass/train', avg_pos / train_counter,
                              train_counter)
            writer.add_scalar('AvNClass/train', avg_neg / train_counter,
                              train_counter)

    total_loss = total_loss / train_counter
    avg_iou = avg_iou / train_counter
    avg_pos = avg_pos / train_counter
    avg_neg = avg_neg / train_counter
    avg_conf = avg_conf / train_counter
    avg_no_conf = avg_no_conf / train_counter

    outcome = {
        'avg_loss': total_loss,
        'avg_iou': avg_iou,
        'avg_pos': avg_pos,
        'avg_neg': avg_neg,
        'avg_conf': avg_conf,
        'avg_no_conf': avg_no_conf,
        'broken': break_flag
    }

    return outcome
Example No. 28
	def render(self,state):
		util.transform(state, Language.xslt_file, Language.xml_file)
Example No. 29
    def evaluator_thread(self, q_main, q_forward, q_feedback):
        length = len(self.root_state)
        index_list = []
        state_list = []
        signal = np.zeros(length, np.int16)

        if USE_GPU:
            # Additional configurations for running on GPU.
            os.environ["CUDA_VISIBLE_DEVICES"] = VISIBLE_DEVICE_MCTS
            gpu_options = tf.GPUOptions(
                per_process_gpu_memory_fraction=MEMORY_MCTS)
            gpu_config = tf.ConfigProto(gpu_options=gpu_options)
        else:
            gpu_config = None

        # Build models to evaluate p and v.
        model = Model(enhance=False,
                      imitate=self.advanced_evaluation,
                      optimize=0)
        with tf.Session(config=gpu_config) as sess:
            saver = tf.train.Saver()
            # Restore the player.
            if self.player == BEST_PLAYER:
                file_name = "BestPlayer"
            else:
                file_name = "Player_" + format(self.player, "05d")
            saver.restore(sess, self.parent_dir + PLAYER_DIR + file_name)

            # Read the first package from the main thread to evaluate root nodes.
            state = q_forward.get()
            if state is not None:
                transformed_state = copy.deepcopy(state)
                transformed_state = [
                    transform(x, np.random.choice(SYMMETRY))
                    for x in transformed_state
                ]
                inputs = extract_inputs(transformed_state)
                if self.advanced_evaluation:
                    p, v = sess.run([model.p_hat, model.v_hat],
                                    feed_dict={model.Inputs: inputs})
                else:
                    p, v = sess.run([model.p, model.v],
                                    feed_dict={model.Inputs: inputs})
                legal_moves = [x.legal_moves for x in transformed_state]
                p = normalize_probability(p, legal_moves)
                # The output p is a list of vectors, each of which indicates the probability of each possible move.
                # The output v is a column vector of shape (len(state), 1), which predicts the return of the game.
                q_main.put((p, v))
            else:
                q_main.put(None)

            while np.any(self.step < self.num_simulations):
                # Ready to read data.
                i, j, msg, state = q_forward.get()
                if msg == "Eval":
                    # Normal package that contains a state to be evaluated.
                    index_list.append([i, j])
                    state_list.append(state)
                    signal[i] += 1
                elif msg == "Blocked":
                    # Special case: the thread is blocked.
                    self.blocked[i] += 1
                elif msg == "Unblocked":
                    # Remove the block signal.
                    self.blocked[i] -= 1
                elif msg == "Terminal":
                    # Special case: the thread reached a terminal node.
                    self.step[i] += 1

                if np.all(
                        np.logical_or(
                            self.blocked + signal == self.search_threads,
                            self.step + self.blocked +
                            signal == self.num_simulations)):
                    if len(state_list) > 0:
                        # Evaluate all states in the list when all threads have sent packages or when the search ends.
                        transformed_state = copy.deepcopy(state_list)
                        transformed_state = [
                            transform(x, np.random.choice(SYMMETRY))
                            for x in transformed_state
                        ]
                        inputs = extract_inputs(transformed_state)
                        if self.advanced_evaluation:
                            p, v = sess.run([model.p_hat, model.v_hat],
                                            feed_dict={model.Inputs: inputs})
                        else:
                            p, v = sess.run([model.p, model.v],
                                            feed_dict={model.Inputs: inputs})
                        legal_moves = [
                            x.legal_moves for x in transformed_state
                        ]
                        p = normalize_probability(p, legal_moves)
                        # Return p and v to the corresponding thread.
                        for k in range(len(state_list)):
                            self.step[index_list[k][0]] += 1
                            q_feedback[index_list[k][0]][index_list[k][1]].put(
                                (p[k], v[k]))
                        index_list = []
                        state_list = []
                    signal = np.zeros(length, np.int16)
        tf.contrib.keras.backend.clear_session()
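For context, the search threads feeding these queues are expected to speak the package protocol handled above. A minimal sketch of the sender side, assuming the same (i, j, tag, state) tuple layout; the helper names are illustrative, not part of the original code:

# Hypothetical sender-side helpers matching the protocol consumed by evaluator_thread.
def request_evaluation(q_forward, q_feedback, i, j, state):
    # Ask the evaluator to score `state`; block until (p, v) comes back.
    q_forward.put((i, j, "Eval", state))
    return q_feedback[i][j].get()

def mark_terminal(q_forward, i, j):
    # Report that thread j of game i reached a terminal node (state unused).
    q_forward.put((i, j, "Terminal", None))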
Example no. 30
        raw_pred = model(sample_batched['image'], torch.cuda.is_available())

        target = sample_batched['bbox_coord'].unsqueeze(-3)
        target = target.to(device='cuda')
        for b in range(sample_batched['image'].shape[0]):
            if write == 0:
                anchors = pw_ph
                offset = cx_cy
                strd = stride
                write = 1
            else:
                anchors = torch.cat((anchors, pw_ph), 0).to(device='cuda')
                offset = torch.cat((offset, cx_cy), 0).to(device='cuda')
                strd = torch.cat((strd, stride), 0).to(device='cuda')

        true_pred = util.transform(raw_pred.clone(), anchors, offset, strd)
        iou_mask, noobj_mask = util.get_responsible_masks(
            true_pred, target, offset, stride)

        iou = torch.diag(
            util.bbox_iou(
                util.get_abs_coord(true_pred[iou_mask.T, :].unsqueeze(-3)),
                target)).mean().item()

        noobj_box = raw_pred[:, :, 4:5].clone()
        conf = noobj_box[iou_mask.T, :].mean().item()

        noobj_box = noobj_box[noobj_mask.T, :]
        no_obj_conf = noobj_box.mean().item()

        raw_pred = raw_pred[iou_mask.T, :]
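The write-flag loop above only tiles the anchor tensors once per image in the batch; an equivalent one-step formulation, assuming the same tensor shapes (a sketch, not the original code):

# Sketch: tile the anchor tensors for the whole batch in one concatenation each.
B = sample_batched['image'].shape[0]
anchors = torch.cat([pw_ph] * B, 0).to(device='cuda')
offset = torch.cat([cx_cy] * B, 0).to(device='cuda')
strd = torch.cat([stride] * B, 0).to(device='cuda')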
Example no. 31
def predictor(essay_title=None, essay_text=None, module=None):
    if empty(essay_title) or empty(essay_text) or empty(module):
        return '0.0'

    result = modules[module].predict(transform(np.array([essay_text]), module))
    return "{0}".format(round(adjust_score(result[0], essay_text), 2))
Example no. 32
def test(testloader, model, epoch, device):
    # FIXME remove this and make paste_masks_in_image run on the GPU
    cpu_device = torch.device("cpu")

    batch_time = AverageMeter()
    data_time = AverageMeter()

    hyperparameters = model.hp
    confidence = hyperparameters['inf_confidence']
    iou_threshold = hyperparameters['inf_iou_threshold']

    if isinstance(model, nn.DataParallel):
        inp_dim = model.module.inp_dim
        pw_ph = model.module.pw_ph
        cx_cy = model.module.cx_cy
        stride = model.module.stride
    else:
        inp_dim = model.inp_dim
        pw_ph = model.pw_ph
        cx_cy = model.cx_cy
        stride = model.stride

    pw_ph = pw_ph.to(device)
    cx_cy = cx_cy.to(device)
    stride = stride.to(device)

    sys.stdout = open(os.devnull, 'w')  #wrapper to disable hardcoded printing
    coco = coco_utils.get_coco_api_from_dataset(testloader.dataset)
    iou_types = ["bbox"]
    coco_evaluator = coco_eval.CocoEvaluator(coco, iou_types)
    sys.stdout = sys.__stdout__  #wrapper to enable hardcoded printing (return to normal mode)

    # switch to evaluate mode
    model.eval()

    end = time.time()
    with torch.no_grad():
        for batch_idx, (images, targets) in enumerate(testloader):
            # measure data loading time
            data_time.update(time.time() - end)

            images = images.to(device)

            targets2 = []
            for t in targets:
                dd = {}
                for k, v in t.items():
                    if k != 'img_size':
                        dd[k] = v.to(device)
                    else:
                        dd[k] = v
                targets2.append(dd)

            targets = targets2

            raw_pred = model(images, device)

            true_pred = util.transform(raw_pred.clone().detach(), pw_ph, cx_cy,
                                       stride)

            sorted_pred = torch.sort(true_pred[:, :, 4] *
                                     (true_pred[:, :, 5:].max(axis=2)[0]),
                                     descending=True)
            pred_mask = sorted_pred[0] > confidence
            indices = [(sorted_pred[1][e, :][pred_mask[e, :]])
                       for e in range(pred_mask.shape[0])]
            pred_final = [
                true_pred[i, indices[i], :] for i in range(len(indices))
            ]

            pred_final_coord = [
                util.get_abs_coord(pred_final[i].unsqueeze(-2))
                for i in range(len(pred_final))
            ]

            indices = [
                nms_box.nms(pred_final_coord[i][0], pred_final[i][:, 4],
                            iou_threshold) for i in range(len(pred_final))
            ]
            pred_final = [
                pred_final[i][indices[i], :] for i in range(len(pred_final))
            ]

            abs_pred_final = [
                helper.convert2_abs_xyxy(pred_final[i], targets[i]['img_size'],
                                         inp_dim)
                for i in range(len(pred_final))
            ]

            outputs = [dict() for i in range(len((abs_pred_final)))]
            for i, atrbs in enumerate(abs_pred_final):

                outputs[i]['boxes'] = atrbs[:, :4]
                outputs[i]['scores'] = pred_final[i][:, 4]
                try:
                    outputs[i]['labels'] = pred_final[i][:, 5:].max(
                        axis=1)[1] + 1  # could be empty
                except Exception:
                    outputs[i]['labels'] = torch.tensor([])

            outputs = [{k: v.to(cpu_device)
                        for k, v in t.items()} for t in outputs]

            res = {
                target["image_id"].item(): output
                for target, output in zip(targets, outputs)
            }
            coco_evaluator.update(res)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

    sys.stdout = open(os.devnull, 'w')  #wrapper to disable hardcoded printing

    coco_evaluator.synchronize_between_processes()
    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()

    metrics = coco_evaluator.get_stats()

    sys.stdout = sys.__stdout__  #wrapper to enable hardcoded printing (return to normal mode)

    coco_stats = {
        'map_all': metrics[0],
        'map@0.5': metrics[1],
        'map@0.75': metrics[2],
        'map_small': metrics[3],
        'map_med': metrics[4],
        'map_large': metrics[5],
        'recall@1': metrics[6],
        'recall@10': metrics[7],
        'recall@100': metrics[8],
        'recall@small': metrics[9],
        'recall@medium': metrics[10],
        'recall@large': metrics[11]
    }

    track.metric(iteration=0, epoch=epoch, coco_stats=coco_stats)

    return (metrics[0])
Example no. 33
def train(trainloader, model, optimizer, epoch, cuda=True):
    # switch to train mode
    model.train()
    hyperparameters = model.hp
    mode = model.mode

    if isinstance(model, nn.DataParallel):
        inp_dim = model.module.inp_dim
        pw_ph = model.module.pw_ph
        cx_cy = model.module.cx_cy
        stride = model.module.stride
    else:
        inp_dim = model.inp_dim
        pw_ph = model.pw_ph
        cx_cy = model.cx_cy
        stride = model.stride

    if cuda:
        pw_ph = pw_ph.cuda()
        cx_cy = cx_cy.cuda()
        stride = stride.cuda()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    avg_loss = AverageMeter()
    avg_iou = AverageMeter()
    avg_conf = AverageMeter()
    avg_no_conf = AverageMeter()
    avg_pos = AverageMeter()
    avg_neg = AverageMeter()
    end = time.time()
    break_flag = 0

    if mode['show_temp_summary']:
        writer = SummaryWriter(os.path.join(track.trial_dir(), 'temp_vis/'))

    for batch_idx, (inputs, targets) in enumerate(trainloader):
        # measure data loading time
        data_time.update(time.time() - end)

        if cuda:
            inputs = inputs.cuda()

        # compute output
        raw_pred = model(inputs, torch.cuda.is_available())
        true_pred = util.transform(raw_pred.clone().detach(), pw_ph, cx_cy,
                                   stride)
        iou_list = util.get_iou_list(true_pred, targets, hyperparameters,
                                     inp_dim)

        resp_raw_pred, resp_cx_cy, resp_pw_ph, resp_stride, no_obj = util.build_tensors(
            raw_pred, iou_list, pw_ph, cx_cy, stride, hyperparameters)

        stats = helper.get_progress_stats(true_pred, no_obj, iou_list, targets)
        if hyperparameters['wasserstein']:
            no_obj = util.get_wasserstein_matrices(raw_pred, iou_list, inp_dim)

        try:
            loss = util.yolo_loss(resp_raw_pred, targets, no_obj, resp_pw_ph,
                                  resp_cx_cy, resp_stride, inp_dim,
                                  hyperparameters)
        except RuntimeError:
            print('bayes opt failed')
            break_flag = 1
            break

        # measure accuracy and record loss
        avg_loss.update(loss.item())
        avg_iou.update(stats['iou'])
        avg_conf.update(stats['pos_conf'])
        avg_no_conf.update(stats['neg_conf'])
        avg_pos.update(stats['pos_class'])
        avg_neg.update(stats['neg_class'])

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if mode['show_output']:  # plot progress
            progress_str = 'Loss: %.4f | AvIoU: %.3f | AvPConf: %.3f | AvNConf: %.5f | AvClass: %.3f | AvNClass: %.5f' \
                % (loss.item(), stats['iou'], stats['pos_conf'], stats['neg_conf'], stats['pos_class'], stats['neg_class'])
            progress_bar(batch_idx, len(trainloader), progress_str)

        iteration = epoch * len(trainloader) + batch_idx

        if mode['show_temp_summary']:
            writer.add_scalar('AvLoss/train', avg_loss.avg, iteration)
            writer.add_scalar('AvIoU/train', avg_iou.avg, iteration)
            writer.add_scalar('AvPConf/train', avg_conf.avg, iteration)
            writer.add_scalar('AvNConf/train', avg_no_conf.avg, iteration)
            writer.add_scalar('AvClass/train', avg_pos.avg, iteration)
            writer.add_scalar('AvNClass/train', avg_neg.avg, iteration)

    track.metric(iteration=iteration,
                 epoch=epoch,
                 avg_train_loss=avg_loss.avg,
                 avg_train_iou=avg_iou.avg,
                 avg_train_conf=avg_conf.avg,
                 avg_train_neg_conf=avg_no_conf.avg,
                 avg_train_pos=avg_pos.avg,
                 avg_train_neg=avg_neg.avg)

    outcome = {
        'avg_loss': avg_loss.avg,
        'avg_iou': avg_iou.avg,
        'avg_pos': avg_pos.avg,
        'avg_neg': avg_neg.avg,
        'avg_conf': avg_conf.avg,
        'avg_no_conf': avg_no_conf.avg,
        'broken': break_flag
    }

    return outcome
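The 'broken' flag in the outcome lets an outer hyperparameter-search loop abandon a configuration whose loss computation raised (the RuntimeError branch above). A minimal driver sketch, assuming trainloader, model, optimizer, and num_epochs already exist:

# Hypothetical outer loop around train(); stops early when the trial blew up.
for epoch in range(num_epochs):
    outcome = train(trainloader, model, optimizer, epoch, cuda=True)
    if outcome['broken']:
        # Report failure to the Bayesian-optimization driver and stop this trial.
        break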
Example no. 34
def evaluate(model,
             device,
             coco_version,
             confidence=0.01,
             iou_threshold=0.5,
             subset=1):
    # FIXME remove this and make paste_masks_in_image run on the GPU
    n_threads = torch.get_num_threads()
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")

    model.eval()

    if isinstance(model, nn.DataParallel):
        inp_dim = model.module.inp_dim
        pw_ph = model.module.pw_ph
        cx_cy = model.module.cx_cy
        stride = model.module.stride
    else:
        inp_dim = model.inp_dim
        pw_ph = model.pw_ph
        cx_cy = model.cx_cy
        stride = model.stride

    pw_ph = pw_ph.to(device)
    cx_cy = cx_cy.to(device)
    stride = stride.to(device)

    transformed_dataset = Coco(partition='val',
                               coco_version=coco_version,
                               subset=subset,
                               transform=transforms.Compose(
                                   [ResizeToTensor(inp_dim)]))

    dataloader = DataLoader(transformed_dataset,
                            batch_size=8,
                            shuffle=False,
                            collate_fn=helper.collate_fn,
                            num_workers=4)

    coco = coco_utils.get_coco_api_from_dataset(transformed_dataset)
    iou_types = ["bbox"]
    coco_evaluator = coco_eval.CocoEvaluator(coco, iou_types)

    for images, targets in dataloader:
        images = images.to(device)

        targets2 = []
        for t in targets:
            dd = {}
            for k, v in t.items():
                if k != 'img_size':
                    dd[k] = v.to(device)
                else:
                    dd[k] = v
            targets2.append(dd)

        targets = targets2

        with torch.no_grad():
            raw_pred = model(images, device)

        true_pred = util.transform(raw_pred.clone().detach(), pw_ph, cx_cy,
                                   stride)

        sorted_pred = torch.sort(true_pred[:, :, 4] *
                                 (true_pred[:, :, 5:].max(axis=2)[0]),
                                 descending=True)
        pred_mask = sorted_pred[0] > confidence
        indices = [(sorted_pred[1][e, :][pred_mask[e, :]])
                   for e in range(pred_mask.shape[0])]
        pred_final = [true_pred[i, indices[i], :] for i in range(len(indices))]

        pred_final_coord = [
            util.get_abs_coord(pred_final[i].unsqueeze(-2))
            for i in range(len(pred_final))
        ]

        indices = [
            nms_box.nms(pred_final_coord[i][0], pred_final[i][:, 4],
                        iou_threshold) for i in range(len(pred_final))
        ]
        pred_final = [
            pred_final[i][indices[i], :] for i in range(len(pred_final))
        ]

        abs_pred_final = [
            helper.convert2_abs_xyxy(pred_final[i], targets[i]['img_size'],
                                     inp_dim) for i in range(len(pred_final))
        ]

        outputs = [dict() for i in range(len((abs_pred_final)))]
        for i, atrbs in enumerate(abs_pred_final):

            outputs[i]['boxes'] = atrbs[:, :4]
            outputs[i]['scores'] = pred_final[i][:, 4]
            try:
                outputs[i]['labels'] = pred_final[i][:, 5:].max(
                    axis=1)[1] + 1  # could be empty
            except Exception:
                outputs[i]['labels'] = torch.tensor([])

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        coco_evaluator.update(res)

    # gather the stats from all processes

    sys.stdout = open(os.devnull, 'w')  #wrapper to disable hardcoded printing

    coco_evaluator.synchronize_between_processes()
    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    mAP = coco_evaluator.get_stats()[0]

    sys.stdout = sys.__stdout__  #wrapper to enable hardcoded printing (return to normal mode)
    return mAP
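A usage sketch; the coco_version and subset values are placeholders, since their accepted values are not shown here:

# Hypothetical call: score a model on a fraction of COCO val and print the headline mAP.
mAP = evaluate(model, torch.device('cuda'), coco_version='2017',
               confidence=0.01, iou_threshold=0.5, subset=0.1)
print('COCO mAP:', mAP)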
Example no. 35
    def my_stack(self):
        self.y2 = util.transform(self.y1, self.data_type[0, 0])
        for i in range(1, self.tn):
            self.y2 = np.hstack((self.y2, util.transform(self.y1, self.data_type[0, i])))
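An equivalent formulation that collects the transformed blocks first and stacks once, assuming the same util.transform semantics; it avoids re-allocating self.y2 on every iteration (a sketch, not the original method):

    def my_stack(self):
        # Build every transformed block, then horizontally stack a single time.
        blocks = [util.transform(self.y1, self.data_type[0, i]) for i in range(self.tn)]
        self.y2 = np.hstack(blocks)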
Example no. 36
    cities = getCityNamesFromFile("static/ca.city.lst.json", MAJOR_CITIES)
    config = getConfig("/var/weather_config.json")

    api_key = config["key"]
    db_host = config["db_host"]
    db_user = config["db_user"]
    db_port = config["db_port"]
    db_name = config["db_name"]
    db_password = config["db_password"]

    ## Establish database connection
    db = Database(db_user=db_user, db_port=db_port, db_name=db_name, db_host=db_host, db_password=db_password)
    db.setup_db_connection()


    for city in cities:
        city_data = requests.get(f"http://api.openweathermap.org/data/2.5/weather?q={city},ca&APPID={api_key}")

        city_data_dict = json.loads(city_data.content.decode("utf-8"))
        transformed_data = transform(city_data_dict)

        ## Insert data into the database
        successful = db.insert_weather(transformed_data)
        if successful:
            print(f"Weather data for {city}, CA inserted successfully.")
        else:
            print(f"Something is wrong with inserting weather data for {city}")

        time.sleep(2)
    
    db.close_session()
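A slightly more defensive variant of the fetch step inside the loop, using requests' standard status_code attribute (a sketch, not part of the original script):

# Sketch: skip a city whose API call failed instead of parsing an error body.
response = requests.get(f"http://api.openweathermap.org/data/2.5/weather?q={city},ca&APPID={api_key}")
if response.status_code != 200:
    print(f"Skipping {city}: HTTP {response.status_code}")
else:
    transformed_data = transform(json.loads(response.content.decode("utf-8")))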