Example #1
 def checkOneHot(self):
     # Packed label vector holding two sequences, [1, 2] and [1, 2, 0], with lengths 2 and 3.
     v = torch.LongTensor([1, 2, 1, 2, 0])
     v_length = torch.LongTensor([2, 3])
     v_onehot = utils.oneHot(v, v_length, 4)
     # Expected result: one-hot rows per time step, zero-padded to the longest sequence.
     target = torch.FloatTensor([[[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
                                 [[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]]])
     assert target.equal(v_onehot)
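The oneHot helper called above is not shown in this snippet. A minimal sketch that reproduces the behaviour the test expects (a flat label vector v split by v_length into per-sequence rows, one-hot encoded over nc classes and zero-padded to the longest sequence) could look like the following; the project's actual utils.oneHot may differ:

import torch

def oneHot(v, v_length, nc):
    # Split the packed label vector into one sequence per entry of v_length and
    # one-hot encode each step over nc classes, zero-padding shorter sequences.
    batchSize = v_length.size(0)
    maxLength = int(v_length.max())
    v_onehot = torch.zeros(batchSize, maxLength, nc)
    acc = 0
    for i in range(batchSize):
        length = int(v_length[i])
        labels = v[acc:acc + length].view(-1, 1).long()
        v_onehot[i, :length].scatter_(1, labels, 1.0)
        acc += length
    return v_onehot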
Example #2
 def format_label_data(self, x, requires_grad=False, plabel=None, *args, **kwargs):
     # Convert raw label data (lists or numpy arrays) into float tensors on the target
     # device, one-hot encoding 1-D index arrays according to the parameters in plabel.
     if x is None:
         return
     if plabel is None:
         plabel = self.plabel
     if issubclass(type(x), list):
         # A list of label arrays: format each entry with its matching plabel entry.
         new_x = []
         for i, label in enumerate(x):
             new_x.append(self.format_label_data(label, plabel=plabel[i]))
         x = new_x
     else:
         if type(x) == list:
             x = np.array(x)
         if type(x) == np.ndarray:
             if x.ndim == 1:
                 # 1-D arrays hold class indices; one-hot them over the total label dimension.
                 if issubclass(type(plabel), list):
                     label_dim = 0
                     for pl in plabel:
                         label_dim += pl['dim']
                 else:
                     label_dim = plabel['dim']
                 x = oneHot(x, label_dim)
             if x.dtype != 'float32':
                 x = x.astype('float32')
             x = torch.from_numpy(x)
         x = x.to(self.device, dtype=torch.float32)
     for i in x:
         i.requires_grad_(requires_grad)
     return x
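In format_label_data, oneHot(x, label_dim) turns a 1-D array of class indices into a 2-D float array before it is wrapped in a tensor. A plausible NumPy version, assuming that call signature, is:

import numpy as np

def oneHot(labels, dim):
    # (N,) integer class indices -> (N, dim) one-hot matrix.
    out = np.zeros((len(labels), dim), dtype='float32')
    out[np.arange(len(labels)), np.asarray(labels, dtype=int)] = 1.0
    return out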
Example #4
 def selectMove(self, state, goal):
     goalVec = utils.oneHot(goal)
     if self.controllerEpsilon[goal] < random.random():
         # Greedy action: the controller net also expects dummy target and mask
         # inputs that are only used during training.
         dummyYtrue = np.zeros((1, 8))
         dummyMask = np.zeros((1, 8))
         return np.argmax(self.net.controllerNet.predict(
             [np.reshape(state, (1, 84, 84, 4)), np.asarray([goalVec]), dummyYtrue, dummyMask],
             verbose=0)[1])
     # Otherwise explore: pick a random action (epsilon-greedy).
     return random.choice(self.actionSet)
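Here utils.oneHot(goal) encodes the goal index as a fixed-length vector for the controller network. The number of goals is defined elsewhere in the project, so the nGoals default below is only a placeholder:

import numpy as np

def oneHot(goal, nGoals=6):
    # nGoals is hypothetical; the real goal count comes from the project's configuration.
    vec = np.zeros(nGoals)
    vec[goal] = 1.0
    return vec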
Example #5
    def __getitem__(self, index):
        try:
            labels = {}
            imgInfo = self.dataset[index]
            imgName = imgInfo[0]
            imgData = Image.open(os.path.join(self.imgPath, imgName))
            imgData = trans.Resize((IMG_WIDTH, IMG_HEIGHT))(imgData)
            imgData2 = trans.ToTensor()(imgData) - 0.5

            boxes = np.array([float(i) for i in imgInfo[1:]])
            boxes2 = np.split(boxes, len(boxes) // 5)  # one image may have several boxes, five values each
            for featureSize, anchors in ANCHORS_GROUP.items():
                # Three feature-map scales; each cell stores 4 offsets + 1 confidence + N class scores.
                labels[featureSize] = np.zeros(shape=(featureSize, featureSize, 3, 5 + CLASS_NUM))
                for box in boxes2:
                    cls, x, y, w, h = box[0:5]
                    boxArea = w * h
                    # math.modf splits the scaled centre into a fractional offset and an integer cell index.
                    xOffset, xIndex = math.modf(x * featureSize / IMG_WIDTH)
                    yOffset, yIndex = math.modf(y * featureSize / IMG_HEIGHT)
                    for i, anchor in enumerate(anchors):
                        anchorArea = ANCHORS_GROUP_AREA[featureSize][i]
                        wOffset, hOffset = w / anchor[0], h / anchor[1]

                        # Confidence = IoU between the box and the anchor, both centred on the same cell.
                        inter = np.minimum(w, anchor[0]) * np.minimum(h, anchor[1])  # intersection
                        conf = inter / (boxArea + anchorArea - inter)
                        '''
                        Taking log of the w/h ratios makes the offsets easier to optimise.
                        The label tensor has shape (H, W, 3, C): the 3 axis matches the three
                        anchors used in the loss, and C holds the confidence, the offsets and
                        the one-hot class scores; *oneHot(...) unpacks the class vector into
                        the array.
                        '''
                        labels[featureSize][int(yIndex), int(xIndex), i] = np.array(
                            [conf, xOffset, yOffset, np.log(wOffset), np.log(hOffset), *oneHot(CLASS_NUM, int(cls))]
                        )

            return labels[13], labels[26], labels[52], imgData2
        except Exception as e:
            print("__getitem__", str(e))
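Note the argument order in this example: oneHot(CLASS_NUM, int(cls)) takes the class count first and the index second, and the result is unpacked with * into the label row. A sketch consistent with that usage:

import numpy as np

def oneHot(num_classes, index):
    # Length-num_classes vector with a single 1 at position index.
    vec = np.zeros(num_classes)
    vec[index] = 1.0
    return vec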
Example #6
 def _update(self, stepCount):
     batches = self.memory.sample(self.nSamples)
     stateVector = []
     goalVector = []
     for batch in batches:
         exp = batch[1]
         stateVector.append(exp.state)
         goalVector.append(utils.oneHot(exp.goal))
     stateVector = np.asarray(stateVector)
     goalVector = np.asarray(goalVector)
     nextStateVector = []
     for batch in batches:
         exp = batch[1]
         nextStateVector.append(exp.next_state)
     nextStateVector = np.asarray(nextStateVector)
     rewardVectors = self.net.controllerNet.predict(
         [stateVector, goalVector, np.zeros((self.nSamples, 8)), np.zeros((self.nSamples, 8))],
         verbose=0)[1]
     
     rewardVectorsCopy = np.copy(rewardVectors)
     rewardVectors = np.zeros((self.nSamples, 8))
     nextStateRewardVectors = self.net.targetControllerNet.predict(
         [nextStateVector, goalVector, np.zeros((self.nSamples, 8)), np.zeros((self.nSamples, 8))],
         verbose=0)[1]
     
     maskVector = np.zeros((self.nSamples, 8))
     for i, batch in enumerate(batches):
         exp = batch[1]
         idx = batch[0]
         maskVector[i, exp.action] = 1.0
         rewardVectors[i][exp.action] = exp.reward
         if not exp.done:
             rewardVectors[i][exp.action] += self.gamma * max(nextStateRewardVectors[i])
         # Update the replay-memory priority with the absolute TD error.
         self.memory.update(idx, np.abs(rewardVectors[i][exp.action] - rewardVectorsCopy[i][exp.action]))
     rewardVectors = np.asarray(rewardVectors)
     loss = self.net.controllerNet.train_on_batch(
         [stateVector, goalVector, rewardVectors, maskVector],
         [np.zeros(self.nSamples), rewardVectors])
     # Soft update of the target network towards the online controller weights
     controllerWeights = self.net.controllerNet.get_weights()
     controllerTargetWeights = self.net.targetControllerNet.get_weights()
     for i in range(len(controllerWeights)):
         controllerTargetWeights[i] = self.targetTau * controllerWeights[i] + (1 - self.targetTau) * controllerTargetWeights[i]
     self.net.targetControllerNet.set_weights(controllerTargetWeights)
     return loss
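The loop before set_weights is a soft (Polyak) target-network update, target = tau * online + (1 - tau) * target. The same step written as a standalone helper for plain Keras models, as an illustration only:

def soft_update(online_net, target_net, tau):
    # Blend each weight array of the online network into the target network.
    online = online_net.get_weights()
    target = target_net.get_weights()
    target_net.set_weights([tau * o + (1 - tau) * t for o, t in zip(online, target)])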
Example #7
    def forward(self, x):
        """
        This method defines the forward pass of the network.

        Parameters:
            x:
                The input image tensor

        Returns:
            - For each image in the batch, a list of vocab indices that
              forms the generated caption
            - A tensor of per-step log-probabilities over the vocabulary
        """
        batchSize = len(x)
        outputSeq = [[] for _ in range(batchSize)]

        inp = torch.zeros(batchSize,
                          1,
                          self._rnn.input_size,
                          requires_grad=False,
                          device=settings.device)
        inp_ = torch.zeros(batchSize,
                           1,
                           self._rnn.input_size,
                           requires_grad=False,
                           device=settings.device)

        first_hidden = self._cnn(x)

        hidden_elems = []
        for l in range(self._rnn.num_layers):
            hidden_elems.append(first_hidden)
        hidden = torch.stack(hidden_elems, 0)
        assert hidden.size() == (self._rnn.num_layers, batchSize,
                                 self._rnn.hidden_size)

        if self._twoStates:
            cell = torch.stack(hidden_elems, 0)
            assert cell.size() == (self._rnn.num_layers, batchSize,
                                   self._rnn.hidden_size)

        outputLogProbs_elems = []
        for step in range(self._maxOutputLen +
                          1):  # plus 1 for the termination char
            if self._twoStates:
                _, (hidden, cell) = self._rnn(inp, (hidden, cell))
            else:
                _, hidden = self._rnn(inp, hidden)

            output = self._outputNet(hidden[-1])
            outputLogProbs_elems.append(
                nn.functional.log_softmax(output, dim=1))

            for b in range(batchSize):
                w = utils.randSamp(output[b], Hyperparams().topk)
                outputSeq[b].append(w)
                inp_[b, 0] = utils.oneHot(self._vocabLen + 1, w)

            # Copy the freshly built one-hot inputs so later in-place writes
            # to inp_ do not change inp.
            inp = inp_.detach().add(0)

        outputLogProbs = torch.stack(outputLogProbs_elems, dim=1)
        assert outputLogProbs.size() == (batchSize, self._maxOutputLen + 1,
                                         self._vocabLen + 1)

        return outputSeq, outputLogProbs
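In this decoder, utils.oneHot(self._vocabLen + 1, w) must return a 1-D tensor of length vocabLen + 1 that can be assigned into inp_[b, 0], which assumes the RNN input size equals the vocabulary size plus one. A minimal sketch of such a helper (the project's actual implementation may differ):

import torch

def oneHot(size, index):
    # Length-size float vector with a single 1 at the given index.
    vec = torch.zeros(size)
    vec[index] = 1.0
    return vec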