Example #1
def torch_predict(V, W, classId, XlT, XuT, patClassIdTest, gama = 1, oper = 'min'):
    """
    GFMM classifier (test routine), implemented with PyTorch.

      result = predict(V,W,classId,XlT,XuT,patClassIdTest,gama,oper)

    INPUT
      V                 Tested model hyperbox lower bounds
      W                 Tested model hyperbox upper bounds
      classId           Tested model hyperbox class labels (crisp)
      XlT               Test data lower bounds (rows = objects, columns = features)
      XuT               Test data upper bounds (rows = objects, columns = features)
      patClassIdTest    Test data class labels (crisp)
      gama              Membership function slope (default: 1)
      oper              Membership calculation operation: 'min' or 'prod' (default: 'min')

    OUTPUT
      result           An object of Bunch datatype containing the following results:
                          + summis           Number of misclassified objects
                          + misclass         Binary error map

    """
    if len(XlT.size()) == 1:
        XlT = XlT.reshape(1, -1)
    if len(XuT.size()) == 1:
        XuT = XuT.reshape(1, -1)

    #initialization
    yX = XlT.size(0)
    isUsingGPU = False
    if is_Have_GPU and (W.size(0) * W.size(1) >= GPU_Computing_Threshold or XlT.size(1) >= GPU_Computing_Threshold):
        V = V.cuda()
        W = W.cuda()
        classId = classId.cuda()
        XlT = XlT.cuda()
        XuT = XuT.cuda()
        patClassIdTest = patClassIdTest.cuda()
        misclass = torch.cuda.FloatTensor(yX).fill_(0)
        isUsingGPU = True
        els = torch.arange(yX).cuda()
    else:
        misclass = torch.zeros(yX)
        els = torch.arange(yX)

    # classifications
    for i in els:
        if isUsingGPU:
            mem = gpu_memberG(XlT[i, :], XuT[i, :], V, W, gama, oper)
        else:
            mem = torch_memberG(XlT[i, :], XuT[i, :], V, W, gama, oper)  # calculate memberships for all hyperboxes

        bmax = mem.max()        # maximum membership value
        maxVind = mem == bmax   # mask of all hyperboxes attaining the maximum membership

        if bmax == 0:
            print('zero maximum membership value')  # no hyperbox produced a positive membership for this sample
            misclass[i] = 1
        else:
            if len(torch.unique(classId[maxVind])) > 1:
                misclass[i] = 1
            else:
                if torch.any(classId[maxVind] == patClassIdTest[i]) or (patClassIdTest[i] == UNLABELED_CLASS):
                    misclass[i] = 0
                else:
                    misclass[i] = 1

    # results
    summis = torch.sum(misclass)

    result = Bunch(summis = summis, misclass = misclass)
    return result
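A minimal usage sketch for this routine (the toy tensors below are illustrative, and the sketch assumes that torch_predict and its module-level dependencies such as torch_memberG, Bunch, is_Have_GPU, GPU_Computing_Threshold and UNLABELED_CLASS are importable):

import torch

# a toy "trained" model: two hyperboxes in two dimensions, one per class
V = torch.tensor([[0.1, 0.1], [0.6, 0.6]])          # hyperbox lower bounds
W = torch.tensor([[0.4, 0.4], [0.9, 0.9]])          # hyperbox upper bounds
classId = torch.tensor([1, 2])                      # crisp class label of each hyperbox

# point-type test data: lower and upper bounds coincide
XlT = torch.tensor([[0.2, 0.3], [0.8, 0.7]])
XuT = XlT.clone()
patClassIdTest = torch.tensor([1, 2])

result = torch_predict(V, W, classId, XlT, XuT, patClassIdTest, gama=1, oper='min')
print(result.summis)     # number of misclassified test objects
print(result.misclass)   # per-object binary error map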
Example #2
def torch_predict(V, W, classId, XlT, XuT, patClassIdTest, gama=1, oper='min'):
    """
    GFMM classifier (test routine), implemented with PyTorch.

      result = predict(V,W,classId,XlT,XuT,patClassIdTest,gama,oper)

    INPUT
      V                 Tested model hyperbox lower bounds
      W                 Tested model hyperbox upper bounds
      classId           Tested model hyperbox class labels (crisp)
      XlT               Test data lower bounds (rows = objects, columns = features)
      XuT               Test data upper bounds (rows = objects, columns = features)
      patClassIdTest    Test data class labels (crisp)
      gama              Membership function slope (default: 1)
      oper              Membership calculation operation: 'min' or 'prod' (default: 'min')

    OUTPUT
      result           An object of Bunch datatype containing the following results:
                          + summis           Number of misclassified objects
                          + misclass         Binary error map
                          + sumamb           Number of objects with maximum membership in more than one class
                          + out              Soft class memberships
                          + mem              Hyperbox memberships

    """
    #initialization
    yX = XlT.size(0)
    isUsingGPU = False
    if is_Have_GPU and (W.size(0) * W.size(1) >= GPU_Computing_Threshold
                        or XlT.size(1) >= GPU_Computing_Threshold):
        V = V.cuda()
        W = W.cuda()
        classId = classId.cuda()
        XlT = XlT.cuda()
        XuT = XuT.cuda()
        patClassIdTest = patClassIdTest.cuda()
        misclass = torch.cuda.FloatTensor(yX).fill_(0)
        classes = torch.unique(classId)
        noClasses = classes.size(0)
        ambiguity = torch.cuda.FloatTensor(yX, 1).fill_(0)
        mem = torch.cuda.FloatTensor(yX, V.size(0)).fill_(0)
        out = torch.cuda.FloatTensor(yX, noClasses).fill_(0)
        isUsingGPU = True
        els = torch.arange(yX).cuda()
    else:
        classes = torch.unique(classId)
        misclass = torch.zeros(yX)
        noClasses = classes.size(0)
        ambiguity = torch.zeros((yX, 1))
        mem = torch.zeros((yX, V.size(0)))
        out = torch.zeros((yX, noClasses))
        els = torch.arange(yX)

    # classifications
    for i in els:
        if isUsingGPU:
            mem[i, :] = gpu_memberG(XlT[i, :], XuT[i, :], V, W, gama, oper)
        else:
            mem[i, :] = torch_memberG(XlT[i, :], XuT[i, :], V, W, gama, oper)  # calculate memberships for all hyperboxes

        bmax = mem[i, :].max()                      # maximum membership value
        maxVind = torch.nonzero(mem[i, :] == bmax)  # indexes of all hyperboxes attaining the maximum membership

        for j in torch.arange(noClasses):
            out[i, j] = mem[i, classId == classes[j]].max()  # maximum membership for each class

        ambiguity[i, :] = torch.sum(out[i, :] == bmax)  # number of classes attaining the maximum membership

        if bmax == 0:
            print('zero maximum membership value')  # no hyperbox produced a positive membership for this sample

        misclass[i] = ~(torch.any(classId[maxVind] == patClassIdTest[i]) |
                        (patClassIdTest[i] == 0))

    # results
    sumamb = torch.sum(ambiguity[:, 0] > 1)
    summis = torch.sum(misclass)

    result = Bunch(summis=summis,
                   misclass=misclass,
                   sumamb=sumamb,
                   out=out,
                   mem=mem)
    return result
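A short post-processing sketch for this second variant (it assumes result, classId and XlT from a call such as the one sketched under Example #1):

import torch

classes = torch.unique(classId)                  # column j of result.out corresponds to classes[j]
predicted = classes[result.out.argmax(dim=1)]    # hard class prediction per test object
error_rate = result.summis.float() / XlT.size(0)
ambiguous = result.sumamb                        # objects whose maximum membership is shared by several classes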
Example #3
    def fit(self, X_l, X_u, patClassId):
        """
        X_l          Input data lower bounds (rows = objects, columns = features)
        X_u          Input data upper bounds (rows = objects, columns = features)
        patClassId  Input data class labels (crisp)
        """
        
        if self.isNorm == True:
            X_l, X_u = self.dataPreprocessing(X_l, X_u)
        
        if isinstance(X_l, torch.Tensor) == False:
            X_l = torch.from_numpy(X_l).float()
            X_u = torch.from_numpy(X_u).float()
            patClassId = torch.from_numpy(patClassId).long()
            
        time_start = time.perf_counter()
        
        isUsingGPU = False
        if is_Have_GPU and X_l.size(0) * X_l.size(1) >= GPU_Computing_Threshold:
            self.V = X_l.cuda()
            self.W = X_u.cuda()
            self.classId = patClassId.cuda()
            isUsingGPU = True
        else:
            self.V = X_l
            self.W = X_u
            self.classId = patClassId
       
        # yX, xX = X_l.size()
        
#        if len(self.cardin) == 0 or len(self.clusters) == 0:
#            self.cardin = np.ones(yX)
#            self.clusters = np.empty(yX, dtype=object)
#            for i in range(yX):
#                self.clusters[i] = np.array([i], dtype = np.int32)
#        
        if self.isDraw:
            mark_col = np.array(['r', 'g', 'b', 'y', 'c', 'm', 'k'])
            drawing_canvas = self.initializeCanvasGraph("GFMM - AGGLO-SM-Fast version")
                
            # plot initial hyperbox
            Vt, Wt = self.pcatransform()
            color_ = np.empty(len(self.classId), dtype = object)
            for c in range(len(self.classId)):
                color_[c] = mark_col[self.classId[c]]
            drawbox(Vt, Wt, drawing_canvas, color_)
            self.delay()
        
        # training
        isTraining = True
        while isTraining:
            isTraining = False
            
            # calculate class masks
            yX, xX = self.V.size()
            labList = torch.unique(self.classId)
            if isUsingGPU == False:
                clMask = torch.zeros((yX, len(labList)), dtype=torch.uint8)
            else:
                clMask = torch.cuda.ByteTensor(yX, len(labList)).fill_(0)
            
            for i in range(len(labList)):
                clMask[:, i] = self.classId == labList[i]
        
            # calculate pairwise memberships *ONLY* within each class (faster!)
            if isUsingGPU == False:
                b = torch.zeros((yX, yX))
            else:
                b = torch.cuda.FloatTensor(yX, yX).fill_(0)
            
            if isUsingGPU:
                els = torch.arange(len(labList)).cuda()
            else:
                els = torch.arange(len(labList))
            
            for i in els:
                Vi = self.V[clMask[:, i]] # get bounds of patterns with class label i
                Wi = self.W[clMask[:, i]]
                clSize = torch.sum(clMask[:, i]) # get number of patterns of class i
                clIdxs = torch.nonzero(clMask[:, i])[:, 0] # get position of patterns with class label i in the training set
                
                if self.simil == 'short':
                    for j in range(clSize):
                        if isUsingGPU == False:
                            b[clIdxs[j], clIdxs] = torch_memberG(Wi[j], Vi[j], Vi, Wi, self.gamma, self.oper)
                        else:
                            b[clIdxs[j], clIdxs] = gpu_memberG(Wi[j], Vi[j], Vi, Wi, self.gamma, self.oper)
                elif self.simil == 'long':
                    for j in range(clSize):
                        if isUsingGPU == False:
                            b[clIdxs[j], clIdxs] = torch_memberG(Vi[j], Wi[j], Wi, Vi, self.gamma, self.oper)
                        else:
                            b[clIdxs[j], clIdxs] = gpu_memberG(Vi[j], Wi[j], Wi, Vi, self.gamma, self.oper)
                else:
                    for j in range(clSize):
                        if isUsingGPU == False:
                            b[clIdxs[j], clIdxs] = torch_memberG(Vi[j], Wi[j], Vi, Wi, self.gamma, self.oper)
                        else:
                            b[clIdxs[j], clIdxs] = gpu_memberG(Vi[j], Wi[j], Vi, Wi, self.gamma, self.oper)
                
            if yX == 1:
                maxb = torch.FloatTensor([])
            else:
                maxb = self.torch_splitSimilarityMaxtrix(b, self.sing, False, isUsingGPU)
                if len(maxb) > 0:
                    maxb = maxb[(maxb[:, 2] >= self.bthres), :]
                    
                    if len(maxb) > 0:
                        # sort maxb in descending order of the last column
                        values, idx_smaxb = torch.sort(maxb[:, 2], descending=True)
                        maxb = torch.cat((maxb[idx_smaxb, 0].reshape(-1, 1), maxb[idx_smaxb, 1].reshape(-1, 1), maxb[idx_smaxb, 2].reshape(-1, 1)), dim=1)
                        #maxb = maxb[idx_smaxb]
            
            while len(maxb) > 0:
                curmaxb = maxb[0, :]  # pair currently being handled
                row1 = curmaxb[0].long()
                row2 = curmaxb[1].long()

                # calculate new coordinates of the row1-th hyperbox by including the row2-th box, scrap the latter and leave the rest intact
                newV = torch.cat((self.V[0:row1, :], torch.min(self.V[row1, :], self.V[row2, :]).reshape(1, -1), self.V[row1 + 1:row2, :], self.V[row2 + 1:, :]), dim=0)
                newW = torch.cat((self.W[0:row1, :], torch.max(self.W[row1, :], self.W[row2, :]).reshape(1, -1), self.W[row1 + 1:row2, :], self.W[row2 + 1:, :]), dim=0)
                newClassId = torch.cat((self.classId[0:row2], self.classId[row2 + 1:]))
                # print('Type newV = ', newV.type())
                # adjust the hyperbox if there is no overlap and the maximum hyperbox size is not violated
                if (not torch_isOverlap(newV, newW, row1, newClassId, isUsingGPU)) and ((newW[row1] - newV[row1]) <= self.teta).all():
                    isTraining = True
                    self.V = newV
                    self.W = newW
                    self.classId = newClassId
                    
#                    self.cardin[int(curmaxb[0])] = self.cardin[int(curmaxb[0])] + self.cardin[int(curmaxb[1])]
#                    self.cardin = np.append(self.cardin[0:int(curmaxb[1])], self.cardin[int(curmaxb[1]) + 1:])
#                            
#                    self.clusters[int(curmaxb[0])] = np.append(self.clusters[int(curmaxb[0])], self.clusters[int(curmaxb[1])])
#                    self.clusters = np.append(self.clusters[0:int(curmaxb[1])], self.clusters[int(curmaxb[1]) + 1:])
#                    
                    # remove the joined pair from the list, together with any pair that involves one of the joined boxes and has a lower membership
                    mask = (maxb[:, 0] != curmaxb[0]) & (maxb[:, 1] != curmaxb[0]) & (maxb[:, 0] != curmaxb[1]) & (maxb[:, 1] != curmaxb[1]) & (maxb[:, 2] >= curmaxb[2])
                    maxb = maxb[mask, :]
                    
                    # update indexes to accommodate the removed hyperbox:
                    # indices of V and W larger than curmaxb[1] are decreased by 1 because the hyperbox at position curmaxb[1] was removed
                    if len(maxb) > 0:
                        maxb[maxb[:, 0] > curmaxb[1], 0] = maxb[maxb[:, 0] > curmaxb[1], 0] - 1
                        maxb[maxb[:, 1] > curmaxb[1], 1] = maxb[maxb[:, 1] > curmaxb[1], 1] - 1
                            
                    if self.isDraw:
                        Vt, Wt = self.pcatransform()
                        color_ = np.empty(len(self.classId), dtype = object)
                        for c in range(len(self.classId)):
                            color_[c] = mark_col[self.classId[c]]
                        drawing_canvas.cla()
                        drawbox(Vt, Wt, drawing_canvas, color_)
                        self.delay()
                else:
                    maxb = maxb[1:, :]  # scrap examined pair from the list
            
            if isTraining == True and isUsingGPU == True and self.V.size(0) * self.V.size(1) < GPU_Computing_Threshold:
                isUsingGPU = False
                self.V = self.V.cpu()
                self.W = self.W.cpu()
                self.classId = self.classId.cpu()
                
        time_end = time.perf_counter()
        self.elapsed_training_time = time_end - time_start
         
        return self
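For reference, the core merge step that this agglomerative routine applies to each candidate pair can be isolated as a small standalone sketch (illustrative only; the routine above additionally requires the torch_isOverlap test to pass before it accepts a merge):

import torch

def merge_hyperboxes(V, W, i, j, teta):
    """Tentatively merge hyperbox j into hyperbox i: the merged lower bound is the
    element-wise minimum and the merged upper bound the element-wise maximum; the
    merge is acceptable only if no edge of the merged box exceeds teta."""
    newV = torch.min(V[i], V[j])
    newW = torch.max(W[i], W[j])
    is_acceptable = bool(((newW - newV) <= teta).all())
    return newV, newW, is_acceptable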
Example #4
    def fit(self, X_l, X_u, patClassId):
        """
        Training the classifier
        
         X_l            Input data lower bounds (rows = objects, columns = features)
         X_u            Input data upper bounds (rows = objects, columns = features)
         patClassId     Input data class labels (crisp). patClassId[i] = UNLABELED_CLASS corresponds to an unlabeled item
        
        """
        print('--Online Learning--')

        if self.isNorm == True:
            X_l, X_u = self.dataPreprocessing(X_l, X_u)

        if isinstance(X_l, torch.Tensor) == False:
            X_l = torch.from_numpy(X_l).float()
            X_u = torch.from_numpy(X_u).float()
            patClassId = torch.from_numpy(patClassId).long()

        time_start = time.perf_counter()

        yX, xX = X_l.size()
        teta = self.teta

        self.misclass = 1
        isUsingGPU = False

        while self.misclass > 0 and teta >= self.tMin:
            # for each input sample
            for i in range(yX):
                if (len(self.V) > 0 and is_Have_GPU and not isUsingGPU
                        and self.V.size(0) * self.V.size(1) >= GPU_Computing_Threshold):
                    self.V = self.V.cuda()
                    self.W = self.W.cuda()
                    self.classId = self.classId.cuda()
                    isUsingGPU = True

                if self.V.size(0) == 0:  # no model provided - starting from scratch
                    self.V = X_l[0].reshape(1, -1)  # torch.DoubleTensor(X_l[0]).to(device)
                    self.W = X_u[0].reshape(1, -1)  # torch.DoubleTensor(X_u[0]).to(device)
                    self.classId = torch.LongTensor([patClassId[0]])  # torch.DoubleTensor([patClassId[0]]).to(device)
                else:
                    if isUsingGPU == False:
                        classOfX = patClassId[i]
                    else:
                        classOfX = patClassId[i].cuda()

                    id_lb_sameX = (self.classId == classOfX) | (
                        self.classId == UNLABELED_CLASS)

                    if len(torch.nonzero(id_lb_sameX)) > 0:
                        V_sameX = self.V[id_lb_sameX]
                        W_sameX = self.W[id_lb_sameX]
                        lb_sameX = self.classId[id_lb_sameX]
                        id_range = torch.arange(len(self.classId))
                        id_processing = id_range[id_lb_sameX]

                        if isUsingGPU == False:
                            Xl_cur = X_l[i]
                            Xu_cur = X_u[i]

                            b = torch_memberG(Xl_cur, Xu_cur, V_sameX, W_sameX,
                                              self.gamma)
                        else:
                            Xl_cur = X_l[i].cuda()
                            Xu_cur = X_u[i].cuda()

                            b = gpu_memberG(Xl_cur, Xu_cur, V_sameX, W_sameX,
                                            self.gamma)

                        bSort, index = torch.sort(b, descending=True)

                        if bSort[0] != 1 or (classOfX != lb_sameX[index[0]] and classOfX != UNLABELED_CLASS):
                            adjust = False
                            for j in id_processing[index]:
                                # test for violation of the maximum hyperbox size and of the class labels
                                if ((classOfX == self.classId[j]
                                        or self.classId[j] == UNLABELED_CLASS
                                        or classOfX == UNLABELED_CLASS)
                                        and ((torch.max(self.W[j], Xu_cur).float()
                                              - torch.min(self.V[j], Xl_cur).float()) <= teta).all()):
                                    # adjust the j-th hyperbox
                                    self.V[j] = torch.min(self.V[j], Xl_cur)
                                    self.W[j] = torch.max(self.W[j], Xu_cur)
                                    indOfWinner = j
                                    adjust = True
                                    if classOfX != UNLABELED_CLASS and self.classId[j] == UNLABELED_CLASS:
                                        self.classId[j] = classOfX

                                    break

                            # if i-th sample did not fit into any existing box, create a new one
                            if not adjust:
                                self.V = torch.cat(
                                    (self.V, Xl_cur.reshape(1, -1)), 0)
                                self.W = torch.cat(
                                    (self.W, Xu_cur.reshape(1, -1)), 0)
                                if isUsingGPU == False:
                                    self.classId = torch.cat(
                                        (self.classId,
                                         torch.LongTensor([classOfX])), 0)
                                else:
                                    self.classId = torch.cat(
                                        (self.classId,
                                         torch.cuda.LongTensor([classOfX])), 0)

                            elif self.V.size(0) > 1:
                                for ii in range(self.V.size(0)):
                                    if ii != indOfWinner and self.classId[ii] != self.classId[indOfWinner]:
                                        caseDim = torch_hyperboxOverlapTest(self.V, self.W, indOfWinner, ii)  # overlap test

                                        if len(caseDim) > 0:
                                            self.V, self.W = torch_hyperboxContraction(self.V, self.W, caseDim, ii, indOfWinner)

                    else:
                        # create new sample
                        if isUsingGPU == False:
                            self.V = torch.cat((self.V, X_l[i].reshape(1, -1)),
                                               0)
                            self.W = torch.cat((self.W, X_u[i].reshape(1, -1)),
                                               0)
                            self.classId = torch.cat(
                                (self.classId, torch.LongTensor(
                                    [patClassId[i]])), 0)
                        else:
                            self.V = torch.cat(
                                (self.V, X_l[i].cuda().reshape(1, -1)), 0)
                            self.W = torch.cat(
                                (self.W, X_u[i].cuda().reshape(1, -1)), 0)
                            self.classId = torch.cat(
                                (self.classId,
                                 torch.cuda.LongTensor([patClassId[i]])), 0)

            teta = teta * 0.9
            if teta >= self.tMin:
                result = torch_predict(self.V, self.W, self.classId, X_l, X_u,
                                       patClassId, self.gamma, self.oper)
                self.misclass = result.summis

        time_end = time.perf_counter()
        self.elapsed_training_time = time_end - time_start

        return self
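The expansion test used inside this online loop can be written as a short standalone sketch (tensor names mirror the snippet above; this is an illustration, not a drop-in replacement):

import torch

def can_expand(Vj, Wj, xl, xu, teta):
    """A hyperbox [Vj, Wj] may absorb the input [xl, xu] only if the expanded box
    max(Wj, xu) - min(Vj, xl) stays within the size bound teta in every dimension."""
    return bool(((torch.max(Wj, xu) - torch.min(Vj, xl)) <= teta).all())

def expand(Vj, Wj, xl, xu):
    """Adjust the winning hyperbox so that it covers the input, as in the snippet above."""
    return torch.min(Vj, xl), torch.max(Wj, xu)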
Example #5
    def fit(self, X_l, X_u, patClassId):
        """
        X_l         Input data lower bounds (rows = objects, columns = features)
        X_u         Input data upper bounds (rows = objects, columns = features)
        patClassId  Input data class labels (crisp)
        """

        if self.isNorm == True:
            X_l, X_u = self.dataPreprocessing(X_l, X_u)

        if isinstance(X_l, torch.Tensor) == False:
            X_l = torch.from_numpy(X_l).float()
            X_u = torch.from_numpy(X_u).float()
            patClassId = torch.from_numpy(patClassId).long()

        time_start = time.perf_counter()

        isUsingGPU = False
        if is_Have_GPU and X_l.size(0) * X_l.size(1) >= GPU_Computing_Threshold:
            self.V = X_l.cuda()
            self.W = X_u.cuda()
            self.classId = patClassId.cuda()
            isUsingGPU = True
        else:
            self.V = X_l
            self.W = X_u
            self.classId = patClassId

        # yX, xX = X_l.shape


#        if len(self.cardin) == 0 or len(self.clusters) == 0:
#            self.cardin = np.ones(yX)
#            self.clusters = np.empty(yX, dtype=object)
#            for i in range(yX):
#                self.clusters[i] = np.array([i], dtype = np.int64)

        if self.isDraw:
            mark_col = np.array(['r', 'g', 'b', 'y', 'c', 'm', 'k'])
            drawing_canvas = self.initializeCanvasGraph("GFMM - AGGLO-2")

            # plot initial hyperbox
            Vt, Wt = self.pcatransform()
            color_ = np.empty(len(self.classId), dtype=object)
            for c in range(len(self.classId)):
                color_[c] = mark_col[self.classId[c]]
            boxes = drawbox(Vt, Wt, drawing_canvas, color_)
            self.delay()
            hyperboxes = list(boxes)

        # training
        isTraining = True
        while isTraining:
            isTraining = False

            k = 0  # input pattern index
            while k < len(self.classId):
                if self.simil == 'short':
                    if isUsingGPU == False:
                        b = torch_memberG(self.W[k], self.V[k], self.V, self.W,
                                          self.gamma, self.oper, isUsingGPU)
                    else:
                        b = gpu_memberG(self.W[k], self.V[k], self.V, self.W,
                                        self.gamma, self.oper)

                elif self.simil == 'long':
                    if isUsingGPU == False:
                        b = torch_memberG(self.V[k], self.W[k], self.W, self.V,
                                          self.gamma, self.oper, isUsingGPU)
                    else:
                        b = gpu_memberG(self.V[k], self.W[k], self.W, self.V,
                                        self.gamma, self.oper)

                else:
                    b = torch_asym_similarity_one_many(self.V[k], self.W[k],
                                                       self.V, self.W,
                                                       self.gamma, self.sing,
                                                       self.oper, isUsingGPU)

                sortB, indB = torch.sort(b, descending=True)

                maxB = sortB[sortB >=
                             self.bthres]  # apply membership threshold

                if len(maxB) > 0:
                    indmaxB = indB[sortB >= self.bthres]
                    # remove self-membership
                    maxB = maxB[indmaxB != k]
                    indmaxB = indmaxB[indmaxB != k]

                    # remove memberships to boxes from other classes
                    # idx_other_classes = np.where(np.logical_and(self.classId[indmaxB] != self.classId[k], self.classId[indmaxB] != 0))
                    #idx_same_classes = np.where(np.logical_or(self.classId[indmaxB] == self.classId[k], self.classId[indmaxB] == 0))
                    #idx_same_classes = np.where(self.classId[indmaxB] == self.classId[k])[0] # np.logical_or(self.classId[indmaxB] == self.classId[k], self.classId[indmaxB] == 0)

                    #maxB = np.delete(maxB, idx_other_classes)
                    idx_same_classes = (self.classId[indmaxB]
                                        == self.classId[k]) | (
                                            self.classId[indmaxB] == 0)
                    maxB = maxB[idx_same_classes]
                    # keep memberships to unlabelled boxes
                    indmaxB = indmaxB[idx_same_classes]

                    #                    if len(maxB) > 30: # trim the set of memberships to speedup processing
                    #                        maxB = maxB[0:30]
                    #                        indmaxB = indmaxB[0:30]
                    if isUsingGPU == True:
                        kMat = torch.cuda.LongTensor([k]).expand(indmaxB.size(0))
                    else:
                        kMat = torch.LongTensor([k]).expand(indmaxB.size(0))

                    pairewise_maxb = torch.cat((torch.min(kMat, indmaxB).reshape(-1, 1).float(),
                                                torch.max(kMat, indmaxB).reshape(-1, 1).float(),
                                                maxB.reshape(-1, 1)), dim=1)

                    if isUsingGPU:
                        els = torch.arange(pairewise_maxb.size(0)).cuda()
                    else:
                        els = torch.arange(pairewise_maxb.size(0))

                    for i in els:
                        # calculate new coordinates of the k-th hyperbox by including the pairewise_maxb[i, 1]-th box, scrap the latter and leave the rest intact
                        # agglomerate pairewise_maxb[i, 0] and pairewise_maxb[i, 1] by adjusting the box at pairewise_maxb[i, 0]
                        # and removing the box at pairewise_maxb[i, 1]

                        row1 = pairewise_maxb[i, 0].long()
                        row2 = pairewise_maxb[i, 1].long()

                        newV = torch.cat((self.V[:row1],
                                          torch.min(self.V[row1], self.V[row2]).reshape(1, -1),
                                          self.V[row1 + 1:row2],
                                          self.V[row2 + 1:]), dim=0)
                        newW = torch.cat((self.W[:row1],
                                          torch.max(self.W[row1], self.W[row2]).reshape(1, -1),
                                          self.W[row1 + 1:row2],
                                          self.W[row2 + 1:]), dim=0)
                        newClassId = torch.cat((self.classId[:row2], self.classId[row2 + 1:]))

                        # adjust the hyperbox if there is no overlap and the maximum hyperbox size is not violated
                        # the position of the adjusted box in the new bounds is row1 (= pairewise_maxb[i, 0])
                        if (not torch_isOverlap(newV, newW, row1, newClassId)) and \
                                ((newW[row1] - newV[row1]) <= self.teta).all():
                            self.V = newV
                            self.W = newW
                            self.classId = newClassId

                            #                            self.cardin[int(pairewise_maxb[i, 0])] = self.cardin[int(pairewise_maxb[i, 0])] + self.cardin[int(pairewise_maxb[i, 1])]
                            #                            #self.cardin = np.delete(self.cardin, int(pairewise_maxb[i, 1]))
                            #                            self.cardin = np.append(self.cardin[0:int(pairewise_maxb[i, 1])], self.cardin[int(pairewise_maxb[i, 1]) + 1:])
                            #
                            #                            self.clusters[int(pairewise_maxb[i, 0])] = np.append(self.clusters[int(pairewise_maxb[i, 0])], self.clusters[int(pairewise_maxb[i, 1])])
                            #                            #self.clusters = np.delete(self.clusters, int(pairewise_maxb[i, 1]))
                            #                            self.clusters = np.append(self.clusters[0:int(pairewise_maxb[i, 1])], self.clusters[int(pairewise_maxb[i, 1]) + 1:])
                            #
                            isTraining = True

                            if k != row1:  # the box at position pairewise_maxb[i, 1] (also k) was removed, so the next step should restart from that position
                                k = k - 1

                            if self.isDraw:
                                try:
                                    hyperboxes[row2].remove()
                                    hyperboxes[row1].remove()
                                except:
                                    print("Could not remove the old hyperboxes")

                                Vt, Wt = self.pcatransform()

                                box_color = 'k'
                                if self.classId[row1] < len(mark_col):
                                    box_color = mark_col[self.classId[row1]]

                                box = drawbox(np.asmatrix(Vt[row1]),
                                              np.asmatrix(Wt[row1]),
                                              drawing_canvas, box_color)
                                self.delay()
                                hyperboxes[row1] = box[0]
                                hyperboxes.remove(hyperboxes[row2])

                            break  # if hyperbox adjusted there's no need to look at other hyperboxes

                k = k + 1

            if isTraining == True and isUsingGPU == True and self.V.size(
                    0) * self.V.size(1) < GPU_Computing_Threshold:
                isUsingGPU = False
                self.V = self.V.cpu()
                self.W = self.W.cpu()
                self.classId = self.classId.cpu()

        time_end = time.perf_counter()
        self.elapsed_training_time = time_end - time_start

        return self
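The construction of the candidate list pairewise_maxb in the loop above can be summarised by the following standalone sketch (illustrative; b is the membership vector of box k to all boxes, and label 0 is treated as unlabeled, as in the snippet):

import torch

def merge_candidates(b, classId, k, bthres):
    """Return (row, col, membership) triples of boxes that box k may merge with:
    membership at least bthres, excluding k itself, restricted to boxes of the same
    class or unlabeled boxes (label 0), sorted by membership in descending order."""
    sortB, indB = torch.sort(b, descending=True)
    keep = sortB >= bthres
    maxB, indmaxB = sortB[keep], indB[keep]
    not_self = indmaxB != k
    maxB, indmaxB = maxB[not_self], indmaxB[not_self]
    same_class = (classId[indmaxB] == classId[k]) | (classId[indmaxB] == 0)
    maxB, indmaxB = maxB[same_class], indmaxB[same_class]
    kMat = torch.full_like(indmaxB, k)
    return torch.cat((torch.min(kMat, indmaxB).reshape(-1, 1).float(),
                      torch.max(kMat, indmaxB).reshape(-1, 1).float(),
                      maxB.reshape(-1, 1)), dim=1)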
Example #6
    def fit(self, X_l, X_u, patClassId):
        """
        Training the classifier
        
         X_l            Input data lower bounds (rows = objects, columns = features)
         X_u            Input data upper bounds (rows = objects, columns = features)
         patClassId     Input data class labels (crisp). patClassId[i] = 0 corresponds to an unlabeled item
        
        """
        print('--Online Learning--')

        if self.isNorm == True:
            X_l, X_u = self.dataPreprocessing(X_l, X_u)

        if isinstance(X_l, torch.Tensor) == False:
            # X_l = torch.cuda.FloatTensor(X_l)
            # X_u = torch.cuda.FloatTensor(X_u)
            # patClassId = torch.cuda.LongTensor(patClassId)
            # print('Convert data')
            # t1 = time.clock()
            X_l = torch.from_numpy(X_l).float()
            X_u = torch.from_numpy(X_u).float()
            patClassId = torch.from_numpy(patClassId).long()
            # t2 = time.clock()
            # print('Finish converting data: ', t2 - t1)

        time_start = time.perf_counter()

        yX, xX = X_l.size()
        teta = self.teta

        mark = np.array([
            '*', 'o', 'x', '+', '.', ',', 'v', '^', '<', '>', '1', '2', '3',
            '4', '8', 's', 'p', 'P', 'h', 'H', 'X', 'D', '|', '_'
        ])
        mark_col = np.array(['r', 'g', 'b', 'y', 'c', 'm', 'k'])

        listLines = list()
        listInputSamplePoints = list()

        if self.isDraw:
            drawing_canvas = self.initializeCanvasGraph(
                "GFMM - Online learning", xX)

            if self.V.size(0) > 0:
                # draw existed hyperboxes
                color_ = np.array(['k'] * len(self.classId), dtype=object)
                for c in range(len(self.classId)):
                    if self.classId[c] < len(mark_col):
                        color_[c] = mark_col[self.classId[c]]

                hyperboxes = drawbox(self.V[:, 0:np.minimum(xX, 3)].numpy(),
                                     self.W[:, 0:np.minimum(xX, 3)].numpy(),
                                     drawing_canvas, color_)
                listLines.extend(hyperboxes)
                self.delay()

        self.misclass = 1
        isUsingGPU = False

        while self.misclass > 0 and teta >= self.tMin:
            # for each input sample
            for i in range(yX):
                if (len(self.V) > 0 and is_Have_GPU and not isUsingGPU
                        and self.V.size(0) * self.V.size(1) >= GPU_Computing_Threshold):
                    self.V = self.V.cuda()
                    self.W = self.W.cuda()
                    self.classId = self.classId.cuda()
                    isUsingGPU = True
                # print('Sample: ', i)
                # draw input samples
                if self.isDraw:
                    if i == 0 and len(listInputSamplePoints) > 0:
                        # reset input point drawing
                        for point in listInputSamplePoints:
                            point.remove()
                        listInputSamplePoints.clear()

                    color_ = 'k'
                    if patClassId[i] < len(mark_col):
                        color_ = mark_col[patClassId[i]]

                    if (X_l[i, :] == X_u[i, :]).all():
                        marker_ = 'd'
                        if patClassId[i] < len(mark):
                            marker_ = mark[patClassId[i]]

                        if xX == 2:
                            inputPoint = drawing_canvas.plot(X_l[i, 0],
                                                             X_l[i, 1],
                                                             color=color_,
                                                             marker=marker_)
                        else:
                            inputPoint = drawing_canvas.plot([X_l[i, 0]],
                                                             [X_l[i, 1]],
                                                             [X_l[i, 2]],
                                                             color=color_,
                                                             marker=marker_)

                        #listInputSamplePoints.append(inputPoint)
                    else:
                        inputPoint = drawbox(
                            np.asmatrix(X_l[i, 0:np.minimum(xX, 3)].numpy()),
                            np.asmatrix(X_u[i, 0:np.minimum(xX, 3)].numpy()),
                            drawing_canvas, color_)

                    listInputSamplePoints.append(inputPoint[0])
                    self.delay()

                if self.V.size(0) == 0:  # no model provided - starting from scratch
                    # print('Initial data')
                    # t1 = time.clock()
                    self.V = X_l[0].reshape(1, -1)  # torch.DoubleTensor(X_l[0]).to(device)
                    self.W = X_u[0].reshape(1, -1)  # torch.DoubleTensor(X_u[0]).to(device)
                    self.classId = torch.LongTensor([patClassId[0]])  # torch.DoubleTensor([patClassId[0]]).to(device)
                    # t2 = time.clock()
                    # print('Finish initializing data: ', t2 - t1)

                    if self.isDraw == True:
                        # draw hyperbox
                        box_color = 'k'
                        if patClassId[0] < len(mark_col):
                            box_color = mark_col[patClassId[0]]

                        hyperbox = drawbox(
                            np.asmatrix(self.V[0,
                                               0:np.minimum(xX, 3)].numpy()),
                            np.asmatrix(self.W[0,
                                               0:np.minimum(xX, 3)].numpy()),
                            drawing_canvas, box_color)
                        listLines.append(hyperbox[0])
                        self.delay()

                else:
                    # print('V === ', self.V)
                    # print('W === ', self.W)
                    #                    print('Compute membership')
                    #                    t1 = time.clock()
                    if isUsingGPU == False:
                        Xl_cur = X_l[i]
                        Xu_cur = X_u[i]
                        classOfX = patClassId[i]
                        b = torch_memberG(Xl_cur, Xu_cur, self.V, self.W,
                                          self.gamma)
                    else:
                        Xl_cur = X_l[i].cuda()
                        Xu_cur = X_u[i].cuda()
                        classOfX = patClassId[i].cuda()

                        b = gpu_memberG(Xl_cur, Xu_cur, self.V, self.W,
                                        self.gamma)
                    # t2 = time.clock()
                    # print('Finish computing membership: ', t2 - t1)
                    # print(b)
                    bSort, index = torch.sort(b, descending=True)
                    # print('index = ', index)
                    # print('Self-class: ', self.classId)
                    # print(' i===', i)

                    if bSort[0] != 1 or (classOfX != self.classId[index[0]]
                                         and classOfX != 0):
                        adjust = False
                        for j in index:
                            # test violation of max hyperbox size and class labels
                            if (classOfX == self.classId[j] or self.classId[j]
                                    == 0 or classOfX == 0) and (
                                        (torch.max(self.W[j], Xu_cur).float() -
                                         torch.min(self.V[j], Xl_cur).float())
                                        <= teta).all() == True:
                                # adjust the j-th hyperbox
                                self.V[j] = torch.min(self.V[j], Xl_cur)
                                self.W[j] = torch.max(self.W[j], Xu_cur)
                                indOfWinner = j
                                adjust = True
                                if classOfX != 0 and self.classId[j] == 0:
                                    self.classId[j] = classOfX

                                if self.isDraw:
                                    # Handle drawing graph
                                    box_color = 'k'
                                    if self.classId[j] < len(mark_col):
                                        box_color = mark_col[self.classId[j]]

                                    try:
                                        listLines[j].remove()
                                    except:
                                        pass

                                    if isUsingGPU == False:
                                        hyperbox = drawbox(
                                            np.asmatrix(self.V[
                                                j,
                                                0:np.minimum(xX, 3)].numpy()),
                                            np.asmatrix(self.W[
                                                j,
                                                0:np.minimum(xX, 3)].numpy()),
                                            drawing_canvas, box_color)
                                    else:
                                        hyperbox = drawbox(
                                            np.asmatrix(
                                                self.V[j, 0:np.minimum(xX, 3)].
                                                cpu().numpy()),
                                            np.asmatrix(
                                                self.W[j, 0:np.minimum(xX, 3)].
                                                cpu().numpy()), drawing_canvas,
                                            box_color)

                                    listLines[j] = hyperbox[0]
                                    self.delay()

                                break

                        # if i-th sample did not fit into any existing box, create a new one
                        if not adjust:
                            #                            print('Create new hyperbox')
                            #                            t1 = time.clock()
                            self.V = torch.cat((self.V, Xl_cur.reshape(1, -1)),
                                               0)
                            self.W = torch.cat((self.W, Xu_cur.reshape(1, -1)),
                                               0)
                            if isUsingGPU == False:
                                self.classId = torch.cat(
                                    (self.classId, torch.LongTensor([classOfX
                                                                     ])), 0)
                            else:
                                self.classId = torch.cat(
                                    (self.classId,
                                     torch.cuda.LongTensor([classOfX])), 0)
#                            t2 = time.clock()
#                            print('Finish compute new hyperbox: ', t2 - t1)

                            if self.isDraw:
                                # handle drawing graph
                                box_color = 'k'
                                if self.classId[-1] < len(mark_col):
                                    box_color = mark_col[self.classId[-1]]

                                if isUsingGPU == False:
                                    hyperbox = drawbox(
                                        np.asmatrix(
                                            X_l[i,
                                                0:np.minimum(xX, 3)].numpy()),
                                        np.asmatrix(
                                            X_u[i,
                                                0:np.minimum(xX, 3)].numpy()),
                                        drawing_canvas, box_color)
                                else:
                                    hyperbox = drawbox(
                                        np.asmatrix(X_l[i,
                                                        0:np.minimum(xX, 3)].
                                                    cpu().numpy()),
                                        np.asmatrix(X_u[i,
                                                        0:np.minimum(xX, 3)].
                                                    cpu().numpy()),
                                        drawing_canvas, box_color)

                                listLines.append(hyperbox[0])
                                self.delay()

                        elif self.V.size(0) > 1:
                            for ii in range(self.V.size(0)):
                                if ii != indOfWinner and self.classId[ii] != self.classId[indOfWinner]:
                                    #                                    print('Overlap Test')
                                    #                                    t1 = time.clock()
                                    caseDim = torch_hyperboxOverlapTest(
                                        self.V, self.W, indOfWinner,
                                        ii)  # overlap test
                                    #                                    t2 = time.clock()
                                    #                                    print('Finish overlap test: ', t2 - t1)

                                    if len(caseDim) > 0:
                                        #                                        print('Hyperbox Contraction')
                                        #                                        t1 = time.clock()
                                        self.V, self.W = torch_hyperboxContraction(
                                            self.V, self.W, caseDim, ii,
                                            indOfWinner)
                                        #                                        t2 = time.clock()
                                        #                                        print('Finish hyperbox Contraction: ', t2 - t1)

                                        if self.isDraw:
                                            # Handle graph drawing
                                            boxii_color = boxwin_color = 'k'
                                            if self.classId[ii] < len(
                                                    mark_col):
                                                boxii_color = mark_col[
                                                    self.classId[ii]]

                                            if self.classId[indOfWinner] < len(
                                                    mark_col):
                                                boxwin_color = mark_col[
                                                    self.classId[indOfWinner]]

                                            try:
                                                listLines[ii].remove()
                                                listLines[indOfWinner].remove()
                                            except:
                                                pass

                                            if isUsingGPU == False:
                                                hyperboxes = drawbox(
                                                    self.V[[ii, indOfWinner],
                                                           0:np.minimum(xX, 3)]
                                                    .numpy(),
                                                    self.W[[ii, indOfWinner],
                                                           0:np.minimum(xX, 3)]
                                                    .numpy(), drawing_canvas, [
                                                        boxii_color,
                                                        boxwin_color
                                                    ])
                                            else:
                                                hyperboxes = drawbox(
                                                    self.V[[ii, indOfWinner],
                                                           0:np.minimum(xX, 3)]
                                                    .cpu().numpy(),
                                                    self.W[[ii, indOfWinner],
                                                           0:np.minimum(xX, 3)]
                                                    .cpu().numpy(),
                                                    drawing_canvas, [
                                                        boxii_color,
                                                        boxwin_color
                                                    ])

                                            listLines[ii] = hyperboxes[0]
                                            listLines[
                                                indOfWinner] = hyperboxes[1]
                                            self.delay()

            teta = teta * 0.9
            if teta >= self.tMin:
                result = torch_predict(self.V, self.W, self.classId, X_l, X_u,
                                       patClassId, self.gamma, self.oper)
                self.misclass = result.summis

        # Draw last result


#        if self.isDraw == True:
#            # Handle drawing graph
#            drawing_canvas.cla()
#            color_ = np.empty(len(self.classId), dtype = object)
#            for c in range(len(self.classId)):
#                color_[c] = mark_col[self.classId[c]]
#
#            drawbox(self.V[:, 0:np.minimum(xX, 3)], self.W[:, 0:np.minimum(xX, 3)], drawing_canvas, color_)
#            self.delay()
#
#        if self.isDraw:
#            plt.show()

        time_end = time.perf_counter()
        self.elapsed_training_time = time_end - time_start

        return self
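Both online-learning variants shrink the maximum hyperbox size by a factor of 0.9 after each pass and stop once the training data are classified perfectly or teta falls below tMin; the number of passes this schedule allows can be bounded as in the following sketch (the numeric values are illustrative, not from the source):

import math

teta, tMin = 0.26, 0.05   # illustrative initial size bound and lower limit
max_passes = math.ceil(math.log(tMin / teta) / math.log(0.9))
print(max_passes)         # upper bound on the number of outer passes while misclassifications persist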
Example #7
    def fit(self, X_l, X_u, patClassId):  
        """
        X_l         Input data lower bounds (rows = objects, columns = features)
        X_u         Input data upper bounds (rows = objects, columns = features)
        patClassId  Input data class labels (crisp)
        """
        
        if self.isNorm == True:
            X_l, X_u = self.dataPreprocessing(X_l, X_u)
        
        if isinstance(X_l, torch.Tensor) == False:
            X_l = torch.from_numpy(X_l).float()
            X_u = torch.from_numpy(X_u).float()
            patClassId = torch.from_numpy(patClassId).long()
            
        time_start = time.perf_counter()
        
        isUsingGPU = False
        if is_Have_GPU and X_l.size(0) * X_l.size(1) >= GPU_Computing_Threshold:
            self.V = X_l.cuda()
            self.W = X_u.cuda()
            self.classId = patClassId.cuda()
            isUsingGPU = True
        else:
            self.V = X_l
            self.W = X_u
            self.classId = patClassId
         
        # training
        isTraining = True
        while isTraining:
            isTraining = False
            
            k = 0 # input pattern index
            while k < len(self.classId):
                idx_same_classes = (self.classId == self.classId[k]) | (self.classId == UNLABELED_CLASS) | ((self.classId != self.classId[k]) & (self.classId[k] == UNLABELED_CLASS))
                idx_same_classes[k] = 0 # remove element in the position k
                idex = torch.arange(len(self.classId))
                idex = idex[idx_same_classes] # keep the indices of elements retained
                V_same_class = self.V[idx_same_classes]
                W_same_class = self.W[idx_same_classes]
                
                if self.simil == 'short':
                    if isUsingGPU == False:
                        b = torch_memberG(self.W[k], self.V[k], V_same_class, W_same_class, self.gamma, self.oper, isUsingGPU)
                    else:
                        b = gpu_memberG(self.W[k], self.V[k], V_same_class, W_same_class, self.gamma, self.oper)
                        
                elif self.simil == 'long':
                    if isUsingGPU == False:
                        b = torch_memberG(self.V[k], self.W[k], V_same_class, W_same_class, self.gamma, self.oper, isUsingGPU)
                    else:
                        b = gpu_memberG(self.V[k], self.W[k], V_same_class, W_same_class, self.gamma, self.oper)
                        
                else:
                    b = torch_asym_similarity_one_many(self.V[k], self.W[k], V_same_class, W_same_class, self.gamma, self.sing, self.oper, isUsingGPU)
                
                sortB, indB = torch.sort(b, descending=True)
                idex = idex[indB]
                
                maxB = sortB[sortB >= self.bthres]  # apply membership threshold
                
                if len(maxB) > 0:
                    idexmax = idex[sortB >= self.bthres]
                    
                    if isUsingGPU == True:
                        idexmax = idexmax.cuda()
                        kMat = torch.cuda.LongTensor([k]).expand(idexmax.size(0))
                    else:
                        kMat = torch.LongTensor([k]).expand(idexmax.size(0))
                    
                    pairewise_maxb = torch.cat((torch.min(kMat, idexmax).reshape(-1, 1).float(), torch.max(kMat,idexmax).reshape(-1, 1).float(), maxB.reshape(-1, 1)), dim=1)

                    if isUsingGPU:
                        els = torch.arange(pairewise_maxb.size(0)).cuda()
                    else:
                        els = torch.arange(pairewise_maxb.size(0))
                        
                    for i in els:
                        # calculate new coordinates of the k-th hyperbox by including the pairewise_maxb[i, 1]-th box, scrap the latter and leave the rest intact
                        # agglomerate pairewise_maxb[i, 0] and pairewise_maxb[i, 1] by adjusting the box at pairewise_maxb[i, 0]
                        # and removing the box at pairewise_maxb[i, 1]
                        row1 = pairewise_maxb[i, 0].long()
                        row2 = pairewise_maxb[i, 1].long()
                        
                        newV = torch.cat((self.V[:row1], torch.min(self.V[row1], self.V[row2]).reshape(1, -1), self.V[row1 + 1:row2], self.V[row2 + 1:]), dim=0)
                        newW = torch.cat((self.W[:row1], torch.max(self.W[row1], self.W[row2]).reshape(1, -1), self.W[row1 + 1:row2], self.W[row2 + 1:]), dim=0)
                        newClassId = torch.cat((self.classId[:row2], self.classId[row2 + 1:]))
                        if (newClassId[row1] == UNLABELED_CLASS):
                            newClassId[row1] = self.classId[row2]
                        # adjust the hyperbox if no overlap and maximum hyperbox size is not violated
                        # position of adjustment is pairewise_maxb[i, 0] in new bounds
                        if ((((newW[row1] - newV[row1]) <= self.teta).all() == True) and (not torch_modifiedIsOverlap(newV, newW, row1, newClassId, isUsingGPU))):
                            self.V = newV
                            self.W = newW
                            self.classId = newClassId
                                                   
                            isTraining = True
                            
                            if k != row1: # position pairewise_maxb[i, 1] (also k) is removed, so next step should start from pairewise_maxb[i, 1]
                                k = k - 1
                                
                            break # if hyperbox adjusted there's no need to look at other hyperboxes                            
                        
                k = k + 1
                    
            if isTraining == True and isUsingGPU == True and self.V.size(0) * self.V.size(1) < GPU_Computing_Threshold:
                isUsingGPU = False
                self.V = self.V.cpu()
                self.W = self.W.cpu()
                self.classId = self.classId.cpu()
        
        time_end = time.perf_counter()
        self.elapsed_training_time = time_end - time_start
         
        return self
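A hypothetical end-to-end sketch of how such a fit method is typically driven (the class name AcceleratedAggloGFMM and its default constructor are assumptions for illustration and are not part of the snippets above):

import numpy as np

# interval-valued training data in [0, 1]
X_l = np.random.rand(100, 4).astype(np.float32)           # lower bounds
X_u = np.clip(X_l + 0.05, 0.0, 1.0).astype(np.float32)    # upper bounds
y = np.random.randint(1, 3, size=100)                     # crisp class labels 1 and 2

clf = AcceleratedAggloGFMM()   # hypothetical class exposing the fit method above
clf.fit(X_l, X_u, y)

print(clf.V.size(0), 'hyperboxes after agglomeration')
print(clf.elapsed_training_time, 'seconds of training time')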