Example #1
0
    def solve(self, C, xt, lt, task_indicator, M):
        """
        Solve the dual SVM problem using cvxopt (via the OpenOpt QP wrapper).

        @param C: box-constraint (regularization) parameter; 0 <= alpha_i <= C
        @param xt: list of example vectors
        @param lt: list of labels aligned with xt
        @param task_indicator: task index of each example in xt
        @param M: task-similarity matrix, indexed as M[s, t]

        Side effects: sets self.alphas, self.dual_obj, self.obj and self.W.
        @return: True on completion
        """

        num_xt = len(xt)

        # quadratic term of the dual:
        # Q[i, j] = M[s, t] * y_i * y_j * <x_i, x_j>
        Q = np.zeros((num_xt, num_xt))
        for i in xrange(num_xt):
            for j in xrange(num_xt):
                s = task_indicator[i]
                t = task_indicator[j]
                Q[i, j] = M[s, t] * lt[i] * lt[j] * np.dot(xt[i], xt[j])

        # linear term of the dual objective
        f = -np.ones(num_xt)

        # set up QP with box constraints only (no equality constraint / bias).
        # NOTE: the problem object gets its own name instead of clobbering the
        # linear-term vector, which the original code rebound as `p`.
        qp = QP(Q, f, lb=np.zeros(num_xt), ub=C * np.ones(num_xt))
        qp.debug = 1

        # run solver
        r = qp.solve('cvxopt_qp', iprint=0)

        # recover result: dual variables and objective value
        self.alphas = r.xf
        self.dual_obj = self.obj = r.ff

        # compute W from alphas
        self.W = alphas_to_w(self.alphas, xt, lt, task_indicator, M)

        return True
Example #2
0
    def minimize(self, **kwargs):
        """ solve the quadratic problem using OpenOpt

        Returns:
        obj_value, solution

        obj_value -- value of the objective function at the discovered solution
        solution  -- the solution flux vector (indexed like matrix columns)
        """
        # assemble the QP from the stored objective and constraint data
        problem = QP(self.obj.H, self.obj.f, A=self.Aineq, Aeq=self.Aeq,
                     b=self.bineq, beq=self.beq, lb=self.lb, ub=self.ub,
                     **kwargs)
        problem.debug = 1
        result = problem.solve(self.solver)

        # solver failed if istop is non-positive or the objective is NaN
        # (result.ff != result.ff is the NaN self-inequality test)
        self.istop = result.istop
        if result.istop <= 0 or result.ff != result.ff:
            self.obj_value = 0
            self.solution = []
        else:
            self.obj_value = result.ff
            self.solution = result.xf

        return self.obj_value, self.solution
Example #3
0
    def _train(self, train_data, param):
        """
        training procedure using training examples and labels
        
        @param train_data: Data relevant to SVM training
        @type train_data: dict<str, list<instances> >
        @param param: Parameters for the training procedure
        @type param: ParameterSvm
        """

        # fix dimensions
        M = len(train_data)

        N = 0
        for key in train_data.keys():
            N += len(train_data[key])

        # init containers
        examples = []
        labels = []

        # vector to indicate to which task each example belongs
        task_vector = []
        task_num = 0
        tmp_examples = 0

        label_matrix = numpy.zeros((M, N))

        # extract training data
        for (task_id, instance_set) in train_data.items():

            print "train task id:", task_id
            #assert(instance_set[0].dataset.organism==task_id)

            examples.extend([inst.example for inst in instance_set])

            tmp_labels = [inst.label for inst in instance_set]
            labels.extend(tmp_labels)

            begin_idx = tmp_examples
            end_idx = tmp_examples + len(tmp_labels)

            # fill matrix row
            label_matrix[task_num, begin_idx:end_idx] = tmp_labels

            task_vector.extend([task_num] * len(instance_set))

            task_num += 1
            tmp_examples += len(tmp_labels)

        # fetch gammas from parameter object
        # TODO: compute gammas outside of this
        gammas = numpy.ones((M, M)) + numpy.eye(M)
        #gammas = numpy.eye(M)

        # create kernel
        kernel = shogun_factory.create_kernel(examples, param)

        y = numpy.array(labels)

        print "computing kernel matrix"

        km = kernel.get_kernel_matrix()
        km = reweight_kernel_matrix(km, gammas, task_vector)

        # "add" labels to Q-matrix
        km = numpy.transpose(y.flatten() * (km * y.flatten()).transpose())

        print "done computing kernel matrix, calling solver"

        f = -numpy.ones(N)
        b = numpy.zeros((M, 1))

        # set up QP
        p = QP(km,
               f,
               Aeq=label_matrix,
               beq=b,
               lb=numpy.zeros(N),
               ub=param.cost * numpy.ones(N))
        p.debug = 1

        # run solver
        r = p.solve('cvxopt_qp', iprint=0)

        print "done with training"

        alphas = r.xf
        objective = r.ff

        print "alphas:", alphas

        predictors = {}

        for (k, task_id) in enumerate(train_data.keys()):
            # pack all relevant information in predictor
            predictors[task_id] = (alphas, param, task_vector, k, gammas,
                                   examples, labels)

        return predictors
    def _train(self, train_data, param):
        """
        training procedure using training examples and labels
        
        @param train_data: Data relevant to SVM training
        @type train_data: dict<str, list<instances> >
        @param param: Parameters for the training procedure
        @type param: ParameterSvm
        """
 
        # NOTE(review): this appears to be a verbatim duplicate of the _train
        # definition immediately above; if both live in the same class, this
        # later definition shadows the earlier one at class-creation time —
        # confirm whether one of them should be removed.
        
                
        # fix dimensions
        # M = number of tasks, N = total number of examples over all tasks
        M = len(train_data)

        N = 0
        for key in train_data.keys():
            N += len(train_data[key])
        
        # init containers
        examples = []
        labels = []


        # vector to indicate to which task each example belongs
        task_vector = []
        task_num = 0
        tmp_examples = 0

        # one row per task; row k carries task k's labels in its column range
        label_matrix = numpy.zeros((M,N))


        # extract training data
        for (task_id, instance_set) in train_data.items():
  
            print "train task id:", task_id
            #assert(instance_set[0].dataset.organism==task_id)
            
            examples.extend([inst.example for inst in instance_set])
            
            tmp_labels = [inst.label for inst in instance_set]
            labels.extend(tmp_labels)
            
            # column range occupied by this task's examples
            begin_idx = tmp_examples
            end_idx = tmp_examples + len(tmp_labels) 
            
            # fill matrix row
            label_matrix[task_num, begin_idx:end_idx] = tmp_labels

            task_vector.extend([task_num]*len(instance_set))

            task_num += 1
            tmp_examples += len(tmp_labels)


        # fetch gammas from parameter object
        # TODO: compute gammas outside of this
        # all task pairs weighted 1, with an extra 1 on the diagonal
        gammas = numpy.ones((M,M)) + numpy.eye(M)
        #gammas = numpy.eye(M)
        

        # create kernel
        kernel = shogun_factory.create_kernel(examples, param)


        y = numpy.array(labels)

        print "computing kernel matrix"

        km = kernel.get_kernel_matrix()
        # rescale kernel entries by the gamma weight of each task pair
        km = reweight_kernel_matrix(km, gammas, task_vector)

        # "add" labels to Q-matrix
        km = numpy.transpose(y.flatten() * (km*y.flatten()).transpose())

        print "done computing kernel matrix, calling solver"


        # linear term and equality-constraint right-hand side of the dual QP
        f = -numpy.ones(N)
        b = numpy.zeros((M,1))

        # set up QP
        p = QP(km, f, Aeq=label_matrix, beq=b, lb=numpy.zeros(N), ub=param.cost*numpy.ones(N))
        p.debug=1
        
        # run solver
        r = p.solve('cvxopt_qp', iprint = 0)

        print "done with training"

        alphas = r.xf
        objective = r.ff


        print "alphas:", alphas

        predictors = {}

        for (k, task_id) in enumerate(train_data.keys()):
            # pack all relevant information in predictor
            predictors[task_id] = (alphas, param, task_vector, k, gammas, examples, labels)

        return predictors