Example #1

import cvxmod

def compute_bbox_set_agreement(example_boxes, gold_boxes):
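    """
    Compute an agreement score in [0, 1] between a set of predicted
    bounding boxes and a set of gold-standard boxes: pairwise overlap
    scores are maximized under a linear relaxation of one-to-one
    matching, then normalized by the size of the larger set.
    """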
    nExB = len(example_boxes)
    nGtB = len(gold_boxes)
    if nExB == 0:
        if nGtB == 0:
            return 1.0
        else:
            return 0.0

    if nGtB == 0:
        print "WARNING: no gold boxes to match against (new object)"
        return 0.0

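    # pairwise overlap between each example box and each gold box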
    A = cvxmod.zeros(rows=nExB, cols=nGtB)

    for iBox, ex in enumerate(example_boxes):
        for jBox, gt in enumerate(gold_boxes):
            A[iBox, jBox] = ex.overlap_score(gt)

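    # S selects, per example box, all of its assignment variables (row sums);
    # S2 does the same per gold box (column sums). Together with the 0/1
    # bounds below, they relax one-to-one matching to a linear program.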
    S = []
    S2 = []

    for iBox, ex in enumerate(example_boxes):
        S_tmp = ([0] * iBox * nGtB
                 + [1] * nGtB
                 + [0] * (nExB - iBox - 1) * nGtB)

        S.append(S_tmp)

    for jBox in range(0, nGtB):
        S2_tmp = [0] * nExB * nGtB
        for j2 in range(0, nExB):
            S2_tmp[j2 * nGtB + jBox] = 1

        S2.append(S2_tmp)

    S = cvxmod.transpose(cvxmod.matrix(S, size=(nExB * nGtB, nExB)))
    S2 = cvxmod.transpose(cvxmod.matrix(S2, size=(nExB * nGtB, nGtB)))

    A2 = cvxmod.matrix(A, (1, nExB * nGtB))
    x = cvxmod.optvar('x', rows=nExB * nGtB, cols=1)

    p = cvxmod.problem(cvxmod.maximize(A2 * x))
    p.constr.append(x <= 1)
    p.constr.append(x >= 0)

    p.constr.append(S * x <= 1)
    p.constr.append(S2 * x <= 1)

    p.solve(True)
    overlap = cvxmod.value(p) / max(nExB, nGtB)
    assert overlap < 1.0001, "agreement score exceeds 1: %f" % overlap
    return overlap
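

# A minimal usage sketch (assumption: a hypothetical Box class whose
# overlap_score() returns intersection-over-union in [0, 1]):
#
#   boxes_a = [Box(0, 0, 10, 10), Box(20, 20, 30, 30)]
#   boxes_b = [Box(1, 1, 11, 11)]
#   agreement = compute_bbox_set_agreement(boxes_a, boxes_b)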
Example #2
    def _train(self, train_data, param):
        """
        training procedure using training examples and labels
        
        @param train_data: Data relevant to SVM training
        @type train_data: dict<str, list<instances> >
        @param param: Parameters for the training procedure
        @type param: ParameterSvm
        """

        # split for training weak_learners and boosting
        (train_weak, train_boosting) = split_data(train_data, 4)
          
        # merge data sets
        data = PreparedMultitaskData(train_weak, shuffle=True)
        
        # create shogun label
        lab = shogun_factory.create_labels(data.labels)
        


        ##################################################
        # define pockets
        ##################################################
        
        pockets = [0]*9
        
        pockets[0] = [1, 5, 6, 7, 8, 31, 32, 33, 34]
        pockets[1] = [1, 2, 3, 4, 6, 7, 8, 9, 11, 21, 31]
        pockets[2] = [11, 20, 21, 22, 29, 31]
        pockets[3] = [8, 30, 31, 32]
        pockets[4] = [10, 11, 30]
        pockets[5] = [10, 11, 12, 13, 20, 29]
        pockets[6] = [10, 12, 20, 22, 26, 27, 28, 29]
        pockets[7] = [12, 14, 15, 26]
        pockets[8] = [13, 15, 16, 17, 18, 19, 20, 23, 24, 25, 26]
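
        # NOTE: the hand-defined pockets above are discarded below in favor
        # of one singleton pocket per pseudo-sequence position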
        
        pockets = []
        for i in xrange(35):
            pockets.append([i])


        #new_pockets = []
        
        # merge neighboring pockets
        #for i in range(8):
        #    new_pockets.append(list(set(pockets[i]).union(set(pockets[i+1]))))
            
        #pockets = new_pockets
        
        
        ########################################################
        print "creating a kernel:"
        ########################################################
        
        
        # init seq handler 
        pseudoseqs = SequencesHandler()

        
        classifiers = []


        for pocket in pockets:

            print "creating normalizer"
            #import pdb
            #pdb.set_trace()
            
            normalizer = MultitaskKernelNormalizer(data.task_vector_nums)
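            # (the normalizer will rescale each kernel entry k(x, z) by the
            # similarity of the tasks that x and z belong to)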
            
            print "processing pocket", pocket

            # set similarity
            for task_name_lhs in data.get_task_names():
                for task_name_rhs in data.get_task_names():
                    
                    similarity = 0.0
                    
                    for pseudo_seq_pos in pocket:
                        similarity += float(pseudoseqs.get_similarity(task_name_lhs, task_name_rhs, pseudo_seq_pos-1))
                    
                    # normalize
                    similarity = similarity / float(len(pocket))
                    
                    print "pocket %s (%s, %s) = %f" % (str(pocket), task_name_lhs, task_name_rhs, similarity)
                    
                    normalizer.set_task_similarity(data.name_to_id(task_name_lhs), data.name_to_id(task_name_rhs), similarity)
               

            print "creating empty kernel"
            kernel = shogun_factory.create_kernel(data.examples, param)
            
            print "setting normalizer"
            kernel.set_normalizer(normalizer)

            print "training SVM for pocket", pocket
            svm = self._train_single_svm(param, kernel, lab)

            classifiers.append(svm)
        
        
        print "done obtaining weak learners"
            
        
        # save additional info
        #self.additional_information["svm_objective"] = svm.get_objective()
        #self.additional_information["svm num sv"] = svm.get_num_support_vectors()
        #self.additional_information["post_weights"] = combined_kernel.get_subkernel_weights()
        
        #print self.additional_information 
        


        ##################################################
        # combine weak learners for each task
        ##################################################
        
        
        # set constants

        nu = 0.9  # regularization constant for the boosting LP
        import cvxmod
        
        
        # wrap up predictors
        svms = {}
            
        # use a reference to the same svm several times
        for task_name in train_boosting.keys():
            
            instances = train_boosting[task_name]
            
            N = len(instances)
            F = len(pockets)
            
            examples = [inst.example for inst in instances]
            labels = [inst.label for inst in instances]
            
            # dim = (N x F)
            out = cvxmod.zeros((N,F))
            
            for i in xrange(F):
                svm = classifiers[i]
                tmp_out = self._predict_weak(svm, examples, data.name_to_id(task_name))

                out[:,i] = numpy.sign(tmp_out)
                #out[:,i] = tmp_out
            

            #TODO: fix
            helper.save("/tmp/out_sparse", (out, labels))
            #pdb.set_trace()  # breakpoint disabled: pdb is not imported here
            
            weights = solve_boosting(out, labels, nu, solver="mosek")
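            # NOTE: "weights" is not used below; only the task id and the
            # last weak SVM from the loop above are stored (see TODO)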
            
            
            
            svms[task_name] = (data.name_to_id(task_name), svm)

        
        return svms
Example #3
import numpy
import cvxmod

def solve_boosting(out, labels, nu, solver):
    '''
    solve boosting formulation used by Gehler and Nowozin

    @param out: matrix (N,F) of predictions (for each f_i) for all examples
    @param labels: vector (N,1) label for each example
    @param nu: regularization constant
    @param solver: which LP solver to use, e.g. 'mosek' or 'glpk'
    '''
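
    # LP (nu-LPBoost style):
    #   minimize    -rho + (1 / (nu * N)) * sum(chi)
    #   subject to  y_i * (f(x_i) . beta) + chi_i >= rho,
    #               sum(beta) = 1, beta >= 0, chi >= 0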

    # get dimensions
    N = out.size[0]
    F = out.size[1]

    assert N == len(labels), str(N) + " " + str(len(labels))

    norm_fact = 1.0 / (nu * float(N))
    print "normalization factor %f" % norm_fact
    
    # encode labels on the diagonal to avoid a point-wise product later
    label_matrix = cvxmod.zeros((N, N))
    for i in xrange(N):
        label_matrix[i, i] = labels[i]
    
    
    #### parameters
    
    f = cvxmod.param("f", N, F)
    
    y = cvxmod.param("y", N, N, symm=True)
    
    norm = cvxmod.param("norm", 1) 
    
    #### variables
    
    # rho
    rho = cvxmod.optvar("rho", 1)
    
    # dim = (N x 1)
    chi = cvxmod.optvar("chi", N)
    
    # dim = (F x 1)
    beta = cvxmod.optvar("beta", F)
    
    
    #objective = -rho + cvxmod.sum(chi) * norm_fact + square(norm2(beta)) 
    objective = -rho + cvxmod.sum(chi) * norm_fact
    
    print objective
    
    # create problem                                    
    p = cvxmod.problem(cvxmod.minimize(objective))
    
    
    # create constraint for the probability simplex
    #p.constr.append(beta |cvxmod.In| probsimp(F))
    p.constr.append(cvxmod.sum(beta)==1.0)
    #p.constr.append(square(norm2(beta)) <= 1.0)
    p.constr.append(beta >= 0.0)
    p.constr.append(chi >= 0.0)  # slacks must be non-negative or the LP is unbounded
    
    
    #    y       f     beta          y    f*beta      y*f*beta
    # (N x N) (N x F) (F x 1) --> (N x N) (N x 1) --> (N x 1)
    p.constr.append(y * (f * beta) + chi >= rho)
    
    
    ###### set values
    f.value = out
    y.value = label_matrix
    norm.value = norm_fact 
    
    p.solve(lpsolver=solver)
    

    weights = numpy.array(cvxmod.value(beta))

    #print weights

    cvxmod.printval(chi)
    cvxmod.printval(beta)
    cvxmod.printval(rho)

    return weights
Example #4
    def _train(self, train_data, param):
        """
        training procedure using training examples and labels
        
        @param train_data: Data relevant to SVM training
        @type train_data: dict<str, list<instances> >
        @param param: Parameters for the training procedure
        @type param: ParameterSvm
        """
        
        
        for task_id in train_data.keys():
            print "task_id:", task_id

        # split data for training weak_learners and boosting
        (train_weak, train_boosting) = split_data(train_data, 4)
        
        # train on first part of dataset (evaluate on other)
        prepared_data_weak = PreparedMultitaskData(train_weak, shuffle=False)
        classifiers = self._inner_train(prepared_data_weak, param)

        # train on entire dataset
        prepared_data_final = PreparedMultitaskData(train_data, shuffle=False)
        final_classifiers = self._inner_train(prepared_data_final, param)


        print "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
        print "done training weak learners"

        #####################################################
        #    perform boosting and wrap things up    
        #####################################################

        # wrap up predictors for later use
        predictors = {}

        for task_name in train_boosting.keys():
            
            instances = train_boosting[task_name]
            
            N = len(instances)
            F = len(classifiers)
            
            examples = [inst.example for inst in instances]
            labels = [inst.label for inst in instances]
            
            # dim = (N x F)
            out = cvxmod.zeros((N,F))
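
            # out[i, j] = prediction of weak learner j on example i
            # (sign-thresholded when the "signum" flag is set)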
            
            for i in xrange(F):
                svm = classifiers[i]
                tmp_out = self._predict_weak(svm, examples, prepared_data_weak.name_to_id(task_name), param)

                if param.flags["signum"]:
                    out[:,i] = numpy.sign(tmp_out)
                else:
                    out[:,i] = tmp_out
            
            
            if param.flags["boosting"] == "ones":
                weights = numpy.ones(F)/float(F)
            if param.flags["boosting"] == "L1":
                weights = solve_boosting(out, labels, param.transform, solver="glpk")
            if param.flags["boosting"] == "L2":            
                weights = solve_nu_svm(out, labels, param.transform, solver="glpk", reg=False)
            if param.flags["boosting"] == "L2_reg":            
                weights = solve_nu_svm(out, labels, param.transform, solver="glpk", reg=True)
            
            
            predictors[task_name] = (final_classifiers, weights, prepared_data_final.name_to_id(task_name), param)
            
            
            assert prepared_data_final.name_to_id(task_name)==prepared_data_weak.name_to_id(task_name), "name mappings don't match"
            
        
        #####################################################
        #    Some sanity checks
        ##################################################### 
        
        # make sure we have the same keys (potentially in a different order)
        sym_diff_keys = set(train_weak.keys()).symmetric_difference(set(predictors.keys()))
        assert len(sym_diff_keys)==0, "symmetric difference between keys non-empty: " + str(sym_diff_keys)  


        return predictors
Example #5
import numpy
import cvxmod

def solve_svm(out, labels, nu, solver):
    '''
    solve boosting formulation used by Gehler and Nowozin

    @param out: matrix (N,F) of predictions (for each f_i) for all examples
    @param labels: vector (N,1) label for each example
    @param nu: regularization constant
    @param solver: which solver to use. options: 'mosek', 'glpk'
    '''

    # get dimension
    N = out.size[0]
    F = out.size[1]

    assert N == len(labels), str(N) + " " + str(len(labels))

    norm_fact = 1.0 / (nu * float(N))
    print "normalization factor %f" % (norm_fact)

    # avoid point-wise product
    label_matrix = cvxmod.zeros((N, N))

    for i in xrange(N):
        label_matrix[i, i] = labels[i]

    #### parameters

    f = cvxmod.param("f", N, F)
    y = cvxmod.param("y", N, N, symm=True)
    norm = cvxmod.param("norm", 1)

    #### variables

    # rho
    rho = cvxmod.optvar("rho", 1)

    # dim = (N x 1)
    chi = cvxmod.optvar("chi", N)

    # dim = (F x 1)
    beta = cvxmod.optvar("beta", F)

    #objective = -rho + cvxmod.sum(chi) * norm_fact + square(norm2(beta))
    objective = -rho + cvxmod.sum(chi) * norm_fact

    print objective

    # create problem
    p = cvxmod.problem(cvxmod.minimize(objective))

    # create constraints for the probability simplex
    #p.constr.append(beta |cvxmod.In| probsimp(F))
    p.constr.append(cvxmod.sum(beta) == 1.0)
    p.constr.append(beta >= 0.0)
    p.constr.append(chi >= 0.0)

    # attempt to perform non-sparse boosting
    #p.constr.append(square(norm2(beta)) <= 1.0)

    #    y       f     beta          y    f*beta      y*f*beta
    # (N x N) (N x F) (F x 1) --> (N x N) (N x 1) --> (N x 1)
    p.constr.append(y * (f * beta) + chi >= rho)

    # set values for parameters
    f.value = out
    y.value = label_matrix
    norm.value = norm_fact

    print "solving problem"
    print "============================================="
    print p
    print "============================================="

    # start solver
    p.solve(lpsolver=solver)

    # print variables
    cvxmod.printval(chi)
    cvxmod.printval(beta)
    cvxmod.printval(rho)

    return numpy.array(cvxmod.value(beta))
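

# A minimal usage sketch (assumptions: cvxmod with a GLPK backend is
# available; the values are illustrative only):
#
#   out = cvxmod.zeros((4, 2))      # predictions of 2 weak learners on 4 examples
#   labels = [1, 1, -1, -1]
#   beta = solve_svm(out, labels, nu=0.5, solver="glpk")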
Example #6
    def _train(self, train_data, param):
        """
        training procedure using training examples and labels
        
        @param train_data: Data relevant to SVM training
        @type train_data: dict<str, list<instances> >
        @param param: Parameters for the training procedure
        @type param: ParameterSvm
        """
        
        
        for task_id in train_data.keys():
            print "task_id:", task_id

        # split data for training weak_learners and boosting
        (train_weak, train_boosting) = split_data(train_data, 4)
        
        # train on first part of dataset (evaluate on other)
        prepared_data_weak = PreparedMultitaskData(train_weak, shuffle=False)
        classifiers = self._inner_train(prepared_data_weak, param)

        # train on entire dataset
        prepared_data_final = PreparedMultitaskData(train_data, shuffle=False)
        final_classifiers = self._inner_train(prepared_data_final, param)


        print "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
        print "done training weak learners"

        #####################################################
        #    perform boosting and wrap things up    
        #####################################################

        # wrap up predictors for later use
        predictors = {}

        for task_name in train_boosting.keys():
                        
            instances = train_boosting[task_name]
            
            N = len(instances)
            F = len(classifiers)
            
            examples = [inst.example for inst in instances]
            labels = [inst.label for inst in instances]
            
            # dim = (N x F)
            out = cvxmod.zeros((N,F))
            
            for i in xrange(F):
                svm = classifiers[i]
                tmp_out = self._predict_weak(svm, examples, prepared_data_weak.name_to_id(task_name))

                if param.flags["signum"]:
                    out[:,i] = numpy.sign(tmp_out)
                else:
                    out[:,i] = tmp_out
            
            
            if param.flags["boosting"] == "ones":
                weights = numpy.ones(F)/float(F)
            if param.flags["boosting"] == "L1":
                weights = solve_boosting(out, labels, param.transform, solver="glpk")
            if param.flags["boosting"] == "L2":            
                weights = solve_nu_svm(out, labels, param.transform, solver="glpk", reg=False)
            if param.flags["boosting"] == "L2_reg":            
                weights = solve_nu_svm(out, labels, param.transform, solver="glpk", reg=True)
            
            
            predictors[task_name] = (final_classifiers, weights, prepared_data_final.name_to_id(task_name))
            
            
            assert prepared_data_final.name_to_id(task_name)==prepared_data_weak.name_to_id(task_name), "name mappings don't match"
            
        
        #####################################################
        #    Some sanity checks
        ##################################################### 
        
        # make sure we have the same keys (potentially in a different order)
        sym_diff_keys = set(train_weak.keys()).symmetric_difference(set(predictors.keys()))
        assert len(sym_diff_keys)==0, "symmetric difference between keys non-empty: " + str(sym_diff_keys)  


        return predictors
Example #7
    def _train(self, train_data, param):
        """
        training procedure using training examples and labels
        
        @param train_data: Data relevant to SVM training
        @type train_data: dict<str, list<instances> >
        @param param: Parameters for the training procedure
        @type param: ParameterSvm
        """

        # split data for training weak_learners and boosting
        (train_weak, train_boosting) = split_data(train_data, 4)
                  

        for task_id in train_data.keys():
            print "task_id:", task_id
            
        
        root = param.taxonomy.data
        
        # train on first part of dataset (evaluate on other)
        (classifiers, classifier_at_node) = self._inner_train(train_weak, param)

        # train on entire dataset
        (final_classifiers, final_classifier_at_node) = self._inner_train(train_data, param)

        ###

        print "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
        print "done training weak learners"

        #####################################################
        #    perform boosting and wrap things up    
        #####################################################

        # wrap up predictors for later use
        predictors = {}

        for task_name in train_boosting.keys():
            
            
            instances = train_boosting[task_name]
            
            # get ids of predecessor nodes            
            node_names = [node.name for node in root.get_node(task_name).get_path_root()]
            node_names.append(task_name)
            
            print "node: %s --> %s" % (task_name, str(node_names))
            
            N = len(instances)
            
            
            if param.flags["use_all_nodes"]:
                # use classifiers only from parent nodes
                F = len(classifiers)
                tmp_classifiers = classifiers
                tmp_final_classifiers = final_classifiers
                
            else:
                # use classifiers from all leaves
                F = len(node_names)
                tmp_classifiers = []
                tmp_final_classifiers = []
            
            
            examples = [inst.example for inst in instances]
            labels = [inst.label for inst in instances]
            
            # dim = (N x F)
            out = cvxmod.zeros((N,F))
            
            for i in xrange(F):
                
                if param.flags["use_all_nodes"]:
                    svm = classifiers[i]
                else:
                    svm = classifier_at_node[node_names[i]]
                    tmp_classifiers.append(svm)

                    final_svm = final_classifier_at_node[node_names[i]]
                    tmp_final_classifiers.append(final_svm)
                    
                tmp_out = self._predict_weak(svm, examples, task_name)

                if param.flags["signum"]:
                    out[:,i] = numpy.sign(tmp_out)
                else:
                    out[:,i] = tmp_out
            
            
            if param.flags["boosting"] == "ones":
                weights = numpy.ones(F)/float(F)
            if param.flags["boosting"] == "L1":
                weights = solve_boosting(out, labels, param.transform, solver="glpk")
            if param.flags["boosting"] == "L2":            
                weights = solve_nu_svm(out, labels, param.transform, solver="glpk", reg=False)
            if param.flags["boosting"] == "L2_reg":            
                weights = solve_nu_svm(out, labels, param.transform, solver="glpk", reg=True)
            
            
            predictors[task_name] = (tmp_final_classifiers, weights)
            
        
        #####################################################
        #    Some sanity checks
        ##################################################### 
        
        # make sure we have the same keys (potentially in a different order)
        sym_diff_keys = set(train_weak.keys()).symmetric_difference(set(predictors.keys()))
        assert len(sym_diff_keys)==0, "symmetric difference between keys non-empty: " + str(sym_diff_keys)  


        # save graph plot
        mypath = "/fml/ag-raetsch/share/projects/multitask/graphs/"
        filename = mypath + "graph_" + str(param.id)
        root.plot(filename)  # optionally: plot_cost=True, plot_B=True


        return predictors
Example #8
    def _train(self, train_data, param):
        """
        training procedure using training examples and labels
        
        @param train_data: Data relevant to SVM training
        @type train_data: dict<str, list<instances> >
        @param param: Parameters for the training procedure
        @type param: ParameterSvm
        """

        # split for training weak_learners and boosting
        (train_weak, train_boosting) = split_data(train_data, 4)
          
        # merge data sets
        data = PreparedMultitaskData(train_weak, shuffle=True)
        
        # create shogun label
        lab = shogun_factory.create_labels(data.labels)
        



        ##################################################
        # define pockets
        ##################################################

        # NOTE: "pockets" is otherwise undefined in this variant; assumed,
        # as in the variant above, to be one singleton pocket per position
        pockets = []
        for i in xrange(35):
            pockets.append([i])

        ########################################################
        print "creating a kernel:"
        ########################################################
        
        
        # init seq handler 
        pseudoseqs = SequencesHandler()

        
        classifiers = []


        for pocket in pockets:

            print "creating normalizer"
            #import pdb
            #pdb.set_trace()
            
            normalizer = MultitaskKernelNormalizer(data.task_vector_nums)
            
            print "processing pocket", pocket

            # set similarity
            for task_name_lhs in data.get_task_names():
                for task_name_rhs in data.get_task_names():
                    
                    similarity = 0.0
                    
                    for pseudo_seq_pos in pocket:
                        similarity += float(pseudoseqs.get_similarity(task_name_lhs, task_name_rhs, pseudo_seq_pos-1))
                    
                    # normalize
                    similarity = similarity / float(len(pocket))
                    
                    print "pocket %s (%s, %s) = %f" % (str(pocket), task_name_lhs, task_name_rhs, similarity)
                    
                    normalizer.set_task_similarity(data.name_to_id(task_name_lhs), data.name_to_id(task_name_rhs), similarity)
               

            print "creating empty kernel"
            kernel = shogun_factory.create_kernel(data.examples, param)
            
            print "setting normalizer"
            kernel.set_normalizer(normalizer)

            print "training SVM for pocket", pocket
            svm = self._train_single_svm(param, kernel, lab)

            classifiers.append(svm)
        
        
        print "done obtaining weak learners"
            
        
        # save additional info
        #self.additional_information["svm_objective"] = svm.get_objective()
        #self.additional_information["svm num sv"] = svm.get_num_support_vectors()
        #self.additional_information["post_weights"] = combined_kernel.get_subkernel_weights()
        
        #print self.additional_information 
        


        ##################################################
        # combine weak learners for each task
        ##################################################
        
        
        # set constants

        nu = 0.9  # regularization constant for the boosting LP
        import cvxmod
        
        
        # wrap up predictors
        svms = {}
            
        # use a reference to the same svm several times
        for task_name in train_boosting.keys():
            
            instances = train_boosting[task_name]
            
            N = len(instances)
            F = len(pockets)
            
            examples = [inst.example for inst in instances]
            labels = [inst.label for inst in instances]
            
            # dim = (N x F)
            out = cvxmod.zeros((N,F))
            
            for i in xrange(F):
                svm = classifiers[i]
                tmp_out = self._predict_weak(svm, examples, data.name_to_id(task_name))

                out[:,i] = numpy.sign(tmp_out)
                #out[:,i] = tmp_out
            

            #TODO: fix
            helper.save("/tmp/out_sparse", (out, labels))
            #pdb.set_trace()  # breakpoint disabled: pdb is not imported here
            
            weights = solve_boosting(out, labels, nu, solver="mosek")
            
            
            
            svms[task_name] = (data.name_to_id(task_name), svm)

        
        return svms
Example #9
import numpy
import cvxmod

def solve_boosting(out, labels, nu, solver):
    '''
    solve boosting formulation used by Gehler and Nowozin

    @param out: matrix (N,F) of predictions (for each f_i) for all examples
    @param labels: vector (N,1) label for each example
    @param nu: regularization constant
    @param solver: which LP solver to use, e.g. 'mosek' or 'glpk'
    '''

    # get dimensions
    N = out.size[0]
    F = out.size[1]

    assert N == len(labels), str(N) + " " + str(len(labels))

    norm_fact = 1.0 / (nu * float(N))
    print "normalization factor %f" % norm_fact
    
    # encode labels on the diagonal to avoid a point-wise product later
    label_matrix = cvxmod.zeros((N, N))
    for i in xrange(N):
        label_matrix[i, i] = labels[i]
    
    
    #### parameters
    
    f = cvxmod.param("f", N, F)
    
    y = cvxmod.param("y", N, N, symm=True)
    
    norm = cvxmod.param("norm", 1) 
    
    #### variables
    
    # rho
    rho = cvxmod.optvar("rho", 1)
    
    # dim = (N x 1)
    chi = cvxmod.optvar("chi", N)
    
    # dim = (F x 1)
    beta = cvxmod.optvar("beta", F)
    
    
    #objective = -rho + cvxmod.sum(chi) * norm_fact + square(norm2(beta)) 
    objective = -rho + cvxmod.sum(chi) * norm_fact
    
    print objective
    
    # create problem                                    
    p = cvxmod.problem(cvxmod.minimize(objective))
    
    
    # create constraint for the probability simplex
    #p.constr.append(beta |cvxmod.In| probsimp(F))
    p.constr.append(cvxmod.sum(beta)==1.0)
    #p.constr.append(square(norm2(beta)) <= 1.0)
    p.constr.append(beta >= 0.0)
    p.constr.append(chi >= 0.0)  # slacks must be non-negative or the LP is unbounded
    
    
    #    y       f     beta          y    f*beta      y*f*beta
    # (N x N) (N x F) (F x 1) --> (N x N) (N x 1) --> (N x 1)
    p.constr.append(y * (f * beta) + chi >= rho)
    
    
    ###### set values
    f.value = out
    y.value = label_matrix
    norm.value = norm_fact 
    
    p.solve(lpsolver=solver)
    

    weights = numpy.array(cvxmod.value(beta))

    #print weights

    cvxmod.printval(chi)
    cvxmod.printval(beta)
    cvxmod.printval(rho)

    return weights