def _train(self, train_data, param):
        """
        training procedure using training examples and labels
        
        @param train_data: Data relevant to SVM training
        @type train_data: dict<str, list<instances> >
        @param param: Parameters for the training procedure
        @type param: ParameterSvm
        """

        # dict to save additional information for later analysis
        self.additional_information = {}

        # merge data sets
        data = PreparedMultitaskData(train_data, shuffle=True)

        # create shogun label
        lab = shogun_factory.create_labels(data.labels)

        ########################################################
        print "creating a kernel for each node:"
        ########################################################

        # assemble combined kernel

        combined_kernel = CombinedKernel()

        combined_kernel.io.set_loglevel(shogun.Kernel.MSG_INFO)

        base_features = shogun_factory.create_features(data.examples, param)

        combined_features = CombinedFeatures()

        ##################################################
        # intra-domain blocks (dirac kernel)

        intra_block_vec = PairiiVec()

        for task_id in data.get_task_ids():
            intra_block_vec.push_back(Pairii(task_id, task_id))

        # create mask-based normalizer
        normalizer = MultitaskKernelMaskPairNormalizer(data.task_vector_nums,
                                                       intra_block_vec)
        kernel = shogun_factory.create_empty_kernel(param)
        kernel.set_normalizer(normalizer)
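        # note: the mask-pair normalizer effectively keeps kernel entries only
        # for example pairs whose (task, task) combination is listed in
        # intra_block_vec and suppresses the rest, so this subkernel couples
        # examples of the same task only (a dirac / block-diagonal structure)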

        # append current kernel to CombinedKernel
        combined_kernel.append_kernel(kernel)

        # append features
        combined_features.append_feature_obj(base_features)

        print "------"

        ##################################################
        # all blocks (full kernel matrix)

        all_block_vec = PairiiVec()

        for task_id_1 in data.get_task_ids():
            for task_id_2 in data.get_task_ids():
                all_block_vec.push_back(Pairii(task_id_1, task_id_2))

        # create mask-based normalizer
        normalizer_all = MultitaskKernelMaskPairNormalizer(
            data.task_vector_nums, all_block_vec)
        kernel_all = shogun_factory.create_empty_kernel(param)
        kernel_all.set_normalizer(normalizer_all)

        # append current kernel to CombinedKernel
        combined_kernel.append_kernel(kernel_all)

        # append features
        combined_features.append_feature_obj(base_features)
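
        # together, the intra-task (dirac) subkernel and this all-pairs
        # subkernel span two extremes, fully task-specific learning versus
        # pooling all tasks; the learned subkernel weights decide the mix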

        ##################################################
        # hack

        #        hack_block_vec = PairiiVec()
        #
        #        for task_id_1 in data.get_task_ids():
        #            for task_id_2 in data.get_task_ids():
        #                hack_block_vec.push_back(Pairii(task_id_1, task_id_2))
        #
        #        hack_block_vec.push_back(Pairii(data.name_to_id("B_2705"), data.name_to_id("B_4001")))
        #        other_group = ["B_0702", "B_1501", "B_5801"]
        #        for task_id_1 in other_group:
        #            for task_id_2 in other_group:
        #                hack_block_vec.push_back(Pairii(data.name_to_id(task_id_1), data.name_to_id(task_id_2)))
        #
        #
        #
        #        # create mask-based normalizer
        #        normalizer_hack = MultitaskKernelMaskPairNormalizer(data.task_vector_nums, hack_block_vec)
        #        kernel_hack = shogun_factory.create_empty_kernel(param)
        #        kernel_hack.set_normalizer(normalizer_hack)
        #
        #        # append current kernel to CombinedKernel
        #        combined_kernel.append_kernel(kernel_hack)
        #
        #        # append features
        #        combined_features.append_feature_obj(base_features)

        ##################################################
        # init combined kernel

        combined_kernel.init(combined_features, combined_features)

        #combined_kernel.precompute_subkernels()
        self.additional_information["mkl weights before"] = combined_kernel.get_subkernel_weights()

        print "subkernel weights:", combined_kernel.get_subkernel_weights()

        svm = None

        print "using MKL:", (param.flags["mkl_q"] >= 1.0)

        if param.flags["mkl_q"] >= 1.0:

            svm = MKLClassification()

            svm.set_mkl_norm(param.flags["mkl_q"])
            svm.set_kernel(combined_kernel)
            svm.set_labels(lab)

        else:

            # create SVM (disable unsupported optimizations)
            combined_kernel.set_cache_size(500)
            svm = SVMLight(param.cost, combined_kernel, lab)

        num_threads = 8
        svm.io.enable_progress()
        svm.io.set_loglevel(shogun.Classifier.MSG_INFO)

        svm.parallel.set_num_threads(num_threads)
        svm.set_linadd_enabled(False)
        svm.set_batch_computation_enabled(False)

        svm.set_epsilon(0.03)

        # set cost
        if param.flags["normalize_cost"]:

            norm_c_pos = param.cost / float(
                len([l for l in data.labels if l == 1]))
            norm_c_neg = param.cost / float(
                len([l for l in data.labels if l == -1]))
            svm.set_C(norm_c_neg, norm_c_pos)
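            # dividing the cost by the per-class counts gives both classes the
            # same total misclassification budget despite class imbalance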

        else:

            svm.set_C(param.cost, param.cost)

        svm.train()

        print "subkernel weights (after):", combined_kernel.get_subkernel_weights(
        )

        ########################################################
        print "svm objective:"
        print svm.get_objective()

        self.additional_information["svm_objective"] = svm.get_objective()
        self.additional_information[
            "svm num sv"] = svm.get_num_support_vectors()
        self.additional_information[
            "mkl weights post-training"] = combined_kernel.get_subkernel_weights(
            )

        ########################################################

        # wrap up predictors
        svms = {}

        # use a reference to the same svm several times
        for task_name in train_data.keys():
            svms[task_name] = (data.name_to_id(task_name), combined_kernel,
                               svm, param)

        return svms
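
A minimal numpy sketch (not from the original code) of what the mask-pair normalizer above does conceptually: entries of a base kernel matrix are kept only when the task pair of the two examples is in the active list. All names and values here are made up for illustration.

import numpy

def mask_kernel(K, task_of_example, active_pairs):
    """Zero K[i, j] unless (task_i, task_j) is an active pair."""
    masked = numpy.zeros_like(K)
    active = set(active_pairs)
    for i in range(K.shape[0]):
        for j in range(K.shape[1]):
            if (task_of_example[i], task_of_example[j]) in active:
                masked[i, j] = K[i, j]
    return masked

# toy setup: four examples, two tasks (ids 0 and 1)
K = numpy.ones((4, 4))
tasks = [0, 0, 1, 1]

# intra-domain blocks: only (0, 0) and (1, 1) are active
K_intra = mask_kernel(K, tasks, [(0, 0), (1, 1)])

# all blocks: every task pair is active, so nothing is masked
K_all = mask_kernel(K, tasks, [(0, 0), (0, 1), (1, 0), (1, 1)])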
Example #4
    def _train(self, train_data, param):
        """
        training procedure using training examples and labels
        
        @param train_data: Data relevant to SVM training
        @type train_data: dict<str, list<instances> >
        @param param: Parameters for the training procedure
        @type param: ParameterSvm
        """
        
          
        # merge data sets
        data = PreparedMultitaskData(train_data, shuffle=True)
        
        # create shogun label
        lab = shogun_factory.create_labels(data.labels)

        ########################################################
        print "creating a kernel for each node:"
        ########################################################

        # assemble combined kernel

        combined_kernel = CombinedKernel()

        combined_kernel.io.set_loglevel(shogun.Kernel.MSG_INFO)

        base_features = shogun_factory.create_features(data.examples)

        combined_features = CombinedFeatures()

        ##################################################
        # intra-domain blocks

        #        intra_block_vec = PairiiVec()
        #        
        #        for task_id in data.get_task_ids():
        #            intra_block_vec.push_back(Pairii(task_id, task_id))
        #        
        #        
        #        
        #        # create mask-based normalizer
        #        normalizer = MultitaskKernelMaskPairNormalizer(data.task_vector_nums, intra_block_vec)        
        #        kernel = shogun_factory.create_empty_kernel(param)
        #        kernel.set_normalizer(normalizer)
        #        
        #        # append current kernel to CombinedKernel
        #        combined_kernel.append_kernel(kernel)
        #    
        #        # append features
        #        combined_features.append_feature_obj(base_features)
        #
        #        print "------"
        #        
        #        ##################################################
        #        # all blocks
        #        
        #        
        #        all_block_vec = PairiiVec()
        #        
        #        for task_id_1 in data.get_task_ids():
        #            for task_id_2 in data.get_task_ids():
        #                all_block_vec.push_back(Pairii(task_id_1, task_id_2))
        #                
        #        
        #        # create mask-based normalizer
        #        normalizer_all = MultitaskKernelMaskPairNormalizer(data.task_vector_nums, all_block_vec)        
        #        kernel_all = shogun_factory.create_empty_kernel(param)
        #        kernel_all.set_normalizer(normalizer_all)
        #                
        #        # append current kernel to CombinedKernel
        #        combined_kernel.append_kernel(kernel_all)
        #    
        #        # append features
        #        combined_features.append_feature_obj(base_features)

        
        ##################################################
        # add one kernel per similarity position

        # init seq handler
        pseudoseqs = SequencesHandler()
        pseudoseq_length = pseudoseqs.seq_length

        for pos in range(pseudoseq_length):

            print "appending kernel for pos %i" % (pos)
            print "nums", data.task_vector_nums

            pos_block_vec = PairiiVec()

            # collect task pairs whose pseudo-sequences share a residue at this position
            for task_name_lhs in data.get_task_names():
                for task_name_rhs in data.get_task_names():
                    
                    similarity = pseudoseqs.get_similarity(task_name_lhs, task_name_rhs, pos)
                    #print "computing similarity for tasks (%s, %s) = %i" % (task_name_lhs, task_name_rhs, similarity)
                    
                    if similarity == 1:                    
                        tmp_pair = Pairii(data.name_to_id(task_name_lhs), data.name_to_id(task_name_rhs))
                        pos_block_vec.push_back(tmp_pair)
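
            # after this double loop, pos_block_vec holds exactly the task
            # pairs whose pseudo-sequences share the same residue at this
            # position; the masked kernel below couples only those tasks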

            print "creating normalizer"
            normalizer_pos = MultitaskKernelMaskPairNormalizer(data.task_vector_nums, pos_block_vec)   

            print "creating empty kernel"
            kernel_pos = shogun_factory.create_empty_kernel(param)
            
            print "setting normalizer"
            kernel_pos.set_normalizer(normalizer_pos)
                
            print "appending kernel"
            # append current kernel to CombinedKernel
            combined_kernel.append_kernel(kernel_pos)
    
            print "appending features"
            # append features
            combined_features.append_feature_obj(base_features)
        
        
        print "done constructing combined kernel"
        
        ##################################################
        # init combined kernel
        
        combined_kernel.init(combined_features, combined_features)    
        
            

                
        print "subkernel weights:", combined_kernel.get_subkernel_weights()

        svm = None

        print "using MKL:", (param.transform >= 1.0)

        if param.transform >= 1.0:
            
            svm = MKLClassification()

            svm.set_mkl_norm(param.transform)
            #svm.set_solver_type(ST_CPLEX) #ST_GLPK) #DIRECT) #NEWTON)#ST_CPLEX) #auto

            svm.set_C(param.cost, param.cost)

            svm.set_kernel(combined_kernel)
            svm.set_labels(lab)

        else:
            
            # create SVM (disable unsupported optimizations)
            combined_kernel.set_cache_size(500)
            
            svm = SVMLight(param.cost, combined_kernel, lab)


        # set up SVM
        num_threads = 8
        svm.io.enable_progress()
        #svm.io.set_loglevel(shogun.Classifier.MSG_INFO)
        svm.io.set_loglevel(shogun.Classifier.MSG_DEBUG)
        
        svm.parallel.set_num_threads(num_threads)
        svm.set_linadd_enabled(False)
        svm.set_batch_computation_enabled(False)
        
        print "WARNING: custom epsilon set"
        svm.set_epsilon(0.05)    
        
        # normalize cost per class to counter class imbalance
        norm_c_pos = param.cost / float(len([l for l in data.labels if l == 1]))
        norm_c_neg = param.cost / float(len([l for l in data.labels if l == -1]))

        svm.set_C(norm_c_neg, norm_c_pos)
        
        
        # start training
        svm.train()

        # save additional info
        self.additional_information["svm_objective"] = svm.get_objective()
        self.additional_information["svm num sv"] = svm.get_num_support_vectors()
        self.additional_information["mkl weights post-training"] = combined_kernel.get_subkernel_weights()

        print self.additional_information

        # wrap up predictors
        svms = {}

        # use a reference to the same svm several times
        for task_name in train_data.keys():
            svms[task_name] = (data.name_to_id(task_name), combined_kernel, svm)

        return svms
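
A small plain-Python sketch (not from the original code) of the per-position block construction above: tasks whose pseudo-sequences carry the same residue at a position are paired, mirroring the `similarity == 1` test. The task names and sequences here are invented, and the equality check stands in for SequencesHandler.get_similarity.

# toy pseudo-sequences per task
pseudo_seqs = {"A_0101": "YFAV", "A_0201": "YFSV", "B_0702": "DFSL"}

def agreeing_pairs(seqs, pos):
    """Return task pairs whose pseudo-sequences match at position pos."""
    names = sorted(seqs)
    return [(a, b) for a in names for b in names if seqs[a][pos] == seqs[b][pos]]

for pos in range(4):
    print pos, agreeing_pairs(pseudo_seqs, pos)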
Example #5
    def _train(self, train_data, param):
        """
        training procedure using training examples and labels
        
        @param train_data: Data relevant to SVM training
        @type train_data: dict<str, list<instances> >
        @param param: Parameters for the training procedure
        @type param: ParameterSvm
        """

        # merge data sets
        data = PreparedMultitaskData(train_data, shuffle=False)

        # create shogun data objects
        base_wdk = shogun_factory.create_empty_kernel(param)
        lab = shogun_factory.create_labels(data.labels)

        combined_kernel = CombinedKernel()
        combined_kernel.io.set_loglevel(shogun.Kernel.MSG_INFO)
        base_features = shogun_factory.create_features(data.examples)
        combined_features = CombinedFeatures()

        # set normalizer
        normalizer = MultitaskKernelNormalizer(data.task_vector_nums)

        # load data
        #f = file("/fml/ag-raetsch/home/cwidmer/Documents/phd/projects/multitask/data/mhc/MHC_Distanzen/MHC_Distanzen/ALL_PseudoSeq_BlosumEnc_pearson.txt")
        f = open(
            "/fml/ag-raetsch/home/cwidmer/Documents/phd/projects/multitask/data/mhc/MHC_Distanzen/MHC_Distanzen/All_PseudoSeq_Hamming.txt"
        )
        #f = file("/fml/ag-raetsch/home/cwidmer/Documents/phd/projects/multitask/data/mhc/MHC_Distanzen/MHC_Distanzen/ALL_PseudoSeq_BlosumEnc_euklid.txt")
        #f = file("/fml/ag-raetsch/home/cwidmer/Documents/phd/projects/multitask/data/mhc/MHC_Distanzen/MHC_Distanzen/ALL_RAxML.txt")

        num_lines = int(f.readline().strip())
        task_distances = numpy.zeros((num_lines, num_lines))
        name_to_id = {}
        for (i, line) in enumerate(f):
            tokens = line.strip().split("\t")
            name = str(tokens[0])
            name_to_id[name] = i
            entry = numpy.array(tokens[1:], dtype=numpy.float64)
            assert len(entry) == num_lines, "len_entry %i, num_lines %i" % (
                len(entry), num_lines)
            task_distances[i, :] = entry

        # cut relevant submatrix
        active_ids = [name_to_id[name] for name in data.get_task_names()]
        tmp_distances = task_distances[active_ids, :]
        tmp_distances = tmp_distances[:, active_ids]
        print "distances ", tmp_distances.shape

        # normalize distances by the largest distance among the active tasks
        task_distances = task_distances / numpy.max(tmp_distances)

        similarities = numpy.zeros(
            (data.get_num_tasks(), data.get_num_tasks()))

        # convert distance to similarity
        for task_name_lhs in data.get_task_names():
            for task_name_rhs in data.get_task_names():

                # convert distance to similarity with a simple affine transformation
                similarity = param.base_similarity - task_distances[
                    name_to_id[task_name_lhs], name_to_id[task_name_rhs]]
                normalizer.set_task_similarity(data.name_to_id(task_name_lhs),
                                               data.name_to_id(task_name_rhs),
                                               similarity)

                # save for later
                similarities[data.name_to_id(task_name_lhs),
                             data.name_to_id(task_name_rhs)] = similarity
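
        # with the active distances scaled to [0, 1], base_similarity = 1.0
        # maps distance 0 to similarity 1 and the largest distance to
        # similarity 0; larger base values shift all similarities up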

        # set normalizer
        base_wdk.set_normalizer(normalizer)
        #base_wdk.init_normalizer()

        combined_features.append_feature_obj(base_features)
        combined_kernel.append_kernel(base_wdk)

        ##################################################
        # intra-domain blocks

        intra_block_vec = PairiiVec()

        for task_id in data.get_task_ids():
            intra_block_vec.push_back(Pairii(task_id, task_id))

        # create mask-based normalizer
        normalizer = MultitaskKernelMaskPairNormalizer(data.task_vector_nums,
                                                       intra_block_vec)
        kernel = shogun_factory.create_empty_kernel(param)
        kernel.set_normalizer(normalizer)

        # append current kernel to CombinedKernel
        combined_kernel.append_kernel(kernel)

        # append features
        combined_features.append_feature_obj(base_features)

        # set mixing factor (used if MKL is OFF)
        assert (param.base_similarity <= 1)
        assert (param.base_similarity >= 0)
        combined_kernel.set_subkernel_weights(
            [param.base_similarity, 1 - param.base_similarity])
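        # a convex combination: weight on the task-similarity kernel versus
        # the intra-task (dirac) kernel; MKL re-learns these weights if enabled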

        combined_kernel.init(combined_features, combined_features)

        svm = None

        print "using MKL:", (param.transform >= 1.0)

        if param.transform >= 1.0:

            svm = MKLClassification()

            svm.set_mkl_norm(param.transform)
            #svm.set_solver_type(ST_CPLEX) #ST_GLPK) #DIRECT) #NEWTON)#ST_CPLEX) #auto

            svm.set_C(param.cost, param.cost)

            svm.set_kernel(combined_kernel)
            svm.set_labels(lab)

        else:

            # create SVM (disable unsupported optimizations)
            combined_kernel.set_cache_size(500)

            svm = SVMLight(param.cost, combined_kernel, lab)

        # set up SVM
        num_threads = 8
        svm.io.enable_progress()
        svm.io.set_loglevel(shogun.Classifier.MSG_DEBUG)

        svm.parallel.set_num_threads(num_threads)
        svm.set_linadd_enabled(False)
        svm.set_batch_computation_enabled(False)

        print "WARNING: custom epsilon set"
        svm.set_epsilon(0.05)

        # normalize cost
        norm_c_pos = param.cost / float(len([l for l in data.labels if l == 1]))
        norm_c_neg = param.cost / float(len([l for l in data.labels if l == -1]))

        svm.set_C(norm_c_neg, norm_c_pos)

        # start training
        svm.train()

        # save additional information
        self.additional_information["svm objective"] = svm.get_objective()
        self.additional_information["num sv"] = svm.get_num_support_vectors()
        self.additional_information["similarities"] = similarities
        self.additional_information[
            "post_weights"] = combined_kernel.get_subkernel_weights()

        # wrap up predictors
        svms = {}

        # use a reference to the same svm several times
        for task_name in data.get_task_names():

            task_num = data.name_to_id(task_name)

            # save svm and task_num
            svms[task_name] = (task_num, combined_kernel, svm)

        return svms
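
A self-contained numpy sketch (not from the original code) of the distance-file parsing and distance-to-similarity conversion used above. The file layout is inferred from the loader: a row count on the first line, then one tab-separated row per task, name first. The values are invented.

import numpy

# stand-in for the file contents
lines = ["2", "A_0101\t0.0\t0.4", "A_0201\t0.4\t0.0"]

num = int(lines[0])
dist = numpy.zeros((num, num))
names = []
for i, line in enumerate(lines[1:]):
    tokens = line.strip().split("\t")
    names.append(tokens[0])
    dist[i, :] = numpy.array(tokens[1:], dtype=numpy.float64)

# same conversion as above: scale by the largest distance, then flip
base_similarity = 1.0
sim = base_similarity - dist / numpy.max(dist)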
Example #6
    def _train(self, train_data, param):
        """
        training procedure using training examples and labels
        
        @param train_data: Data relevant to SVM training
        @type train_data: dict<str, list<instances> >
        @param param: Parameters for the training procedure
        @type param: ParameterSvm
        """

        # merge data sets
        data = PreparedMultitaskData(train_data, shuffle=True)

        # create shogun label
        lab = shogun_factory.create_labels(data.labels)

        ##################################################
        # define pockets
        ##################################################

        pockets = [0] * 9

        pockets[0] = [1, 5, 6, 7, 8, 31, 32, 33, 34]
        pockets[1] = [1, 2, 3, 4, 6, 7, 8, 9, 11, 21, 31]
        pockets[2] = [11, 20, 21, 22, 29, 31]
        pockets[3] = [8, 30, 31, 32]
        pockets[4] = [10, 11, 30]
        pockets[5] = [10, 11, 12, 13, 20, 29]
        pockets[6] = [10, 12, 20, 22, 26, 27, 28, 29]
        pockets[7] = [12, 14, 15, 26]
        pockets[8] = [13, 15, 16, 17, 18, 19, 20, 23, 24, 25, 26]
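
        # each pocket lists 1-based pseudo-sequence positions (hence the
        # pseudo_seq_pos - 1 below); a pocket's task similarity is the mean
        # of the per-position similarities over its members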

        #new_pockets = []

        # merge neighboring pockets
        #for i in range(8):
        #    new_pockets.append(list(set(pockets[i]).union(set(pockets[i+1]))))

        #pockets = new_pockets

        ########################################################
        print "creating a kernel:"
        ########################################################

        # assemble combined kernel

        combined_kernel = CombinedKernel()

        combined_kernel.io.set_loglevel(shogun.Kernel.MSG_INFO)

        base_features = shogun_factory.create_features(data.examples)

        combined_features = CombinedFeatures()

        ##################################################
        # intra-domain blocks

        #        intra_block_vec = PairiiVec()
        #
        #        for task_id in data.get_task_ids():
        #            intra_block_vec.push_back(Pairii(task_id, task_id))
        #
        #
        #
        #        # create mask-based normalizer
        #        normalizer = MultitaskKernelMaskPairNormalizer(data.task_vector_nums, intra_block_vec)
        #        kernel = shogun_factory.create_empty_kernel(param)
        #        kernel.set_normalizer(normalizer)
        #
        #        # append current kernel to CombinedKernel
        #        combined_kernel.append_kernel(kernel)
        #
        #        # append features
        #        combined_features.append_feature_obj(base_features)
        #
        #        print "------"
        #
        #        ##################################################
        #        # all blocks
        #
        #
        #        all_block_vec = PairiiVec()
        #
        #        for task_id_1 in data.get_task_ids():
        #            for task_id_2 in data.get_task_ids():
        #                all_block_vec.push_back(Pairii(task_id_1, task_id_2))
        #
        #
        #        # create mask-based normalizer
        #        normalizer_all = MultitaskKernelMaskPairNormalizer(data.task_vector_nums, all_block_vec)
        #        kernel_all = shogun_factory.create_empty_kernel(param)
        #        kernel_all.set_normalizer(normalizer_all)
        #
        #        # append current kernel to CombinedKernel
        #        combined_kernel.append_kernel(kernel_all)
        #
        #        # append features
        #        combined_features.append_feature_obj(base_features)

        ##################################################
        # add one kernel per similarity position

        # init seq handler
        pseudoseqs = SequencesHandler()

        for pocket in pockets:

            print "creating normalizer"
            #import pdb
            #pdb.set_trace()

            normalizer = MultitaskKernelNormalizer(data.task_vector_nums)

            print "processing pocket", pocket

            # set similarity
            for task_name_lhs in data.get_task_names():
                for task_name_rhs in data.get_task_names():

                    similarity = 0.0

                    for pseudo_seq_pos in pocket:
                        similarity += float(
                            pseudoseqs.get_similarity(task_name_lhs,
                                                      task_name_rhs,
                                                      pseudo_seq_pos - 1))

                    # normalize
                    similarity = similarity / float(len(pocket))

                    print "pocket %s (%s, %s) = %f" % (
                        str(pocket), task_name_lhs, task_name_rhs, similarity)

                    normalizer.set_task_similarity(
                        data.name_to_id(task_name_lhs),
                        data.name_to_id(task_name_rhs), similarity)

            print "creating empty kernel"
            kernel_pos = shogun_factory.create_empty_kernel(param)

            print "setting normalizer"
            kernel_pos.set_normalizer(normalizer)

            print "appending kernel"
            # append current kernel to CombinedKernel
            combined_kernel.append_kernel(kernel_pos)

            print "appending features"
            # append features
            combined_features.append_feature_obj(base_features)

        print "done constructing combined kernel"

        ##################################################
        # init combined kernel

        # init weights
        # combined_kernel.set_subkernel_weights([1.0/2.85]*combined_kernel.get_num_subkernels())

        combined_kernel.init(combined_features, combined_features)

        print "subkernel weights:", combined_kernel.get_subkernel_weights()

        svm = None

        print "using MKL:", (param.transform >= 1.0)

        if param.transform >= 1.0:

            svm = MKLClassification()

            svm.set_mkl_norm(param.transform)
            #svm.set_solver_type(ST_CPLEX) #ST_GLPK) #DIRECT) #NEWTON)#ST_CPLEX) #auto

            svm.set_C(param.cost, param.cost)

            svm.set_kernel(combined_kernel)
            svm.set_labels(lab)

        else:

            # create SVM (disable unsupported optimizations)
            combined_kernel.set_cache_size(500)

            svm = SVMLight(param.cost, combined_kernel, lab)

        # set up SVM
        num_threads = 8
        svm.io.enable_progress()
        #svm.io.set_loglevel(shogun.Classifier.MSG_INFO)
        svm.io.set_loglevel(shogun.Classifier.MSG_DEBUG)

        svm.parallel.set_num_threads(num_threads)
        svm.set_linadd_enabled(False)
        svm.set_batch_computation_enabled(False)

        #print "WARNING: custom epsilon set"
        #svm.set_epsilon(0.05)

        # normalize cost
        norm_c_pos = param.cost / float(len([l for l in data.labels if l == 1]))
        norm_c_neg = param.cost / float(len([l for l in data.labels if l == -1]))

        svm.set_C(norm_c_neg, norm_c_pos)

        # start training
        svm.train()

        # save additional info
        self.additional_information["svm_objective"] = svm.get_objective()
        self.additional_information[
            "svm num sv"] = svm.get_num_support_vectors()
        self.additional_information[
            "post_weights"] = combined_kernel.get_subkernel_weights()

        print self.additional_information

        # wrap up predictors
        svms = {}

        # use a reference to the same svm several times
        for task_name in train_data.keys():
            svms[task_name] = (data.name_to_id(task_name), combined_kernel,
                               svm)

        return svms
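
A toy plain-Python version (not from the original code) of the pocket-averaged similarity computed above, with a simple 0/1 per-position match standing in for SequencesHandler.get_similarity. The sequences and the pocket are invented.

pseudo_seqs = {"A_0101": "YFAVM", "A_0201": "YFSVM"}
pocket = [1, 3, 5]  # 1-based positions, as in the pockets above

def pocket_similarity(seq_a, seq_b, pocket):
    """Mean per-position match over the pocket's (1-based) positions."""
    matches = [1.0 if seq_a[p - 1] == seq_b[p - 1] else 0.0 for p in pocket]
    return sum(matches) / float(len(pocket))

print pocket_similarity(pseudo_seqs["A_0101"], pseudo_seqs["A_0201"], pocket)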