Example #1
    def __init__(self, arg=None, **args):

        Classifier.__init__(self)
        self.trainingDirectory = None
        self.testingDirectory = None

        self.maxSize = 2e6
        self.numTrees = 200
        self.numFeatures = 0
        # Copy construction: take parameters from another instance of this class.
        if arg.__class__ == self.__class__:
            other = arg
            self.numTrees = other.numTrees
            self.numFeatures = other.numFeatures
            
        # Keyword arguments override defaults and any copied values.
        if 'numTrees' in args:
            self.numTrees = args['numTrees']
        if 'numFeatures' in args:
            self.numFeatures = args['numFeatures']
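
A self-contained sketch of the copy-or-keywords pattern used above; the class
name Forest is illustrative and not part of the original code, while the
attribute names come from the example itself:

    class Forest(object):
        def __init__(self, arg=None, **args):
            self.numTrees = 200
            self.numFeatures = 0
            if arg.__class__ == self.__class__:
                # Copy construction: take parameters from another instance.
                self.numTrees = arg.numTrees
                self.numFeatures = arg.numFeatures
            # Keyword arguments take precedence over copied values.
            self.numTrees = args.get('numTrees', self.numTrees)
            self.numFeatures = args.get('numFeatures', self.numFeatures)

    forest = Forest(numTrees=500)
    copy = Forest(forest)                    # copies numTrees=500
    tuned = Forest(forest, numFeatures=10)   # copy, then override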
Example #2
    def __init__(self, arg=None, **args):

        Classifier.__init__(self)
        self.trainingDirectory = None
        self.testingDirectory = None

        self.maxSize = 2e6
        self.numTrees = 200
        self.numFeatures = 0
        if arg.__class__ == self.__class__:
            other = arg
            self.numTrees = other.numTrees
            self.numFeatures = other.numFeatures

        if 'numTrees' in args:
            self.numTrees = args['numTrees']
        if 'numFeatures' in args:
            self.numFeatures = args['numFeatures']
Example #3
    def __init__(self, arg=None, **args):

        """
        :Parameters:
          - `arg` - another SVM object or a kernel object; if no argument is given
            the kernel function of the training dataset is used
        
        :Keywords:
          - `C` - the SVM C parameter
          - `Cmode` - the way the C parameter is used; values: 'equal', 'classProb',
            'fromData'.
            In 'equal' mode C is set to be the same for both classes
            In 'classProb' mode each class is assigned a C value that is 
            proportional to the size of the other class.  This results in 
            margin error costs being proportional to the ratio of the
            sizes of the two classes.  
            This is useful for datasets with an unbalanced class distribution.
            In 'fromData' the value of C for each pattern is taken from the
            'C' attribute of the training data.
          - `optimizer` - which optimizer to use.  The options are:
            'libsvm' -- run libsvm
            'liblinear' -- use liblinear (linear SVM only);
            in this case the loss function can be set to either 'l1' or 'l2'
            'mysmo' -- use the PyML native optimizer (based on libsvm)
            'gist' -- use a gist-like optimizer.
          - `loss` - when using liblinear set this to 'l1' or 'l2' (default: 'l1')
          - `cacheSize` - size of the kernel cache (in MB).
        """

        Classifier.__init__(self, arg, **args)

        self.kernel = None
        # Take the kernel from another SVM (copy construction) or from a
        # kernel object; otherwise the training data's kernel will be used.
        if arg.__class__ == self.__class__:
            if arg.kernel is not None:
                self.kernel = arg.kernel.__class__(arg.kernel)
        elif hasattr(arg, 'type') and arg.type == 'kernel':
            self.kernel = arg.__class__(arg)
        elif arg is not None:
            raise ValueError('unknown type of argument')
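
A short usage sketch based on the keywords documented above; the parameter
values are illustrative, and the import style may vary across PyML versions:

    from PyML import *  # PyML's tutorial-style import, which exposes SVM

    # Construct an SVM with explicit parameters.  A kernel object could be
    # passed as the first positional argument instead.
    s = SVM(C=10, Cmode='classProb', optimizer='liblinear', loss='l2',
            cacheSize=256)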
Example #5
    def __init__(self, arg=None, **args):

        Classifier.__init__(self, arg, **args)
        if self.regression:
            # In regression mode report raw decision values instead of labels.
            self.resultsObject = resultsObjects.RegressionResults
            self.classify = self.decisionFunc
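
A self-contained sketch of the method-aliasing pattern above (the class and
method bodies are illustrative): rebinding classify to decisionFunc means
callers receive the raw decision value instead of a thresholded label.

    class Estimator(object):
        def __init__(self, regression=False):
            if regression:
                # The instance attribute shadows the class-level method.
                self.classify = self.decisionFunc

        def decisionFunc(self, x):
            return sum(x)  # placeholder for a real-valued decision function

        def classify(self, x):
            # Default behavior: threshold the decision value into a label.
            return 1 if self.decisionFunc(x) > 0 else -1

    print(Estimator().classify([0.2, 0.3]))                 # prints 1
    print(Estimator(regression=True).classify([0.2, 0.3]))  # prints 0.5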
Example #6
    def __init__(self, arg=None, **args):

        Classifier.__init__(self, arg, **args)
Example #7
    def __init__(self, arg=None, **args):

        Classifier.__init__(self, arg, **args)
        if self.regression:
            self.resultsObject = resultsObjects.RegressionResults
            self.classify = self.decisionFunc