def __init__(self, steps, *, memory=None, verbose=False,
             output_name=None, enforce_float32=True, runtime='python',
             options=None, white_op=None, black_op=None,
             final_types=None, op_version=None):
    """Store the ONNX conversion settings, then initialize the base pipeline.

    Parameters mirror ``Pipeline.__init__`` (*steps*, *memory*, *verbose*)
    plus the conversion options recorded as instance attributes below.
    """
    self.output_name = output_name
    self.enforce_float32 = enforce_float32
    self.runtime = runtime
    self.options = options
    # Fix: the original assigned ``self.white_op`` twice; once is enough.
    self.white_op = white_op
    self.black_op = black_op
    self.final_types = final_types
    self.op_version = op_version
    # The attributes must be set *before* calling the base constructor:
    # Pipeline.__init__ calls _validate_step, which checks the value
    # of black_op.
    Pipeline.__init__(self, steps, memory=memory, verbose=verbose)
def __init__(self, steps):
    """Build the pipeline and verify its first step is a DataFrameMapper.

    Raises TypeError when ``self._dataframe_mapper`` is not a
    ``DataFrameMapper`` instance.
    """
    Pipeline.__init__(self, steps)
    mapper = self._dataframe_mapper
    if isinstance(mapper, DataFrameMapper):
        return
    raise TypeError(
        "First step of a DataFramePipeline must be a DataFrameMapper, "
        "'%s' (type %s) is not." % (mapper, type(mapper))
    )
def __init__(self, nQuantile=None):
    """Assemble a selector + quantile-transform pipeline.

    ``nQuantile`` defaults to ``Separator_num.nQUANTILE`` when omitted.
    """
    if nQuantile is None:
        self.nQuantile = Separator_num.nQUANTILE
    else:
        self.nQuantile = nQuantile
    geometry_step = ('geometry', Separator_num.Selector())
    # copy=False -> use in-place scaling
    quantile_step = (
        'quantiled',
        QuantileTransformer(n_quantiles=self.nQuantile, copy=False),
    )
    Pipeline.__init__(self, [geometry_step, quantile_step])
def __init__(self, k, model_type='random_forest', kmeans_type='squishy',
             n_forests=192, n_trees=1, n_features_to_predict=0.5,
             max_depth=5,  # should be 2 for boosting
             learning_rate=0.6, using_weights=True,
             weight_extent=1,  # 2 for boosting
             max_iter=60, n_attempts=10, weight_adjustment=0,
             eig_extent=0, n_jobs=1):
    """Chain two SLCluster stages, optional eigenvector weighting, and a
    (squishy) JK-means clustering step into a pipeline."""
    shared = dict(model_type=model_type, n_trees=n_trees,
                  n_features_to_predict=n_features_to_predict,
                  max_depth=max_depth, weight_extent=weight_extent,
                  learning_rate=learning_rate, n_jobs=n_jobs)
    # First stage never emits weights; the second does so only when asked.
    slc1 = SLCluster(n_forests, outputting_weights=False, **shared)
    slc2 = SLCluster(n_forests, outputting_weights=using_weights, **shared)
    ew = EigenvectorWeighting(extent=eig_extent)
    kmeans_cls = JKMeans if kmeans_type == 'normal' else SquishyJKMeans
    jk = kmeans_cls(k, max_iter=max_iter, n_attempts=n_attempts,
                    accepting_weights=using_weights,
                    weight_adjustment=weight_adjustment, n_jobs=n_jobs)
    steps = [('slc1', slc1), ('slc2', slc2)]
    # The weighting stage is only wired in when an eigenvector extent is set.
    if eig_extent != 0:
        steps.append(('ew', ew))
    steps.append(('jkmeans', jk))
    Pipeline.__init__(self, steps)
def __init__(self, k, model_type='random_forest', kmedoids_type='normal',
             n_forests=150, n_trees=1, n_features_to_predict=0.5,
             max_depth=5,  # should be 2 if model_type is boosting
             learning_rate=0.6, using_weights=False, using_pca=False,
             weight_extent=1,  # 2 if model_type is boosting
             max_iter=60, n_attempts=10, weight_adjustment=0,
             eig_extent=0, n_jobs=1):
    """Chain a random-forest transform, optional eigenvector weighting,
    and a (squishy) JK-medoids clustering step into a pipeline."""
    rft = RFTransform(n_forests, model_type=model_type, n_trees=n_trees,
                      n_features_to_predict=n_features_to_predict,
                      max_depth=max_depth, outputting_weights=using_weights,
                      using_pca=using_pca, weight_extent=weight_extent,
                      learning_rate=learning_rate, n_jobs=n_jobs)
    ew = EigenvectorWeighting(extent=eig_extent)
    medoids_cls = JKMedoids if kmedoids_type == 'normal' else SquishyJKMedoids
    jk = medoids_cls(k, max_iter=max_iter, n_attempts=n_attempts,
                     accepting_weights=using_weights,
                     weight_adjustment=weight_adjustment, n_jobs=n_jobs)
    steps = [('rft', rft)]
    # Weighting only participates when an extent is set AND weights flow
    # through (same condition as the original, De Morgan inverted).
    if eig_extent != 0 and using_weights:
        steps.append(('ew', ew))
    steps.append(('jkmeans', jk))
    Pipeline.__init__(self, steps)
def __init__(self, steps, *, memory=None, verbose=False,
             output_name=None, enforce_float32=True, runtime='python',
             options=None, white_op=None, black_op=None,
             final_types=None, op_version=None):
    """Initialize the base pipeline, then record the ONNX conversion settings.

    Parameters mirror ``Pipeline.__init__`` (*steps*, *memory*, *verbose*)
    plus the conversion options recorded as instance attributes below.
    """
    Pipeline.__init__(self, steps, memory=memory, verbose=verbose)
    self.output_name = output_name
    self.enforce_float32 = enforce_float32
    self.runtime = runtime
    self.options = options
    # Fix: the original assigned ``self.white_op`` twice; once is enough.
    self.white_op = white_op
    self.black_op = black_op
    self.final_types = final_types
    self.op_version = op_version
def __init__(self, steps=None, frc_mdl=None, gen_mdl=None, sel_mdl=None):
    """Build a gen -> sel -> frc pipeline, filling missing stages with no-ops.

    Either pass a complete ``steps`` list (with 'gen', 'sel', 'frc' names)
    or the individual models; any stage left as ``None`` is replaced by an
    identity/disabled default.
    """
    if steps is None:
        steps = [('gen', gen_mdl), ('sel', sel_mdl), ('frc', frc_mdl)]
    named_steps = dict(steps)
    # Bug fix: the rebuilt step list previously used the keyword arguments,
    # which silently discarded models supplied through ``steps`` (and could
    # crash on ``None.name`` below). Source each model from the step list.
    frc_mdl = named_steps.get('frc')
    if frc_mdl is None:
        frc_mdl = IdentityModel(name="Identity")
    sel_mdl = named_steps.get('sel')
    if sel_mdl is None:
        sel_mdl = sel_class.FeatureSelection(name="No feature selection", on=False)
    gen_mdl = named_steps.get('gen')
    if gen_mdl is None:
        gen_mdl = gnt_class.FeatureGeneration(name="No feature generation")
    steps = [('gen', gen_mdl), ('sel', sel_mdl), ('frc', frc_mdl)]
    Pipeline.__init__(self, steps)
    self.name = "_".join(
        [str(frc_mdl.name), str(gen_mdl.name), str(sel_mdl.name)])
def __init__(self, steps, na_val=np.nan):
    """Create the pipeline and remember ``na_val``.

    NOTE(review): ``na_val`` is presumably the missing-value placeholder
    used by later steps — confirm against callers.
    """
    Pipeline.__init__(self, steps=steps)
    self.na_val = na_val
def __init__(self, steps, memory=None, verbose=False, op_version=None):
    """Initialize the pipeline and the ONNX sub-graph mixin, then record
    the target opset version."""
    Pipeline.__init__(self, steps, memory=memory, verbose=verbose)
    OnnxSubGraphOperatorMixin.__init__(self)
    self.op_version = op_version
def __init__(self, steps):
    """Delegate construction to the base ``Pipeline``."""
    Pipeline.__init__(self, steps=steps)
def __init__(self, steps, cache_name=None, verbose=False):
    """Build an uncached pipeline and choose a cache name.

    When ``cache_name`` is omitted, a per-instance unique name is derived
    from ``id(self)``.
    """
    Pipeline.__init__(self, steps, memory=None, verbose=verbose)
    self.cache_name = (
        "Pipeline%d" % id(self) if cache_name is None else cache_name)
def __init__(self, steps):
    """Delegate construction to the base ``Pipeline``."""
    Pipeline.__init__(self, steps=steps)
def __init__(self, op_version=None):
    """Initialize the pipeline and the ONNX sub-graph mixin, then record
    the target opset version.

    ``op_version`` is stored as-is; ``None`` presumably means "use the
    converter's default opset" — TODO confirm against the mixin.
    """
    # Base pipeline first, then the mixin; keep this order — the mixin may
    # rely on pipeline state set up by Pipeline.__init__.
    Pipeline.__init__(self)
    OnnxSubGraphOperatorMixin.__init__(self)
    self.op_version = op_version