Example #1
    def __init__(self,
                 n_tasks,
                 n_features,
                 logdir=None,
                 layer_sizes=[1000],
                 weight_init_stddevs=[.02],
                 bias_init_consts=[1.],
                 penalty=0.0,
                 penalty_type="l2",
                 dropouts=[0.5],
                 learning_rate=0.002,
                 momentum=.8,
                 optimizer="adam",
                 batch_size=50,
                 fit_transformers=[],
                 n_evals=1,
                 verbose=True,
                 seed=None,
                 **kwargs):
        """Initialize TensorflowMultiTaskFitTransformRegressor

    Parameters
    ----------
    n_tasks: int
      Number of tasks
    n_features: list or int
      Number of features.
    logdir: str
      Location to save data
    layer_sizes: list
      List of layer sizes.
    weight_init_stddevs: list
      List of standard deviations for weights (sampled from zero-mean
      gaussians). One for each layer.
    bias_init_consts: list
      List of bias initializations. One for each layer.
    penalty: float
      Amount of penalty (l2 or l1 applied)
    penalty_type: str
      Either "l2" or "l1"
    dropouts: list
      List of dropout amounts. One for each layer.
    learning_rate: float
      Learning rate for model.
    momentum: float
      Momentum. Only applied if optimizer=="momentum"
    optimizer: str
      Type of optimizer applied.
    batch_size: int
      Size of minibatches for training.
    fit_transformers: list
      List of dc.trans.FitTransformer objects
    n_evals: int
      Number of evalations per example at predict time
    verbose: True
      Perform logging.
    seed: int
      If not none, is used as random seed for tensorflow.

    """

        self.fit_transformers = fit_transformers
        self.n_evals = n_evals

        # Run fit transformers on dummy dataset to determine n_features after transformation
        if isinstance(n_features, list):
            X_b = np.ones([batch_size] + n_features)
        elif isinstance(n_features, int):
            X_b = np.ones([batch_size, n_features])
        else:
            raise ValueError("n_features should be list or int")

        for transformer in self.fit_transformers:
            X_b = transformer.X_transform(X_b)
        n_features = X_b.shape[1]
        print("n_features after fit_transform: %d" % int(n_features))

        TensorflowGraphModel.__init__(self,
                                      n_tasks,
                                      n_features,
                                      logdir=logdir,
                                      layer_sizes=layer_sizes,
                                      weight_init_stddevs=weight_init_stddevs,
                                      bias_init_consts=bias_init_consts,
                                      penalty=penalty,
                                      penalty_type=penalty_type,
                                      dropouts=dropouts,
                                      learning_rate=learning_rate,
                                      momentum=momentum,
                                      optimizer=optimizer,
                                      batch_size=batch_size,
                                      pad_batches=False,
                                      verbose=verbose,
                                      seed=seed,
                                      **kwargs)
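For orientation, here is a minimal usage sketch for the constructor above. It assumes the class is importable from your DeepChem installation (the exact import path can vary by version), and the task count, feature count, layer sizes and empty fit_transformers list are illustrative assumptions only, not values taken from the snippet.

import deepchem as dc

# Illustrative values: 12 regression tasks, 1024 input features per example.
# fit_transformers is left empty, so the dummy-batch loop in __init__ is a
# no-op and n_features stays 1024.
model = dc.models.TensorflowMultiTaskFitTransformRegressor(
    n_tasks=12,
    n_features=1024,
    layer_sizes=[1000, 500],
    weight_init_stddevs=[.02, .02],
    bias_init_consts=[1., 1.],
    dropouts=[0.5, 0.25],
    learning_rate=0.002,
    batch_size=50,
    fit_transformers=[],
    n_evals=1,
    seed=123)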
Example #2
    def __init__(self,
                 n_tasks,
                 K=10,
                 logdir=None,
                 n_classes=2,
                 penalty=0.0,
                 penalty_type="l2",
                 learning_rate=0.001,
                 momentum=.8,
                 optimizer="adam",
                 batch_size=50,
                 verbose=True,
                 seed=None,
                 **kwargs):
        """Initialize TensorflowMultiTaskIRVClassifier
    
    Parameters
    ----------
    n_tasks: int
      Number of tasks
    K: int
      Number of nearest neighbours used in classification
    logdir: str
      Location to save data
    n_classes: int
      number of different labels
    penalty: float
      Amount of penalty (l2 or l1 applied)
    penalty_type: str
      Either "l2" or "l1"
    learning_rate: float
      Learning rate for model.
    momentum: float
      Momentum. Only applied if optimizer=="momentum"
    optimizer: str
      Type of optimizer applied.
    batch_size: int
      Size of minibatches for training.
    verbose: True 
      Perform logging.
    seed: int
      If not none, is used as random seed for tensorflow.        

    """

        self.n_tasks = n_tasks
        self.K = K
        self.n_features = 2 * self.K * self.n_tasks
        print("n_features after fit_transform: %d" % int(self.n_features))
        TensorflowGraphModel.__init__(self,
                                      n_tasks,
                                      self.n_features,
                                      logdir=logdir,
                                      layer_sizes=None,
                                      weight_init_stddevs=None,
                                      bias_init_consts=None,
                                      penalty=penalty,
                                      penalty_type=penalty_type,
                                      dropouts=None,
                                      n_classes=n_classes,
                                      learning_rate=learning_rate,
                                      momentum=momentum,
                                      optimizer=optimizer,
                                      batch_size=batch_size,
                                      pad_batches=False,
                                      verbose=verbose,
                                      seed=seed,
                                      **kwargs)
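A corresponding usage sketch for the IRV classifier, again with illustrative values and an assumed import path that may differ across DeepChem versions. Unlike the regressor above, this constructor takes no n_features argument; it derives the input width itself from n_tasks and K.

import deepchem as dc

# Illustrative values: 12 classification tasks, K = 10 nearest neighbours.
model = dc.models.TensorflowMultiTaskIRVClassifier(
    n_tasks=12,
    K=10,
    n_classes=2,
    learning_rate=0.001,
    batch_size=50,
    seed=123)
# __init__ prints the derived feature count (2 * K * n_tasks).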
Example #3
  def __init__(self,
               n_tasks,
               n_features,
               logdir=None,
               layer_sizes=[1000],
               weight_init_stddevs=[.02],
               bias_init_consts=[1.],
               penalty=0.0,
               penalty_type="l2",
               dropouts=[0.5],
               learning_rate=0.002,
               momentum=.8,
               optimizer="adam",
               batch_size=50,
               fit_transformers=[],
               n_evals=1,
               verbose=True,
               seed=None,
               **kwargs):
    """Initialize TensorflowMultiTaskFitTransformRegressor
       
    Parameters
    ----------
    n_tasks: int
      Number of tasks
    n_features: list or int
      Number of features.
    logdir: str
      Location to save data
    layer_sizes: list
      List of layer sizes.
    weight_init_stddevs: list
      List of standard deviations for weights (sampled from zero-mean
      gaussians). One for each layer.
    bias_init_consts: list
      List of bias initializations. One for each layer.
    penalty: float
      Amount of penalty (l2 or l1 applied)
    penalty_type: str
      Either "l2" or "l1"
    dropouts: list
      List of dropout amounts. One for each layer.
    learning_rate: float
      Learning rate for model.
    momentum: float
      Momentum. Only applied if optimizer=="momentum"
    optimizer: str
      Type of optimizer applied.
    batch_size: int
      Size of minibatches for training.
    fit_transformers: list
      List of dc.trans.FitTransformer objects
    n_evals: int
      Number of evalations per example at predict time
    verbose: True 
      Perform logging.
    seed: int
      If not none, is used as random seed for tensorflow.        

    """

    self.fit_transformers = fit_transformers
    self.n_evals = n_evals

    # Run fit transformers on dummy dataset to determine n_features after transformation
    if isinstance(n_features, list):
      X_b = np.ones([batch_size] + n_features)
    elif isinstance(n_features, int):
      X_b = np.ones([batch_size, n_features])
    else:
      raise ValueError("n_features should be list or int")

    for transformer in self.fit_transformers:
      X_b = transformer.X_transform(X_b)
    n_features = X_b.shape[1]
    print("n_features after fit_transform: %d" % int(n_features))

    TensorflowGraphModel.__init__(
        self,
        n_tasks,
        n_features,
        logdir=logdir,
        layer_sizes=layer_sizes,
        weight_init_stddevs=weight_init_stddevs,
        bias_init_consts=bias_init_consts,
        penalty=penalty,
        penalty_type=penalty_type,
        dropouts=dropouts,
        learning_rate=learning_rate,
        momentum=momentum,
        optimizer=optimizer,
        batch_size=batch_size,
        pad_batches=False,
        verbose=verbose,
        seed=seed,
        **kwargs)
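To make the dummy-batch logic above concrete in isolation, here is a self-contained sketch with a hypothetical transformer. DummyFlattenTransformer only illustrates the X_transform interface; it is not a DeepChem class, and the shapes are made up for the example.

import numpy as np

class DummyFlattenTransformer(object):
  """Hypothetical stand-in for a dc.trans FitTransformer."""

  def X_transform(self, X):
    # Flatten everything past the batch dimension, as a Coulomb-matrix-style
    # transformer might.
    return X.reshape((X.shape[0], -1))

batch_size = 50
n_features = [23, 23]                      # list form: per-example input shape
X_b = np.ones([batch_size] + n_features)   # dummy batch of shape (50, 23, 23)
for transformer in [DummyFlattenTransformer()]:
  X_b = transformer.X_transform(X_b)
print("n_features after fit_transform: %d" % X_b.shape[1])   # prints 529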
Example #4
  def __init__(self,
               n_tasks,
               K=10,
               logdir=None,
               n_classes=2,
               penalty=0.0,
               penalty_type="l2",
               learning_rate=0.001,
               momentum=.8,
               optimizer="adam",
               batch_size=50,
               verbose=True,
               seed=None,
               **kwargs):
    """Initialize TensorflowMultiTaskIRVClassifier
    
    Parameters
    ----------
    n_tasks: int
      Number of tasks
    K: int
      Number of nearest neighbours used in classification
    logdir: str
      Location to save data
    n_classes: int
      number of different labels
    penalty: float
      Amount of penalty (l2 or l1 applied)
    penalty_type: str
      Either "l2" or "l1"
    learning_rate: float
      Learning rate for model.
    momentum: float
      Momentum. Only applied if optimizer=="momentum"
    optimizer: str
      Type of optimizer applied.
    batch_size: int
      Size of minibatches for training.
    verbose: True 
      Perform logging.
    seed: int
      If not none, is used as random seed for tensorflow.        

    """

    self.n_tasks = n_tasks
    self.K = K
    self.n_features = 2 * self.K * self.n_tasks
    print("n_features after fit_transform: %d" % int(self.n_features))
    TensorflowGraphModel.__init__(
        self,
        n_tasks,
        self.n_features,
        logdir=logdir,
        layer_sizes=None,
        weight_init_stddevs=None,
        bias_init_consts=None,
        penalty=penalty,
        penalty_type=penalty_type,
        dropouts=None,
        n_classes=n_classes,
        learning_rate=learning_rate,
        momentum=momentum,
        optimizer=optimizer,
        batch_size=batch_size,
        pad_batches=False,
        verbose=verbose,
        seed=seed,
        **kwargs)
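As a quick sanity check on the input width this constructor derives: it grows linearly in both n_tasks and K. Reading the factor of 2 as one similarity value plus one neighbour label per nearest neighbour is an assumption about the IRV featurization, not something stated in the snippet itself.

# Plain arithmetic, no imports needed.
n_tasks, K = 12, 10
n_features = 2 * K * n_tasks   # one similarity + one label per neighbour, per task (assumed)
print(n_features)              # 240, matching what __init__ would print for these values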