Ejemplo n.º 1
0
 def __init__(self, model_name, output_folder):
     """
         Initialize the model by delegating to ModelBase.

         **input:
             *model_name: Name of this model (original doc said "Integer",
                 but the name suggests a string -- TODO confirm in ModelBase)
             *output_folder: Output folder where data is saved
                 (tensorboard, checkpoints)
     """
     ModelBase.__init__(self, model_name, output_folder=output_folder)
Ejemplo n.º 2
0
 def __init__(self, parent, id, arg_list, dtype=float):
     # NOTE(review): the 4th argument below is the *builtin* `type`, not the
     # `dtype` parameter and not a kind label like the sibling constructors
     # pass ('Param List', 'Table', 'Variable') -- looks like a bug; confirm
     # the intended value before changing.
     ModelBase.__init__(self, parent, id, arg_list[0], type)
     # Discard the first entry (already consumed as arg_list[0] above).
     arg_list.pop(0)
     # The next entry is expected to be a one-element sequence holding
     # the raw value; unpacking raises if it has any other length.
     [self.raw_value] = arg_list.pop(0)
     self.dtype = dtype
     # Parsed/processed value; filled in later.
     self.data = None
     return
Ejemplo n.º 3
0
 def __init__(self, parent, id, arg_list, dtype='float'):
     # Model node of kind 'Param List' parsed from arg_list.
     # arg_list layout (as used here): [domain, name-ish entry, ..., raw list].
     ModelBase.__init__(self, parent, id, arg_list[1], 'Param List')
     # Consume the leading domain entry; the raw list stays at the end.
     self.domain = arg_list.pop(0)
     self.raw_list = arg_list[-1]
     self.dtype = dtype
     self.shape = []
     # Parsed values are filled in later; start with an empty array.
     self.data = array([])
     return
Ejemplo n.º 4
0
 def __init__(self, parent, id, arg_list, dtype='float'):
     # Model node of kind 'Table' parsed from arg_list.
     ModelBase.__init__(self, parent, id, arg_list[1], 'Table')
     # Consume entries in order: domain first, then two discarded entries,
     # with the raw table taken from the end of the list.
     self.domain = arg_list.pop(0)
     arg_list.pop(0)
     self.dtype = dtype
     arg_list.pop(0)
     self.raw_table = arg_list.pop()
     # Parsed values are filled in later; start with an empty array.
     self.data = array([])
     return
Ejemplo n.º 5
0
 def __init__(self, parent, id, arg_list, dtype='float'):
     # Model node of kind 'Variable' parsed from arg_list.
     ModelBase.__init__(self, parent, id, arg_list[1], 'Variable')
     self.domain = arg_list.pop(0)
     # Discard the entry already passed to ModelBase as arg_list[1] above.
     arg_list.pop(0)
     self.variable_type = arg_list.pop()
     # Effectively unbounded defaults (+/- 10**30).
     self.upper_bound = 10**30
     self.lower_bound = -10**30
     self.dtype = dtype
     return
Ejemplo n.º 6
0
def h2o_median_absolute_error(y_actual, y_predicted):
    """Median absolute error regression loss.

    :param y_actual: H2OFrame of actual response.
    :param y_predicted: H2OFrame of predicted response.
    :return: loss (float) (best is 0.0)
    """
    ModelBase._check_targets(y_actual, y_predicted)
    residual = y_predicted - y_actual
    return residual.abs().median()
Ejemplo n.º 7
0
def h2o_median_absolute_error(y_actual, y_predicted):
  """Median absolute error regression loss.

  :param y_actual: H2OFrame of actual response.
  :param y_predicted: H2OFrame of predicted response.
  :return: loss (float) (best is 0.0)
  """
  ModelBase._check_targets(y_actual, y_predicted)
  abs_errors = (y_predicted - y_actual).abs()
  return abs_errors.median()
Ejemplo n.º 8
0
    def __init__(self):
        """
        Constructor.

        Registers the association rules allowed in the "Components" model
        (which units may contain which parts), then builds the validator.
        """
        ModelBase.__init__(self, "Components")

        self.AddAllowedAssociation(Unit = {"cid" : "C_DEVICE", "id" : "EACH"}, Part = {"cid" : "C_DEVICE_CHANNEL", "id" : "ANY"})
        self.AddAllowedAssociation(Unit = {"cid" : "C_DEVICE_TYPE", "id" : "EACH"}, Part = {"cid" : "C_DEVICE_CHANNEL_GROUP", "id" : "ANY"})

        self.GenerateValidator()
Ejemplo n.º 9
0
def h2o_mean_squared_error(y_actual, y_predicted, weights=None):
    """
  Mean squared error regression loss

  :param y_actual: H2OFrame of actual response.
  :param y_predicted: H2OFrame of predicted response.
  :param weights: (Optional) sample weights. NOTE(review): accepted but
      ignored by the implementation below -- confirm whether weighting
      was intended.
  :return: loss (float) (best is 0.0)
  """
    ModelBase._check_targets(y_actual, y_predicted)
    # [0] presumably extracts the scalar from the result of mean().
    return ((y_predicted - y_actual)**2).mean()[0]
Ejemplo n.º 10
0
    def __init__(self):
        """
        Constructor.

        Registers the association rules allowed in the "Tagging" model
        (tags may tag tags, and tags may tag anything), then builds the
        validator.
        """
        ModelBase.__init__(self, "Tagging")

        self.AddAllowedAssociation(Tag = {"cid" : "C_TAG", "id" : "ANY"}, Subject = {"cid" : "C_TAG", "id" : "EACH"}, multiplicity=1)
        self.AddAllowedAssociation(Tag = {"cid" : "C_TAG", "id" : "ANY"}, Subject = {"cid" : "ANY", "id" : "ANY"})

        self.GenerateValidator()
    def __init__(self):
        """
        Constructor.

        Registers the association rules allowed in the "Physical Connections"
        model (server -> bus controller -> bus -> device), then builds the
        validator.
        """
        ModelBase.__init__(self, "Physical Connections")

        self.AddAllowedAssociation(From = {"cid" : "C_SERVER", "id" : "EACH"}, To = {"cid" : "C_BUS_CONTROLLER", "id" : "ANY"})
        self.AddAllowedAssociation(From = {"cid" : "C_BUS_CONTROLLER", "id" : "EACH"}, To = {"cid" : "C_BUS", "id" : "ANY"})
        self.AddAllowedAssociation(From = {"cid" : "C_BUS", "id" : "EACH"}, To = {"cid" : "C_DEVICE", "id" : "ANY"})

        self.GenerateValidator()
Ejemplo n.º 12
0
    def __init__(self):
        """
        Constructor.

        Registers the association rule allowed in the "Logical Connections"
        model (each device link connects to exactly one device channel),
        attaches a connector-match validator, then builds the validator.
        """
        ModelBase.__init__(self, "Logical Connections")

        self.AddAllowedAssociation(From = {"cid" : "C_DEVICE_LINK", "id" : "EACH"}, To = {"cid" : "C_DEVICE_CHANNEL", "id" : "ANY"}, multiplicity=1)

        # Extra model-specific check beyond the generic association rules.
        self.specific_validator = ValidatorConnectorsMatch(self)

        self.GenerateValidator()
Ejemplo n.º 13
0
def h2o_mean_squared_error(y_actual, y_predicted, weights=None):
  """
  Mean squared error regression loss

  :param y_actual: H2OFrame of actual response.
  :param y_predicted: H2OFrame of predicted response.
  :param weights: (Optional) sample weights. NOTE(review): accepted but
      ignored by the implementation below -- confirm whether weighting
      was intended.
  :return: loss (float) (best is 0.0)
  """
  ModelBase._check_targets(y_actual, y_predicted)
  return ((y_predicted-y_actual)**2).mean()
Ejemplo n.º 14
0
    def __init__(self):
        """
        Constructor.

        Registers the association rules allowed in the "Types" model
        (each bus, bus controller and device has exactly one type),
        then builds the validator.
        """
        ModelBase.__init__(self, "Types")

        self.AddAllowedAssociation(Type = {"cid" : "C_BUS_TYPE", "id" : "ANY"}, Subject = {"cid" : "C_BUS", "id" : "EACH"}, multiplicity = 1)
        self.AddAllowedAssociation(Type = {"cid" : "C_BUS_CONTROLLER_TYPE", "id" : "ANY"}, Subject = {"cid" : "C_BUS_CONTROLLER", "id" : "EACH"}, multiplicity = 1)
        self.AddAllowedAssociation(Type = {"cid" : "C_DEVICE_TYPE", "id" : "ANY"}, Subject = {"cid" : "C_DEVICE", "id" : "EACH"}, multiplicity = 1)

        self.GenerateValidator()
Ejemplo n.º 15
0
    def __init__(self):
        """
        Constructor.

        Registers the association rules allowed in the "Responsibilities"
        model (each IOC has one responsible server; each device has one
        responsible IOC), then builds the validator.
        """
        ModelBase.__init__(self, "Responsibilities")

        self.AddAllowedAssociation(Responsible = {"cid" : "C_SERVER", "id" : "ANY"}, Subject = {"cid" : "C_IOC", "id" : "EACH"}, multiplicity=1)
        self.AddAllowedAssociation(Responsible = {"cid" : "C_IOC", "id" : "ANY"}, Subject = {"cid" : "C_DEVICE", "id" : "EACH"}, multiplicity=1)

        self.GenerateValidator()
Ejemplo n.º 16
0
 def __init__(self,dbg=None,lg=None):
   # Initialize the model's fit parameters, all starting at 0.
   ModelBase.__init__(self,dbg=dbg,lg=lg)
   self.IH=Parameter(0.)
   self.ID=Parameter(0.)
   self.CH=Parameter(0.)
   self.NP=Parameter(0.)
   self.MA=Parameter(0.)
   self.ME=Parameter(0.)
   self.TF=Parameter(0.)
   self.FO=Parameter(0.)
   self.DAF=Parameter(0.)
   # Ordered collection of all parameters for bulk access.
   self.parameters=[self.IH,self.ID,self.CH,self.NP,self.MA,self.ME,self.TF,self.FO,self.DAF]
   # Result of a later fit/evaluation; not computed here.
   self.phi=None
Ejemplo n.º 17
0
    def __init__(self):
        """
        Constructor.

        Registers the association rules allowed in the "Types" model for
        devices, channels, links and connector groups (each subject gets
        exactly one type), then builds the validator.
        """
        ModelBase.__init__(self, "Types")

        self.AddAllowedAssociation(Type = {"cid" : "C_DEVICE_TYPE", "id" : "ANY"}, Subject = {"cid" : "C_DEVICE", "id" : "EACH"}, multiplicity = 1)
        self.AddAllowedAssociation(Type = {"cid" : "C_DEVICE_CHANNEL_GROUP", "id" : "ANY"}, Subject = {"cid" : "C_DEVICE_CHANNEL", "id" : "EACH"}, multiplicity = 1)
        self.AddAllowedAssociation(Type = {"cid" : "C_DEVICE_LINK_GROUP", "id" : "ANY"}, Subject = {"cid" : "C_DEVICE_LINK", "id" : "EACH"}, multiplicity = 1)
        self.AddAllowedAssociation(Type = {"cid" : "C_DEVICE_CONNECTOR_TYPE", "id" : "ANY"}, Subject = {"cid" : "C_DEVICE_CHANNEL_GROUP", "id" : "EACH"}, multiplicity = 1)
        self.AddAllowedAssociation(Type = {"cid" : "C_DEVICE_CONNECTOR_TYPE", "id" : "ANY"}, Subject = {"cid" : "C_DEVICE_LINK_GROUP", "id" : "EACH"}, multiplicity = 1)

        self.GenerateValidator()
Ejemplo n.º 18
0
    def __init__(self, parent, id, args, type):
        """
        Model node of kind 'Equation' parsed from args.

        Declarations (variables/parameters/equations) are shared with the
        parent; results of processing are accumulated in the lists below.
        """
        ModelBase.__init__(self, parent, id, args[1], 'Equation')
        self.identifier = id
        self.domain = args[1]
        # These values should be of the form: (rel_path.module, function_name)
        self.imports = []
        # Raw equation text/structure; args[3] per the caller's layout.
        self.eqn_slash_eqns = args[3]
        # Shared with the parent scope (not copied).
        self.declared_variables = parent.declared_variables
        self.declared_parameters = parent.declared_parameters
        self.declared_equations = parent.declared_equations
        self.warning_list = []
        self.processed_eqs = []
        self.domain_checking = parent.domain_checking

        return
Ejemplo n.º 19
0
  def __init__(self,dbg=None,lg=None):
    # Initialize the model's fit parameters, all starting at 0.
    ModelBase.__init__(self,dbg=dbg,lg=lg)

    self.Dd=Parameter(0.)
    self.gamma=Parameter(0.)
    self.theta=Parameter(0.)
    self.e=Parameter(0.)
    self.Dt=Parameter(0.)
    self.c=Parameter(0.)
    self.ip=Parameter(0.)
    self.l=Parameter(0.)
    self.r=Parameter(0.)

    # Ordered collection of all parameters for bulk access.
    self.parameters=[self.Dd,self.gamma,self.theta,self.e,self.Dt,self.c,self.ip,self.l,self.r]
    # Result of a later fit/evaluation; not computed here.
    self.phi=None
Ejemplo n.º 20
0
    def __init__(self):
        """
        Constructor.

        Registers the association rules allowed in the "Logical Connections"
        model (middleware/server/bridge/BPM chain plus algorithm-parameter
        links), then builds the validator.
        """
        ModelBase.__init__(self, "Logical Connections")

        self.AddAllowedAssociation(From = {"cid" : "C_MIDDLEWARE", "id" : "EACH"}, To = {"cid" : "C_SERVER", "id" : "ANY"}, multiplicity=1)
        self.AddAllowedAssociation(From = {"cid" : "C_SERVER", "id" : "EACH"}, To = {"cid" : "C_BRIDGE", "id" : "ANY"}, multiplicity=1)
        self.AddAllowedAssociation(From = {"cid" : "C_BRIDGE", "id" : "EACH"}, To = {"cid" : "C_BPM", "id" : "ANY"}, multiplicity=1)
        self.AddAllowedAssociation(From = {"cid" : "C_BRIDGE_GROUP", "id" : "ANY"}, To = {"cid" : "C_BRIDGE", "id" : "ANY"})
        self.AddAllowedAssociation(From = {"cid" : "C_BPM", "id" : "ANY"}, To = {"cid" : "C_ALGORITHM_PARAMETERS", "id" : "ANY"})
        self.AddAllowedAssociation(From = {"cid" : "C_MODE", "id" : "EACH"}, To = {"cid" : "C_ALGORITHM_PARAMETERS", "id" : "ANY"})
        self.AddAllowedAssociation(From = {"cid" : "C_ALGORITHM", "id" : "EACH"}, To = {"cid" : "C_ALGORITHM_PARAMETERS", "id" : "ANY"})

        self.GenerateValidator()
Ejemplo n.º 21
0
    def size(self, train=False, valid=False, xval=False):
        """
    Get the sizes of each cluster.

    If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where
    the keys are "train", "valid", and "xval".

    Parameters
    ----------
      train : bool, optional
        If True, then return cluster sizes for the training data.
      valid : bool, optional
        If True, then return the cluster sizes for the validation data.
      xval : bool, optional
        If True, then return the cluster sizes for each of the cross-validated splits.

    Returns
    -------
      Returns the cluster sizes for the specified key(s).
    """
        tm = ModelBase._get_metrics(self, train, valid, xval)
        m = {}
        for k, v in zip(tm.keys(), tm.values()):
            # Use a distinct name (row) instead of shadowing v; column 2 of
            # each centroid_stats row is what the original extracted (v[2],
            # presumably the cluster size -- TODO confirm schema).
            m[k] = None if v is None else [
                row[2] for row in v._metric_json["centroid_stats"].cell_values
            ]
        # list(...) keeps single-key extraction working on Python 3, where
        # dict.values() returns a non-indexable view.
        return list(m.values())[0] if len(m) == 1 else m
Ejemplo n.º 22
0
    def totss(self, train=False, valid=False, xval=False):
        """
    Get the total sum of squares.

    If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where
    the keys are "train", "valid", and "xval".

    Parameters
    ----------
      train : bool, optional
        If True, then return the total sum of squares value for the training
        data.
      valid : bool, optional
        If True, then return the total sum of squares value for the validation
        data.
      xval : bool, optional
        If True, then return the total sum of squares value for each of the
        cross-validated splits.

    Returns
    -------
      Returns the total sum of squares values for the specified key(s).
    """
        tm = ModelBase._get_metrics(self, train, valid, xval)
        m = {}
        for k, v in zip(tm.keys(), tm.values()):
            m[k] = None if v is None else v._metric_json["totss"]
        # list(...) keeps single-key extraction working on Python 3, where
        # dict.values() returns a non-indexable view.
        return list(m.values())[0] if len(m) == 1 else m
Ejemplo n.º 23
0
    def size(self, train=False, valid=False, xval=False):
        """
    Get the sizes of each cluster.

    If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where
    the keys are "train", "valid", and "xval".

    Parameters
    ----------
      train : bool, optional
        If True, then return cluster sizes for the training data.
      valid : bool, optional
        If True, then return the cluster sizes for the validation data.
      xval : bool, optional
        If True, then return the cluster sizes for each of the cross-validated splits.

    Returns
    -------
      Returns the cluster sizes for the specified key(s).
    """
        tm = ModelBase._get_metrics(self, train, valid, xval)
        m = {}
        for k, v in zip(tm.keys(), tm.values()):
            # Use a distinct name (row) instead of shadowing v; column 2 of
            # each centroid_stats row is what the original extracted (v[2],
            # presumably the cluster size -- TODO confirm schema).
            m[k] = None if v is None else [
                row[2] for row in v._metric_json["centroid_stats"].cell_values
            ]
        # list(...) keeps single-key extraction working on Python 3, where
        # dict.values() returns a non-indexable view.
        return list(m.values())[0] if len(m) == 1 else m
Ejemplo n.º 24
0
    def totss(self, train=False, valid=False, xval=False):
        """
    Get the total sum of squares.

    If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where
    the keys are "train", "valid", and "xval".

    Parameters
    ----------
      train : bool, optional
        If True, then return the total sum of squares value for the training
        data.
      valid : bool, optional
        If True, then return the total sum of squares value for the validation
        data.
      xval : bool, optional
        If True, then return the total sum of squares value for each of the
        cross-validated splits.

    Returns
    -------
      Returns the total sum of squares values for the specified key(s).
    """
        tm = ModelBase._get_metrics(self, train, valid, xval)
        m = {}
        for k, v in zip(tm.keys(), tm.values()):
            m[k] = None if v is None else v._metric_json["totss"]
        # list(...) keeps single-key extraction working on Python 3, where
        # dict.values() returns a non-indexable view.
        return list(m.values())[0] if len(m) == 1 else m
Ejemplo n.º 25
0
    def confusion_matrix(self,
                         metrics=None,
                         thresholds=None,
                         train=False,
                         valid=False,
                         xval=False):
        """
    Get the confusion matrix for the specified metrics/thresholds
    If all are False (default), then return the training metric value.
    If more than one options is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"

    :param metrics: A string (or list of strings) in {"min_per_class_accuracy", "absolute_MCC", "tnr", "fnr", "fpr", "tpr", "precision", "accuracy", "f0point5", "f2", "f1"}
    :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
    :param train: If train is True, then return the confusion matrix value for the training data.
    :param valid: If valid is True, then return the confusion matrix value for the validation data.
    :param xval:  If xval is True, then return the confusion matrix value for the cross validation data.
    :return: The confusion matrix for this binomial model.
    """
        tm = ModelBase._get_metrics(self, train, valid, xval)
        m = {}
        for k, v in zip(tm.keys(), tm.values()):
            m[k] = None if v is None else v.confusion_matrix(
                metrics=metrics, thresholds=thresholds)
        # list(...) keeps single-key extraction working on Python 3, where
        # dict.values() returns a non-indexable view.
        return list(m.values())[0] if len(m) == 1 else m
Ejemplo n.º 26
0
    def max_per_class_error(self,
                            thresholds=None,
                            train=False,
                            valid=False,
                            xval=False):
        """
    Get the max per class error for a set of thresholds.
    If all are False (default), then return the training metric value.
    If more than one options is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"

    :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
    :param train: If train is True, then return the max_per_class_error value for the training data.
    :param valid: If valid is True, then return the max_per_class_error value for the validation data.
    :param xval:  If xval is True, then return the max_per_class_error value for the cross validation data.
    :return: The max_per_class_error for this binomial model.
    """
        tm = ModelBase._get_metrics(self, train, valid, xval)
        m = {}
        for k, v in zip(tm.keys(), tm.values()):
            # max per-class error is 1 - min per-class accuracy at each
            # threshold; each mpca entry is [threshold, accuracy].
            m[k] = None if v is None else [[
                mpca[0], 1 - mpca[1]
            ] for mpca in v.metric("min_per_class_accuracy",
                                   thresholds=thresholds)]
        # list(...) keeps single-key extraction working on Python 3, where
        # dict.values() returns a non-indexable view.
        return list(m.values())[0] if len(m) == 1 else m
Ejemplo n.º 27
0
 def null_deviance(self):
   """
   :return: the null deviance if the model has residual deviance, or None if no null deviance.
   """
   key = "null_deviance"
   return self._metric_json[key] if ModelBase._has(self._metric_json, key) else None
Ejemplo n.º 28
0
  def __init__(self,dbg=None,lg=None):
    # Initialize the model's fit parameters, all starting at 0.
    ModelBase.__init__(self,dbg=dbg,lg=lg)

    self.C1=Parameter(0.)
    self.C2=Parameter(0.)
    self.C3=Parameter(0.)
    self.C4=Parameter(0.)
    self.C5=Parameter(0.)
    self.C6=Parameter(0.)
    self.C7=Parameter(0.)
    self.A0=Parameter(0.)
    self.A1=Parameter(0.)
    self.A2=Parameter(0.)
    self.A3=Parameter(0.)
    # Ordered collection of all parameters for bulk access.
    self.parameters=[self.C1,self.C2,self.C3,self.C4,self.C5,self.C6,self.C7,self.A0,self.A1,self.A2,self.A3]
    # Result of a later fit; not computed here.
    self.fit_plus_poly=None
Ejemplo n.º 29
0
def h2o_r2_score(y_actual, y_predicted, weights=1.):
    """R^2 (coefficient of determination) regression score function.

    :param y_actual: H2OFrame of actual response.
    :param y_predicted: H2OFrame of predicted response.
    :param weights: (Optional) sample weights
    :return: R^2 (float) (best is 1.0, lower is worse)
    """
    ModelBase._check_targets(y_actual, y_predicted)
    ss_res = (weights * (y_actual - y_predicted)**2).sum()
    ss_tot = (weights * (y_actual - y_actual.mean()[0])**2).sum()

    # Degenerate case of zero total variance: 0/0 => 1, otherwise 0.
    if ss_tot == 0.0:
        return 1. if ss_res == 0. else 0.
    return 1 - ss_res / ss_tot
Ejemplo n.º 30
0
 def cat_err(self):
   """
   :return: the Number of Misclassified categories over non-missing categorical entries, or None if not present.
   """
   if not ModelBase._has(self._metric_json, "caterr"):
     return None
   return self._metric_json["caterr"]
Ejemplo n.º 31
0
 def totss(self):
   """
   :return: the Total Sum-of-Square Error to Grand Mean, or None if not present.
   """
   key = "totss"
   return self._metric_json[key] if ModelBase._has(self._metric_json, key) else None
Ejemplo n.º 32
0
 def tot_withinss(self):
     """
     :return: the Total Within Cluster Sum-of-Square Error, or None if not present.
     """
     if not ModelBase._has(self._metric_json, "tot_withinss"):
         return None
     return self._metric_json["tot_withinss"]
Ejemplo n.º 33
0
 def tot_withinss(self):
   """
   :return: the Total Within Cluster Sum-of-Square Error, or None if not present.
   """
   key = "tot_withinss"
   return self._metric_json[key] if ModelBase._has(self._metric_json, key) else None
Ejemplo n.º 34
0
 def num_err(self):
     """
     :return: the Sum of Squared Error over non-missing numeric entries, or None if not present.
     """
     if not ModelBase._has(self._metric_json, "numerr"):
         return None
     return self._metric_json["numerr"]
Ejemplo n.º 35
0
 def betweenss(self):
     """
     :return: the Between Cluster Sum-of-Square Error, or None if not present.
     """
     key = "betweenss"
     return self._metric_json[key] if ModelBase._has(self._metric_json, key) else None
Ejemplo n.º 36
0
 def null_degrees_of_freedom(self):
   """
   :return: the null dof if the model has residual deviance, or None if no null dof.
   """
   if not ModelBase._has(self._metric_json, "null_degrees_of_freedom"):
     return None
   return self._metric_json["null_degrees_of_freedom"]
Ejemplo n.º 37
0
 def num_err(self):
   """
   :return: the Sum of Squared Error over non-missing numeric entries, or None if not present.
   """
   key = "numerr"
   return self._metric_json[key] if ModelBase._has(self._metric_json, key) else None
Ejemplo n.º 38
0
def h2o_explained_variance_score(y_actual, y_predicted, weights=None):
  """Explained variance regression score function.

  :param y_actual: H2OFrame of actual response.
  :param y_predicted: H2OFrame of predicted response.
  :param weights: (Optional) sample weights
  :return: the explained variance score (float)
  """
  ModelBase._check_targets(y_actual, y_predicted)

  var_residual = _mean_var(y_actual - y_predicted, weights)[1]
  var_actual = _mean_var(y_actual, weights)[1]
  # Degenerate case of zero variance: 0/0 => 1, otherwise 0.
  if var_actual == 0.0:
    return 1. if var_residual == 0 else 0.
  return 1 - var_residual / var_actual
Ejemplo n.º 39
0
def h2o_r2_score(y_actual, y_predicted, weights=1.):
  """R^2 (coefficient of determination) regression score function.

  :param y_actual: H2OFrame of actual response.
  :param y_predicted: H2OFrame of predicted response.
  :param weights: (Optional) sample weights
  :return: R^2 (float) (best is 1.0, lower is worse)
  """
  ModelBase._check_targets(y_actual, y_predicted)
  ss_res = (weights * (y_actual - y_predicted) ** 2).sum()
  ss_tot = (weights * (y_actual - y_actual.mean()) ** 2).sum()

  # Degenerate case of zero total variance: 0/0 => 1, otherwise 0.
  if ss_tot == 0.0:
    return 1. if ss_res == 0. else 0.
  return 1 - ss_res / ss_tot
Ejemplo n.º 40
0
 def cat_err(self):
     """
     :return: the Number of Misclassified categories over non-missing categorical entries, or None if not present.
     """
     key = "caterr"
     return self._metric_json[key] if ModelBase._has(self._metric_json, key) else None
Ejemplo n.º 41
0
def h2o_explained_variance_score(y_actual, y_predicted, weights=None):
    """Explained variance regression score function.

    :param y_actual: H2OFrame of actual response.
    :param y_predicted: H2OFrame of predicted response.
    :param weights: (Optional) sample weights
    :return: the explained variance score (float)
    """
    ModelBase._check_targets(y_actual, y_predicted)

    var_residual = _mean_var(y_actual - y_predicted, weights)[1]
    var_actual = _mean_var(y_actual, weights)[1]
    # Degenerate case of zero variance: 0/0 => 1, otherwise 0.
    if var_actual == 0.0:
        return 1. if var_residual == 0 else 0.
    return 1 - var_residual / var_actual
Ejemplo n.º 42
0
 def betweenss(self):
   """
   :return: the Between Cluster Sum-of-Square Error, or None if not present.
   """
   if not ModelBase._has(self._metric_json, "betweenss"):
     return None
   return self._metric_json["betweenss"]
Ejemplo n.º 43
0
 def totss(self):
     """
     :return: the Total Sum-of-Square Error to Grand Mean, or None if not present.
     """
     if not ModelBase._has(self._metric_json, "totss"):
         return None
     return self._metric_json["totss"]
Ejemplo n.º 44
0
 def null_degrees_of_freedom(self):
     """
     :return: the null dof if the model has residual deviance, or None if no null dof.
     """
     key = "null_degrees_of_freedom"
     return self._metric_json[key] if ModelBase._has(self._metric_json, key) else None
Ejemplo n.º 45
0
  def find_threshold_by_max_metric(self,metric,train=False, valid=False, xval=False):
    """
    If all are False (default), then return the training metric value.
    If more than one options is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"

    :param metric: The metric whose maximizing threshold is sought.
    :param train: If train is True, then return the threshold_by_max_metric value for the training data.
    :param valid: If valid is True, then return the threshold_by_max_metric value for the validation data.
    :param xval:  If xval is True, then return the threshold_by_max_metric value for the cross validation data.
    :return: The threshold_by_max_metric for this binomial model.
    """
    tm = ModelBase._get_metrics(self, train, valid, xval)
    m = {}
    for k, v in zip(tm.keys(), tm.values()):
      m[k] = None if v is None else v.find_threshold_by_max_metric(metric)
    # list(...) keeps single-key extraction working on Python 3, where
    # dict.values() returns a non-indexable view.
    return list(m.values())[0] if len(m) == 1 else m
Ejemplo n.º 46
0
    def gains_lift(self, train=False, valid=False, xval=False):
        """
    Get the Gains/Lift table for the specified metrics
    If all are False (default), then return the training metric Gains/Lift table.
    If more than one options is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"

    :param train: If train is True, then return the Gains/Lift table for the training data.
    :param valid: If valid is True, then return the Gains/Lift table for the validation data.
    :param xval:  If xval is True, then return the Gains/Lift table for the cross validation data.
    :return: The Gains/Lift table for this binomial model.
    """
        tm = ModelBase._get_metrics(self, train, valid, xval)
        m = {}
        for k, v in zip(tm.keys(), tm.values()):
            m[k] = None if v is None else v.gains_lift()
        # list(...) keeps single-key extraction working on Python 3, where
        # dict.values() returns a non-indexable view.
        return list(m.values())[0] if len(m) == 1 else m
Ejemplo n.º 47
0
  def hit_ratio_table(self, train=False, valid=False, xval=False):
    """
    Retrieve the Hit Ratios

    If all are False (default), then return the training metric value.
    If more than one options is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"

    :param train: If train is True, then return the hit ratio table for the training data.
    :param valid: If valid is True, then return the hit ratio table for the validation data.
    :param xval:  If xval is True, then return the hit ratio table for the cross validation data.
    :return: The hit ratio table for this model.
    """
    tm = ModelBase._get_metrics(self, train, valid, xval)
    m = {}
    for k, v in zip(tm.keys(), tm.values()):
      m[k] = None if v is None else v.hit_ratio_table()
    # list(...) keeps single-key extraction working on Python 3, where
    # dict.values() returns a non-indexable view.
    return list(m.values())[0] if len(m) == 1 else m
Ejemplo n.º 48
0
 def roc(self, train=False, valid=False, xval=False):
     """
     Return the coordinates of the ROC curve for a given set of data,
     as a two-tuple containing the false positive rates as a list and true positive
     rates as a list.
     If all are False (default), then return is the training data.
     If more than one ROC curve is requested, the data is returned as a dictionary
     of two-tuples.
     :param train: If train is true, then return the ROC coordinates for the training data.
     :param valid: If valid is true, then return the ROC coordinates for the validation data.
     :param xval: If xval is true, then return the ROC coordinates for the cross validation data.
     :return rocs_cooridinates: the true cooridinates of the roc curve.
     """
     tm = ModelBase._get_metrics(self, train, valid, xval)
     m = {}
     for k, v in zip(tm.keys(), tm.values()):
         # Missing metrics are skipped entirely (not stored as None),
         # matching the original behavior.
         if v is not None:
             m[k] = (v.fprs, v.tprs)
     # list(...) keeps single-key extraction working on Python 3, where
     # dict.values() returns a non-indexable view.
     return list(m.values())[0] if len(m) == 1 else m
Ejemplo n.º 49
0
    def missrate(self, thresholds=None, train=False, valid=False, xval=False):
        """
    Get the miss rate (AKA False Negative Rate) for a set of thresholds.
    If all are False (default), then return the training metric value.
    If more than one options is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"

    :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
    :param train: If train is True, then return the missrate value for the training data.
    :param valid: If valid is True, then return the missrate value for the validation data.
    :param xval:  If xval is True, then return the missrate value for the cross validation data.
    :return: The missrate for this binomial model.
    """
        tm = ModelBase._get_metrics(self, train, valid, xval)
        m = {}
        for k, v in zip(tm.keys(), tm.values()):
            m[k] = None if v is None else v.metric("fnr",
                                                   thresholds=thresholds)
        # list(...) keeps single-key extraction working on Python 3, where
        # dict.values() returns a non-indexable view.
        return list(m.values())[0] if len(m) == 1 else m
Ejemplo n.º 50
0
    def find_idx_by_threshold(self,
                              threshold,
                              train=False,
                              valid=False,
                              xval=False):
        """
    Retrieve the index in this metric's threshold list at which the given threshold is located.
    If all are False (default), then return the training metric value.
    If more than one options is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"

    :param threshold: The threshold whose index is sought.
    :param train: If train is True, then return the idx_by_threshold for the training data.
    :param valid: If valid is True, then return the idx_by_threshold for the validation data.
    :param xval:  If xval is True, then return the idx_by_threshold for the cross validation data.
    :return: The idx_by_threshold for this binomial model.
    """
        tm = ModelBase._get_metrics(self, train, valid, xval)
        m = {}
        for k, v in zip(tm.keys(), tm.values()):
            m[k] = None if v is None else v.find_idx_by_threshold(threshold)
        # list(...) keeps single-key extraction working on Python 3, where
        # dict.values() returns a non-indexable view.
        return list(m.values())[0] if len(m) == 1 else m
Ejemplo n.º 51
0
    def find_threshold_by_max_metric(self,
                                     metric,
                                     train=False,
                                     valid=False,
                                     xval=False):
        """
    If all are False (default), then return the training metric value.
    If more than one options is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"

    :param metric: The metric whose maximizing threshold is sought.
    :param train: If train is True, then return the threshold_by_max_metric value for the training data.
    :param valid: If valid is True, then return the threshold_by_max_metric value for the validation data.
    :param xval:  If xval is True, then return the threshold_by_max_metric value for the cross validation data.
    :return: The threshold_by_max_metric for this binomial model.
    """
        tm = ModelBase._get_metrics(self, train, valid, xval)
        m = {}
        for k, v in zip(tm.keys(), tm.values()):
            m[k] = None if v is None else v.find_threshold_by_max_metric(
                metric)
        # list(...) keeps single-key extraction working on Python 3, where
        # dict.values() returns a non-indexable view.
        return list(m.values())[0] if len(m) == 1 else m
Ejemplo n.º 52
0
    def F1(self, thresholds=None, train=False, valid=False, xval=False):
        """Get the F1 value for a set of thresholds

    If all are False (default), then return the training metric value.
    If more than one options is set to True, then return a dictionary of metrics where
    the keys are "train", "valid", and "xval".

    Parameters
    ----------
      thresholds : list, optional
        If None, then the thresholds in this set of metrics will be used.
      train : bool, optional
        If True, return the F1 value for the training data.
      valid : bool, optional
        If True, return the F1 value for the validation data.
      xval : bool, optional
        If True, return the F1 value for each of the cross-validated splits.

    Returns
    -------
      The F1 values for the specified key(s).

    Examples
    --------
    >>> import h2o as ml
    >>> from h2o.estimators.gbm import H2OGradientBoostingEstimator
    >>> ml.init()
    >>> rows=[[1,2,3,4,0],[2,1,2,4,1],[2,1,4,2,1],[0,1,2,34,1],[2,3,4,1,0]]*50
    >>> fr = ml.H2OFrame(rows)
    >>> fr[4] = fr[4].asfactor()
    >>> model = H2OGradientBoostingEstimator(ntrees=10, max_depth=10, nfolds=4)
    >>> model.train(x=range(4), y=4, training_frame=fr)
    >>> model.F1(train=True)
    """
        tm = ModelBase._get_metrics(self, train, valid, xval)
        m = {}
        # items() instead of the Python-2-only iteritems().
        for k, v in tm.items():
            m[k] = None if v is None else v.metric("f1", thresholds=thresholds)
        # list(...) keeps single-key extraction working on Python 3, where
        # dict.values() returns a non-indexable view.
        return list(m.values())[0] if len(m) == 1 else m
Ejemplo n.º 53
0
class ChainedRequest(ModelBase):
    """A chained request: an ordered chain of request prepids plus metadata."""

    _ModelBase__schema = {
        # Database id
        '_id': '',
        # PrepID
        'prepid': '',
        # Notes
        'notes': '',
        # List of requests in the chained request
        'chain': []}

    # Per-attribute validation callables; attributes without an entry
    # are accepted unconditionally.
    __lambda_checks = {
        'prepid': lambda prepid: ModelBase.matches_regex(prepid, '[a-zA-Z0-9]{1,50}')
    }

    def __init__(self, json_input=None):
        ModelBase.__init__(self, json_input)

    def check_attribute(self, attribute_name, attribute_value):
        """Run the registered check for attribute_name, if any; else accept."""
        check = self.__lambda_checks.get(attribute_name)
        if check is None:
            return True
        return check(attribute_value)
Ejemplo n.º 54
0
# Load the VGG19 model, run it on the test set with pretrained weights,
# and write both the plain and the per-instance predictions to disk.
from model_base import ModelBase
from model_vgg19 import ModelVGG19

m = ModelVGG19()

# Checkpoint file; the name encodes epoch and train/validation metrics.
weights_path = 'saved_weights/model-04-vgg19-type-1-epoch46-acc0.9164-loss0.2398-valacc0.8824-valloss0.4505.hdf5'

pred_test = m.predict_test(weights_path)
ModelBase.write_predictions(pred_test, 'test-pred-model-04-vgg19-type-1')

pred_instance = m.predict_instance_test(weights_path)
ModelBase.write_predictions(pred_instance,
                            'test-pred-model-04-vgg19-type-1-instance')
Ejemplo n.º 55
0
 def __init__(self, *args, **kwargs):
     # Thin wrapper: defer all initialization to ModelBase unchanged.
     ModelBase.__init__(self, *args, **kwargs)
Ejemplo n.º 56
0
 def __init__(self, *args, **kwargs):
     # Defer base initialization, then configure InceptionV3-specific
     # ImageNet weight settings.
     ModelBase.__init__(self, *args, **kwargs)
     self.imagenet_use_id = True
     # Pretrained notop weights published by keras (fchollet).
     self.imagenet_weights_url = \
         'https://github.com/fchollet/deep-learning-models/releases/download' \
         '/v0.5/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
Ejemplo n.º 57
0
 def __init__(self, *args, **kwargs):
     # Defer base initialization, then point at the VGG19 pretrained
     # notop ImageNet weights published by keras (fchollet).
     ModelBase.__init__(self, *args, **kwargs)
     self.imagenet_weights_url = \
         'https://github.com/fchollet/deep-learning-models/releases/download/v0.1' \
         '/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5'
Ejemplo n.º 58
0
 def __init__(self, json_input=None):
     # Thin wrapper: defer initialization from the JSON payload to ModelBase.
     ModelBase.__init__(self, json_input)
 def __init__(self, model_name, output_folder):
     # Delegate to ModelBase, which handles output_folder setup
     # (tensorboard, checkpoints).
     ModelBase.__init__(self, model_name, output_folder=output_folder)