def fit_on_spark(self,
                 df,
                 num_steps=None,
                 profile=False,
                 reduce_results=True,
                 max_retries=3,
                 info=None):
    # Run the base-class validation on the Spark DataFrame, materialize it
    # as a Ray dataset with one shard per worker, then train on that dataset.
    super(TorchEstimator, self).fit_on_spark(df)
    ds = save_to_ray(df, self._num_workers)
    self.fit(ds, num_steps, profile, reduce_results, max_retries, info)
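
# A minimal usage sketch for TorchEstimator.fit_on_spark. The constructor
# arguments (num_workers, model, optimizer, loss) and the SparkSession setup
# below are illustrative assumptions, not part of the snippet above.
import torch
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("torch-on-spark").getOrCreate()
train_df = spark.read.parquet("train.parquet")  # assumed input path

model = torch.nn.Linear(10, 1)
estimator = TorchEstimator(
    num_workers=2,                    # also the shard count for save_to_ray
    model=model,
    optimizer=torch.optim.Adam(model.parameters(), lr=1e-3),
    loss=torch.nn.MSELoss(),
)
estimator.fit_on_spark(train_df, num_steps=100)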
def from_spark_df(df: "pyspark.sql.DataFrame", num_shards: int = 2) -> "Dataset[T]":
    # Persist the Spark DataFrame into the Ray object store as a dataset
    # split into `num_shards` shards.
    return rcontext.save_to_ray(df, num_shards)
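
# A minimal usage sketch for from_spark_df; the SparkSession setup is an
# illustrative assumption.
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("spark-to-ray").getOrCreate()
df = spark.range(0, 1000).toDF("x")

ds = from_spark_df(df, num_shards=2)  # one shard per downstream consumer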
def fit_on_spark(self, df, **kwargs) -> None:
    # Same flow as the Torch variant: validate the Spark DataFrame,
    # materialize it as a sharded Ray dataset, then train on it.
    super(TFEstimator, self).fit_on_spark(df, **kwargs)
    ds = save_to_ray(df, self._num_workers)
    self.fit(ds)
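
# A minimal usage sketch for TFEstimator.fit_on_spark, assuming a
# raydp-style constructor that wraps a Keras model; the arguments below
# are illustrative assumptions.
import tensorflow as tf
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("tf-on-spark").getOrCreate()
train_df = spark.read.parquet("train.parquet")  # assumed input path

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(10,))])
estimator = TFEstimator(
    num_workers=2,
    model=model,
    optimizer=tf.keras.optimizers.Adam(1e-3),
    loss="mse",
)
estimator.fit_on_spark(train_df)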