def test_workflows(self):
    """Verify a trivial single-row dataframe round-trips through both the
    standard workflow (via ``compute``) and the interactive workflow
    (via ``persist``/``result``) on this suite's engine.
    """
    standard = FugueWorkflow().df([[0]], "a:int")
    df_eq(standard.compute(self.engine), [[0]], "a:int")

    # Interactive variant: persisting should expose the same data on .result.
    interactive = _FugueInteractiveWorkflow(self.engine).df([[0]], "a:int").persist()
    df_eq(interactive.result, [[0]], "a:int")
def test_default_init(self):
    """Verify ``compute`` accepts the Dask engine class (not an instance)
    and produces the expected single-row result.
    """
    dag = FugueWorkflow().df([[0]], "a:int")
    result = dag.compute(DaskExecutionEngine)
    df_eq(result, [[0]], "a:int")
def test_default_session(self):
    """Verify ``compute`` accepts the Spark engine class (not an instance),
    picking up the default Spark session, and yields the expected row.
    """
    dag = FugueWorkflow().df([[0]], "a:int")
    result = dag.compute(SparkExecutionEngine)
    df_eq(result, [[0]], "a:int")
def test_df_init(self):
    """Verify a native PySpark DataFrame can seed a workflow directly and
    the computed result matches the original data.
    """
    # Build the input with the suite's Spark session rather than fugue.
    spark_df = self.spark_session.createDataFrame([[1.1]], "a:double")
    dag = FugueWorkflow().df(spark_df)
    df_eq(dag.compute(SparkExecutionEngine), [[1.1]], "a:double")