Code example #1
File: test_taskgraph_api.py Project: idanre1/gQuant
    def setUp(self):
        import gc  # python garbage collector
        import cudf

        # warmup
        s = cudf.Series([1, 2, 3, None, 4], nan_as_null=False)
        del s
        gc.collect()

        os.environ['GQUANT_PLUGIN_MODULE'] = 'tests.unit.custom_port_nodes'

        points_task_spec = {
            TaskSpecSchema.task_id: 'points_task',
            TaskSpecSchema.node_type: 'PointNode',
            TaskSpecSchema.conf: {'npts': 1000},
            TaskSpecSchema.inputs: []
        }

        distance_task_spec = {
            TaskSpecSchema.task_id: 'distance_by_cudf',
            TaskSpecSchema.node_type: 'DistanceNode',
            TaskSpecSchema.conf: {},
            TaskSpecSchema.inputs: {
                'points_df_in': 'points_task.points_df_out'
            }
        }

        tspec_list = [points_task_spec, distance_task_spec]

        self.tgraph = TaskGraph(tspec_list)

        # Create a temporary directory
        self._test_dir = tempfile.mkdtemp()
        os.environ['GQUANT_CACHE_DIR'] = os.path.join(self._test_dir, '.cache')
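
The fixture above connects a PointNode producer to a DistanceNode consumer. A minimal sketch of exercising the resulting graph inside a test, with the output port name taken from the specs above (the full run, including the replace mechanism, appears in code example #21):

    # run the graph built in setUp and pull one output port
    (dist_df,) = self.tgraph.run(outputs=['distance_by_cudf.distance_df'])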
Code example #2
def mortgage_gquant_run(run_params_dict):
    '''Using dataframe-flow, runs the tasks/workflow specified in the
    run_params_dict. Expected run_params_dict example:
        run_params_dict = {
            'replace_spec': replace_spec,
            'task_spec_list': gquant_task_spec_list,
            'out_list': out_list
        }

    gquant_task_spec_list - Mortgage ETL workflow list of task-specs. Refer to
        module mortgage_common function mortgage_etl_workflow_def.

    out_list - Expected to specify one output which should be the final
        dataframe produced by the mortgage ETL workflow.

    :param run_params_dict: Dictionary with parameters and gquant task list to
        run mortgage workflow.

    '''
    from gquant.dataframe_flow import TaskGraph

    task_spec_list = run_params_dict['task_spec_list']
    out_list = run_params_dict['out_list']

    replace_spec = run_params_dict['replace_spec']
    task_graph = TaskGraph(task_spec_list)

    (final_perf_acq_df, ) = task_graph.run(out_list, replace_spec)

    return final_perf_acq_df
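
A usage sketch, assuming the task specs come from mortgage_etl_workflow_def (see code example #23) and that out_list names the workflow's final task; the task id here is hypothetical:

    run_params_dict = {
        'replace_spec': {},                            # no per-task overrides
        'task_spec_list': mortgage_etl_workflow_def(),
        'out_list': ['final_perf_acq_task'],           # hypothetical task id
    }
    final_perf_acq_df = mortgage_gquant_run(run_params_dict)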
Code example #3
    def test_columns_and_ports_types_match(self):
        numgen_spec = copy.deepcopy(self.numgen_spec)
        numproc_spec = copy.deepcopy(self.numproc_spec)

        numgen_spec[TaskSpecSchema.conf] = {'columns_option': 'listnums'}

        tspec_list = [numgen_spec, numproc_spec]
        tgraph_valid = TaskGraph(tspec_list)

        sumout, = tgraph_valid.run(['numproc.sum'])

        self.assertEqual(sumout, 45)
Code example #4
    def test_load_workflow(self):
        '''Test loading a workflow from yaml:'''
        from gquant.dataframe_flow import TaskGraph
        workflow_file = os.path.join(self._test_dir, 'test_save_workflow.yaml')

        with open(workflow_file, 'w') as wf:
            wf.write(WORKFLOW_YAML)

        task_list = TaskGraph.load_taskgraph(workflow_file)
        all_tasks_exist = True
        for t in task_list:
            match = False
            if t._task_spec in self._task_list:
                match = True
            if not match:
                all_tasks_exist = False
                break
        with StringIO() as yf:
            yaml.dump(self._task_list,
                      yf,
                      default_flow_style=False,
                      sort_keys=False)
            yf.seek(0)

            err_msg = 'Load workflow failed. Missing expected task items.\n'\
                'EXPECTED WORKFLOW YAML:\n\n'\
                '{wyaml}\n\n'\
                'GOT TASKS FORMATTED AS YAML:\n\n'\
                '{tlist}\n\n'.format(wyaml=WORKFLOW_YAML, tlist=yf.read())

            self.assertTrue(all_tasks_exist, err_msg)
Code example #5
    def test_ports_connection_subclass_type_match(self):
        numgen_spec = copy.deepcopy(self.numgen_spec)
        numproc_spec = copy.deepcopy(self.numproc_spec)

        numgen_spec[TaskSpecSchema.conf] = {
            'port_type': MyList,
            'columns_option': 'mylistnums'
        }
        numproc_spec[TaskSpecSchema.conf] = {'port_type': list}

        tspec_list = [numgen_spec, numproc_spec]
        tgraph_valid = TaskGraph(tspec_list)

        sumout, = tgraph_valid.run(['numproc.sum'])

        self.assertEqual(sumout, 45)
Code example #6
def get_nodes_from_file(file):
    """
    Given an input yaml file name, it returns a dict which has two keys.
        nodes:
            - list of node objects for the UI client. It contains all the
            necessary information about the node including the size of the
            node, input ports, output ports, output column names/types,
            conf schema and conf data.
        edges:
            - list of edge objects for the UI client. It enumerates all the
            edges in the graph.

    Arguments
    -------
    file: string
        file name

    Returns
    -------
    dict
        nodes and edges of the graph data

    """
    task_graph = TaskGraph.load_taskgraph(file)
    return get_nodes(task_graph)
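
Usage is a single call; a sketch assuming a saved taskgraph YAML at a hypothetical path:

    graph_data = get_nodes_from_file('taskgraphs/simple_example.yaml')
    nodes, edges = graph_data['nodes'], graph_data['edges']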
Code example #7
    def test_columns_type_mismatch(self):
        numgen_spec = copy.deepcopy(self.numgen_spec)
        numproc_spec = copy.deepcopy(self.numproc_spec)

        numgen_spec[TaskSpecSchema.conf] = {'columns_option': 'listnotnums'}

        tspec_list = [numgen_spec, numproc_spec]
        tgraph_invalid = TaskGraph(tspec_list)

        with self.assertRaises(LookupError) as cm:
            tgraph_invalid.run(['numproc.sum'])
        outerr_msg = '{}'.format(cm.exception)

        errmsg = 'Task "numproc" column "list" expected type "numbers" got '\
            'type "notnumbers" instead.'
        self.assertIn(errmsg, outerr_msg)
Code example #8
File: test_taskgraph_api.py Project: idanre1/gQuant
    def test_load(self):
        '''Test that a taskgraph can be loaded from a yaml file.
        '''
        workflow_file = os.path.join(self._test_dir,
                                     'test_load_taskgraph.yaml')

        global TASKGRAPH_YAML
        with open(workflow_file, 'w') as wf:
            wf.write(TASKGRAPH_YAML)

        tspec_list = [task._task_spec for task in self.tgraph]

        tgraph = TaskGraph.load_taskgraph(workflow_file)
        all_tasks_exist = True
        for task in tgraph:
            if task._task_spec not in tspec_list:
                all_tasks_exist = False
                break

        with StringIO() as yf:
            yaml.dump(tspec_list, yf,
                      default_flow_style=False, sort_keys=False)
            yf.seek(0)

            err_msg = 'Load taskgraph failed. Missing expected task items.\n'\
                'EXPECTED TASKGRAPH YAML:\n\n'\
                '{wyaml}\n\n'\
                'GOT TASKS FORMATTED AS YAML:\n\n'\
                '{tlist}\n\n'.format(wyaml=TASKGRAPH_YAML, tlist=yf.read())

            self.assertTrue(all_tasks_exist, err_msg)
Code example #9
    def test_columns_name_mismatch(self):
        numgen_spec = copy.deepcopy(self.numgen_spec)
        numproc_spec = copy.deepcopy(self.numproc_spec)

        numgen_spec[TaskSpecSchema.conf] = {'columns_option': 'rangenums'}

        tspec_list = [numgen_spec, numproc_spec]
        tgraph_invalid = TaskGraph(tspec_list)

        with self.assertRaises(LookupError) as cm:
            tgraph_invalid.run(['numproc.sum'])
        outerr_msg = '{}'.format(cm.exception)

        errmsg = 'Task "numproc" missing required column "list" from '\
            '"numgen.numlist".'
        self.assertIn(errmsg, outerr_msg)
Code example #10
    def _compute_hash_key(self):
        """
        If the hash changed, the port_setup, meta_setup
        and conf_json should be different.
        In very rare cases there might be a hash collision;
        it affects the column, port and conf calculation. It won't
        change the computation result though.
        It returns the hash code, the loaded task_graph and
        the replacement conf obj.
        """
        task_graph = ""
        inputs = ()
        replacementObj = {}
        input_node = ""
        task_graph_obj = None
        if 'taskgraph' in self.conf:
            task_graph = get_file_path(self.conf['taskgraph'])
            if os.path.exists(task_graph):
                with open(task_graph) as f:
                    task_graph = hashlib.md5(f.read().encode()).hexdigest()
                task_graph_obj = TaskGraph.load_taskgraph(
                    get_file_path(self.conf['taskgraph']))
        self.update_replace(replacementObj, task_graph_obj)
        if 'input' in self.conf:
            for inp in self.conf['input']:
                input_node += inp + ","
                if hasattr(self, 'inputs'):
                    for i in self.inputs:
                        inputs += (hash(i['from_node']),
                                   i['to_port'], i['from_port'])
        return (hash((self.uid, task_graph, inputs, json.dumps(self.conf),
                      input_node, json.dumps(replacementObj))), task_graph_obj,
                replacementObj)
Code example #11
File: compositeNode.py Project: zfy1989lee/gQuant
    def ports_setup(self):
        cache_key = self._compute_hash_key()
        if cache_key in cache_ports:
            # print('cache hit')
            return cache_ports[cache_key]
        inports = {}
        outports = {}
        if 'taskgraph' in self.conf:
            task_graph = TaskGraph.load_taskgraph(
                get_file_path(self.conf['taskgraph']))
            replacementObj = {}
            self.update_replace(replacementObj)
            task_graph.build(replace=replacementObj)

            def inputNode_fun(inputNode, in_ports):
                inport = {}
                before_fix = inputNode.ports_setup().inports
                for key in before_fix.keys():
                    if key in in_ports:
                        inport[key] = before_fix[key]
                inports.update(fix_port_name(inport, inputNode.uid))

            def outNode_fun(outNode, out_ports):
                ouport = {}
                before_fix = outNode.ports_setup().outports
                for key in before_fix.keys():
                    if key in out_ports:
                        ouport[key] = before_fix[key]
                outports.update(fix_port_name(ouport, outNode.uid))

            self._make_sub_graph_connection(task_graph, inputNode_fun,
                                            outNode_fun)
        output_port = NodePorts(inports=inports, outports=outports)
        cache_ports[cache_key] = output_port
        return output_port
Code example #12
File: handlers.py Project: idanre1/gQuant
    def post(self):
        # input_data is a dictionary with a key "name"
        input_data = self.get_json_body()
        task_graph = TaskGraph(input_data)
        # import pudb
        # pudb.set_trace()
        nodes_and_edges = get_nodes(task_graph)
        self.finish(json.dumps(nodes_and_edges))
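
Because the JSON body is handed straight to TaskGraph, the POST payload is expected to be a task-spec list. A sketch of a matching request; the route URL is hypothetical and the spec keys ('id', 'type', 'conf', 'inputs') are assumed from the YAML task-spec format:

    import requests
    specs = [{'id': 'points_task', 'type': 'PointNode',
              'conf': {'npts': 1000}, 'inputs': []}]
    resp = requests.post('http://localhost:8888/gquant/taskgraph',  # hypothetical route
                         json=specs)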
Code example #13
    def test_ports_connection_subclass_type_mismatch(self):
        numgen_spec = copy.deepcopy(self.numgen_spec)
        numproc_spec = copy.deepcopy(self.numproc_spec)

        numgen_spec[TaskSpecSchema.conf] = {'columns_option': 'listnums'}
        numproc_spec[TaskSpecSchema.conf] = {'port_type': MyList}

        tspec_list = [numgen_spec, numproc_spec]
        tgraph_invalid = TaskGraph(tspec_list)

        with self.assertRaises(TypeError) as cm:
            tgraph_invalid.run(['numproc.sum'])
        outerr_msg = '{}'.format(cm.exception)

        errmsg = 'Connected nodes do not have matching port types. '\
            'Fix port types.'
        self.assertIn(errmsg, outerr_msg)
Code example #14
    def test_ports_output_type_mismatch(self):
        numgen_spec = copy.deepcopy(self.numgen_spec)
        numproc_spec = copy.deepcopy(self.numproc_spec)

        numgen_spec[TaskSpecSchema.conf] = {
            'columns_option': 'listnums',
            'out_type': 'rangenums'
        }

        tspec_list = [numgen_spec, numproc_spec]
        tgraph_invalid = TaskGraph(tspec_list)

        with self.assertRaises(TypeError) as cm:
            tgraph_invalid.run(['numproc.sum'])
        outerr_msg = '{}'.format(cm.exception)

        errmsg = 'Node "numgen" output port "numlist" produced wrong type '\
            '"<class \'range\'>". Expected type "[<class \'list\'>]"'
        self.assertEqual(errmsg, outerr_msg)
Code example #15
File: flow.py Project: zfy1989lee/gQuant
def main():
    parser = argparse.ArgumentParser(
        description='Evaluate the dataframe flow graph')
    parser.add_argument('-t', '--task', help="the yaml task file")
    parser.add_argument('output', help="the output nodes", nargs='+')
    args = parser.parse_args()
    import pudb
    pudb.set_trace()

    task_graph = TaskGraph.load_workflow(args.task)
    print('output nodes:', args.output)
    task_graph.run(args.output)
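
Given the argparse setup, the script takes a task YAML via -t and one or more output node names as positional arguments; note the pudb.set_trace() call drops into the debugger before anything runs. A sketch of the equivalent driver code (file and node names hypothetical):

    # equivalent to: python flow.py -t my_graph.yaml node_a.out node_b.out
    task_graph = TaskGraph.load_workflow('my_graph.yaml')
    task_graph.run(['node_a.out', 'node_b.out'])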
Code example #16
    def test_save_workflow(self):
        '''Test saving a workflow to yaml:'''
        from gquant.dataframe_flow import TaskGraph
        task_graph = TaskGraph(self._task_list)
        workflow_file = os.path.join(self._test_dir, 'test_save_workflow.yaml')
        task_graph.save_taskgraph(workflow_file)

        with open(workflow_file) as wf:
            workflow_str = wf.read()

        # verify the workflow contents are the same as expected. Empty list if same.
        cdiff = list(context_diff(WORKFLOW_YAML, workflow_str))
        cdiff_empty = cdiff == []

        err_msg = 'Workflow yaml contents do not match expected results.\n'\
            'SHOULD HAVE SAVED:\n\n'\
            '{wyaml}\n\n'\
            'INSTEAD FILE CONTAINS:\n\n'\
            '{fcont}\n\n'\
            'DIFF:\n\n'\
            '{diff}'.format(wyaml=WORKFLOW_YAML, fcont=workflow_str,
                            diff=''.join(cdiff))

        self.assertTrue(cdiff_empty, err_msg)
Code example #17
File: compositeNode.py Project: zfy1989lee/gQuant
    def columns_setup(self):
        cache_key = self._compute_hash_key()
        if cache_key in cache_columns:
            # print('cache hit')
            return cache_columns[cache_key]
        required = {}
        out_columns = {}
        if 'taskgraph' in self.conf:
            task_graph = TaskGraph.load_taskgraph(
                get_file_path(self.conf['taskgraph']))
            replacementObj = {}
            self.update_replace(replacementObj)
            task_graph.build(replace=replacementObj)

            def inputNode_fun(inputNode, in_ports):
                req = {}
                # do columns_setup so required columns are ready
                inputNode.columns_setup()
                for key in inputNode.required.keys():
                    if key in in_ports:
                        req[key] = inputNode.required[key]
                required.update(fix_port_name(req, inputNode.uid))

            def outNode_fun(outNode, out_ports):
                oucols = {}
                before_fix = outNode.columns_setup()
                for key in before_fix.keys():
                    if key in out_ports:
                        oucols[key] = before_fix[key]
                out_columns.update(fix_port_name(oucols, outNode.uid))

            self._make_sub_graph_connection(task_graph, inputNode_fun,
                                            outNode_fun)

        self.required = required
        cache_columns[cache_key] = out_columns
        return out_columns
Code example #18
    def process(self, inputs):
        """
        Composite computation

        Arguments
        -------
         inputs: list
            list of input dataframes.
        Returns
        -------
        dataframe
        """
        if 'taskgraph' in self.conf:
            task_graph = TaskGraph.load_taskgraph(
                get_file_path(self.conf['taskgraph']))
            task_graph.build()

            outputLists = []
            replaceObj = {}
            input_feeders = []

            def inputNode_fun(inputNode, in_ports):
                inports = inputNode.ports_setup().inports

                class InputFeed(Node):

                    def meta_setup(self):
                        output = {}
                        for inp in inputNode.inputs:
                            output[inp['to_port']] = inp[
                                'from_node'].meta_setup().outports[
                                    inp['from_port']]
                        # it will be something like { input_port: columns }
                        return MetaData(inports={}, outports=output)

                    def ports_setup(self):
                        # it will be something like { input_port: types }
                        return NodePorts(inports={}, outports=inports)

                    def conf_schema(self):
                        return ConfSchema()

                    def process(self, empty):
                        output = {}
                        for key in inports.keys():
                            if inputNode.uid+'@'+key in inputs:
                                output[key] = inputs[inputNode.uid+'@'+key]
                        return output

                uni_id = str(uuid.uuid1())
                obj = {
                    TaskSpecSchema.task_id: uni_id,
                    TaskSpecSchema.conf: {},
                    TaskSpecSchema.node_type: InputFeed,
                    TaskSpecSchema.inputs: []
                }
                input_feeders.append(obj)
                newInputs = {}
                for key in inports.keys():
                    if inputNode.uid+'@'+key in inputs:
                        newInputs[key] = uni_id+'.'+key
                for inp in inputNode.inputs:
                    if inp['to_port'] not in in_ports:
                        # need to keep the old connections
                        newInputs[inp['to_port']] = (inp['from_node'].uid
                                                     + '.' + inp['from_port'])
                replaceObj.update({inputNode.uid: {
                    TaskSpecSchema.inputs: newInputs}
                })

            def outNode_fun(outNode, out_ports):
                out_ports = outNode.ports_setup().outports
                # fixed_outports = fix_port_name(out_ports, outNode.uid)
                for key in out_ports.keys():
                    if self.outport_connected(outNode.uid+'@'+key):
                        outputLists.append(outNode.uid+'.'+key)

            self._make_sub_graph_connection(task_graph,
                                            inputNode_fun, outNode_fun)

            task_graph.extend(input_feeders)
            self.update_replace(replaceObj, task_graph)
            result = task_graph.run(outputLists, replace=replaceObj)
            output = {}
            for key in result.get_keys():
                splits = key.split('.')
                output['@'.join(splits)] = result[key]
            return output
        else:
            return {}
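
The final loop re-keys sub-graph results from 'task.port' to 'task@port', mirroring the 'uid@port' keys that process reads from its own inputs dict. A sketch of what a caller sees (node ids, port names, and the dataframe are hypothetical):

    result = composite_node.process({'innode@points_df_in': some_df})
    sub_df = result['outnode@distance_df']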
Code example #19
File: handlers.py Project: idanre1/gQuant
    def post(self):
        # input_data is a dictionary with a key "path"
        input_data = self.get_json_body()
        task_graph = TaskGraph.load_taskgraph(input_data['path'])
        nodes_and_edges = get_nodes(task_graph)
        self.finish(json.dumps(nodes_and_edges))
Code example #20
File: compositeNode.py Project: zfy1989lee/gQuant
    def conf_schema(self):
        cache_key = self._compute_hash_key()
        if cache_key in cache_schema:
            # print('cache hit')
            return cache_schema[cache_key]
        json = {
            "title": "Composite Node configure",
            "type": "object",
            "description": """Use a sub taskgraph as a composite node""",
            "properties": {
                "taskgraph": {
                    "type": "string",
                    "description": "the taskgraph filepath"
                },
                "input": {
                    "type": "array",
                    "description": "the input node ids",
                    "items": {
                        "type": "string"
                    }
                },
                "output": {
                    "type": "array",
                    "description": "the output node ids",
                    "items": {
                        "type": "string"
                    }
                },
                "subnode_ids": {
                    "title":
                    self.uid + " subnode ids",
                    "type":
                    "array",
                    "items": {
                        "type": "string"
                    },
                    "description":
                    """sub graph node ids that need
                    to be reconfigured"""
                },
                "subnodes_conf": {
                    "title": self.uid + " subnodes configuration",
                    "type": "object",
                    "properties": {}
                }
            },
            "required": ["taskgraph"],
        }
        ui = {
            "taskgraph": {
                "ui:widget": "TaskgraphSelector"
            },
            "subnodes_conf": {}
        }
        if 'taskgraph' in self.conf:
            task_graphh = TaskGraph.load_taskgraph(
                get_file_path(self.conf['taskgraph']))
            replacementObj = {}
            self.update_replace(replacementObj)
            task_graphh.build(replace=replacementObj)

            def inputNode_fun(inputNode, in_ports):
                pass

            def outNode_fun(outNode, out_ports):
                pass

            self._make_sub_graph_connection(task_graphh, inputNode_fun,
                                            outNode_fun)

            ids_in_graph = []
            in_ports = []
            out_ports = []
            for t in task_graphh:
                node_id = t.get('id')
                if node_id != '':
                    node = task_graphh[node_id]
                    all_ports = node.ports_setup()
                    for port in all_ports.inports.keys():
                        in_ports.append(node_id + '.' + port)
                    for port in all_ports.outports.keys():
                        out_ports.append(node_id + '.' + port)
                    ids_in_graph.append(node_id)
            json['properties']['input']['items']['enum'] = in_ports
            json['properties']['output']['items']['enum'] = out_ports
            json['properties']['subnode_ids']['items']['enum'] = ids_in_graph
        if 'subnode_ids' in self.conf:
            for subnodeId in self.conf['subnode_ids']:
                if subnodeId in task_graphh:
                    nodeObj = task_graphh[subnodeId]
                    schema = nodeObj.conf_schema()
                    json['properties']["subnodes_conf"]['properties'][
                        subnodeId] = {
                            "type": "object",
                            "properties": {
                                "conf": schema.json
                            }
                        }
                    ui["subnodes_conf"].update(
                        {subnodeId: {
                            'conf': schema.ui
                        }})
        out_schema = ConfSchema(json=json, ui=ui)
        cache_schema[cache_key] = out_schema
        return out_schema
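
A sketch of a conf that this schema would accept; only 'taskgraph' is required, and the paths and node ids here are hypothetical:

    conf = {
        'taskgraph': 'taskgraphs/sub_graph.yaml',
        'input': ['node_a.in_port'],
        'output': ['node_b.out_port'],
        'subnode_ids': ['node_b'],
        'subnodes_conf': {'node_b': {'conf': {}}}
    }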
Code example #21
File: test_taskgraph_api.py Project: idanre1/gQuant
class TestTaskGraphAPI(unittest.TestCase):
    def setUp(self):
        import gc  # python garbage collector
        import cudf

        # warmup
        s = cudf.Series([1, 2, 3, None, 4], nan_as_null=False)
        del s
        gc.collect()

        os.environ['GQUANT_PLUGIN_MODULE'] = 'tests.unit.custom_port_nodes'

        points_task_spec = {
            TaskSpecSchema.task_id: 'points_task',
            TaskSpecSchema.node_type: 'PointNode',
            TaskSpecSchema.conf: {'npts': 1000},
            TaskSpecSchema.inputs: []
        }

        distance_task_spec = {
            TaskSpecSchema.task_id: 'distance_by_cudf',
            TaskSpecSchema.node_type: 'DistanceNode',
            TaskSpecSchema.conf: {},
            TaskSpecSchema.inputs: {
                'points_df_in': 'points_task.points_df_out'
            }
        }

        tspec_list = [points_task_spec, distance_task_spec]

        self.tgraph = TaskGraph(tspec_list)

        # Create a temporary directory
        self._test_dir = tempfile.mkdtemp()
        os.environ['GQUANT_CACHE_DIR'] = os.path.join(self._test_dir, '.cache')

    def tearDown(self):
        global DEFAULT_MODULE
        os.environ['GQUANT_PLUGIN_MODULE'] = DEFAULT_MODULE
        os.environ['GQUANT_CACHE_DIR'] = Node.cache_dir
        shutil.rmtree(self._test_dir)

    @ordered
    def test_viz_graph(self):
        '''Test taskgraph to networkx graph conversion for graph visualization.
        '''
        nx_graph = self.tgraph.viz_graph(show_ports=True)
        nx_nodes = ['points_task', 'points_task.points_df_out',
                    'points_task.points_ddf_out',
                    'distance_by_cudf', 'distance_by_cudf.distance_df',
                    'distance_by_cudf.distance_abs_df']
        nx_edges = [('points_task', 'points_task.points_df_out'),
                    ('points_task', 'points_task.points_ddf_out'),
                    ('points_task.points_df_out', 'distance_by_cudf'),
                    ('distance_by_cudf', 'distance_by_cudf.distance_df'),
                    ('distance_by_cudf', 'distance_by_cudf.distance_abs_df')]
        self.assertEqual(list(nx_graph.nodes), nx_nodes)
        self.assertEqual(list(nx_graph.edges), nx_edges)

    @ordered
    def test_build(self):
        '''Test build of a taskgraph and that all inputs and outputs are set
        for the tasks within a taskgraph.
        '''
        self.tgraph.build()

        points_node = self.tgraph['points_task']
        distance_node = self.tgraph['distance_by_cudf']

        onode_info = {
            'to_node': distance_node,
            'to_port': 'points_df_in',
            'from_port': 'points_df_out'
        }
        self.assertIn(onode_info, points_node.outputs)

        onode_cols = {'points_df_out': {'x': 'float64', 'y': 'float64'},
                      'points_ddf_out': {'x': 'float64', 'y': 'float64'}}
        self.assertEqual(onode_cols, points_node.meta_setup().outports)

        inode_info = {
            'from_node': points_node,
            'from_port': 'points_df_out',
            'to_port': 'points_df_in'
        }
        self.assertIn(inode_info, distance_node.inputs)

        inode_in_cols = {
            'points_df_in': {
                'x': 'float64',
                'y': 'float64'
            }
        }
        self.assertEqual(inode_in_cols, distance_node.get_input_meta())

        inode_out_cols = {'distance_df': {'distance_cudf': 'float64',
                                          'x': 'float64',
                                          'y': 'float64'},
                          'distance_abs_df': {'distance_abs_cudf': 'float64',
                                              'x': 'float64', 'y': 'float64'}}
        self.assertEqual(inode_out_cols, distance_node.meta_setup().outports)

    @ordered
    def test_run(self):
        '''Test that a taskgraph can run successfully.
        '''
        outlist = ['distance_by_cudf.distance_df']
        # Using numpy random seed to get repeatable and deterministic results.
        # For seed 2335 should get something around 761.062831178.
        replace_spec = {
            'points_task': {
                TaskSpecSchema.conf: {
                    'npts': 1000,
                    'nseed': 2335
                }
            }
        }
        (dist_df_w_cudf, ) = self.tgraph.run(
            outputs=outlist, replace=replace_spec)
        dist_sum = dist_df_w_cudf['distance_cudf'].sum()
        # self.assertAlmostEqual(dist_sum, 0.0, places, msg, delta)
        self.assertAlmostEqual(dist_sum, 761.062831178)  # match to 7 places

    @ordered
    def test_save(self):
        '''Test that a taskgraph can be saved to a yaml file.
        '''
        workflow_file = os.path.join(self._test_dir,
                                     'test_save_taskgraph.yaml')
        self.tgraph.save_taskgraph(workflow_file)

        with open(workflow_file) as wf:
            workflow_str = wf.read()

        # verify the workflow contents are the same as expected. Empty list if same.
        global TASKGRAPH_YAML
        cdiff = list(context_diff(TASKGRAPH_YAML, workflow_str))
        cdiff_empty = cdiff == []

        err_msg = 'Taskgraph yaml contents do not match expected results.\n'\
            'SHOULD HAVE SAVED:\n\n'\
            '{wyaml}\n\n'\
            'INSTEAD FILE CONTAINS:\n\n'\
            '{fcont}\n\n'\
            'DIFF:\n\n'\
            '{diff}'.format(wyaml=TASKGRAPH_YAML, fcont=workflow_str,
                            diff=''.join(cdiff))

        self.assertTrue(cdiff_empty, err_msg)

    @ordered
    def test_load(self):
        '''Test that a taskgraph can be loaded from a yaml file.
        '''
        workflow_file = os.path.join(self._test_dir,
                                     'test_load_taskgraph.yaml')

        global TASKGRAPH_YAML
        with open(workflow_file, 'w') as wf:
            wf.write(TASKGRAPH_YAML)

        tspec_list = [task._task_spec for task in self.tgraph]

        tgraph = TaskGraph.load_taskgraph(workflow_file)
        all_tasks_exist = True
        for task in tgraph:
            if task._task_spec not in tspec_list:
                all_tasks_exist = False
                break

        with StringIO() as yf:
            yaml.dump(tspec_list, yf,
                      default_flow_style=False, sort_keys=False)
            yf.seek(0)

            err_msg = 'Load taskgraph failed. Missing expected task items.\n'\
                'EXPECTED TASKGRAPH YAML:\n\n'\
                '{wyaml}\n\n'\
                'GOT TASKS FORMATTED AS YAML:\n\n'\
                '{tlist}\n\n'.format(wyaml=TASKGRAPH_YAML, tlist=yf.read())

            self.assertTrue(all_tasks_exist, err_msg)

    @ordered
    def test_save_load_cache(self):
        '''Test caching of tasks outputs within a taskgraph.

            1. Save points_task output to cache when running the taskgraph.
            2. Load points_task df from cache when running the taskgraph.
        '''
        replace_spec = {'points_task': {TaskSpecSchema.save: True}}
        outlist = ['distance_by_cudf.distance_df']

        with warnings.catch_warnings():
            # ignore UserWarning: Using CPU via Pandas to write HDF dataset
            warnings.filterwarnings(
                'ignore',
                message='Using CPU via Pandas to write HDF dataset',
                category=UserWarning,)
            # ignore RuntimeWarning: numpy.ufunc size changed
            warnings.filterwarnings('ignore',
                                    category=RuntimeWarning,
                                    message='numpy.ufunc size changed')
            (_, ) = self.tgraph.run(outputs=outlist, replace=replace_spec)

        cache_dir = os.path.join(self._test_dir, '.cache', 'points_task.hdf5')
        self.assertTrue(os.path.exists(cache_dir))

        replace_spec = {'points_task': {TaskSpecSchema.load: True}}
        with warnings.catch_warnings():
            # ignore UserWarning: Using CPU via Pandas to read HDF dataset
            warnings.filterwarnings(
                'ignore',
                message='Using CPU via Pandas to read HDF dataset',
                category=UserWarning)
            (_, ) = self.tgraph.run(outputs=outlist, replace=replace_spec)
Code example #22
            def search_fun(config, checkpoint_dir=None):
                myinputs = {}
                for key in data_store.keys():
                    v = ray.get(data_store[key])
                    if isinstance(v, pandas.DataFrame):
                        myinputs[key] = cudf.from_pandas(v)
                    else:
                        myinputs[key] = v
                task_graph = TaskGraph.load_taskgraph(
                    get_file_path(self.conf['taskgraph']))
                task_graph.build()

                outputLists = [train_id + '.' + 'checkpoint_dir']
                replaceObj = {}
                input_feeders = []

                def inputNode_fun(inputNode, in_ports):
                    inports = inputNode.ports_setup().inports

                    class InputFeed(Node):
                        def meta_setup(self):
                            output = {}
                            for inp in inputNode.inputs:
                                output[inp['to_port']] = inp[
                                    'from_node'].meta_setup()[inp['from_port']]
                            # it will be something like { input_port: columns }
                            return output

                        def ports_setup(self):
                            # it will be something like { input_port: types }
                            return NodePorts(inports={}, outports=inports)

                        def conf_schema(self):
                            return ConfSchema()

                        def process(self, empty):
                            output = {}
                            for key in inports.keys():
                                if (inputNode.uid + '@' + key in myinputs):
                                    output[key] = myinputs[inputNode.uid +
                                                           '@' + key]
                            return output

                    uni_id = str(uuid.uuid1())
                    obj = {
                        TaskSpecSchema.task_id: uni_id,
                        TaskSpecSchema.conf: {},
                        TaskSpecSchema.node_type: InputFeed,
                        TaskSpecSchema.inputs: []
                    }
                    input_feeders.append(obj)
                    newInputs = {}
                    for key in inports.keys():
                        if inputNode.uid + '@' + key in myinputs:
                            newInputs[key] = uni_id + '.' + key
                    for inp in inputNode.inputs:
                        if inp['to_port'] not in in_ports:
                            # need to keep the old connections
                            newInputs[inp['to_port']] = (inp['from_node'].uid +
                                                         '.' +
                                                         inp['from_port'])
                    replaceObj.update(
                        {inputNode.uid: {
                            TaskSpecSchema.inputs: newInputs
                        }})

                def outNode_fun(outNode, out_ports):
                    pass

                self._make_sub_graph_connection(task_graph, inputNode_fun,
                                                outNode_fun)

                task_graph.extend(input_feeders)
                self.update_conf_for_search(replaceObj, task_graph, config)
                task_graph.run(outputLists, replace=replaceObj)
Code example #23
def main():
    _basedir = os.path.dirname(__file__)

    # mortgage_data_path = '/datasets/rapids_data/mortgage'
    mortgage_data_path = os.path.join(_basedir, 'mortgage_data')

    # Using some default csv files for testing.
    # csvfile_names = os.path.join(mortgage_data_path, 'names.csv')
    # acq_data_path = os.path.join(mortgage_data_path, 'acq')
    # perf_data_path = os.path.join(mortgage_data_path, 'perf')
    # csvfile_acqdata = os.path.join(acq_data_path, 'Acquisition_2000Q1.txt')
    # csvfile_perfdata = \
    #     os.path.join(perf_data_path, 'Performance_2000Q1.txt_0')
    # mortgage_etl_workflow_def(
    #     csvfile_names, csvfile_acqdata, csvfile_perfdata)

    gquant_task_spec_list = mortgage_etl_workflow_def()

    start_year = 2000
    end_year = 2001  # end_year is inclusive
    # end_year = 2016  # end_year is inclusive
    # part_count = 16  # the number of data files to train against
    part_count = 12  # the number of data files to train against
    # part_count = 4  # the number of data files to train against

    mortgage_run_params_dict_list = generate_mortgage_gquant_run_params_list(
        mortgage_data_path, start_year, end_year, part_count,
        gquant_task_spec_list)

    _basedir = os.path.dirname(__file__)
    mortgage_lib_module = os.path.join(_basedir, 'mortgage_gquant_plugins.py')

    mortgage_workflow_runner_task = {
        TaskSpecSchema.task_id:
        MortgageTaskNames.mortgage_workflow_runner_task_name,
        TaskSpecSchema.node_type: 'MortgageWorkflowRunner',
        TaskSpecSchema.conf: {
            'mortgage_run_params_dict_list': mortgage_run_params_dict_list
        },
        TaskSpecSchema.inputs: [],
        TaskSpecSchema.filepath: mortgage_lib_module
    }

    # Can be multi-gpu. Set ngpus > 1. This is different from dask xgboost,
    # which is distributed multi-gpu, i.e. dask-xgboost can distribute across
    # one node or multiple nodes. In distributed mode the dmatrix is
    # distributed.
    ngpus = 1
    xgb_gpu_params = {
        'nround': 100,
        'max_depth': 8,
        'max_leaves': 2**8,
        'alpha': 0.9,
        'eta': 0.1,
        'gamma': 0.1,
        'learning_rate': 0.1,
        'subsample': 1,
        'reg_lambda': 1,
        'scale_pos_weight': 2,
        'min_child_weight': 30,
        'tree_method': 'gpu_hist',
        'n_gpus': ngpus,
        # 'distributed_dask': True,
        'loss': 'ls',
        # 'objective': 'gpu:reg:linear',
        'objective': 'reg:squarederror',
        'max_features': 'auto',
        'criterion': 'friedman_mse',
        'grow_policy': 'lossguide',
        'verbose': True
    }

    xgb_trainer_task = {
        TaskSpecSchema.task_id:
        MortgageTaskNames.xgb_trainer_task_name,
        TaskSpecSchema.node_type:
        'XgbMortgageTrainer',
        TaskSpecSchema.conf: {
            'delete_dataframes': False,
            'xgb_gpu_params': xgb_gpu_params
        },
        TaskSpecSchema.inputs:
        [MortgageTaskNames.mortgage_workflow_runner_task_name],
        TaskSpecSchema.filepath:
        mortgage_lib_module
    }

    task_spec_list = [mortgage_workflow_runner_task, xgb_trainer_task]
    task_graph = TaskGraph(task_spec_list)

    # out_list = [MortgageTaskNames.mortgage_workflow_runner_task_name]
    # ((mortgage_feat_df_pandas, delinq_df_pandas),) = task_graph.run(out_list)

    out_list = [MortgageTaskNames.xgb_trainer_task_name]
    (bst, ) = task_graph.run(out_list)

    print('XGBOOST BOOSTER:\n', bst)
Code example #24
def main():

    memory_limit = 128e9
    threads_per_worker = 4
    cluster = LocalCUDACluster(memory_limit=memory_limit,
                               threads_per_worker=threads_per_worker)
    client = Client(cluster)
    sched_info = client.scheduler_info()

    print('CLIENT: {}'.format(client))
    print('SCHEDULER INFO:\n{}'.format(json.dumps(sched_info, indent=2)))

    # Importing here in case RMM is used later on. Must start client prior
    # to importing cudf stuff if using RMM.
    from gquant.dataframe_flow import (TaskSpecSchema, TaskGraph)

    # workers_names = \
    #     [iw['name'] for iw in client.scheduler_info()['workers'].values()]
    # nworkers = len(workers_names)

    _basedir = os.path.dirname(__file__)
    # mortgage_data_path = '/datasets/rapids_data/mortgage'
    mortgage_data_path = os.path.join(_basedir, 'mortgage_data')

    # Using some default csv files for testing.
    # csvfile_names = os.path.join(mortgage_data_path, 'names.csv')
    # acq_data_path = os.path.join(mortgage_data_path, 'acq')
    # perf_data_path = os.path.join(mortgage_data_path, 'perf')
    # csvfile_acqdata = os.path.join(acq_data_path, 'Acquisition_2000Q1.txt')
    # csvfile_perfdata = \
    #     os.path.join(perf_data_path, 'Performance_2000Q1.txt_0')
    # mortgage_etl_workflow_def(
    #     csvfile_names, csvfile_acqdata, csvfile_perfdata)

    gquant_task_spec_list = mortgage_etl_workflow_def()

    start_year = 2000
    end_year = 2001  # end_year is inclusive
    # end_year = 2016  # end_year is inclusive
    # part_count = 16  # the number of data files to train against

    # create_dmatrix_serially - When False, if there is not enough host RAM
    # on the node, creating the dmatrix becomes a race condition. Make sure
    # there is enough host RAM, otherwise set this to True.
    # create_dmatrix_serially = False

    # able to do 18 with create_dmatrix_serially set to True
    part_count = 18  # the number of data files to train against
    create_dmatrix_serially = True
    # part_count = 4  # the number of data files to train against

    # Use RAPIDS Memory Manager. Seems to work fine without it.
    use_rmm = False

    # Clean up intermediate dataframes in the xgboost training task.
    delete_dataframes = True

    mortgage_run_params_dict_list = generate_mortgage_gquant_run_params_list(
        mortgage_data_path, start_year, end_year, part_count,
        gquant_task_spec_list)

    _basedir = os.path.dirname(__file__)
    mortgage_lib_module = os.path.join(_basedir, 'mortgage_gquant_plugins.py')

    filter_dask_logger = False

    mortgage_workflow_runner_task = {
        TaskSpecSchema.task_id:
        MortgageTaskNames.dask_mortgage_workflow_runner_task_name,
        TaskSpecSchema.node_type: 'DaskMortgageWorkflowRunner',
        TaskSpecSchema.conf: {
            'mortgage_run_params_dict_list': mortgage_run_params_dict_list,
            'client': client,
            'use_rmm': use_rmm,
            'filter_dask_logger': filter_dask_logger,
        },
        TaskSpecSchema.inputs: [],
        TaskSpecSchema.filepath: mortgage_lib_module
    }

    dxgb_gpu_params = {
        'nround': 100,
        'max_depth': 8,
        'max_leaves': 2**8,
        'alpha': 0.9,
        'eta': 0.1,
        'gamma': 0.1,
        'learning_rate': 0.1,
        'subsample': 1,
        'reg_lambda': 1,
        'scale_pos_weight': 2,
        'min_child_weight': 30,
        'tree_method': 'gpu_hist',
        'n_gpus': 1,
        'distributed_dask': True,
        'loss': 'ls',
        # 'objective': 'gpu:reg:linear',
        'objective': 'reg:squarederror',
        'max_features': 'auto',
        'criterion': 'friedman_mse',
        'grow_policy': 'lossguide',
        'verbose': True
    }

    dxgb_trainer_task = {
        TaskSpecSchema.task_id:
        MortgageTaskNames.dask_xgb_trainer_task_name,
        TaskSpecSchema.node_type:
        'DaskXgbMortgageTrainer',
        TaskSpecSchema.conf: {
            'create_dmatrix_serially': create_dmatrix_serially,
            'delete_dataframes': delete_dataframes,
            'dxgb_gpu_params': dxgb_gpu_params,
            'client': client,
            'filter_dask_logger': filter_dask_logger
        },
        TaskSpecSchema.inputs:
        [MortgageTaskNames.dask_mortgage_workflow_runner_task_name],
        TaskSpecSchema.filepath:
        mortgage_lib_module
    }

    task_spec_list = [mortgage_workflow_runner_task, dxgb_trainer_task]

    out_list = [MortgageTaskNames.dask_xgb_trainer_task_name]
    task_graph = TaskGraph(task_spec_list)
    (bst, ) = task_graph.run(out_list)

    print('XGBOOST BOOSTER:\n', bst)