コード例 #1
0
    def exec(self, args: dict):
        """Parse a local file into a new prescience source.

        Falls back to interactive prompts for every remaining parameter
        whenever no input file path was provided on the command line.
        """
        path = args.get('input-filepath')
        src_name = args.get('source_id')
        file_kind = args.get('input_type')
        csv_separator = args.get('separator')
        header_row = args.get('headers')
        should_watch = args.get('watch')

        # A missing file path switches the whole command to interactive mode.
        interactive_mode = path is None
        if interactive_mode:
            path = get_args_or_prompt_input(
                args=args,
                arg_name='input_filepath',
                message=
                'Please indicate the path on your local file you want to parse',
                force_interactive=interactive_mode)
            file_kind = get_args_or_prompt_list(
                args=args,
                arg_name='input_type',
                message='What is the type of your input file ?',
                choices_function=lambda: list(map(str, InputType)),
                force_interactive=interactive_mode)
            # A separator only makes sense for CSV inputs.
            if file_kind == InputType.CSV:
                csv_separator = get_args_or_prompt_list(
                    args=args,
                    arg_name='separator',
                    message='What is your CSV Separator ?',
                    choices_function=lambda: list(map(str, Separator)),
                    force_interactive=interactive_mode)
            else:
                csv_separator = None

            header_row = get_args_or_prompt_confirm(
                args=args,
                arg_name='headers',
                message=
                'Does your csv file has a header row ? (doesn\'t matter in case of a parquet file)',
                force_interactive=interactive_mode)
            src_name = get_args_or_prompt_input(
                args=args,
                arg_name='source_id',
                message=
                'What will be the name of your source ? (should be unique in your project)',
                force_interactive=interactive_mode)
            should_watch = get_args_or_prompt_confirm(
                args=args,
                arg_name='watch',
                message=
                'Do you want to keep watching for the task until it ends ?',
                force_interactive=interactive_mode)

        # Build the input descriptor and trigger the parsing task.
        local_input = LocalFileInput(input_type=file_kind,
                                     headers=header_row,
                                     separator=csv_separator,
                                     filepath=path,
                                     prescience=self.prescience_client)
        task = local_input.parse(source_id=src_name)
        if should_watch:
            task.watch()
コード例 #2
0
    def exec(self, args: dict):
        """Start an optimization task on a dataset.

        Any parameter missing from ``args`` is collected through an
        interactive prompt; time-series datasets are additionally asked
        for a forecasting horizon and discount when in interactive mode.
        """
        interactive_mode = args.get('dataset-id') is None
        dataset_id = get_args_or_prompt_list(
            arg_name='dataset-id',
            args=args,
            message='Which dataset do you want to preprocess ?',
            choices_function=lambda: [
                x.dataset_id()
                for x in self.prescience_client.datasets(page=1).content
            ],
            force_interactive=interactive_mode)
        budget = get_args_or_prompt_input(
            arg_name='budget',
            args=args,
            message='Which budget do you want to allow on optimization ?',
            force_interactive=interactive_mode)

        dataset = self.prescience_client.dataset(dataset_id)
        scoring_metric = get_args_or_prompt_list(
            arg_name='scoring-metric',
            args=args,
            message='On which scoring metric do you want to optimize on ?',
            choices_function=lambda: get_scoring_metrics(
                dataset.problem_type(), dataset.label_id(), dataset.source()),
            force_interactive=interactive_mode)
        # Reuse the dataset fetched above instead of issuing a second,
        # identical API request just to read its problem type.
        if (interactive_mode and
                dataset.problem_type() == ProblemType.TIME_SERIES_FORECAST):
            forecast_horizon_steps = get_args_or_prompt_input(
                arg_name='forecast_horizon_steps',
                args=args,
                message='How many steps do you expect as a forecast horizon ?',
                force_interactive=interactive_mode)
            forecast_discount = get_args_or_prompt_input(
                arg_name='forecast_discount',
                args=args,
                message=
                'Which discount value fo you want to apply on your forecasted values ?',
                force_interactive=interactive_mode)
        else:
            # Non-interactive (or non time-series): take the raw CLI values.
            forecast_horizon_steps = args.get('forecast_horizon_steps')
            forecast_discount = args.get('forecast_discount')

        watch = get_args_or_prompt_confirm(
            arg_name='watch',
            args=args,
            message='Do you want to keep watching for the task until it ends ?',
            force_interactive=interactive_mode)
        task = self.prescience_client.optimize(
            dataset_id=dataset_id,
            scoring_metric=scoring_metric,
            budget=budget,
            optimization_method=None,
            custom_parameter=None,
            forecasting_horizon_steps=forecast_horizon_steps,
            forecast_discount=forecast_discount)
        if watch:
            task.watch()
コード例 #3
0
ファイル: get_command.py プロジェクト: ovh/prescience-client
    def exec(self, args: dict):
        """Display a model, or one of its detail views, in the chosen format."""
        model_id = get_args_or_prompt_list(
            args=args,
            arg_name='id',
            message='Which model do you want to get ?',
            choices_function=lambda: [
                m.model_id()
                for m in self.prescience_client.models(page=1).content
            ])
        output = args.get('output') or OutputFormat.TABLE

        # Flags selecting which view of the model to display; the first one
        # that is set wins, mirroring the mutually exclusive CLI switches.
        if args.get('scores'):
            print(self.prescience_client.get_metric_scores_dataframe(model_id))
        elif args.get('confusion_matrix'):
            print(self.prescience_client.get_confusion_matrix(model_id))
        elif args.get('test_evaluation'):
            self.prescience_client.model_test_evaluation(model_id).show(output)
        elif args.get('tree'):
            self.prescience_client.model(model_id).tree().show()
        else:
            self.prescience_client.model(model_id).show(output)
コード例 #4
0
    def exec(self, args: dict):
        """Create a column mask on an existing dataset.

        Missing parameters are collected interactively; once one parameter
        has to be prompted for, all following ones are prompted as well.
        """
        interactive_mode = args.get('dataset-id') is None
        dataset_id = get_args_or_prompt_list(
            arg_name='dataset-id',
            args=args,
            message='Which dataset do you want to mask ?',
            choices_function=lambda: [
                x.dataset_id()
                for x in self.prescience_client.datasets(page=1).content
            ],
            force_interactive=interactive_mode)
        dataset = self.prescience_client.dataset(dataset_id)

        def field_selection_function():
            """Return the column names of the dataset schema."""
            return [x.name() for x in dataset.schema().fields()]

        interactive_mode = interactive_mode or args.get('mask-id') is None
        mask_id = get_args_or_prompt_input(
            arg_name='mask-id',
            args=args,
            message='What will be the name of you mask ?',
            force_interactive=interactive_mode)

        # Falsy check also covers a missing/None 'columns' value, which
        # previously crashed with TypeError on len(None).
        interactive_mode = interactive_mode or not args.get('columns')
        columns = get_args_or_prompt_checkbox(
            arg_name='columns',
            args=args,
            message=
            'Select the column in your initial dataset that you want to keep',
            choices_function=field_selection_function,
            selected_function=field_selection_function,
            force_interactive=interactive_mode)
        self.prescience_client.create_mask(dataset_id=dataset_id,
                                           mask_id=mask_id,
                                           selected_column=columns)
コード例 #5
0
 def exec(self, args: dict):
     """Switch the client configuration onto the selected project."""
     chosen = get_args_or_prompt_list(
         args=args,
         arg_name='project',
         message='Which project do you want to switch on ?',
         choices_function=self.prescience_client.config(
         ).get_all_projects_names)
     # Persist the choice as the current project in the local config.
     self.prescience_client.config().set_current_project(project_name=chosen)
コード例 #6
0
ファイル: get_command.py プロジェクト: ovh/prescience-client
 def exec(self, args: dict):
     """Show a single task, selected by uuid, in the requested output format."""
     fmt = args.get('output') or OutputFormat.TABLE
     chosen_uuid = get_args_or_prompt_list(
         args=args,
         arg_name='id',
         message='Which task uuid do you want to get ?',
         choices_function=lambda: [
             t.uuid() for t in self.prescience_client.tasks(page=1).content
         ])
     self.prescience_client.task(chosen_uuid).show(fmt)
コード例 #7
0
ファイル: get_command.py プロジェクト: ovh/prescience-client
 def exec(self, args: dict):
     """Display every available algorithm configuration of one category."""
     chosen_category = get_args_or_prompt_list(
         args=args,
         arg_name='category',
         message='Which algorithm category do you want to get ?',
         choices_function=lambda: [
             str(c) for c in AlgorithmConfigurationCategory
         ])
     self.prescience_client.get_available_configurations(
         kind=chosen_category).show(output=args.get('output'))
コード例 #8
0
    def exec(self, args: dict):
        """Launch an evaluation of a custom algorithm configuration on a dataset.

        The configuration is either parsed verbatim from the ``custom-config``
        JSON argument or, in interactive mode, built step by step from the
        algorithms associated with the chosen dataset.
        """
        interactive_mode = args.get('dataset-id') is None
        dataset_id = get_args_or_prompt_list(
            arg_name='dataset-id',
            args=args,
            message='Which dataset do you want to launch an evaluation on ?',
            choices_function=lambda: [
                x.dataset_id()
                for x in self.prescience_client.datasets(page=1).content
            ],
            force_interactive=interactive_mode)
        interactive_mode = interactive_mode or args.get(
            'custom-config') is None
        if not interactive_mode:
            prescience_config = Config(
                json_dict=json.loads(args.get('custom-config')))
        else:
            # Use interactive mode to create the configuration
            dataset = self.prescience_client.dataset(dataset_id=dataset_id)
            all_config_list = dataset.get_associated_algorithm()

            # PEP 8 (E731): use a def instead of assigning a lambda to a name.
            def choice_function():
                """All algorithm names across the associated configurations."""
                return UtilList(all_config_list).flat_map(
                    lambda x: x.get_algorithm_list_names()).value

            algo_id = get_args_or_prompt_list(
                arg_name='algo_id',
                args=args,
                message='Which algorithm ID do you want to get ?',
                choices_function=choice_function)
            # First configuration that knows this algorithm id (None if absent).
            # NOTE(review): a None here raises AttributeError two lines below —
            # verify whether an explicit error message is wanted.
            algorithm = UtilList(all_config_list) \
                .map(lambda x: x.get_algorithm(algo_id)) \
                .find(lambda x: x is not None) \
                .get_or_else(None)

            prescience_config = algorithm.interactive_kwargs_instanciation()
        watch = get_args_or_prompt_confirm(
            arg_name='watch',
            args=args,
            message='Do you want to keep watching for the task until it ends ?',
            force_interactive=interactive_mode)
        # Echo the final configuration before submitting it.
        print(json.dumps(prescience_config.to_dict(), indent=4))
        task = self.prescience_client.custom_config(dataset_id=dataset_id,
                                                    config=prescience_config)
        if watch:
            task.watch()
コード例 #9
0
ファイル: plot_command.py プロジェクト: ovh/prescience-client
    def exec(self, args: dict):
        """Plot the evaluation results of a dataset for a scoring metric."""
        interactive_mode = args.get('id') is None

        chosen_dataset = get_args_or_prompt_list(
            args=args,
            arg_name='id',
            message='For which dataset do you want to plot evaluations ?',
            choices_function=lambda: [
                d.dataset_id()
                for d in self.prescience_client.datasets(page=1).content
            ],
            force_interactive=interactive_mode)

        dataset = self.prescience_client.dataset(chosen_dataset)
        metric = get_args_or_prompt_list(
            args=args,
            arg_name='scoring-metric',
            message='On which scoring metric do you want to optimize on ?',
            choices_function=lambda: get_scoring_metrics(
                dataset.problem_type(), dataset.label_id(), dataset.source()),
            force_interactive=interactive_mode)

        # Non time-series datasets need no extra forecasting parameters.
        if dataset.problem_type() != ProblemType.TIME_SERIES_FORECAST:
            self.prescience_client.plot_evaluations(chosen_dataset, metric)
            return

        horizon = get_args_or_prompt_input(
            args=args,
            arg_name='forecast_horizon_steps',
            message='For which horizon do you want to plot evaluations ?',
            force_interactive=interactive_mode,
            validator=IntegerValidator,
            filter_func=int)
        discount = get_args_or_prompt_input(
            args=args,
            arg_name='forecast_discount',
            message=
            'For Which discount value do you want to plot evaluations ?',
            force_interactive=interactive_mode,
            validator=FloatValidator,
            filter_func=float)
        self.prescience_client.plot_evaluations(chosen_dataset, metric,
                                                horizon, discount)
コード例 #10
0
ファイル: get_command.py プロジェクト: ovh/prescience-client
 def exec(self, args: dict):
     """Show the evaluation of a model in the requested output format."""
     chosen_model = get_args_or_prompt_list(
         args=args,
         arg_name='id',
         message='Which model do you want to get ?',
         choices_function=lambda: [
             m.model_id()
             for m in self.prescience_client.models(page=1).content
         ])
     fmt = args.get('output') or OutputFormat.TABLE
     self.prescience_client.model(
         chosen_model).get_model_evaluator().show(fmt)
コード例 #11
0
ファイル: get_command.py プロジェクト: ovh/prescience-client
 def exec(self, args: dict):
     """Display one algorithm of a category, or interactively build a
     configuration for it when the ``create`` flag is set."""
     chosen_category = get_args_or_prompt_list(
         args=args,
         arg_name='category',
         message='Which algorithm category do you want to get ?',
         choices_function=lambda: [
             str(c) for c in AlgorithmConfigurationCategory
         ])
     configurations = self.prescience_client.get_available_configurations(
         kind=chosen_category)
     chosen_algo = get_args_or_prompt_list(
         args=args,
         arg_name='id',
         message='Which algorithm ID do you want to get ?',
         choices_function=configurations.get_algorithm_list_names)
     algorithm = configurations.get_algorithm(chosen_algo)
     if args.get('create'):
         # Build the kwargs interactively and dump the resulting config.
         built = algorithm.interactive_kwargs_instanciation()
         print(json.dumps(built.to_dict()))
     else:
         algorithm.show(output=args.get('output'))
コード例 #12
0
    def exec(self, args):
        """Dispatch execution to the sub-command matching ``args``.

        If no sub-command matches, the user is prompted to pick a subject,
        which is injected into ``args`` before retrying the lookup.
        """
        maybe_cmd = List(self.sub_commands)\
            .find(lambda cmd: cmd.should_exec(args))\
            .get_or_else(None)

        if maybe_cmd is None:
            subject = get_args_or_prompt_list(
                arg_name='subject',
                args=args,
                # Fixed grammar of the user-facing prompt ("an command").
                message='Please select a command to execute',
                choices_function=lambda:
                [cmd.name for cmd in self.sub_commands])
            args[f'{self.name}_subject'] = subject
            maybe_cmd = List(self.sub_commands) \
                .find(lambda cmd: cmd.should_exec(args)) \
                .get_or_else(None)

        # NOTE(review): maybe_cmd can still be None if the chosen subject
        # matches no sub-command, which raises AttributeError here — confirm
        # whether that is acceptable.
        maybe_cmd.exec(args)
コード例 #13
0
    def exec(self, args: dict):
        """Run the complete auto-ML flow on a Warp10 time series.

        Gathers the source, sampling, forecasting and budget parameters
        (prompting interactively whenever ``source-id`` is absent from
        ``args``), optionally attaches a prediction scheduler, starts the
        auto-ML task and prints a Warp10 quantum URL pointing at the results.
        """

        # A missing source id on the CLI switches the whole command to
        # interactive mode: every later prompt is then forced as well.
        interactive_mode = args.get('source-id') is None

        source_id = get_args_or_prompt_input(
            arg_name='source-id',
            args=args,
            message='What will be the name of the generated source',
            force_interactive=interactive_mode)

        # Token used to read the series from Warp10.
        read_token = get_args_or_prompt_input(
            arg_name='read-token',
            args=args,
            message='What is your read token',
            force_interactive=interactive_mode)

        selector = get_args_or_prompt_input(
            arg_name='selector',
            args=args,
            message='What is your warp 10 selector, It must match only one GTS',
            force_interactive=interactive_mode)

        labels = get_args_or_prompt_input(
            arg_name='labels',
            args=args,
            message=
            'What is your warp 10 labels, It must match only one GTS (ex: {"label":"label1"})',
            force_interactive=interactive_mode)

        sample_span = get_args_or_prompt_input(
            arg_name='sample-span',
            args=args,
            message=
            'What is the span over which the sample is read.(e.g: 6w for 6 weeks)',
            force_interactive=interactive_mode)

        sampling_interval = get_args_or_prompt_input(
            arg_name='sampling-interval',
            args=args,
            message=
            'The size of the interval which is reduced to a single point.(e.g: 1d for 1 day)',
            force_interactive=interactive_mode)

        # Generated dataset/model names are only prompted for interactively;
        # otherwise they come straight from the parsed arguments.
        if interactive_mode:
            dataset_id = get_args_or_prompt_input(
                arg_name='dataset_id',
                args=args,
                message='What will be the name of the generated dataset',
                force_interactive=interactive_mode)

            model_id = get_args_or_prompt_input(
                arg_name='model_id',
                args=args,
                message='What will be the name of the generated model',
                force_interactive=interactive_mode)
        else:
            dataset_id = args.get('dataset_id')
            model_id = args.get('model_id')

        sampling_strategy = get_args_or_prompt_list(
            arg_name='sampling_strategy',
            args=args,
            message=
            'Wich strategy to use to transform an interval into a point ?',
            choices_function=lambda: list(map(str, SamplingStrategy)),
            force_interactive=interactive_mode)

        forecasting_horizon_steps = get_args_or_prompt_input(
            arg_name='forecast_horizon_steps',
            args=args,
            message='How many steps do you expect as a forecast horizon ?',
            force_interactive=interactive_mode,
            validator=IntegerValidator,
            filter_func=int)

        # NOTE(review): the message typo "fo you want" is a runtime string,
        # deliberately left untouched here.
        forecast_discount = get_args_or_prompt_input(
            arg_name='forecast_discount',
            args=args,
            message=
            'Which discount value fo you want to apply on your forecasted values ?',
            force_interactive=interactive_mode,
            validator=FloatValidator,
            filter_func=float)

        nb_fold = get_args_or_prompt_input(
            arg_name='nb_fold',
            args=args,
            message='How many folds do you want ?',
            force_interactive=interactive_mode,
            validator=IntegerValidator,
            filter_func=int)

        scoring_metric = get_args_or_prompt_list(
            arg_name='scoring_metric',
            args=args,
            message='On which scoring metric do you want to optimize on ?',
            choices_function=lambda: list(map(str, ScoringMetricRegression)),
            force_interactive=interactive_mode)

        budget = get_args_or_prompt_input(
            arg_name='budget',
            args=args,
            message='Which budget do you want to allow on optimization ?',
            force_interactive=interactive_mode,
            validator=IntegerValidator,
            filter_func=int)

        backend_url = args.get('backend_url')

        # Assemble the Warp10 input description for the auto-ML call.
        input_ts = TimeSerieFeature(selector, labels)
        warp_input = Warp10TimeSerieInput(value=input_ts,
                                          source_id=source_id,
                                          read_token=read_token,
                                          sample_span=sample_span,
                                          sampling_interval=sampling_interval,
                                          sampling_strategy=sampling_strategy,
                                          backend_url=backend_url,
                                          last_point_date=None)

        scheduler = args.get('scheduler')

        if interactive_mode:
            scheduler = get_args_or_prompt_confirm(
                arg_name='scheduler',
                args=args,
                message=
                'Do you want to add a scheduler to the model generated ?',
                force_interactive=interactive_mode)

        if scheduler:
            # Token used to write the predictions back into Warp10.
            write_token = get_args_or_prompt_input(
                arg_name='write_token',
                args=args,
                message='What is your write token',
                force_interactive=interactive_mode)

            scheduler_frequency = get_args_or_prompt_input(
                arg_name='scheduler_frequency',
                args=args,
                message='How many intervals you want to make a prediction ?',
                force_interactive=interactive_mode,
                validator=IntegerValidator,
                filter_func=int)

            # We keep the same labels and append '.predicted' to the selector
            # so forecasts are stored next to the source series.
            scheduler_output = Warp10Scheduler(write_token=write_token,
                                               frequency=scheduler_frequency,
                                               output_value=TimeSerieFeature(
                                                   f'{selector}.predicted',
                                                   labels),
                                               nb_steps=None)
        else:
            scheduler_output = None

        watch = get_args_or_prompt_confirm(
            arg_name='watch',
            args=args,
            message='Do you want to keep watching for the task until it ends ?',
            force_interactive=interactive_mode)

        # Kick off the auto-ML pipeline; only the parent task is kept here.
        task, _, _ = self.prescience_client.start_auto_ml_warp10(
            warp_input=warp_input,
            scheduler_output=scheduler_output,
            dataset_id=dataset_id,
            model_id=model_id,
            scoring_metric=scoring_metric,
            nb_fold=nb_fold,
            budget=budget,
            forecasting_horizon_steps=forecasting_horizon_steps,
            forecast_discount=forecast_discount)

        # Build and print a quantum URL so the user can inspect the forecast.
        query = Warp10Util.generate_warp10_query(
            token=read_token,
            input_ts=input_ts,
            interval=sampling_interval,
            horizon=forecasting_horizon_steps)

        print('You can find your results here:')
        print(Warp10Util.generate_warp10_quantum_query(query, backend_url))

        if watch:
            task.watch()
コード例 #14
0
    def exec(self, args: dict):
        """Run the auto-ML flow (preprocess + train) on an existing source.

        Prompts interactively for missing parameters whenever ``source-id``
        is absent from ``args``; time-series problems additionally collect a
        time column, a forecasting horizon and a discount.
        """

        interactive_mode = args.get('source-id') is None
        source_id = get_args_or_prompt_list(
            arg_name='source-id',
            args=args,
            message='Which source do you want to preprocess ?',
            choices_function=lambda: [
                x.get_source_id()
                for x in self.prescience_client.sources(page=1).content
            ],
            force_interactive=interactive_mode)
        # Generated dataset/model names are only prompted for interactively.
        if interactive_mode:
            dataset_id = get_args_or_prompt_input(
                arg_name='dataset_id',
                args=args,
                message='What will be the name of the generated dataset',
                force_interactive=interactive_mode)
        else:
            dataset_id = args.get('dataset_id')

        if interactive_mode:
            model_id = get_args_or_prompt_input(
                arg_name='model_id',
                args=args,
                message='What will be the name of the generated model',
                force_interactive=interactive_mode)
        else:
            model_id = args.get('model_id')

        # Column selection defaults to every field of the source schema.
        if interactive_mode:
            selected_column = get_args_or_prompt_checkbox(
                arg_name='columns',
                args=args,
                message=
                'Select the columns you want to keep for your preprocessing',
                choices_function=lambda: [
                    x.name() for x in self.prescience_client.source(source_id).
                    schema().fields()
                ],
                selected_function=lambda: [
                    x.name() for x in self.prescience_client.source(source_id).
                    schema().fields()
                ],
                force_interactive=interactive_mode)
        else:
            selected_column = args.get('columns')

        problem_type = get_args_or_prompt_list(
            arg_name='problem_type',
            args=args,
            message='What kind of problem do you want to solve ?',
            choices_function=lambda: list(map(str, ProblemType)),
            force_interactive=interactive_mode)
        label_id = get_args_or_prompt_list(
            arg_name='label',
            args=args,
            message=
            'What will be your label ? (the column you want to predict)',
            choices_function=lambda: copy.deepcopy(selected_column),
            force_interactive=interactive_mode)
        time_column = args.get('time_column')
        forecasting_horizon_steps = args.get('forecast_horizon_steps')
        forecast_discount = args.get('forecast_discount')
        _problem_type = ProblemType(problem_type)
        # Time-series forecasting needs a dedicated time column (any selected
        # column except the label) plus horizon/discount parameters.
        if _problem_type == ProblemType.TIME_SERIES_FORECAST:
            available_time_columns = copy.deepcopy(selected_column)
            available_time_columns.remove(label_id)
            time_column = get_args_or_prompt_list(
                arg_name='time_column',
                args=args,
                message='What will be the column used for time ?',
                choices_function=lambda: copy.deepcopy(available_time_columns),
                force_interactive=interactive_mode)
            forecasting_horizon_steps = get_args_or_prompt_input(
                arg_name='forecast_horizon_steps',
                args=args,
                message='How many steps do you expect as a forecast horizon ?',
                force_interactive=interactive_mode,
                validator=IntegerValidator,
                filter_func=int)
            # NOTE(review): the message typo "fo you want" is a runtime
            # string, deliberately left untouched here.
            forecast_discount = get_args_or_prompt_input(
                arg_name='forecast_discount',
                args=args,
                message=
                'Which discount value fo you want to apply on your forecasted values ?',
                force_interactive=interactive_mode,
                validator=FloatValidator,
                filter_func=float)
        if interactive_mode:
            nb_fold = get_args_or_prompt_input(
                arg_name='nb_fold',
                args=args,
                message='How many folds do you want ?',
                force_interactive=interactive_mode,
                validator=IntegerValidator,
                filter_func=int)
        else:
            nb_fold = args.get('nb_fold')

        scoring_metric = get_args_or_prompt_list(
            arg_name='scoring-metric',
            args=args,
            message='On which scoring metric do you want to optimize on ?',
            choices_function=lambda: get_scoring_metrics(
                _problem_type, label_id,
                self.prescience_client.source(source_id)),
            force_interactive=interactive_mode)

        if interactive_mode:
            budget = get_args_or_prompt_input(
                arg_name='budget',
                args=args,
                message='Which budget do you want to allow on optimization ?',
                force_interactive=interactive_mode,
                validator=IntegerValidator,
                filter_func=int)
        else:
            budget = args.get('budget')

        if interactive_mode:
            watch = get_args_or_prompt_confirm(
                arg_name='watch',
                args=args,
                message=
                'Do you want to keep watching for the task until it ends ?',
                force_interactive=interactive_mode)
        else:
            watch = args.get('watch')

        # Launch the auto-ML pipeline; only the parent task is kept here.
        task, _, _ = self.prescience_client.start_auto_ml(
            source_id=source_id,
            dataset_id=dataset_id,
            label_id=label_id,
            model_id=model_id,
            problem_type=problem_type,
            scoring_metric=scoring_metric,
            time_column=time_column,
            nb_fold=nb_fold,
            selected_column=selected_column,
            budget=budget,
            forecasting_horizon_steps=forecasting_horizon_steps,
            forecast_discount=forecast_discount)
        if watch:
            task.watch()
コード例 #15
0
 def exec(self, args: dict):
     """Preprocess a source into a dataset.

     Prompts interactively for missing parameters whenever ``source-id``
     is absent from ``args``; time-series problems additionally collect a
     time column, date format, granularity and exogenous date features.
     """
     interactive_mode = args.get('source-id') is None
     # NOTE(review): presence is checked on 'source-id' but the prompt reads
     # arg_name='id' — verify both keys against the argument parser.
     source_id = get_args_or_prompt_list(
         arg_name='id',
         args=args,
         message='Which source do you want to preprocess ?',
         choices_function=lambda: [
             x.get_source_id()
             for x in self.prescience_client.sources(page=1).content
         ],
         force_interactive=interactive_mode)
     # Column selection defaults to every field of the source schema.
     selected_columns = get_args_or_prompt_checkbox(
         arg_name='columns',
         args=args,
         message=
         'Select the columns you want to keep for your preprocessing',
         choices_function=lambda: [
             x.name() for x in self.prescience_client.source(source_id).
             schema().fields()
         ],
         selected_function=lambda: [
             x.name() for x in self.prescience_client.source(source_id).
             schema().fields()
         ],
         force_interactive=interactive_mode)
     problem_type = get_args_or_prompt_list(
         arg_name='problem_type',
         args=args,
         message='What kind of problem do you want to solve ?',
         choices_function=lambda: list(map(str, ProblemType)),
         force_interactive=interactive_mode)
     label = get_args_or_prompt_list(
         arg_name='label',
         args=args,
         message=
         'What will be your label ? (the column you want to predict)',
         choices_function=lambda: copy.deepcopy(selected_columns),
         force_interactive=interactive_mode)
     # Time-series-only parameters; they stay None for other problem types.
     time_column = None
     formatter = None
     exogenous = None
     granularity = None
     if ProblemType(problem_type) == ProblemType.TIME_SERIES_FORECAST:
         # The time column is any selected column except the label.
         available_time_columns = copy.deepcopy(selected_columns)
         available_time_columns.remove(label)
         time_column = get_args_or_prompt_list(
             arg_name='time_column',
             args=args,
             message='What will be the column used for time ?',
             choices_function=lambda: copy.deepcopy(available_time_columns),
             force_interactive=interactive_mode)
         formatter = get_args_or_prompt_input(
             arg_name='date_format',
             args=args,
             message='What date format do you want to use ?',
             force_interactive=interactive_mode)
         granularity = get_args_or_prompt_list(
             arg_name='granularity',
             args=args,
             message='Which granularity is used for your date ?',
             choices_function=lambda:
             ['year', 'month', 'day', 'hour', 'minute'],
             force_interactive=interactive_mode)
         exogenous = get_args_or_prompt_checkbox(
             arg_name='exogenous',
             args=args,
             message='Which exogenous features do you want on your date ?',
             choices_function=lambda:
             ['year', 'month', 'dayofmonth', 'hour', 'minute'],
             selected_function=lambda: [],
             force_interactive=interactive_mode)
     nb_fold = get_args_or_prompt_input(
         arg_name='nb_fold',
         args=args,
         message='How many folds do you want ?',
         force_interactive=interactive_mode)
     dataset_id = get_args_or_prompt_input(
         arg_name='dataset-id',
         args=args,
         message=
         'What will be the name of your dataset ? (should be unique in your project)',
         force_interactive=interactive_mode)
     watch = get_args_or_prompt_confirm(
         arg_name='watch',
         args=args,
         message='Do you want to keep watching for the task until it ends ?',
         force_interactive=interactive_mode)
     # nb_fold is converted with int() here because its prompt above applies
     # no filter_func, unlike the other numeric prompts in this file.
     task = self.prescience_client.preprocess(
         source_id=source_id,
         dataset_id=dataset_id,
         label_id=label,
         problem_type=problem_type,
         selected_column=selected_columns,
         time_column=time_column,
         nb_fold=int(nb_fold),
         formatter=formatter,
         datetime_exogenous=exogenous,
         granularity=granularity)
     if watch:
         task.watch()