# --- "Model Search" workflow component (script fragment) ---
from ls_dataset.d3m_prediction import D3MPrediction
from ls_problem_desc.ls_problem import ProblemDesc
from ls_problem_desc.d3m_problem import DefaultProblemDesc
from d3m_ta2.ta2_client import TA2Client
from d3m_eval.summer_2018.prob_discovery import ProblemDiscoveryWriter
# from ls_workflow.workflow import Workflow as Solution
from modeling.models import *
from modeling.component_out import *

__version__ = '0.1'

if __name__ == '__main__':
    # Parse arguments. get_default_arg_parser / argparse / SettingsFactory /
    # path are presumably supplied by the wildcard imports above — TODO confirm.
    parser = get_default_arg_parser("Model Search")
    parser.add_argument('-file0', type=argparse.FileType('r'),
                        help='the dataset json provided for the search')
    parser.add_argument('-file1', type=argparse.FileType('r'),
                        help='the problem json provided for the search')
    args = parser.parse_args()

    # -is_test is an int flag; treat 1 as "test mode", anything else (or
    # absent) as normal mode.
    if args.is_test is not None:
        is_test = args.is_test == 1
    else:
        is_test = False

    # Get config file
    # NOTE(review): this chunk is truncated mid-call — the remainder of the
    # get_settings(...) invocation is not visible in this view.
    config = SettingsFactory.get_settings(path.join(args.programDir, 'program',
                                                    'settings.cfg'),
                                          program_dir=args.programDir,
                                          working_dir=args.workingDir,
import pandas as pd # Workflow component specific imports from ls_utilities.ls_logging import setup_logging from ls_utilities.cmd_parser import get_default_arg_parser from ls_utilities.ls_wf_settings import * from ls_dataset.d3m_dataset import D3MDataset from modeling.models import * from modeling.component_out import * __version__ = '0.1' if __name__ == '__main__': # Parse argumennts parser = get_default_arg_parser("Rerank Model") parser.add_argument('-model_id', type=str, help='the name of the dataset to import') parser.add_argument('-new_rank', type=int, help='the new rank to resort the specified model') parser.add_argument('-file0', type=argparse.FileType('r'), help='the tab-separated list of models to select from') args = parser.parse_args() if args.is_test is not None: is_test = args.is_test == 1 else: is_test = False
# plt.yticks(tick_marks, classes) # fmt = '.2f' if normalize else 'd' # thresh = cm.max() / 2. # for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): # plt.text(j, i, format(cm[i, j], fmt), # horizontalalignment="center", # color="white" if cm[i, j] > thresh else "black") # plt.tight_layout() # plt.ylabel('True label') # plt.xlabel('Predicted label') if __name__ == '__main__': # Parse argumennts parser = get_default_arg_parser("D3M Compare Model Predictions") parser.add_argument('-file0', type=argparse.FileType('r'), help='the dataset json provided for the search') parser.add_argument('-file1', type=argparse.FileType('r'), help='the problem json provided for the search') parser.add_argument('-file2', type=argparse.FileType('r'), help='at tab-delimited list of the fitted models') parser.add_argument('-file3', type=argparse.FileType('r'), help='the csv of a data predictions dataframe') args = parser.parse_args() if args.is_test is not None:
import csv # Workflow component specific imports from ls_utilities.ls_logging import setup_logging from ls_utilities.cmd_parser import get_default_arg_parser from ls_utilities.ls_wf_settings import * from ls_dataset.d3m_dataset import D3MDataset # from ls_problem_desc.d3m_problem import * from ls_problem_desc.ls_problem import * __version__ = '0.1' if __name__ == '__main__': # Parse argumennts parser = get_default_arg_parser("Initialize a new problem") parser.add_argument('-probname', type=str, help='the name of the new problem given by the user') parser.add_argument( '-probdesc', type=str, help='the plain text description of the problem supplied by the user') parser.add_argument('-targetname', type=str, help='the name of the column from the dataset to use') parser.add_argument('-file0', type=argparse.FileType('r'), help='the description of the dataset') args = parser.parse_args()
# --- "Model Predict" workflow component ---
from ls_utilities.cmd_parser import get_default_arg_parser
from ls_utilities.ls_wf_settings import SettingsFactory
from ls_dataset.d3m_dataset import D3MDataset
# from ls_dataset.d3m_prediction import D3MPrediction
# from ls_problem_desc.ls_problem import ProblemDesc
# from ls_problem_desc.d3m_problem import D3MProblemDesc
# from ls_workflow.workflow import Workflow
from d3m_ta2.ta2_client import TA2Client
from modeling.models import *
from modeling.component_out import *

__version__ = '0.1'

if __name__ == '__main__':
    # Parse arguments. argparse is presumably re-exported by a wildcard
    # import above — TODO confirm.
    parser = get_default_arg_parser("Model Predict")
    parser.add_argument(
        '-file0', type=argparse.FileType('r'),
        help='the dataset json provided for making predictions')
    parser.add_argument('-file1', type=argparse.FileType('r'),
                        help='at tab-delimited list of the fitted models')
    args = parser.parse_args()

    # -is_test is an int flag; 1 enables test mode, absent/other disables it.
    if args.is_test is not None:
        is_test = args.is_test == 1
    else:
        is_test = False

    # Get config file
# from ls_utilities.ls_wf_settings import Settings as stg from ls_utilities.ls_wf_settings import SettingsFactory from ls_dataset.d3m_dataset import D3MDataset from ls_dataset.d3m_prediction import D3MPrediction from ls_problem_desc.ls_problem import ProblemDesc from ls_problem_desc.d3m_problem import DefaultProblemDesc from d3m_ta2.ta2_client import TA2Client from d3m_eval.summer_2018.prob_discovery import ProblemDiscoveryWriter from modeling.models import * from modeling.component_out import * __version__ = '0.1' if __name__ == '__main__': # Parse argumennts parser = get_default_arg_parser("Fit Models") parser.add_argument('-file0', type=argparse.FileType('r'), help='the dataset json provided for the search') parser.add_argument('-file1', type=argparse.FileType('r'), help='at tab-delimited list of models to fit') args = parser.parse_args() if args.is_test is not None: is_test = args.is_test == 1 else: is_test = False # Get config file config = SettingsFactory.get_settings(path.join(args.programDir, 'program',
    # --- "D3M Visualize Confusion Matrix" component (script fragment) ---
    # NOTE(review): this chunk starts mid-function — the enclosing `def` (a
    # matplotlib confusion-matrix renderer, judging by cm/plt usage) is not
    # visible in this view, so the original indentation level is presumed.
    # Cells above half the max count get white text for contrast.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')


if __name__ == '__main__':
    # Parse arguments
    parser = get_default_arg_parser("D3M Visualize Confusion Matrix")
    parser.add_argument(
        '-file0', type=argparse.FileType('r'),
        help='the dataset json including pipeline search result')
    args = parser.parse_args()

    # -is_test is an int flag; 1 enables test mode, absent/other disables it.
    if args.is_test is not None:
        is_test = args.is_test == 1
    else:
        is_test = False

    # Get config file
    # NOTE(review): chunk is truncated mid-call — the remainder of the
    # get_settings(...) invocation is not visible in this view.
    config = SettingsFactory.get_settings(path.join(args.programDir, 'program',
                                                    'settings.cfg'),
                                          program_dir=args.programDir,
# --- "Model Rank" workflow component ---
from ls_dataset.d3m_prediction import D3MPrediction
from ls_problem_desc.ls_problem import ProblemDesc
from ls_problem_desc.d3m_problem import DefaultProblemDesc
from d3m_ta2.ta2_client import TA2Client
# from ls_workflow.workflow import Workflow as Solution
from modeling.models import Model
from modeling.component_out import *
from modeling.scores import *

__version__ = '0.1'

if __name__ == '__main__':
    # Parse arguments: the ranking metric, sort order, dataset json, and the
    # model set to rank.
    parser = get_default_arg_parser("Model Rank")
    parser.add_argument('-metric', type=str,
                        help='the metric to use to compare the models')
    parser.add_argument('-ordering', type=str,
                        help='the sort order use to rank the models')
    parser.add_argument('-file0', type=argparse.FileType('r'),
                        help='the dataset json provided for the search')
    parser.add_argument('-file1', type=argparse.FileType('r'),
                        help='the set of models to score')
    args = parser.parse_args()

    # -is_test is an int flag; 1 enables test mode, absent/other disables it.
    if args.is_test is not None:
        is_test = args.is_test == 1
    else:
        is_test = False
import csv # Workflow component specific imports from ls_utilities.ls_logging import setup_logging from ls_utilities.cmd_parser import get_default_arg_parser from ls_utilities.ls_wf_settings import * from ls_dataset.d3m_dataset import D3MDataset # from ls_problem_desc.d3m_problem import * from ls_problem_desc.ls_problem import * __version__ = '0.1' if __name__ == '__main__': # Parse argumennts parser = get_default_arg_parser("Select Problem Task Select") parser.add_argument('-task_name', type=str, help='the task type the user selected') parser.add_argument('-file0', type=argparse.FileType('r'), help='the description of the dataset') parser.add_argument('-file1', type=argparse.FileType('r'), help='the problem template with target selected') args = parser.parse_args() if args.is_test is not None: is_test = args.is_test == 1 else: is_test = False
# --- "Import List of available D3M Datasets" workflow component ---
import os.path as path
import os
import csv

# Workflow component specific imports
from ls_utilities.ls_logging import setup_logging
from ls_utilities.cmd_parser import get_default_arg_parser
from ls_utilities.ls_wf_settings import *
from ls_dataset.d3m_dataset import D3MDataset

__version__ = '0.1'

if __name__ == '__main__':
    # Parse arguments (only the defaults from get_default_arg_parser, e.g.
    # programDir / workingDir / is_test — no component-specific flags).
    parser = get_default_arg_parser("Import List of available D3M Datasets")
    args = parser.parse_args()

    # -is_test is an int flag; 1 enables test mode, absent/other disables it.
    if args.is_test is not None:
        is_test = args.is_test == 1
    else:
        is_test = False

    # Get config file
    config = SettingsFactory.get_settings(path.join(args.programDir, 'program',
                                                    'settings.cfg'),
                                          program_dir=args.programDir,
                                          working_dir=args.workingDir,
                                          is_test=is_test)
    # Setup Logging
import csv # Workflow component specific imports from ls_utilities.ls_logging import setup_logging from ls_utilities.cmd_parser import get_default_arg_parser from ls_utilities.ls_wf_settings import * from ls_dataset.d3m_dataset import D3MDataset from modeling.models import * from modeling.component_out import * __version__ = '0.1' if __name__ == '__main__': # Parse argumennts parser = get_default_arg_parser("Select Model") parser.add_argument('-model_id', type=str, help='the name of the dataset to import') parser.add_argument('-file0', type=argparse.FileType('r'), help='the tab-separated list of models to select from') args = parser.parse_args() if args.is_test is not None: is_test = args.is_test == 1 else: is_test = False # Get config file config = SettingsFactory.get_settings(path.join(args.programDir, 'program',
# Workflow component specific imports from ls_utilities.ls_logging import setup_logging from ls_utilities.cmd_parser import get_default_arg_parser from ls_utilities.ls_wf_settings import * from ls_dataset.d3m_dataset import D3MDataset from ls_problem_desc.d3m_problem import DefaultProblemDesc from ls_problem_desc.ls_problem import ProblemDesc __version__ = '0.1' if __name__ == '__main__': # Parse argumennts parser = get_default_arg_parser("Generate Default Problem") parser.add_argument('-file0', type=argparse.FileType('r'), help='the dataset json provided for the search') args = parser.parse_args() if args.is_test is not None: is_test = args.is_test == 1 else: is_test = False # Get config file config = SettingsFactory.get_settings(path.join(args.programDir, 'program', 'settings.cfg'), program_dir=args.programDir, working_dir=args.workingDir, is_test=is_test )
class LS_Path_Factory(object):
    """Build output and hosted-URL paths for a LearnSphere component run.

    Holds the run's working and program directories and resolves relative
    file paths against them.
    """

    def __init__(self, workingDir, programDir):
        # workingDir: directory where component outputs are written.
        # programDir: directory containing the component's program files.
        self.workingDir = workingDir
        self.programDir = programDir

    def get_out_path(self, fpath):
        """Return *fpath* joined under the working directory."""
        return path.join(self.workingDir, fpath)

    def get_hosted_path(self, fpath):
        """Return the LearnSphere-hosted URL form of the output path."""
        return "LearnSphere?htmlPath=" + self.get_out_path(fpath)


# --- "Compare Model Scores" component entry point (script fragment) ---
if __name__ == '__main__':
    # Parse arguments
    parser = get_default_arg_parser("Compare Model Scores")
    parser.add_argument('-file0', type=argparse.FileType('r'),
                        help='the scores for the model to render in a boxplot')
    args = parser.parse_args()

    # -is_test is an int flag; 1 enables test mode, absent/other disables it.
    if args.is_test is not None:
        is_test = args.is_test == 1
    else:
        is_test = False

    # Get config file
    # NOTE(review): chunk is truncated mid-call — the remainder of the
    # get_settings(...) invocation is not visible in this view.
    config = SettingsFactory.get_settings(path.join(args.programDir, 'program',
                                                    'settings.cfg'),
                                          program_dir=args.programDir,
                                          working_dir=args.workingDir,
# --- "Export Models" workflow component ---
from ls_utilities.ls_logging import setup_logging
from ls_utilities.cmd_parser import get_default_arg_parser
from ls_utilities.ls_wf_settings import *
from ls_dataset.d3m_dataset import D3MDataset
from modeling.models import *
from modeling.component_out import *
from d3m_ta2.ta2_client import TA2Client
from d3m_eval.summer_2018.model_generation import RankedPipelineWriter

__version__ = '0.1'

if __name__ == '__main__':
    # Parse arguments. argparse / path / SettingsFactory are presumably
    # supplied by the wildcard imports above — TODO confirm.
    parser = get_default_arg_parser("Export Models")
    parser.add_argument('-file0', type=argparse.FileType('r'),
                        help='the tab-separated list of ranked models to export')
    args = parser.parse_args()

    # -is_test is an int flag; 1 enables test mode, absent/other disables it.
    if args.is_test is not None:
        is_test = args.is_test == 1
    else:
        is_test = False

    # Get config file
    config = SettingsFactory.get_settings(path.join(args.programDir, 'program',
                                                    'settings.cfg'),
                                          program_dir=args.programDir,
                                          working_dir=args.workingDir,
                                          is_test=is_test
                                          )
import csv # Workflow component specific imports from ls_utilities.ls_logging import setup_logging from ls_utilities.cmd_parser import get_default_arg_parser from ls_utilities.ls_wf_settings import * from ls_dataset.d3m_dataset import D3MDataset # from ls_problem_desc.d3m_problem import * from ls_problem_desc.ls_problem import * __version__ = '0.1' if __name__ == '__main__': # Parse argumennts parser = get_default_arg_parser("Select Problem Metric") parser.add_argument('-metric', type=str, help='the metric the user selected') parser.add_argument('-file0', type=argparse.FileType('r'), help='the description of the dataset') parser.add_argument('-file1', type=argparse.FileType('r'), help='the problem template with target selected') args = parser.parse_args() if args.is_test is not None: is_test = args.is_test == 1 else: is_test = False
# --- "Model Score" workflow component ---
from ls_utilities.ls_wf_settings import *
from ls_dataset.d3m_dataset import D3MDataset
from ls_dataset.d3m_prediction import D3MPrediction
from ls_problem_desc.ls_problem import ProblemDesc
from ls_problem_desc.d3m_problem import DefaultProblemDesc
from d3m_ta2.ta2_client import TA2Client
# from ls_workflow.workflow import Workflow as Solution
from modeling.models import Model
from modeling.component_out import *
from modeling.scores import *

__version__ = '0.1'

if __name__ == '__main__':
    # Parse arguments: the scoring metric, dataset json, and the model set.
    parser = get_default_arg_parser("Model Score")
    parser.add_argument('-metric', type=str,
                        help='the metric to use to compare the models')
    parser.add_argument('-file0', type=argparse.FileType('r'),
                        help='the dataset json provided for the search')
    parser.add_argument('-file1', type=argparse.FileType('r'),
                        help='the set of models to score')
    args = parser.parse_args()

    # -is_test is an int flag; 1 enables test mode, absent/other disables it.
    if args.is_test is not None:
        is_test = args.is_test == 1
    else:
        is_test = False
# --- "D3M Dataset Augmenter" workflow component (script fragment) ---
from ls_utilities.cmd_parser import get_default_arg_parser
from ls_utilities.ls_wf_settings import SettingsFactory
from ls_utilities.ls_wf_settings import *
from ls_dataset.d3m_dataset import D3MDataset
from ls_dataset.d3m_prediction import D3MPrediction
from ls_problem_desc.ls_problem import ProblemDesc
from ls_problem_desc.d3m_problem import DefaultProblemDesc
from modeling.models import *
from modeling.component_out import *

__version__ = '0.1'

if __name__ == '__main__':
    # Parse arguments. argparse / path are presumably supplied by the
    # wildcard imports above — TODO confirm.
    parser = get_default_arg_parser("D3M Dataset Augmenter")
    parser.add_argument('-file0', type=argparse.FileType('r'),
                        help='the dataset json provided for the search')
    parser.add_argument('-file1', type=argparse.FileType('r'),
                        help='the problem json provided for the search')
    args = parser.parse_args()

    # -is_test is an int flag; 1 enables test mode, absent/other disables it.
    if args.is_test is not None:
        is_test = args.is_test == 1
    else:
        is_test = False

    # Get config file
    # NOTE(review): chunk is truncated mid-call — the rest of the
    # get_settings(...) invocation is not visible in this view.
    config = SettingsFactory.get_settings(path.join(args.programDir, 'program',
# --- "Select Dataset" workflow component (script fragment) ---
import pprint
import argparse
import csv

# Workflow component specific imports
from ls_utilities.ls_logging import setup_logging
from ls_utilities.cmd_parser import get_default_arg_parser
from ls_utilities.ls_wf_settings import *
from ls_dataset.d3m_dataset import D3MDataset

__version__ = '0.1'

if __name__ == '__main__':
    # Parse arguments: the dataset name to select plus the catalog file.
    parser = get_default_arg_parser("Select Dataset")
    parser.add_argument('-ds_name', type=str,
                        help='the name of the dataset to import')
    parser.add_argument('-file0', type=argparse.FileType('r'),
                        help='the file list of all datasets')
    args = parser.parse_args()

    # -is_test is an int flag; 1 enables test mode, absent/other disables it.
    if args.is_test is not None:
        is_test = args.is_test == 1
    else:
        is_test = False

    # Get config file
    # NOTE(review): chunk is truncated mid-call — the remainder of the
    # get_settings(...) invocation is not visible in this view.
    config = SettingsFactory.get_settings(path.join(args.programDir, 'program',
                                                    'settings.cfg'),
                                          program_dir=args.programDir,
                                          working_dir=args.workingDir,