Code example #1
    def test_times_execution_and_adds_it_to_result(self):
        result = pipeline.Pipeline({
            "Step1": lambda ctx: self.mark_as_executed()
        }).execute()

        self.assertEqual(result["timings"][0],
                         "Completed: 'Step1''. Took 0.0ms")
Code example #2
def run_cv(dataset_type,
           cls_type,
           use_data_subset=False,
           use_features=True,
           use_class_weights=True,
           generator_epochs=2,
           classifier_epochs=12,
           n_clusters=30,
           fix_class_imbalance=False):

    base_dir = config.local_results_dir
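    # Note: hidden_sizes and λs are not defined in this snippet; presumably
    # they are module-level hyperparameter grids in the original project.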
    for hidden_size in hidden_sizes:
        for λ in λs:
            _logger.info('run for hidden_size=%d and λ=%f' % (hidden_size, λ))
            pipeline.Pipeline(dataset_type,
                              cls_type,
                              use_data_subset,
                              use_features,
                              use_class_weights,
                              generator_epochs,
                              classifier_epochs,
                              n_clusters,
                              hidden_size,
                              λ,
                              fix_class_imbalance=fix_class_imbalance)

            new_dir_path = os.path.join(base_dir, '%d_%f' % (hidden_size, λ))
            os.makedirs(new_dir_path)
            for png_file in glob.glob(os.path.join(base_dir, '*.png')):
                shutil.copy2(png_file, new_dir_path)
Code example #3
    def test_and_run_multiple_pipeline_jobs(self):

        with open("./test_pipeline_build_custom.json") as f:
            pipeline_structure = json.load(f)

        pipeline_name = "test custom pipeline"
        sys.path.insert(
            0, self.config["local_pipeline_import_path"][pipeline_name])

        pipeline_obj = pipeline.Pipeline(pipeline_name, self.connection,
                                         self.meta_data)
        pipeline_obj.load_steps_into_db(pipeline_structure)

        jobs_obj1 = pipeline.Jobs("Test custom job", self.connection,
                                  self.meta_data)
        jobs_obj1.create_jobs_to_run("test custom pipeline")

        jobs_obj1.run_job()

        with open("test_output_custom.json", "r") as f:
            output1 = json.load(f)

        jobs_obj2 = pipeline.Jobs("Test custom job", self.connection,
                                  self.meta_data)
        jobs_obj2.create_jobs_to_run("test custom pipeline")

        jobs_obj2.run_job()

        with open("test_output_custom.json", "r") as f:
            output2 = json.load(f)

        self.assertEqual(len(output1), len(output2))
Code example #4
def run(file_name, configuration, files):
    pp = pipeline.Pipeline(configuration)
    pp.populate_table("maccoss-fasta", "tide/", files)

    name = "0/123.400000-13/1-1/1-0.000000-1-tide.mzML"
    pp.run(name, file_name)
    return pp.task_queue
Code example #5
    def verify(self, area, task_name, vis):
        # ---------------------------
        #  prepare to check the task
        # ---------------------------
        h_init()
        h_save()
        hifv_importdata(vis=[vis], session=['session_1'], overwrite=False)
        h_save()

        context = pipeline.Pipeline(context='last').context

        # ---------------------------
        #  execute the task
        # ---------------------------
        inputs = eval('pipeline.{area}.tasks.{task}.{captask}.Inputs(context)'.format(area=area, task=task_name, captask=task_name.capitalize()))
        task = eval('pipeline.{area}.tasks.{task}.{captask}(inputs)'.format(area=area, task=task_name, captask=task_name.capitalize()))
        result = task.execute(dry_run=False)
        result.accept(context)
        context.save()

        # --------------------------------
        #   run as a registered CASA task
        # --------------------------------
        try:
            eval('{area}_{task}()'.format(area=area, task=task_name))
            h_save()
        except NameError as ee:
            print('ERROR: {msg}'.format(msg=ee))
            print('\tTry using runsetup to register the new task with CASA first.')
Code example #6
File: test_quitter.py Project: icefoxx/basilisk
    def setUp(self):
        self.TestQuitter = pipeline.Quitter()
        self.TestQuitter.settings['logpath'] = '.'
        self.TestQuitter.settings['logfile'] = 'test.log'
        self.TestQuitter.settings['MsgTheme'] = 1
        self.TestPipeline = pipeline.Pipeline()
        self.TestPipeline._InitLog()
Code example #7
File: pipeline.py Project: Foztarz/bb_behavior
def get_default_pipeline(localizer_threshold=None, verbose=False):
    """Creates and returns a bb_pipeline Pipeline object that is configured to
    take an image and return all info required for bb_binary (PipelineResult).
    
    Arguments:
        localizer_threshold: float
            Threshold for the localizer in the pipeline.
        verbose: bool
            Whether to also provide the CrownOverlay output for display purposes (slower).
        
    Returns:
        bb_pipeline.pipeline.Pipeline object, ready to be used.
    """
    import pipeline
    import pipeline.pipeline
    import pipeline.objects

    outputs = [pipeline.objects.PipelineResult]
    if verbose:
        outputs += [pipeline.objects.CrownOverlay]
    conf = pipeline.pipeline.get_auto_config()
    if localizer_threshold is not None:
        conf['Localizer']['threshold_tag'] = localizer_threshold
    decoder_pipeline = pipeline.Pipeline(
        [pipeline.objects.Image],  # inputs
        outputs,  # outputs
        **conf)
    return decoder_pipeline
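A brief usage sketch for get_default_pipeline() above; the threshold value of 0.4 is an arbitrary placeholder, and only the parameter names come from the signature and docstring:

# Hypothetical call: enable the (slower) CrownOverlay output for display and
# override the localizer threshold with an arbitrary illustrative value.
decoder = get_default_pipeline(localizer_threshold=0.4, verbose=True)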
Code example #8
def main(sleep_time=constants.IMAGE_PROCESSOR_DAEMON_SLEEP_TIME_S):

    logging.basicConfig(level=logging.INFO,
                        format=constants.LOG_FMT_S_THREADED)

    logging.info("Reading images from " + config.GCS_BUCKET)
    logging.info("Writing images from " + config.GCS_PROCESSED_PHOTOS_BUCKET)

    # Get the current project's storage and datastore clients
    credentials = sa.get_credentials()
    datastore_client = datastore.Client(project=config.PROJECT_ID, \
                                        credentials=credentials)

    storage_client = storage.client.Client(project=config.PROJECT_ID, \
                                           credentials=credentials)

    # Create new instance of image_processor pipeline w/ datastore & GCS
    image_processor_pipeline = pipeline.Pipeline(datastore_client, storage_client)


    while True:

        # Get all newly pre-processed images
        fnames = image_processor_pipeline.scan(ds.DATASTORE_PHOTO)

        if fnames:
            processed_fnames = image_processor_pipeline.process(fnames)

            if processed_fnames:
                # copy the processed results to GCS, update the Photo record's
                # processed field, and add the ProcessedImage record
                uploaded_fnames = image_processor_pipeline.upload(processed_fnames)

        # Allow files to accumulate before taking our next pass
        time.sleep(sleep_time)
Code example #9
def main(data_filepath, output_dir, grid_size, debug):
    '''
    Mainflow function for running the full ML pipeline.
    *Includes specific parameters for this project.

    Inputs:
        data_filepath: (str)
        output_dir: (str) output directory
        grid_size: (str) "large", "small", or "test"
        debug: (bool)
    '''
    print('Reading data...')
    # Read data
    coltypes = {'school_ncesid': str}
    parse_dates = ['date_posted', 'datefullyfunded']
    df = pp.read_csv(data_filepath, coltypes=coltypes, parse_dates=parse_dates)

    print('Preparing data...')
    # Prepare data
    df = preprocess.pre_pipeline_clean(df)

    print('Constructing pipeline...')
    # Construct pipeline
    pipeline = pp.Pipeline()

    # Set pipeline parameters
    label = 'not_funded_wi_60d'
    predictor_sets = [[
        'school_city', 'school_state', 'school_metro', 'school_district',
        'school_county', 'school_charter', 'school_magnet', 'teacher_prefix',
        'primary_focus_subject', 'primary_focus_area',
        'secondary_focus_subject', 'secondary_focus_area', 'resource_type',
        'poverty_level', 'grade_level',
        'total_price_including_optional_support', 'students_reached',
        'eligible_double_your_impact_match'
    ]]
    time_col = 'date_posted'
    start = parser.parse('2012-01-01')
    end = parser.parse('2013-12-31')
    test_window_months = 6
    outcome_lag_days = 60
    output_filename = 'evaluations.csv'
    ks = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]

    print('Running pipeline...')
    pipeline.run(df,
                 time_col,
                 predictor_sets,
                 label,
                 start,
                 end,
                 test_window_months,
                 outcome_lag_days,
                 output_dir,
                 output_filename,
                 grid_size=grid_size,
                 thresholds=[],
                 ks=ks,
                 save_output=True,
                 debug=debug)
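A hedged invocation sketch for main() above; the data path and output directory are hypothetical placeholders, while the allowed grid_size values are taken from the docstring:

if __name__ == '__main__':
    # Hypothetical paths; grid_size must be "large", "small", or "test".
    main(data_filepath='data/projects.csv',
         output_dir='output/',
         grid_size='test',
         debug=False)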
Code example #10
File: main.py Project: tomcur/fisheries-monitoring
def example_crop_plot():
    import scipy

    def crop_and_resize(img, y, meta):
        bbox = meta['bounding_boxes'][0]
        x = round(bbox['x'])
        y = round(bbox['y'])
        width = round(bbox['width'])
        height = round(bbox['height'])

        img_height = len(img)

        img = img[y:y + height, x:x + width, :]

        return img

    pl = pipeline.Pipeline(class_filter=["NoF"], f_middleware=crop_and_resize)
    class_count = pl.class_count()
    class_count_idx = {}
    for clss in class_count:
        class_count_idx[settings.CLASS_NAME_TO_INDEX_MAPPING[clss]] = float(
            class_count[clss]) / pl.num_unique_samples()

    generators = pl.train_and_validation_data_generator_builder()

    (x, y, meta) = next(generators['train'])
    img = x

    import matplotlib.pyplot as plt
    plt.imshow(img)
    plt.ylabel('some numbers')
    plt.show()
Code example #11
File: main.py Project: tomcur/fisheries-monitoring
def example_train_and_validation_split():
    """
    Run the pipeline example (tests whether the pipeline runs successfully; should produce summary output of the first batch and the first case in that batch).
    """

    pl = pipeline.Pipeline(data_type="ground_truth_cropped")

    generator = pl.train_and_validation_data_generator_builder(
        pl.mini_batch_generator, balance=True, infinite=True)

    x, y, meta = next(generator['train'])
    print("Number of cases in first batch: %s" % len(x))
    print("First image shape and label: %s - %s" % (str(x[0].shape), y[0]))
    print("First image meta information:")
    pprint.pprint(meta[0])

    print("Class counts:")
    class_counts = {}
    for clss in y:
        if clss not in class_counts:
            class_counts[clss] = 0

        class_counts[clss] += 1

    pprint.pprint(class_counts)
Code example #12
def main():
    pipeline_obj = pipeline.Pipeline()

    pipeline_obj.createSparkSession()
    # pipeline.testSparkSession()

    pipeline_obj.runPipeline()
Code example #13
def main():
    import data
    data_path = "../data/wikitext-2/wiki.train.tokens"
    corpus = data.Corpus(data_path, is_test=False)
    train_data = corpus.tokenize()
    print("Size of training data = {}".format(train_data.size()))

    import net
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    rnn = net.RNN(vocab_size=len(corpus.dictionary),
                  embed_size=300,
                  hidden_size=1024).to(device)
    print("Model structure: {}".format(rnn))

    import pipeline
    zeros = (torch.zeros(1, 100,
                         1024).to(device), torch.zeros(1, 100,
                                                       1024).to(device))
    pipe = pipeline.Pipeline(rnn,
                             device,
                             train_data,
                             epochs=20,
                             seq_length=10,
                             init_states=zeros)
    pipe.run()
    pass
Code example #14
File: pipeline_ssw_test.py Project: wkclalala/ripple
    def test_basic(self):
        pp: pipeline.Pipeline = pipeline.Pipeline("ssw/smith-waterman.json")

        pp.populate_table("ssw-database", "ssw/", ["uniprot.fasta"])
        pp.populate_table("ssw-program", "ssw/", ["ssw_test"])

        name = "0/123.400000-13/1-1/1-0.000000-1-fasta.fasta"
        pp.run(name, "ssw/input-10.fasta")

        entries: List[TestEntry] = pp.database.get_entries(pp.table.name)
        entry: TestEntry = entries[-1]
        actual_output: List[bytes] = list(
            filter(lambda item: len(item.strip()) > 0,
                   entry.get_content().split(b"\n\n")))
        blast = pp.__import_format__("blast")
        actual_output = sorted(actual_output,
                               key=lambda item: [
                                   blast.Iterator.get_identifier_value(
                                       item, blast.Identifiers.score), item
                               ])

        with open(pp.dir_path + "/ssw/output", "rb") as f:
            expected_output: List[bytes] = list(
                filter(lambda item: len(item.strip()) > 0,
                       f.read().split(b"\n\n")))
        expected_output = sorted(expected_output,
                                 key=lambda item: [
                                     blast.Iterator.get_identifier_value(
                                         item, blast.Identifiers.score), item
                                 ])

        self.assertCountEqual(actual_output, expected_output)
        pp.database.destroy()
Code example #15
    def test_basic(self):
        pp: pipeline.Pipeline = pipeline.Pipeline(
            "spacenet/spacenet-classification.json")
        pp.populate_table("spacenet", "spacenet/",
                          ["train.classification.w1-h1"])

        name = "0/123.400000-13/1-1/1-0.000000-1-image.tif"
        pp.run(name, "spacenet/3band_AOI_1_RIO_img1457.tif")
Code example #16
    def test_execute_runs_steps(self):
        self.has_run = False

        result = pipeline.Pipeline({
            "Step1": lambda ctx: self.mark_as_executed()
        }).execute()

        self.assertEqual(self.has_run, True)
Code example #17
    def test_process(self):
        p = pipeline.Pipeline()
        imgs = image_util.loadImagesRGB(IMG_DIR)
        for i, img in enumerate(imgs):
            processed_img = p.process(img)
            image_util.saveBeforeAfterImages(
                img, "Original", processed_img, "Thresholded",
                TEST_OUT_DIR + "/thresholded" + str(i) + ".jpg")
Code example #18
def classify_fish_or_no_fish(params: prep_classif):
    """
    Stage 3
    """

    mini_batch_size = 32  # decrease in case of resource exhausted errors

    import keras
    import metrics
    import shutil
    from time import strftime

    ppl = pipeline.Pipeline(data_type="candidates_fullyconv_cropped",
                            dataset=params.dataset)

    # Load fish-or-no-fish classification model
    model = keras.models.load_model(os.path.join(
        settings.WEIGHTS_DIR,
        settings.FISH_OR_NO_FISH_CLASSIFICATION_NETWORK_WEIGHT_NAME),
                                    custom_objects={
                                        'precision': metrics.precision,
                                        'recall': metrics.recall
                                    })

    data_generator = ppl.data_generator_builder(
        functools.partial(ppl.mini_batch_generator,
                          mini_batch_size=mini_batch_size))

    predicted = {}
    n_batches = 0
    batch_print_interval = int(100 / mini_batch_size) + 1

    # For each batch
    for x, y, meta in data_generator:
        x = np.array(x)

        predictions = model.predict(x, batch_size=mini_batch_size)

        for m, pred in zip(meta, list(predictions)):
            predicted[m['filename']] = float(pred[0])

        n_batches += 1
        if n_batches % batch_print_interval == 0:
            print('%d candidates processed' % (mini_batch_size * n_batches))

    # Save classifications
    if not os.path.exists(params.fish_or_no_fish_classification_dir):
        os.makedirs(params.fish_or_no_fish_classification_dir)

    outpath = os.path.join(params.fish_or_no_fish_classification_dir,
                           "classification.json")
    outpath2 = os.path.join(
        params.fish_or_no_fish_classification_dir,
        "classification-%s.json" % strftime("%Y%m%dT%H%M%S"))
    with open(outpath, 'w') as outfile:
        json.dump(predicted, outfile)

    shutil.copyfile(outpath, outpath2)
Code example #19
def classify_fish_type(params: prep_classif):
    """
    Stage 4
    """

    import keras
    import metrics
    import shutil
    from time import strftime

    ppl = pipeline.Pipeline(data_type="candidates_fullyconv_cropped",
                            dataset=params.dataset)

    # Load fish type classification model
    model = keras.models.load_model(os.path.join(
        settings.WEIGHTS_DIR,
        settings.FISH_TYPE_CLASSIFICATION_NETWORK_WEIGHT_NAME),
                                    custom_objects={
                                        'precision': metrics.precision,
                                        'recall': metrics.recall
                                    })

    data = ppl.get_data()

    fish_type_classification = {}
    n_imgs = 0

    # For each single crop
    for x, meta in zip(data['x'], data['meta']):
        #x = np.array(x)

        img = x()
        img = np.array([img])

        predictions = model.predict(img, batch_size=1)

        fish_type_classification[meta['filename']] = [
            float(pred) for pred in predictions.tolist()[0]
        ]

        n_imgs += 1
        if n_imgs % 100 == 0:
            print('%d candidates processed' % n_imgs)

    # Save classifications
    if not os.path.exists(params.fish_type_classification_dir):
        os.makedirs(params.fish_type_classification_dir)

    outpath = os.path.join(params.fish_type_classification_dir,
                           "classification.json")
    outpath2 = os.path.join(
        params.fish_type_classification_dir,
        "classification-%s.json" % strftime("%Y%m%dT%H%M%S"))
    with open(outpath, 'w') as outfile:
        json.dump(fish_type_classification, outfile)

    shutil.copyfile(outpath, outpath2)
Code example #20
File: run_info.py Project: mai00fti/uap
def main(args):
    p = pipeline.Pipeline(arguments=args)
    group_by_status = True

    if args.sources:
        # print all sources (i. e. instances of AbstractSourceStep)
        p.print_source_runs()

    elif len( args.run ) >= 1:
        # print run infos of one or more specific tasks
        for task_id in args.run:
            parts = task_id.split('/')
            if len(parts) != 2:
                raise StandardError("Invalid run ID %s." % task_id)
            step_name = parts[0]
            run_id = parts[1]
            run = p.steps[step_name].get_run(run_id)
            report = run.as_dict()
            report['state'] = p.steps[step_name].get_run_state(run_id)
            shebang = "#!/usr/bin/env bash"
            print(shebang + "\n")
            report_header = "%s/%s -- Report" % (step_name, run_id)
            print("# " + report_header)
            print("# " + "=" * len(report_header) + "\n#")
            dump = yaml.dump(report, default_flow_style = False)
            for line in dump.split('\n'):
                print("# " + line)
            exec_header = "%s/%s -- Commands" % (step_name, run_id)
            print("# " + exec_header)
            print("# " + "=" * len(exec_header) + "\n")
            eg_count = 1
            for exec_group in run.get_exec_groups():
                goc_header = "%d. Group of Commands" % eg_count
                eg_count += 1
                pipe_count = 1
                cmd_count = 1
                for poc in exec_group.get_pipes_and_commands():
                    # for each pipe or command (poc)
                    # check if it is a pipeline ...
                    
                    if isinstance(poc, pipeline_info.PipelineInfo):
                        pipe_header = goc_header + " -- %d. Pipeline" % pipe_count
                        print("# " + pipe_header)
                        print("# " + "-" * len(pipe_header) + "\n")
                        # ... create a pipeline ...
                        pipe = list()
                        for command in poc.get_commands():
                            pipe.append( " ".join(command.get_command()) )
                        print( " | ".join(pipe) + "\n" )
                        pipe_count += 1
                    elif isinstance(poc, command_info.CommandInfo):
                        cmd_header = goc_header + " -- %d. Command" % cmd_count
                        print("# " + cmd_header)
                        print("# " + "-" * len(cmd_header) + "\n")
                        print(" ".join(poc.get_command()) + "\n")
                        cmd_count += 1
Code example #21
    def test_times_execution_and_adds_it_to_result_for_actual_work(self):
        with self.assertLogs('', level='INFO') as cm:
            pipeline.Pipeline({"Step1": lambda ctx: time.sleep(1)}).execute()

            # "INFO:root:Completed: 'Step1'. Took 1"

            self.assertTrue(
                any(
                    logline.startswith("INFO:root:Completed: 'Step1'. Took 1")
                    for logline in cm.output))
Code example #22
File: kit.py Project: timhannifan/ml-toolkit
def build_pipeline(path, target_col, dummify_target,
                   discretize_cols, dummify_cols, reports):
    '''
    Builds a complete pipeline using ml-toolkit classes
    '''
    pipe = pipeline.Pipeline()
    read_step = reader.CSVReader()
    explore_step = explorer.DataExplorer()
    parse_step = dparser.DataParser()
    features_step = features.FeatureGenerator()
    classify_step = classifier.Classifier()
    evaluate_step = evaluate.ModelEvaluator()
    pipe.clear()
    
    read_step.load(path)
    pipe.add(read_step)

    explore_step.configure({
        'target': target_col,
        'fill_target_mean': True,
        'reports': reports,
        'output_path': './reports/'
    })
    pipe.add(explore_step)

    parse_step.configure({
        'fillna': 'mean'
    })
    pipe.add(parse_step)

    dum_cols = []
    if dummify_target:
        dum_cols.append(target_col)
    dum_cols.extend(dummify_cols)

    features_step.configure({
        'discretize': discretize_cols,
        'dummify': dum_cols
    })
    pipe.add(features_step)

    classify_step.configure({
        'type': 'DecisionTreeClassifier',
        'target': 'SeriousDlqin2yrs_0'
    })
    pipe.add(classify_step)

    evaluate_step.configure({
        'metrics': ['accuracy_score']
    })
    pipe.add(evaluate_step)

    result = pipe.execute()
    print('Pipe completed with result type:', type(result))
    return result
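A hedged usage sketch for build_pipeline(); every argument value below is hypothetical (the path and column names are illustrative only), with the target column chosen to match the 'SeriousDlqin2yrs_0' label hard-coded in the classify step above:

# Hypothetical arguments; adjust the path, columns, and reports flag to the
# actual dataset before running.
result = build_pipeline(path='data/credit-data.csv',
                        target_col='SeriousDlqin2yrs',
                        dummify_target=True,
                        discretize_cols=['age', 'MonthlyIncome'],
                        dummify_cols=['NumberOfDependents'],
                        reports=True)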
Code example #23
def main(args):
    p = pipeline.Pipeline(arguments=args)

    # Test if dot is available
    dot_version = ['dot', '-V']
    try:
        with open(os.devnull, 'w') as devnull:
            subprocess.check_call(dot_version, stdout=devnull)
    except subprocess.CalledProcessError as e:
        raise StandardError("Execution of %s failed. GraphViz seems to be "
                            "unavailable." % " ".join(dot_version))

    if args.files:
        logger.info(
            "Going to plot the graph containing all files of the analysis")
        raise StandardError("Sorry, feature not implemented yet!")
    elif args.steps:
        logger.info("Create a graph showing the DAG of the analysis")

        render_graph_for_all_steps(p, args)

    else:
        #    elif args.run:
        #        if len(args.run) >= 1:
        # Compile a list of all tasks
        task_list = list()
        # Only use tasks listed in args.run
        if args.run and len(args.run) >= 1:
            for task_id in args.run:
                if '/' in task_id:
                    task = p.task_for_task_id[task_id]
                    task_list.append(task)
                else:
                    for task in p.all_tasks_topologically_sorted:
                        if str(task)[0:len(task_id)] == task_id:
                            task_list.append(task)
        # or take all available tasks
        else:
            task_list = p.all_tasks_topologically_sorted

        for task in task_list:
            outdir = task.get_run().get_output_directory()
            anno_files = glob.glob(
                os.path.join(
                    outdir,
                    ".%s*.annotation.yaml" % task.get_run().get_run_id()))

            yaml_files = { os.path.realpath(f) for f in anno_files \
                           if os.path.islink(f) }
            for y in yaml_files:
                log_level = logger.getEffectiveLevel()
                logger.setLevel(logging.INFO)
                logger.info("Going to plot the graph for task: %s" % task)
                logger.setLevel(log_level)
                render_single_annotation(y, args)
Code example #24
def main(args):
    p = pipeline.Pipeline(arguments=args)

    task = None
    def handle_signal(signum, frame):
        logger.warning("Catching %s!" %
                       process_pool.ProcessPool.SIGNAL_NAMES[signum])
        p.caught_signal = signum
        process_pool.ProcessPool.kill()
        if task:
            signame = process_pool.ProcessPool.SIGNAL_NAMES[signum]
            error = 'UAP stopped because it caught signal %d - %s' % \
                        (signum, signame)
            log_task_error(task, error, True, True)
    signal.signal(signal.SIGTERM, handle_signal)
    signal.signal(signal.SIGINT, handle_signal)

    # execute all tasks
    finished_states = [p.states.FINISHED]
    if args.ignore:
        finished_states += [p.states.CHANGED]

    accepted_states = [p.states.BAD, p.states.READY, p.states.QUEUED,
                       p.states.VOLATILIZED]
    for task in p.get_task_with_list():
        task_state = task.get_task_state()
        if task_state in finished_states:
            task.move_ping_file()
            sys.stderr.write("Skipping %s because it's already %s.\n" %
                             (task, task_state))
        elif task_state == p.states.VOLATILIZED and not args.run:
            task.move_ping_file()
            sys.stderr.write("Skipping %s because it's already %s and not "
                             "specified as argument.\n" %
                             (task, task_state))
        elif task_state == p.states.CHANGED:
            if not args.force:
                task.move_ping_file()
                raise UAPError(
                    "Task %s has changed. "
                    "Run 'uap %s status --details' to see what changed or "
                    "'uap %s run-locally --force' to force overwrite "
                    "of the results." %
                    (task, args.config.name, args.config.name))
            else:
                check_parents_and_run(task, finished_states, args.debugging)
        elif task_state in accepted_states:
            check_parents_and_run(task, finished_states, args.debugging)
        else:
            task.move_ping_file()
            raise UAPError(
                "Unexpected task state for %s: %s\n"
                "Expected state to be 'READY'. Probably an upstream "
                "run crashed." %
                (task, task_state))
Code example #25
File: test_pipeline.py Project: IBM/cmnnc
def test_conv1d():
    """ Test a single 1D convolution """
    eg_vals = xparams({"n": 10, "k": 3, "p": 1})

    s1_ops = [
        pl.OpInfo(
            "MxV",
            [
                RD_a(
                    "[n,k,p] -> { S1[o1] -> in[j] : 0 <= o1 < ((n - k + 2*p) + 1) and o1 <= j < o1 + k }"
                ),
                WR_a(
                    "[n,k,p] -> { S1[o1] -> out[j] : 0 <= o1 < ((n - k + 2*p) + 1) and j = o1 }"
                ),
            ],
        ),
    ]
    stage1 = pl.Stage(pl.StageInfo(s1_ops), eg_vals)
    objs_info = {
        "in": ObjectInfo(shape=(eg_vals.n, ), padding=eg_vals.p),
        "out": ObjectInfo(shape=eg_vals.eval("(n-k+1,)"), padding=eg_vals.p),
    }
    pline = pl.Pipeline([stage1], objs_info, execute_ops=True)

    conv1_ps = conv.Conv1DParams(
        i=conv.Conv1DInParams(w=eg_vals["n"], d=1),
        f=conv.Conv1DFiltParams(w=eg_vals["k"], d=1, l=1),
        p=1,
        s=1,
        p_out=0,
    )

    # Set filters
    filters1 = np.random.rand(*conv1_ps.get_filters_shape())
    filters1_m = filters1.reshape(conv1_ps.eval("(f.l, f.d*f.w)"))
    cconf = pl.CoreConf(filters1_m)

    # Set input
    image1 = np.random.rand(*conv1_ps.get_input_shape())
    image1 = np.pad(image1, conv1_ps.get_input_padding())
    inp = pline.get_object("in")
    inp[...] = image1

    pline.configure([cconf])
    for _ in range(conv1_ps.o.w):
        pline.tick()
    out = pline.get_object("out")

    # Verify results
    output_simple = conv.conv1d_simple(image1, filters1, conv1_ps)
    # NB: conv1d_simple considers the depth dimension while our access
    # relations above do not
    np.testing.assert_allclose(output_simple[0, :], out)
Code example #26
	def run(self):
		
		histTest = pl.Pipeline(cons.POLONIEX_PUBLIC_URL, cons.POLONIEX_HISTORICAL_DATA)
		histData = histTest.getHistoricalData(self.currency, self.period, self.startDate, self.endDate)
		laggedHistData = histTest.getHistoricalData(self.currency, (self.period * 2), self.startDate, self.endDate)
		packagedHistoricalData = histTest.packageHistoricalData(histData, laggedHistData)

		self.finishedPositions = alg.tryAlgorithmLogic(packagedHistoricalData)
		self.formattedPositions = pl.algoResultsToDataframe(self.finishedPositions[0], self.finishedPositions[1], self.gainPerCent)
		self.verifiedStats = diag.verifyBacktestAccuracy(self.formattedPositions, False)

		return diag.CMD_UI(self.verifiedStats, self.currency, ut.CURRENT_DATE)
Code example #27
def get_abilene_pipeline(fixstep):
    cypress = pipeline.CypressStage()
    spirit = pipeline.SpiritStage(ispca=False,
                                  thresh=0.01,
                                  ebounds=(0, 1.1),
                                  startm=6)
    kalman = pipeline.KalmanStage(step_size=fixstep, lookahead_flag=0)
    pipe = pipeline.Pipeline()
    pipe.append_stage(cypress)
    pipe.append_stage(spirit)
    pipe.append_stage(kalman)
    return pipe
Code example #28
    def execute(self):
        algo_name = self.select_algo_to_execute()        
        algo = self.factory.create_algo_proxy(algo_name)

        pipeline.Pipeline({
            "Prepare": lambda ctx: [ algo.prepare_parameters(self.sourcer.source_required_data(algo.entrypoint_arg_spec)) ],
            "Execute": lambda ctx: [ algo.execute() ],
            "Verify" : lambda ctx: [ algo.verify() ],
            "Save"   : lambda ctx: [ self.persister.store(algo.last_execution_result, ctx) ]
        }).execute()
        
        logging.info("Pipeline completed.")
Code example #29
    def test_create_and_run_multiple_jobs(self):

        with open("./test_pipeline_build.json") as f:
            pipeline_structure = json.load(f)

        pipeline_obj_1 = pipeline.Pipeline("test pipeline", self.connection,
                                           self.meta_data)
        pipeline_obj_1.load_steps_into_db(pipeline_structure)

        pipeline_obj_2 = pipeline.Pipeline("test second job pipeline",
                                           self.connection, self.meta_data)
        pipeline_obj_2.load_steps_into_db(pipeline_structure)

        jobs_obj_2 = pipeline.Jobs("Test job", self.connection, self.meta_data)
        jobs_obj_2.create_jobs_to_run("test second job pipeline")

        jobs_obj_2.run_job()

        with open("./test_output.json") as f:
            pipeline_results = json.load(f)

        self.assertEqual(2, len(pipeline_results))
Code example #30
File: main.py Project: tomcur/fisheries-monitoring
def example():
    """
    Run the pipeline example (tests whether the pipeline runs successfully; should produce summary output of the first batch and the first case in that batch).
    """

    pl = pipeline.Pipeline(data_type="original")

    generator = pl.data_generator_builder(pl.mini_batch_generator)

    x, y, meta = next(generator)
    print("Number of cases in first batch: %s" % len(x))
    print("First image shape and label: %s - %s" % (str(x[0].shape), y[0]))
    print("First image meta information:")
    pprint.pprint(meta[0])