Example #1
    def test_star_apply(self):
        pipe = Pipeline([10, 20, 30])
        self.assertIsInstance(Pipeline([]).star_apply(func=func), Pipeline)
        with self.assertRaises(ValueError):
            Pipeline([]).star_apply(object())
        with self.assertRaises(TypeError):
            Pipeline([]).star_apply(not_a_param=object())
        with self.assertRaises(TypeError):
            out, err = pipe.star_apply(add_two_params).run()

            def error_func():
                raise err[0]

            error_func()
        out, err = pipe.apply(lambda x: [x, x]).star_apply(
            add_two_params).run()
        self.assertEqual(len(out), 3)
        self.assertEqual(sum(out), 120)
        self.assertEqual(len(err), 0)
        out, err = pipe.apply(lambda x: {
            "a": x,
            "b": x
        }).star_apply(add_two_params).run()
        self.assertEqual(len(out), 3)
        self.assertEqual(sum(out), 120)
        self.assertEqual(len(err), 0)
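The helper add_two_params used above is not shown in this snippet. Each input x is duplicated to the pair (x, x) (or to {"a": x, "b": x}) and sum(out) == 120 for the inputs [10, 20, 30], which suggests it simply adds its two arguments, while func only needs to be some callable for the isinstance check. A minimal, purely illustrative sketch under that assumption:

# Hypothetical helper assumed by test_star_apply (not part of the original source).
def add_two_params(a, b):
    # pairs (10, 10), (20, 20), (30, 30) -> 20 + 40 + 60 == 120
    return a + b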
Example #2
 def __init__(self):
     self.pipeline = Pipeline()
     self.history_pipeline = Pipeline()
     for plsc in pipelinestages.getPipelineFeedStageClasses():
         self.pipeline.appendStage(plsc())
     for plsc in pipelinestages.getPipelineHistoryFeedStageClasses():
         self.history_pipeline.appendStage(plsc())
Example #3
 def test_run(self):
     run_output = Pipeline([10, 5, 30]).apply(func).run()
     self.assertIsInstance(run_output, tuple)
     out, err = run_output
     self.assertEqual(len(out), 2)
     self.assertEqual(len(err), 1)
     with self.assertRaises(AttributeError):
         Pipeline([]).run()
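The func applied here is also undefined in the snippet; for out to hold two results and err one error with the inputs [10, 5, 30], it must raise for exactly one of them. A plausible, purely illustrative stand-in:

# Hypothetical func assumed by test_run: it must fail for exactly one of the
# inputs [10, 5, 30] so that len(out) == 2 and len(err) == 1.
def func(x):
    if x < 10:
        raise ValueError("value too small: %r" % x)
    return x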
Example #4
def main():
    """ 
    Parses arguments; initialises logger; initialises camera driver if
    necessary; loads single image from disk if necessary; and runs desired parts
    of pipeline, or loads output from previous execution for printout.
    
    """

    options, args = argparse.run()
    loginit.run(options.verbosity)
    logger = logging.getLogger('main')

    logger.info(' '.join(sys.argv[1:]))

    if options.simulate == 0:
        options.simulate = None
        l = DC1394Library()
    elif options.simulate > 0:
        options.simulate -= 1
    elif options.simtime is None:
        options.simtime = 36000

    global pipeline
    pipeline = Pipeline(options)

    if options.disk:
        logger.info('using poses from disk')
        pipe = Pipeline()
        pipe.options = options
        printer = Printer(pipe=pipe)
        printer.final()
        logger.info('done. exiting')
        sys.exit(0)

    if args:
        try:
            image = cv2.imread('images/' + args[0],
                               cv2.CV_LOAD_IMAGE_GRAYSCALE)
            pipeline.set_image(image)
            logger.info('opening image file %s from disk' % args[0])
        except IOError:
            logger.error('image file not found: %s' % args[0])
            exit(1)
    elif options.simulate is not None:
        logger.info('running in simulation mode')
    else:
        try:
            fwcam = handle_common_options(options, l)
            pipeline.set_fwcam(fwcam)
            logger.info('init. pydc1394 camera object')
            logger.info('camera: %s' % fwcam.model)
            logger.info('mode: %s' % fwcam.mode)
            logger.info('framerate: %d' % fwcam.framerate.val)
        except:
            logger.error('unable to open camera capture')
            exit(1)

    pipeline.run()
Example #5
def create_pipeline(game_settings, screen, pipeline_group, num, order):

    pipeline_top = Pipeline(game_settings, screen, 'top', num * 10, order)
    pipeline_bottom = Pipeline(game_settings, screen, 'bottom', (18 - num) * 10,
                               order)

    pipeline_group.add(pipeline_bottom)
    pipeline_group.add(pipeline_top)

    return pipeline_group
Example #6
 def test_threads(self):
     pipe_threads = Pipeline(range(1, 3), threads=10).apply(sleep)
     threads_time = timeit("pipe_threads.run()",
                           number=1,
                           globals={"pipe_threads": pipe_threads})
     self.assertLess(threads_time, 3)
     pipe_no_threads = Pipeline(range(1, 3), threads=1).apply(sleep)
     no_threads_time = timeit("pipe_no_threads.run()",
                              number=1,
                              globals={"pipe_no_threads": pipe_no_threads})
     self.assertGreater(no_threads_time, 3)
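The sleep helper in test_threads is likewise not defined in the snippet. The timing assertions (under 3 s with 10 threads, over 3 s single-threaded for the inputs 1 and 2) suggest it simply sleeps for the item's value; a sketch under that assumption:

import time


# Hypothetical sleep helper assumed by test_threads: sleeping for the item's
# value makes the serial run take a bit over 3 s (1 s + 2 s) while the threaded
# run takes roughly max(1, 2) = 2 s.
def sleep(seconds):
    time.sleep(seconds)
    return seconds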
Example #7
 def test_pipeline_unknown(self):
     os.environ["IN_KIND"] = "MEM"
     os.environ["OUT_KIND"] = "MEM"
     pipeline = Pipeline(args=["unknown"])
     del os.environ["IN_KIND"]
     del os.environ["OUT_KIND"]
     assert pipeline is not None
Example #8
def test_imagereader(bees_image, pipeline_config):
    pipeline = Pipeline([Filename], [Image, Timestamp, CameraIndex], **pipeline_config)

    expected_stages = [ImageReader]
    _assert_types(pipeline.pipeline, expected_stages)

    outputs = pipeline([bees_image])
    assert(len(outputs) == 3)

    assert Image in outputs
    assert Timestamp in outputs
    assert CameraIndex in outputs

    im = outputs[Image]
    ts = outputs[Timestamp]
    idx = outputs[CameraIndex]

    tz = pytz.timezone('Europe/Berlin')
    dt = datetime.datetime.fromtimestamp(ts, tz=pytz.utc)
    dt = dt.astimezone(tz)
    assert(im.shape == (3000, 4000))
    assert(dt.year == 2015)
    assert(dt.month == 8)
    assert(dt.day == 21)
    assert(dt.hour == 16)
    assert(dt.minute == 15)
    assert(dt.second == 30)
    assert(dt.microsecond == 884267)
    assert(idx == 2)
Example #9
def test_tagSimilarityEncoder(pipeline_config):
    pipeline = Pipeline([Filename], [Descriptors], **pipeline_config)
    fname = os.path.dirname(__file__) + '/data/Cam_2_20150821161530_884267.jpeg'

    outputs = pipeline([fname])
    assert Descriptors in outputs
    assert len(outputs[Descriptors]) > 20
Example #10
 def test_add_extension_input_resource_file(self):
     input_file1 = '/tmp/data/example1.txt.bgz.foo'
     p = Pipeline()
     in1 = p.read_input(input_file1, extension='.txt.bgz.foo')
     with self.assertRaises(Exception):
         in1.add_extension('.baz')
     assert in1._value.endswith('.txt.bgz.foo')
Example #11
def gen_data_for_relation_matcher_from_WebQSP(fn_webqsp_list, fn_out,
                                              use_aqqu):
    pipe = Pipeline(use_aqqu)
    # extract the question -> candidate relation mapping from each question
    wq = []
    for fn in fn_webqsp_list:
        wq.extend(json.load(open(fn))['Questions'])
    with open(fn_out, 'w') as fout:
        for data in wq:

            question, candidates = pipe.gen_candidate_relations(
                data['RawQuestion'])
            pattern_to_correct = dict()
            for parse in data['Parses']:
                if not parse['PotentialTopicEntityMention']:
                    continue
                mention = ' '.join(
                    naive_split(parse['PotentialTopicEntityMention']))
                pattern = question.replace(mention, '<$>')
                if '<$>' not in pattern:
                    print(question)
                if parse['InferentialChain']:
                    if pattern not in pattern_to_correct:
                        pattern_to_correct[pattern] = set()
                    pattern_to_correct[pattern].add(
                        parse['InferentialChain'][-1])
            for pattern, correct in pattern_to_correct.items():
                wrong = candidates - correct
                print(json.dumps({
                    'question': pattern,
                    "pos_relation": list(correct),
                    "neg_relation": list(wrong)
                }), file=fout)
Example #12
def build_export_pipeline(client_config, checkpoint_service, args) -> Pipeline:
    """
    All export jobs
    export_instance_profiles -> export_users -> export_groups -> export_secrets -> export_clusters -> export_instance_pools -> export_jobs
                                                              -> log_workspace_items -> export_workspace_acls
                                                              -> export_notebooks
                                                              -> export_metastore -> export_metastore_table_acls
    """
    skip_tasks = args.skip_tasks

    source_info_file = os.path.join(client_config['export_dir'], "source_info.txt")
    with open(source_info_file, 'w') as f:
        f.write(client_config['url'])

    completed_pipeline_steps = checkpoint_service.get_checkpoint_key_set(
        wmconstants.WM_EXPORT, wmconstants.MIGRATION_PIPELINE_OBJECT_TYPE)
    pipeline = Pipeline(client_config['export_dir'], completed_pipeline_steps, args.dry_run)
    export_instance_profiles = pipeline.add_task(InstanceProfileExportTask(client_config, checkpoint_service, wmconstants.INSTANCE_PROFILES in skip_tasks))
    export_users = pipeline.add_task(UserExportTask(client_config, checkpoint_service, wmconstants.USERS in skip_tasks), [export_instance_profiles])
    export_groups = pipeline.add_task(GroupExportTask(client_config, checkpoint_service, wmconstants.GROUPS in skip_tasks), [export_users])
    workspace_item_log_export = pipeline.add_task(WorkspaceItemLogExportTask(client_config, args, checkpoint_service, wmconstants.WORKSPACE_ITEM_LOG in skip_tasks), [export_groups])
    export_workspace_acls = pipeline.add_task(WorkspaceACLExportTask(client_config, checkpoint_service, wmconstants.WORKSPACE_ACLS in skip_tasks), [workspace_item_log_export])
    export_notebooks = pipeline.add_task(NotebookExportTask(client_config, checkpoint_service, wmconstants.NOTEBOOKS in skip_tasks), [workspace_item_log_export])
    export_secrets = pipeline.add_task(SecretExportTask(client_config, args, checkpoint_service, wmconstants.SECRETS in skip_tasks), [export_groups])
    export_clusters = pipeline.add_task(ClustersExportTask(client_config, args, checkpoint_service, wmconstants.CLUSTERS in skip_tasks), [export_secrets])
    export_instance_pools = pipeline.add_task(InstancePoolsExportTask(client_config, args, checkpoint_service, wmconstants.INSTANCE_POOLS in skip_tasks), [export_clusters])
    export_jobs = pipeline.add_task(JobsExportTask(client_config, args, checkpoint_service, wmconstants.JOBS in skip_tasks), [export_instance_pools])
    export_metastore = pipeline.add_task(MetastoreExportTask(client_config, checkpoint_service, args, wmconstants.METASTORE in skip_tasks), [export_groups])
    export_metastore_table_acls = pipeline.add_task(MetastoreTableACLExportTask(client_config, args, checkpoint_service, wmconstants.METASTORE_TABLE_ACLS in skip_tasks), [export_metastore])
    # FinishExport task is never skipped
    finish_export = pipeline.add_task(FinishExportTask(client_config),
                                      [export_workspace_acls, export_notebooks, export_jobs,
                                       export_metastore_table_acls])

    return pipeline
Example #13
def build_import_pipeline(client_config, checkpoint_service, args) -> Pipeline:
    """
    All import jobs
    import_instance_profiles -> import_users -> import_groups -> import_secrets -> import_clusters -> import_instance_pools -> import_jobs
                                                              -> log_workspace_items -> import_notebooks -> import_workspace_acls
                                                              -> import_metastore -> import_metastore_table_acls
    """
    skip_tasks = args.skip_tasks

    source_info_file = os.path.join(client_config['export_dir'], "source_info.txt")
    with open(source_info_file, 'r') as f:
        source_url = f.readline()
        confirm = input(f"Import from `{source_url}` into `{client_config['url']}`? (y/N) ")
        if confirm.lower() not in ["y", "yes"]:
            raise RuntimeError("User aborted import pipeline. Exiting..")

    completed_pipeline_steps = checkpoint_service.get_checkpoint_key_set(
        wmconstants.WM_IMPORT, wmconstants.MIGRATION_PIPELINE_OBJECT_TYPE)
    pipeline = Pipeline(client_config['export_dir'], completed_pipeline_steps, args.dry_run)
    import_instance_profiles = pipeline.add_task(InstanceProfileImportTask(client_config, checkpoint_service, wmconstants.INSTANCE_PROFILES in skip_tasks))
    import_users = pipeline.add_task(UserImportTask(client_config, checkpoint_service, wmconstants.USERS in skip_tasks), [import_instance_profiles])
    import_groups = pipeline.add_task(GroupImportTask(client_config, checkpoint_service, wmconstants.GROUPS in skip_tasks), [import_users])
    import_notebooks = pipeline.add_task(NotebookImportTask(client_config, checkpoint_service, args, wmconstants.NOTEBOOKS in skip_tasks), [import_groups])
    import_workspace_acls = pipeline.add_task(WorkspaceACLImportTask(client_config, checkpoint_service, wmconstants.WORKSPACE_ACLS in skip_tasks), [import_notebooks])
    import_secrets = pipeline.add_task(SecretImportTask(client_config, checkpoint_service, wmconstants.SECRETS in skip_tasks), [import_groups])
    import_clusters = pipeline.add_task(ClustersImportTask(client_config, args, checkpoint_service, wmconstants.CLUSTERS in skip_tasks), [import_secrets])
    import_instance_pools = pipeline.add_task(InstancePoolsImportTask(client_config, args, checkpoint_service, wmconstants.INSTANCE_POOLS in skip_tasks), [import_clusters])
    import_jobs = pipeline.add_task(JobsImportTask(client_config, args, checkpoint_service, wmconstants.JOBS in skip_tasks), [import_instance_pools])
    import_metastore = pipeline.add_task(MetastoreImportTask(client_config, checkpoint_service, args, wmconstants.METASTORE in skip_tasks), [import_groups])
    import_metastore_table_acls = pipeline.add_task(MetastoreTableACLImportTask(client_config, args, checkpoint_service, wmconstants.METASTORE_TABLE_ACLS in skip_tasks), [import_metastore])
    return pipeline
Example #14
    def display():
        CAMERA.render()
        gl.glClear(gl.GL_COLOR_BUFFER_BIT)

        nonlocal scale
        scale_location = gl.glGetUniformLocation(program, "gScale")
        assert scale_location != 0xffffffff
        world_location = gl.glGetUniformLocation(program, "gWorld")
        assert world_location != 0xffffffff

        scale += 0.01

        pipeline = Pipeline(
            rotation=[0.0, 30 * scale, 0.0],
            # scaling=[math.sin(scale)] * 3,
            translation=[0, 0, 6],
            projection=ProjParams(WINDOW_WIDTH, WINDOW_HEIGHT, 1.0, 100.0,
                                  60.0))
        pipeline.set_camera(CAMERA)

        gl.glUniformMatrix4fv(world_location, 1, gl.GL_TRUE,
                              pipeline.get_wvp())
        gl.glDrawElements(gl.GL_TRIANGLES, 18, gl.GL_UNSIGNED_INT,
                          ctypes.c_void_p(0))
        glut.glutSwapBuffers()
Example #15
    def test_run_multiple_times(self):
        pipeline = Pipeline().add(
            PipeBuilder("aggregator").aggregation_size(2).buffer_size(1)).add(
                PipeBuilder("summation").consumer(
                    lambda arr: sum(arr)).number_of_consumer(3).buffer_size(1)
            ).add(
                PipeBuilder("nth_triangular").consumer(
                    lambda n: reduce(lambda n1, n2: n1 + n2, range(1, n), 1)).
                number_of_consumer(5).buffer_size(1)).add(
                    PipeBuilder("nth_triangular2").consumer(
                        lambda n: reduce(lambda n1, n2: n1 + n2, range(
                            1, n), 1)).number_of_consumer(5).buffer_size(1))

        expect = [1, 56, 407, 667, 3082]
        actual_results = []

        def do_test(i):
            actual = [x for x in pipeline.stream(range(9))]
            actual.sort()
            actual_results.append(actual)
            self.assertEqual(
                actual, expect,
                "%sth test. actual(%s) != expected(%s)" % (i, actual, expect))
            pipeline.reset()
            pipeline.logger.info("%sth test done", i)

        for i in range(100):
            do_test(i)

        self.assertEqual(actual_results, [expect] * 100)
        self.assertTrue(True)
Example #16
def make_human_rope_pipeline(downsample=5, init_period=0):

    if init_period == 0: init_period = 9999

    P = Pipeline()

    P.add_topic("kinect")
    P.add_topic("rope_pts")
    P.add_topic("rope_init", dont_clean=True)
    P.add_topic("rope_model")

    P.add_file("once/table_corners.txt")
    P.add_program("write_pcds", "comm_write_pcds kinect -d %i" % downsample,
                  [], ["kinect"])

    P.add_program(
        "initializer",
        "loop_initialize_rope_lite.py rope_pts rope_init %.2f" % init_period,
        ["rope_pts"], ["rope_init"])

    P.add_program("rope_preproc", "comm_rope_preproc",
                  ["kinect", "once/table_corners.txt"], ["rope_pts"])

    P.add_program(
        "track_rope",
        "track_rope --scale=10 --kp=.1 --kd=0 --outlierParam=.2 --showEst=0 --showObs=1 --showKinect=1",
        ["kinect", "rope_init", "rope_pts", "once/table_corners.txt"],
        ["rope_model"])
    P.add_program("get_table", "comm_get_table", ["kinect"],
                  ["once/table_corners.txt"])

    P.env = os.environ

    return P
Example #17
 def test_resource_group_get_all_inputs(self):
     p = Pipeline()
     input = p.read_input_group(fasta="foo", idx="bar")
     t = p.new_task()
     t.command(f"cat {input.fasta}")
     assert (input.fasta in t._inputs)
     assert (input.idx in t._inputs)
Example #18
def test_padding(pipeline_config):
    pipeline = Pipeline(
        [Filename],
        [
            PaddedLocalizerPositions,
            LocalizerPositions,
            PaddedImage,
            Image,
            LocalizerShapes,
        ],
        **pipeline_config,
    )

    fname = os.path.dirname(
        __file__) + "/data/Cam_2_20150821161530_884267.jpeg"
    outputs = pipeline([fname])

    assert len(outputs) == 5
    assert PaddedImage in outputs
    assert PaddedLocalizerPositions in outputs

    offset = outputs[LocalizerShapes]["roi_size"] // 2
    for padded, original in zip(outputs[PaddedLocalizerPositions],
                                outputs[LocalizerPositions]):
        assert all([(pc - offset) == oc for pc, oc in zip(padded, original)])
Example #19
def test_decoder(pipeline_config):
    pipeline = Pipeline([Filename], [LocalizerPositions, IDs, Radii], **pipeline_config)

    expected_stages = [ImageReader,
                       LocalizerPreprocessor,
                       Localizer,
                       Decoder]
    _assert_types(pipeline.pipeline, expected_stages)

    fname = os.path.dirname(__file__) + '/data/Cam_2_20150821161530_884267.jpeg'

    outputs = pipeline([fname])

    assert len(outputs) == 3
    assert IDs in outputs
    assert LocalizerPositions in outputs
    assert Radii in outputs

    positions = outputs[LocalizerPositions]
    ids = outputs[IDs]
    radii = outputs[Radii]

    assert(len(ids) == len(positions))
    assert(len(ids) == len(radii))

    for pos, id, radius in zip(positions, ids, radii):
        pos = np.round(pos).astype(int)
        id = ''.join([str(int(b)) for b in (np.round(id))])
        print('Detection at ({}, {}) \t ID: {} \t Radius: {}'.format(pos[0], pos[1], id, radius))
Example #20
def test_generator_processor_video(tmpdir, bees_video, filelists_path,
                                   pipeline_config):
    repo = Repository(str(tmpdir))
    pipeline = Pipeline([Image, Timestamp], [PipelineResult],
                        **pipeline_config)
    gen_processor = GeneratorProcessor(pipeline,
                                       lambda: BBBinaryRepoSink(repo, camId=0))

    gen = video_generator(bees_video,
                          ts_format="2015",
                          path_filelists=filelists_path)

    gen_processor(gen)
    fnames = list(repo.iter_fnames())
    assert len(fnames) == 1

    last_ts = 0
    num_frames = 0
    for fname in repo.iter_fnames():
        print("{}: {}".format(fname, os.path.getsize(fname)))
        with open(fname, "rb") as f:
            fc = FrameContainer.read(f)
            num_frames += len(list(fc.frames))
        assert fc.dataSources[0].filename == os.path.basename(bees_video)
        assert last_ts < fc.fromTimestamp
        last_ts = fc.fromTimestamp

    assert num_frames == 3
Example #21
def test_generator_processor(tmpdir, bees_image, pipeline_config):
    def image_generator():
        ts = time.time()
        data_source = DataSource.new_message(filename='bees.jpeg')
        for i in range(2):
            img = imread(bees_image)
            yield data_source, img, ts + i

    repo = Repository(str(tmpdir))
    pipeline = Pipeline([Image, Timestamp], [PipelineResult], **pipeline_config)
    gen_processor = GeneratorProcessor(
        pipeline, lambda: BBBinaryRepoSink(repo, camId=2))

    gen_processor(image_generator())
    gen_processor(image_generator())
    fnames = list(repo.iter_fnames())
    assert len(fnames) == 2

    last_ts = 0
    for fname in repo.iter_fnames():
        print("{}: {}".format(fname, os.path.getsize(fname)))
        with open(fname, 'rb') as f:
            fc = FrameContainer.read(f)
        assert fc.dataSources[0].filename == 'bees.jpeg'
        assert last_ts < fc.fromTimestamp
        last_ts = fc.fromTimestamp
Example #22
def test_generator_processor_threads(tmpdir, bees_video, filelists_path,
                                     pipeline_config):
    repo = Repository(str(tmpdir))
    pipelines = [
        Pipeline([Image, Timestamp], [PipelineResult], **pipeline_config)
        for _ in range(3)
    ]
    gen_processor = GeneratorProcessor(pipelines,
                                       lambda: BBBinaryRepoSink(repo, camId=0))

    gen = video_generator(bees_video,
                          ts_format="2015",
                          path_filelists=filelists_path)

    gen_processor(gen)
    fnames = list(repo.iter_fnames())
    assert len(fnames) == 1

    num_frames = 0
    for fname in repo.iter_fnames():
        with open(fname, "rb") as f:
            fc = FrameContainer.read(f)
            num_frames += len(list(fc.frames))

    assert num_frames == 3
Example #23
def test_localizer(pipeline_config):
    pipeline = Pipeline([Filename], [Regions, LocalizerPositions], **pipeline_config)

    expected_stages = [ImageReader,
                       LocalizerPreprocessor,
                       Localizer]
    _assert_types(pipeline.pipeline, expected_stages)

    fname = os.path.dirname(__file__) + '/data/Cam_2_20150821161530_884267.jpeg'

    outputs = pipeline([fname])

    assert len(outputs) == 2
    assert Regions in outputs
    assert LocalizerPositions in outputs

    regions = outputs[Regions]
    assert(len(regions) > 0)

    positions = outputs[LocalizerPositions]
    assert(len(regions) == len(positions))

    for pos in positions:
        assert(pos[0] >= 0 and pos[0] < 3000)
        assert(pos[1] >= 0 and pos[1] < 4000)
Example #24
def json_(workflow, input_dict, **kwargs):
    """
    Input file is a json of the following format:
    [
        {
            'chunk': 001,
            'library': 'LIB-1216301779A',
            'sample_name': '1216301779A',
            'platform': 'ILLUMINA',
            'platform_unit': 'C0MR3ACXX.001',
            'pair': 0, #0 or 1
            'path': '/path/to/fastq'
        },
        {..}
    ]
    """
    input_json = json.load(open(input_dict, 'r'))
    inputs = [
        INPUT(name='fastq.gz',
              path=i['path'],
              fmt='fastq.gz',
              tags=i,
              stage_name='Load Input Fastqs') for i in input_json
    ]

    DAG(ignore_stage_name_collisions=True).sequence_(add_(inputs), Pipeline(),
                                                     configure(wga_settings),
                                                     add_run(workflow))
Example #25
def log_summarizer(file):
    pipeline = Pipeline()

    @pipeline.task()
    def parse_logs(log):
        return parse_log(log)

    @pipeline.task(depends_on=parse_logs)
    def build_raw_csv(lines):
        return build_csv(lines, header=[
            'ip', 'time_local', 'request_type',
            'request_path', 'status', 'bytes_sent',
            'http_referrer', 'http_user_agent'
        ],
        file=io.StringIO())

    @pipeline.task(depends_on=build_raw_csv)
    def count_uniques(csv_file):
        return count_unique_request(csv_file)

    @pipeline.task(depends_on=count_uniques)
    def summarize_csv(lines):
        return build_csv(lines, header=['request_type', 'count'], file=io.StringIO())

    log = open(file)
    summarized_csv = pipeline.run(log)
    print(summarized_csv.readlines())
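The helpers parse_log, build_csv, and count_unique_request are not part of this snippet. A minimal sketch of what they might look like, assuming tab-separated access-log lines and the standard csv module (the field layout and return types here are assumptions, not the original implementation):

import csv
from collections import Counter


# Hypothetical helpers assumed by log_summarizer.
def parse_log(log):
    # yield one tuple of fields per log line (tab-separated format is an assumption)
    for line in log:
        yield tuple(line.strip().split('\t'))


def build_csv(lines, header, file):
    # write the rows into the given file-like object and rewind it for the next task
    writer = csv.writer(file)
    writer.writerow(header)
    writer.writerows(lines)
    file.seek(0)
    return file


def count_unique_request(csv_file):
    # count occurrences of each request type in the raw CSV
    reader = csv.DictReader(csv_file)
    return Counter(row['request_type'] for row in reader).items()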
Example #26
def musixmatch():
    k = int(request.form['k'])
    country = request.form['country']
    musix = Musix(country)
    tracks = musix.get_top_lyrics(k)
    f = open(FILE_NAME, "w")
    f.write("song\ttone-analyzer\tibm\n")
    f.close()
    result = ""
    for i, track in enumerate(tracks):
        try:
            pipeline = Pipeline([track.lyrics])
            track.label(pipeline.vectorize())

            ibm = ToneAnalyzer()
            ibm_results = ibm.analyze(track)

            result = result + (track.name + " by <i>" + track.artist +
                               "</i> : <b><u>" + track.mood +
                               "</b></u> || IBM's Results <b><u>" +
                               ', '.join(ibm_results)) + "</b></u><br><br>"
            f = open(FILE_NAME, "a")
            f.write(
                str(track.name + " by " + track.artist + "\t" + track.mood +
                    "\t" + ', '.join(ibm_results) + "\n"))
            f.close()
            print("Completed classifying " + str(i + 1) + " tracks")
        except AttributeError:
            pass

    return render_template("musixmatch.html", k=k, result=result)
Example #27
def example_four():
    # Test case four: run ten tasks on multiple threads; each one takes about 2
    # seconds (a sleep(2) was added at the start of Pipeline.run while testing).
    threads = []
    start_time = time.time()
    for idx in range(10):
        base = idx * 10
        a_ins = StepA(base)
        b_ins = StepB(200)
        pipeline = Pipeline([('name_a', a_ins, "add_value"),
                             ("name_b", b_ins, "self_double"),
                             ('name_c', b_ins, "add_two"),
                             ('name_d', a_ins, "add_value")],
                            thread_name="id_{}".format(idx))
        parameters = {
            'name_a__value': 100,
            'name_c__value_one': "name_a.output",
            'name_c__value_two': "name_b.output",
            'name_d__value': "name_c.output",
        }
        pipeline.set_params(parameters)
        threads.append(pipeline)
    for each_thread in threads:
        # import ctypes, threading
        each_thread.setDaemon(True)  # don't keep running in the background after the main thread exits
        each_thread.start()
        print("thread name:{}".format(each_thread.name))
        # print(threading.currentThread().ident)  # this only prints the main thread's id
        # print("thread id: ", ctypes.CDLL('libc.so.6').syscall(186))  # not usable on Windows; none of 186, 224, 178 work there
    for each_thread in threads:
        each_thread.join()  # wait until every pipeline thread has finished
    for each_thread in threads:
        print(each_thread.get_results())  # 看一下每一个的结果是不是都是ok的。
    print("total cost: {} seconds".format(time.time() - start_time))
Example #28
    def test_bottom_edge(self, name):
        pipeline = Pipeline(
            get_project_path() / 'test/data/pipeline/bottom_edge.tif',
            get_project_path() / f'test/data/pipeline/{name}.tif', 45, 45)
        pipeline.run()
        result = pipeline.result

        height, width = get_shape(get_project_path() /
                                  f'test/data/pipeline/{name}.tif')

        if height == width == 30:
            assert (result == 0).all()
        else:
            window = get_corresponding_window(
                get_project_path() / 'test/data/pipeline/bottom_edge.tif',
                get_project_path() / f'test/data/pipeline/{name}.tif')
            pixels = get_pixels(window)
            horizontal_edge = pixels[pixels[:, 0] == window[0][1] - 1]
            # check that the result is <= 9 everywhere
            assert (result <= 9).all()
            # check that all counts on the horizontal edge are > 0 and <= 3
            edge_counts = result[horizontal_edge[:, 0], horizontal_edge[:, 1]]
            assert (edge_counts > 0).all() and (edge_counts <= 3).all()
            # check that result is 0 everywhere else
            result[horizontal_edge[:, 0], horizontal_edge[:, 1]] = 0
            assert (result == 0).all()
Example #29
    def on_display(self):
        """
        Rendering callback.
        """
        self._camera.render()

        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

        self._scale += 0.1
        pipeline = Pipeline(rotation=[0, self._scale, 0],
                            translation=[0, 0, 6],
                            projection=self._projection)
        pipeline.set_camera(self._camera)

        self._effect.set_wvp(pipeline.get_wvp())
        self._effect.set_directional_light(
            self._dir_light_color, self._dir_light_ambient_intensity)

        position, tex_coord = 0, 1
        glEnableVertexAttribArray(position)
        glEnableVertexAttribArray(tex_coord)

        glBindBuffer(GL_ARRAY_BUFFER, self._vbo)
        glVertexAttribPointer(position, 3, GL_FLOAT, GL_FALSE, 20, ctypes.c_void_p(0))
        glVertexAttribPointer(tex_coord, 2, GL_FLOAT, GL_FALSE, 20, ctypes.c_void_p(12))
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self._ibo)

        self._texture.bind(GL_TEXTURE0)
        glDrawElements(GL_TRIANGLES, 18, GL_UNSIGNED_INT, ctypes.c_void_p(0))
        glDisableVertexAttribArray(position)
        glDisableVertexAttribArray(tex_coord)
        glutSwapBuffers()
Example #30
    def test_ones(self, name):
        pipeline = Pipeline(
            get_project_path() / 'test/data/pipeline/ones.tif',
            get_project_path() / f'test/data/pipeline/{name}.tif', 45, 45)
        pipeline.run()
        result = pipeline.result

        window = get_corresponding_window(
            get_project_path() / 'test/data/pipeline/ones.tif',
            get_project_path() / f'test/data/pipeline/{name}.tif')
        pixels = get_pixels(window)
        rectangle = pixels
        inner_rectangle = rectangle[np.logical_and(
            np.logical_and(rectangle[:, 0] != window[0][0],
                           rectangle[:, 1] != window[1][0]),
            np.logical_and(rectangle[:, 0] != window[0][1] - 1,
                           rectangle[:, 1] != window[1][1] - 1))]

        # check that the result is <= 9 everywhere
        assert (result <= 9).all()
        # check that all counts inside the window are > 0
        assert (result[rectangle[:, 0], rectangle[:, 1]] > 0).all()
        # check that the inner rectangle is exactly 9 everywhere
        assert (result[inner_rectangle[:, 0], inner_rectangle[:, 1]] == 9).all()
        # check that result is 0 everywhere else
        result[rectangle[:, 0], rectangle[:, 1]] = 0
        assert (result == 0).all()