def for_in(cls, sources, result_selector):
    """Concatenates the observable sequences obtained by running the
    specified result selector for each element in source.

    sources -- {Array} An array of values to turn into an observable
        sequence.
    result_selector -- {Function} A function to apply to each item in the
        sources array to turn it into an observable sequence.

    Returns an observable {Observable} sequence from the concatenated
    observable sequences."""

    return Observable.concat(Enumerable.for_each(sources, result_selector))
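# A minimal usage sketch of the concatenation for_in performs, assuming the
# RxPY 1.x API used throughout these examples; make_range is a hypothetical
# selector introduced only for illustration.
from rx import Observable

def make_range(n):
    # Each source element becomes its own observable sequence.
    return Observable.from_(range(n))

# Each inner sequence runs to completion before the next is subscribed,
# so this prints 0, 0, 1, 0, 1, 2.
Observable.concat([make_range(n) for n in (1, 2, 3)]) \
    .subscribe(lambda i: print(i))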
def daemon_main(sources):
    args = sources["ARG"]["arguments"]()
    stt = sources["HTTP"]["request"]()
    text = sources["DEEPSPEECH"]["text"]().share()
    config_data = sources["FILE"]["data"]("config")

    arg_specs = Observable.from_([
        {
            "what": "argument",
            "arg_name": "config",
            "arg_help": "Path of the server configuration file",
        },
    ])

    config_file = args \
        .filter(lambda i: i["name"] == "config") \
        .map(lambda i: {"name": "config", "path": i["value"]})

    config = parse_config(config_data)

    ds_stt = stt \
        .map(lambda i: {"what": "stt", "data": i["data"], "context": i["context"]})

    ds_arg = config \
        .filter(lambda i: i["what"] in [
            "ds_conf_model", "ds_conf_alphabet",
            "ds_conf_trie", "ds_conf_lm",
            "conf_complete"])

    ds = ds_stt.merge(ds_arg)

    http_arg = config \
        .filter(lambda i: i["what"] in [
            "srv_http_conf_request_max_size",
            "srv_http_conf_host", "srv_http_conf_port",
            "conf_complete"])

    http_response = text \
        .map(lambda i: {"what": "response", "data": i["text"], "context": i["context"]})

    http_stt_route = Observable.from_([
        {"what": "add_route", "type": "POST", "path": "/stt"},
    ])

    http_init = Observable.concat([http_arg, http_stt_route])
    http = http_response.merge(http_init)

    console = text.map(lambda i: i["text"])

    return OrderedDict([
        ("ARG", arg_specs),
        ("FILE", config_file),
        ("CONSOLE", console),
        ("DEEPSPEECH", ds),
        ("HTTP", http),
    ])
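# The Observable.concat([http_arg, http_stt_route]) line above guarantees the
# HTTP driver receives every configuration item before the route registration.
# A stripped-down sketch of that ordering, with hypothetical command dicts:
from rx import Observable

conf = Observable.from_([{"what": "srv_http_conf_port", "value": 8080}])
route = Observable.from_([{"what": "add_route", "type": "POST", "path": "/stt"}])

# The route command cannot be emitted until `conf` has completed.
Observable.concat([conf, route]).subscribe(lambda cmd: print(cmd))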
def scrape_ui(driver, connection):
    """Scrapes the application UI and records the results."""
    cursor = connection.cursor()

    links = Observable.from_(get_description_links()) \
        .skip(9) \
        .to_dict(lambda e: e.string,
                 lambda e: "http://www.diagnos-online.ru/" + e["href"])

    scraped_data = Observable.from_(scrape_diagnosis(driver)) \
        .map(lambda e: {
            "diagnosis": [(d.text.split(" ")[1], float(d.text.split(" ")[0][:-1]))
                          for d in e["diagnosis"]],
            "symptom_group": e["symptom_group"].text.strip(),
            "symptom": e["symptom"].text.strip()
        })

    wait_links = Observable.concat(links, scraped_data).skip(1)

    Observable.combine_latest(wait_links, links, lambda d, l: {
        "diagnosis": [(i[0], i[1], match_link(l, i[0])) for i in d["diagnosis"]],
        "symptom_group": d["symptom_group"],
        "symptom": d["symptom"]
    }) \
        .do_action(print) \
        .subscribe(on_next=partial(database_writer, cursor),
                   on_completed=partial(scrape_complete, connection, driver))
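# The concat(links, scraped_data).skip(1) line above is a gating trick: the
# single dict emitted by `links` is dropped by skip(1), but no scraped item
# can pass downstream until the link table has completed. Reduced to plain
# values, under the same RxPY 1.x assumptions:
from rx import Observable

ready = Observable.from_([{"lookup": "table"}])  # emits once, then completes
data = Observable.from_(["item1", "item2"])

# `data` flows only after `ready` completes; ready's own emission is skipped.
Observable.concat(ready, data).skip(1).subscribe(lambda x: print(x))
# item1
# item2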
from rx import Observable

observable = Observable.from_([1, 2])
other_observable = Observable.from_([3, 4])

Observable.concat(observable, other_observable) \
    .subscribe(on_next=lambda n: print(n))

# O/P
# 1
# 2
# 3
# 4
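# One point the example above cannot show with synchronous sources: concat
# subscribes to its sequences one at a time, so a slow first sequence delays
# the second entirely. A sketch under the same RxPY 1.x assumptions, where
# interval takes a period in milliseconds:
import time
from rx import Observable

ticks = Observable.interval(300).map(lambda i: "tick {0}".format(i)).take(3)
words = Observable.from_(["A", "B"])

# `words` is not subscribed until `ticks` completes after three emissions.
Observable.concat(ticks, words).subscribe(lambda x: print(x))

time.sleep(2)  # keep the main thread alive for the interval's timer thread
# tick 0, tick 1, tick 2, A, B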
Observable.from_([obs1, obs2, obs3]) \
    .merge_all() \
    .subscribe(lambda x: print(x))

items = ["12/123/345/123/3/6", "8/3/1/6/9/05/", "4/3/6/8/9/4/3/67"]

Observable.from_(items) \
    .map(lambda s: Observable.from_(s.split('/'))) \
    .merge_all() \
    .subscribe(lambda i: print(i))

Observable.from_(items) \
    .flat_map(lambda s: Observable.from_(s.split('/'))) \
    .subscribe(lambda i: print(i))

Observable.concat(obs1, obs2).subscribe(lambda x: print(x))

letters = Observable.from_(['A', 'B', 'C', 'D', 'E', 'F'])
numbers = Observable.range(1, 5)

Observable.zip(letters, numbers, lambda l, n: "{0}-({1})".format(l, n)) \
    .subscribe(lambda x: print(x))

letters.zip(numbers, lambda l, n: "{0}-({1})".format(l, n)) \
    .subscribe(lambda x: print(x))

letters = Observable.from_(['Alpha', 'Betta', 'Gamma', 'Delta', 'Epsilon'])
intervals = Observable.interval(1000)

Observable.zip(letters, intervals, lambda l, i: l) \
    .subscribe(lambda s: print(s),
               on_completed=lambda: print('Completed!'))
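# As the two versions above suggest, flat_map(f) behaves like the fused form
# of map(f).merge_all(); for synchronous sources the outputs match element
# for element. A quick check under the same RxPY 1.x assumptions:
from rx import Observable

items = ["a/b", "c/d"]

merged, flat = [], []
Observable.from_(items) \
    .map(lambda s: Observable.from_(s.split('/'))) \
    .merge_all() \
    .subscribe(merged.append)
Observable.from_(items) \
    .flat_map(lambda s: Observable.from_(s.split('/'))) \
    .subscribe(flat.append)

print(merged == flat)  # True: both emit a, b, c, d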
from __future__ import print_function
from rx import Observable

source1 = Observable.from_(["Alpha", "Beta", "Gamma", "Delta", "Epsilon"])
source2 = Observable.from_(["Zeta", "Eta", "Theta", "Iota"])

Observable.concat(source1, source2) \
    .subscribe(lambda s: print(s))
def extract_features(sources):
    aio_scheduler = AsyncIOScheduler()
    file_response = sources.file.response.share()
    config_sink = config.read_configuration(config.Source(
        file_response=file_response,
        argv=sources.argv.argv.subscribe_on(aio_scheduler)))
    configuration = config_sink.configuration.share()
    walk_adapter = walk.adapter(sources.walk.response)
    # file_adapter = file.adapter(sources.media_file.response)
    # write_feature_request, write_feature_file = router.make_crossroad_router(file_response)
    media_file_request, feature_file_request, process_path = \
        path_processor.make_path_processor(
            sources.media_file.response, sources.feature_file.response)
    random_cross_request, cross_random = router.make_crossroad_router(
        sources.random.response)

    features = configuration.flat_map(
        lambda configuration: walk_adapter.api
        .walk(configuration.dataset.voxceleb2_path)
        # extract features from files
        .let(
            process_path,
            configuration=configuration,
            # file_adapter=file_adapter,
        )
        # create sets
        .reduce(lambda acc, i: acc + [{
            'file': i,
            'label': label_from_path(i),
            'set': set_from_path(i),
        }], seed=[])
        # todo: shuffle
        .map(train_test_split)
        .flat_map(lambda dataset: Observable.just(dataset['test'])
                  .map(pair_set)
                  # shuffle apn pairs
                  .map(lambda i: random.Shuffle(id='dev_test_set', data=i))
                  .let(cross_random)
                  .filter(lambda i: i.id == 'dev_test_set')
                  .map(lambda i: i.data)
                  .map(lambda i: test_dev_split(
                      i,
                      configuration.dataset.dev_set_utterance_count,
                      configuration.dataset.test_set_utterance_count))
                  .map(lambda i: {
                      'train': dataset['train'],
                      'dev': i['dev'],
                      'test': i['test'],
                  }))
    ).share()

    # save dataset json file
    write_dataset_request = features \
        .map(json.dumps) \
        .with_latest_from(configuration, lambda dataset, configuration: file.Write(
            id='write_dataset',
            path=configuration.dataset.path,
            data=dataset,
            mode='w')) \
        .share()

    # seed the random driver before forwarding any later request
    random_request = Observable.concat(
        configuration.map(lambda i: random.SetSeed(value=i.random_seed)),
        random_cross_request)

    logs = features
    exit = sources.dataset_file.response.ignore_elements()

    return Sink(
        file=file.Sink(request=config_sink.file_request),
        dataset_file=file.Sink(request=write_dataset_request),
        media_file=file.Sink(request=media_file_request),
        feature_file=file.Sink(request=feature_file_request),
        logging=logging.Sink(request=logs),
        walk=walk.Sink(request=walk_adapter.sink),
        stop=stop.Sink(control=exit),
        random=random.Sink(request=random_request))
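# The reduce step above folds the whole stream of walked paths into a single
# list of records, emitted once when the walk completes. A minimal sketch of
# that accumulation, assuming the same reduce(accumulator, seed=...) signature:
from rx import Observable

Observable.from_(["a/x.wav", "b/y.wav"]) \
    .reduce(lambda acc, path: acc + [{"file": path}], seed=[]) \
    .subscribe(lambda records: print(records))
# [{'file': 'a/x.wav'}, {'file': 'b/y.wav'}]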
def class_nineteen():
    letters1 = Observable.from_(["alpha", "Beta", "Gamma", "Delta", "Epsilon"])
    letters2 = Observable.from_iterable(["Zeta", "Exo", "Flota", "Delta"])

    Observable.concat(letters1, letters2) \
        .subscribe(print)