Code Example #1
def get_vbb_data(centre):
	global stations
	global station_types
	g = Graph()
	with open('nodes.ndjson') as f:
		dataSta = ndjson.load(f)

	# round-trip the records through NDJSON text and back (demonstrates dumps/loads; otherwise a no-op)
	textSta = ndjson.dumps(dataSta)
	dataSta = ndjson.loads(textSta)
	for i in dataSta:
		#tupel = str(i['metadata']['x'])+","+str(i['metadata']['y'])
		x = float(i['metadata']['longitude'])
		y = float(i['metadata']['latitude'])
		idSt = str(i['id'])
		g.add_node(idSt)
		stations[idSt] = (x, y)
		# g.add_node(tupel)

	with open('edges.ndjson') as f:
		dataDist = ndjson.load(f)

	# round-trip the records through NDJSON text and back (demonstrates dumps/loads; otherwise a no-op)
	textDist = ndjson.dumps(dataDist)
	dataDist = ndjson.loads(textDist)

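	# classify each station by the "best" line serving it:
	# 1 = regional (RB/RE), 2 = U-/S-Bahn, 3 = anything else; a lower rank wins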
	for i in dataDist:
		stationA = str(i['source'])
		stationB = str(i['target'])
		distance = int(i['metadata']['time'])
		line = i['metadata']['line']
		# the original checked 'RB' twice; 'RE' is presumably the intended second prefix
		if line.startswith('RB') or line.startswith('RE'):
			station_types[stationA] = 1
			station_types[stationB] = 1
		elif line.startswith('U') or line.startswith('S'):
			if stationA in station_types:
				if station_types[stationA] > 1:
					station_types[stationA] = 2
			else:
				station_types[stationA] = 2
			if stationB in station_types:
				if station_types[stationB] > 1:
					station_types[stationB] = 2
			else:
				station_types[stationB] = 2
		else:
			if stationA in station_types:
				if station_types[stationA] > 2:
					station_types[stationA] = 3
			else:
				station_types[stationA] = 3

			if stationB in station_types:
				if station_types[stationB] > 2:
					station_types[stationB] = 3
			else:
				station_types[stationB] = 3
		g.add_edge(stationA, stationB, distance)

	return dijsktra(g, centre)  # node id of the Dabendorf station: 900000245024
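
For reference, the record shapes implied by the reads above (values illustrative): a nodes.ndjson line looks like {"id": "900000245024", "metadata": {"longitude": 13.4, "latitude": 52.5}}, and an edges.ndjson line like {"source": "900000245024", "target": "900000100001", "metadata": {"time": 3, "line": "S2"}}.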
Code Example #2
File: kafka_data.py  Project: ihandmine/msqcrawler
 def single_data_handler(self, item):
     topic, value = item
     _data = ndjson.dumps([
         value,
     ])
     # print(_data)
     self.send(topic, _data)
Code Example #3
async def run_app(
    loop: asyncio.AbstractEventLoop,
    payload: List[dict],
    entity: str,
    mock_url: str,
) -> None:
    asyncio.set_event_loop(loop)
    with aioresponses() as mocked:
        body = ndjson.dumps(payload)
        mocked.get(mock_url, status=200, body=body)
        test_app = init_app(
            loop=loop,
            settings=settings,
            command_line_args=argparse.Namespace(verbose=False))

        if entity == "patients":
            task = loop.create_task(test_app.resolve_patients())
        elif entity == "encounters":
            task = loop.create_task(test_app.resolve_encounters())
        elif entity == "procedures":
            task = loop.create_task(test_app.resolve_procedures())
        elif entity == "observations":
            task = loop.create_task(test_app.resolve_observations())
        else:
            raise ValueError("unknown entity")

        await asyncio.sleep(SLEEP_PERIOD)
        task.cancel()
        await asyncio.gather(task, return_exceptions=True)
Code Example #4
File: kafka_data.py  Project: ihandmine/msqcrawler
 def multi_data_handler(self, item):
     topic, value = item
     if not self.cache.get(topic):
         self.cache[topic] = []
     self.cache[topic].append(value)
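     # a flush threshold of 1 sends after every append; raise it to actually batch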
     if len(self.cache[topic]) >= 1:
         _data = ndjson.dumps(self.cache[topic])
         # print(_data)
         self.send(topic, _data)
         self.cache[topic] = []
Code Example #5
    @classmethod
    def create_from_objects(cls,
                            client,
                            project_id: str,
                            name: str,
                            predictions: Iterable[Dict],
                            validate=True) -> 'BulkImportRequest':
        """
        Creates a `BulkImportRequest` from an iterable of dictionaries.

        Conforms to JSON predictions format, e.g.:
        ``{
            "uuid": "9fd9a92e-2560-4e77-81d4-b2e955800092",
            "schemaId": "ckappz7d700gn0zbocmqkwd9i",
            "dataRow": {
                "id": "ck1s02fqxm8fi0757f0e6qtdc"
            },
            "bbox": {
                "top": 48,
                "left": 58,
                "height": 865,
                "width": 1512
            }
        }``

        Args:
            client (Client): a Labelbox client
            project_id (str): id of project for which predictions will be imported
            name (str): name of BulkImportRequest
            predictions (Iterable[dict]): iterable of dictionaries representing predictions
            validate (bool): a flag indicating if there should be a validation
                if `predictions` is valid ndjson
        Returns:
            BulkImportRequest object
        """
        if validate:
            _validate_ndjson(predictions, client.get_project(project_id))

        data_str = ndjson.dumps(predictions)
        if not data_str:
            raise ValueError('annotations cannot be empty')

        data = data_str.encode('utf-8')
        file_name = _make_file_name(project_id, name)
        request_data = _make_request_data(project_id, name, len(data_str),
                                          file_name)
        file_data = (file_name, data, NDJSON_MIME_TYPE)
        response_data = _send_create_file_command(client,
                                                  request_data=request_data,
                                                  file_name=file_name,
                                                  file_data=file_data)

        return cls(client, response_data["createBulkImportRequest"])
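
A minimal usage sketch for the classmethod above (the client and project id are placeholders; the prediction dict reuses the ids from the docstring):

    predictions = [{
        "uuid": "9fd9a92e-2560-4e77-81d4-b2e955800092",
        "schemaId": "ckappz7d700gn0zbocmqkwd9i",
        "dataRow": {"id": "ck1s02fqxm8fi0757f0e6qtdc"},
        "bbox": {"top": 48, "left": 58, "height": 865, "width": 1512}
    }]
    request = BulkImportRequest.create_from_objects(
        client, project_id="<project-id>", name="my-import",
        predictions=predictions)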
Code Example #6
def json_to_gcs(path, json_object, bucket_name):
    blob = storage.Blob(
        name=path,
        bucket=storage_client.get_bucket(bucket_name),
    )
    blob.upload_from_string(
        # dataflow needs newline-delimited json, so use ndjson
        data=ndjson.dumps(json_object),
        content_type='application/json',
        client=storage_client,
    )
    logging.info('Successfully uploaded blob %r to bucket %r.', path,
                 bucket_name)
Code Example #7
def json_to_gcs(path, json_object_list, bucket_name):
    """
    take list of dicts in memory and upload to GCS as newline JSON
    """
    blob = storage.Blob(
        name=path,
        bucket=storage_client.get_bucket(bucket_name),
    )
    blob.upload_from_string(
        # dataflow needs newline-delimited json, so use ndjson
        data=ndjson.dumps(json_object_list),
        content_type='application/json',
        client=storage_client,
    )
    logging.info('Successfully uploaded blob %r to bucket %r.', path,
                 bucket_name)

    print('Successfully uploaded blob {} to bucket {}'.format(
        path, bucket_name))
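
A hypothetical call, assuming storage_client is an authenticated google.cloud.storage.Client defined at module level and the bucket already exists:

    json_to_gcs('exports/records.ndjson', [{"id": 1}, {"id": 2}], 'my-bucket')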
Code Example #8
def test_wait_till_done(rectangle_inference, configured_project):
    name = str(uuid.uuid4())
    url = configured_project.client.upload_data(content=ndjson.dumps(
        [rectangle_inference]),
                                                sign=True)
    bulk_import_request = configured_project.upload_annotations(
        name=name, annotations=url, validate=False)

    assert len(bulk_import_request.inputs) == 1
    bulk_import_request.wait_until_done()
    assert bulk_import_request.state == BulkImportRequestState.FINISHED

    # Check that the status files are being returned as expected
    assert len(bulk_import_request.errors) == 0
    assert len(bulk_import_request.inputs) == 1
    assert bulk_import_request.inputs[0]['uuid'] == rectangle_inference['uuid']
    assert len(bulk_import_request.statuses) == 1
    assert bulk_import_request.statuses[0]['status'] == 'SUCCESS'
    assert bulk_import_request.statuses[0]['uuid'] == rectangle_inference['uuid']
Code Example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('seq_fn')
    parser.add_argument('--format', default='fasta')
    parser.add_argument('--sleep', default=1, type=int,
        help="Time to wait between queries (sec)")
    parser.add_argument('--max_per_query', default=10, type=int)

    args = parser.parse_args()

    gen = cast_to_seq(Bio.SeqIO.parse(args.seq_fn, args.format))

    for batch in chunk(gen, args.max_per_query):
        data = query_interpro(batch)
        # print output as NDJSON
        print(ndjson.dumps(data))

        time.sleep(args.sleep)

    return
Code Example #10
def search_ndjson(request, database_name, collection_name,
                  skip=0, limit=getattr(settings, 'MONGO_LIMIT', 200),
                  sort=None, return_keys=(),
                  query=None):
    # use None instead of a mutable {} default to avoid state shared between calls
    if query is None:
        query = {}
    result = prepare_search_results(
        request,
        database_name=database_name,
        collection_name=collection_name,
        skip=skip,
        sort=sort,
        limit=limit,
        return_keys=return_keys,
        query=query)

    if int(result['code']) == 200:
        return HttpResponse(ndjson.dumps(result["results"]),
                            status=int(result['code']),
                            content_type="application/x-ndjson")
    else:
        response = json.dumps(result, indent=4)
        return HttpResponse(response, status=int(result['code']),
                            content_type="application/json")
Code Example #11
File: event_rule.py  Project: reflexsoar/reflex-api
    def get(self, current_user):
        '''
        Takes a list of organizations and exports all the Event Rules for the
        supplied organizations as NDJSON. If no organizations are provided, just
        dump the rules for the user's current organization.
        '''

        args = export_rule_parser.parse_args()

        event_rules = EventRule.search()

        if args.organizations:
            # Assumption: filter to the requested organizations. The original code
            # only printed args.organizations here, but the docstring says rules for
            # the supplied organizations should be exported.
            event_rules = event_rules.filter('terms', organization=args.organizations)
        else:
            event_rules = event_rules.filter('term', organization=current_user.organization)

        event_rules = event_rules.scan()

        output = ndjson.dumps([marshal(e, mod_event_rule_list) for e in event_rules])
        resp = Response(output, headers={
            'Content-Type': 'application/x-ndjson',
            'Content-disposition': 'attachment; filename=event_rules.ndjson'})

        return resp
Code Example #12
def __assert_file_content(url: str):
    response = requests.get(url)
    assert response.text == ndjson.dumps(PREDICTIONS)
Code Example #13
def assert_file_content(url: str, predictions):
    response = requests.get(url)
    assert response.text == ndjson.dumps(predictions)
Code Example #14
 def print_ndjson(self):
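     # emits each top-level key/value pair of the JSON dict as its own NDJSON line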
     print(ndjson.dumps({k: v} for k, v in self.data_as_json().items()))
Code Example #15
 def msearch(self, json_queries, *args, **kwargs):
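     # Elasticsearch's _msearch endpoint expects NDJSON terminated by a newline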
     r = self.session.get(self.host + "/_msearch",
                          headers={"Content-Type": "application/x-ndjson"},
                          data=ndjson.dumps(json_queries) + "\n")
     return r.json()
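
For _msearch, json_queries alternates header and body objects, e.g. [{"index": "logs"}, {"query": {"match_all": {}}}] for a single search against a hypothetical logs index.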
Code Example #16
def test_dumps(text):
    objects = _create_list(text)
    assert ndjson.dumps(objects, sort_keys=True) == text
Code Example #17
def export_to_ndjson(records: List[CallRecord], filepath: str) -> None:
    ndjson_records = ndjson.dumps([r.dict() for r in records])
    with open(filepath, "w") as export_file:
        export_file.write(ndjson_records)
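
A minimal sketch of a caller, assuming CallRecord is a pydantic model (the .dict() call suggests one):

    from pydantic import BaseModel

    class CallRecord(BaseModel):
        caller: str
        duration_sec: int

    export_to_ndjson([CallRecord(caller="alice", duration_sec=42)], "calls.ndjson")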