def diarized_transcribe(gcred, gcs_uri, speakercount):
    os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = gcred

    client = speech_v1p1beta1.SpeechClient()
    audio = beta_types.RecognitionAudio(uri=gcs_uri)
    config = beta_types.RecognitionConfig(
        encoding=beta_enums.RecognitionConfig.AudioEncoding.FLAC,
        language_code='en-US',
        enable_speaker_diarization=True,
        diarization_speaker_count=speakercount,
        enable_word_time_offsets=True,
        model='video',
        enable_automatic_punctuation=True)

    operation = client.long_running_recognize(config, audio)
    response = operation.result(timeout=3600)

    transcript = MessageToDict(response)
    # With diarization enabled, the final result contains the complete word
    # list, each word tagged with its speaker; drill down to that list.
    transcript = transcript.get('results')
    transcript = transcript.pop()
    transcript = transcript.get('alternatives')
    transcript = transcript.pop()
    transcript = transcript.get('words')
    return transcript
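# A minimal usage sketch for diarized_transcribe above, assuming the audio is
# already uploaded to GCS; the credentials path, bucket URI, and speaker count
# are placeholders. Each word dict carries 'word', 'startTime', 'endTime', and
# 'speakerTag' (camelCase, since preserving_proto_field_name is not set).
def group_words_by_speaker(words):
    turns = []
    for word in words:
        tag = word.get('speakerTag', 0)
        if turns and turns[-1][0] == tag:
            turns[-1][1].append(word['word'])
        else:
            turns.append((tag, [word['word']]))
    return [(tag, ' '.join(tokens)) for tag, tokens in turns]

# words = diarized_transcribe('gcp-creds.json', 'gs://my-bucket/meeting.flac', 2)
# for speaker, text in group_words_by_speaker(words):
#     print(f"Speaker {speaker}: {text}")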
def detect_text(self):
    if self.preview_data and self.preview_data["thumbnails"] and credentials:
        client = vision.ImageAnnotatorClient(credentials=credentials)
        results = []
        pages = []
        for thumbnail in self.preview_data["thumbnails"]:
            image = vision.types.Image()
            image.source.image_uri = thumbnail["url"]
            response = client.document_text_detection(image=image)
            result = MessageToDict(response)
            result["page"] = thumbnail["page"]
            results.append(result)
            full_text_annotation = result.get("fullTextAnnotation")
            text_annotations = result.get("textAnnotations")
            if full_text_annotation:
                pages.append({
                    "number": result["page"],
                    "text": full_text_annotation["text"]
                })
            elif text_annotations:
                pages.append({
                    "number": result["page"],
                    "text": text_annotations[0]["description"],
                })
        self.pages = pages
        self.update_vision_data(results)
        self.save(update_fields=["vision_data_file", "pages"])
async def deserialize_asset(REST_API_URL, address):
    logging.info(f"Now deserializing asset present on {address}")
    # decode the data stored on the blockchain at this address
    state_data = await address_state(REST_API_URL, address)
    if not state_data:
        logging.error(f"No asset data present corresponding to {address}")
        return False

    acc = asset_pb2.Asset()
    acc.ParseFromString(state_data)
    asset = MessageToDict(acc, preserving_proto_field_name=True)

    # If ownership_received is falsy, the asset was created by the owner or
    # its child but has not yet been transferred to anyone else.
    if not asset.get("ownership_received"):
        data = {
            "ownership_received": None,
            "received_on": None,
            "parent_address": None,
            "issuer_child_zero_pub": None
        }
        asset.update(data)
    if not asset.get("child_zero_pub"):
        asset.update({"child_zero_pub": None})
    asset.update({"address": address})
    return asset
def record_step_best(self, step_best, host_mask, guest_mask, data_instances, model):
    metas = {
        "host_mask": host_mask.tolist(),
        "guest_mask": guest_mask.tolist(),
        "score_name": self.score_name
    }
    metas["number_in"] = int(sum(host_mask) + sum(guest_mask))
    metas["direction"] = self.direction
    metas["n_count"] = int(self.n_count)

    host_party_id = model.component_properties.host_party_idlist[0]
    guest_party_id = model.component_properties.guest_partyid
    metas["host_features_anonym"] = [
        f"host_{host_party_id}_{i}" for i in range(len(host_mask))
    ]
    metas["guest_features_anonym"] = [
        f"guest_{guest_party_id}_{i}" for i in range(len(guest_mask))
    ]

    model_info = self.models_trained[step_best]
    loss = model_info.get_loss()
    ic_val = model_info.get_score()
    metas["loss"] = loss
    metas["current_ic_val"] = ic_val
    metas["fit_intercept"] = model.fit_intercept

    model_key = model_info.get_key()
    model_dict = self._get_model(model_key)

    if self.role != consts.ARBITER:
        all_features = data_instances.schema.get('header')
        metas["all_features"] = all_features
        metas["to_enter"] = self.get_to_enter(host_mask, guest_mask, all_features)
        model_param = list(model_dict.get('model').values())[0].get(
            model.model_param_name)
        param_dict = MessageToDict(model_param)
        metas["intercept"] = param_dict.get("intercept", None)
        metas["weight"] = param_dict.get("weight", {})
        metas["header"] = param_dict.get("header", [])
        if self.n_step == 0 and self.direction == "forward":
            metas["intercept"] = self.intercept

    metric_name = f"stepwise_{self.n_step}"
    metric = [Metric(metric_name, float(self.n_step))]
    model.callback_metric(metric_name=metric_name,
                          metric_namespace=self.metric_namespace,
                          metric_data=metric)
    model.tracker.set_metric_meta(metric_name=metric_name,
                                  metric_namespace=self.metric_namespace,
                                  metric_meta=MetricMeta(name=metric_name,
                                                         metric_type=self.metric_type,
                                                         extra_metas=metas))
    LOGGER.info(f"metric_name: {metric_name}, metas: {metas}")
    return
def pull_wmata_bus():
    ssl._create_default_https_context = ssl._create_unverified_context
    headers = {
        # Request headers
        'api_key': 'acf1010abac8437dbeb5f9ed3699169b',
    }

    # Trip updates feed
    tripfeed = gtfs_realtime_pb2.FeedMessage()
    url = 'https://api.wmata.com/gtfs/bus-gtfsrt-tripupdates.pb'
    request = urllib.request.Request(url, headers=headers)
    raw = urllib.request.urlopen(request)
    tripfeed.ParseFromString(raw.read())
    dict_obj_trip = MessageToDict(tripfeed)
    if not dict_obj_trip.get('entity'):
        return None

    collector = []
    for message in dict_obj_trip['entity']:
        row = OrderedDict()
        row['trip_id'] = message['tripUpdate']['trip']['tripId']
        row['route_id'] = message['tripUpdate']['trip']['routeId']
        row['vehicle_id'] = message['tripUpdate']['vehicle']['id']
        row['delay'] = message['tripUpdate'].get('delay')
        collector.append(row)
    df_trip = pd.DataFrame(collector)

    # Vehicle positions feed
    busfeed = gtfs_realtime_pb2.FeedMessage()
    url = 'https://api.wmata.com/gtfs/bus-gtfsrt-vehiclepositions.pb'
    request = urllib.request.Request(url, headers=headers)
    raw = urllib.request.urlopen(request)
    busfeed.ParseFromString(raw.read())
    dict_obj_bus = MessageToDict(busfeed)
    if not dict_obj_bus.get('entity'):
        return None

    collector = []
    for message in dict_obj_bus['entity']:
        row = OrderedDict()
        row['vehicle_id'] = message['id']
        row['route_id'] = message['vehicle']['trip']['routeId']
        row['trip_id'] = message['vehicle']['trip']['tripId']
        collector.append(row)
    df_bus = pd.DataFrame(collector)

    df_merged = df_bus.merge(df_trip,
                             on=['trip_id', 'route_id', 'vehicle_id'],
                             how='left')
    df_merged['mode'] = "Bus"
    df_merged['agency'] = "WMATA"
    df_merged['region'] = "Washington"
    return df_merged
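# A usage sketch for pull_wmata_bus above: poll the feed on an interval and
# stack the snapshots into one frame. The interval and duration are
# assumptions; pd follows the function's own pandas dependency.
import time

def poll_wmata(minutes=5, interval=30):
    frames = []
    for _ in range(int(minutes * 60 / interval)):
        snapshot = pull_wmata_bus()
        if snapshot is not None:
            snapshot['pulled_at'] = pd.Timestamp.utcnow()
            frames.append(snapshot)
        time.sleep(interval)
    return pd.concat(frames, ignore_index=True) if frames else None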
def test_modify_member_zone_labels(self):
    self.test_create_zone()
    self._create_user()

    param = {
        'zone_id': self.zone.zone_id,
        'user_id': self.user.user_id,
        'domain_id': self.domain.domain_id
    }
    labels = ['developer', 'operator', 'operator']
    self.inventory_v1.Zone.add_member(param,
                                      metadata=(('token', self.token), ))

    param = {
        'zone_id': self.zone.zone_id,
        'user_id': self.user.user_id,
        'domain_id': self.domain.domain_id,
        'labels': labels
    }
    zone_member = self.inventory_v1.Zone.modify_member(
        param, metadata=(('token', self.token), ))
    print(zone_member.labels)

    user_info = MessageToDict(zone_member.user_info)
    self.assertEqual(user_info.get('user_id'), self.user.user_id)
def _parse_train_lineage(self, train_lineage):
    """
    Parse train lineage.

    Args:
        train_lineage (Event): Train lineage.
    """
    if train_lineage is None:
        self._lineage_info[self._name_model] = {}
        self._lineage_info[self._name_algorithm] = {}
        self._lineage_info[self._name_hyper_parameters] = {}
        self._lineage_info[self._name_train_dataset] = {}
        return

    event_dict = MessageToDict(train_lineage,
                               preserving_proto_field_name=True)
    train_dict = event_dict.get(self._name_train_lineage)
    if train_dict is None:
        raise LineageEventFieldNotExistException(self._name_train_lineage)

    # MessageToDict converts int64 fields to strings, so cast the model
    # size back to a Python int.
    if train_dict.get(self._name_model):
        model_size = train_dict.get(self._name_model).get('size')
        if model_size:
            train_dict[self._name_model]['size'] = int(model_size)
    self._lineage_info.update(**train_dict)
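# The int(...) cast above exists because json_format renders proto (u)int64
# fields as JSON strings, to avoid precision loss in JSON consumers. A small
# defensive helper for the same pattern (hypothetical, for illustration):
def coerce_int64_fields(d, *keys):
    """Cast string-encoded int64 values back to Python ints, in place."""
    for key in keys:
        if d.get(key) is not None:
            d[key] = int(d[key])
    return d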
def to_wavefunction_properties(
        job_output: pb.JobOutput,
        atomic_input: AtomicInput) -> WavefunctionProperties:
    """Extract WavefunctionProperties from JobOutput protobuf message"""
    jo_dict = MessageToDict(job_output, preserving_proto_field_name=True)
    return WavefunctionProperties(
        basis=BasisSet(
            name=atomic_input.model.basis,
            center_data={},  # TODO: need to fill out
            atom_map=[],  # TODO: need to fill out
        ),
        restricted=atomic_input.keywords.get("restricted", True),
        scf_eigenvalues_a=jo_dict.get("orba_energies"),
        scf_occupations_a=jo_dict.get("orba_occupations"),
        scf_eigenvalues_b=jo_dict.get("orbb_energies", []),
        scf_occupations_b=jo_dict.get("orbb_occupations", []),
    )
async def play(self, ws, observation):
    success = True
    request_data = api.Request(
        data=api.RequestData(ability_id=True, unit_type_id=True, upgrade_id=True)
    )
    await asyncio.wait_for(ws.send(request_data.SerializeToString()), 5)
    try:
        result = await asyncio.wait_for(ws.recv(), 5)
        data_response = api.Response.FromString(result)
        game_data = data_response.data

        request_game_info = api.Request(game_info=api.RequestGameInfo())
        await asyncio.wait_for(ws.send(request_game_info.SerializeToString()), 5)
        result = await asyncio.wait_for(ws.recv(), 5)
        game_info_response = api.Response.FromString(result)

        # If the game is still on
        if game_data.units:
            obj = decode_observation(observation.observation.observation,
                                     game_data, game_info_response)
            # MessageToDict already yields plain JSON-compatible types, so no
            # string round-trip through json.loads is needed.
            obs = MessageToDict(observation)

            game_meta = api.Request(game_info=api.RequestGameInfo())
            await ws.send(game_meta.SerializeToString())
            result = await ws.recv()
            game_meta = MessageToDict(api.Response.FromString(result))
            game_meta = game_meta.get("gameInfo", {})

            # Keep only the map-geometry fields needed downstream.
            for key in ("modNames", "options", "mapName", "localMapPath",
                        "playerInfo"):
                game_meta.pop(key, None)
            game_meta.update(game_meta["startRaw"])
            game_meta.pop("startRaw")
            game_meta.pop("mapSize")

            await self.process_step(ws, obj, raw=(obs, game_meta, game_data))
            # function = self.decision_function
            # available_actions = self.query_available_actions()
            # to_do_action = function(observation, available_actions)
            # while to_do_action and available_actions:
            #     self.send_order(self, to_do_action)
            #     to_do_action = self.query_available_actions()
    except asyncio.TimeoutError:
        return False
    return True
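# Why the string round-trip was unnecessary: MessageToDict already returns
# plain JSON types (dict/list/str/float/bool), so its output can be handed
# straight to json.dumps/json.loads. A self-contained demonstration using the
# well-known Struct type:
import json
from google.protobuf import struct_pb2
from google.protobuf.json_format import MessageToDict, ParseDict

msg = ParseDict({"ok": True, "hp": 42.0}, struct_pb2.Struct())
print(json.dumps(MessageToDict(msg)))  # {"ok": true, "hp": 42.0}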
def get_provider(self, secret_id, domain_id):
    """
    Return: provider in secret
    """
    secret_connector = self.locator.get_connector('SecretConnector')
    secret = secret_connector.get_secret(secret_id, domain_id)
    secret_dict = MessageToDict(secret, preserving_proto_field_name=True)
    _LOGGER.debug(f'[get_provider] secret: {secret}')
    return secret_dict.get('provider', None)
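# What preserving_proto_field_name=True changes: without it, json_format
# converts field names to lowerCamelCase. A quick self-contained check using
# a descriptor proto, which has a snake_case field:
from google.protobuf import descriptor_pb2
from google.protobuf.json_format import MessageToDict

fd = descriptor_pb2.FieldDescriptorProto(name="x", json_name="myX")
print(MessageToDict(fd))                                    # {'name': 'x', 'jsonName': 'myX'}
print(MessageToDict(fd, preserving_proto_field_name=True))  # {'name': 'x', 'json_name': 'myX'}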
def detect_intent_text(project_id, session_id, text, language_code):
    session_client = dialogflow.SessionsClient()
    session = session_client.session_path(project_id, session_id)
    text_input = dialogflow.types.TextInput(text=text,
                                            language_code=language_code)
    query_input = dialogflow.types.QueryInput(text=text_input)
    response = session_client.detect_intent(session=session,
                                            query_input=query_input)
    response_content = MessageToDict(response.query_result)
    return {
        'expense_account':
            response_content.get('fulfillmentMessages')[0].get('payload').get(
                'expense_account'),
        'params': response_content.get('parameters'),
        'query_text': response_content.get('queryText')
    }
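# The chained .get(...)[0].get(...) above raises if the matched intent has no
# fulfillment payload; a defensive variant of the same extraction
# (illustrative, not part of the original):
def safe_expense_account(response_content):
    messages = response_content.get('fulfillmentMessages') or [{}]
    return (messages[0].get('payload') or {}).get('expense_account')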
def get_hand_labels(hand_process_results, labels_array):
    if hand_process_results.multi_handedness is not None:
        for idx, hand_handedness in enumerate(
                hand_process_results.multi_handedness):
            handedness_dict = MessageToDict(hand_handedness)
            class_array = handedness_dict.get('classification')
            if class_array is not None:
                for item in class_array:
                    labels_array.append(item.get('label'))
    return labels_array
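# A usage sketch for get_hand_labels above with MediaPipe Hands; the image
# path is a placeholder. Each classification item holds 'index', 'score',
# and 'label' ("Left"/"Right").
import cv2
import mediapipe as mp

hands = mp.solutions.hands.Hands(static_image_mode=True, max_num_hands=2)
frame = cv2.imread("hand.jpg")
results = hands.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
print(get_hand_labels(results, []))  # e.g. ['Left', 'Right']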
def embedded_entity_to_dict(embedded_entity, data):
    ep = entity_pb2.EntityProto()
    ep.ParseFromString(embedded_entity)
    d = MessageToDict(ep)
    for entry in d.get("rawProperty", []):
        name = entry.get("name")
        value = entry.get("value")
        if entry.get("meaning") == "ENTITY_PROTO":
            dt = {}
            data[name] = embedded_entity_to_dict(get_value(value, raw=True), dt)
        else:
            data[name] = get_value(value)
    return data
async def observe_replay(self, step=24, id=0):
    previous = None
    game_units_by_tag = {}
    while self.status == "started" or self.status == "replay":
        async with websockets.connect(
                "ws://{0}:{1}/sc2api".format(self.host.address, self.host.port),
                ping_interval=1,
                ping_timeout=1,
                close_timeout=1) as ws:
            try:
                request_payload = api.Request()
                request_payload.observation.disable_fog = False
                await asyncio.wait_for(
                    ws.send(request_payload.SerializeToString()), timeout=1)
                result = await asyncio.wait_for(ws.recv(), timeout=1)
                # MessageToDict already returns JSON-compatible types; no
                # string round-trip through json.loads is required.
                obs = MessageToDict(api.Response.FromString(result))
                if obs.get("observation", {}).get("observation", {}):
                    game_units_by_tag.update(units_by_tag(obs, self.game_info))
                    actual = obs_to_case_replay(obs, self.replay_info,
                                                self.game_info,
                                                game_units_by_tag)
                    if previous:
                        previous["actions"] = actual["actions"]
                        previous["observation"].pop("playerId")
                        yield previous
                        print(previous["observation"]["loop"])
                    previous = actual
                request_payload = api.Request()
                request_payload.step.count = step
                await asyncio.wait_for(
                    ws.send(request_payload.SerializeToString()), timeout=1)
                result = await asyncio.wait_for(ws.recv(), timeout=1)
                response = api.Response.FromString(result)
                if response.status == 4:
                    self.status = "replay"
                else:
                    self.status = "finished"
            except Exception:
                print(traceback.format_exc())
                continue
    self.host.status = "idle"
async def deserialize_float_account(REST_API_URL, address):
    # decode the data stored on the blockchain at this address
    state_data = await address_state(REST_API_URL, address)
    flt_acc = float_account_pb2.FloatAccount()
    flt_acc.ParseFromString(state_data)
    float_account = MessageToDict(flt_acc, preserving_proto_field_name=True)
    float_account.update({"address": address})

    # Handle accounts which haven't been claimed yet
    if float_account.get("claimed_by"):
        account_address = addresser.create_organization_account_address(
            float_account["claimed_by"], 0)
    else:
        account_address = None
        data = {"claimed": None, "claimed_by": None, "claimed_on": None}
        float_account.update(data)

    float_account.update({"account_address": account_address})
    if not float_account.get("child_zero_pub"):
        float_account.update({"child_zero_pub": None})
    return float_account
def test_add_member_zone(self):
    self.test_create_zone()
    self._create_user()

    param = {
        'zone_id': self.zone.zone_id,
        'user_id': self.user.user_id,
        'domain_id': self.domain.domain_id
    }
    zone_admin = self.inventory_v1.Zone.add_member(
        param, metadata=(('token', self.token), ))

    user_info = MessageToDict(zone_admin.user_info)
    self.assertEqual(user_info.get('user_id'), self.user.user_id)
def get_matching_result(self, picture_path_list):
    if picture_path_list is None:
        print("Error: picture_path_list is None")
        return None
    request_data = template_matcning_pb2.Matching(
        picture_file_list=picture_path_list)
    try:
        res = self.stub.get_matching_result(request_data)
        data = MessageToDict(res.data_dict)
    except grpc.RpcError as e:
        lprint_exception(e)
        return None
    else:
        return data.get('data')
def embedded_entity_to_dict(embedded_entity, data):
    ep = entity_pb2.EntityProto()
    ep.ParseFromString(embedded_entity)
    d = MessageToDict(ep)
    for entry in d.get('rawProperty', []):
        if 'meaning' in entry and entry['meaning'] == "ENTITY_PROTO":
            dt = {}
            data[entry['name']] = embedded_entity_to_dict(
                base64.b64decode(entry['value']['stringValue']), dt)
        else:
            if entry['value']:
                data[entry['name']] = base64.b64decode(
                    entry['value']['stringValue']).decode('utf-8')
            else:
                data[entry['name']] = None
    return data
def filter_feed_for_delays(self):
    output = set()
    for feed_entity in self.feed.entity:
        # convert the feed entity to a dictionary
        entity = MessageToDict(feed_entity)
        # attempt to read the alert status from the entity
        status = entity.get('alert') and \
            entity['alert'].get('headerText') and \
            entity['alert']['headerText'].get('translation') and \
            entity['alert']['headerText']['translation'][0].get('text')
        # if the status is "Delays", collect the affected routes (trains)
        if status == "Delays" and 'informedEntity' in entity['alert']:
            for informedEntity in entity['alert']['informedEntity']:
                if 'routeId' in informedEntity:
                    output.add(informedEntity['routeId'])
    return output
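# A usage sketch for filter_feed_for_delays above, assuming it lives on a
# class holding a parsed FeedMessage in self.feed; the feed URL and class
# name are placeholders (real alert feeds usually require an API key).
import urllib.request
from google.transit import gtfs_realtime_pb2

feed = gtfs_realtime_pb2.FeedMessage()
with urllib.request.urlopen("https://example.com/gtfs-rt/alerts.pb") as resp:
    feed.ParseFromString(resp.read())
# delayed_routes = AlertFeed(feed).filter_feed_for_delays()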
def test_add_member_pool(self):
    self.test_create_pool()
    self._create_user()

    param = {
        'pool_id': self.pool.pool_id,
        'user_id': self.user.user_id,
        'domain_id': self.domain.domain_id
    }
    pool_member = self.inventory_v1.Pool.add_member(
        param, metadata=(('token', self.token), ))

    user_info = MessageToDict(pool_member.user_info)
    self.assertEqual(user_info.get('user_id'), self.user.user_id)
def print_result(response, output):
    logging.info("Inference result {}".format(response.ack_sequence_number))
    for inference in response.media_sample.inferences:
        tag = inference.entity.tag
        box = inference.entity.box
        attributes = []
        for attribute in inference.entity.attributes:
            attribute_string = "{}: {}".format(attribute.name, attribute.value)
            attributes.append(attribute_string)
        logging.info("- {} ({:.2f}) [{:.2f}, {:.2f}, {:.2f}, {:.2f}] {}".format(
            tag.value, tag.confidence, box.l, box.t, box.w, box.h, attributes))

    response_dict = MessageToDict(response.media_sample)
    if response_dict.get("inferences"):
        for inference in response_dict["inferences"]:
            inference["type"] = inference["type"].lower()
    output.write("{}\n".format(json.dumps(response_dict)))
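# A usage sketch for print_result above: stream every response from a gRPC
# result iterator into a JSON-lines file (the iterator name is illustrative).
def write_inferences(responses, path="inferences.jsonl"):
    with open(path, "w") as output:
        for response in responses:
            print_result(response, output)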
def _check_plugin_state(self, plugins, params):
    """
    Check each plugin's state; if the state is RE_PROVISIONING or ERROR,
    reinstall the plugin, then delete the stale entry.
    """
    for plugin in plugins:
        dict_plugin = MessageToDict(plugin, preserving_proto_field_name=True)
        dict_plugin.update(params)
        state = dict_plugin.get('state', None)
        _LOGGER.debug(f'[_check_plugin_state] plugin_info: {dict_plugin}')
        if state == 'RE_PROVISIONING' or state == 'ERROR':
            _LOGGER.debug(f'[_check_plugin_state] params: {params}')
            self.install_plugin(dict_plugin)
            delete_params = {
                'plugin_id': dict_plugin['plugin_id'],
                'version': dict_plugin['version'],
                'domain_id': dict_plugin['domain_id']
            }
            self.delete_plugin(delete_params)
def __init__(self, config_proto):
    config_dict = MessageToDict(config_proto,
                                including_default_value_fields=True,
                                preserving_proto_field_name=True)
    self.gpio = [GpioConfig(v) for v in config_dict.get('gpio', [])]
    self.timer = [TimerConfig(v) for v in config_dict.get('timer', [])]
    self.pwm = [PwmConfig(v) for v in config_dict.get('pwm', [])]
    self.encoder = [EncoderConfig(v) for v in config_dict.get('encoder', [])]
    self.usart = [UsartConfig(v) for v in config_dict.get('uart', [])]
    self.i2c = [I2cConfig(v) for v in config_dict.get('i2c', [])]
    self.spi = [SpiConfig(v) for v in config_dict.get('spi', [])]
def _parse_evaluation_lineage(self, evaluation_lineage):
    """
    Parse evaluation lineage.

    Args:
        evaluation_lineage (Event): Evaluation lineage.
    """
    if evaluation_lineage is None:
        return

    event_dict = MessageToDict(evaluation_lineage,
                               preserving_proto_field_name=True)
    evaluation_dict = event_dict.get(self._name_evaluation_lineage)
    if evaluation_dict is None:
        raise LineageEventFieldNotExistException(self._name_evaluation_lineage)
    self._lineage_info.update(**evaluation_dict)

    metric = self._lineage_info.get(self._name_metric)
    self._lineage_info[self._name_metric] = json.loads(metric) if metric else {}
def do_create_volume(self, log, nvmesh_vol_name, request):
    # UNUSED - secrets = request.secrets
    # UNUSED - volume_content_source = request.volume_content_source
    reqDict = MessageToDict(request)
    reqJson = MessageToJson(request)
    log.debug('request: {}'.format(reqJson))

    capacity = self._parse_required_capacity(request.capacity_range)
    csi_metadata = self._build_metadata_field(reqDict)
    nvmesh_params = self._handle_volume_req_parameters(reqDict, log)
    topology_requirements = reqDict.get('accessibilityRequirements')

    volume = NVMeshVolume(name=nvmesh_vol_name,
                          capacity=capacity,
                          csi_metadata=csi_metadata,
                          **nvmesh_params)
    allowed_zones = TopologyUtils.get_allowed_zones_from_topology(
        topology_requirements)
    log.debug('Allowed zones: %s' % allowed_zones)
    zone = self.create_volume_on_a_valid_zone(volume, allowed_zones, log)

    # We return zone:nvmesh_vol_name to the CO; all subsequent requests for
    # this volume will carry a volume_id of zone:nvmesh_vol_name.
    volume_id_for_co = Utils.nvmesh_vol_name_to_co_id(nvmesh_vol_name, zone)

    topology_key = TopologyUtils.get_topology_key()
    volume_topology = Topology(segments={topology_key: zone})

    # The volume context will be returned by the CO in NodeStageVolume and
    # NodePublishVolume; include all fields from the StorageClass parameters.
    volume_context = {}
    volume_context.update(reqDict['parameters'])

    csiVolume = Volume(volume_id=volume_id_for_co,
                       capacity_bytes=capacity,
                       accessible_topology=[volume_topology],
                       volume_context=volume_context)
    return csiVolume
async def get_one_page_bullet(self, params: dict, page: int):
    result = DanmakuData()
    data_api_v2 = "https://api.bilibili.com/x/v2/dm/web/seg.so"
    params = params.copy()
    params.update({"segment_index": page})
    resp = await self.get(data_api_v2, params=params)
    if not resp or resp.status != 200:
        return result

    data = await resp.read()
    pb2 = danmaku_pb2.DanmakuData()
    pb2.ParseFromString(data)
    data = MessageToDict(pb2)

    # Bilibili modes: 1 scrolling, 4 bottom, 5 top, 6 reverse scrolling
    # DPlayer modes:  0 scrolling, 1 top, 2 bottom
    pos_fix = {1: 0, 5: 1, 4: 2, 6: 0}
    for bullet in data.get("bullet", []):  # this page may have no bullets
        result.append_bullet(
            time=bullet.get("progress", 0) / 1000,
            pos=pos_fix.get(bullet["mode"], 0),
            color=bullet.get("color", 16777215),
            message=bullet.get("content", "")
        )
    return result
def dlf_and_localdb_sync():
    Intents.objects.all().delete()
    for page in client.list_intents(parent).pages:
        for raw_element in page:
            element = MessageToDict(raw_element)
            intents_db = Intents(
                name=element.get('name'),
                display_name=element.get('displayName'),
                priority=element.get('priority')
            )
            intents_db.save()

            input_contexts_list = element.get('inputContextNames')
            if input_contexts_list is not None:
                for input_context in input_contexts_list:
                    input_contexts_db = InputContexts(
                        name=input_context,
                        display_name=input_context.replace(contexts_path, ''),
                        input_context=intents_db
                    )
                    input_contexts_db.save()

            output_contexts_list = element.get('outputContexts')
            if output_contexts_list is not None:
                for output_context in output_contexts_list:
                    output_contexts_db = OutputContexts(
                        name=output_context.get('name'),
                        display_name=output_context.get('name').replace(contexts_path, ''),
                        lifespan_count=output_context.get('lifespanCount'),
                        output_context=intents_db
                    )
                    output_contexts_db.save()

            params_list = element.get('parameters')
            if params_list is not None:
                for param in params_list:
                    params_db = Parameters(
                        name=param.get('name'),
                        display_name=param.get('displayName'),
                        value=param.get('value'),
                        default_value=param.get('defaultValue'),
                        entity_type_display_name=param.get('entityTypeDisplayName'),
                        parameters=intents_db
                    )
                    params_db.save()
def job_output_to_atomic_result(*, atomic_input: AtomicInput,
                                job_output: pb.JobOutput) -> AtomicResult:
    """Convert JobOutput to AtomicResult"""
    # Convert job_output to python types
    # NOTE: Required so that AtomicResult is JSON serializable. Protobuf types are not.
    jo_dict = MessageToDict(job_output, preserving_proto_field_name=True)

    if atomic_input.driver.upper() == "ENERGY":
        # Select first element in list (ground state); may need to modify for
        # excited states
        return_result: Union[float, List[float]] = jo_dict["energy"][0]
    elif atomic_input.driver.upper() == "GRADIENT":
        return_result = jo_dict["gradient"]
    else:
        raise ValueError(
            f"Unsupported driver: {atomic_input.driver.upper()}, supported drivers "
            f"include: {SUPPORTED_DRIVERS}")

    if atomic_input.keywords.get("molden"):
        # Molden file was requested
        try:
            molden_string = tcpb_imd_fields2molden_string(job_output)
        except Exception:
            # Don't know how this code will blow up, so except everything for now :/
            # NOTE: mo_output will set imd_orbital_type to "WHOLE_C"
            molden_string = ("Unable to create molden output. "
                             "Did you include the 'mo_output' keyword?")
    else:
        molden_string = None

    # Prepare AtomicInput to be base input for AtomicResult
    atomic_input_dict = atomic_input.dict()
    atomic_input_dict.pop("provenance", None)

    # Create AtomicResult as superset of AtomicInput values
    atomic_result = AtomicResult(
        **atomic_input_dict,
        # Create new provenance object
        provenance=Provenance(
            creator="terachem_pbs",
            version="1.9-2021.01-dev",
            routine="tcpb.TCProtobufClient.compute",
        ),
        return_result=return_result,
        properties=to_atomic_result_properties(job_output),
        # NOTE: Wavefunction will only be added if
        # atomic_input.protocols.wavefunction != 'none'
        wavefunction=to_wavefunction_properties(job_output, atomic_input),
        success=True,
    )

    # And extend extras to include values additional to input extras
    atomic_result.extras.update({
        "qcvars": {
            "charges": jo_dict.get("charges"),
            "spins": jo_dict.get("spins"),
            "meyer_bond_order": jo_dict.get("bond_order"),
            "orb_size": jo_dict.get("orb_size"),
            "excited_state_energies": jo_dict.get("energy"),
            "cis_transition_dipoles": jo_dict.get("cis_transition_dipoles"),
            "compressed_bond_order": jo_dict.get("compressed_bond_order"),
            "compressed_hessian": jo_dict.get("compressed_hessian"),
            "compressed_ao_data": jo_dict.get("compressed_ao_data"),
            "compressed_primitive_data": jo_dict.get("compressed_primitive_data"),
            "compressed_mo_vector": jo_dict.get("compressed_mo_vector"),
            "imd_mmatom_gradient": jo_dict.get("imd_mmatom_gradient"),
        },
        "job_extras": {
            "job_dir": jo_dict.get("job_dir"),
            "job_scr_dir": jo_dict.get("job_scr_dir"),
            "server_job_id": jo_dict.get("server_job_id"),
            "orb1afile": jo_dict.get("orb1afile"),
            "orb1bfile": jo_dict.get("orb1bfile"),
        },
        "molden": molden_string,
    })
    return atomic_result
def fetch_valid_tvs(request_context, async_kvstore_client, user=None, tv_ids=None):
    """
    Fetch valid TVs for drone mode.

    :param request_context: request context used to make kvstore requests
    :param async_kvstore_client: async client used to make kvstore requests
    :param user: optional user to filter on (defaults to the current user)
    :param tv_ids: optional device ids to filter on
    """
    # fetch list of all registered tvs
    valid_tv_protos = []
    valid_tv_json = []
    if not user:
        user = request_context.current_user
    tv_list = yield get_registered_tvs(request_context.system_auth_header,
                                       user,
                                       async_kvstore_client,
                                       device_ids=tv_ids)
    # fetch devices from the drone_mode_tvs collection;
    # if there is data, return that, otherwise just return blank TV data
    drone_mode_tvs = yield get_drone_mode_tvs(request_context,
                                              async_kvstore_client,
                                              user=user)
    LOGGER.debug('registered_tvs=%s, drone mode tvs=%s', tv_list, drone_mode_tvs)

    # construct a dict keyed by encoded device id
    drone_mode_tv_dict = {}
    for element in drone_mode_tvs:
        if constants.CONTENT in element:
            raw_id = base64.urlsafe_b64decode(str(element[constants.KEY]))
            encoded_id = b64encode_to_str(raw_id)
            element[constants.DEVICE_ID] = encoded_id
            drone_mode_tv_dict[encoded_id] = element

    if tv_list:
        active_subscriptions = yield fetch_subscriptions(
            request_context.auth_header,
            async_kvstore_client,
            user_list=[user],
            subscription_type=constants.DRONE_MODE_TV,
            device_ids=tv_ids)
        LOGGER.debug('active_subscriptions=%s', active_subscriptions)
        active_subscription_ids = {
            subscription.device_id for subscription in active_subscriptions
        }
        for device in tv_list:
            device_id = device[constants.DEVICE_ID]
            tv_proto = drone_mode_pb2.TVData()
            tv_proto.device_id = device_id
            tv_proto.display_name = device['device_name']
            tv_proto.is_active = device_id in active_subscription_ids
            tv_proto.tv_config.SetInParent()
            if device_id in drone_mode_tv_dict:
                tv_config = TVConfig(**drone_mode_tv_dict[device_id])
                tv_config.set_protobuf(tv_proto.tv_config)
            valid_tv_protos.append(tv_proto)

            json_obj = MessageToDict(tv_proto,
                                     including_default_value_fields=True,
                                     use_integers_for_enums=True,
                                     preserving_proto_field_name=True)
            # since we're storing the user choices and input tokens as blobs,
            # when we deserialize from proto to dict, we need to json.dumps
            # each of those fields
            json_obj[constants.TV_CONFIG][constants.INPUT_TOKENS] = json.dumps(
                json_obj.get(constants.TV_CONFIG, {}).get(constants.INPUT_TOKENS, {}))
            json_obj[constants.TV_CONFIG][constants.USER_CHOICES] = json.dumps(
                json_obj.get(constants.TV_CONFIG, {}).get(constants.USER_CHOICES, {}))
            valid_tv_json.append(json_obj)
    LOGGER.debug(
        'finished fetch valid tvs: valid_tv_protos=%s, valid_tv_json=%s',
        valid_tv_protos, valid_tv_json)
    defer.returnValue([valid_tv_protos, valid_tv_json])
import sys
import json
import os

from google.cloud import vision
from google.protobuf.json_format import MessageToDict

if __name__ == "__main__":
    os.environ[
        "GOOGLE_APPLICATION_CREDENTIALS"] = "/home/secrets/GCP-cred.json"
    client = vision.ImageAnnotatorClient()

    # Load image bytes from stdin
    content = sys.stdin.buffer.read()
    image = vision.types.Image(content=content)

    # Run OCR on the image
    response = client.text_detection(image=image)
    response_dict = MessageToDict(response)
    image_text = response_dict.get('textAnnotations', [{}])[0].get('description', '')
    image_text = image_text.replace('\n', ' ')

    # Return OCR text as JSON
    print(json.dumps({"text": image_text}))
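# A usage sketch for the script above: it reads raw image bytes on stdin and
# emits one JSON object on stdout, so it composes in a shell pipeline
# (the script filename is a placeholder):
#
#   python ocr_stdin.py < receipt.png | jq -r .text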