Example #1
0
def make_update_list(pb_file_list, folder_path):
    """Parse GTFS-realtime TripUpdate .pb files into flat dicts.

    Args:
        pb_file_list: iterable of protobuf file names inside ``folder_path``.
        folder_path: directory containing the .pb files.

    Returns:
        Tuple ``(update_list, bad_update_header_list)``: one dict per
        trip-update entity (delay, first stop-time departure, stop id,
        timestamp, route/trip/vehicle ids), plus the feed headers of feeds
        or entities lacking the expected 'tripUpdate' structure.
    """
    update_list = []
    bad_update_header_list = []
    for pb_file in pb_file_list:
        pb_file_path = os.path.join(folder_path, pb_file)
        with open(pb_file_path, 'rb') as f:
            feed = gtfs_realtime_pb2.FeedMessage()
            feed.ParseFromString(f.read())
            dict_obj = MessageToDict(feed)
        if 'entity' not in dict_obj:
            bad_update_header_list.append(dict_obj['header'])
            continue
        for entity in dict_obj['entity']:
            # MessageToDict already yields plain dicts, so the original's
            # json.dumps/json.loads round trip was a no-op — dropped.
            trip_update = entity.get('tripUpdate')
            if trip_update is None:
                bad_update_header_list.append(dict_obj['header'])
                continue
            update_list.append({
                'delay': trip_update['delay'],
                'stop_update_departure':
                    trip_update['stopTimeUpdate'][0]['departure']['time'],
                'stop_id': trip_update['stopTimeUpdate'][0]['stopId'],
                'timestamp': trip_update['timestamp'],
                'route_id': trip_update['trip']['routeId'],
                'trip_id': trip_update['trip']['tripId'],
                'vehicle_id': trip_update['vehicle']['id'],
            })
    return update_list, bad_update_header_list
Example #2
0
    async def play(self, ws, observation):
        """Run one decision step over the SC2 API websocket.

        Requests static game data and game info, decodes the current
        observation and forwards everything to ``self.process_step``.

        Args:
            ws: open websocket connection to the SC2 API.
            observation: ResponseObservation protobuf for this step.

        Returns:
            False if any websocket exchange times out, True otherwise.
        """
        request_data = api.Request(
            data=api.RequestData(ability_id=True, unit_type_id=True, upgrade_id=True)
        )
        try:
            # Bug fix: the first send was outside the try in the original,
            # so its TimeoutError escaped instead of returning False.
            await asyncio.wait_for(ws.send(request_data.SerializeToString()), 5)
            result = await asyncio.wait_for(ws.recv(), 5)
            data_response = api.Response.FromString(result)
            game_data = data_response.data

            request_game_info = api.Request(game_info=api.RequestGameInfo())
            await asyncio.wait_for(ws.send(request_game_info.SerializeToString()), 5)
            result = await asyncio.wait_for(ws.recv(), 5)
            game_info_response = api.Response.FromString(result)

            # If game is still on
            if game_data.units:
                obj = decode_observation(observation.observation.observation, game_data, game_info_response)
                # HACK: round-trip the proto dict through str/replace/json to
                # turn Python literals into JSON; fragile if any payload
                # string contains quotes or the words True/False.
                obs = MessageToDict(observation)
                obs = str(obs)
                obs = obs.replace("'", '"')
                obs = obs.replace("False", "false")
                obs = obs.replace("True", "true")
                # Bug fix: json.loads() lost its `encoding` kwarg in Python
                # 3.9 (it was already a no-op), so it must not be passed.
                obs = json.loads(obs)

                game_meta = api.Request(game_info=api.RequestGameInfo())
                await ws.send(game_meta.SerializeToString())
                result = await ws.recv()
                game_meta = api.Response.FromString(result)
                game_meta = MessageToDict(game_meta)
                game_meta = str(game_meta)
                game_meta = game_meta.replace("'", '"')
                game_meta = game_meta.replace("False", "false")
                game_meta = game_meta.replace("True", "true")
                game_meta = json.loads(game_meta)
                game_meta = game_meta.get("gameInfo", None)
                # Strip fields process_step does not need.
                game_meta.pop("modNames")
                game_meta.pop("options")
                game_meta.pop("mapName")
                if "localMapPath" in game_meta:
                    game_meta.pop("localMapPath")
                game_meta.pop("playerInfo")
                # Flatten startRaw into the top level.
                game_meta.update(game_meta["startRaw"])
                game_meta.pop("startRaw")
                game_meta.pop("mapSize")
                await self.process_step(ws, obj, raw=(obs, game_meta, game_data))
        except asyncio.TimeoutError:
            return False
        return True
Example #3
0
 def normalize_list_from_config(config):
     """Build the list of attribute names to normalize.

     Every Sarsa proto field that is *absent* from the given protobuf
     config maps (via ``Sarsa.proto_name_to_attr_dict``) to an attribute
     that should be normalized.
     """
     present_fields = MessageToDict(config)
     proto_to_attr = Sarsa.proto_name_to_attr_dict()
     attrs = []
     for proto_name, attr_name in proto_to_attr.items():
         if proto_name not in present_fields:
             attrs.append(attr_name)
     return attrs
Example #4
0
def asset_edit(asset_id=None):
    """Render the asset edit form, pre-filled from the stored asset."""
    form = AssetForm()

    # Valid submission: hand off immediately.
    if form.validate_on_submit():
        return asset_submit(form, asset_id)

    logging.info('loading current values because: {}'.format(form.errors))

    old_asset = assets.GetAsset(a.GetAssetRequest(_id=asset_id))

    # All of this fuckery is needed because the proto object validates field types,
    # so we can't just change the field to a datetime object but need a new object
    asset_dict = MessageToDict(message=old_asset, preserving_proto_field_name=True)
    asset_dict['acquired'] = date.fromtimestamp(old_asset.acquired)
    # Have to delete _id since it's not a valid field for a namedtuple
    asset_dict.pop('_id')
    Asset = namedtuple("Asset", asset_dict.keys())
    form = AssetForm(obj=Asset(*asset_dict.values()))

    return render_template('asset_edit.html', form=form, view='Edit Asset')
Example #5
0
def get_sketch(request: schemas.ColorInput):
    """K-NN color-sketch search against the color_image entity.

    Maps each of the twelve input colors (``c0``..``c11``) to its closest
    palette color, flattens them into one query vector, and returns the
    top ``request.max_results`` matches.

    Returns:
        ``{"results": [...]}`` where each row carries video_id,
        keyframe_id, start_time and distance; empty list when the query
        yields no tuples.
    """
    knn = request.max_results

    # Replace the hand-unrolled c0..c11 block with a loop over the
    # numbered attributes; c0-first order is preserved.
    color_list = []
    for idx in range(12):
        color = getattr(request, f"c{idx}")
        color_list.append(closest_color([color.red, color.green, color.blue]))

    # Flatten the per-color results into a single query vector.
    color_query = [component for color in color_list for component in color]

    with CottontailDBClient('localhost', 1865) as client:
        result = client.knn(
            color_query, "tal_db", "color_image", "dominant_color_vector",
            ["video_id", "keyframe_id", "start_time", "distance"], knn)
        result = MessageToDict(list(result)[0])
        columns = result["columns"]
        if 'tuples' not in result:
            return {"results": []}
        # Cottontail payload key per result column, in column order.
        value_keys = ("doubleData", "stringData", "intData", "floatData")
        rows = []
        for row in result["tuples"]:
            rows.append({
                columns[i]["name"]: row["data"][i][key]
                for i, key in enumerate(value_keys)
            })
    return {"results": rows}
def read_tfrecords(tfrecords):
    """Parse serialized TFRecord bytes into a list of feature dicts.

    TFRecord framing per record: 8-byte little-endian payload length,
    4-byte masked CRC32C of that length, then the payload followed by its
    own 4-byte masked CRC32C. Each payload is a serialized
    tf.train.Example.

    NOTE(review): this file contains a second, near-identical
    read_tfrecords definition; at import time the later one shadows this.

    Args:
        tfrecords: raw TFRecord bytes (e.g. a whole file read in binary).

    Returns:
        List of dicts mapping feature name to the first value of the
        feature's value list.

    Raises:
        ValueError: on a truncated stream or a CRC mismatch.
    """
    tfrecords_bytes = io.BytesIO(tfrecords)

    examples = []

    while True:
        # Header = 8-byte record length + 4-byte masked CRC of the length.
        length_header = 12
        buf = tfrecords_bytes.read(length_header)
        if not buf:
            # reached end of tfrecord buffer, return examples
            return examples

        if len(buf) != length_header:
            raise ValueError("TFrecord is fewer than %d bytes" % length_header)
        length, length_mask = struct.unpack("<QI", buf)
        # CRC covers only the 8 length bytes, not the mask itself.
        length_mask_actual = _masked_crc32c(buf[:8])
        if length_mask_actual != length_mask:
            raise ValueError("TFRecord does not contain a valid length mask")

        # Payload plus its trailing 4-byte CRC.
        length_data = length + 4
        buf = tfrecords_bytes.read(length_data)
        if len(buf) != length_data:
            raise ValueError(
                "TFRecord data payload has fewer bytes than specified in header"
            )
        data, data_mask_expected = struct.unpack("<%dsI" % length, buf)
        data_mask_actual = _masked_crc32c(data)
        if data_mask_actual != data_mask_expected:
            raise ValueError("TFRecord has an invalid data crc32c")

        # Deserialize the tf.Example proto
        example = tf.train.Example()
        example.ParseFromString(data)

        # Extract a feature map from the example object
        example_feature = MessageToDict(example.features)["feature"]
        feature_dict = {}
        for feature_key in example_feature.keys():
            # Each feature is shaped like {"bytesList": {"value": [...]}}
            # (or floatList/int64List); take the sole type key's first value.
            feature_dict[feature_key] = example_feature[feature_key][list(
                example_feature[feature_key].keys())[0]]["value"][0]
        examples.append(feature_dict)
def read_tfrecords(tfrecords):
    """Parse serialized TFRecord bytes into a list of feature dicts.

    TFRecord framing per record: 8-byte little-endian payload length,
    4-byte masked CRC32C of that length, then the payload followed by its
    own 4-byte masked CRC32C. Each payload is a serialized
    tf.train.Example.

    NOTE(review): this is a near-duplicate (2-space-indent variant) of an
    earlier read_tfrecords in this file; this later definition shadows it.

    Args:
        tfrecords: raw TFRecord bytes (e.g. a whole file read in binary).

    Returns:
        List of dicts mapping feature name to the first value of the
        feature's value list.

    Raises:
        ValueError: on a truncated stream or a CRC mismatch.
    """
    tfrecords_bytes = io.BytesIO(tfrecords)

    examples = []

    while True:
      # Header = 8-byte record length + 4-byte masked CRC of the length.
      length_header = 12
      buf = tfrecords_bytes.read(length_header)
      if not buf:
        # reached end of tfrecord buffer, return examples
        return examples

      if len(buf) != length_header:
        raise ValueError('TFrecord is fewer than %d bytes' % length_header)
      length, length_mask = struct.unpack('<QI', buf)
      # CRC covers only the 8 length bytes, not the mask itself.
      length_mask_actual = _masked_crc32c(buf[:8])
      if length_mask_actual != length_mask:
        raise ValueError('TFRecord does not contain a valid length mask')

      # Payload plus its trailing 4-byte CRC.
      length_data = length + 4
      buf = tfrecords_bytes.read(length_data)
      if len(buf) != length_data:
        raise ValueError('TFRecord data payload has fewer bytes than specified in header')
      data, data_mask_expected = struct.unpack('<%dsI' % length, buf)
      data_mask_actual = _masked_crc32c(data)
      if data_mask_actual != data_mask_expected:
        raise ValueError('TFRecord has an invalid data crc32c')

      # Deserialize the tf.Example proto
      example = tf.train.Example()
      example.ParseFromString(data)

      # Extract a feature map from the example object
      example_feature = MessageToDict(example.features)['feature']
      feature_dict = {}
      for feature_key in example_feature.keys():
        # Each feature is shaped like {"bytesList": {"value": [...]}} (or
        # floatList/int64List); take the sole type key's first value.
        feature_dict[feature_key] = example_feature[feature_key][list(example_feature[feature_key].keys())[0]]['value'][0]
      examples.append(feature_dict)
Example #8
0
def make_vehicle_list(pb_file_list, folder_path):
    """Parse GTFS-realtime VehiclePosition .pb files into flat dicts.

    Args:
        pb_file_list: iterable of protobuf file names inside ``folder_path``.
        folder_path: directory containing the .pb files.

    Returns:
        Tuple ``(vehicle_list, bad_vehicle_header_list)``: one dict per
        vehicle entity (id, timestamp, trip/route ids, lat/long), plus the
        feed headers of feeds or entities missing position/trip data.
    """
    vehicle_list = []
    bad_vehicle_header_list = []
    for pb_file in pb_file_list:
        pb_file_path = os.path.join(folder_path, pb_file)
        with open(pb_file_path, 'rb') as f:
            feed = gtfs_realtime_pb2.FeedMessage()
            feed.ParseFromString(f.read())
            dict_obj = MessageToDict(feed)
        if 'entity' not in dict_obj:
            bad_vehicle_header_list.append(dict_obj['header'])
            continue
        for entity in dict_obj['entity']:
            # MessageToDict already yields plain dicts, so the original's
            # json.dumps/json.loads round trip was a no-op — dropped.
            vehicle = entity['vehicle']
            if 'position' in vehicle and 'trip' in vehicle:
                vehicle_list.append({
                    'vehicle_id': vehicle['vehicle']['id'],
                    'timestamp': vehicle['timestamp'],
                    'trip_id': vehicle['trip']['tripId'],
                    'route_id': vehicle['trip']['routeId'],
                    'vehicle_lat': vehicle['position']['latitude'],
                    'vehicle_long': vehicle['position']['longitude'],
                })
            else:
                bad_vehicle_header_list.append(dict_obj['header'])
    return vehicle_list, bad_vehicle_header_list
Example #9
0
def user_edit(user_id=None):
    """Render the user edit form, pre-filled from the stored user."""
    form = UserForm()
    form.submit.label.text = 'Update'

    if form.validate_on_submit():
        # check that old_password matches the current user's
        if bcrypt.check_password_hash(current_user.user.password, form.old_password.data):
            return user_submit(form, user_id)
        flash('Current password incorrect!')
    else:
        logging.info('loading current values because: {}'.format(form.errors))

        old_user = users.GetUser(u.GetUserRequest(_id=user_id))

        # All of this fuckery is needed because the proto object validates field types,
        # so we can't just change the field to a datetime object but need a new object
        user_dict = MessageToDict(message=old_user, preserving_proto_field_name=True)
        # Have to delete _id since it's not a valid field for a namedtuple
        user_dict.pop('_id')
        User = namedtuple("User", user_dict.keys())
        form = UserForm(obj=User(*user_dict.values()))

    return render_template('user_edit.html', form=form, view='Edit User')
Example #10
0
def get_sketch(request: schemas.ObjectSketchInput):
    """K-NN object + sketch search against the sketch entity."""
    knn = request.max_results

    object_query = request.object
    # (x1,y1) is lower left and (x2,y2) is upper right
    sketch_query = [
        request.sketch.x1, request.sketch.y1, request.sketch.x2,
        request.sketch.y2
    ]  # list of 4 elements

    with CottontailDBClient('localhost', 1865) as client:
        raw = client.knn_where(
            sketch_query, "tal_db", "sketch", "sketch_vector", "object",
            ["video_id", "keyframe_id", "distance", "start_time", "object"],
            [object_query], knn)
        decoded = MessageToDict(list(raw)[0])
        columns = decoded["columns"]
        if 'tuples' not in decoded:
            return {"results": []}
        # Cottontail payload key per result column, in column order.
        value_keys = ("stringData", "doubleData", "stringData", "intData",
                      "floatData")
        rows = [
            {columns[i]["name"]: row["data"][i][key]
             for i, key in enumerate(value_keys)}
            for row in decoded["tuples"]
        ]

    return {"results": rows}
Example #11
0
def get_gas_price(url):
    """OCR a gas-station price sign and map fuel grades to prices.

    Runs Google Vision text detection on the image at *url*, extracts
    price-like numbers, locates a fuel-grade keyword, and associates each
    price with the grade found at a fixed spatial offset (the vector from
    the price nearest the keyword to the keyword itself).

    Args:
        url: publicly reachable image URL.

    Returns:
        Dict of grade name -> price string; "Unknown n" keys for prices
        whose grade could not be identified; index-keyed prices when no
        grade keyword was found; {"ERROR": "No text found"} when OCR
        yields nothing usable.
    """
    keywords = [
        'regular', 'reg', 'plus', 'premium', 'pre', 'prem', 'unleaded',
        'super', 'v-power', 'special', 'super+', 'unlead', 'noethnl', 'diesel',
        'extra', 'midgrade', 'mid-grade', 'mid', 'silver', 'ultimate',
        'unleaded plus', 'unleaded premium'
    ]
    spell = SpellChecker()
    spell.word_frequency.load_words(keywords)
    client = vision.ImageAnnotatorClient()
    image = vision.types.Image()
    image.source.image_uri = url

    texts = MessageToDict(client.text_detection(image=image))
    # Bug fix: the original retried forever on an API error; bound the
    # retries and sleep *before* re-requesting.
    for _ in range(5):
        if "error" not in texts:
            break
        sleep(5)
        texts = MessageToDict(client.text_detection(image=image))

    if "textAnnotations" not in texts:
        # Covers both "no text detected" and a persistent API error (the
        # original crashed with KeyError in the latter case).
        return {"ERROR": "No text found"}

    # The first annotation is the full-image text blob; drop it.
    texts["textAnnotations"].pop(0)
    strings = [{
        "text": x["description"],
        # Vision omits zero coordinates; default a missing "y" to 0.
        "bounds": [(y["x"], y["y"]) if "y" in y else (y["x"], 0)
                   for y in x["boundingPoly"]["vertices"]]
    } for x in texts["textAnnotations"]]

    # Collect price-like entries: strip punctuation, keep 3-digit prefixes,
    # and reformat as "$d.dd". (The duplicated replace chain is hoisted.)
    numbers = []
    for entry in strings:
        digits = entry["text"].replace(".", "").replace(" ", "").replace(
            "$", "")[:3]
        if digits.isnumeric() and len(digits) >= 3:
            entry["text"] = "$" + digits[0] + "." + digits[1:]
            numbers.append(entry)

    # Get adaptive keyword list
    keywords = adaptive_keywords(keywords, strings)

    # Find the first detected grade keyword; without one, return prices
    # keyed by index.
    keyword_indexes = [
        i for i in range(len(strings))
        if strings[i]["text"].lower() in keywords
    ]
    if not keyword_indexes:
        return {str(i): numbers[i]["text"] for i in range(len(numbers))}
    keyword = strings[keyword_indexes[0]]
    keyword_midpoint = get_midpoint(keyword["bounds"])

    # Vector from the price nearest the keyword to the keyword itself.
    vector = (10000, 10000)
    for number in numbers:
        number_midpoint = get_midpoint(number["bounds"])
        if math.hypot(keyword_midpoint[0] - number_midpoint[0],
                      keyword_midpoint[1] - number_midpoint[1]) \
                < math.hypot(vector[0], vector[1]):
            vector = (keyword_midpoint[0] - number_midpoint[0],
                      keyword_midpoint[1] - number_midpoint[1])

    # Associate each price with the text found at the keyword offset.
    output = {}
    for i in range(len(numbers)):
        number_midpoint = get_midpoint(numbers[i]["bounds"])
        gas_type = get_text_element(
            strings,
            (vector[0] + number_midpoint[0], vector[1] + number_midpoint[1]))
        if gas_type:
            output[spell.correction(
                gas_type["text"]).capitalize()] = numbers[i]["text"]
        else:
            # Attempt to cast a wider net around the vector
            output[str(i)] = numbers[i]["text"]
            for n in range(-50, 51, 10):
                gas_type = get_text_element(
                    strings, (vector[0] + number_midpoint[0] + n,
                              vector[1] + number_midpoint[1]))
                if gas_type and gas_type["text"].lower() in keywords:
                    output.pop(str(i))
                    output[spell.correction(
                        gas_type["text"]).capitalize()] = numbers[i]["text"]
                    break
                gas_type = get_text_element(
                    strings, (vector[0] + number_midpoint[0],
                              vector[1] + number_midpoint[1] + n))
                if gas_type and gas_type["text"].lower() in keywords:
                    output.pop(str(i))
                    output[spell.correction(
                        gas_type["text"]).capitalize()] = numbers[i]["text"]
                    break
    # Bug fix: iterate over a snapshot — the original mutated `output`
    # while iterating output.keys(), a RuntimeError in Python 3.
    for k in list(output.keys()):
        if k.isnumeric():
            output["Unknown " + str(k)] = output.pop(k)
    return output
Example #12
0
def _word_df(client, word):
    """LIKE-select transcripts containing *word*, as a DataFrame."""
    raw = client.select_where("tal_db", "transcription",
                              ["video_id", "audio_transcription"],
                              "audio_transcription", [f"%{word}%"])
    df = cottontail_simple_text_where_to_df(raw, "audio_transcription")
    print(df)
    return df


def _transcript_like(pattern):
    """LIKE-select transcripts matching *pattern*, formatted for the API."""
    with CottontailDBClient('localhost', 1865) as client:
        result = client.select_where("tal_db", "transcription",
                                     ["video_id", "audio_transcription"],
                                     "audio_transcription", [pattern])
        result = MessageToDict(list(result)[0])
        columns = result["columns"]
        if 'tuples' not in result:
            return {"results": []}
        rows = result["tuples"]
        print(rows)
        response = {}
        for i, row in enumerate(rows):
            response[f"{i}"] = {
                columns[0]["name"]: row["data"][0]["stringData"],
                columns[1]["name"]: row["data"][1]["stringData"],
            }
    return {"results": list(response.values())}


def get_text(text: str):
    """Search audio transcriptions for the (stemmed) words in *text*.

    Splits *text* on spaces and stems each word. One word (or more than
    three) runs a single LIKE query; two or three words run per-word
    queries joined on video_id.

    Returns:
        {"results": [...]} rows of video_id / transcription matches.
    """
    text_list = [stemming_algo(word) for word in text.split(" ")]

    if len(text_list) == 1:
        # Bug fix: the original queried text[0] — the first *character*
        # of the raw input — instead of the stemmed first word.
        return _transcript_like(f"%{text_list[0]}%")

    if len(text_list) == 2:
        with CottontailDBClient('localhost', 1865) as client:
            df_a = _word_df(client, text_list[0])
            df_b = _word_df(client, text_list[1])
            merged_df = pd.merge(df_a, df_b, on=['video_id'])
            print(merged_df)
            # Bug fix: after one merge the duplicate column is suffixed
            # '_y' (pandas default suffixes), not unsuffixed; the
            # original's drop('audio_transcription') raised KeyError.
            merged_df = merged_df.drop(
                ['audio_transcription_x', 'audio_transcription_y'],
                axis=1).sort_values(by=['video_id'])
            response = merged_df.head(20000000).to_dict(orient="records")
            print(response)
        return {"results": response}

    if len(text_list) == 3:
        with CottontailDBClient('localhost', 1865) as client:
            df_a = _word_df(client, text_list[0])
            df_b = _word_df(client, text_list[1])
            df_c = _word_df(client, text_list[2])
            # First merge suffixes _x/_y; the third frame's column stays
            # unsuffixed, so dropping _x/_y keeps one transcript column.
            merged_df = df_a.merge(df_b, on=['video_id']).merge(
                df_c, on=['video_id'])
            print(merged_df)
            merged_df = merged_df.drop(
                ['audio_transcription_x', 'audio_transcription_y'],
                axis=1).sort_values(by=['video_id'])
            response = merged_df.head(20000000).to_dict(orient="records")
            print(response)
        return {"results": response}

    # More than three words: match the whole raw phrase.
    return _transcript_like(f"%{text}%")
Example #13
0
    async def load_replay(self, replay_file, id=0):
        """Open *replay_file* via the SC2 websocket API and start it.

        Populates ``self.replay_info`` with map/races/results metadata
        and, on success, ``self.game_info`` with the flattened gameInfo
        dict and sets ``self.status`` to "started".

        Args:
            replay_file: path to the .SC2Replay file on the SC2 host.
            id: player id to observe (0 = default observer).

        Returns:
            True when the replay started and gameInfo was received,
            False otherwise.
        """
        print(replay_file)
        async with websockets.connect("ws://{0}:{1}/sc2api".format(
                self.host.address, self.host.port)) as ws:
            replay_meta = api.Request(replay_info=api.RequestReplayInfo(
                replay_path=replay_file))
            await ws.send(replay_meta.SerializeToString())
            result = await ws.recv()
            metadata = api.Response.FromString(result)
            self.replay_info = {
                "map": metadata.replay_info.map_name,
                "races": [
                    metadata.replay_info.player_info[0].player_info.race_requested,
                    metadata.replay_info.player_info[1].player_info.race_requested,
                ],
                "results": [
                    metadata.replay_info.player_info[0].player_result.result,
                    metadata.replay_info.player_info[1].player_result.result,
                ],
            }
            print(self.replay_info)
            msg = api.Request(start_replay=api.RequestStartReplay(
                replay_path=replay_file,
                observed_player_id=id,
                options=api.InterfaceOptions(raw=True, score=False),
            ))

            await ws.send(msg.SerializeToString())
            # NOTE(review): blocking sleep inside a coroutine stalls the
            # event loop; consider `await asyncio.sleep(1)` if asyncio is
            # imported in this module — confirm before changing.
            time.sleep(1)
            result = await ws.recv()
            response = api.Response.FromString(result)
            print(response)
            game_meta = api.Request(game_info=api.RequestGameInfo())
            await ws.send(game_meta.SerializeToString())
            result = await ws.recv()
            game_meta = api.Response.FromString(result)
            # HACK: normalize the proto dict's Python literals into JSON
            # via string replaces; fragile if payload strings contain
            # quotes or the words True/False.
            game_meta = MessageToDict(game_meta)
            game_meta = str(game_meta)
            game_meta = game_meta.replace("'", '"')
            game_meta = game_meta.replace("False", "false")
            game_meta = game_meta.replace("True", "true")
            # Bug fix: json.loads() lost its `encoding` kwarg in Python
            # 3.9 (it was already a no-op), so it must not be passed.
            game_meta = json.loads(game_meta)
            if "gameInfo" not in game_meta:
                return False
            game_meta = game_meta["gameInfo"]
            # Strip fields the caller does not need, then flatten startRaw
            # into the top level.
            game_meta.pop("modNames")
            game_meta.pop("options")
            game_meta.pop("mapName")
            if "localMapPath" in game_meta:
                game_meta.pop("localMapPath")
            game_meta.pop("playerInfo")
            game_meta.update(game_meta["startRaw"])
            game_meta.pop("startRaw")
            game_meta.pop("mapSize")
            self.game_info = game_meta
            self.status = "started"
            return True