def precondition(update, context):
    chat_id = update.message.chat_id
    da = DatabaseAccess()
    user = da.getUser(chat_id)
    chat_id = user['userid']
    zes = ZEServices(da, chat_id, user['username'], None)
    token = zes.refreshTokenIfNecessary()
    if token is not None:
        # Trigger cabin pre-heating via the air-conditioning endpoint.
        zes.postApiCall('/api/vehicle/' + user['vin'] + '/air-conditioning')
        context.bot.sendMessage(chat_id, "Attempted to preheat.")
    else:
        context.bot.sendMessage(chat_id, "Could not connect to ZE Services, you have been logged out. Register again to continue receiving updates.")
        da.deleteUser(chat_id)


def status(update, context):
    chat_id = update.message.chat_id
    da = DatabaseAccess()
    user = da.getUser(chat_id)
    chat_id = user['userid']
    zes = ZEServices(da, chat_id, user['username'], None)
    token = zes.refreshTokenIfNecessary()
    if token is not None:
        newBatteryStatus = zes.apiCall('/api/vehicle/' + user['vin'] + '/battery')
        result = getStatusString(newBatteryStatus)
        context.bot.sendMessage(chat_id, result)
        # Cache the result so sendUpdates() only notifies on changes.
        da.updateApiResultForUser(chat_id, json.dumps(newBatteryStatus))
    else:
        context.bot.sendMessage(chat_id,
                                "Could not connect to ZE Services, you have been logged out. Register again to continue receiving updates.")
        da.deleteUser(chat_id)


def sendUpdates(context):
    da = DatabaseAccess()
    users = da.getUsers()
    for user in users:
        chat_id = user['userid']
        zes = ZEServices(da, chat_id, user['username'], None)
        token = zes.refreshTokenIfNecessary()
        if token is not None:
            newBatteryStatus = zes.apiCall('/api/vehicle/' + user['vin'] + '/battery')
            oldBatteryStatusString = user['lastApiResult']
            oldBatteryStatus = json.loads(oldBatteryStatusString) if oldBatteryStatusString is not None else {}

            # Only notify the user when the rendered status actually changed.
            result = getStatusString(newBatteryStatus)
            if result != getStatusString(oldBatteryStatus):
                context.bot.sendMessage(chat_id, result)
                da.updateApiResultForUser(chat_id, json.dumps(newBatteryStatus))
        else:
            context.bot.sendMessage(chat_id,
                                    "Could not connect to ZE Services, you have been logged out. Register again to continue receiving updates.")
            da.deleteUser(chat_id)


def register(update, context):
    da = DatabaseAccess()

    chat_id = update.message.chat_id
    print("Registering Chat ID", chat_id)

    # Remove any previous registration for this chat before re-registering.
    da.deleteUser(chat_id)

    # Credentials are passed as command arguments: /register <username> <password>
    if len(context.args) < 2:
        update.message.reply_text("Usage: /register <username> <password>")
        return
    username = context.args[0]
    password = context.args[1]

    zes = ZEServices(da, chat_id, username, password)
    token = zes.refreshTokenIfNecessary()
    if token is not None:
        update.message.reply_text("Login successful!")
    else:
        update.message.reply_text("Login was not successful :(")
        da.deleteUser(chat_id)
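
These handlers use the python-telegram-bot v12 context-based API. A minimal sketch of how they might be wired up (the bot token, command names, and polling interval are assumptions, not taken from the snippets above):

from telegram.ext import Updater, CommandHandler

updater = Updater("BOT_TOKEN_HERE", use_context=True)  # hypothetical token
updater.dispatcher.add_handler(CommandHandler("register", register))
updater.dispatcher.add_handler(CommandHandler("status", status))
updater.dispatcher.add_handler(CommandHandler("preheat", precondition))
updater.dispatcher.add_handler(CommandHandler("unregister", unregister))
# sendUpdates(context) matches the job-queue callback signature.
updater.job_queue.run_repeating(sendUpdates, interval=900, first=10)
updater.start_polling()
updater.idle()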
Example #5
from database_access import DatabaseAccess, DatabaseCreator
import os

# Build the path with os.path.join; '\data\\' relies on backslash escapes
# and only works on Windows.
db_file_to_open = os.path.join(os.getcwd(), "data", "amazon_reviews_us_shoes_v1_00.db")
db_file_to_save = r"amazon_reviews_us_shoes_v1_00_2015_top10000_bad.db"

db_access = DatabaseAccess(db_file_to_open)
reviews = db_access.retrive_reviews_with_products_top_n(10000)

db_creator = DatabaseCreator(db_file_to_save)
db_creator.create_table("Product", db_creator.product_columns)
db_creator.create_table("Review", db_creator.review_columns)

with db_creator.conn:
    # insert data
    for index, review in reviews.iterrows():
        product_info = (review[10], review[11], review[12])
        review_info = (review[1], review[2], review[3], review[4], review[5],
                       review[6], review[7], review[8], review[9])
        # insert product
        db_creator.insert_product(product_info)
        # insert review
        db_creator.insert_review(review_info)
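
One way to sanity-check the copy afterwards is to read the row counts back with the standard sqlite3 module (a sketch; it assumes db_file_to_save from above is still in scope):

import sqlite3

conn = sqlite3.connect(db_file_to_save)
for table in ("Product", "Review"):   # the two tables created above
    count, = conn.execute("SELECT COUNT(*) FROM " + table).fetchone()
    print(table, count)
conn.close()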

Example #6

def main(dao, batch_size, embedding_size, skip_window, num_skips, num_sampled,
         num_steps):
    # trips_raw is loaded elsewhere in the module; the loading step is not
    # part of this snippet.
    trips, num_segments, road_seq, seq_road = prepare_data(trips_raw)

    # Build and train a skip-gram model
    execute_w2v(dao, batch_size, embedding_size, skip_window, num_skips,
                num_sampled, num_steps, trips, num_segments, road_seq,
                seq_road)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Embedding parameter settings")
    # default=1 keeps int(arg['skip_window']) below from failing on None
    parser.add_argument("--skip_window", nargs="?", const=1, default=1)

    arg = vars(parser.parse_args())

    dao = DatabaseAccess(city='', data_dir="data")
    #dao = DatabaseAccess(city='jinan',
    #                     data_dir="/Volumes/Porter's Data/penn-state/data-sets/")

    # Build and train a skip-gram model
    batch_size = 32
    embedding_size = 100
    skip_window = int(arg['skip_window'])
    num_skips = 2
    num_sampled = 16
    num_steps = 5000

    main(dao, batch_size, embedding_size, skip_window, num_skips, num_sampled,
         num_steps)
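
For context, the skip-gram training data that execute_w2v consumes is (center, context) pairs drawn from each trip's road-ID sequence. A minimal sketch of such a pair sampler (generate_skipgram_batch is illustrative, not part of the project):

import random

def generate_skipgram_batch(trips, skip_window, num_skips, batch_size):
    """Sample (center, context) road-ID pairs from trip sequences."""
    batch = []
    while len(batch) < batch_size:  # assumes at least one trip has 2+ segments
        trip = random.choice(trips)
        if len(trip) < 2:
            continue  # a single-segment trip has no context
        i = random.randrange(len(trip))
        lo, hi = max(0, i - skip_window), min(len(trip), i + skip_window + 1)
        contexts = [trip[j] for j in range(lo, hi) if j != i]
        for ctx in random.sample(contexts, min(num_skips, len(contexts))):
            batch.append((trip[i], ctx))
    return batch[:batch_size]

# e.g. generate_skipgram_batch([[1, 2, 3, 4]], skip_window=1, num_skips=2, batch_size=4)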
Example #7
            # search latitude
            lat_key = int(
                (road.coordinates[0] - cls.lat_cut_points[0]) // lat_dist)

            if lat_key >= len(cls.lat_cut_points):
                lat_key = None

            # search longitude
            lon_key = int(
                (road.coordinates[1] - cls.lon_cut_points[0]) // lon_dist)
            if lon_key >= len(cls.lon_cut_points):
                lon_key = None

            if lat_key is not None and lon_key is not None:
                cell_id = GridCell.cell_coord_dict[(lat_key, lon_key)]
                road.updateGridCell(cell_id)

                # append road ID to cell's road_list attribute
                cls.cell_id_dict[cell_id].road_list.append(road)

    @classmethod
    def init(cls, dao):
        GridCell.setDao(dao)
        GridCell.initAllCells(n_grids=5)
        GridCell.mapRoadsToCell()


if __name__ == '__main__':
    dao = DatabaseAccess(
        city='jinan', data_dir="/Volumes/Porter's Data/penn-state/data-sets/")
    GridCell.setDao(dao)
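
The floor-division binning above converts a coordinate offset into a grid index. A small worked illustration with made-up cut points (not the project's values):

# Hypothetical grid edges; any evenly spaced cut points work the same way.
lat_cut_points = [36.64, 36.65, 36.66, 36.67, 36.68]
lat_dist = lat_cut_points[1] - lat_cut_points[0]     # 0.01 degrees per bin
lat_key = int((36.6672 - lat_cut_points[0]) // lat_dist)
print(lat_key)  # -> 2, i.e. the road falls into the third latitude band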
Example #8
def create_continuous_view(plpy, cv_name: str, query: str) -> None:
    # wrap plpy object
    db = DatabaseAccess(plpy)

    # register the continuous view; fails if a view with the same name already exists
    db.continuous_views_insert(cv_name)

    # extract the common table expressions and the actual query (CTE ordering is preserved)
    parsed_query = sqlparse.parse(query)
    query_string, ctes = parse_cte(parsed_query)

    upper_pipelines = []

    # new schema for the view (adding it to the search path lets pipelines reuse the original table names)
    db.create_schema(f'cv_{cv_name}')
    db.add_to_search_path(f'cv_{cv_name}')

    for cte_name, cte in ctes.items():
        pipeline_name = f'cv_{cv_name}.{cte_name}'

        if cte.pipeline_type == PipelineType.STREAM:
            stream_name = cte.get_stream()

            # derive the name of the insert function for the stream
            function_name = construct_insert_function_name(stream_name)

            # create an auxiliary table for aggregates
            db.create_stream_pipeline(pipeline_name, cte.query.normalized)

            # store stream query for later use
            db.stream_pipelines_insert(cv_name, stream_name, pipeline_name,
                                       function_name, cte.query.normalized)

            # if we group by attributes create an index on the primary keys
            if len(cte.primary_keys()) > 0:
                db.create_index(f'cv_{cv_name}_{cte_name}_index',
                                pipeline_name, cte.primary_keys())

            # generate the body of the stream insert function
            declarations, body = FunctionGenerator().generate_stream_insert_function(
                f'cv_{cv_name}', stream_name, pipeline_name, cte)

            # create insert function
            db.create_insert_function(function_name, stream_name, declarations,
                                      body)

        elif cte.pipeline_type == PipelineType.STATIC:
            # evaluate static pipelines and save them
            db.create_static_pipeline(pipeline_name, cte.query.normalized)
            # create index on static pipeline
            if len(cte.keys_for_index()) > 0:
                db.create_index(f'cv_{cv_name}_{cte_name}_index',
                                pipeline_name, cte.keys_for_index())

        elif cte.pipeline_type == PipelineType.UPPER:
            # merge all upper pipelines
            upper_pipelines.append(cte)

    # create query from all upper pipelines
    db.create_upper_pipeline(f'cv.{cv_name}', upper_pipelines, query_string)
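
parse_cte above operates on sqlparse's token stream; a quick illustration of what sqlparse.parse returns for a CTE query (the query text is invented for this sketch):

import sqlparse

query = """
WITH clicks AS (SELECT user_id, count(*) AS n FROM click_stream GROUP BY user_id)
SELECT * FROM clicks WHERE n > 10
"""
statement = sqlparse.parse(query)[0]   # parse() returns a tuple of Statement objects
for token in statement.tokens:         # the token stream parse_cte() would walk
    print(token.ttype, repr(str(token))[:60])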
Example #9

def unregister(update, context):
    chat_id = update.message.chat_id
    # context.bot, not a bare `bot`, is what is in scope in context-based handlers.
    context.bot.sendMessage(chat_id, "Your data has been deleted")
    da = DatabaseAccess()
    da.deleteUser(chat_id)
Example #10

def dump(update, context):
    # Debug helper: print the users table to the server console.
    print("Database content:")
    da = DatabaseAccess()
    da.dumpUsersTable()
    print("---")
Example #11
import os

def get_reviews():
    # Build the path with os.path.join; '\data\\' relies on backslash escapes
    # and only works on Windows.
    db_file = os.path.join(os.getcwd(), "data", "amazon_reviews_us_kindle.db")

    db = DatabaseAccess(db_file)
    return db.retrive_reviews()
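
A possible caller, assuming retrive_reviews() returns a pandas DataFrame like the other retrieval helpers on this page:

reviews = get_reviews()
print(reviews.shape)    # (rows, columns) of the Kindle review set
print(reviews.head())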
Example #12
    Trip.init(dao, read_pickle=trip_pickle)

    # create traffic cameras
    TrafficCam.init(dao, read_pickle=cam_pickle)


if __name__ == '__main__':
    # Initialize the DatabaseAccess object (dao)
    dao = DatabaseAccess(city='jinan',
                         data_dir="/Volumes/Porter's Data/penn-state/data-sets/",
                         lat_range=(36.6467, 36.6738),   # filter city to a smaller window
                         lon_range=(116.9673, 117.0286))

    initCity(dao=dao, trip_pickle=True, cam_pickle=True)
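
The lat_range/lon_range arguments above clip the city to a bounding box. A sketch of how such a window filter might look on a coordinate table (the "lat"/"lon" column names are assumptions, not DatabaseAccess internals):

import pandas as pd

def filter_to_window(df: pd.DataFrame, lat_range, lon_range) -> pd.DataFrame:
    """Keep only rows whose coordinates fall inside the bounding box."""
    return df[df["lat"].between(*lat_range) & df["lon"].between(*lon_range)]

# e.g. filter_to_window(points, (36.6467, 36.6738), (116.9673, 117.0286))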


    """

    # save matrices for xianfeng's code
    fname = "/Users/porterjenkins/Documents/PENN STATE/RESEARCH/supervised-embedding/xianfeng/city-eye/data_back/"
    monitored_roads = RoadNode.getMonitoredRoads()
    np.savez(fname + "monitored_file-porter-small.npz", monitored_nodes = monitored_roads)
    rawnodes = RoadNode.getNodeVolumeMatrix()
    np.savez(fname + "nodes-porter-small.npz",nodes = rawnodes)
    adjacency = RoadNode.getAdjacencyMatrix(tensor=True)
    #adjacency[0] = np.transpose(adjacency[0])