Example #1
def show_user():
    model.connect()
    user_id = request.args.get("id")
    user = model.get_user_by_id(user_id)
    # user_profile = model.user_name.get_posts()

    return render_template("user_profile.html", user= user)
Example #2
def main():
    # DB stuff
    connect()
    init()

    globalized.init()

    # TCP stuff
    listener = listen()

    # REST stuff
    launch()
Example #3
def start():
    u = Updater(model.connect())
    u.next([
        create_table_link_projection,
        create_table_response_cache,
    ])
    u.execute()
Example #4
def query_CAISONetImports_hrly_Series():
    """specifically gets import data"""

    import os
    parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.sys.path.insert(0, parentdir)

    import model
    s = model.connect()

    imports_obj = s.execute(
        'SELECT time_start, sum(mw_imports) FROM "HistoricCAISONetImports" where time_start between \'2014-01-01 07:00:00.000000\' and \'2015-01-01 00:00:00.000000\' GROUP BY time_start '
    )
    imports_entry = imports_obj.fetchall()
    imports_df = DataFrame(imports_entry)
    imports_df.columns = ['time_start', 'mw_imports']

    dict_with_datetime_keys = {}

    for idx, row in enumerate(imports_df.values):
        time_start = row[0]

        # check date, since logs show we're missing a few
        if check_if_bad_date(time_start) != True:

            # turn dict into a series.  will auto-index on dict keys
            mw_imports = row[1]
            dict_with_datetime_keys[time_start] = mw_imports

    return Series(dict_with_datetime_keys)
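
The dict-building loop above (and in the similar helpers that follow) can also be expressed directly against the DataFrame. A minimal sketch, assuming the same DataFrame/Series imports and the check_if_bad_date helper used in these examples:

def imports_df_to_series(imports_df):
    # keep only rows whose time_start passes the date check
    keep = [not check_if_bad_date(ts) for ts in imports_df['time_start']]
    filtered = imports_df[keep]
    # index on time_start, mirroring the dict-keyed Series built above
    return Series(filtered['mw_imports'].values, index=filtered['time_start'].values)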
Example #5
def query_CAISODemand_hrly_Series():
    """specifically gets demand data"""

    import os
    parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.sys.path.insert(0, parentdir)

    import model
    s = model.connect()

    demand_obj = s.execute(
        'SELECT time_start, mw_demand FROM "HistoricCAISODemands" WHERE caiso_tac=\'CA ISO-TAC\' and time_start between \'2014-01-01 07:00:00.000000\' and \'2015-01-01 00:00:00.000000\' '
    )
    demand_entry = demand_obj.fetchall()
    demand_df = DataFrame(demand_entry)
    demand_df.columns = ['time_start', 'mw_demand']

    dict_with_datetime_keys = {}

    for idx, row in enumerate(demand_df.values):
        time_start = row[0]

        # check date, since logs show we're missing a few
        if check_if_bad_date(time_start) != True:

            # turn dict into a series.  will auto-index on dict keys
            mw_demand = row[1]
            dict_with_datetime_keys[time_start] = mw_demand

    return Series(dict_with_datetime_keys)
Example #6
def load_demographics():
    with open('seed_data/SlimmerData_Consolidated.csv', 'rU') as csvfile:
        reader = csv.reader(csvfile, dialect='excel')
        for row in reader:
            print "row:", row
            # row = troubleshooting.  If issue, see where.
            try:
                session = model.connect()
                demo_obj = model.Demographic()

                demo_obj.zipcode = row[0]
                demo_obj.popdensity = float(row[1])
                demo_obj.pctemployed = float(row[2])
                demo_obj.pctmnf = float(row[3])
                demo_obj.pctlogistics = float(row[4])
                demo_obj.pctit = float(row[5])
                demo_obj.pctprof = float(row[6])
                demo_obj.hhincq10 = int(row[7])
                demo_obj.hhincq30 = int(row[8])
                demo_obj.hhincq50 = int(row[9])
                demo_obj.hhincq70 = int(row[10])
                demo_obj.hhincq90 = int(row[11])
                demo_obj.hhincq95 = int(row[12])
                demo_obj.pctheatelec = float(row[13])

                session.add(demo_obj)
                session.commit()
            except:
                print "Error for row data:", row
                f = open('log_file.txt', 'a')
                f.write("\nError. Failure for row:" + str(row))
                f.close()
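
A possible tightening of this seeding pattern (a sketch, assuming the same model.Demographic schema and CSV layout as above): open one session for the whole file instead of one per row, roll back a failed row so later rows can still commit, and log through the logging module instead of reopening the log file on every error.

import csv
import logging

logging.basicConfig(filename='log_file.txt', level=logging.ERROR)

def load_demographics_once():
    session = model.connect()
    with open('seed_data/SlimmerData_Consolidated.csv', 'rU') as csvfile:
        for row in csv.reader(csvfile, dialect='excel'):
            try:
                demo_obj = model.Demographic()
                demo_obj.zipcode = row[0]
                demo_obj.popdensity = float(row[1])
                # ... remaining columns exactly as in the example above ...
                session.add(demo_obj)
                session.commit()
            except Exception:
                # undo the failed row so the session stays usable
                session.rollback()
                logging.error("Failure for row: %s", row)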
Example #7
def insert_row_db(date, hr, adict):
    """imports model, and inserts web scraped data into the db"""

    # add parent directory to the path, so can import model.py
    #  need model in order to update the database when this task is activated by cron
    import os
    parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.sys.path.insert(0,parentdir)

    import model
    session = model.connect()


    for k, v in adict.items():
        fuel_obj = model.HistoricCAISOProdByFuel()

        fuel_obj.date = datetime.strptime(date,'%Y%m%d')
        fuel_obj.time_start = hr
        fuel_obj.fuel_type = k
        fuel_obj.mw_gen = v

        session.add(fuel_obj)
        print fuel_obj

    session.commit()
Example #8
def query_CAISOProdByFuel_Series(ea_fuel):
    import os
    parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.sys.path.insert(0,parentdir)

    import model
    s = model.connect()

    ea_fuel_obj = s.execute('SELECT time_start, fuel_type, mw_gen FROM "HistoricCAISOProdByFuels" WHERE fuel_type=\'%s\' and time_start between \'2014-01-01 07:00:00.000000\' and \'2015-01-01 00:00:00.000000\'  ' % ea_fuel)
    ea_fuel_entry = ea_fuel_obj.fetchall()
    ea_fuel_df = DataFrame(ea_fuel_entry)
    ea_fuel_df.columns = ['time_start', 'fuel_type', 'mw_gen']

    dict_with_datetime_keys = { }

    for idx,row in enumerate(ea_fuel_df.values):
        time_start = row[0]

        # check date, since logs show we're missing a few
        if check_if_bad_date(time_start)!=True:
            mw_gen = row[2]
            dict_with_datetime_keys[time_start] = mw_gen

    # turn dict into a series.  will auto-index on dict keys
    return Series(dict_with_datetime_keys)
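
This helper (and the date-bounded queries above) interpolates values into the SQL string with %, which breaks on embedded quotes and invites SQL injection if the input is ever untrusted. A hedged sketch of the same query using SQLAlchemy bound parameters, assuming s is the session returned by model.connect():

from sqlalchemy import text

def query_fuel_rows(s, ea_fuel):
    # the driver handles quoting and escaping of bound parameters
    result = s.execute(
        text('SELECT time_start, fuel_type, mw_gen '
             'FROM "HistoricCAISOProdByFuels" '
             'WHERE fuel_type = :fuel '
             'AND time_start BETWEEN :start AND :end'),
        {'fuel': ea_fuel,
         'start': '2014-01-01 07:00:00.000000',
         'end': '2015-01-01 00:00:00.000000'})
    return result.fetchall()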
Example #9
def retrieve_from_db_usa():
    """imports model, pulls mwh production data from db, and places into pandas df.
    Also pulls state for each plant_name, and places into dict."""

    # add parent directory to the path, so can import model.py
    #  need model in order to update the database when this task is activated by cron
    import os
    parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.sys.path.insert(0,parentdir)

    import model
    s = model.connect()

    # retrieve DECEMBER production data, for all turbines at all power plants in California
    USA_gen_dec13_obj = s.execute('SELECT plant_name, state, fuel_type, dec_mwh_gen FROM "ProdGensDec2013" ')
    USA_gen_dec13_data = USA_gen_dec13_obj.fetchall()
    df_dec2013 = DataFrame(USA_gen_dec13_data)
    df_dec2013.columns = ['plant_name', 'state', 'fuel_type', 'dec_mwh_gen']

    # retrieve JAN-NOV 2014 production data, for all turbines at all power plants in USA
    USA_gen_2014_obj = s.execute('SELECT plant_name, state, fuel_type, jan_mwh_gen, feb_mwh_gen, mar_mwh_gen, apr_mwh_gen, may_mwh_gen, jun_mwh_gen, jul_mwh_gen, aug_mwh_gen, sep_mwh_gen, oct_mwh_gen, nov_mwh_gen FROM "ProdGens" ')
    USA_gen_2014_data = USA_gen_2014_obj.fetchall()
    df_2014 = DataFrame(USA_gen_2014_data)
    df_2014.columns = ['plant_name', 'state', 'fuel_type', 'jan_mwh_gen', 'feb_mwh_gen', 'mar_mwh_gen', 'apr_mwh_gen', 'may_mwh_gen', 'jun_mwh_gen', 'jul_mwh_gen', 'aug_mwh_gen', 'sep_mwh_gen', 'oct_mwh_gen', 'nov_mwh_gen']

    return df_dec2013, df_2014
Example #10
def insert_row_imports_db(date, list_of_dicts):
    """Takes in a list of dicts, with each list item equal to a timepoint. inserts into HistoricCAISODemand"""

    # add parent directory to the path, so can import model.py
    #  need model in order to update the database when this task is activated by cron

    import os
    parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.sys.path.insert(0, parentdir)

    import model
    session = model.connect()

    from datetime import datetime

    for timept_dict in list_of_dicts:
        imports_obj = model.HistoricCAISONetImport()

        opr_date = timept_dict['opr_date']
        imports_obj.date = datetime.strptime(opr_date, '%Y-%m-%d')

        imports_obj.time_start = timept_dict['time_start']
        imports_obj.time_end = timept_dict['time_end']

        imports_obj.resource = (timept_dict['resource']).strip()

        imports_obj.mw_imports = float(timept_dict['mw_imports'])

        session.add(imports_obj)

    session.commit()
Example #11
def query_CAISODemand_hrly_Series():
    """specifically gets demand data"""

    import os
    parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.sys.path.insert(0,parentdir)

    import model
    s = model.connect()

    demand_obj = s.execute('SELECT time_start, mw_demand FROM "HistoricCAISODemands" WHERE caiso_tac=\'CA ISO-TAC\' and time_start between \'2014-01-01 07:00:00.000000\' and \'2015-01-01 00:00:00.000000\' ')
    demand_entry = demand_obj.fetchall()
    demand_df = DataFrame(demand_entry)
    demand_df.columns = ['time_start','mw_demand']

    dict_with_datetime_keys = { }

    for idx,row in enumerate(demand_df.values):
        time_start = row[0]

        # check date, since logs show we're missing a few
        if check_if_bad_date(time_start)!=True:

            # turn dict into a series.  will auto-index on dict keys
            mw_demand = row[1]
            dict_with_datetime_keys[time_start] = mw_demand

    return Series(dict_with_datetime_keys)
Example #12
def query_CAISONetImports_hrly_Series():
    """specifically gets import data"""

    import os
    parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.sys.path.insert(0,parentdir)

    import model
    s = model.connect()

    imports_obj = s.execute('SELECT time_start, sum(mw_imports) FROM "HistoricCAISONetImports" where time_start between \'2014-01-01 07:00:00.000000\' and \'2015-01-01 00:00:00.000000\' GROUP BY time_start ')
    imports_entry = imports_obj.fetchall()
    imports_df = DataFrame(imports_entry)
    imports_df.columns = ['time_start', 'mw_imports']

    dict_with_datetime_keys = { }

    for idx,row in enumerate(imports_df.values):
        time_start = row[0]

        # check date, since logs show we're missing a few
        if check_if_bad_date(time_start)!=True:

            # turn dict into a series.  will auto-index on dict keys
            mw_imports = row[1]
            dict_with_datetime_keys[time_start] = mw_imports

    return Series(dict_with_datetime_keys)
Example #13
def retrieve_from_db_usa():
    """imports model, pulls mwh production data from db, and places into pandas df.
    Also pulls state for each plant_name, and places into dict."""

    # add parent directory to the path, so can import model.py
    #  need model in order to update the database when this task is activated by cron
    import os
    parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.sys.path.insert(0, parentdir)

    import model
    s = model.connect()

    # retrieve DECEMBER production data, for all turbines at all power plants in California
    USA_gen_dec13_obj = s.execute(
        'SELECT plant_name, state, fuel_type, dec_mwh_gen FROM "ProdGensDec2013" '
    )
    USA_gen_dec13_data = USA_gen_dec13_obj.fetchall()
    df_dec2013 = DataFrame(USA_gen_dec13_data)
    df_dec2013.columns = ['plant_name', 'state', 'fuel_type', 'dec_mwh_gen']

    # retrieve JAN-NOV 2014 production data, for all turbines at all power plants in USA
    USA_gen_2014_obj = s.execute(
        'SELECT plant_name, state, fuel_type, jan_mwh_gen, feb_mwh_gen, mar_mwh_gen, apr_mwh_gen, may_mwh_gen, jun_mwh_gen, jul_mwh_gen, aug_mwh_gen, sep_mwh_gen, oct_mwh_gen, nov_mwh_gen FROM "ProdGens" '
    )
    USA_gen_2014_data = USA_gen_2014_obj.fetchall()
    df_2014 = DataFrame(USA_gen_2014_data)
    df_2014.columns = [
        'plant_name', 'state', 'fuel_type', 'jan_mwh_gen', 'feb_mwh_gen',
        'mar_mwh_gen', 'apr_mwh_gen', 'may_mwh_gen', 'jun_mwh_gen',
        'jul_mwh_gen', 'aug_mwh_gen', 'sep_mwh_gen', 'oct_mwh_gen',
        'nov_mwh_gen'
    ]

    return df_dec2013, df_2014
Example #14
def insert_row_imports_db(date, list_of_dicts):
    """Takes in a list of dicts, with each list item equal to a timepoint. inserts into HistoricCAISODemand"""

    # add parent directory to the path, so can import model.py
    #  need model in order to update the database when this task is activated by cron

    import os
    parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.sys.path.insert(0,parentdir)

    import model
    session = model.connect()

    from datetime import datetime

    for timept_dict in list_of_dicts:
        imports_obj = model.HistoricCAISONetImport()

        opr_date = timept_dict['opr_date']
        imports_obj.date = datetime.strptime(opr_date,'%Y-%m-%d')

        imports_obj.time_start = timept_dict['time_start']
        imports_obj.time_end = timept_dict['time_end']

        imports_obj.resource = (timept_dict['resource']).strip()

        imports_obj.mw_imports = float(timept_dict['mw_imports'])

        session.add(imports_obj)

    session.commit()
Example #15
def query_CAISOProdByFuel_Series(ea_fuel):
    import os
    parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.sys.path.insert(0, parentdir)

    import model
    s = model.connect()

    ea_fuel_obj = s.execute(
        'SELECT time_start, fuel_type, mw_gen FROM "HistoricCAISOProdByFuels" WHERE fuel_type=\'%s\' and time_start between \'2014-01-01 07:00:00.000000\' and \'2015-01-01 00:00:00.000000\'  '
        % ea_fuel)
    ea_fuel_entry = ea_fuel_obj.fetchall()
    ea_fuel_df = DataFrame(ea_fuel_entry)
    ea_fuel_df.columns = ['time_start', 'fuel_type', 'mw_gen']

    dict_with_datetime_keys = {}

    for idx, row in enumerate(ea_fuel_df.values):
        time_start = row[0]

        # check date, since logs show we're missing a few
        if check_if_bad_date(time_start) != True:
            mw_gen = row[2]
            dict_with_datetime_keys[time_start] = mw_gen

    # turn dict into a series.  will auto-index on dict keys
    return Series(dict_with_datetime_keys)
Example #16
def load_geographics():
    with open('seed_data/zip_code_database.csv', 'rb') as csvfile:
        reader = csv.reader(csvfile, dialect='excel')
        for row in reader:
            print("row:", row)
            # row = troubleshooting.  If issue, see where.
            try:
                session = model.connect()
                geo_obj = model.Geographic()

                geo_obj.zipcode = row[0]
                geo_obj.type_addy = row[1]
                geo_obj.primary_city = row[2]
                geo_obj.acceptable_cities = row[3]
                geo_obj.unacceptable_cities = row[4]
                geo_obj.state = row[5]
                geo_obj.county = row[6]
                geo_obj.timezone = row[7]
                geo_obj.area_codes = row[8]
                geo_obj.latitude = float(row[9])
                geo_obj.longitude = float(row[10])
                geo_obj.world_region = row[11]
                geo_obj.country = row[12]
                geo_obj.decommissioned = row[13]
                geo_obj.estimated_population = int(row[14])
                geo_obj.notes = row[15]

                session.add(geo_obj)
                session.commit()
            except:
                print "Error for row data:", row
                f = open('log_file.txt', 'a')
                f.write("\nError. Failure for row:" + str(row))
                f.close()
Example #17
def is_stressful(data_point_time, bpm):
    """This takes a datetime object as its parameter and determines if 
	data associated with it indicates stress by comparing to filtered datapoints from the preceeding week.
	The caller should expect a Boolean to be returned."""

    # Determine the startbound for the query:
    d = timedelta(days=-7)
    sb = data_point_time + d

    dbsession = connect()

    dataset = dbsession.query(HRDataPoint).filter(
        and_(HRDataPoint.start_datetime > sb,
             HRDataPoint.start_datetime < data_point_time)).all()

    bpm_list = []
    for each in dataset:
        bpm_list.append(each.bpm)

    mean_of_dataset = numpy.mean(bpm_list)

    if bpm > (mean_of_dataset + 9):
        return True

    return False
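
One caveat: numpy.mean of an empty list returns nan (with a runtime warning), so a week with no stored data points makes the bpm comparison silently False. An explicit guard, sketched with the same names as above:

import numpy

def mean_bpm(dataset):
    # dataset: HRDataPoint rows from the preceding week
    bpm_list = [each.bpm for each in dataset]
    if not bpm_list:
        # no baseline data; numpy.mean([]) would return nan
        return None
    return numpy.mean(bpm_list)

is_stressful can then return False explicitly when mean_bpm gives None, instead of relying on a nan comparison.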
Example #18
def load_geographics():
    with open('seed_data/zip_code_database.csv', 'rb') as csvfile:
        reader = csv.reader(csvfile, dialect='excel')
        for row in reader:
            print ("row:", row)
            # row = troubleshooting.  If issue, see where.
            try:
                session = model.connect()
                geo_obj = model.Geographic()

                geo_obj.zipcode = row[0]
                geo_obj.type_addy = row[1]
                geo_obj.primary_city = row[2]
                geo_obj.acceptable_cities = row[3]
                geo_obj.unacceptable_cities = row[4]
                geo_obj.state = row[5]
                geo_obj.county = row[6]
                geo_obj.timezone = row[7]
                geo_obj.area_codes = row[8]
                geo_obj.latitude = float(row[9])
                geo_obj.longitude = float(row[10])
                geo_obj.world_region = row[11]
                geo_obj.country = row[12]
                geo_obj.decommissioned = row[13]
                geo_obj.estimated_population = int(row[14])
                geo_obj.notes = row[15]

                session.add(geo_obj)
                session.commit()
            except:
                print "Error for row data:", row
                f = open('log_file.txt', 'a')
                f.write("\nError. Failure for row:" + str(row))
                f.close()
Example #19
def load_demographics():
    with open('seed_data/SlimmerData_Consolidated.csv', 'rU') as csvfile:
        reader = csv.reader(csvfile, dialect='excel')
        for row in reader:
            print "row:", row
            # row = troubleshooting.  If issue, see where.
            try:
                session = model.connect()
                demo_obj = model.Demographic()

                demo_obj.zipcode = row[0]
                demo_obj.popdensity = float(row[1])
                demo_obj.pctemployed = float(row[2])
                demo_obj.pctmnf = float(row[3])
                demo_obj.pctlogistics = float(row[4])
                demo_obj.pctit = float(row[5])
                demo_obj.pctprof = float(row[6])
                demo_obj.hhincq10 = int(row[7])
                demo_obj.hhincq30 = int(row[8])
                demo_obj.hhincq50 = int(row[9])
                demo_obj.hhincq70 = int(row[10])
                demo_obj.hhincq90 = int(row[11])
                demo_obj.hhincq95 = int(row[12])
                demo_obj.pctheatelec = float(row[13])

                session.add(demo_obj)
                session.commit()
            except:
                print "Error for row data:", row
                f = open('log_file.txt', 'a')
                f.write("\nError. Failure for row:" + str(row))
                f.close()
Example #20
def write_all_user_predictions_to_sql():
	# user_index_to_id, movie_id_to_index = load_json_indexes()
	# user_id_to_index = load_user_id_to_index_index()
	session = model.connect()
	existing_ratings = get_existing_ratings_from_file()
	for user_index in xrange(70000): #fixme
		write_one_user_prediction_to_sql(user_index, session, existing_ratings)
Example #21
    def setServiceParent(self, parent):
        log.msg("Starting DB Status handler")
        self.orig_parent = parent
        base.StatusReceiverMultiService.setServiceParent(self, parent)

        # Skip doing anything if we're just doing a checkconfig.  We don't want to
        # potentially change the state of the database on a checkconfig.
        if isinstance(parent, checkconfig.ConfigLoader):
            return

        # Keep a local reference to the session maker On a buildbot reconfig,
        # model.Session will be reset to None, and we might get
        # stepStarted/stepFinished notifications while the reconfig is
        # happening.
        try:
            self.Session = model.connect(self.dburl, pool_recycle=60)

            # Let our subscribers know about the database connection
            # This gives them the opportunity to set up their own tables, etc.
            for sub in self.subscribers:
                if hasattr(sub, 'databaseConnected'):
                    try:
                        sub.databaseConnected(model.metadata.bind)
                    except:
                        log.msg("DBERROR: Couldn't notify subscriber %s of database connection" % sub)
                        log.err()

            self.setup()
        except:
            if sys.exc_info()[0] is not sqlalchemy.exc.OperationalError:
                log.msg("DBERROR: Couldn't connect to database")
                log.err()
            self.lostConnection()
Example #22
 def __init__(self, src_path, output, ext_pool='.pdf', ignore_hidden=True):
     self.db = model.connect()
     if not os.path.exists(media_path):
         os.makedirs(media_path)
     self.ext_pool = ext_pool
     self.ignore_hidden = ignore_hidden
     self.src_path = src_path
     self.output = output
     self.flag = True
Example #23
def main(session):
    # You'll call each of the load_* functions with the session as an argument
    # load_users(session) # comment this out when seed users have been loaded
    # load_movies(session) # comment this out when seed movies have been loaded
    # load_ratings(session) # comment this out when seed ratings have been loaded
    pass

if __name__ == "__main__":
    s = model.connect()
    main(s)
Example #24
def main():
    # You'll call each of the load_* functions with the session as an argument
    session = model.connect()
    load_user(session)
    print "user loaded"
    load_item(session)
    print "movies loaded"
    load_data(session)
    print "ratings loaded"
Example #25
def main(session):
    # You'll call each of the load_* functions with the session as an argument
    session = model.connect()
    # load_users(session)
    # print "loaded users"
    load_movies(session)
    print "loaded movies"
    load_ratings(session)
    print "loaded ratings"
Example #26
 def __init__(self, src_path, output, ext_pool='.pdf', ignore_hidden=True):
     self.db = model.connect()
     if not os.path.exists(media_path):
         os.makedirs(media_path)
     self.ext_pool = ext_pool
     self.ignore_hidden = ignore_hidden
     self.src_path = src_path
     self.output = output
     self.flag = True
Example #27
def retrieve_from_db(state_code):
    """imports model, pulls mwh production data from db, and places into pandas df.
    Also pulls county for each plant_name, and places into dict."""

    # add parent directory to the path, so can import model.py
    #  need model in order to update the database when this task is activated by cron
    import os
    parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.sys.path.insert(0, parentdir)

    import model
    s = model.connect()

    # retrieve DECEMBER production data, for all turbines at all power plants in California
    CA_gen_dec13_obj = s.execute(
        'SELECT plant_name, state, fuel_type, dec_mwh_gen FROM "ProdGensDec2013" WHERE state=\'%s\' '
        % state_code)

    CA_gen_dec13_data = CA_gen_dec13_obj.fetchall()
    df_dec2013 = DataFrame(CA_gen_dec13_data)
    df_dec2013.columns = ['plant_name', 'state', 'fuel_type', 'dec_mwh_gen']

    # retrieve JAN-NOV 2014 production data, for all turbines at all power plants in California
    CA_gen_2014_obj = s.execute(
        'SELECT plant_name, state, fuel_type, jan_mwh_gen, feb_mwh_gen, mar_mwh_gen, apr_mwh_gen, may_mwh_gen, jun_mwh_gen, jul_mwh_gen, aug_mwh_gen, sep_mwh_gen, oct_mwh_gen, nov_mwh_gen FROM "ProdGens" WHERE state=\'%s\' '
        % state_code)

    CA_gen_2014_data = CA_gen_2014_obj.fetchall()
    df_2014 = DataFrame(CA_gen_2014_data)
    df_2014.columns = [
        'plant_name', 'state', 'fuel_type', 'jan_mwh_gen', 'feb_mwh_gen',
        'mar_mwh_gen', 'apr_mwh_gen', 'may_mwh_gen', 'jun_mwh_gen',
        'jul_mwh_gen', 'aug_mwh_gen', 'sep_mwh_gen', 'oct_mwh_gen',
        'nov_mwh_gen'
    ]

    # retrieve county name, assigned to each turbine at each plant in California
    CA_counties_obj = s.execute(
        'SELECT plant_name, county FROM "StatsGens" WHERE state=\'%s\' GROUP BY plant_name, county'
        % state_code)

    CA_plant_counties = CA_counties_obj.fetchall()
    df_counties = DataFrame(CA_plant_counties)
    df_counties.columns = ['plant_name', 'county']
    # now convert into dict, so can easily add county to other df.
    dict_counties = {}
    for idx, row in enumerate(df_counties.values):
        plant_name, county = row
        # clean the county name
        county = unicodedata.normalize('NFKD',
                                       county).encode('ascii', 'ignore')
        county = county.lower().title()
        county = county.replace(" County", "")
        dict_counties[plant_name] = county

    return df_dec2013, df_2014, dict_counties
Example #28
def main(session):
    # when running for real, remove echo = true
    # You'll call each of the load_* functions with the session as an argument
    # load_movies(session, 'seed_data/u.item')
    # load_ratings(session, 'seed_data/u.data')
    # load_users(session, 'seed_data/u.user')
    pass

if __name__ == "__main__":
    s = model.connect()
    main(s)
Example #29
def main(session):
    # if dataset hasn't already been imported into DB, call the following:
    # load_users(session)
    # load_movies(session)
    # load_ratings(session)
    pass


if __name__ == "__main__":
    s = model.connect()
    main(s)
Example #30
def display_provider():
	"""Given a unique identifier for a doctor, display a page with the doctor's 
	contact information"""

	npi = request.args.get("id")

	session = model.connect()

	provider = session.query(model.Provider).get(npi)

	return render_template('provider_page.html', provider=provider)
Example #31
def load_users(session):
    # use u.user
    with open('seed_data/u.user', 'rb') as csvfile:
        reader = csv.reader(csvfile)
        session = model.connect()
        for row in reader:
            row = row.pop().split("|")
            user = model.User(None, None, row[1], row[4])
            user.id = row[0]
            session.add(user)
        session.commit()
Example #32
def query_EIA_fuel_monthly_Series(ea_fuel):
    """specifically gets EIA data from ProdGen"""

    import os
    parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.sys.path.insert(0, parentdir)

    import model
    s = model.connect()

    fuel_codes = {
        'coal': "('BIT','ANT','LIG','SUB','RC','WC','CBL','SC', 'SGC')",
        'gas': "('NG','BFG','OG','PG','OTH')",
        'nuclear': 'NUC',
        'solar': 'SUN',
        'hydro': 'WAT',
        'wind': 'WND',
        'other': "('DFO','RFO','JF','KER','WO','PC','SG','AB','MSW','OBS','WDS','OBL','SLW','BLQ','WDL','OBG','GEO','LFG','TDF','MSB','MSN','WH','PUR','SGP','MWH')"
    }

    if ea_fuel in ('nuclear', 'solar', 'wind', 'hydro'):
        code = fuel_codes[ea_fuel]
        mo_fuel_obj = s.execute(
            'SELECT sum(jan_mwh_gen), sum(feb_mwh_gen), sum(mar_mwh_gen), sum(apr_mwh_gen), sum(may_mwh_gen), sum(jun_mwh_gen), sum(jul_mwh_gen), sum(aug_mwh_gen), sum(sep_mwh_gen), sum(oct_mwh_gen), sum(nov_mwh_gen) FROM "ProdGens" WHERE fuel_type=\'%s\' and state=\'CA\' '
            % code)

    if ea_fuel in ('coal', 'gas', 'other'):
        list_of_codes = fuel_codes[ea_fuel]
        mo_fuel_obj = s.execute(
            'SELECT sum(jan_mwh_gen), sum(feb_mwh_gen), sum(mar_mwh_gen), sum(apr_mwh_gen), sum(may_mwh_gen), sum(jun_mwh_gen), sum(jul_mwh_gen), sum(aug_mwh_gen), sum(sep_mwh_gen), sum(oct_mwh_gen), sum(nov_mwh_gen) FROM "ProdGens" WHERE state=\'CA\' and fuel_type IN %s '
            % list_of_codes)

    mo_fuel_entry = mo_fuel_obj.fetchall()

    # turn into dict
    list_of_months = [
        'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct',
        'nov'
    ]
    mo_fuel_entry_dict = {}
    for i in range(11):
        mo_fuel_entry_dict[list_of_months[i]] = mo_fuel_entry[0][i]

    # can now turn dict into Series
    return Series(mo_fuel_entry_dict)
Example #33
def before_request():
	engine, DB = model.connect()
	g.db = DB()

	id = session.get('id', None)
	if id:
		g.me = g.db.query(Person).filter_by(id=id).first()
	else:
		g.me = Person()
		g.db.add(g.me)
		g.db.commit()
		session['id'] = g.me.id
Example #34
def format_data_day(day_string):
    """Takes a single date as parameter and returns a dict of times during the 
	day as keys and boolean values.
	This will take a string parameter formatted like this: '2015-24-02' """

    dbsession = connect()
    db_result = dbsession.query(HRDataPoint).filter_by(
        day_of_point=day_string).all()

    # Need to extract the time from the datetime attribute,
    # so make a dict of {<time> : <boolean>} pairs:
    dict_day_booleans = {}

    for each_point in db_result:

        dt = each_point.start_datetime
        hour_of_point = dt.hour

        # Create a dictionary of the data to display with a *datetime* object as the key
        dict_day_booleans[hour_of_point] = [
            each_point.is_stressful, each_point.bpm
        ]

    # Reference the dict indexed by time and create the dict to return, which uses a string as key instead:
    to_display = {}
    dict_keys = dict_day_booleans.keys()

    for a_key in dict_keys:
        if a_key < 10:
            to_display['9 am'] = dict_day_booleans.get(a_key)
        elif a_key < 11:
            to_display['10 am'] = dict_day_booleans.get(a_key)
        elif a_key < 12:
            to_display['11 am'] = dict_day_booleans.get(a_key)
        elif a_key < 13:
            to_display['noon'] = dict_day_booleans.get(a_key)
        elif a_key < 14:
            to_display['1 pm'] = dict_day_booleans.get(a_key)
        elif a_key < 15:
            to_display['2 pm'] = dict_day_booleans.get(a_key)
        elif a_key < 16:
            to_display['3 pm'] = dict_day_booleans.get(a_key)
        elif a_key < 17:
            to_display['4 pm'] = dict_day_booleans.get(a_key)
        elif a_key < 18:
            to_display['5 pm'] = dict_day_booleans.get(a_key)
        elif a_key < 19:
            to_display['6 pm'] = dict_day_booleans.get(a_key)
        else:
            pass

    return to_display
Example #35
def main(session):

    # read_directions_files(session)
    # read_directions_files_same_airport(session)
    # load_directions(session)
    # load_distance(session)
    # load_cities(session)
    # load_flight_data(session)
    pass


if __name__ == "__main__":
    s = model.connect()
    main(s)
Example #36
    def test_connect(self):
        connexions = Counter()
        Counter.ensure_0()
        config = type('ConfigMock', (), {})()

        # mocking a model
        DummyModel = type('ModelMock', (), {})

        def mocked_connect(test):
            def _connect(conf):
                connexions.inc()
                test.assertIs(conf, config)
            return _connect

        model1 = DummyModel()
        model1.connect = mocked_connect(self)

        model2 = DummyModel()
        model2.connect = mocked_connect(self)

        model._models_registry = set([model1, model2])
        model.connect(config)
        self.assertEqual(connexions.num, 2)
Example #37
def load_ratings(session):
    # use u.data
    with open('seed_data/u.data', 'rb') as csvfile:
        reader = csv.reader(csvfile)
        session = model.connect()
        for row in reader:
            row = row.pop().split("\t")
            rating = model.Rating(row[1], row[2], row[0])
            session.add(rating)

        session.commit()
Example #38
 def result(*args, **kwargs):
     with model.connect(db_path) as connection:
         context = Context(db_logic=model.DBLogic(connection))
         context.db_logic.initialize_db()
     context.debug = debug
     try:
         return func(context, *args, **kwargs)
     # Bottle returns all responses via Exceptions. Yuck :(
     except bottle.HTTPResponse:
         raise
     # Format real exceptions.
     except Exception as e:
         if not context.debug or srvcfg.CTF_DIFFICULTY >= 3:
             raise
         return context.render_exception(e)
Example #39
def save_to_db(data_as_string):
	""" 'each' is a dictionary. 'each' will become a single DB record; this loop 
		parses the json and assigns its values to the datapoint object, then adds it to the SQL 
		session to commit at the end.""" 

	data_dict = json.loads(data_as_string)
	dbsession = model.connect()

	for each in data_dict['point']:
		# Create a datapoint object
		datapoint = model.HRDataPoint()

		# Assign its attributes according to the dictionary contents.
		datapoint.user_id = 1 # Need to hardcode this to me until multiple users/logins are supported. 
		datapoint.bpm = each['value'][0]['fpVal']
		datapoint.start_time = each['startTimeNanos']
		datapoint.end_time = each['endTimeNanos']
		datapoint.start_datetime = convert_to_datetime(datapoint.start_time)
		datapoint.end_datetime = convert_to_datetime(datapoint.end_time)

		sdt = convert_to_datetime(datapoint.start_time)
		time_of_sdt = sdt.time()

		# Make sure the point is in working hours before writing it to the DB:
		if not (time_of_sdt > WORK_START) & (time_of_sdt < WORK_END):
			continue #I expect this to go to the next point in data_dict in line 16

		datapoint.day_of_point = sdt.strftime('%Y-%m-%d')
		
		# Check if the datapoint is stressful when compared to existing DB data
		datapoint.is_stressful = data_filter.is_stressful(datapoint.start_datetime, datapoint.bpm)
		
		# Make sure elevated bpm isn't motion related before writing it to the DB
		if datapoint.is_stressful:
			if data_filter.is_motion_related(datapoint.start_time):
				print "the datapoint is stressful, using continue."
				continue

		# Add the datapoint to the db session
		dbsession.add(datapoint)
		# Putting the commit *inside* the loop so that the
		# is_stressful function can use the committed datapoints
		# when it calls the db. Not as performant but it
		# makes the calculations more accurate.
		dbsession.commit()
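
The closing comment explains the per-row commit: it lets is_stressful see earlier points from the same batch. If that ever becomes a bottleneck, a possible variant (a sketch, assuming dbsession is a SQLAlchemy session and that is_stressful queries through this same session) is to flush each point and commit once:

def save_points(dbsession, datapoints):
    for datapoint in datapoints:
        dbsession.add(datapoint)
        # flush() issues the INSERT so later queries on this session can
        # see the row, without ending the transaction
        dbsession.flush()
    # one commit for the whole batch
    dbsession.commit()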
Example #40
def neighborhoods_in_county():
    import model

    import os
    Zillow_key = os.environ["ZILLOW_ZWSID"]

    from urllib2 import Request, urlopen, URLError
    from xml.dom import minidom

    url_zillow_neighborhood = "http://www.zillow.com/webservice/GetRegionChildren.htm?zws-id=" + Zillow_key + "&state=CA&county=Alameda"

    response = urlopen(url_zillow_neighborhood)
    dom_zillow_neighborhood = minidom.parse(response)

    # need only region tags in the list
    list_regions = dom_zillow_neighborhood.getElementsByTagName("list")

    for node in dom_zillow_neighborhood.getElementsByTagName("region"):
        name = (handleTok(
            node.getElementsByTagName("name"))).encode("utf8").strip()
        latitude = (handleTok(
            node.getElementsByTagName("latitude"))).encode("utf8").strip()
        longitude = (handleTok(
            node.getElementsByTagName("longitude"))).encode("utf8").strip()

        print "NEW NEIGHBORHOOD"
        print name
        print latitude
        print longitude

        try:
            session = model.connect()
            Zdemo_obj = model.zillow_neighborhood()

            Zdemo_obj.name = name
            Zdemo_obj.latitude = float(latitude)
            Zdemo_obj.longitude = float(longitude)

            session.add(Zdemo_obj)
            session.commit()
        except:
            print "Error for row data:", name
            f = open('log_file.txt', 'a')
            f.write("\nError. Failure for row:" + str(name))
            f.close()
Example #41
def retrieve_from_db(state_code):
    """imports model, pulls mwh production data from db, and places into pandas df.
    Also pulls county for each plant_name, and places into dict."""

    # add parent directory to the path, so can import model.py
    #  need model in order to update the database when this task is activated by cron
    import os
    parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.sys.path.insert(0,parentdir)

    import model
    s = model.connect()

    # retrieve DECEMBER production data, for all turbines at all power plants in California
    CA_gen_dec13_obj = s.execute('SELECT plant_name, state, fuel_type, dec_mwh_gen FROM "ProdGensDec2013" WHERE state=\'%s\' ' % state_code)

    CA_gen_dec13_data = CA_gen_dec13_obj.fetchall()
    df_dec2013 = DataFrame(CA_gen_dec13_data)
    df_dec2013.columns = ['plant_name', 'state', 'fuel_type', 'dec_mwh_gen']

    # retrieve JAN-NOV 2014 production data, for all turbines at all power plants in California
    CA_gen_2014_obj = s.execute('SELECT plant_name, state, fuel_type, jan_mwh_gen, feb_mwh_gen, mar_mwh_gen, apr_mwh_gen, may_mwh_gen, jun_mwh_gen, jul_mwh_gen, aug_mwh_gen, sep_mwh_gen, oct_mwh_gen, nov_mwh_gen FROM "ProdGens" WHERE state=\'%s\' ' % state_code)

    CA_gen_2014_data = CA_gen_2014_obj.fetchall()
    df_2014 = DataFrame(CA_gen_2014_data)
    df_2014.columns = ['plant_name', 'state', 'fuel_type', 'jan_mwh_gen', 'feb_mwh_gen', 'mar_mwh_gen', 'apr_mwh_gen', 'may_mwh_gen', 'jun_mwh_gen', 'jul_mwh_gen', 'aug_mwh_gen', 'sep_mwh_gen', 'oct_mwh_gen', 'nov_mwh_gen']

    # retrieve county name, assigned to each turbine at each plant in California
    CA_counties_obj = s.execute('SELECT plant_name, county FROM "StatsGens" WHERE state=\'%s\' GROUP BY plant_name, county' % state_code)

    CA_plant_counties = CA_counties_obj.fetchall()
    df_counties = DataFrame(CA_plant_counties)
    df_counties.columns = ['plant_name', 'county']
    # now convert into dict, so can easily add county to other df.
    dict_counties = {}
    for idx, row in enumerate(df_counties.values):
        plant_name, county = row
        # clean the county name
        county = unicodedata.normalize('NFKD', county).encode('ascii', 'ignore')
        county = county.lower().title()
        county = county.replace(" County", "")
        dict_counties[plant_name] = county


    return df_dec2013, df_2014, dict_counties
Example #42
def load_movies(session):
    # use u.item
    with open('seed_data/u.item', 'rb') as csvfile:
        session = model.connect()
        reader = csv.reader(csvfile, delimiter='|')

        for row in reader:
            print row
            title = row[1]
            title = title.decode("latin-1") 
            if row[2]:
                row[2] = datetime.strptime(row[2], '%d-%b-%Y')
            else:
                row[2] = None

            movie = model.Movie(title, row[2], row[4]) 
            session.add(movie)
        
        session.commit()
Example #43
def fetch_weeks_data(week_number):
    """Takes as parameter a week number (so 1 through 52) and returns a list of data 
	point objects that ocurred in that week. It currently assumes the year is 2015; this will
	need to be refactored in later iterations."""

    dbsession = connect()

    requested_week = Week(2015, week_number)

    # These functions return datetime objects. I <3 the isoweek library zomg.
    startbound = requested_week.monday()
    # This doesn't *include* data from the endbound day, just up to that day.
    endbound = requested_week.saturday()

    one_weeks_data = dbsession.query(HRDataPoint).filter(
        HRDataPoint.start_datetime > startbound,
        HRDataPoint.start_datetime < endbound).all()

    return one_weeks_data
Example #44
def check_for_new_bpm():
    """This queries the DB for a user to see when the last bpm data refresh 
	was for that user, and if the last pull was > 24 hours ago, it calls
	fetch_data to add recent bpm data for that user. The user is currently
	hardcoded to me."""

    dbsession = connect()
    result = dbsession.execute(
        'select * from "HRDataPoints" order by start_datetime desc limit 1')

    latest_datapoint = result.first()
    latest_timestamp = int(latest_datapoint.end_time)
    now_in_nanotime = nanotime.now()

    # If the timestamp on the most recent datapoint is more than a day old, call Google for updated data
    if latest_timestamp < (int(nanotime.now()) - DAY_IN_NANOSECS):
        # get now in nanotime for the endbound of the dataset
        endbound = str(int(nanotime.now()))
        # convert latest_timestamp to int so I can increment it up a second
        int_latest_timestamp = int(latest_timestamp)
        int_latest_timestamp += 1000000000
        latest_timestamp = str(int_latest_timestamp)

        new_data = foa.fetch_data(data_type='bpm',
                                  startbound=latest_datapoint.end_time,
                                  endbound=endbound)

        try:
            data_dict = json.loads(new_data)
        except:
            print "This is what new_data looks like: ", new_data
            return "There was an unexpected error."

        data_point_store.save_to_db(new_data)
        return True
    else:
        return False
Example #45
def query_EIA_fuel_monthly_Series(ea_fuel):
    """specifically gets EIA data from ProdGen"""

    import os
    parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.sys.path.insert(0,parentdir)

    import model
    s = model.connect()

    fuel_codes = {
        'coal': "('BIT','ANT','LIG','SUB','RC','WC','CBL','SC', 'SGC')",
        'gas': "('NG','BFG','OG','PG','OTH')",
        'nuclear': 'NUC',
        'solar': 'SUN',
        'hydro': 'WAT',
        'wind': 'WND',
        'other': "('DFO','RFO','JF','KER','WO','PC','SG','AB','MSW','OBS','WDS','OBL','SLW','BLQ','WDL','OBG','GEO','LFG','TDF','MSB','MSN','WH','PUR','SGP','MWH')"
    }

    if ea_fuel in ('nuclear', 'solar', 'wind', 'hydro'):
        code = fuel_codes[ea_fuel]
        mo_fuel_obj = s.execute('SELECT sum(jan_mwh_gen), sum(feb_mwh_gen), sum(mar_mwh_gen), sum(apr_mwh_gen), sum(may_mwh_gen), sum(jun_mwh_gen), sum(jul_mwh_gen), sum(aug_mwh_gen), sum(sep_mwh_gen), sum(oct_mwh_gen), sum(nov_mwh_gen) FROM "ProdGens" WHERE fuel_type=\'%s\' and state=\'CA\' ' % code)

    if ea_fuel in ('coal', 'gas', 'other'):
        list_of_codes = fuel_codes[ea_fuel]
        mo_fuel_obj = s.execute('SELECT sum(jan_mwh_gen), sum(feb_mwh_gen), sum(mar_mwh_gen), sum(apr_mwh_gen), sum(may_mwh_gen), sum(jun_mwh_gen), sum(jul_mwh_gen), sum(aug_mwh_gen), sum(sep_mwh_gen), sum(oct_mwh_gen), sum(nov_mwh_gen) FROM "ProdGens" WHERE state=\'CA\' and fuel_type IN %s ' % list_of_codes )

    mo_fuel_entry = mo_fuel_obj.fetchall()

    # turn into dict
    list_of_months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov']
    mo_fuel_entry_dict = {}
    for i in range(11):
        mo_fuel_entry_dict[list_of_months[i]] = mo_fuel_entry[0][i]

    # can now turn dict into Series
    return Series(mo_fuel_entry_dict)
Example #46
def main():

    dbsession = model.connect()

    hr_json = open('./raw_data/psql_output.txt').read()
    lines = hr_json.split('\n')

    for each_line in lines:
        # separate on pipes
        db_record_as_list = each_line.split('|')

        point = model.HRDataPoint()

        point.user_id = db_record_as_list[1].strip()
        point.bpm = db_record_as_list[2].strip()
        point.start_time = db_record_as_list[3].strip()
        point.end_time = db_record_as_list[4].strip()
        point.start_datetime = datetime.strptime(db_record_as_list[5].strip(),
                                                 "%Y-%m-%d %H:%M:%S")
        point.end_datetime = datetime.strptime(db_record_as_list[6].strip(),
                                               "%Y-%m-%d %H:%M:%S")
        point.day_of_point = db_record_as_list[7].strip()

        # convert the t or f to an actual bool
        if db_record_as_list[8].strip() == 't':
            stressful = True
        elif db_record_as_list[8].strip() == 'f':
            stressful = False
        else:
            stressful = None

        point.is_stressful = stressful

        print "Here's the current data point: ", point
        dbsession.add(point)

    dbsession.commit()
Example #47
def insert_row_db(date, hr, adict):
    """imports model, and inserts web scraped data into the db"""

    # add parent directory to the path, so can import model.py
    #  need model in order to update the database when this task is activated by cron
    import os
    parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.sys.path.insert(0, parentdir)

    import model
    session = model.connect()

    for k, v in adict.items():
        fuel_obj = model.HistoricCAISOProdByFuel()

        fuel_obj.date = datetime.strptime(date, '%Y%m%d')
        fuel_obj.time_start = hr
        fuel_obj.fuel_type = k
        fuel_obj.mw_gen = v

        session.add(fuel_obj)
        print fuel_obj

    session.commit()
Example #48
            if row[0] == '#':
                continue
            else:
                for i in range(len(row)):
                    row[i] = row[i].decode('utf-8')

                trad, simp, pinyin = row[0], row[1], row[2]
                definition = ''.join(row[3:])
                pinyin = pinyin.strip('"')
                definition = definition.strip('"')

                entry = model.Entry(simplified=simp,
                                    traditional=trad,
                                    pinyin=pinyin,
                                    definition=definition)
                session.add(entry)

        try:
            session.commit()
        except sqlalchemy.exc.IntegrityError, e:
            session.rollback()


def main(session):
    load_dict(session)


if __name__ == "__main__":
    session = model.connect()
    main(session)
Example #49
            new_movie = model.Movie(id=row[0], name=title, released_at=formatted_date, imdb_url=row[4])
            session.add(new_movie)
    session.commit()
    


def load_data(session):
    with open('seed_data/u.data', 'rb') as csvfile:
        data_db = csv.reader(csvfile, delimiter='\t')
        for row in data_db:
            timestamp = int(row[3])
            formatted_timestamp = datetime.datetime.utcfromtimestamp(timestamp)
            new_data = model.Rating(user_id=row[0], movie_id=row[1], rating=row[2], timestamp=formatted_timestamp)
            session.add(new_data)
    session.commit()
    

def main():
    # You'll call each of the load_* functions with the session as an argument
    session = model.connect()
    load_user(session)
    print "user loaded"
    load_item(session)
    print "movies loaded"
    load_data(session)
    print "ratings loaded"

if __name__ == "__main__":
    s = model.connect()
    main(s)
Example #50
from model import (connect, get_session,
                   Question, Picture, Group, GroupMember)

connect()
db_session = get_session()

''' Function of Accessing Database '''
def get_group_list():
    query = (db_session
            .query(Group.id,
                   Group.name)
            .select_from(Group)
            .filter(Group.status == 1)
            .all())

    group_dict = {}
    group_list = []
    for groupId, group_name in query:
        if groupId not in group_dict:
            group_dict[groupId] = "yes"
            group_list.append({
                "id": groupId,
                "name": group_name
            })

    groupList = {
        "group": group_list
    }

    print("[DB] get groupList success")
    return groupList
Example #51
parser.add_argument('--lon', type=float, required=True, help='longitude')

parser.add_argument('--dlat',
                    type=float,
                    required=True,
                    help='destination latitude')

parser.add_argument('--dlon',
                    type=float,
                    required=True,
                    help='destination longitude')

parser.add_argument('--id', default='new', help='bike id')

args = parser.parse_args()

model.connect('mydb')

if args.id == 'new':
    b = model.Bike()
else:
    b = model.Bike.objects.with_id(args.id)

b.point = [args.lon, args.lat]
b.destination = [args.dlon, args.dlat]
b.update(new_point=[args.lon, args.lat])

b.save()

print b
Example #52
        rating = Rating(user_id=user_id, movie_id=id)
        db_session.add(rating)
    else:
        flash("Rating updated", "success")

    rating.rating = rating_number
    db_session.commit()

    return redirect(url_for("view_movie", id=id))

@app.route("/my_ratings")
def my_ratings():
    if not g.user_id:
        flash("Please log in", "warning")
        return redirect(url_for("index"))

    ratings = db_session.query(Rating).filter_by(user_id=g.user_id).all()
    return render_template("my_ratings.html", ratings=ratings)

@app.route("/logout")
def logout():
    del session['user_id']
    return redirect(url_for("index"))

if __name__ == "__main__":
    db_uri = app.config.get('SQLALCHEMY_DATABASE_URI')
    if not db_uri:
        db_uri = "sqlite:///ratings.db"
    model.connect(db_uri)
    app.run(debug=True, port=8080, host='0.0.0.0')
Example #53
def index():
    model.connect()
    users = model.get_users()
    return render_template("index.html", users=users)
Example #54
def main(session):
    # You'll call each of the load_* functions with the session as an argument

    ## Sample code to test geo function call
    # lat,lng = geo.geocode("84 Madrid place, Fremont, CA, 94539", "false")
    # return  lat,lng

    # Call geocode function to load latlng for addresses where latlng is missing
    load_latlng()

    # Get all latlngs available in the Address table to calculate centers using kmeans clustering. Returns data as a tuple.
    query = model.session.query(model.Address).all()
    latlng_list = get_latlng(query)

    
    # Format latlngs into a numpy array to calculate centroids using kmeans clustering.
    data = numpy.array(latlng_list)
    centers = get_latlng_clustercenter(data,6)
    # print "Centers are: ", centers
    # print "idx is:," ,idx


if __name__ == "__main__":
    session = model.connect()
    main(session)
Example #55
            if str_time != "0":
                movie_title = line[1].decode("latin-1")
                movie_title = movie_title[:-6].strip()
            
                release_datetime = datetime.strptime(str_time, "%d-%b-%Y")
                new_movie = model.Movie(id=line[0], movie_title=movie_title, 
                            release_date=release_datetime, IMDB=line[4])
                # add new movie to session
                session.add(new_movie)
    # commit all movies from session
    session.commit()

def load_ratings(session):
    # use u.data
    with open("seed_data/u.data") as r:
        reader = csv.reader(r, delimiter="\t")
        for line in reader:
            new_rating = model.Rating(user_id=line[0], movie_id=line[1], rating=line[2])
            session.add(new_rating)
    session.commit()

def main(session):
    # You'll call each of the load_* functions with the session as an argument
    load_users(session)
    load_movies(session)
    load_ratings(session)

if __name__ == "__main__":
    s = model.connect()
    main(s)