Example #1
def workspace_test():
	"""Exercise the model helpers: save(), all(), get(), filter(), delete()."""
	print_data('workspaces objects', br=False)

	for index in range(3):
		w = Workspace()
		w.name = 'New workspace name'
		w.description = 'Some new description'
		w.save()

	workspaces = Workspace.all()
	print_data('new objects -> model.all()', workspaces)

	w.name = 'Updated name'
	w.save()

	workspaces = Workspace.all()
	print_data('UPDATED -> model.all()', workspaces)

	workspace = Workspace.get(id=w.id, name=w.name)
	print_data('GET -> model.get()', [workspace])

	workspaces = Workspace.filter(name='New workspace name')
	print_data('FILTER -> model.filter()', workspaces)

	for index in range(2):
		o = Application()
		o.workspace_id = w.guid
		o.save()

	a = View()
	a.application_id = o.guid
	a.save()

	a = Resource()
	a.application_id = o.guid
	a.save()

	for index in range(3):
		o = Widget()
		o.workspace_id = w.guid
		o.save()

	for index in range(3):
		o = DataSource()
		o.workspace_id = w.guid
		o.save()

	objects = Workspace.all() + Resource.all() + Application.all() + Widget.all() + DataSource.all() + View.all()
	print_data('All objects in db', objects)

	# Clean up: delete every workspace (the prints below expect an empty table).
	for ws in Workspace.all():
		ws.delete()
	workspaces = Workspace.all()
	print_data('cleaned', workspaces)

	workspaces = Workspace.filter(include_deleted=True)
	print_data('cleaned with deleted if exists', workspaces)

	objects = Workspace.all() + Resource.all() + Application.all() + Widget.all() + DataSource.all() + View.all()
	print_data('no objects left', objects)
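The `include_deleted=True` call above implies these models soft-delete rows. A minimal sketch of how such a filter could behave (hypothetical base class, not the project's actual implementation):

class SoftDeleteModel:
    _store = []  # one shared list here; per-model tables in a real implementation

    def delete(self):
        # Soft delete: flag the object instead of removing it from the store.
        self.deleted = True

    @classmethod
    def filter(cls, include_deleted=False, **criteria):
        rows = [o for o in cls._store
                if all(getattr(o, k, None) == v for k, v in criteria.items())]
        if include_deleted:
            return rows
        return [o for o in rows if not getattr(o, 'deleted', False)]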
Example #2
def make_data_source(name, func, func_lang, description="", dependencies=None):
    # Avoid a mutable default argument: a shared list would leak state between calls.
    if dependencies is None:
        dependencies = []
    data_source = DataSource(
        name=name,
        description=description,
        dependencies=dependencies,
        transform_function=func,
        transform_function_language=func_lang
    )
    db.session.add(data_source)
    db.session.commit()
    return data_source
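A quick usage sketch (hypothetical values; the transform string mirrors Example #6, and a Flask-SQLAlchemy `db.session` is assumed to be in scope):

ds = make_data_source(
    name="constant_series",
    func='return [{"timestamp": "2017-10-05T14:48:00.000Z", "value": "1"}]',
    func_lang="python",
)
print(ds.name, ds.dependencies)  # -> constant_series []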
Example #3
import datetime

def add_data_source(uri):
    db = connect()  # assumed helper returning a SQLAlchemy session

    # Upsert: insert the URI on first sight, otherwise bump its usage stats.
    matching = db.query(DataSource).filter_by(uri=uri).first()
    if matching is None:
        source = DataSource(uri=uri,
                            last_accessed=datetime.date.today(),
                            times_used=1)
        db.add(source)
    else:
        matching.last_accessed = datetime.date.today()
        matching.times_used += 1
    db.commit()
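Calling it twice with the same URI shows the upsert behavior (hypothetical URI):

add_data_source("https://example.com/data.csv")  # first call inserts, times_used == 1
add_data_source("https://example.com/data.csv")  # second call updates, times_used == 2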
Example #4
def test_Impala():
    ds = DataSource(db_type="Impala",
                    host="10.10.8.102",
                    database="fdm_db",
                    port=21050)
    conn = get_Connection(ds)

    sql = 'select zsxm_dm,zsxmmc,zsxmjc,xybz,yxbz,sjzsxm_dm,yxqz,yxqq from odm_db.o_hx_qg_dm_gy_zsxm'
    rows = conn.query(sql)
    for row in rows:
        print(
            "zsxm_dm:%s, zsxmmc:%s, zsxmjc:%s, xybz:%s, yxbz:%s, sjzsxm_dm:%s, yxqz:%s, yxqq:%s"
            % (row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7]))
Example #5
def test_MySQL():
    ds = DataSource(db_type="MySQL",
                    host="10.10.8.104",
                    database="fdm_db",
                    port=4000,
                    user="******",
                    password="******")
    conn = get_Connection(ds)

    sql = 'select spbm,spmc from fdm_db.f_dm_gy_spbm'
    rows = conn.query(sql)
    for row in rows:
        print("spbm:%s, spmc:%s" % (row[0], row[1]))
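`get_Connection` itself is not shown in these examples, and the `conn.query()` calls suggest it wraps the raw connection in a small helper. A minimal sketch of the dispatch it might perform, assuming the impyla and PyMySQL drivers (all names below are assumptions):

from impala.dbapi import connect as impala_connect  # assumed driver
import pymysql  # assumed driver

def get_connection(ds):
    # Hypothetical dispatcher: choose a DB-API driver based on ds.db_type.
    if ds.db_type == "Impala":
        return impala_connect(host=ds.host, port=ds.port, database=ds.database)
    if ds.db_type == "MySQL":
        return pymysql.connect(host=ds.host, port=ds.port, user=ds.user,
                               password=ds.password, database=ds.database)
    raise ValueError("unsupported db_type: %s" % ds.db_type)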
Example #6
from app import create_app
from database import db
from models import DataSource, DataRange, Data

app = create_app()
db.init_app(app)  # init_app binds the extension; it does not need an app context

# Smoke-check that the model is queryable inside an app context.
with app.app_context():
    DataSource.query.all()

# Build kwargs for a trivial data source whose transform returns a constant series.
dsd = {}
dsd["name"] = "test"
dsd["description"] = ""
dsd["dependencies"] = []
dsd["transform_function"] = 'return [{"timestamp": "2017-10-05T14:48:00.000Z", "value": "1"}]'
dsd["transform_function_language"] = 'python'
ds = DataSource(**dsd)
with app.app_context():
    db.session.add(ds)
    db.session.commit()

with app.app_context():
    print(DataSource.query.one().dependencies)

f = """
def foo():
    return 5+5

print(foo())
"""
exec(f, {'__builtins__':{}, 'print': print})
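This `exec` pattern is presumably how the stored `transform_function` strings get run; a sketch of a tiny runner (hypothetical helper; note that an empty `__builtins__` limits convenience but is not a security sandbox):

def run_transform(source):
    # Wrap the stored body in a function, execute it, and return its result.
    body = "\n".join("    " + line for line in source.splitlines())
    code = "def transform():\n" + body + "\nresult = transform()"
    env = {'__builtins__': {}, 'print': print}
    exec(code, env)
    return env['result']

print(run_transform('return [{"timestamp": "2017-10-05T14:48:00.000Z", "value": "1"}]'))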
Example #7
if not workspace:
    self.action('goTo', ['/main'])

elif command in ['delete', 'update']:

    if 'data_source_id' not in request.arguments:
        raise Exception('DataSource ID is not provided')
    data_source_id = request.arguments['data_source_id']
    data_source = DataSource.get(guid=data_source_id,
                                 workspace_id=workspace.guid)

    if data_source:
        if command == 'delete':
            data_source.delete()
        else:
            connector = request.arguments['connector']
            if connector:
                data_source.connector = connector
                data_source.save()

    self.dialog_update.action('hide', ['0'])

elif command == 'create':
    connector = request.arguments['connector']
    if connector:
        data_source = DataSource()
        data_source.workspace_id = workspace.guid
        data_source.connector = connector
        data_source.save()
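For reference, the argument shapes each branch above expects (hypothetical values, assuming `command` arrives alongside `request.arguments`):

create_args = {"connector": "postgres://localhost/mydb"}                               # command == 'create'
update_args = {"data_source_id": "a1b2c3", "connector": "postgres://localhost/mydb"}   # command == 'update'
delete_args = {"data_source_id": "a1b2c3"}                                             # command == 'delete'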
Example #8
"""
GOOGLE API
"""
GOOGLE_API_GEOCODE_COORD_URL = "https://maps.googleapis.com/maps/api/geocode/json?latlng=%s,%s&sensor=false&key=%s"
GOOGLE_API_GEOCODE_ADDR_URL = "https://maps.googleapis.com/maps/api/geocode/json?address=%s&sensor=false&key=%s"
GOOGLE_API_KEY = environ.get("GOOGLE_API_KEY")

"""
US PREPROCESSOR
 - US_PROCESSOR_COLUMN_DEFINITIONS: The columns in the data files is inconsistent. This defines the fields for each file
 - US_PROCESSOR_DATA_SOURCES: series component and file location tuple
"""
US_PROCESSOR_FILTERED_KEYS = set(["united_states"])
US_PROCESSOR_COLUMN_DEFINITIONS = {'confirmed': {'name': 5, 'key': 10, 'data': 11}, 'deaths': {'name': 5, 'key': 10, 'data': 12}}
US_PROCESSOR_DATA_SOURCES = [
    DataSource("covid19", "confirmed", join(GITHUB_DIRECTORY, "CSSEGISandData/confirmed_us.csv")),
    DataSource("covid19", "deaths", join(GITHUB_DIRECTORY, "CSSEGISandData/deaths_us.csv"))
]

"""
MX PREPROCESSOR
"""
MX_PROCESSOR_DATA_START_DATE = date(2020, 1, 23)
MX_PROCESSOR_DATA_FILE = join(GITHUB_DIRECTORY, 'carranco-sga/mx_data.csv')
MX_PROCESSOR_DATA_SOURCES = [
    DataSource("covid19", "confirmed", join(OUTPUT_DIRECTORY, "confirmed_mx.csv")),
    DataSource("covid19", "deaths", join(OUTPUT_DIRECTORY, "deaths_mx.csv"))
]
MX_PROCESSOR_COLUMN_SUFFIXES = {
    'confirmed': '',
    'deaths': '_D'
}
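The `DataSource` used in this config is a plain three-field record (series, component, file path) rather than the ORM model of the earlier examples; a minimal compatible definition could be a namedtuple (field names are guesses from usage):

from collections import namedtuple

# Hypothetical definition; the real one may be a small class instead.
DataSource = namedtuple("DataSource", ["series", "component", "path"])

ds = DataSource("covid19", "confirmed", "CSSEGISandData/confirmed_us.csv")
print(ds.series, ds.path)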