Code example #1
 def test_get_text(self):
     r = StrictRedis()
     r.set('test_get_2', "Open Mining")
     DW = DataWarehouse()
     self.assertEqual(
         DW.get("test_get_2", content_type='application/text'),
         "Open Mining")
Code example #2
File: _pandas.py  Project: henriquejensen/mining
 def append(self):
     self.data = DataFrame({})
     DW = DataWarehouse()
     # DataFrame.append returns a new frame rather than mutating in
     # place, so the result must be reassigned or self.data stays empty.
     self.data = self.data.append(
         [DataFrame(DW.get(rel['cube']).get('data'))
          for rel in self.cube.get('relationship')],
         ignore_index=True)
     return self.data
Code example #3
File: _pandas.py  Project: henriquejensen/mining
 def left(self):
     fields = [rel['field'] for rel in self.cube.get('relationship')]
     self.data = DataFrame({fields[0]: []})
     DW = DataWarehouse()
     for rel in self.cube.get('relationship'):
         data = DW.get(rel['cube'])
         self.data = self.data.merge(DataFrame(data.get('data')),
                                     how='outer', on=fields[0])
     return self.data
Code example #4
File: _pandas.py  Project: yenchih/mining
 def append(self):
     self.data = DataFrame({})
     DW = DataWarehouse()
     # DataFrame.append returns a new frame rather than mutating in
     # place, so the result must be reassigned or self.data stays empty.
     self.data = self.data.append(
         [DataFrame(DW.get(rel['cube']).get('data'))
          for rel in self.cube.get('relationship')],
         ignore_index=True)
     return self.data
Code example #5
File: _pandas.py  Project: yenchih/mining
 def left(self):
     fields = [rel['field'] for rel in self.cube.get('relationship')]
     self.data = DataFrame({fields[0]: []})
     DW = DataWarehouse()
     for rel in self.cube.get('relationship'):
         data = DW.get(rel['cube'])
         self.data = self.data.merge(DataFrame(data.get('data')),
                                     how='outer',
                                     on=fields[0])
     return self.data
Code example #6
File: _pandas.py  Project: yenchih/mining
 def inner(self):
     fields = [rel['field'] for rel in self.cube.get('relationship')]
     DW = DataWarehouse()
     for i, rel in enumerate(self.cube.get('relationship')):
         data = DW.get(rel['cube']).get('data')
         df = DataFrame(data)
         if i == 0:
             self.data = df
         else:
             self.data = self.data.merge(df, how='inner', on=fields[0])
     return self.data
Code example #7
File: _pandas.py  Project: henriquejensen/mining
 def inner(self):
     fields = [rel['field'] for rel in self.cube.get('relationship')]
     DW = DataWarehouse()
     for i, rel in enumerate(self.cube.get('relationship')):
         data = DW.get(rel['cube']).get('data')
         df = DataFrame(data)
         if i == 0:
             self.data = df
         else:
             self.data = self.data.merge(df, how='inner', on=fields[0])
     return self.data
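
The three relationship strategies above (append in examples #2 and #4, left in #3 and #5, inner in #6 and #7) all consume the same cube document. Below is a minimal sketch of the data shapes they assume; the slug and field names are hypothetical:

# Hypothetical cube document; 'relationship' drives the joins above.
cube = {
    'slug': 'sales-by-region',
    'relationship': [
        {'cube': 'sales-2014', 'field': 'region_id'},
        {'cube': 'regions', 'field': 'region_id'},
    ],
}

# DataWarehouse.get(slug) is expected to return a dict shaped like
#   {'data': [{'region_id': 1, 'total': 42}, ...], 'columns': [...]}
# append() concatenates the rows of every related cube, while left()
# and inner() merge them on the first relationship field ('region_id'
# here) with outer and inner join semantics respectively.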
Code example #8
File: cube.py  Project: yenchih/mining
    def save(self):
        log_it("SAVE DATA (JSON) ON DATA WAREHOUSE: {}".format(self.slug),
               "bin-mining")
        data = {'data': self.pdict, 'columns': self.keys}
        DW = DataWarehouse()
        DW.save(self.slug, data)

        self.cube['status'] = True
        self.cube['lastupdate'] = datetime.now()
        self.cube['run'] = True
        self.mongo['cube'].update({'slug': self.cube['slug']}, self.cube)

        log_it("CLEAN MEMORY: {}".format(self.slug), "bin-mining")
        gc.collect()
Code example #9
File: cube.py  Project: henriquejensen/mining
    def save(self):
        log_it("SAVE DATA (JSON) ON DATA WAREHOUSE: {}".format(self.slug),
               "bin-mining")
        data = {'data': self.pdict, 'columns': self.keys}
        DW = DataWarehouse()
        DW.save(self.slug, data)

        self.cube['status'] = True
        self.cube['lastupdate'] = datetime.now()
        self.cube['run'] = True
        self.mongo['cube'].update({'slug': self.cube['slug']}, self.cube)

        log_it("CLEAN MEMORY: {}".format(self.slug), "bin-mining")
        gc.collect()
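
save() persists {'data': ..., 'columns': ...} under the cube's slug, which is exactly the shape the relationship methods in examples #2 to #7 read back with DW.get(). A minimal round-trip sketch; the slug and payload here are illustrative only:

from pandas import DataFrame

DW = DataWarehouse()
# Illustrative payload matching what save() writes above.
DW.save('my-cube', {'data': [{'id': 1, 'name': 'Open Mining'}],
                    'columns': ['id', 'name']})
stored = DW.get('my-cube')
df = DataFrame(stored.get('data'))  # same access pattern as left()/inner()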
Code example #10
File: export.py  Project: hlarndt/mining
def data(mongodb, slug, ext='xls'):
    DW = DataWarehouse()

    element = mongodb['element'].find_one({'slug': slug})

    element['page_limit'] = 50
    if request.GET.get('limit', True) is False:
        element['page_limit'] = 9999999999

    data = DW.get(element.get('cube'))
    columns = data.get('columns') or []

    fields = columns
    if request.GET.get('fields', None):
        fields = request.GET.get('fields').split(',')

    filters = [i[0] for i in request.GET.iteritems()
               if len(i[0].split('filter__')) > 1]

    df = DataFrame(data.get('data') or {}, columns=fields)
    if len(filters) >= 1:
        for f in filters:
            s = f.split('__')
            field = s[1]
            operator = s[2]
            value = request.GET.get(f)
            if operator == 'like':
                df = df[df[field].str.contains(value)]
            elif operator == 'regex':
                df = DataFrameSearchColumn(df, field, value, operator)
            else:
                df = df.query(df_generate(df, value, f))

    groupby = []
    if request.GET.get('groupby', None):
        groupby = request.GET.get('groupby', "").split(',')
    if len(groupby) >= 1:
        df = DataFrame(df.groupby(groupby).grouper.get_group_levels())

    if request.GET.get('orderby',
                       element.get('orderby', None)) and request.GET.get(
            'orderby', element.get('orderby', None)) in fields:

        orderby = request.GET.get('orderby', element.get('orderby', ''))
        if type(orderby) == str:
            orderby = orderby.split(',')
        orderby__order = request.GET.get('orderby__order',
                                         element.get('orderby__order', ''))
        if type(orderby__order) == str:
            orderby__order = orderby__order.split(',')
        ind = 0
        for orde in orderby__order:
            if orde == '0':
                orderby__order[ind] = False
            else:
                orderby__order[ind] = True
            ind += 1
        df = df.sort(orderby, ascending=orderby__order)

    # CLEAN MEMORY
    del filters, fields, columns
    gc.collect()

    file_name = '{}/assets/exports/openmining-{}.{}'.format(
        PROJECT_PATH, element.get('cube'), ext)
    if ext == 'csv':
        df.to_csv(file_name, sep=";")
        contenttype = 'text/csv'
    else:
        df.to_excel(file_name)
        contenttype = 'application/vnd.ms-excel'

    response.set_header('charset', 'utf-8')
    response.set_header('Content-disposition', 'attachment; '
                        'filename={}.{}'.format(element.get('cube'), ext))
    response.content_type = contenttype

    with open(file_name, "r") as ifile:
        return ifile.read()
Code example #11
 def test_get_application_json(self):
     r = StrictRedis()
     data = {"id": 1, "name": "Open Mining"}
     r.set('test_get_1', json.dumps(data))
     DW = DataWarehouse()
     self.assertEqual(DW.get("test_get_1"), data)
Code example #12
 def test_save_text(self):
     DW = DataWarehouse()
     DW.save('test_2', "Open Mining", content_type='application/text')
     r = StrictRedis()
     self.assertEqual(r.get('test_2'), "Open Mining")
Code example #13
 def test_save_application_json(self):
     DW = DataWarehouse()
     DW.save('test_1', {"id": 1, "name": "Open Mining"})
     r = StrictRedis()
     self.assertEqual(r.get('test_1'), '{"id": 1, "name": "Open Mining"}')
Code example #14
 def test_connection(self):
     DW = DataWarehouse()
     self.assertTrue(isinstance(DW.conn(), StrictRedis))
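
Examples #1 and #11 to #14 exercise a Redis-backed key/value API with JSON as the default content type. The following is a minimal DataWarehouse sketch consistent with those tests, assuming only the behavior they imply, not the project's actual implementation:

import json

from redis import StrictRedis


class DataWarehouse(object):

    def conn(self):
        # The tests only require that conn() yields a StrictRedis client.
        return StrictRedis()

    def save(self, key, data, content_type='application/json'):
        # JSON payloads are serialized before hitting Redis; plain text
        # ('application/text' in the tests) is stored as-is.
        if content_type == 'application/json':
            data = json.dumps(data)
        self.conn().set(key, data)

    def get(self, key, content_type='application/json'):
        value = self.conn().get(key)
        if content_type == 'application/json':
            return json.loads(value)
        return value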
Code example #15
def data(mongodb, slug, ext='xls'):
    DW = DataWarehouse()

    element = mongodb['element'].find_one({'slug': slug})

    element['page_limit'] = 50
    if request.GET.get('limit', True) is False:
        element['page_limit'] = 9999999999

    data = DW.get(element.get('cube'))
    columns = data.get('columns') or []

    fields = columns
    if request.GET.get('fields', None):
        fields = request.GET.get('fields').split(',')

    filters = [
        i[0] for i in request.GET.iteritems()
        if len(i[0].split('filter__')) > 1
    ]

    df = DataFrame(data.get('data') or {}, columns=fields)
    if len(filters) >= 1:
        for f in filters:
            s = f.split('__')
            field = s[1]
            operator = s[2]
            value = request.GET.get(f)
            if operator == 'like':
                df = df[df[field].str.contains(value)]
            elif operator == 'regex':
                df = DataFrameSearchColumn(df, field, value, operator)
            else:
                df = df.query(df_generate(df, value, f))

    groupby = []
    if request.GET.get('groupby', None):
        groupby = request.GET.get('groupby', '').split(',')
    if len(groupby) >= 1:
        df = DataFrame(df.groupby(groupby).grouper.get_group_levels())

    if request.GET.get('orderby',
                       element.get('orderby', None)) and request.GET.get(
                           'orderby', element.get('orderby', None)) in fields:

        orderby = request.GET.get('orderby', element.get('orderby', ''))
        if type(orderby) == str:
            orderby = orderby.split(',')
        orderby__order = request.GET.get('orderby__order',
                                         element.get('orderby__order', ''))
        if type(orderby__order) == str:
            orderby__order = orderby__order.split(',')
        ind = 0
        for orde in orderby__order:
            if orde == '0':
                orderby__order[ind] = False
            else:
                orderby__order[ind] = True
            ind += 1
        df = df.sort(orderby, ascending=orderby__order)

    # CLEAN MEMORY
    del filters, fields, columns
    gc.collect()

    file_name = '{}/assets/exports/openmining-{}.{}'.format(
        PROJECT_PATH, element.get('cube'), ext)
    if ext == 'csv':
        df.to_csv(file_name, sep=";")
        contenttype = 'text/csv'
    else:
        df.to_excel(file_name)
        contenttype = 'application/vnd.ms-excel'

    response.set_header('charset', 'utf-8')
    response.set_header(
        'Content-disposition', 'attachment; '
        'filename={}.{}'.format(element.get('cube'), ext))
    response.content_type = contenttype

    with open(file_name, "r") as ifile:
        return ifile.read()
Code example #16
File: stream.py  Project: yenchih/mining
def data(ws, mongodb, slug):
    if not ws:
        abort(400, 'Expected WebSocket request.')

    DW = DataWarehouse()

    element = mongodb['element'].find_one({'slug': slug})

    element['page_limit'] = 50
    if request.GET.get('limit', True) is False:
        element['page_limit'] = 9999999999

    data = DW.get(element.get('cube'))
    columns = data.get('columns') or []

    fields = columns
    if request.GET.get('fields', None):
        fields = request.GET.get('fields').split(',')

    cube_last_update = mongodb['cube'].find_one({'slug': element.get('cube')})
    ws.send(json.dumps({'type': 'last_update',
                        'data': str(cube_last_update.get('lastupdate', ''))}))

    ws.send(json.dumps({'type': 'columns', 'data': fields}))

    filters = [i[0] for i in request.GET.iteritems()
               if len(i[0].split('filter__')) > 1]

    if element['type'] == 'grid':
        page = int(request.GET.get('page', 1))
        page_start = 0
        page_end = element['page_limit']
        if page >= 2:
            page_end = element['page_limit'] * page
            page_start = page_end - element['page_limit']
    else:
        page_start = None
        page_end = None

    df = DataFrame(data.get('data') or {}, columns=fields)
    if len(filters) >= 1:
        for f in filters:
            s = f.split('__')
            field = s[1]
            operator = s[2]
            value = request.GET.get(f)
            if operator == 'like':
                df = df[df[field].str.contains(value)]
            elif operator == 'regex':
                df = DataFrameSearchColumn(df, field, value, operator)
            else:
                df = df.query(df_generate(df, value, f))

    groupby = []
    if request.GET.get('groupby', None):
        groupby = request.GET.get('groupby', '').split(',')
    if len(groupby) >= 1:
        df = DataFrame(df.groupby(groupby).grouper.get_group_levels())

    if request.GET.get('orderby',
                       element.get('orderby', None)) and request.GET.get(
            'orderby', element.get('orderby', None)) in fields:

        orderby = request.GET.get('orderby', element.get('orderby', ''))
        if type(orderby) == str:
            orderby = orderby.split(',')
        orderby__order = request.GET.get('orderby__order',
                                         element.get('orderby__order', ''))
        if type(orderby__order) == str:
            orderby__order = orderby__order.split(',')
        ind = 0
        for orde in orderby__order:
            if orde == '0':
                orderby__order[ind] = False
            else:
                orderby__order[ind] = True
            ind += 1
        df = df.sort(orderby, ascending=orderby__order)

    ws.send(json.dumps({'type': 'max_page', 'data': len(df)}))

    # CLEAN MEMORY
    del filters, fields, columns
    gc.collect()
    categories = []
    for i in df.to_dict(outtype='records')[page_start:page_end]:
        if element.get('categories', None):
            categories.append(i[element.get('categories')])
        ws.send(json.dumps({'type': 'data', 'data': i}))

    # CLEAN MEMORY
    del df
    gc.collect()

    ws.send(json.dumps({'type': 'categories', 'data': categories}))
    ws.send(json.dumps({'type': 'close'}))

    # CLEAN MEMORY
    del categories
    gc.collect()
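
The stream handler above pushes typed JSON frames over the WebSocket: last_update, columns, max_page, one data frame per record, then categories and close. A hypothetical client loop for that protocol, assuming a geventwebsocket-style socket with a receive() method:

import json


def consume(ws):
    rows = []
    while True:
        frame = json.loads(ws.receive())
        if frame['type'] == 'data':
            rows.append(frame['data'])  # one record per frame
        elif frame['type'] == 'close':
            break  # server has finished streaming
        # 'last_update', 'columns', 'max_page' and 'categories' frames
        # carry metadata and can be handled as needed.
    return rows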
Code example #17
File: export.py  Project: henriquejensen/mining
def data(mongodb, slug, ext="xls"):
    DW = DataWarehouse()

    element = mongodb["element"].find_one({"slug": slug})

    element["page_limit"] = 50
    if request.GET.get("limit", True) is False:
        element["page_limit"] = 9999999999

    data = DW.get(element.get("cube"))
    columns = data.get("columns") or []

    fields = columns
    if request.GET.get("fields", None):
        fields = request.GET.get("fields").split(",")

    filters = [i[0] for i in request.GET.iteritems() if len(i[0].split("filter__")) > 1]

    df = DataFrame(data.get("data") or {}, columns=fields)
    if len(filters) >= 1:
        for f in filters:
            s = f.split("__")
            field = s[1]
            operator = s[2]
            value = request.GET.get(f)
            if operator == "like":
                df = df[df[field].str.contains(value)]
            elif operator == "regex":
                df = DataFrameSearchColumn(df, field, value, operator)
            else:
                df = df.query(df_generate(df, value, f))

    groupby = []
    if request.GET.get("groupby", None):
        groupby = request.GET.get("groupby").split(",")
    if len(groupby) >= 1:
        df = DataFrame(df.groupby(groupby).grouper.get_group_levels())

    if (
        request.GET.get("orderby", element.get("orderby", None))
        and request.GET.get("orderby", element.get("orderby", None)) in fields
    ):

        orderby = request.GET.get("orderby", element.get("orderby", ""))
        if type(orderby) == str:
            orderby = orderby.split(",")
        orderby__order = request.GET.get("orderby__order", element.get("orderby__order", ""))
        if type(orderby__order) == str:
            orderby__order = orderby__order.split(",")
        ind = 0
        for orde in orderby__order:
            if orde == "0":
                orderby__order[ind] = False
            else:
                orderby__order[ind] = True
            ind += 1
        df = df.sort(orderby, ascending=orderby__order)

    # CLEAN MEMORY
    del filters, fields, columns
    gc.collect()

    file_name = "{}/assets/exports/openmining-{}.{}".format(PROJECT_PATH, element.get("cube"), ext)
    if ext == "csv":
        df.to_csv(file_name, sep=";")
        contenttype = "text/csv"
    else:
        df.to_excel(file_name)
        contenttype = "application/vnd.ms-excel"

    response.set_header("charset", "utf-8")
    response.set_header("Content-disposition", "attachment; " "filename={}.{}".format(element.get("cube"), ext))
    response.content_type = contenttype

    with open(file_name, "r") as ifile:
        return ifile.read()
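
The export handlers (examples #10, #15, and #17) and the stream handler all share a filter__<field>__<operator> query-string convention. A short illustration of how such parameters decompose; the request and values below are invented:

# Hypothetical request:
#   /export/sales?filter__name__like=Open&filter__id__gte=10
params = {'filter__name__like': 'Open', 'filter__id__gte': '10'}

for key, value in params.items():
    _, field, operator = key.split('__')  # e.g. ('filter', 'name', 'like')
    # 'like'  -> df[df[field].str.contains(value)]
    # 'regex' -> df = DataFrameSearchColumn(df, field, value, operator)
    # others  -> df = df.query(df_generate(df, value, key))
    print(field, operator, value)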
Code example #18
def element_cube(mongodb, slug=None):
    DW = DataWarehouse()
    data = DW.get(slug)
    columns = data.get('columns') or []
    return {'columns': columns}