Example #1
    def import_action(self, import_file, *args, **kwargs):
        '''
        Perform a dry_run of the import to make sure the import will not
        result in errors.  If there were no errors, save the user-uploaded
        file to a local temp file that will be used by 'process_import'
        for the actual import.
        '''
        input_format = base_formats.XLSXBook()

        # first, always write the uploaded file to disk, since it may be an
        # in-memory file or on disk depending on the configured upload handlers
        tmp_storage = self.get_tmp_storage_class()()
        data = bytes()
        for chunk in import_file.chunks():
            data += chunk
        tmp_storage.save(data, input_format.get_read_mode())

        # then read the file, using the proper format-specific mode
        # warning, big files may exceed memory
        data = tmp_storage.read(input_format.get_read_mode())
        databook = tablib.Databook()
        input_format.fill_databook(databook, data)
        results = self.run_import(databook)
        tmp_storage.remove()

        return results
Example #2
def export_all_device_information(request):
    _data_th = [
        ob.device_status() for ob in Thermostat.objects.filter(
            thermostat_id__approval_status='APR')
    ]
    _data_vav = [
        ob.device_status()
        for ob in VAV.objects.filter(vav_id__approval_status='APR')
    ]
    _data_rtu = [
        ob.device_status()
        for ob in RTU.objects.filter(rtu_id__approval_status='APR')
    ]
    _data_hvac = data_this([_data_th, _data_vav, _data_rtu], "Thermostats")
    _data_lt = [
        ob.device_status()
        for ob in Lighting.objects.filter(lighting_id__approval_status='APR')
    ]
    _data_lt = data_this([_data_lt], "Lighting Loads")
    _data_pl = [
        ob.device_status()
        for ob in Plugload.objects.filter(plugload_id__approval_status='APR')
    ]
    _data_pl = data_this([_data_pl], "Plugloads")

    devices = tablib.Databook((_data_hvac, _data_lt, _data_pl))
    with open('bemoss_devices.xls', 'wb') as f:
        f.write(devices.xls)
    response = HttpResponse(
        devices.xls, content_type='application/vnd.ms-excel;charset=utf-8')
    response['Content-Disposition'] = "attachment; filename=bemoss_devices.xls"
    return response
Example #3
    def items_to_xls(self):
        """Writing the items information to an excel file with multiple sheets."""
        connection = sqlite3.connect(self._database_name)
        cursor = connection.cursor()

        cursor.execute(
            """SELECT category FROM items GROUP BY category ORDER BY item_number"""
        )
        categories = cursor.fetchall()

        if len(categories) > 0:
            book = tablib.Databook()
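            # build one sheet per distinct item category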

            for category in categories:
                cursor.execute(
                    """SELECT rank, name, price, asin, link, category FROM items WHERE category = ? AND rank >= ? AND 
                    rank <= ?ORDER BY rank""",
                    (str(category[0]), int(self._range[0]), int(
                        self._range[1])))
                items = cursor.fetchall()
                data = tablib.Dataset(title=category[0][-31:])
                data.headers = [
                    "BSR Rank", "Name", "Price", "ASIN", "Link",
                    "Item Category"
                ]

                for item in items:
                    data.append(item)

                book.add_sheet(data)

            # Writing the items information to an excel file with multiple sheets
            makedirs(path.dirname(self._file_name), exist_ok=True)
            with open(self._file_name + ".xls", 'wb') as f:
                f.write(book.xls)
Example #4
class G:

    Schedule = {}
    MachPool = None
    PMPool = None
    Projects = None
    xlreftime = None
    OrderDates = None
    jsonInput = None
    excelInput = None
    simMode = None

    # reporting tabs
    reportResults = tablib.Databook()
    tabSchedule = {}
    pmSchedule = {}
    tabScheduleOrig = []
    pmScheduleOrig = []

    # re-initialised variables
    completionDate = {}
    seqPrjDone = None
    resAvailability = None
    seqPrjDoneOrig = None
    resAvailabilityOrig = None
Example #5
    def gen_reports(self, data, *args, **kwargs):
        if not self.enabled:
            logging.warn(TABLIB_MISSING_MSG + self.name)
            return

        dataset = tablib.Dataset()
        dataset.title = _("Results Summary")
        dataset.headers = (_("Category"), _("Check point"), _("Result"))

        for category, kvs in SC.iteritems(data):
            for k, v in SC.iteritems(kvs):
                if isinstance(v, (list, tuple)):
                    v = ", ".join(str(x) for x in v)
                elif isinstance(v, dict):
                    v = ", ".join("%s=%s" % (k, str(x))
                                  for k, x in v.iteritems())
                else:
                    pass

                dataset.append((category, k, str(v)))

        book = tablib.Databook([dataset])
        fn = os.path.splitext(self.inputs[0])[0]
        outpath = self._mk_output_path(fn, ".xls")

        with open(outpath, 'wb') as out:
            # pylint: disable=no-member
            out.write(book.xls)
Example #6
def data_sets_test():
    import tablib
    import os

    # Create a dataset, method 1
    dataset1 = tablib.Dataset()
    header1 = ('ID', 'Name', 'Tel', 'Age')
    dataset1.headers = header1
    dataset1.append((1, 'zhangsan', 13711111111, 16))
    dataset1.append((2, 'lisi', 13811111111, 18))
    dataset1.append((3, 'wangwu', 13911111111, 20))
    dataset1.append((4, 'zhaoliu', 15811111111, 25))
    print('dataset1:', os.linesep, dataset1, os.linesep)

    # Create a dataset, method 2
    header2 = ('ID', 'Name', 'Tel', 'Age')
    data2 = [[1, "zhangsan", 13711111111, 16], [2, "lisi", 13811111111, 18],
             [3, "wangwu", 13911111111, 20], [4, "zhaoliu", 15811111111, 25]]
    dataset2 = tablib.Dataset(*data2, headers=header2)
    print('dataset2: ', os.linesep, dataset2, os.linesep)

    # Add rows
    dataset1.append([5, 'sunqi', 15911111111, 30])  # append after the last row
    dataset1.insert(0, [0, 'liuyi', 18211111111, 35])  # insert a row at the given position
    print("dataset1 after adding rows: ", os.linesep, dataset1, os.linesep)

    # Remove rows
    dataset1.pop()  # remove the last row
    dataset1.lpop()  # remove the first row
    del dataset1[0:2]  # remove rows [0, 2)
    print("dataset1 after removing rows:", os.linesep, dataset1, os.linesep)

    # Add columns
    # dataset1 now has only two rows left
    dataset1.append_col(("beijing", "shenzhen"), header="city")  # append a column at the end
    dataset1.insert_col(2, ("male", "female"), header="sex")  # insert a column at the given position
    print("dataset1 after adding columns: ", os.linesep, dataset1, os.linesep)

    # Remove a column
    del dataset1["Tel"]
    print("dataset1 after removing a column: ", os.linesep, dataset1, os.linesep)

    # Export the data in various formats
    print("yaml format: ", os.linesep, dataset1.yaml, os.linesep)
    print("csv format: ", os.linesep, dataset1.csv, os.linesep)
    print("tsv format: ", os.linesep, dataset1.tsv, os.linesep)

    # Export to an Excel file
    dataset1.title = "dataset1"  # sheet name in the Excel workbook
    dataset2.title = "dataset2"
    myfile = open("./file_location/mydata.xls", "wb")
    myfile.write(dataset1.xls)
    myfile.close()

    # For multiple sheets, use a Databook
    myDataBook = tablib.Databook((dataset1, dataset2))
    myfile = open(myfile.name, "wb")
    myfile.write(myDataBook.xls)
    myfile.close()
Example #7
def schedule_messages(xlsx_file):
    """
    Schedule the messages in bulk and
    returns erroneous sheets and rows if any
    """
    def is_blank(val):
        return val is None or val == ''

    def is_positive_numeric(val):
        try:
            return int(val) >= 0
        except ValueError:
            pass
        return False

    book = tablib.Databook()
    book.load(xlsx_file, "xlsx")
    sheets = json.loads(book.export("json"))
    bad_rows = defaultdict(list)
    bad_titles = []
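    # each sheet is addressed to one group; the group id is encoded after the last '|' in the sheet title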
    for sheet in sheets:
        try:
            receiver_id = int(sheet["title"].split('|')[-1])
            group = GroupChat.objects.get(id=receiver_id)
        except (GroupChat.DoesNotExist, TypeError, ValueError):
            bad_titles.append(sheet["title"])
            continue

        messages = []

        # disable all currently scheduled messages and recreate new ones (even if not changed)
        group.scheduled_messages.update(enabled=False)
        for n, row in enumerate(sheet["data"], start=1):
            days = row["Days"]
            message = row["Message"]
            comment = row["Comment"] or ''
            if is_blank(days) or is_blank(
                    message) or not is_positive_numeric(days):
                bad_rows[sheet["title"]].append(n + 1)
                continue

            messages.append(
                ScheduledMessage.objects.create(
                    group=group,
                    day=days,
                    message=message,
                    comment=comment,
                ))
        rebuild_schedule_for_group(group, messages)

    return {
        DATA: {
            MESSAGE: 'Messages have been scheduled',
            'bad titles': bad_titles,
            'bad rows': bad_rows,
            IS_SUCCESS: True,
        },
        STATUS: status.HTTP_200_OK,
    }
Example #8
class G:
    Capacity = {}
    RouteDict = {}
    maxEarliness = 0  # max number of weeks for earliness
    maxLateness = 0  # max number of weeks for lateness
    planningHorizon = 0  # for future demand purposes
    #    demandFile = None
    CurrentCapacityDict = {}
    Bottlenecks = []
    SPlist = {}
    SPs = []
    BatchSize = {}
    orders = {}
    sortedOrders = {}
    forecast = {}
    incompleteBatches = {}
    items = {}
    WeekList = []
    priorityList = {}
    Earliness = {}
    Lateness = {}
    Excess = {}
    weightFactor = [10.0, 1.0, 0, 2]

    # ACO parameters
    ACO = 1
    noGen = 5
    popSize = 10

    # GA parameters
    GA = 0  # suggests whether application of GA to forecast disaggregation is required
    noGenGA = 5
    popSizeGA = 8
    probXover = 0.6
    probMutation = 0.1
    elitistSelection = 1
    terminationGA = 4

    # utilisation calculation
    minDeltaUt = 0

    # output variables
    reportResults = tablib.Databook()
    OrderResults = tablib.Dataset(title='OrderSummary')
    OrderResults.headers = ('PPOS', 'SP_NUMBER', 'MA_LIST', 'REQUEST_DATE',
                            'DFSELLER', 'ORDERQTY', 'PRIORITY', 'CHOSEN_MA',
                            'LATENESS', 'EARLINESS', 'ALLOCATION')
    forecastResults = tablib.Dataset(title='ForecastSummary')
    forecastResults.headers = ('PPOS', 'SP_NUMBER', 'MA_LIST', 'REQUEST_DATE',
                               'ORDERQTY', 'PRIORITY', 'CHOSEN_MA', 'LATENESS',
                               'EARLINESS', 'ALLOCATION')
    globalMAAllocation = {}
    spForecastOrder = []
    CapacityResults = tablib.Dataset(title='BN_Capa')
    allocationResults = tablib.Dataset(title='Demand_coverage')


#    filterItem = 0
#    filterWeek = 0
Example #9
def test_xls_report(tmp_path):
    results_file = tmp_path / 'results.xls'
    main([samples.EPUB3_INVALID, '--xls', str(results_file)])

    with results_file.open('rb') as f:
        databook = tablib.Databook().load(f.read(), format='xls')
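        # sheet 1 holds the individual checker messages; row 0 should be the OPF-003 warning for invalid.epub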
        assert databook.sheets()[1][0][:3] == ('OPF-003', 'WARNING',
                                               'invalid.epub')
Example #10
    def test_xlsx_bad_chars_sheet_name(self):
        """
        Sheet names are limited to 31 chars and the following chars
        are not permitted: \\ / * ? : [ ]
        """
        _dataset = tablib.Dataset(
            title='bad name \\/*?:[]qwertyuiopasdfghjklzxcvbnm')
        _xlsx = _dataset.export('xlsx')
        new_data = tablib.Dataset().load(_xlsx)
        self.assertEqual(new_data.title, 'bad name -------qwertyuiopasdfg')

        _book = tablib.Databook()
        _book.add_sheet(_dataset)
        _xlsx = _book.export('xlsx')
        new_data = tablib.Databook().load(_xlsx, 'xlsx')
        self.assertEqual(new_data.sheets()[0].title,
                         'bad name -------qwertyuiopasdfg')
Example #11
    def test_book_export_no_exceptions(self):
        """Test that various exports don't error out."""

        book = tablib.Databook()
        book.add_sheet(data)
        # These formats don't implement the book abstraction.
        unsupported = ['csv', 'tsv', 'jira', 'latex', 'df']
        self._test_export_data_in_all_formats(book, exclude=unsupported)
Example #12
def export(request, modelo, formato):
    """Exporta dados direto para o navegador nos formatos CSV, XLS e JSON."""
    if modelo == "projetos":
        resource = ProjetosResource()
    elif modelo == "organizacoes":
        resource = OrganizacoesResource()
    elif modelo == "opcoes":
        resource = OpcoesResource()
    elif modelo == "avaliacoes":
        resource = Avaliacoes2Resource()
    elif modelo == "usuarios":
        resource = UsuariosResource()
    elif modelo == "estudantes":
        resource = EstudantesResource()
    elif modelo == "professores":
        resource = ProfessoresResource()
    elif modelo == "parceiros":
        resource = ParceirosResource()
    elif modelo == "configuracao":
        resource = ConfiguracaoResource()
    elif modelo == "feedbacks":
        resource = FeedbacksResource()
    # elif modelo == "comite":
    #     resource = ComiteResource()
    else:
        mensagem = "Chamada irregular : Base de dados desconhecida = " + modelo
        context = {
            "area_principal": True,
            "mensagem": mensagem,
        }
        return render(request, 'generic.html', context=context)

    dataset = resource.export()

    databook = tablib.Databook()

    databook.add_sheet(dataset)

    if formato in ("xls", "xlsx"):
        response = HttpResponse(databook.xlsx,
                                content_type='application/ms-excel')
        formato = "xlsx"
    elif formato == "json":
        response = HttpResponse(dataset.json, content_type='application/json')
    elif formato == "csv":
        response = HttpResponse(dataset.csv, content_type='text/csv')
    else:
        mensagem = "Chamada irregular : Formato desconhecido = " + formato
        context = {
            "area_principal": True,
            "mensagem": mensagem,
        }
        return render(request, 'generic.html', context=context)

    response[
        'Content-Disposition'] = 'attachment; filename="' + modelo + '.' + formato + '"'

    return response
Example #13
def get_databook(lox_data):
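    # one Dataset (sheet) per (label, headers, rows) tuple yielded by get_output()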
    db = tablib.Databook()
    for label, headers, data in get_output(lox_data):
        ds = tablib.Dataset(title=label)
        ds.headers = headers
        for elem in data:
            ds.append(elem)
        db.add_sheet(ds)
    return db
Example #14
    def test_book_export_no_exceptions(self):
        """Test that varoius exports don't error out."""

        book = tablib.Databook()
        book.add_sheet(data)

        book.json
        book.yaml
        book.xls
Example #15
def xls2html(xls_sbtab,file_name,sbtype,def_file=None,def_file_name=None):
    '''
    generates html view out of xls file
    '''
    if def_file:
        FileValidClass = validatorSBtab.ValidateFile(def_file,def_file_name)
        def_delimiter  = FileValidClass.checkseparator(def_file)
    else:
        def_file_open = open('./definitions/definitions.tsv','r')
        def_file      = def_file_open.read()
        def_delimiter = '\t'

    col2description = findDescriptions(def_file,def_delimiter,sbtype)

    nice_sbtab = '<p><h2><b>'+file_name+'</b></h2></p>'
    ident_url = False
    print('I HAVE EXCLUDED TABLIB.XLRD HERE BECAUSE IT DOES NOT EXIST ANYMORE; GO TO MAKEHTML.PY AND FIX!')
    dbook = tablib.Databook()
    #xl = xlrd.open_workbook(file_contents=xls_sbtab)

    for sheetname in xl.sheet_names():
        dset = tablib.Dataset()
        dset.title = sheetname
        sheet = xl.sheet_by_name(sheetname)
        for row in range(sheet.nrows):
            if row == 0:
                new_row = ''
                for thing in sheet.row_values(row):
                    if not thing == '': new_row += thing
                nice_sbtab += '<a style="background-color:#00BFFF">'+new_row+'</a><br>'
                nice_sbtab += '<table>'
            elif row == 1:
                new_row = ''
                for i,thing in enumerate(sheet.row_values(row)):
                    try: title = col2description[thing[1:]]
                    except: title = ''
                    if not thing == '': new_row += '<td title="'+title+'">'+str(thing)+'</td>'
                    if "Identifiers:" in thing:
                        urn_str = re.search(r"\w*\.\w*", thing)
                        urn     = urn_str.group(0)
                        ident_url = 'http://identifiers.org/'+urn+'/'
                        ident_col = i
                nice_sbtab += '<tr bgcolor="#87CEFA">'+new_row+'</tr>'
            else:
                new_row = ''
                for i,thing in enumerate(sheet.row_values(row)):
                    if not ident_url:
                        new_row += '<td>'+str(thing)+'</td>'
                    else:
                        if i == ident_col:
                            ref_string  = ident_url+thing
                            new_row    += '<td><a href="'+ref_string+'" target="_blank">'+str(thing)+'</a></td>'
                        else:
                            new_row    += '<td>'+str(thing)+'</td>'
                nice_sbtab += '<tr>'+new_row+'</tr>'
                        
    return nice_sbtab    
Example #16
def export_schedule_thermostats_daily(request, mac):
    mac = mac.encode('ascii', 'ignore')
    device = DeviceMetadata.objects.get(mac_address=mac)

    _file_name = os.path.join(
        settings_tornado.PROJECT_DIR, 'resources/scheduler_data/thermostat/' +
        device.device_id + '_schedule.json')
    if os.path.isfile(_file_name):
        json_file = open(_file_name, 'r+')
        _json_data = json.load(json_file)
        if device.device_id in _json_data['thermostat']:
            print 'device id present'
            _data = _json_data['thermostat'][
                device.device_id]['schedulers']['everyday']
            _data = json.dumps(_data)
            _data = json.loads(_data, object_hook=_decode_dict)
        json_file.close()

    headers = ('Period Name', 'From', 'Heat Setpoint (F)', 'Cool Setpoint (F)')
    _data_mon = _data_tue = _data_wed = _data_thu = _data_fri = _data_sat = _data_sun = []

    for day in _data:
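        # build one titled Dataset per weekday; the weekday sheets are combined into a Databook below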
        data = []
        data = tablib.Dataset(*data, headers=headers, title=day)
        day_data = _data[day]
        for record in day_data:
            rec_time = str(int(record['at']) / 60) + ':' + str(
                int(record['at']) % 60)
            data.append((record['nickname'], rec_time, record['heat_setpoint'],
                         record['cool_setpoint']))

        if day == 'monday':
            _data_mon = data
        elif day == 'tuesday':
            _data_tue = data
        elif day == 'wednesday':
            _data_wed = data
        elif day == 'thursday':
            _data_thu = data
        elif day == 'friday':
            _data_fri = data
        elif day == 'saturday':
            _data_sat = data
        elif day == 'sunday':
            _data_sun = data

    schedule = tablib.Databook((_data_mon, _data_tue, _data_wed, _data_thu,
                                _data_fri, _data_sat, _data_sun))

    with open(device.device_model + "_daily_sch.xls", 'wb') as f:
        f.write(schedule.xls)
    response = HttpResponse(
        schedule.xls, content_type='application/vnd.ms-excel;charset=utf-8')
    response[
        'Content-Disposition'] = "attachment; filename=" + device.device_model + "_daily_sch.xls"
    return response
Example #17
    def get_export_data(self, file_format, *args, **kwargs):
        """
        Returns file_format representation for given queryset.
        """
        book = tablib.Databook()
        for resource in self.resources:
            dataset = resource.export(queryset=None, *args, **kwargs)
            book.add_sheet(dataset)
        export_data = file_format.export_databook(databook=book)
        return export_data
Example #18
def excel_to_participants(participant_excel):
    book = tablib.Databook()
    book.load(participant_excel, "xlsx")
    sheets = json.loads(book.export("json"))
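    # the JSON export gives one dict per sheet, with each data row keyed by the sheet's header row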
    sheet = sheets[0]
    participants = []
    for n, row in enumerate(sheet["data"], start=1):
        participants.append(
            ParticipantInfo(row['Study ID'], str(row['Phone Number'])))

    return participants
Example #19
    def __init__(self, excel_name, excel_path=None):
        """
        Excel book writer.

        Args:
            excel_path: excel path
            excel_name: excel file name
        """
        self.excel_path = excel_path
        self.excel_name = f"{excel_name}.xlsx"
        self.excel_book = tablib.Databook()
        self.merge_cells_index = {}
        self.sheet_names = Counter()  # map of sheet names, to avoid errors from duplicate names
Example #20
def export_file():
    '''Return an Excel file.'''
    filename = 'test_file'

    test_data1 = [['name1', 'age1', 'sex1'], ['name2', 'age2', 'sex2'], ['name3', 'age3', 'sex3']]
    data1 = tablib.Dataset(*test_data1, headers=['name', 'age', 'sex'], title='test1')

    test_data2 = [['address1', 'email1'], ['address2', 'email2']]
    data2 = tablib.Dataset(*test_data2, headers=['address', 'email'], title='test2')
    ds = tablib.Databook((data1, data2))  # Databook combines multiple sheets; with a single sheet, tablib.Dataset can be used directly
    # build the file and return it
    response = make_response(ds.xls, 200, {'mimetype': 'application/vnd.ms-excel'})
    response.headers['Content-Disposition'] = "attachment; filename={}.xls" \
        .format(urllib.parse.quote(filename))
    return response
Example #21
    def test_yaml_import_book(self):
        """Generate and import YAML book serialization."""
        data.append(self.john)
        data.append(self.george)
        data.headers = self.headers

        book.add_sheet(data)
        _yaml = book.yaml

        book.yaml = _yaml

        self.assertEqual(_yaml, book.yaml)
        # Same with the load interface
        book2 = tablib.Databook().load(_yaml, None)
        self.assertEqual(_yaml, book2.yaml)
Example #22
    def test_json_import_book(self):
        """Generate and import JSON book serialization."""
        data.append(self.john)
        data.append(self.george)
        data.headers = self.headers

        book.add_sheet(data)
        _json = book.json

        book.json = _json

        self.assertEqual(json.loads(_json), json.loads(book.json))
        # Same with the load interface
        book2 = tablib.Databook().load(_json, None)
        self.assertEqual(json.loads(book.json), json.loads(book2.json))
Example #23
File: universe.py Project: lefs/equities
    def save(self, filename=None):
        """Save the current universe to a file."""
        if not filename and not self.filename:
            raise UniverseFilenameNotSet
        elif filename:
            self.filename = filename

        book = tablib.Databook()
        book.add_sheet(self.equities)
        if not self.prices.empty:
            prices = tablib.Dataset().load(self.prices.to_csv())
            book.add_sheet(prices)

        with open(self.filename, 'wb') as fname:
            fname.write(book.xlsx)
Example #24
File: cli.py Project: r4victor/epubcheck
def main(argv=None):
    """Command line app main function.

    :param list | None argv: Overrides command options (for library use or testing)
    """

    parser = create_parser()
    args = parser.parse_args() if argv is None else parser.parse_args(argv)

    if not os.path.exists(args.path):
        sys.exit(0)

    all_valid = True
    single = os.path.isfile(args.path)
    files = [args.path] if single else iter_files(
        args.path, exts=('epub', ), recursive=args.recursive)

    pool = ThreadPool()
    results = pool.imap_unordered(EpubCheck, files)

    metas = tablib.Dataset(headers=Checker._fields + Meta._fields)
    messages = tablib.Dataset(headers=Message._fields)
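    # metas collects one row per checked file, messages one row per reported issue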

    for result in results:
        metas.append(result.checker + result.meta.flatten())
        if not result.valid:
            all_valid = False
        for message in result.messages:
            messages.append(message)
            if message.level == 'ERROR':
                print(message.short, file=sys.stderr)
            else:
                print(message.short)

    if args.csv:
        args.csv.write(
            messages.export('csv', delimiter=compat.text_type(';')).encode())
        args.csv.close()

    if args.xls:
        databook = tablib.Databook((metas, messages))
        args.xls.write(databook.xls)
        args.xls.close()

    if all_valid:
        return 0
    else:
        return 1
Example #25
    def test_header_no_pane_freeze(self):
        headers = ['foo', 'bar', 'plop']
        data = [["test", "test2", "toto"], ["test", "test2", "toto"],
                ["test", "test2", "toto"], ["test", "test2", "toto"],
                ["test", "test2", "toto"], ["test", "test2", "toto"],
                ["test", "test2", "toto"], ["test", "test2", "toto"],
                ["test", "test2", "toto"], ["test", "test2", "toto"]]
        d = tablib.Dataset(*data, headers=headers, freeze_panes=False)
        book = tablib.Databook()
        book.add_sheet(d)

        with open("test.xls", "wb") as f:
            f.write(book.xls)

        with open("test.xlsx", "wb") as f:
            f.write(book.xlsx)
Example #26
    def build_xls(self, headers, data):
        '''
        Build the export workbook.
        :param headers: column headers
        :param data: iterable of data rows
        :return: databook
        '''

        if data:
            data = list(data)
        else:
            data = [()]
        data_set = tablib.Dataset(*data, headers=headers)
        data_book = tablib.Databook()
        data_book.add_sheet(data_set)
        return data_book
Example #27
class G:
    
    Schedule = {}
    seqPrjDone = None
    resAvailability = None
    MachPool = None
    PMPool = None
    Projects = None
    xlreftime = None
    reportResults = tablib.Databook()
    tabSchedule = tablib.Dataset(title='Schedule')
    tabSchedule.headers = (['Project', 'Part', 'Task ID', 'Station', 'Operator', 'Start Time', 'End Time'])
    OrderDates = None
    completionDate = None
    jsonInput = None
    excelInput = None
Example #28
def build_xls(headers, param):
    '''
    Build the export workbook.
    :param headers: column headers
    :param param: iterable of data rows
    :return: databook
    '''

    # if param:
    #     param = list(param)
    # else:
    #     param = [()]
    data_set = tablib.Dataset(*param, headers=headers)
    data_book = tablib.Databook()
    data_book.add_sheet(data_set)
    return data_book
Example #29
    def setUp(self):
        """Create simple data set with headers."""

        global data, book

        data = tablib.Dataset()
        book = tablib.Databook()

        self.headers = ('first_name', 'last_name', 'gpa')
        self.john = ('John', 'Adams', 90)
        self.george = ('George', 'Washington', 67)
        self.tom = ('Thomas', 'Jefferson', 50)

        self.founders = tablib.Dataset(headers=self.headers, title='Founders')
        self.founders.append(self.john)
        self.founders.append(self.george)
        self.founders.append(self.tom)
Example #30
def get_project_summary(project_pk):
    project_data = get_project_dataset(Project.objects.filter(pk__exact=project_pk))
    proj_assoc_data = get_con_assoc_dataset(ProjectContactAssoc.objects.filter(proj__exact=project_pk))

    tasks = Task.objects.filter(proj__exact=project_pk)

    task_list_data = get_task_dataset(tasks)

    book = tablib.Databook( sets=[project_data, proj_assoc_data, task_list_data] )
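    # then append the two per-task sheets returned by get_task_summary_unwrapped for each task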

    for tsk in tasks:
        task_data, assoc_data = get_task_summary_unwrapped(tsk.pk)

        book.add_sheet(task_data)
        book.add_sheet(assoc_data)

    return book