示例#1
0
    def export(self):
        """Export every bet row from the database into an ODS workbook
        written to ``self.directory``."""
        db = Bbdd()
        rows = [[
            "Fecha", "Deporte", "Competicion", "Región", "Local", "Visitante",
            "Pick", "Casa", "Mercado", "Tipster", "Stake", "Unidad",
            "Resultado", "Beneficio", "Apuesta", "Cuota", "Gratuita"
        ]]
        for bet in db.select("bet"):
            # Columns 2-4 and 8-10 hold foreign keys that are resolved to
            # their display values; the rest are copied verbatim.
            rows.append([
                bet[1],
                db.getValue(bet[2], "sport"),
                db.getValue(bet[3], "competition"),
                db.getValue(bet[4], "region"),
                bet[5], bet[6], bet[7],
                db.getValue(bet[8], "bookie"),
                db.getValue(bet[9], "market"),
                db.getValue(bet[10], "tipster"),
                bet[11], bet[12], bet[13], bet[14], bet[15], bet[16],
                "Sí" if bet[17] != 0 else "No",
            ])
        db.close()

        workbook = OrderedDict()
        workbook.update({"Apuestas": rows})
        save_data(self.directory, workbook)
示例#2
0
def generateODS():
    """Build and save the CV version errata workbook ('CVVersionsErrata.ods').

    Reads the module-level ``cVVObjects`` mapping and writes one summary
    sheet plus one detail sheet per content-view version into the
    module-level ``contentViewVersionsWorkbook``.
    """
    printDBG(1, "Generating report in ODS format")
    sheet1 = {"CV Version Errata Report": []}
    sheet1['CV Version Errata Report'].append([
        "Content View Name & Version", "Initial publication date",
        "Security Errata", "Bugfix errata", "Enhancement errata",
        "Total Errata"
    ])
    for name, cvv in cVVObjects.items():
        sheet1['CV Version Errata Report'].append([
            name, cvv['created'], cvv['secErrata'], cvv['bugErrata'],
            cvv['enhErrata'],
            cvv['secErrata'] + cvv['bugErrata'] + cvv['enhErrata']
        ])
        cvvSheet = [[
            "Errata Identifier", "Errata Name", "Type", "Issue date", "CVEs"
        ]]
        # Fix: build each row as a single literal instead of mutating a
        # temporary variable that shadowed the builtin ``list``.
        for ei, eo in cvv['errata'].items():
            cvvSheet.append(
                [ei, eo['name'], eo['type'], eo['issued'],
                 ','.join(eo['cves'])])
        contentViewVersionsWorkbook.update(sheet1)
        contentViewVersionsWorkbook.update({name.replace(':', '-'): cvvSheet})

    printDBG(2, "Saving CV errata report")
    save_data('CVVersionsErrata.ods', contentViewVersionsWorkbook)
示例#3
0
def generateODS():
    """Write the CV version errata differential workbook to
    'CVVersionsErrataDifferential.ods'."""
    printDBG(1, "Generating report in ODS format")
    summary_rows = [
        ["Errata ID", "Type", "Errata last modified", "Errata Title"]]
    sheet1 = {"CV Version Errata Differential Report": summary_rows}
    for cve in finalList:
        summary_rows.append(
            [cve['errata_id'], cve['type'], cve['updated'], cve['title']])
        name = cve['errata_id']
        # Per-erratum detail sheet: metadata rows followed by the CVE and
        # affected-package listings.
        detail = [
            ['Errata ID', name],
            ['Type', cve['type']],
            ['Originally issued', cve['issued']],
            ['Last modified', cve['updated']],
            ['Description', cve['description']],
            ['CVEs'],
        ]
        detail.extend(['', entry['cve_id']] for entry in cve['cves'])
        detail.append(['Packages affected'])
        detail.extend(['', entry] for entry in cve['packages'])
        contentViewVersionsWorkbook.update(sheet1)
        contentViewVersionsWorkbook.update({name.replace(':', '-'): detail})

    printDBG(2, "Saving CV errata report")
    save_data('CVVersionsErrataDifferential.ods', contentViewVersionsWorkbook)
示例#4
0
    def encode(self, data: dict, **options) -> io.BinaryFileStream:
        """
        Encodes the data into a OpenDocument Spreadsheet file-like stream.

        Arguments:
            data: The data to encode
            **options: The encoding options

        Returns:
            An OpenDocument Spreadsheet file-like stream

        Raises:
            geodatabr.core.encoders.EncodeError: If data fails to encode
        """
        try:
            ods_file = io.BinaryFileStream()
            ods_data = types.OrderedMap()

            # One sheet per entity: header row from the first record's
            # keys, then one row of values per record.
            for entity, records in data.items():
                ods_data[entity] = [list(records.first().keys())] \
                    + [list(record.values()) for record in records]

            pyexcel_ods.save_data(ods_file, ods_data)
            # Rewind so callers can read the stream from the start.
            ods_file.seek(0)

            return ods_file
        except Exception as error:
            # Fix: chain the original exception so the root cause is not
            # lost when EncodeError is reported.
            raise encoders.EncodeError from error
示例#5
0
    def encode(self, data: dict, **options) -> io.BinaryFileStream:
        """
        Encodes the data into a OpenDocument Spreadsheet file-like stream.

        Arguments:
            data: The data to encode
            **options: The encoding options

        Returns:
            An OpenDocument Spreadsheet file-like stream

        Raises:
            geodatabr.core.encoders.EncodeError: If data fails to encode
        """
        try:
            ods_file = io.BinaryFileStream()
            ods_data = types.OrderedMap()

            # One sheet per entity: header row built from the first
            # record's keys, then one row of values per record.
            for entity, records in data.items():
                ods_data[entity] = [list(records.first().keys())] \
                    + [list(record.values()) for record in records]

            pyexcel_ods.save_data(ods_file, ods_data)
            # Rewind so callers can read the stream from the start.
            ods_file.seek(0)

            return ods_file
        except Exception:
            # NOTE(review): the original exception is discarded here;
            # chaining with "raise ... from" would preserve the cause.
            raise encoders.EncodeError
def save_csv(dir):
    """Collect all tuple JSON files under *dir* and write them to 'v1.csv'.

    Each record is scaled linearly into [-1, 1] using the module-level
    latmin/latmax/lngmin/lngmax bounds before being saved via pyexcel's
    ``save_data``.

    NOTE: the parameter keeps the name ``dir`` for backward compatibility
    with keyword callers, even though it shadows the builtin.
    """
    def process_inputs(t):
        # Fix: renamed locals that shadowed the builtins max/min/range.
        hi = 1
        lo = -1
        span = hi - lo
        lat = (t['lat'] - latmin) / (latmax - latmin) * span + lo
        lng = (t['lng'] - lngmin) / (lngmax - lngmin) * span + lo
        weekday = t['weekday'] / 6 * span + lo
        hour = t['hour'] / 23 * span + lo
        # Minutes are bucketed to 15-minute slots (0, 15, 30, 45).
        minutes = (t['min'] // 15 * 15) / 45 * span + lo
        return [lat, lng, weekday, hour, minutes, t['t']]

    print('Loading data...', end='', flush=True)
    data = []
    for path in glob.glob(dir + '/**/*.json'):
        with open(path) as json_data:
            data.extend(json.load(json_data))
    rows = [['latitude', 'longitude', 'weekday', 'hour', 'min', 'traffic']]
    rows.extend(map(process_inputs, data))
    save_data('v1.csv', OrderedDict([('', rows)]))
    print('done')
示例#7
0
 def get(self, token):
     """Serve an ODS file listing enabled students who have made no
     course choices for the current year; 401 if the token is invalid."""
     status = auth.Manageidentify(token)
     if not status:
         return jsonify({"status": 401})
     year = config.year()
     students = db.students.find({"enable": 1, "year": status["permission"]})
     rows = [["學號", "班級", "姓名"]]
     for student in students:
         # Only students without any choice entry for the current year.
         if not any(c["year"] == year for c in student["chooses"]):
             rows.append([
                 student["account"], student["student_class"],
                 student["student_name"]
             ])
     workbook = OrderedDict()
     workbook.update({"Sheet 1": rows})
     save_data("/tmp/tables.ods", workbook)
     return send_from_directory(
         "/tmp",
         "tables.ods",
         as_attachment=True,
         mimetype="application/file",
         attachment_filename=f"{year}未選課名單.ods",
     )
示例#8
0
def write_table(ods_file: str,
                sheet_name: str,
                data: Dict[str, Dict[str, str]],
                plus_notation: bool = False):
    """
    Writes the content of a dictionary mapping characters to each other
    to an ODS file.
    :param ods_file: The ODS file to write to
    :param sheet_name: The sheet name to write to
    :param data: The data to write
    :param plus_notation: Whether or not to use +-notation.
                          True: 1 -> C+
                          False: C1 -> CC
    :return: None
    """
    # First row holds the column headers, first cell stays blank.
    header = [""]
    body = []
    for char_one in data:
        header.append(char_one)
        row = [char_one] + [
            decode_support_levels(data[char_one].get(char_two, ""),
                                  for_sheet=not plus_notation)
            for char_two in data
        ]
        body.append(row)

    workbook = dict(read_data(ods_file))
    workbook[sheet_name] = [header] + body
    save_data(ods_file, workbook)
示例#9
0
def index(request):
    """Django view: dump the requested app's data to a JSON fixture and,
    for the 'member' app, also convert it to an ODS spreadsheet.

    The app name comes from the ``f`` GET parameter; the rendered home
    page context carries ``status`` = 'ok' when an export ran.
    """
    context = {}
    context['status'] = None
    if (request.method == 'GET'):
        print("data")
        app_name = request.GET.get('f')
        if app_name is not None:
            # Fix: use a context manager so the dump file is closed even
            # if call_command raises.
            with open('home/static/home/data.json', 'w') as output:
                call_command('dumpdata',
                             app_name,
                             format='json',
                             indent=3,
                             stdout=output)

            if app_name == 'member':
                with open('home/static/home/data.json') as data_file:
                    members = json.load(data_file)
                data = [["Name", "Roll No.", "Email ID", "Slots", "Fine"]]
                # Fix: iterate the list directly instead of range(len(...)).
                for member in members:
                    fields = member["fields"]
                    data.append([
                        fields["Name"], fields["RollNo"], fields["EmailID"],
                        fields["Slots"], fields["Fine"]
                    ])
                    print(data)
                sheet = {"Member": data}
                save_data("home/static/home/dataods.ods", sheet)

            context['status'] = 'ok'
    return render(request, 'home/home.html', context)
示例#10
0
def test_issue_13():
    """Empty rows must survive a save/load round trip when
    skip_empty_rows=False."""
    test_file = "test_issue_13.ods"
    data = [[1, 2], [], [], [], [3, 4]]
    save_data(test_file, {test_file: data})
    result = get_data(test_file, skip_empty_rows=False)
    eq_(data, result[test_file])
    os.unlink(test_file)
示例#11
0
文件: views.py 项目: irfanpule/ngods
def save_ods(request):
    """Save the submitted sheet data as a new ODS revision for this session.

    The previous file is renamed with an incrementing revision counter
    (kept in the cache), then the new workbook is written in its place
    and echoed back as JSON.
    """
    session_key = get_session_key(request)
    count = cache.get(session_key)
    ods_data = request.GET

    new_sheet = OrderedDict()
    for name_sheet, list_data in ods_data.items():
        # Fix: rows must be collected per sheet; previously the row list
        # was created once outside the loop, so every later sheet also
        # contained all earlier sheets' rows.
        new_data = []
        for row in ast.literal_eval(list_data):
            new_data.append([str(cell) for _, cell in row.items()])
        new_sheet.update({name_sheet: new_data})

    count = int(count) + 1 if count else 1

    # NOTE(review): this builds a shell command from session data; prefer
    # os.rename / shutil.move to avoid shell interpolation issues.
    os.system(
        f'mv {path_file_save(session_key)} {path_file_save(session_key, count)}'
    )
    save_data(path_file_save(session_key), new_sheet)

    cache.set(session_key, count)
    return JsonResponse(new_sheet)
示例#12
0
def test_issue_13():
    """Empty rows must be preserved through a save/load round trip when
    skip_empty_rows=False (regression test for issue 13)."""
    test_file = "test_issue_13.ods"
    data = [[1, 2], [], [], [], [3, 4]]
    save_data(test_file, {test_file: data})
    written_data = get_data(test_file, skip_empty_rows=False)
    eq_(data, written_data[test_file])
    os.unlink(test_file)  # clean up the temporary file
示例#13
0
def test_bug_fix_for_issue_2():
    """Non-ASCII cell values must survive a save/load round trip."""
    data = {
        "Sheet 1": [[1, 2, 3], [4, 5, 6]],
        "Sheet 2": [[u"row 1", u"Héllô!", u"HolÁ!"]],
    }
    save_data("your_file.ods", data)
    new_data = get_data("your_file.ods")
    assert new_data["Sheet 2"] == [[u'row 1', u'H\xe9ll\xf4!', u'Hol\xc1!']]
示例#14
0
def test_bug_fix_for_issue_2():
    """Non-ASCII cell values must round-trip intact (regression: issue 2)."""
    data = {}
    data.update({"Sheet 1": [[1, 2, 3], [4, 5, 6]]})
    data.update({"Sheet 2": [[u"row 1", u"Héllô!", u"HolÁ!"]]})
    save_data("your_file.ods", data)
    new_data = get_data("your_file.ods")
    # Escaped form of the same accented characters written above.
    assert new_data["Sheet 2"] == [[u"row 1", u"H\xe9ll\xf4!", u"Hol\xc1!"]]
示例#15
0
    def write_cell(self, book_name, sheet_name, cell, message):
        """Write *message* into the cell named *cell* (e.g. "B12") of
        *sheet_name* in the ODS book *book_name*, growing the sheet with
        empty rows/columns as needed, then save the book."""
        cleansed_filename = self.finish_filename(book_name)
        book = pyexcel_ods.get_data(cleansed_filename)
        assert (book is not None), "Spreadsheet book has not been set!!"
        assert (
            len(cell) >= 2
        ), "Invalid cell size. It must be at least two characters in length."

        # RECALL: Valid cell names could be really long like "ACE6561"
        match_obj = re.match("^([a-zA-Z]+)(\\d+)$", cell)
        assert (
            match_obj is not None
        ), "Invalid cell name. It must be a sequence of letters followed by a number."

        row = int(
            match_obj.group(2)) - 1  # don't forget, indices start at zero!
        col = self.convert_alphabetic_to_column(match_obj.group(1)) - 1

        print("[DEBUG]    Now trying to write %s at %s[%d][%d]" %
              (message, sheet_name, row, col))
        selected_sheet = book[sheet_name]
        while (row >= len(selected_sheet)):
            # fill the sheet with more ROWS in order to access the given index
            selected_sheet.append([])

        # Fix: pad one cell per iteration; the old inner loop appended
        # col+1 cells on every pass, leaving excess empty columns.
        while (col >= len(selected_sheet[row])):
            selected_sheet[row].append('')

        book[sheet_name][row][col] = message
        pyexcel_ods.save_data(cleansed_filename, book)

        if (self.enable_sync):
            print("connection to Nextcloud is a WIP")
示例#16
0
文件: query.py 项目: ocftw/PttCrawler
 def _export_ods(self):
     """Export the query rows as an ODS file into the output folder."""
     data = {'Query': self._get_export_rows()}
     output_filename = 'Ptt_query_{export_datetime}'.format(
         export_datetime=datetime.now().strftime('%Y-%m-%d'))
     # Fix: the format template lost its '{filename}' placeholder, so the
     # computed name was ignored and every export got the same path.
     output_path = os.path.join(
         self.output_folder, '{filename}.ods'.format(filename=output_filename))
     save_data(output_path, data)
示例#17
0
文件: views.py 项目: irfanpule/ngods
def demo(request):
    # Reset any cached revision counter for this session and delete the
    # session's temporary ODS revisions before re-seeding from the demo file.
    session_key = get_session_key(request)
    if cache.get(session_key):
        cache.delete(session_key)
        # NOTE(review): shell command built from the session key; prefer
        # glob + os.remove to avoid invoking a shell.
        os.system(f'rm -rf tmp/{session_key}-*.ods')

    # Copy the demo workbook into this session's working file.
    ods_file = settings.STATIC_ROOT + '/demo.ods'
    save_data(path_file_save(session_key), get_data(ods_file))
    return redirect('website:show_ods')
示例#18
0
 def new_book(self, assigned_name):
     """Create a fresh in-memory book containing a "default" worksheet and
     persist it, either under *assigned_name* or a local default file."""
     self.book_filename = assigned_name
     self.book = collections.OrderedDict()
     self.new_worksheet("default")
     # Fix: compare to None with ``is``, not ``==``.
     if assigned_name is None or assigned_name == "":
         pyexcel_ods.save_data(local_file_prefix + "dataSheet.ods",
                               self.book)
     else:
         pyexcel_ods.save_data(assigned_name, self.book)
示例#19
0
def usufyToXlsxExport(d, fPath):
    '''
        Workaround to export to a .xlsx file.
        :param d: Data to export.
        :param fPath: File path.
    '''
    from pyexcel_xlsx import save_data

    # Build the tabular representation, then store it on disk.
    tabularData = _generateTabularData(d)
    save_data(fPath, tabularData)
示例#20
0
    def _export_ods(self):
        """Export the report rows to an ODS file in the output folder."""
        output_filename = 'Ptt_report_{export_datetime}'.format(
            export_datetime=datetime.now().strftime('%Y-%m-%d'))

        # Fix: restore the lost '{filename}' placeholder so the computed
        # file name is actually used; the kwarg was previously ignored.
        output_path = os.path.join(
            self.output_folder, '{prefix}{filename}.{file_format}'.format(
                prefix=self.output_prefix,
                filename=output_filename,
                file_format=self.file_format.name))
        data = self._get_export_rows()
        save_data(output_path, data)
示例#21
0
文件: get_values.py 项目: Ziul/tcc2
def process(file='firefox_test_apt-search-ori.txt'):
    """Read one colon-separated results file, append its values into the
    module-level ``data`` structure, and save it as 'teste.ods'.

    NOTE: the parameter keeps the name ``file`` for backward
    compatibility even though it shadows the builtin.
    """
    sheet = file.split('_')[0]
    name = file.split('-')[-1].split('.')[0]
    model = type_algorithm[name][1]
    name = type_algorithm[name][0]
    # Fix: csv.reader requires text mode on Python 3 (was 'rb').
    with open(file, 'r', newline='') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=':')
        # Fix: the %-formatting must happen inside print(); the old code
        # applied % to print()'s None return value, raising TypeError.
        print('Loading %s from %s' % (name, sheet))
        for line in spamreader:
            data[sheet][model].append(float(line[1]))
    save_data('teste.ods', data)
示例#22
0
def write_to_ods(students, filename):
    """Write each student's name and scores into Sheet1 of an ODS file."""
    from pyexcel_ods import save_data

    rows = [["Name", "Academics", "IELTS", "Interview", "Total Avarage"]]
    for name, scores in students.items():
        # One row per student: name followed by all recorded scores.
        rows.append([name] + list(scores.values()))
    save_data(filename, {"Sheet1": rows})
def convert_xml_to_ods(xml_dir=XML_DIR, ods_dir=ODS_DIR):
    """Convert every XML file under *xml_dir* into an ODS file in *ods_dir*."""
    xml_files = get_xml_files(xml_dir)
    progress = tqdm(xml_files,
                    total=len(xml_files),
                    desc='XML files processed')
    for xml_file in progress:
        with open(xml_file, 'r', encoding='cp1251') as f:
            raw = f.read()
            # Cyrillic tag names are transliterated before attribute
            # extraction.
            cleaned = transliterate_cyrillic_tags(raw)
            attributes = get_xml_attributes(cleaned)
            workbook = {"XML Data": attributes}
            base_name = os.path.basename(xml_file)
            target = os.path.join(ods_dir, f'{base_name}.ods')
            save_data(target, workbook)
示例#24
0
    def export(self, action):
        """Export all team scouting JSON files into 'output.ods' with a
        stand-scouting sheet and a pit-scouting sheet.

        The template file supplies the header rows; every other team file
        supplies one data row per sheet.
        """
        stand = []
        pit = []

        # Header rows come from the template file.
        template = "template.json"
        teamstand = [template.replace(".json", "")]
        teampit = [template.replace(".json", "")]
        with open("Teams/" + template, 'r') as f:
            data = json.load(f)

        for category in data["Stand"]:
            teamstand.append(category)
            for field in data["Stand"][category]:
                teamstand.append(field)

        for field in data["Pit"]:
            teampit.append(field)

        stand.append(teamstand)
        # Fix: the pit header row was built but never added to its sheet.
        pit.append(teampit)

        # Fix: the outer loop variable was previously named ``item`` and
        # then shadowed by the inner loops over the JSON contents.
        for entry in os.listdir("Teams"):
            if ".json" in entry and entry != "template.json":
                teamstand = [entry.replace(".json", "")]
                teampit = [entry.replace(".json", "")]
                with open("Teams/" + entry, 'r') as f:
                    data = json.load(f)

                for category in data["Stand"]:
                    # Blank cell keeps team rows aligned under the
                    # per-category header cells.
                    teamstand.append("")
                    for field in data["Stand"][category]:
                        teamstand.append(data["Stand"][category][field])

                for field in data["Pit"]:
                    teampit.append(data["Pit"][field])

                stand.append(teamstand)
                # Fix: pit rows were computed but dropped, leaving the
                # "Pit Scouting" sheet permanently empty.
                pit.append(teampit)

        sheetdata = OrderedDict()
        sheetdata.update({"Stand Scouting": stand})
        sheetdata.update({"Pit Scouting": pit})
        save_data("output.ods", sheetdata)
示例#25
0
def generate_odsFile(matrix):
    """Dump each table in *matrix* into its own sheet of 'resultado.ods'.

    Only string cells are kept (non-strings are dropped, as before);
    decimal points become commas and 'nan' markers are blanked.
    """
    workbook = OrderedDict()
    for table in matrix:
        source_rows = np.array(table).tolist()
        cleaned = []
        for source_row in source_rows:
            cleaned.append([
                cell.replace('.', ',').replace('nan', '')
                for cell in source_row
                if type(cell) is str
            ])
        # The sheet is keyed by the table's top-left cell.
        workbook.update({table[0][0]: cleaned})
    save_data("resultado.ods", workbook)
示例#26
0
    def main(self, final_path):
        """Write the sorted overall scores to '<final_path>/result.ods'."""
        data = OrderedDict()
        self.list_order = self.sorter(self.total_scr_lst)
        # Highest scores first, with a header row on top.
        ranked = [list(pair) for pair in self.list_order]
        ranked.reverse()
        ranked.insert(0, ['Name', 'overall score (100)'])
        data.update({"Sheet 1": ranked})
        target = final_path + "/result.ods"
        print(target)
        save_data(target, data)
    def make(self, data):
        """Build the 'entered documents' ODS report and return its path."""
        filename = 'entered_documents_{}.ods'.format(
            datetime.today().timestamp())
        filepath = os.path.abspath(
            os.path.join('static', 'uploads', 'reports', filename))

        # Assemble the table: header row plus one (document, entry date)
        # row per item, everything stringified for the spreadsheet.
        table_data = [['Документ', 'Дата внесения']]
        table_data.extend([str(item[0]), str(item[1])] for item in data)

        save_data(filepath, {"Sheet 1": table_data})

        return filepath
示例#28
0
def configToOds(ods_file_path):
    """Dump every registered config option into a "Config" sheet of an
    ODS file at *ods_file_path*."""
    # Fix: dropped the redundant ``list([...])`` wrappers around literals.
    rows = [["Name", "Help", "Default value", "Optional", "Public"]]
    for c in ConfigIter():
        # Everything is stringified so the spreadsheet cells are uniform.
        rows.append([
            str(c.name),
            str(c.help),
            str(c.default_value),
            str(c.is_optional),
            str(c.is_public),
        ])
    print(rows)
    d = OrderedDict()
    d.update({"Config": rows})
    save_data(str(ods_file_path), d)
示例#29
0
def generateODS():
  """Assemble the per-host report sheet and save 'basicHostInfo.ods'."""
  printDBG(1, "Generating report in ODS format")
  rows = [["Host Name", "IP Address", "Lifecycle Environment",
      "Content View", "Security errata count", "Bugfix errata count",
      "Enhancement errata count", "Upgradeable package count",
      "Subscription status", "Katello Agent Status"]]
  # One row per host, pulled from the module-level hostObjects mapping.
  for hostname, info in hostObjects.items():
    rows.append([hostname, info['ip'], info['lifecycleEnvironment'],
        info['contentView'], info['secErrata'], info['bugErrata'],
        info['enhErrata'], info['pkgUpdates'], info['subStatus'],
        info['katelloAgent']])
  printDBG(2, "Saving host report")
  hostWorkbook.update({"Hosts Report": rows})
  save_data('basicHostInfo.ods', hostWorkbook)
示例#30
0
    def make(self, data):
        """Build the library fund ODS report and return its file path."""
        filename = 'library_fund_{}.ods'.format(datetime.today().timestamp())
        filepath = os.path.join(settings.MEDIA_ROOT, 'reports', filename)

        # Assemble the sheet: header row followed by one row per record
        # (first four fields of each item).
        table_data = [[
            'Сотрудник', 'Количество выданных книг',
            'Количество зарегистрированных читателей',
            'Зарегистрированные читатели'
        ]]
        table_data.extend([item[0], item[1], item[2], item[3]]
                          for item in data)

        save_data(filepath, {"Sheet 1": table_data})

        return filepath
示例#31
0
 def save_ods_from_excel(excel_file, target_ods_file):
     """Copy an Excel sheet into an ODS file, with every cell as text."""
     # Read the workbook and force every value to string, since we are
     # only writing the contents back out.
     frame = pd.read_excel(excel_file).astype(str)
     # pyods wants a list of rows: header first, then the data rows.
     rows = [list(frame.columns)]
     rows.extend(list(record.values) for _, record in frame.iterrows())
     # Populate the ordered book and save it via pyods.
     book = OrderedDict()
     book.update({"Moved sheet": rows})
     save_data(target_ods_file, book)
示例#32
0
    def new_worksheet(self, book_name, sheet_name, row_ct=2000, col_ct=26):
        """Add an empty row_ct x col_ct worksheet named *sheet_name* to
        the book *book_name*, save the book, and return the new sheet."""
        finished_filename = self.finish_filename(book_name)
        book_obj = self.get_book(finished_filename, False)
        assert (book_obj is not None), "Spreadsheet book has not been set!!"
        self.book = book_name
        self.worksheet_name = sheet_name
        print("setting the sheet name to \"%s\"" % self.worksheet_name)
        # Fix: the old loop appended a row and then filled the PREVIOUS
        # index, producing row_ct + 1 rows with a trailing unfilled one.
        sheet_as_arr = [['' for _ in range(col_ct)] for _ in range(row_ct)]

        book_obj.update({sheet_name: sheet_as_arr})
        pyexcel_ods.save_data(finished_filename, book_obj)
        return book_obj[sheet_name]
示例#33
0
def export(request):
    """Export the data in the current view."""
    # The payload arrives as a JSON string in the `data` POST field.
    raw_data = request.POST.get('data')
    if not raw_data:
        # Nothing to export.
        return HttpResponseBadRequest()

    data = json.loads(raw_data)

    # Normalise the session rows in place.
    for session in data["sessions"]:
        # Dates arrive as YYYYMMDD strings; reformat to YYYY-MM-DD.
        session[0] = datetime.datetime.strptime(
            session[0], '%Y%m%d').strftime("%Y-%m-%d")
        # Session counts arrive as strings; make them integers.
        session[2] = int(session[2])
        # The second column is just the index and is not exported.
        del session[1]

    # Referrer counts also arrive as strings.
    for referrer in data["referrers"]:
        referrer[1] = int(referrer[1])

    # One sheet per data set, each with a header row.
    ods = OrderedDict()
    ods["Sessions"] = [["Date", "Sessions"]] + data["sessions"]
    ods["Popular Content"] = [["Page URL", "Views"]] + data["pages"]
    ods["Top Referrers"] = [["Source", "Views"]] + data["referrers"]

    # Serialise the workbook into an in-memory buffer.
    buffer = BytesIO()
    save_data(buffer, ods)

    # Return it as a spreadsheet download.
    response = HttpResponse(
        content_type='application/vnd.oasis.opendocument.spreadsheet')
    response['Content-Disposition'] = 'attachment; filename="wagalytics.ods"'
    response.write(buffer.getvalue())
    return response
示例#34
0
    def handle(self, *args, **kwargs):
        """Export last month's debit transactions to *path* and email the
        file to *recipient*."""
        recipient = kwargs.get('recipient')
        if not recipient:
            raise ValueError('need a recipient')

        path = kwargs.get('path')
        if not path:
            raise ValueError('need a path')

        # Only outgoing money (negative amounts) from the previous month.
        transactions = Transaction.objects.last_month().filter(amount__lt=0)

        sheet = []
        for t in transactions:
            sheet.append((t.timestamp.strftime('%m-%d-%Y'),
                          t.description,
                          str(t.amount)))

        save_data(path, {'Sheet 1': sheet})
        send_email_with_export(recipient=recipient,
                               path=path,
                               body=kwargs.get('body', None))
示例#35
0
def export(request):
    """Export posted analytics data as an ODS spreadsheet download.

    Expects a JSON payload in the ``data`` POST field with ``sessions``,
    ``pages`` and ``referrers`` lists; returns the workbook as an
    attachment, or 400 Bad Request when no data was posted.
    """
    # Get JSON string posted to `data`
    raw_data = request.POST.get('data')

    # Reject a request without data
    if not raw_data:
        return HttpResponseBadRequest()

    # Convert JSON to a Python dict
    data = json.loads(raw_data)

    # Clean up session data
    for n in data["sessions"]:
        # Convert dates into datetime.date objects
        n[0] = datetime.datetime.strptime(n[0], '%Y%m%d').strftime("%Y-%m-%d")
        # Convert session counts into integers
        n[2] = int(n[2])
        # Second key is just the index, which isn't needed
        del n[1]

    # Clean up referrers data
    for n in data["referrers"]:
        # Convert counts into integers
        n[1] = int(n[1])

    # Format spreadsheet file
    ods = OrderedDict()
    ods["Sessions"] = [["Date", "Sessions"]] + data["sessions"]
    ods["Popular Content"] = [["Page URL", "Views"]] + data["pages"]
    ods["Top Referrers"] = [["Source", "Views"]] + data["referrers"]

    # Save the spreadsheet into memory
    io = BytesIO()
    save_data(io, ods)

    # Set response metadata
    response = HttpResponse(content_type='application/vnd.oasis.opendocument.spreadsheet')
    response['Content-Disposition'] = 'attachment; filename="wagalytics.ods"'

    # Write spreadsheet into response
    response.write(io.getvalue())
    return response
def load_csv(fname, do_normalize=False, save_as_csv=False, split_percent=80):
    """Load a CSV dataset and split it into training and test sets.

    Returns ``[training_set, test_set]``, each a dict with 'x' and 'y'
    lists. When *do_normalize* is set, the inputs are normalized (saving
    the per-column coefficients) and the labels rounded up; when
    *save_as_csv* is also set, the normalized data is written back out.
    """
    print('Loading data...', end='', flush=True)
    raw = np.array(get_data(fname)[fname][1:])  # skip header row
    x = raw[:, :5]
    y = raw[:, 6:]
    print('done', flush=True)
    if do_normalize:
        print('Normalizing inputs...', end='', flush=True)
        coeffs = [[], []]
        for i in range(x.shape[1]):
            # Fix: renamed unpacked values that shadowed builtins min/max.
            [x[:, i], col_min, col_max] = normalize(x[:, i])
            coeffs[0].append(col_min)
            coeffs[1].append(col_max)
        save_data('normalize_{}.csv'.format(model_name),
                  OrderedDict([('', coeffs)]))
        for i in range(y.shape[1]):
            y[:, i] = roundup(y[:, i])
        if save_as_csv:
            cols = [[
                'latitude', 'longitude', 'weekday', 'hour', 'min', 'green',
                'orange', 'red', 'darkred'
            ]]
            rows = np.concatenate((x, y), axis=1)
            data = OrderedDict([('', np.concatenate((cols, rows), axis=0))])
            save_data('normalized_data.csv', data)
        print('done', flush=True)
    total = len(x)
    # Fix: plain int() call instead of the C-style "(int)(...)" cast.
    n_train = int(total * (split_percent / 100))
    training_set = {'x': [], 'y': []}
    test_set = {'x': [], 'y': []}
    print(
        'Splitting into training and test set at {}%...'.format(split_percent),
        end='',
        flush=True)
    for i in range(n_train):
        training_set['x'].append(x[i])
        training_set['y'].append(y[i])
    for i in range(n_train, total):
        test_set['x'].append(x[i])
        test_set['y'].append(y[i])
    print('done', flush=True)
    return [training_set, test_set]
    c_matrices_all.append(c_matrices)

# Average the cross-validation rates across all runs; index 0 of each
# rates row is skipped (it holds the participant label, not a rate).
cv_averages = []
for i in range(len(locations)+1):
    if i == 0:
        continue
    cv_averages.append(0.0)
for rates in cv_rates_all:
    for i in range(len(locations)+1):
        if i == 0:
            continue
        cv_averages[i-1] += rates[i]
for i in range(len(locations)+1):
    if i == 0:
        continue
    cv_averages[i-1] = cv_averages[i-1] / len(c_matrices_all)

# Prepend row/column labels and append the averages row.
cv_averages.insert(0, 'avg')
locations.insert(0, 'participant')

cv_rates_all.insert(0, locations)
cv_rates_all.append(cv_averages)

# One sheet per feature set in the results workbook.
data.update({featureSet: cv_rates_all})
#data.update({"Sheet 1": [cv_rates]})
save_data("results.ods", data)
sheet = pe.get_book(file_name="results.ods")
# NOTE(review): Python 2 print statement — this script predates Python 3
# and will not parse under a Python 3 interpreter.
print sheet

exit(0);