Example #1
 def storeDataInDictionary(self, path, file):
     print("Reading from file: ", path)
     data = get_data(path)
     print(json.dumps(data))
     data2 = get_data(file)
     print(json.dumps(data2))
     self.dataDictionary[file] = json.dumps(data)
     print("Dictionary: ", self.dataDictionary)
Example #2
 def openOds(self,failinimi):
     data = get_data(failinimi)  # read the workbook once
     for shval in data.values():  # Python 3 dict views cannot be indexed
         headers=shval[0]
         iterval=iter(shval)
         next(iterval)
         values=[shval for shval in iterval]
         self.vd=[]
         self.d={}
         for ncol in range(len(headers)-1):
             for nrow in range(len(values)-1):
                 self.vd.append(values[nrow][ncol])
                 self.d[headers[ncol]]=self.vd
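Example #3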
def helper(exp_name):
    orig_path = os.path.join(SYM, exp_name, 'SYMLOG_metrics_original.ods')
    test_path = os.path.join(SYM, exp_name, 'SYMLOG_metrics_test.ods')
    out_path = os.path.join(SYM, exp_name, 'SYMLOG_metrics.ods')

    orig_data = get_data(orig_path)['SYMLOG']
    # print('orig_path',orig_path)
    # code.interact(local=dict(globals(),**locals()))
    test_data = get_data(test_path)['SYMLOG']

    # code.interact(local=dict(globals(),**locals()))
    all_data = orig_data[0:1] + [
        row for row in orig_data + test_data if row[0] != 'repository'
    ]
    save_data(out_path, {'SYMLOG': all_data})
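Example #3 relies on get_data and save_data being symmetric: the dict of sheet name to rows that get_data returns is exactly what save_data expects. A minimal merge sketch, assuming two hypothetical files that share a header row:

from pyexcel_ods3 import get_data, save_data

# "a.ods" and "b.ods" are placeholder inputs assumed to share the same header row.
rows_a = get_data("a.ods")["Sheet1"]
rows_b = get_data("b.ods")["Sheet1"]

# Keep one copy of the header, then append the data rows from both files.
merged = rows_a[:1] + rows_a[1:] + rows_b[1:]
save_data("merged.ods", {"Sheet1": merged})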
Example #4
def batchupload():
	form = UploadForm()
	if form.validate_on_submit():
		file = form.file.data
		Data = get_data(file, start_row=1, start_column=0)
		ActiveData = Data['Sheet1']
		for item in ActiveData:
			if len(item)==0:#Checks for blank rows, especially at the end of the list, in order to avoid 'out of range' errors
				continue# Skips the current item
			elif len(item)<7:
				flash(item[0] + ' is missing some information.', 'danger')
				continue
			elif item[5] not in KeyCycle.keys():#Checks whether the key information is consistent with the database key system
				flash(item[0] + ' - ' + item[2] + ' does not have a recognised key.', 'danger')
			elif item[6] not in (1, 2, 3, 4, 5):
				flash(str(item[0]) + ' - ' + str(item[2]) + ' does not have a recognised energy value.', 'danger')
			elif Track.query.filter_by(artist=item[0], album=item[1], title=item[2]).first():# Checks whether item is already in the database
				flash(str(item[0]) + ' - ' + str(item[2]) + ' is already in the database.', 'danger')
			else:
				track = Track(artist=str(item[0]), album=str(item[1]), title=str(item[2]), year=item[3], tempo=item[4],
							key=item[5], energy=item[6], contributor=current_user)
				if track.tempo >= 130:
					track.tempo = track.tempo/2
				db.session.add(track)
				flash(str(item[0]) + ' - ' + str(item[2]) + ' - uploaded.', 'success')
		db.session.commit()
		return redirect(url_for('batchupload'))
	return render_template('batchupload.html', title='Batch Upload', form=form, nav=True)
Example #5
def read_file_to_df(fname: str) -> pd.DataFrame:
    name, ext = os.path.splitext(fname)
    ext = ext.lower()
    if ext in supported_text_formats:
        df = read_csv_to_df(fname)

    elif ext in supported_excel_formats :
        df = pd.read_excel(fname, sheet_name=0, header=0, skiprows=0)#,
                #comment="#")#, skip_blank_lines=True)

    elif ext in supported_ods_formats :
        # get_data returns a dict of sheet name -> rows; use the first sheet's rows
        data = list(pyexcel_ods3.get_data(fname).values())[0]
        ave_line_length = np.mean([len(line) for line in data])
        data_lines = []
        for line in data:
            if len(line) >= ave_line_length: # assume this is the data
                data_lines.append(line)
        header = data_lines[0]
        data_lines = data_lines[1:]
        df_dict = dict([(column, []) for column in header])
        for line in data_lines:
            for column, pt in zip(df_dict.keys(), line):
                df_dict[column].append(pt)

        df = pd.DataFrame(df_dict)
        #read_ods(fname, sheet=0)

    else:
        raise UserWarning(f"Extension {ext} unsupported")

    return pd.DataFrame(df)
Example #6
def get_planilha(file):
	"""
		Return a list with all desired values of one spreadsheet.
	"""
	planilha = get_data(file, start_column=3, column_limit=2)
	planilha = planilha['CÁLCULOS']

	local_amostra = planilha[2][-1]
	
	values = []
	
	values.append(planilha[3][-1])
	values.append(planilha[5][-1])
	#values.append(planilha[11][-1])
	values.append(planilha[13][-1])
	values.append(planilha[15][-1])
	values.append(planilha[17][-1])
	values.append(planilha[20][-1])
	values.append(planilha[21][-1])
	
	if planilha[24][-1] == 'Branda':
		values.append(1)
	elif planilha[24][-1] == 'Moderada':
		values.append(2)
	elif planilha[24][-1] == 'Dura':
		values.append(3)
	elif planilha[24][-1] == 'Muito dura':
		values.append(4)
	#values.append(local_amostra)
		
	return values
Example #7
def read_data_entry(file_in, **pandas_kwargs):
    '''
    Read data from data entry in ods or csv format.
    '''
    file_ending = file_in.split('.')[-1]
    if file_ending == 'ods':
        # Choose first sheet from workbook.
        sheet = list(get_data(file_in).values())[0]
        header = sheet[0]
        content = sheet[1:]
        # Take care of completely empty trailing columns.
        header_length = len(header)
        content = [row + [None] * max(header_length - len(row), 0)
                   for row in content]
        # Handle 'dtypes' manually as pd.DataFrame does not accept it as a
        # dictionary.
        dtypes = pandas_kwargs.pop('dtype', None)
        sheet = pd.DataFrame(columns=header, data=content,
                             **pandas_kwargs)
        # take care of completely empty rows
        sheet = sheet.dropna(axis=0, how='all')
        if dtypes:
            string_columns = [k for k, v in dtypes.items() if v == 'str']
            sheet[string_columns] = sheet[string_columns].fillna('')
            sheet = sheet.astype(dtypes)

    elif file_ending == 'csv':
        sheet = pd.read_csv(file_in, **pandas_kwargs)

    else:
        raise NotImplementedError('File ending {} not supported.'.
                                  format(file_ending))
    return sheet
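A hypothetical call to read_data_entry, showing how the popped dtype dictionary is applied; the file name and column names are placeholders:

# 'entries.ods', 'title' and 'year' are assumptions for illustration only.
# String columns have their missing cells filled with '' before the cast,
# so astype does not choke on NaN values.
df = read_data_entry('entries.ods', dtype={'title': 'str', 'year': 'float'})
print(df.dtypes)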
Example #8
 def readFromFiles(self, folder):
     for dp, dn, fn in os.walk(folder):
         for f in fn:
             name, ext = os.path.splitext(f)
             if ext == ".ods":
                 data = get_data(os.path.join(dp, f))  # join with the walked directory, not the top-level folder
                 print(json.dumps(data))
Example #9
def importods(inp, outp):
    data = get_data(inp, encoding="utf-8")
    sheets = {}
    headers = [
        "Datum", "Vorname", "Nachname", "Strasse", "Hausnummer", "PLZ",
        "Stadt", "Telefon", "Code"
    ]
    #print(json.dumps(data))
    for key in data.keys():
        #print("key:", key)
        val1 = data.get(key)
        #print("val1:", val1)
        for l1 in val1:
            #print("  l1:", l1)
            if len(l1) == 0 or l1[0] == "Datum":
                continue
            datum = l1[0][0:10]
            if datum not in sheets:
                sheets[datum] = []
            l1 = l1[0:8]  # Datum - Telefon
            l1.append("")  # add empty code value
            sheets[datum].append(l1)
    #print("sheets1", sheets)
    for datum in sheets.keys():
        sheets[datum].sort(key=lambda l1: l1[0])
        sheets[datum].insert(0, headers)
    #print("sheets2", sheets)
    #print(json.dumps(sheets, indent=4))
    save_data(outp, sheets, encoding="utf-8")
Example #10
 def loadPinout(fileName):
     """
     Load pinout from ods file.
     """
     try:
         sheet = np.array(pyexcel_ods3.get_data(fileName)["pinout"])
         test=sheet[:,0] #check proper conversion to numpy.array
     except Exception as ex:
         print("Error! Maybe sheet contains empty cells (especially at ends of rows)?")
         raise ex
     rowV = sheet[0]
     nVersions = int((len(rowV)-1)/2)
     ret=[0]*nVersions #initialize return structure
     for nV in range(nVersions):
         ret[nV]={}
         ret[nV]["name"]=rowV[nV*2+2]
         partNames=sheet[1,nV*2+2]
         partNames=partNames.split("\n")
         ret[nV]["partNames"]=partNames
         ret[nV]["pins"]=sheet[2:,0]
         pinTypes=sheet[2:,nV*2+1]
         pinNames=sheet[2:,nV*2+2]
         ret[nV]["pinTypes"]={}
         ret[nV]["pinNames"]={}
         for i in range(len(ret[nV]["pins"])):
             ret[nV]["pinTypes"][ret[nV]["pins"][i]]=pinType.fromStr[pinTypes[i]]
             ret[nV]["pinNames"][ret[nV]["pins"][i]]=pinNames[i]
     return ret
Example #11
    def read_ods_sheet(self, referencial_spreadsheet):
        content_sheet = get_data(referencial_spreadsheet)
        content_sheet = content_sheet['Fournisseur']
        content_sheet = pd.DataFrame(
            content_sheet,
            columns=[
                self.referencialSupplierArray['name'],
                self.referencialSupplierArray['VATNumber'],
                self.referencialSupplierArray['SIRET'],
                self.referencialSupplierArray['SIREN'],
                self.referencialSupplierArray['adress1'],
                self.referencialSupplierArray['adress2'],
                self.referencialSupplierArray['adressPostalCode'],
                self.referencialSupplierArray['adressTown'],
                self.referencialSupplierArray['typology'],
                self.referencialSupplierArray['companyType']
            ])
        # Drop row 0 because it contains the column headers
        content_sheet = content_sheet.drop(0)
        # Drop empty rows
        content_sheet = content_sheet.dropna(axis=0, how='all')

        return content_sheet
Example #12
def add_doi(target, source, output=False):
    '''
    Add doi column to target using information from source.
    '''
    matching_columns = ['title']
    df_source = pd.read_csv(source, usecols=matching_columns + ['doi'])

    sheet = get_data(target)['ajps_reference_coding']
    header = sheet[0]
    content = sheet[1:]
    df_target = pd.DataFrame(columns=header, data=content)

    # Add doi information only for unique articles.
    df_target = df_target.assign(doi='', add_doi='')
    article_start = df_target['article_ix'] != ''
    df_target.loc[article_start, 'add_doi'] = ~df_target.loc[
        article_start, matching_columns].duplicated(keep=False)

    # Merge doi information from source for selected articles.
    fill_columns_down(df_target,
                      matching_columns + ['add_doi'])
    df_target[df_target['add_doi']] = df_target[df_target['add_doi']].\
        merge(df_source, how='left', on=matching_columns,
              suffixes=('_x', '')).drop(columns='doi_x').values
    df_target.loc[df_target[matching_columns + ['doi']].duplicated(),
                  matching_columns + ['doi']] = ''
    df_target = df_target[['doi', 'article_ix', 'title', 'match', 'context',
                           'reference_category']]
    df_target.sort_index(inplace=True)
    if output:
        df_target.to_csv(output, index=None, encoding='utf-8')
    else:
        return df_target
Example #13
def compile(name):

    global address
    if name is None:
        name = "new.ods"
    else:

        name = str(name) + ".ods"

    data = pyexcel_ods3.get_data(address)
    new_data = OrderedDict()
    cell = OrderedDict()
    new_data.update({"sheet1": []})
    # pyexcel_ods3.save_data("Diana - single sheet.ods",new_data)
    list1 = data.keys()
    for i in list1:
        list2 = data[i]
        cell.update({"sheet1": str(i)})
        for items in range(len(list2)):
            x = list2[items]
            if (len(x) > 0):
                new_data["sheet1"].append(x)
            else:
                print("none")
        pyexcel_ods3.save_data(name, cell)
        pyexcel_ods3.save_data(name, new_data)
Example #14
def create_cache(path):
    """Converts an ODS file to a single numpy array"""
    # Parse filename / path
    match = FILENAME_RE.search(path).groupdict()
    veugel_id = int(match["id"])
    day = int(match["day"])

    cache_file = os.path.join(CACHE_DIR, "{veugel_id}_{day}.npy".format(**locals()))
    if not os.path.exists(cache_file):
        # Parse ods files
        print("Parsing {path}..".format(path=path))
        sheets = pyexcel_ods3.get_data(path).values()

        # FILTERING HERE...
        rows = all_filters(to_rows(sheets))

        # Create cache dir if it not exists
        if not os.path.exists(CACHE_DIR):
            os.mkdir(CACHE_DIR)

        # Save numpy array in cache file
        array = numpy.array(list(rows), dtype=list(FIELDS.items()))
        numpy.save(open(cache_file, "wb"), array)

    return veugel_id, day, cache_file
Example #15
def main():
    parser = argparse.ArgumentParser()
    parser.description = DESCRIPTION

    parser.add_argument(dest='ods',
        help='Read source .ods file', metavar='FILE')

    parser.add_argument(dest='output_dir',
        help='Directory where .csv files will be created')

    parser.add_argument('--skip-row', action='append', type=int,
        help='Skip row with index IDX', metavar='IDX')

    parser.add_argument('--skip-column', action='append', type=int,
        help='Skip column with index IDX', metavar='IDX')

    args = parser.parse_args()

    data = pyexcel_ods3.get_data(args.ods)

    for name, sheet in data.items():
        output_name = os.path.join(args.output_dir, name + '.csv')
        with open(output_name, 'w') as f:
            skipped_rows = set(args.skip_row or [])
            skipped_columns = set(args.skip_column or [])
            write_csv(f, sheet, skipped_rows=skipped_rows, skipped_columns=skipped_columns)

    return 0
Example #16
 def load_ods_sheet(fileName, sheetName):
     try:
         data = pyexcel_ods3.get_data(fileName)[sheetName]
     except KeyError:
         raise ValueError("No \"%s\" tab found in %s" %(sheetName, fileName))
     except:
         raise ValueError("Error opening file " + fileName)
     return data
Example #17
 def getListNonKeywords(self, searchFileName):
     searchFile = get_data(searchFileName)
     sheet2 = searchFile["Sheet2"]
     sheet2.pop(0)
     result = []
     for row in sheet2:
         result.append(row[0])
     return result
Example #18
 def load_symbol_advanced(fileName, symbolName):
     try:
         data = pyexcel_ods3.get_data(fileName)["Symbol"]
     except KeyError:
         raise ValueError("No \"Symbol\" tab found in " + fileName)
     except:
         raise ValueError("Error opening file " + fileName)
     pinout_start=0
     params={}
     pinout=[]
     symbols=[]
     col=0
     for line in data:
         if line:
             if line[0]:
                 if pinout_start==1:
                     for i in range(4,len(line),2):
                         if str(line[i]).strip()==str(symbolName).strip():
                             col=i
                             break
                     pinout_start=2
                 elif pinout_start==2:
                     if col:
                         pin = line[0]
                         unit = line[1]
                         side = 0 if line[2]=="L" else 1
                         position = line[3]
                         pin_name = line[i]
                         direction = line[i+1]
                         while len(pinout)<=unit:
                             pinout.append([[],[]])
                         while len(pinout[unit][side])<=position:
                             pinout[unit][side].append(None)
                         pinout[unit][side][position]=[pin_name, pin, direction]
                     else:
                         raise ValueError("Symbol %s not found in file %s" % (symbolName, fileName))
                 else:
                     if str(line[0]).lower()=="pinout":
                         pinout_start = 1
                     else:
                         if len(line)>=2:
                             params[str(line[0]).strip()]=line[1]
             else:
                 pass
         else:
             pass
     i=0
     for sym in pinout:
         if len(pinout)>1:
             postfix = "_%d"%i
             width = params["Width%d"%i]
         else:
             postfix = ""
             width = params["Width"]
         symbols.append(symbolIC(symbolName + postfix,\
             pinsLeft=sym[0], pinsRight=sym[1], width=width, refDes=defaults.icRefDes,showPinNames=True, showPinNumbers=True))
         i=i+1
     return symbols
Example #19
def read_lexicon(*args, config_file=lexicon_config, number_of_columns=18):
    """Reads the .ods and returns a list of dictionary items representing the lexicon,
    unlike create_lexicon_entries() it doesn't group senses under 1 headword - it's just a data dump."""
    if args:
        logger.info(
            "Function not designed to accept arguments. \nDefine the settings in lexicon_config.py or pass a "
            "different config via the config_file **kwarg"
        )
    check_settings(
        config_file=config_file.settings
    )  # pass this in for testing purposes

    spreadsheet = config_file.settings["spreadsheet_name"]
    # read the file with pyexcel
    try:
        raw_data = pyexcel_ods3.get_data(spreadsheet)[
            config_file.settings["sheet_name"]
        ]
        # Convert column letters to list integers
        col = {
            k: letter_to_number(v) for k, v in config_file.spreadsheet_config.items()
        }
        assert (
            len(col) == number_of_columns
        ), "{n} items expected in spreadsheet_config, {m} defined".format(
            n=number_of_columns, m=len(col)
        )
        # pop the header if it exists
        if type(raw_data[0][col["id_col"]]) == str:  # Str == 'ID'
            raw_data.pop(0)
        raw_data = [x for x in raw_data if x != []]  # get rid of blank rows
    except KeyError:
        msg = "{sheet} is not a valid sheet name.".format(sheet=spreadsheet)
        logger.exception(msg)
        raise KeyError(msg)
    except pyexcel_io.exceptions.NoSupportingPluginFound:
        _, extension = os.path.splitext(spreadsheet)
        msg = (
            "{ext} is not a valid file extension. Must be .ods, .xls or .xlsx.".format(
                ext=extension
            )
        )
        logger.exception(msg)
        raise TypeError(msg)
    except IndexError:
        msg = "The file is blank"
        logger.exception(msg)
        raise AttributeError(msg)

    # pre process
    raw_data = pre_process_raw_data(raw_data, col)
    # format as a list of dict
    dict_data = raw_data_to_dict(raw_data, col, number_of_columns)
    # post process
    processed_data = post_process_raw_data(dict_data)

    logger.info("   -%d dictionary entries read" % len(processed_data))
    return processed_data
Example #20
 def write_typo_ods_sheet(self, vat_number, typo):
     content_sheet = get_data(self.referencialSuppplierSpreadsheet)
     for line in content_sheet['Fournisseur']:
         if line and line[1] == vat_number:
             try:
                 line[8] = typo
             except IndexError:
                 line.append(typo)
     save_data(self.referencialSuppplierSpreadsheet, content_sheet)
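Example #20 is the typical read-modify-write pattern: the rows returned by get_data are plain lists, so they can be edited in place and the whole book written back with save_data. A minimal sketch with placeholder file, sheet and cell values:

from pyexcel_ods3 import get_data, save_data

# "suppliers.ods", "Sheet1" and "ACME" are hypothetical names for illustration.
book = get_data("suppliers.ods")
for row in book["Sheet1"][1:]:      # skip the header row
    if row and row[0] == "ACME":
        row[1] = "updated"          # rows are lists, so this edits the book in place
save_data("suppliers.ods", book)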
Example #21
def make_spreadsheet(file_name):
    sheet = {}
    if os.path.exists(file_name):
        sheet = pyexcel_ods3.get_data(file_name)

    for category_type in get_category_list():
        sheet.update({category_type: [['Item name', 'Value in chaos orbs']]})

    return sheet
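Example #22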
def get_recorded_stats_for_run(stats_file, runid):
    stats = dict()
    lines = get_data(stats_file)['Sheet1']
    headers = lines[0]
    data = lines[1:]
    for d in data:
        if d:
            run_sub = d[0].replace('FDG', 'FGD')
            if run_sub == runid:
                for metric, score in zip(headers, d):
                    stats[metric] = str(score).strip()
    return stats
Example #23
def control_harvest_csv_extract(file, sheet):
    data = pods.get_data(file)

    not_empty = lambda x: len(x) > 0
    filtered_data = list(filter(not_empty, data[sheet][1::]))
    labels = data[sheet][0]

    documents = generate_documents_to_create(filtered_data)

    for doc in documents.keys():
        create = CreateDocumentThread(labels, documents[doc])
        create.start()
Example #24
def run(exp_name, reorient=False, num_threads=4):
    pre = 'run'
    log('running', exp_name, pre=pre)
    filenames = os.listdir(INDIR)
    data = {}  # {reponame:{metric_name: metric_value}}
    ods_dir = os.path.join(OUTDIR, exp_name)
    if not os.path.exists(ods_dir):
        os.mkdir(ods_dir)
    plots_dir = os.path.join(OUTDIR, exp_name, 'plots')
    if not os.path.exists(plots_dir):
        os.mkdir(plots_dir)
    # allocate the inputs for multithreading
    filenames_lsts = []
    for i, filename in enumerate(filenames):
        index = i % num_threads
        if index == len(filenames_lsts):
            filenames_lsts.append([])
        filenames_lsts[index].append(filename)
    inputs = [[lst, i, plots_dir, ods_dir, reorient]
              for i, lst in enumerate(filenames_lsts)]
    # run the multithreadable function
    pool = Pool(processes=num_threads)
    pool.map(run_helper, inputs)

    # aggregate the data together
    filepaths = [
        os.path.join(ods_dir, 'SYMLOG_metrics_{0}.ods'.format(i))
        for i in range(num_threads)
    ]
    all_data = get_data(filepaths[0])
    key = list(all_data.keys())[0]
    for filepath in filepaths[1:]:
        new_data = get_data(filepath)
        all_data[key].extend(
            new_data[key]
            [1:])  # need to make sure to exclude the header of each file
    out_path = os.path.join(ods_dir, 'SYMLOG_metrics.ods')
    save_data(out_path, all_data)
    log('done the run', pre=pre)
    print('done', exp_name)
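Example #25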
def import_ods(plan_file):
    from pyexcel_ods3 import get_data

    Fields.objects.all().delete()
    ModelsInline.objects.all().delete()
    Models.objects.all().delete()
    Apps.objects.all().delete()

    data = get_data(plan_file)
    import_apps(data['dag_apps'][1:])
    import_models(data['dag_models'][1:])
    import_fields(data['dag_fields'][1:])
    update_inline_models()
Example #26
File: cdc.py Project: obeezzy/cdc
 def load(self):
     if os.path.isfile(os.path.abspath(self.filename)):
         data = py_ods.get_data(self.filename)
         sheet_name = list(data.keys())[0]
         chord_table = data[sheet_name]
         chord_table.pop(0)  # Remove table header
         for chord_row in chord_table:
             self.add_chord(Chord(chord_row))
     elif isinstance(self.filename, str) \
             and self.filename.endswith('.ods'):
         raise RuntimeError('ODS file does not exist.')
     else:
         raise RuntimeError('ODS file does not exist.')
Example #27
 def test_write_book(self):
     self.content = {
         "Sheet1": [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]],
         "Sheet2": [[4, 4, 4, 4], [5, 5, 5, 5], [6, 6, 6, 6]],
         "Sheet3": [[u"X", u"Y", u"Z"], [1, 4, 7], [2, 5, 8], [3, 6, 9]],
     }
     self.testfile = "writer.ods"
     writer = Writer(self.testfile, "ods")
     writer.write(self.content)
     writer.close()
     content = get_data(self.testfile)
     for key in content.keys():
         content[key] = list(content[key])
     assert content == self.content
Example #28
def import_ods():
    from pyexcel_ods3 import get_data
    import json

    Fields.objects.all().delete()
    Models.objects.all().delete()
    Apps.objects.all().delete()

    data = get_data("plan.ods")
    import_apps(data['dag_apps'][1:])
    import_models(data['dag_models'][1:])
    import_fieldtypes(data['dag_fieldtypes'][1:])
    import_fields(data['dag_fields'][1:])
    update_inline_models()
Example #29
    def parse(self, path):
        headers = []
        if not os.path.exists(path):
            return []

        wb = get_data(path)
        sh = wb['Attributen']

        headers = self._get_headers(sh)

        records = self._get_records(sh, headers)
        clean_records = self._clean_records(records)

        return clean_records
Example #30
 def __init__(self, filename, sheet):
     """
     The calc sheet containing the words is loaded to the set self.words
     And the user is given choice of what he may want to do.
     """
     self.file = filename
     self.words = set()
     try:
         self.data = get_data(filename)
         self.words = set(x[0] for x in self.data[sheet])
     except KeyError:
         pass
     self.count = len(self.words)
     while True:
         self.choice_reader()
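Example #31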
    def parse(self, response):
        yield Source(**self.source)
        wb = get_data(io.BytesIO(response.body))
        for sheet in ['Maintained', 'Independent', 'PRU']:
            headers = wb[sheet][0]
            data = wb[sheet][1:]
            for k, row in enumerate(data):

                if self.settings.getbool(
                        "DEBUG_ENABLED") and k >= self.settings.getint(
                            "DEBUG_ROWS", 100):
                    break

                row = dict(zip(headers, row))
                row["type"] = sheet
                yield self.parse_row(row)
Example #32
    def parse_file(self, response, source_url):
        link = list(
            response.html.find("div.document", first=True).absolute_links)[0]
        self.logger.info("Using url {}".format(link))
        self.set_download_url(link)
        r = self.session.get(link)
        r.raise_for_status()

        wb = get_data(io.BytesIO(r.content))
        for sheet in ["Maintained", "Independent", "PRU"]:
            headers = wb[sheet][0]
            data = wb[sheet][1:]
            for k, row in enumerate(data):

                row = dict(zip(headers, row))
                row["type"] = sheet
                self.parse_row(row)
Example #33
    def get_test_cases(self, variant):
        data = get_data(str(self.input_file_path(variant)))
        sheet1 = data[list(data.keys())[0]]
        rows = [row[:12] for row in sheet1 if len(row) > 0 and len(row[0]) > 0]
        header = rows[0]
        raw_cases = [OrderedDict(zip(header, row)) for row in rows[1:]]

        cases = []
        for case in raw_cases:
            case['variant'] = variant
            case['width'] = int(case['board_dimensions'].split(',')[0].strip())
            case['height'] = int(case['board_dimensions'].split(',')[1].strip())
            case['row'] = int(case['test_position'].split(',')[1].strip())
            case['column'] = int(case['test_position'].split(',')[0].strip())
            cases.append(case)

        return cases
Example #34
def read_additional_sheet(sheet_name, config_file=lexicon_config):
    """Reads additional sheets with columns [Kovol, Phonetic, Dialect, Description]. Intended to separate
    out proper nouns from the main sheet."""
    spreadsheet = config_file.settings["spreadsheet_name"]
    number_of_columns = 4
    # read the file with pyexcel
    try:
        raw_data = pyexcel_ods3.get_data(spreadsheet)[sheet_name]
        raw_data.pop(0)
        raw_data = [x for x in raw_data if x != []]  # get rid of blank rows
        data = []
        for row in raw_data:
            while (
                len(row) < number_of_columns
            ):  # add blank columns to avoid index errors
                row.append("")
            d = {
                "kovol": row[letter_to_number("A")],
                "phonetic": row[letter_to_number("B")],
                "dialect": row[letter_to_number("C")],
                "description": row[letter_to_number("D")],
            }
            data.append(d)
        return data

    except KeyError:
        msg = "{sheet} is not a valid sheet name.".format(sheet=spreadsheet)
        logger.exception(msg)
        raise KeyError(msg)
    except pyexcel_io.exceptions.NoSupportingPluginFound:
        _, extension = os.path.splitext(spreadsheet)
        msg = (
            "{ext} is not a valid file extension. Must be .ods, .xls or .xlsx.".format(
                ext=extension
            )
        )
        logger.exception(msg)
        raise TypeError(msg)
    except IndexError:
        msg = "The file is blank"
        logger.exception(msg)
        raise AttributeError(msg)
Example #35
def Batchuploader(file):
    Data = get_data(file, start_row=1, start_column=0)
    ActiveData = Data['Sheet1']
    for item in ActiveData:
        item_exists = Track.query.filter_by(artist=item[0],
                                            album=item[1],
                                            title=item[2]).first()
        if item_exists:
            print(item[0] + ' - ' + item[2] + ' - is already in the database.')
        else:
            track = Track(artist=item[0],
                          album=item[1],
                          title=item[2],
                          year=item[3],
                          tempo=item[4],
                          key=item[5],
                          energy=item[6])
            if track.tempo >= 130:
                track.tempo = track.tempo / 2
            db.session.add(track)
            db.session.commit()
            print(item[0] + ' - ' + item[2] + ' - uploaded.')
Example #36
    def update_supplier_ods_sheet(self, _db):
        content_sheet = get_data(self.referencialSuppplierSpreadsheet)

        res = _db.select({
            'select': ['*'],
            'table': ['suppliers'],
            'where': ['status = ?'],
            'data': ['ACTIVE'],
        })
        try:
            sheet_name = False
            for sheet in content_sheet:
                sheet_name = sheet

            if sheet_name:
                content_sheet[sheet_name] = content_sheet[sheet_name][:1]
                for supplier in res:
                    line = [
                        supplier['name'] if supplier['name'] is not None else '',
                        supplier['vat_number'] if supplier['vat_number'] is not None else '',
                        supplier['siret'] if supplier['siret'] is not None else '',
                        supplier['siren'] if supplier['siren'] is not None else '',
                        supplier['adress1'] if supplier['adress1'] is not None else '',
                        supplier['adress2'] if supplier['adress2'] is not None else '',
                        supplier['postal_code'] if supplier['postal_code'] is not None else '',
                        supplier['city'] if supplier['city'] is not None else '',
                        supplier['typology'] if supplier['typology'] is not None else '',
                        supplier['company_type'] if supplier['company_type'] is not None else ''
                    ]
                    content_sheet[sheet_name].append(line)

        except IndexError:
            self.Log.error("IndexError while updating ods reference file.")

        save_data(self.referencialSuppplierSpreadsheet, content_sheet)
Example #37
def main():
    parser = argparse.ArgumentParser(description='weight-gazer')

    parser.add_argument('file_path', metavar='path to file', type=str, nargs=1,
                        help='path to the ods file')
    parser.add_argument('-sw', metavar='source wallpaper directory', type=str, nargs=1,
                        help='directory where source wallpapers are stored')
    parser.add_argument('-ow', metavar='output wallpaper directory', type=str, nargs=1,
                        help='directory where output wallpapers will be saved')
    parser.add_argument('-af', metavar='appearance frequency in percent', type=float,
                        nargs=1, help='percentage of wallpapers that will be overlaid')
    args = parser.parse_args()

    ods_data = get_data(args.file_path.pop())
    src_wallpaper_dir = args.sw.pop()
    out_wallpaper_dir = args.ow.pop()
    appearance_frequency = args.af.pop()

    WeightGazer(ods_data,
                src_wallpaper_dir,
                out_wallpaper_dir,
                appearance_frequency,
                )
Example #38
def verb_sheet_to_csv(
    spreadsheet=lexicon_config.settings["verb_spreadsheet"],
    csv_name="verbs.csv",
    checked=False,
):
    """Read the verb spreadsheet and return a .csv object of columns B-F for kovol-language-tools"""
    csv_path = os.path.join(lexicon_config.settings["target_folder"], csv_name)

    # Returns an alphabetically sorted list of Verb objects
    assert os.path.exists(spreadsheet), "Verb spreadsheet missing"
    raw_data = pyexcel_ods3.get_data(spreadsheet)["Paradigms"]
    # get rid of rows lacking an English translation
    raw_data = [x for x in raw_data if len(x) >= 6]
    if checked:
        # only include rows with something marked in checked column
        raw_data = [x[1:6] for x in raw_data if x[6]]
    else:
        raw_data = [x[1:6] for x in raw_data]

    # Create the csv and return the path
    with open(csv_path, "w") as csvfile:
        writer = csv.writer(csvfile)
        writer.writerows(raw_data)
    return csv_path
Example #39
 def getListSearchTouplesFromFile(self, searchFileName):
     searchfile = get_data(searchFileName)
     sheet1 = searchfile["Sheet1"]
     listSearchTouples = []
     sheet1.pop(0)
     self.averagePrices = []
     self.listingsCount = []
     self.sellThroughs = []
     for row in sheet1:
         keywordID = int(row[0])
         company = str(row[1])
         company = company.replace(" ", "%20")  # str.replace returns a new string
         model = str(row[2])
         model = model.replace(" ", "%20")
         keyword = company + "%20" + model
         ammount = int(row[3])
         averagePrice = float(row[6].replace("€", ""))
         listingCount = str(row[8])
         sellThrough = str(row[9])
         listSearchTouples.append((keyword, ammount, keywordID))
         self.averagePrices.append(averagePrice)
         self.listingsCount.append(listingCount)
         self.sellThroughs.append(sellThrough)
     return listSearchTouples
Example #40
def test_issue_8():
    from pyexcel_ods3 import get_data

    test_file = "12_day_as_time.ods"
    data = get_data(get_fixtures(test_file), skip_empty_rows=True)
    eq_(data["Sheet1"][0][0].days, 12)
Example #41
    parser = argparse.ArgumentParser('Generateur de cosmogonie')
    parser.add_argument(
        '-n', default=-1, type=int,
        help='nombre de paragraphes (-1 pour infini)'
    )
    parser.add_argument(
        '-p', nargs=2, default=(2, 5), type=int,
        help='longueur des paragraphes'
    )
    parser.add_argument(
        '-d', nargs=2, default=(1, 3), type=int,
        help='delai entre les paragraphes'
    )
    args = parser.parse_args()

    data = get_data("GR_AN_V1.ods")['Feuille1']

    n = 0
    while True:
        n_para_cells = random.randint(args.p[0], args.p[1])
        para = []
        while len(para) < n_para_cells:
            row = random.randint(1, len(data) - 1)
            col = random.randint(0, len(data[row]) - 1)
            cell = data[row][col].strip()
            if not cell:
                continue
            para.append(cell)
        print(', '.join('"%s"' % cell for cell in para))
        n += 1
        if args.n > 0 and n >= args.n:
            break
Example #42
def get_sheet(file_name_with_path, sheet_name):
    # Adjust the path (file_name_with_path) for your case
    # open the ods file that contains the munic
    data = get_data(file_name_with_path)
    return data.get(sheet_name)
Example #43
 def read_sheet(self, ods_file):
     self.sheet = pyexcel.get_data(ods_file)
Example #44
    uniqxls = list(set(xls))
    uniqods = list(set(ods))
    #open('notinxls.csv', 'a') as notinxls
    with open('uploads/notinods.csv', 'a') as notinods:
        #for i in uniqods:
            #if i not in uniqxls:
                #notinxls.write('"' + str(i) + '",\n')
        for i in uniqxls:
            if i not in uniqods:
                notinods.write('"' + str(i) + '",\n')

# The actual work.
# For the web.

f_excel = xlrd.open_workbook('uploads/' + str(text1), encoding_override='cp1251')
f_ods = get_data('uploads/' + str(text2))

# For running from the command line
#f_excel = xlrd.open_workbook(os.sys.argv[1], encoding_override='cp1251')
#f_ods =  get_data(os.sys.argv[2])

get_from_xls(f_excel)
get_from_ods(f_ods)
compire(from_xls,from_ods)

# Web.
print("Content-type: text/html\n")
print('Завершено!\n')
# Form for retrieving the result.
print('''\n
        <button onclick="location.href = 'http://localhost:8000/uploads/notinods.csv'">Получить файл</button>
Example #45
# coding: iso-8859-15
"""Realiza testes com o modulo pyoo 1.2
Manipulação do Libre office Calc
"""
from pyexcel_ods3 import get_data

if __name__ == "__main__":
    print('inicio')
    ARQUIVO_ODS = get_data('C:\\arquivo_com_planilha.ods')

    # it can also work with json.
    #resultado_json = json.dumps(ARQUIVO_ODS)

    PLANILHA = ARQUIVO_ODS['Planilha1']
    
    QUEBRA_LINHA = '\n'
    RESULTADO = ''
    for linha in PLANILHA:
        coluna1 = linha[0]
        RESULTADO += coluna1 + QUEBRA_LINHA

    # save to a file
    ARQUIVO = open('C:\\resultado_gravado.sql', 'w')
    ARQUIVO.write(RESULTADO)
    ARQUIVO.close()
    print('fim')
Example #46
            sheet = np.array(pyexcel_ods3.get_data(fileName)["pinout"])
            test=sheet[:,0] #check proper conversion to numpy.array
        except Exception as ex:
            print("Error! Maybe sheet contains empty cells (especially at ends of rows)?")
            raise ex
        rowV = sheet[0]
        nVersions = int((len(rowV)-1)/2)
        print(rowV)
        ret=[0]*nVersions #initialize return structure
        nPins=len(sheet)-2
        for nV in range(nVersions):
            ret[nV]={}
            ret[nV]["name"]=rowV[nV*2+2]
            partNames=sheet[1,nV*2+2]
            partNames=partNames.split("\n")
            ret[nV]["partNames"]=partNames
            ret[nV]["pins"]=sheet[2:,0]
            pinTypes=sheet[2:,nV*2+1]
            pinNames=sheet[2:,nV*2+2]
            ret[nV]["pinTypes"]={}
            ret[nV]["pinNames"]={}
            for i in range(len(ret[nV]["pins"])):
                ret[nV]["pinTypes"][ret[nV]["pins"][i]]=pinTypes[i]
                ret[nV]["pinNames"][ret[nV]["pins"][i]]=pinNames[i]
        return ret

if 1:
    print(__file__)
else:
    print (pyexcel_ods3.get_data("test.ods"))
    
Example #47
def import_data(modeladmin, request, queryset):

    class ProfesorHandler(xml.sax.handler.ContentHandler):

        def __init__(self, request, queryset):
            self.buffer = ""
            self.inField = 0
            self.modeladmin = modeladmin
            self.request = request
            self.queryset = queryset
            self.resultado = u'<h5>Resultado del proceso</h5>'

        def get_resultado(self):
            return self.resultado

        def startElement(self, name, attrs):
            if name == "dni":
                self.inField = 1
            elif name == "nombre":
                self.inField = 1
            elif name == "primer-apellido":
                self.inField = 1
            elif name == "segundo-apellido":
                self.inField = 1
            elif name == "es-usuario":
                self.inField = 1
            elif name == "login":
                self.inField = 1
            elif name == "id-usuario":
                self.inField = 1
            elif name == "departamento":
                self.inField = 1
            elif name == "grupos":
                self.grupos = []
            elif name == "grupo":
                self.inField = 1

        def characters(self, data):
            if self.inField:
                self.buffer += data

        def endElement(self, name):
            if name == "profesor":
                updated_values = {
                    'dni' : self.dni,
                    'es_usuario': self.esusuario,
                    'usuario_rayuela': self.login,
                    'id_usuario': self.idusuario,
                    'first_name': self.nombre,
                    'last_name': '%s %s' % (self.primerapellido, self.segundoapellido),
                    'is_staff': True,
                    'is_active': True
                }
                user, created = User.objects.update_or_create(username=self.login, defaults=updated_values)
                self.resultado += u'<ul>Procesando profesor %s %s, %s' % (self.primerapellido, self.segundoapellido,
                                                                            self.nombre)
                if created:
                    self.resultado += u'<li>Se ha creado el profesor %s</li>' % (user)
                # check whether the teacher already exists in the course
                curso = self.queryset[0].curso
                cursoprofesor, created = CursoProfesor.objects.get_or_create(profesor=user, curso=curso)
                if created:
                    self.resultado += u'<li>Se ha asignado %s al curso %s</li>' % (user, curso)
                if self.departamento:
                    departamento, created = Departamento.objects.get_or_create(departamento=self.departamento)
                    if created:
                        self.resultado += u'<li>Se ha creado el departamento %s</li>' % (departamento)
                    cursodepartamento, created = CursoDepartamento.objects.get_or_create(departamento=departamento,
                                                                                         curso=curso)
                    if created:
                        self.resultado += u'<li>Se ha creado el departamento %s en el curso %s</li>' % (departamento, curso)
                    try:
                        cursodepartamentoprofesor, created = CursoDepartamentoProfesor.objects.update_or_create(curso_profesor=cursoprofesor,
                                                                                defaults={'curso_departamento': cursodepartamento})
                        if created:
                            self.resultado += u'<li>Se ha asignado el profesor %s al departamento %s en el curso %s</li>' %\
                                            (cursoprofesor, cursodepartamento, curso)
                        else:
                            self.resultado += u'<li>Se ha cambiado el profesor %s al departamento %s en el curso %s</li>' %\
                                            (cursoprofesor, cursodepartamento, curso)
                    except MultipleObjectsReturned:
                            self.resultado += u'<li style="color: red;">PROBLEMA>>>>>>>>>>>>>>>: ' \
                                              u'profesor %s en más de un departamento en el curso %s</li>' % \
                                            (cursoprofesor, curso)

                    #cursodepartamentoprofesor, created = CursoDepartamentoProfesor.objects.get_or_create(curso_departamento=cursodepartamento,
                    #                                                                           curso_profesor=cursoprofesor)

                if self.grupos:
                    for grupoit in self.grupos:
                        grupo, created = Grupo.objects.get_or_create(grupo=grupoit)
                        if created:
                            self.resultado += u'<li>Se ha creado el grupo %s</li>' % (grupo)
                        cursogrupo, created = CursoGrupo.objects.get_or_create(grupo=grupo, curso=curso)
                        if created:
                            self.resultado += u'<li>Se ha creado el grupo %s en el curso %s</li>' % (grupo, curso)
                        cursogrupoprofesor, created = CursoGrupoProfesor.objects.get_or_create(curso_grupo=cursogrupo,
                                                                                    curso_profesor=cursoprofesor)
                        if created:
                            self.resultado += u'<li>Se ha asignado el profesor %s al grupo %s en el curso %s</li>' %\
                                            (cursogrupoprofesor, cursogrupo, curso)
                self.resultado += u'</ul>'
            elif name == "dni":
                self.inField = 0
                self.dni = self.buffer
            elif name == "nombre":
                self.inField = 0
                self.nombre = self.buffer
            elif name == "primer-apellido":
                self.inField = 0
                self.primerapellido = self.buffer
            elif name == "segundo-apellido":
                self.inField = 0
                self.segundoapellido = self.buffer
            elif name == "es-usuario":
                self.inField = 0
                if self.buffer == "true":
                    self.esusuario = True
                else:
                    self.esusuario = False
            elif name == "login":
                self.inField = 0
                self.login = self.buffer
            elif name == "id-usuario":
                self.inField = 0
                self.idusuario = self.buffer
            elif name == "departamento":
                self.inField = 0
                self.departamento = self.buffer
            elif name == "grupo":
                self.inField = 0
                self.grupo = self.buffer
                self.grupos.append(self.grupo)

            self.buffer = ""


    class AlumnoHandler(xml.sax.handler.ContentHandler):

        def __init__(self, request, queryset, dirname):
            self.buffer = ""
            self.inField = 0
            self.modeladmin = modeladmin
            self.request = request
            self.queryset = queryset
            self.dirname = dirname
            self.resultado = u'<h5>Resultado del proceso</h5>'

        def get_resultado(self):
            return self.resultado

        def startElement(self, name, attrs):
            if name == "nie":
                self.inField = 1
            elif name == "nombre":
                self.inField = 1
            elif name == "primer-apellido":
                self.inField = 1
            elif name == "segundo-apellido":
                self.inField = 1
            elif name == "fecha-nacimiento":
                self.inField = 1
            elif name == "es-usuario":
                self.inField = 1
            elif name == "login":
                self.inField = 1
            elif name == "id-usuario":
                self.inField = 1
            elif name == 'con-foto':
                self.inField = 1
            elif name == 'formato':
                self.inField = 1
            elif name == 'nombre-fichero':
                self.inField = 1
            elif name == "grupo":
                self.inField = 1

        def characters(self, data):
            if self.inField:
                self.buffer += data

        def endElement(self, name):
            if name == "alumno":
                updated_values = {
                    'nombre': self.nombre,
                    'apellidos': '%s %s' % (self.primerapellido, self.segundoapellido),
                    'fecha_nacimiento': self.fechanacimiento
#                    'usuario_rayuela': self.login
                }
                try:
                    updated_values['usuario_rayuela'] = self.login
                except:
                    pass

                alumno, created = Alumno.objects.update_or_create(nie=self.nie, defaults=updated_values)
                if self.nombrefichero:
                    ficherofoto = os.path.join(self.dirname, self.nombrefichero)
                    myfile = File(open(ficherofoto, 'rb'))
                    alumno.foto.save(self.nombrefichero, myfile)
                    myfile.close()
                self.resultado += u'<ul>Procesando alumno %s' % (alumno)
                if created:
                    self.resultado += u'<li>Se ha creado el alumno %s</li>' % (alumno)
                curso = self.queryset[0].curso
                alumno.save()
                cursoalumno, created = CursoAlumno.objects.get_or_create(curso=curso, alumno=alumno)
                if created:
                    self.resultado += u'<li>Se ha añadido el alumno %s al curso %s</li>' % (alumno, curso)
                if self.grupo:
                    grupo, created = Grupo.objects.get_or_create(grupo=self.grupo)
                    if created:
                        self.resultado += u'<li>Se ha creado el grupo %s</li>' % (grupo)
                    cursogrupo, created = CursoGrupo.objects.get_or_create(grupo=grupo, curso=curso)
                    if created:
                        self.resultado += u'<li>Se ha creado el grupo %s en el curso %s</li>' % (grupo, curso)
                    try:
                        cursogrupoalumno, created = CursoGrupoAlumno.objects.update_or_create(curso_alumno=cursoalumno,
                                                                                defaults={'curso_grupo': cursogrupo})
                        if created:
                            self.resultado += u'<li>Se ha asignado el alumno %s al grupo %s en el curso %s</li>' %\
                                            (cursoalumno, cursogrupo, curso)
                        else:
                            self.resultado += u'<li>Se ha cambiado el alumno %s al grupo %s en el curso %s</li>' %\
                                            (cursoalumno, cursogrupo, curso)
                    except MultipleObjectsReturned:
                            self.resultado += u'<li style="color: red;">PROBLEMA>>>>>>>>>>>>>>>: ' \
                                              u'alumno %s en más de un grupo en el curso %s</li>' % \
                                            (cursoalumno, curso)

                    #cursogrupoalumno, created = CursoGrupoAlumno.objects.get_or_create(curso_grupo=cursogrupo,
                    #                                                             curso_alumno=cursoalumno)

                self.resultado += u'</ul>'
            elif name == "nie":
                self.inField = 0
                self.nie = self.buffer
            elif name == "nombre":
                self.inField = 0
                self.nombre = self.buffer
            elif name == "primer-apellido":
                self.inField = 0
                self.primerapellido = self.buffer
            elif name == "segundo-apellido":
                self.inField = 0
                self.segundoapellido = self.buffer
            elif name == "fecha-nacimiento":
                self.inField = 0
                self.fechanacimiento = self.buffer[-4:]+'-'+self.buffer[3:5]+'-'+self.buffer[0:2]
            elif name == "es-usuario":
                self.inField = 0
                if self.buffer == "true":
                    self.esusuario = True
                else:
                    self.esusuario = False
            elif name == "login":
                self.inField = 0
                self.login = self.buffer
            elif name == "id-usuario":
                self.inField = 0
                self.idusuario = self.buffer
            elif name == "grupo":
                self.inField = 0
                self.grupo = self.buffer
            elif name == 'con-foto':
                self.inField = 0
                if self.buffer == "true":
                    self.confoto = True
                else:
                    self.confoto = False
            elif name == 'formato':
                self.inField = 0
                self.formato = self.buffer
            elif name == 'nombre-fichero':
                self.inField = 0
                self.nombrefichero = self.buffer

            self.buffer = ""


    for rayuela in queryset:
        parser = xml.sax.make_parser()
        if rayuela.tipo == 'PR':
            handler = ProfesorHandler(request, queryset)
            parser.setContentHandler(handler)
            parser.parse(rayuela.archivo.path)
            rayuela.resultado = handler.get_resultado()
        elif rayuela.tipo == 'AL':
            temp = descomprime(rayuela.archivo.path)
            handler = AlumnoHandler(request, queryset, temp)
            parser.setContentHandler(handler)
            parser.parse(os.path.join(temp, 'Alumnos.xml'))
            try:
                shutil.rmtree(temp)
            except:
                pass
            rayuela.resultado = handler.get_resultado()
        elif rayuela.tipo == 'DA':
            rayuela.resultado = u'<h5>Resultado del proceso</h5><ul>'
            data = get_data(rayuela.archivo.path)
            datos_alumnos = data['Alumnado del centro'][1:]
            for datos_alumno in datos_alumnos:
                alumno = Alumno.objects.filter(nie=datos_alumno[2]).first()
                if alumno:
                    rayuela.resultado += u'<li>Procesando alumno {}</li>'.format(alumno)
                    alumno.dni = datos_alumno[3]
                    alumno.direccion = datos_alumno[4]
                    alumno.codigo_postal = datos_alumno[5]
                    alumno.localidad = datos_alumno[6]
                    alumno.provincia = datos_alumno[9]
                    alumno.telefono = datos_alumno[10] + ' ' + datos_alumno[11]
                    alumno.email = datos_alumno[12]
                    alumno.expediente = datos_alumno[37]
                    alumno.save()
                    # dni of the first tutor
                    lista_tutores = []
                    if datos_alumno[18]:
                        # process the first tutor
                        updated_values = {
                            'nombre' : datos_alumno[21],
                            'apellidos' : datos_alumno[19] + ' ' + datos_alumno[20]
                        }
                        tutor, created = Tutor.objects.get_or_create(dni=datos_alumno[18], defaults=updated_values)
                        if not created:
                            tutor.nombre = datos_alumno[21]
                            tutor.apellidos = datos_alumno[19] + ' ' + datos_alumno[20]
                            tutor.save()
                        lista_tutores.append(tutor)
                    # dni of the second tutor
                    if datos_alumno[23]:
                        # process the second tutor
                        updated_values = {
                            'nombre' : datos_alumno[26],
                            'apellidos' : datos_alumno[24] + ' ' + datos_alumno[25]
                        }
                        tutor, created = Tutor.objects.get_or_create(dni=datos_alumno[23], defaults=updated_values)
                        if not created:
                            tutor.nombre = datos_alumno[26]
                            tutor.apellidos = datos_alumno[24] + ' ' + datos_alumno[25]
                            tutor.save()

                        lista_tutores.append(tutor)
                    if len(lista_tutores) > 0:
                        alumno.tutores.set(lista_tutores)
                else:
                    rayuela.resultado += u'<li style="color: red;">PROBLEMA>>>>>>>>>>>>>>>: ' \
                                         u'alumno {} con nie {} no existe</li>'.format(datos_alumno[0], datos_alumno[2])

            rayuela.resultado += u'</ul>'
        elif rayuela.tipo == 'TU':
            rayuela.resultado = u'<h5>Resultado del proceso</h5><ul>'
            data = get_data(rayuela.archivo.path)
            datos_tutores = data['Registro de tutores del centro'][1:]
            for datos_tutor in datos_tutores:
                updated_values = {
                    'telefono1' : datos_tutor[2],
                    'telefono2' : datos_tutor[3],
                    'domicilio' : datos_tutor[4],
                    'codigo_postal' : datos_tutor[5],
                    'municipio' : datos_tutor[6],
                    'provincia' : datos_tutor[7]
                }
                tutor, created = Tutor.objects.get_or_create(dni=datos_tutor[1], defaults=updated_values)
                if not created:
                    tutor.telefono1 = datos_tutor[2]
                    tutor.telefono2 = datos_tutor[3]
                    tutor.domicilio = datos_tutor[4]
                    tutor.codigo_postal = datos_tutor[5]
                    tutor.municipio = datos_tutor[6]
                    tutor.provincia = datos_tutor[7]
                    tutor.save()
                rayuela.resultado += u'<li>Procesando tutor {}</li>'.format(tutor)
            rayuela.resultado += u'</ul>'

        rayuela.procesado = True
        rayuela.save()