def knn1demo(ipfile,fpath,fname,lpath):
    # Train a 1-nearest-neighbour classifier on the spreadsheet's first two
    # columns (features) against column 3 (label, 1 == bullying present),
    # then classify one new sample and print the verdict.
    X=np.array(pyexcel.get_array(file_name=ipfile,start_row=1,start_column=1,column_limit=2))
    y=np.array(pyexcel.get_array(file_name=ipfile,start_row=1,start_column=3,column_limit=1))
    y=np.ravel(y)
    l=len(X)
    Y=np.reshape(y,l)
    n_neighbors=1
    clf = neighbors.KNeighborsClassifier(n_neighbors, weights='distance')
    clf.fit(X,Y)
    # assumes gennumsev() returns [feature1, feature2, actual_label] -- TODO confirm
    x=gennumsev(fpath,fname,lpath)
    label=x[2]
    xx=[x[:2]]
    Z=clf.predict(xx)
    if Z[0]==1:
        ans='Y'
    else:
        ans='N'
    p=np.append(X,xx)  # NOTE(review): result never used -- dead store?
    print "Is cyber bullying present?"
    print "Answer from knn (k=1) approach:",ans
    print "Actual scenario:",label
def __init__(self, dataset=None):
    """Initialise containers and load the working datasets.

    The original signature used ``dataset=pe.get_array(...)`` as a default
    argument, which performs file I/O once at class-definition time and
    shares the resulting list across every instance created without an
    explicit dataset.  Loading lazily keeps the call signature backward
    compatible while deferring the I/O to construction time.
    """
    if dataset is None:
        # same source the original default argument read from
        dataset = pe.get_array(file_name=path()[0])
    self.excel_dataset = pe.get_array(file_name=path()[1])
    self.dataset = dataset
    self.classes = []
    self.set_list = []
    self.numb_arr = []
    self.x = []
def test_issue_83_file_handle_no_generator():
    """Regression test: reading a workbook must not leak an open file handle."""
    proc = psutil.Process()
    for fixture in ("bug_01.csv", "test-single.csvz", "date_field.xls"):
        fixture_path = os.path.join("tests", "fixtures", fixture)
        handles_before = proc.open_files()
        # start with a csv file
        p.get_array(file_name=fixture_path)
        handles_after = proc.open_files()
        # no open file handle should be left behind by the read
        assert len(handles_after) - len(handles_before) == 0
def load_feeder_info_from_file(path):
    # Read feeder definitions from a local spreadsheet, appending each row to
    # the module-level ``available_feeders`` list until a "Stop" row is seen.
    # Read from local file
    print('Fetching feeder data from: {}'.format(path))
    for row in pyexcel.get_array(file_name=path, start_row=1):  # skip header
        if (row[0] != "Stop"):
            # Add a new feeder using these values
            available_feeders.append(
                Feeder(
                    feeder_ID=row[1],
                    device_name=row[2],
                    stack_x_offset=stof(row[3]),
                    stack_y_offset=stof(row[4]),
                    # dpv expects the size of components to be in 1/100 mm
                    component_size_x=stof(row[5]) * 100,
                    component_size_y=stof(row[6]) * 100,
                    height=stof(row[7]),
                    speed=stoi(row[8]),
                    head=stoi(row[9]),
                    angle_compensation=stoi(row[10]),
                    feed_spacing=stoi(row[11]),
                    # NOTE(review): raw cell, not `== 'Y'` like the flags below -- confirm intent
                    place_component=(row[12]),
                    # NOTE(review): reuses row[12] and row[13] is never read -- off-by-one? confirm
                    check_vacuum=(row[12] == 'Y'),
                    use_vision=(row[14] == 'Y'),
                    centroid_correction_x=stof(row[15]),
                    centroid_correction_y=stof(row[16]),
                    aliases=row[17]))
        else:
            break  # We don't want to read in values after STOP
    print("Feeder update complete")
def test_get_array_from_sql(self):
    """get_array over a SQL table yields the header row plus data rows."""
    expected = [
        ["X", "Y", "Z"],
        [1, 2, 3],
        [4, 5, 6],
    ]
    array = pe.get_array(session=Session(), table=Signature)
    assert array == expected
def test_force_file_type(self):
    """force_file_type lets a .txt file be written and read back as CSV."""
    target = "a.txt"
    pe.save_as(
        array=[[1, 2]],
        dest_file_name=target,
        dest_force_file_type="csv",
    )
    round_tripped = pe.get_array(file_name=target, force_file_type="csv")
    eq_([[1, 2]], round_tripped)
    os.unlink(target)
def __init__(self, input_fname, header_size=1):
    # Load a start/stop-time log file into a pandas DataFrame with a
    # computed Duration column, dropping incomplete rows.
    # get absolute file path
    self.__fname = os.path.abspath(input_fname)
    # Read data from the log file into a np array
    sst_narray = np.array(pxl.get_array(file_name=self.__fname))
    # TODO: check if the log file is valid
    # i.e at least 3 columns with the correct types
    # Create a DF with columns: subject_id, start_time, stop time
    # Row 0 supplies the column names; column 0 becomes the index.
    log = pd.DataFrame(sst_narray[1:, 1:3],
                       index=sst_narray[1:, 0],
                       columns=sst_narray[0, 1:3]).astype({
                           sst_narray[0, 1]: 'datetime64[ns]',
                           sst_narray[0, 2]: 'datetime64[ns]'
                       })
    # Add `duration` column (stop - start, a Timedelta)
    log['Duration'] = log[sst_narray[0, 2]] - log[sst_narray[0, 1]]
    # Inplace drop of NA
    log.dropna(inplace=True)
    # NOTE(review): the header_size parameter is never used -- confirm intent.
    self.__log = log
def get_words_list(lang: Language):
    """Return the raw word table for *lang* from the configured workbook."""
    sheet = get_sheet_name(lang)
    start = c.XL_TABLE_START_POSITION
    return pe.get_array(
        file_name=c.XL_FILE,
        sheet_name=sheet,
        start_column=start['column'],
        start_row=start['row'],
    )
def __from_excel(cls, input_fname, index_name):
    """ Read start/stop-times from excel-like files.

    Specific function to read start and stop times from .ods/.xls(x) files.

    Parameters
    ----------
    input_fname: str
        Path to the log file.
    index_name: str
        Name of the index.

    Returns
    -------
    log : a pandas.DataFrame
        A dataframe with the start and stop times (columns)
    """
    # Load the whole sheet into a numpy array; row 0 is the header.
    raw = np.array(pxl.get_array(file_name=input_fname))
    # Column 0 becomes the index, columns 1-2 the start/stop times.
    log = pd.DataFrame(
        raw[1:, 1:3],
        index=raw[1:, 0],
        columns=['Start_time', 'Stop_time'],
    )
    log.index.name = index_name
    return log
def load_feeder_info_from_file(path):
    """Parse feeder definitions from a spreadsheet, stopping at a 'Stop' row.

    Returns the list of constructed Feeder objects.
    """
    available_feeders = []
    logging.info('Fetching feeder data from: {}'.format(path))
    for row in pyexcel.get_array(file_name=path, start_row=1):  # skip header
        if row[0] == "Stop":
            break  # rows after the Stop marker are ignored
        available_feeders.append(Feeder(
            feeder_ID=row[1],
            device_name=clear_utf8_characters(row[2]),
            stack_x_offset=stof(row[3]),
            stack_y_offset=stof(row[4]),
            height=stof(row[5]),
            speed=stoi(row[6]),
            head=stoi(row[7]),
            angle_compensation=stoi(row[8]),
            feed_spacing=stoi(row[9]),
            place_component=(row[10] == 'Y'),
            check_vacuum=(row[11] == 'Y'),
            use_vision=(row[12] == 'Y'),
            centroid_correction_x=stof(row[13]),
            centroid_correction_y=stof(row[14]),
            aliases=row[15],
        ))
    logging.info("Feeder update complete")
    return available_feeders
def edit():
    """Render and process the extra-students edit form.

    On POST: removes rows whose ``remove_<id>`` checkbox was submitted and
    appends a new student row when all required fields were filled in, then
    persists the updated records.  Always re-renders the students page.
    """
    # shutil.copyfile(app.config['EXTRA_STUDENTS_SOURCE_PATH'], app.config['EXTRA_STUDENTS_WORKING_PATH'])
    records = get_array(file_name=app.config['EXTRA_STUDENTS_WORKING_PATH'])
    if request.method == 'POST':
        changed = False
        removes = []
        for key in request.form.keys():
            # raw string: fixes the invalid-escape-sequence warning for \d
            m = re.match(r'remove_(\d+)', key)
            if m:
                removes.append(int(m.group(1)))
                changed = True
        # NOTE(review): removes holds ints; if get_array yields string ids,
        # ``r[0] in removes`` never matches -- confirm the cell type.
        records = [r for r in records if not (r[0] in removes)]
        if request.form['student_number'] and request.form['first_name'] and \
                request.form['last_name'] and request.form['email']:
            records.append([
                request.form['student_number'],
                request.form['first_name'],
                request.form['last_name'],
                '104',
                request.form['gender'],
                request.form['email'],
                '9919.1'
            ])
            changed = True
        if changed:
            save_data(app.config['EXTRA_STUDENTS_WORKING_PATH'], records,
                      lineterminator='\n')
    return render_template('students.html', page_title='Edit Students',
                           records=records)
def test_get_array_from_file(self):
    """Round-trip: data saved to an xls file comes back unchanged."""
    testfile = "testfile.xls"
    pe.Sheet(self.test_data).save_as(testfile)
    eq_(pe.get_array(file_name=testfile), self.test_data)
    os.unlink(testfile)
def test_get_sheet_from_recrods(self):
    """A list of record dicts becomes a header row plus value rows."""
    records = [
        {"X": 1, "Y": 2, "Z": 3},
        {"X": 4, "Y": 5, "Z": 6},
    ]
    eq_(pe.get_array(records=records), self.test_data)
def extract_tape_data(feeders_file):
    """
    returns data of the form [ {"alias":["ALIAS", ...], feeder_index:X}, ]
    """
    feeders_data = []
    rows = pyexcel.get_array(file_name=feeders_file)
    for row in rows[1:]:  # skip header row
        if row[0] == "Stop":
            break
        component = row[2]
        # alias cell holds colon-separated alternatives; prepend the component
        aliases = [component] + row[16].split(":")
        feeders_data.append({"alias": aliases, "feeder_index": row[1]})
    return feeders_data
def convert_excel_to_txt(filename):
    # Convert a payments spreadsheet to a "~"-delimited text file in `home`,
    # normalising unicode cells and validating the payment-type column.
    fileout = home + (filename.split("/")[-1]).split('.')[0]+".txt"
    print fileout
    #print "Reading file ",filename
    records = pe.get_array(file_name=filename)
    f = open(fileout,'w')
    #print "Starting to process data. Hold your breath"
    for count,rec in enumerate(records[1:]):
        # Fixed columns required by the downstream format.
        rec[0] = "DATALIFE"
        rec[1] = "RPAY"
        rec[5] = "04182010000104"
        rec[4] = time.strftime("%d/%m/%Y")
        line = ""
        for value in rec:
            if value and type(value) is unicode:
                value = unicodedata.normalize('NFKD', value).encode('ascii','ignore')
            # NOTE(review): this coercion re-runs once per value in the row -- confirm placement.
            if rec[6] % 2 == 0:
                rec[6] = int(rec[6])
            # Cross check payment types with mahesh
            if rec[2] == "NEFT" or rec[2] == "IFT":
                line = line + str(value)+"~"
            else:
                showerror("Error","Your Payment Type is Wrong in column %d. Please correct it and run the script again."%(count+2))
                #print "Exiting Script"
                delete_content(f)
                f.close()
                root.quit()
                #sys.exit()
        f.write(line[:-1])
        f.write("\n")
    f.close()
    showinfo("Final Status","File converted. Please see this path %s"%(fileout))
    root.quit()
def __init__(self, dataset=None):
    """Initialise the model and training containers.

    The original signature used ``dataset=pe.get_array(...)`` as a default
    argument, which performs file I/O at class-definition time and shares
    one list across all instances.  Loading lazily preserves the call
    signature while deferring the I/O to construction time.
    """
    if dataset is None:
        # same source the original default argument read from
        dataset = pe.get_array(file_name=path()[1])
    self.dataset = dataset
    self.model = Sequential()
    self.x = []
    self.y_categorical = []
    self.numb_arr = []
    self.set_list = []
def get(self, request, graphId):
    """Return chart points parsed from the requested (or latest) upload.

    Responds 204 when nothing has been uploaded; otherwise parses the
    spreadsheet, drops empty rows/cells and the header, and returns
    ``{"points": [{"x": ..., "y": ...}, ...]}``.
    """
    print(request.data)
    if FileUploader.objects.last() is None:
        return Response(status=status.HTTP_204_NO_CONTENT)
    if graphId < 1:
        path = FileUploader.objects.last().file.path
    else:
        path = FileUploader.objects.filter(id=graphId).last().file.path
    # Excel parsing, cleaning from empty rows and cells
    excel_data = pyexcel.get_array(file_name=path)
    excel_data = [list(filter(None, el)) for el in excel_data]
    excel_data = list(filter(None, excel_data))
    excel_data.pop(0)  # drop header row
    # Checking number of columns: two columns -> (x, y); one column -> y
    # values plotted against the row index.
    testdata = []
    width = len(excel_data[0])  # idiomatic len() instead of __len__()
    if width == 2:
        for row in excel_data:
            testdata.append(dict(x=row[0], y=row[1]))
    elif width == 1:
        for i, row in enumerate(excel_data):
            testdata.append(dict(x=i, y=row[0]))
    resEx = dict(points=testdata)
    return Response(resEx, status=status.HTTP_200_OK)
def test_get_performance_platform_report(client, platform_admin_user, mocker):
    # The performance-platform xlsx export should contain one row per live
    # service, with blanks for services that have no live date.
    mock_get_user(mocker, user=platform_admin_user)
    client.login(platform_admin_user)
    # Two services: one fully populated, one with mostly-missing fields.
    mocker.patch('app.service_api_client.get_live_services_data', return_value={
        'data': [
            {
                'service_id': 'abc123',
                'service_name': 'jessie the oak tree',
                'organisation_name': 'Forest',
                'consent_to_research': True,
                'contact_name': 'Forest fairy',
                'organisation_type': 'Ecosystem',
                'contact_email': '*****@*****.**',
                'contact_mobile': '+447700900986',
                'live_date': 'Sat, 29 Mar 2014 00:00:00 GMT',
                'sms_volume_intent': 100,
                'email_volume_intent': 50,
                'letter_volume_intent': 20,
                'sms_totals': 300,
                'email_totals': 1200,
                'letter_totals': 0
            },
            {
                'service_id': 'def456',
                'service_name': 'james the pine tree',
                'organisation_name': 'Forest',
                'consent_to_research': None,
                'contact_name': None,
                'organisation_type': 'Ecosystem',
                'contact_email': None,
                'contact_mobile': None,
                'live_date': None,
                'sms_volume_intent': None,
                'email_volume_intent': 60,
                'letter_volume_intent': 0,
                'sms_totals': 0,
                'email_totals': 0,
                'letter_totals': 0
            },
        ]
    })
    response = client.get(url_for('main.performance_platform_xlsx'))
    assert response.status_code == 200
    # Parse the returned workbook and compare against the expected rows;
    # live_date is rendered as ISO-8601, missing dates become ''.
    assert pyexcel.get_array(
        file_type='xlsx',
        file_stream=response.get_data(),
    ) == [
        [
            'service_id',
            'agency',
            'service_name',
            '_timestamp',
            'service',
            'count'
        ],
        [
            'abc123',
            'Forest',
            'jessie the oak tree',
            '2014-03-29T00:00:00Z',
            'govuk-notify',
            1
        ],
        ['def456', 'Forest', 'james the pine tree', '', 'govuk-notify', 1],
    ]
def read_excel(name_xls):
    """Load the sheet and replace known place names with integer codes."""
    # Mapping of recognised place names to their numeric codes.
    city_codes = {
        'Краснодарский край': 0, 'Краснодар': 0,
        'Ростовская область': 1, 'Кропоткин': 1,
        'Ставропольский край': 2, 'Славянск': 2,
        'Ростов': 3,
        'Шахты': 4,
        'Батайск': 5,
        'Ставрополь': 6,
        'Пятигорск': 7,
        'Кисловодск': 8,
    }
    table = pyexcel.get_array(file_name=name_xls, name_columns_by_row=0)
    # Mutate cells in place, exactly like the original elif chain did.
    for row in table:
        for j, cell in enumerate(row):
            if cell in city_codes:
                row[j] = city_codes[cell]
    return table
def __set_file_sheet(self):
    # Populate ``self.sheet`` from the current file according to its type.
    if self.file_type == 'CSV':
        self.sheet = pyexcel.get_array(file_name=self.file_name)
    elif self.file_type == 'XLSX':
        # NOTE(review): no-op -- self.sheet is left unset for XLSX; confirm intended.
        pass
    elif self.file_type == 'XLS':
        # NOTE(review): no-op for XLS as well -- confirm intended.
        pass
def test_get_array_from_array(self):
    """get_array should return the input array unchanged."""
    data = [["X", "Y", "Z"], [1, 2, 3], [4, 5, 6]]
    assert pe.get_array(array=data) == data
def test_get_array_from_file(self):
    """Round-trip a nested list through an .xls file."""
    data = [["X", "Y", "Z"], [1, 2, 3], [4, 5, 6]]
    testfile = "testfile.xls"
    pe.Sheet(data).save_as(testfile)
    assert pe.get_array(file_name=testfile) == data
    os.unlink(testfile)
def test_get_array_from_dict(self):
    """A column dict becomes a header row plus rows of values."""
    columns = {"X": [1, 4], "Y": [2, 5], "Z": [3, 6]}
    eq_(pe.get_array(adict=columns), self.test_data)
def project(self, new_ordered_columns, exclusion=False):
    """
    Rearrange the sheet.

    Example:

    >>> sheet = Sheet(
    ...     [["A", "B", "C"], [1, 2, 3], [11, 22, 33], [111, 222, 333]],
    ...     name_columns_by_row=0)
    >>> sheet.project(["B", "A", "C"])
    pyexcel sheet:
    +-----+-----+-----+
    | B   | A   | C   |
    +=====+=====+=====+
    | 2   | 1   | 3   |
    +-----+-----+-----+
    | 22  | 11  | 33  |
    +-----+-----+-----+
    | 222 | 111 | 333 |
    +-----+-----+-----+
    >>> sheet.project(["B", "C"])
    pyexcel sheet:
    +-----+-----+
    | B   | C   |
    +=====+=====+
    | 2   | 3   |
    +-----+-----+
    | 22  | 33  |
    +-----+-----+
    | 222 | 333 |
    +-----+-----+
    >>> sheet.project(["B", "C"], exclusion=True)
    pyexcel sheet:
    +-----+
    | A   |
    +=====+
    | 1   |
    +-----+
    | 11  |
    +-----+
    | 111 |
    +-----+

    """
    from pyexcel import get_array
    the_dict = self.to_dict()
    # OrderedDict preserves the requested column order in the result.
    new_dict = OrderedDict()
    if exclusion:
        # Keep every column NOT named, in the sheet's original order.
        for column in the_dict.keys():
            if column not in new_ordered_columns:
                new_dict[column] = the_dict[column]
    else:
        # Keep exactly the named columns, in the requested order.
        for column in new_ordered_columns:
            new_dict[column] = the_dict[column]
    array = get_array(adict=new_dict)
    return Sheet(array, name=self.name, name_columns_by_row=0)
def knn15demo(ipfile,fpath,fname,lpath):
    # Train a 15-nearest-neighbour classifier on the spreadsheet's first two
    # columns (features) against column 3 (label), classify one new sample,
    # print the verdict and plot the training points.
    X=np.array(pyexcel.get_array(file_name=ipfile,start_row=1,start_column=1,column_limit=2))
    y=np.array(pyexcel.get_array(file_name=ipfile,start_row=1,start_column=3,column_limit=1))
    y=np.ravel(y)
    l=len(X)
    Y=np.reshape(y,l)
    n_neighbors=15
    clf = neighbors.KNeighborsClassifier(n_neighbors, weights='distance')
    clf.fit(X,Y)
    # assumes gennumsev() returns [feature1, feature2, actual_label] -- TODO confirm
    x=gennumsev(fpath,fname,lpath)
    label=x[2]
    x1=[x[:2]]
    Z=clf.predict(x1)
    if Z[0]==1:
        ans='Y'
    else:
        ans='N'
    print "Is cyber bullying present?"
    print "Answer from knn (k=15) approach:",ans
    print "Actual scenario:",label
    #res=[ans,label]
    #return res
    # Build a mesh spanning the feature range and scatter the training data.
    h=1
    cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    plt.figure()
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold, edgecolor='k', s=20)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.title("yes or no classification (k = %i, weights = 'distances')"% (n_neighbors))
    plt.show()
def test_save_as_and_append_colnames(self):
    # save_as with colnames= should prepend a header row to the data.
    data = [[1, 2, 3], [4, 5, 6]]
    sheet = pe.Sheet(data)
    testfile = "testfile.xls"
    # NOTE(review): identical to testfile, so the save below overwrites the
    # source file -- was "testfile2.xls" intended?
    testfile2 = "testfile.xls"
    sheet.save_as(testfile)
    # NOTE(review): `out_file` is a legacy keyword; newer pyexcel uses
    # dest_file_name (cf. the sibling test using dest_file_name).
    pe.save_as(file_name=testfile, out_file=testfile2,
               colnames=["X", "Y", "Z"])
    array = pe.get_array(file_name=testfile2)
    assert array == [["X", "Y", "Z"], [1, 2, 3], [4, 5, 6]]
    # NOTE(review): no os.unlink here -- testfile.xls is left behind.
def create_list_pyexcel(self):
    """Build and return the raw cell array for the current file.

    Uses the pyexcel library.  Supported formats: .xls and .xlsx.
    """
    self.common_list = pyexcel.get_array(file_name=self.file)
    return self.common_list
def data_from_csv(path, file_name, **kwargs):
    """Extract a csv file and return its cleaned contents.

    NOTE(review): **kwargs is accepted but never forwarded -- confirm intent.
    """
    full_path = os.path.join(path, file_name)
    # utf-8-sig strips a leading BOM if present
    raw = pe.get_array(file_name=full_path, encoding='utf-8-sig')
    return clean.clean_data(raw, trim_strings=True)
def convert_to_xlsx(file):
    """Convert the given .ods file into a .xlsx with the given group name in the new filename."""
    filename, group = file
    # keep only alphanumerics so the group name is safe for use in a filename
    legal_group = ''.join(ch for ch in group if ch.isalnum())
    xlsx_name = 'attendance ' + legal_group + '.xlsx'
    contents = pyexcel.get_array(file_name=filename)
    pyexcel.save_as(array=contents, dest_file_name=xlsx_name)
    return [xlsx_name, group]
def calculation(test_path, ans_path):
    """Score answer sheets against a key and write results/IQ to results.xls.

    Bug fix: the IQ value was written at row ``row`` while the matching name
    was written at ``row - 1``, shifting every IQ score one row below its
    name.  Both writes now use ``row - 1``.
    """
    my_array = pyexcel.get_array(file_name=test_path)
    ans = pyexcel.get_array(file_name=ans_path)
    book = xlwt.Workbook(encoding="utf-8")
    results = book.add_sheet("Результаты")
    iq = book.add_sheet("IQ")
    for row in range(1, len(my_array)):
        total = 0
        for i in range(1, len(my_array[row])):
            # normalise the answer (lowercase, no spaces) before comparing
            if str(my_array[row][i]).lower().replace(" ", '') == str(ans[i][0]):
                total += 1
        results.write(row - 1, 0, my_array[row][0])
        results.write(row - 1, 1, total)
        iq.write(row - 1, 0, my_array[row][0])
        if total == 0:
            iq.write(row - 1, 1, "<75")
        else:
            iq.write(row - 1, 1, 75 + 2.5 * total)
    book.save("results.xls")
def test_download(self):
    """The /download endpoint must serve exactly the expected CSV rows."""
    expected = [
        ["REVIEW_DATE","AUTHOR","ISBN","DISCOUNTED_PRICE"],
        ["1985/01/21","Douglas Adams",'0345391802','5.95'],
        ["1990/01/12","Douglas Hofstadter",'0465026567','9.95'],
        ["1998/07/15","Timothy \"The Parser\" Campbell",'0968411304','18.99'],
        ["1999/12/03","Richard Friedman",'0060630353','5.95'],
        ["2004/10/04","Randel Helms",'0879755725','4.5']
    ]
    response = self.app.get('/download')
    parsed = pe.get_array(file_type="csv", file_content=response.data)
    assert parsed == expected
def test_save_as_and_append_colnames(self):
    # save_as with colnames= should prepend a header row to the data.
    data = [[1, 2, 3], [4, 5, 6]]
    sheet = pe.Sheet(data)
    testfile = "testfile.xls"
    # NOTE(review): same name as testfile, so the source file is overwritten
    # in place -- was "testfile2.xls" intended?
    testfile2 = "testfile.xls"
    sheet.save_as(testfile)
    pe.save_as(file_name=testfile, dest_file_name=testfile2,
               colnames=["X", "Y", "Z"])
    array = pe.get_array(file_name=testfile2)
    eq_(array, [["X", "Y", "Z"], [1, 2, 3], [4, 5, 6]])
    # NOTE(review): the temp file is not removed afterwards.
def read_events(fpath, response_type):
    """Read data from the file an Excel work book.

    Parameters
    ----------
    fpath : str or `pathlib.Path`
        Filename of the input file.
    response_type : str
        Type of response. Valid options are: 'psa' for psuedo-spectral
        acceleration, or 'fa' for Fourier amplitude.

    Returns
    -------
    ext : str
        Extension of input file
    reference : :class:`numpy.ndarray`
        Reference of the response. This is either period (sec) for
        response_type 'psa' or frequency (Hz) for response_type 'fa'
    events : List[dict]
        List of events read from the file. See ``Note`` in
        :func:`.calc_compatible_spectra` for more information on structure
        of the dictionaries.
    """
    assert response_type in ['psa', 'fa']
    fpath = pathlib.Path(fpath)
    data = pyexcel.get_array(file_name=str(fpath))
    ext = fpath.suffix
    # The first len(PARAMETER_NAMES) rows each hold one parameter:
    # column 0 is the label, the remaining columns are per-event values.
    parameters = {
        key: data[i][1:]
        for i, (key, label) in enumerate(PARAMETER_NAMES)
    }
    event_row = len(parameters) + 1
    event_count = len(data[0]) - 1
    # Column 0 below the parameter rows is the period/frequency reference.
    reference = np.array([row[0] for row in data[event_row:]])
    events = []
    for i in range(event_count):
        # Column (i + 1) holds the response values for event i.
        resps = np.array([row[i + 1] for row in data[event_row:]])
        # Extract the appropriate attributes
        e = {k: v[i] for k, v in parameters.items()}
        e[response_type] = resps
        if 'region' in e:
            e['region'] = get_region(e['region'])
        events.append(e)
    return ext, reference, events
def test_get_sheet_from_recrods(self):
    """Records (list of dicts) become a header row plus value rows."""
    records = [
        {"X": 1, "Y": 2, "Z": 3},
        {"X": 4, "Y": 5, "Z": 6},
    ]
    result = pe.get_array(records=records)
    assert result == [["X", "Y", "Z"], [1, 2, 3], [4, 5, 6]]
def get_array(self, **keywords):
    """
    Get a list of lists from the file

    :param sheet_name: For an excel book, there could be multiple
                       sheets. If it is left unspecified, the
                       sheet at index 0 is loaded. For 'csv',
                       'tsv' file, *sheet_name* should be None anyway.
    :param keywords: additional key words
    :returns: A list of lists
    """
    # merge instance defaults with the caller's keywords, then delegate
    return pe.get_array(**self.get_params(**keywords))
def test_get_array_from_memory(self):
    """Save to an in-memory xls stream, then read the bytes back."""
    data = [["X", "Y", "Z"], [1, 2, 3], [4, 5, 6]]
    stream = pe.save_as(dest_file_type="xls", array=data)
    result = pe.get_array(file_content=stream.getvalue(), file_type="xls")
    assert result == data
def test_get_array_from_dict(self):
    """A column dict yields a header row plus rows of values."""
    columns = {"X": [1, 4], "Y": [2, 5], "Z": [3, 6]}
    expected = [["X", "Y", "Z"], [1, 2, 3], [4, 5, 6]]
    assert pe.get_array(adict=columns) == expected
def test_single_sheet_file(self):
    # Upload a single-sheet book in each supported format and verify the
    # server echoes the same data back as xls content.
    array = [["id", "name"], [1, "News"], [2, "Sports"]]
    for upload_file_type in ["ods", "xls"]:
        self.init()
        print("Uploading %s" % upload_file_type)
        file_name = "test.%s" % upload_file_type
        io = pe.save_as(array=array, dest_file_type=upload_file_type)
        if not PY2:
            # binary formats come back as BytesIO; text streams need encoding
            if isinstance(io, BytesIO):
                content = io.getvalue()
            else:
                content = io.getvalue().encode("utf-8")
        else:
            content = io.getvalue()
        response = self.app.post("/upload/categories",
                                 upload_files=[("file", file_name, content)])
        ret = pe.get_array(file_type="xls", file_content=response.body)
        assert array == ret
        self.done()
def validate_data_file(file):
    """Copy the upload to a temp file, parse it and validate its contents.

    Raises
    ------
    ValidationError
        Listing every problem found in the file.
    """
    tmp_name_file = 'tmp' + get_extension(file)
    tmp_path = '{}/{}'.format(settings.MEDIA_ROOT, tmp_name_file)
    # context manager guarantees the handle is closed even if the write fails
    # (the original leaked it on exceptions)
    with open(tmp_path, 'wb') as tmp_file:
        tmp_file.write(file.read())
    file_data = pyexcel.get_array(file_name=tmp_path)
    list_error = []
    list_error += _validate_prev_data_in_file(
        file_data[: settings.EXCEL_START_STRING])
    list_error += _validate_excel_data(file_data[settings.EXCEL_START_STRING:])
    if len(list_error):
        # fixed typo in user-facing message: "файе" -> "файле"
        text = ['В файле присутствуют ошибки:'] + list_error
        raise ValidationError(text)
def test_single_sheet_file(self):
    # Upload a single-sheet book in each supported format; the view should
    # return the same data rendered as xls content.
    array = [
        ["id", "name"],
        [1, "News"],
        [2, "Sports"]
    ]
    for upload_file_type in ['xls', 'ods']:
        # reset the database between format iterations
        with app.app_context():
            db.drop_all()
            db.create_all()
        print("Uploading %s" % upload_file_type)
        file_name = "test.%s" % upload_file_type
        io = pe.save_as(array=array, dest_file_type=upload_file_type)
        response = self.app.post('/upload/categories',
                                 buffered=True,
                                 data={"file": (io, file_name)},
                                 content_type="multipart/form-data")
        ret = pe.get_array(file_type="xls", file_content=response.data)
        assert array == ret
def upload(request):
    # Handle spreadsheet upload: validate the form, read the saved temp copy,
    # queue an async load job, then render a status page with job cookies.
    if request.method == "POST":
        form = UploadFileForm(request.POST, request.FILES)
        if form.is_valid():
            on_date = form.cleaned_data['load_date']
            # only used to derive the temp file's extension
            file_handle = request.FILES['file']
            tmp_name_file = 'tmp' + get_extension(file_handle)
            path = "{}/{}".format(settings.MEDIA_ROOT, tmp_name_file)
            # NOTE(review): the upload itself is never written to `path`
            # here -- presumably the form/validator saved it earlier; confirm.
            data = pyexcel.get_array(file_name=path)
            # skip the header region, hand the rest to the async loader
            job = loader.delay(data[settings.EXCEL_START_STRING:], on_date)
            os.remove(path)
            response = render_to_response(
                'loader/index.html',
                {
                    'message': 'Данные будут загружены',
                    'message_type': 'success',
                    'form': form,
                },
                context_instance=RequestContext(request))
            # cookies let the frontend poll the job status
            response.set_cookie('kids_loader_job', job.get_id())
            response.set_cookie('kids_job_task', '2')
            return response
        else:
            return render_to_response(
                'loader/index.html',
                {
                    'message': 'Ошибка загрузки',
                    'message_type': 'danger',
                    'form': form,
                },
                context_instance=RequestContext(request))
    else:
        form = UploadFileForm()
    return render_to_response(
        'loader/index.html',
        {'form': form},
        context_instance=RequestContext(request))
def read_deepsoil_results(name):
    # Parse a DEEPSOIL output workbook into time-series, response-spectrum
    # and Fourier-spectrum float arrays, addressed by spreadsheet column.
    data = pyexcel.get_array(file_name=str(fpath_data / (name + '.xlsx')))
    # Name record fields 'A', 'B', ... so columns can be addressed like
    # spreadsheet columns.
    names = ','.join(string.ascii_uppercase[:len(data[0])])
    records = np.rec.fromrecords(data, names=names)

    def extract_cols(records, cols, first, last, names):
        # Slice rows [first:last) of each lettered column, coerced to float.
        return {
            name: records[col][first:last].astype(float)
            for col, name in zip(cols, names)
        }

    d = dict()
    # Read the time series
    # NOTE(review): the row bounds (11800, 114, 16384) are hard-coded to this
    # workbook's layout -- confirm they match the DEEPSOIL export in use.
    d['time_series'] = extract_cols(
        records, 'ABCDE', 1, 11800,
        ['time', 'accel', 'strain', 'stress', 'arias_int'])
    # Read the response spectrum
    d['resp_spec'] = extract_cols(records, 'GH', 1, 114, ['period', 'psa'])
    # Read the Fourier amplitude
    d['fourier_spec'] = extract_cols(records, 'JKL', 1, 16384,
                                     ['freq', 'ampl', 'ratio'])
    return d
# Python 2 script: convert a payments spreadsheet (argv[1]) into a
# "~"-delimited text file with fixed columns required downstream.
import pyexcel as pe
import pyexcel.ext.xls
import unicodedata
import sys
import time

def delete_content(pfile):
    # Truncate an already-open file back to zero bytes.
    pfile.seek(0)
    pfile.truncate()

filename = sys.argv[1]
fileout = filename.split('.')[0]+".txt"
print "Reading file ",filename
records = pe.get_array(file_name=filename)
f = open(fileout,'w')
print "Starting to process data. Hold your breath"
for count,rec in enumerate(records[1:]):  # skip header row
    # Fixed columns required by the downstream format.
    rec[0] = "DATALIFE"
    rec[1] = "RPAY"
    rec[5] = "04182010000104"
    rec[4] = time.strftime("%d/%m/%Y")
    line = ""
    for value in rec:
        # Fold unicode cells down to plain ASCII bytes.
        if value and type(value) is unicode:
            value = unicodedata.normalize('NFKD', value).encode('ascii','ignore')
        # NOTE(review): re-runs once per value in the row -- confirm placement.
        if rec[6] % 2 == 0:
            rec[6] = int(rec[6])
        # Cross check payment types with mahesh
def test_get_array(self):
    """get_array called with only unknown keywords should return None."""
    result = pe.get_array(x="something")
    # identity comparison is the idiomatic way to test for None (was `== None`)
    assert result is None
def test_get_array(self):
    # Call get_array with an unknown keyword only; presumably this test is
    # decorated with @raises(...) outside this view -- TODO confirm.
    pe.get_array(x="something")