def bookings_report(_date):
    """Build a CSV report of all bookings created on *_date*.

    Returns a StringIO rewound to the start, with one row per booking:
    local (Perth) creation time, confirmation number, customer name,
    active-invoice amount/reference, and booking-type label.
    """
    bookings = Booking.objects.filter(created__date=_date)
    strIO = StringIO()
    fieldnames = ['Date', 'Confirmation Number', 'Name', 'Amount', 'Invoice', 'Booking Type']
    writer = csv.writer(strIO)
    writer.writerow(fieldnames)
    # Map stored booking-type codes to their human-readable labels.
    types = dict(Booking.BOOKING_TYPE_CHOICES)
    for b in bookings:
        b_name = u'{} {}'.format(b.details.get('first_name', ''), b.details.get('last_name', ''))
        # Timestamps are stored in UTC; report them in Perth local time.
        created = timezone.localtime(b.created, pytz.timezone('Australia/Perth'))
        writer.writerow([
            created.strftime('%d/%m/%Y %H:%M:%S'),
            b.confirmation_number,
            b_name.encode('utf-8'),
            b.active_invoice.amount if b.active_invoice else '',
            b.active_invoice.reference if b.active_invoice else '',
            # Fall back to the raw code for unknown types.
            types.get(b.booking_type, b.booking_type),
        ])
    strIO.flush()
    strIO.seek(0)
    return strIO
def test_freq(self):
    """tests FREQ, FREQ1, FREQ2, FREQ4"""
    model = BDF(debug=False)
    sid = 101
    # FREQ with a single scalar frequency.
    freqs = 0.1
    freq = model.add_freq(sid, freqs, comment='freq')
    #print(freq)
    # FREQ with a list of frequencies (same set id).
    freqs = [2.0, 3.0]
    freq = model.add_freq(sid, freqs, comment='freq')
    #print(freq)
    # FREQ1: f1 plus ndf equal increments of df -> ndf+1 frequencies.
    f1 = 0.
    df = 2.0
    freq1 = model.add_freq1(sid, f1, df, ndf=5, comment='freq1')
    assert len(freq1.freqs) == 6, 'freqs=%s' % freq1.freqs
    #print(freq1)
    # FREQ2: nf intervals between f1 and f2 -> nf+1 frequencies,
    # the largest of which must equal f2.
    f1 = 1.
    f2 = 8.0
    freq2 = model.add_freq2(sid, f1, f2, nf=6, comment='freq2')
    assert len(freq2.freqs) == 7, 'freqs=%s' % freq2.freqs
    assert np.allclose(freq2.freqs.max(), f2), freq2.freqs
    #print(freq2)
    # FREQ4: frequencies spread around modal frequencies.
    freq4 = model.add_freq4(sid, f1, f2, fspread=0.1, nfm=3, comment='freq4')
    #print(model.frequencies[sid])
    #print(freq4)
    model.validate()

    # Exercise the card writers in both small (8) and large (16) field formats.
    freq.raw_fields()
    freq.write_card()
    freq.write_card(size=16)
    freq1.raw_fields()
    freq1.write_card()
    freq1.write_card(size=16)
    freq2.raw_fields()
    freq2.write_card()
    freq2.write_card(size=16)
    freq4.raw_fields()
    freq4.write_card()
    freq4.write_card(size=16)

    # Round-trip the model through an in-memory BDF file and verify the
    # re-read model survives (un)cross-referencing.
    bdf_file = StringIO()
    model.write_bdf(bdf_file, close=False)
    out = bdf_file.getvalue()
    bdf_file.seek(0)
    model2 = read_bdf(bdf_file, punch=True, debug=False)
    model2.uncross_reference()
    model2.safe_cross_reference()
    model2.uncross_reference()
def _test_dynamic2(self):
    """
    xref test for:
     - LOADSET -> LSEQ -> FORCE, PLOAD
     - DLOAD -> RLOAD1 -> TABLED1

    LOADSET take priority
    useful for generalized dynamic forces/disp/vel/acc
    """
    # Minimal frequency-response deck exercising the LOADSET/DLOAD
    # cross-reference chain described in the docstring.
    msg = """
SOL 108
CEND
SUBCASE 1
LOADSET = 27
DLOAD = 25
DISP(PLOT) = ALL
BEGIN BULK
$LSEQ SID EXCITEID LID
LSEQ, 27, 28, 26
$RLOAD1 SID EXCITEID DELAY DPHASE TC TD
RLOAD1, 25, 28, 0.0, 10.0, 29
$FORCE SID GRID CID F N1 N2 N3
FORCE, 26, 425, , 2.5, 1.0
$PLOAD SID PRES GRID1 GRID2 GRID3 GRID4
PLOAD, 26, 50.0, 63, 64, 88, 91
$TABLED1 TID XAXIS YAXIS
$ x1 y1 x2 y2 x3 y3 x4 y4
TABLED1, 29, LINEAR, LINEAR
,0.0, 0.5, 0.6, 0.4, 0.8, 0.7, ENDT
"""
    model = BDF(debug=False)
    # Feed the deck through an in-memory file object.
    bdf_file = StringIO()
    bdf_file.write(msg)
    bdf_file.seek(0)
    model.read_bdf(bdf_file)
def csv_content(self):
    """Return the form's current queryset rendered as CSV content.

    The result is a StringIO buffer, rewound to the beginning, whose
    first row holds the column titles from ``CSV_COLUMNS`` and whose
    remaining rows hold one object each.
    """
    if not hasattr(self, 'CSV_COLUMNS'):
        raise NotImplementedError(
            'Child classes of CsvForm must implement the CSV_COLUMNS constant'
        )

    # Split the column spec into field accessors and header titles.
    columns = [spec['column'] for spec in self.CSV_COLUMNS]
    headers = [spec['title'].encode('utf-8') for spec in self.CSV_COLUMNS]

    # One CSV row per object, every cell coerced to UTF-8 text.
    rows = [
        [unicode(csv_getvalue(obj, column)).encode('utf-8')
         for column in columns]
        for obj in self.get_queryset()
    ]

    buffered = StringIO()
    writer = csv.writer(buffered)
    writer.writerow(headers)
    writer.writerows(rows)
    buffered.seek(0)
    return buffered
def test_split_good(self):
    """A single 8-base read is split into two 4-base mates."""
    source = StringIO("""@header
AGCTTTTT
+
IIIIB+++
""")
    target1 = StringIO()
    target2 = StringIO()
    srf2fastq.convert_single_to_two_fastq(source, target1, target2)

    checks = [
        (target1, ['@header/1', 'AGCT', '+', 'IIII']),
        (target2, ['@header/2', 'TTTT', '+', 'B+++']),
    ]
    for target, wanted in checks:
        target.seek(0)
        lines = target.readlines()
        self.assertEqual(len(lines), 4)
        for line, want in zip(lines, wanted):
            self.assertEqual(line.rstrip(), want)
def test_split_at_with_header(self):
    """Reads are split and the supplied header prefix is applied."""
    source = StringIO("""@header1
AGCTTTTT
+
@IIIB+++
@header2
AGCTTTTT
+
IIIIB+++
""")
    target1 = StringIO()
    target2 = StringIO()
    srf2fastq.convert_single_to_two_fastq(source, target1, target2,
                                          header="foo_")

    # Two records are expected per output (8 lines); only the first
    # record's lines are checked in detail.
    checks = [
        (target1, ['@foo_header1/1', 'AGCT', '+', '@III']),
        (target2, ['@foo_header1/2', 'TTTT', '+', 'B+++']),
    ]
    for target, wanted in checks:
        target.seek(0)
        lines = target.readlines()
        self.assertEqual(len(lines), 8)
        for line, want in zip(lines, wanted):
            self.assertEqual(line.rstrip(), want)
def user_report():
    """Build a CSV of duplicated email-user records (same name and DOB).

    Refreshes the reporting view, then returns a rewound StringIO with one
    row per duplicated (first name, last name, DOB) combination, or None
    when there are no duplicates.
    """
    strIO = None

    # (Re)create the reporting view that groups users by name and DOB.
    cursor = connection.cursor()
    sql = 'CREATE OR REPLACE VIEW accounts_emailuser_report_v AS \
select md5(CAST((first_name,last_name,dob)AS text)) as hash,count(*) as occurence, first_name,last_name,\
dob from accounts_emailuser group by first_name,last_name,dob;'
    cursor.execute(sql)

    duplicates = EmailUserReport.objects.filter(occurence__gt=1)
    if duplicates:
        strIO = StringIO()
        fieldnames = ['Occurence', 'Given Name(s)', 'Last Name', 'DOB']
        writer = csv.DictWriter(strIO, fieldnames=fieldnames)
        writer.writeheader()
        for user in duplicates:
            writer.writerow({
                'Occurence': user.occurence,
                'Given Name(s)': user.first_name,
                'Last Name': user.last_name,
                'DOB': user.dob,
            })
        strIO.flush()
        strIO.seek(0)
    return strIO
def test_serialize_file(self):
    """A pipeline dumped to a file-like object reloads with the same protocol."""
    pipeline = Pipeline.from_functions(['infer_central_dogma'])
    buffer_ = StringIO()
    pipeline.dump(buffer_)
    buffer_.seek(0)
    reloaded = Pipeline.load(buffer_)
    self.assertEqual(pipeline.protocol, reloaded.protocol)
def test_contradictory_date_entries_warn(self):
    """4.8.5.3 Emit warning on contradictory date entries."""
    # DateRevised (2010) predates DateCompleted (2011), which is
    # contradictory: a revision cannot precede completion.
    stream = StringIO(
        wrap_document_text(construct_document_from(**{
            "Author": {
                "ForeName": "John",
                "LastName": "Smith"
            },
            "DateCompleted": {
                "Year": "2011",
                "Month": "01",
                "Day": "01"
            },
            "DateRevised": {
                "Year": "2010",
                "Month": "01",
                "Day": "01"
            },
        }))
    )
    # Capture anything the parser writes to stderr.
    stderr = StringIO()
    self.patch(sys, "stderr", stderr)
    result = parsexml.parse_element_tree(
        parsexml.file_to_element_tree(stream)
    )
    stderr.seek(0)
    stderr_out = stderr.read()
    # Both dates are discarded and a warning is emitted.
    self.assertThat(result["pubDate"], Is(None))
    self.assertThat(result["reviseDate"], Is(None))
    self.assertThat(stderr_out, Contains("is greater than"))
def _load_config_file(self, fp):
    """Parse *fp* into self.cp, prepending a synthetic section header.

    ConfigParser requires every option to live under a section, so
    '[SECTION]' is injected ahead of the file's contents.
    """
    header = '[' + self.SECTION + ']' + os.linesep
    buffered = StringIO()
    buffered.write(header)
    buffered.write(fp.read())
    buffered.seek(0, os.SEEK_SET)
    self.cp.readfp(buffered)
def user_report():
    """Build a CSV of duplicated email-user records (same name and DOB).

    Refreshes the reporting view, then returns a rewound StringIO with one
    row per duplicated (first name, last name, DOB) combination, or None
    when no duplicates exist.
    """
    strIO = None

    # (Re)create the reporting view that groups users by name and DOB.
    cursor = connection.cursor()
    sql = 'CREATE OR REPLACE VIEW accounts_emailuser_report_v AS \
select md5(CAST((first_name,last_name,dob)AS text)) as hash,count(*) as occurence, first_name,last_name,\
dob from accounts_emailuser group by first_name,last_name,dob;'
    cursor.execute(sql)

    duplicated_users = EmailUserReport.objects.filter(occurence__gt=1)
    if duplicated_users:
        strIO = StringIO()
        fieldnames = ['Occurence', 'First Name', 'Last Name', 'DOB']
        writer = csv.DictWriter(strIO, fieldnames=fieldnames)
        writer.writeheader()
        for user in duplicated_users:
            writer.writerow({
                'Occurence': user.occurence,
                'First Name': user.first_name,
                'Last Name': user.last_name,
                'DOB': user.dob,
            })
        strIO.flush()
        strIO.seek(0)
    return strIO
def get_pydoc(spec):
    """Return the pydoc help text for the object named by *spec*.

    Implicitly returns None when the object cannot be loaded.
    """
    obj = load_obj(spec)
    if not obj:
        return
    rendered = StringIO()
    pydoc.Helper(output=rendered).help(obj)
    rendered.seek(0)
    return rendered.read()
def outstanding_bookings():
    """Email a CSV summary of unpaid future bookings to the configured recipients."""
    try:
        outstanding = []
        today = datetime.date.today()
        # Future, non-cancelled bookings, excluding booking types '1' and '3'
        # (meaning defined by Booking.BOOKING_TYPE_CHOICES - confirm).
        for b in Booking.objects.filter(is_canceled=False,departure__gte=today).exclude(booking_type__in=['1','3']):
            if not b.paid:
                outstanding.append(b)

        # Build the CSV entirely in memory.
        strIO = StringIO()
        fieldnames = ['Confirmation Number','Customer','Campground','Arrival','Departure','Outstanding Amount']
        writer = csv.writer(strIO)
        writer.writerow(fieldnames)
        for o in outstanding:
            fullname = '{} {}'.format(o.details.get('first_name'),o.details.get('last_name'))
            writer.writerow([o.confirmation_number,fullname,o.campground.name,o.arrival.strftime('%d/%m/%Y'),o.departure.strftime('%d/%m/%Y'),o.outstanding])
        strIO.flush()
        strIO.seek(0)
        _file = strIO

        dt = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
        recipients = []
        recipients = OutstandingBookingRecipient.objects.all()
        # Fall back to the site notification address when no recipients
        # are configured.
        email = EmailMessage(
            'Unpaid Bookings Summary as at {}'.format(dt),
            'Unpaid Bookings as at {}'.format(dt),
            settings.DEFAULT_FROM_EMAIL,
            to=[r.email for r in recipients]if recipients else [settings.NOTIFICATION_EMAIL]
        )
        # NOTE(review): 'OustandingBookings' is misspelled in the attachment
        # name; left unchanged since downstream consumers may rely on it.
        email.attach('OustandingBookings_{}.csv'.format(dt), _file.getvalue(), 'text/csv')
        email.send()
    except:
        raise
def bookings_report(_date):
    """Build a CSV report of bookings created on *_date*.

    Returns a StringIO rewound to the start with one row per booking:
    local (Perth) creation time, confirmation number, customer name,
    active-invoice amount/reference, and booking-type label.
    """
    bookings = Booking.objects.filter(created__date=_date)
    strIO = StringIO()
    fieldnames = [
        'Date', 'Confirmation Number', 'Name', 'Amount', 'Invoice',
        'Booking Type'
    ]
    writer = csv.writer(strIO)
    writer.writerow(fieldnames)
    # Map stored booking-type codes to their human-readable labels.
    types = dict(Booking.BOOKING_TYPE_CHOICES)
    for b in bookings:
        b_name = u'{} {}'.format(b.details.get('first_name', ''),
                                 b.details.get('last_name', ''))
        # Timestamps are stored in UTC; report them in Perth local time.
        created = timezone.localtime(b.created,
                                     pytz.timezone('Australia/Perth'))
        writer.writerow([
            created.strftime('%d/%m/%Y %H:%M:%S'),
            b.confirmation_number,
            b_name.encode('utf-8'),
            b.active_invoice.amount if b.active_invoice else '',
            b.active_invoice.reference if b.active_invoice else '',
            # Fall back to the raw code for unknown types.
            types.get(b.booking_type, b.booking_type),
        ])
    strIO.flush()
    strIO.seek(0)
    return strIO
def test_contradictory_date_entries_warn(self):
    """4.8.5.3 Emit warning on contradictory date entries."""
    # DateRevised (2010) predates DateCompleted (2011), which is
    # contradictory: a revision cannot precede completion.
    stream = StringIO(
        wrap_document_text(
            construct_document_from(
                **{
                    "Author": {
                        "ForeName": "John",
                        "LastName": "Smith"
                    },
                    "DateCompleted": {
                        "Year": "2011",
                        "Month": "01",
                        "Day": "01"
                    },
                    "DateRevised": {
                        "Year": "2010",
                        "Month": "01",
                        "Day": "01"
                    },
                })))
    # Capture anything the parser writes to stderr.
    stderr = StringIO()
    self.patch(sys, "stderr", stderr)
    result = parsexml.parse_element_tree(
        parsexml.file_to_element_tree(stream))
    stderr.seek(0)
    stderr_out = stderr.read()
    # Both dates are discarded and a warning is emitted.
    self.assertThat(result["pubDate"], Is(None))
    self.assertThat(result["reviseDate"], Is(None))
    self.assertThat(stderr_out, Contains("is greater than"))
def run_simulate(args):
    """
    Runs the simulation and outputs the results in text.
    """
    n = args.sample_size
    m = args.num_loci
    rho = args.recombination_rate
    num_populations = args.num_populations
    # Symmetric migration matrix: the same rate between every pair of
    # distinct demes, zero on the diagonal.
    migration_matrix = [[
        args.migration_rate * int(j != k) for j in range(num_populations)
    ] for k in range(num_populations)]
    # Defaults: all n samples in deme 0, zero growth, unit population sizes;
    # each may be overridden by the corresponding CLI argument below.
    sample_configuration = [0 for j in range(num_populations)]
    population_growth_rates = [0 for j in range(num_populations)]
    population_sizes = [1 for j in range(num_populations)]
    sample_configuration[0] = n
    if args.sample_configuration is not None:
        sample_configuration = args.sample_configuration
    if args.population_growth_rates is not None:
        population_growth_rates = args.population_growth_rates
    if args.population_sizes is not None:
        population_sizes = args.population_sizes
    random.seed(args.random_seed)
    s = Simulator(
        n, m, rho, migration_matrix, sample_configuration,
        population_growth_rates, population_sizes,
        args.population_growth_rate_change, args.population_size_change,
        args.migration_matrix_element_change, args.bottleneck, 10000)
    s.simulate()
    # Round-trip the result through msprime's text node/edgeset format
    # before handing the tree sequence to the reporting code.
    nodes_file = StringIO()
    edgesets_file = StringIO()
    s.write_text(nodes_file, edgesets_file)
    nodes_file.seek(0)
    edgesets_file.seek(0)
    ts = msprime.load_text(nodes_file, edgesets_file)
    process_trees(ts)
def dumps(collection, **json_args):
    """Serialize an iterable of JSON objects into a single string.

    Included primarily to mirror the `json` library's API.  For simple
    cases this may be more appropriate:

        >>> os.linesep.join(list(map(json.dumps, collection))

    Parameters
    ----------
    collection : iter
        Iterable that produces one JSON object per iteration.
    json_args : **json_args, optional
        Additional keyword arguments for `NLJWriter()`.

    Returns
    -------
    str
    """
    buffered = StringIO()
    # StringIO has no __exit__ in older Python, hence the explicit close.
    try:
        with NLJWriter(buffered, 'w', **json_args) as dst:
            for obj in collection:
                dst.write(obj)
            buffered.seek(0)
            return buffered.read()
    finally:
        buffered.close()
def test_show_urls(mfindurls):
    """show_urls queries the datafind server and writes one URL per line."""
    mfindurls.return_value = URLS
    args = argparse.Namespace(
        server='test.datafind.com:443',
        observatory='X',
        type='test',
        gpsstart=0,
        gpsend=10,
        url_type='file',
        match=None,
        lal_cache=False,
        names_only=False,
        frame_cache=False,
        gaps=None,
    )
    out = StringIO()
    main.show_urls(args, out)
    # NOTE(review): Mock's `called_with` is not an assertion method (unlike
    # `assert_called_with`); this expression is always truthy and records
    # the expected call without verifying it.
    assert mfindurls.called_with(args.observatory, args.type,
                                 args.gpsstart, args.gpsend,
                                 match=args.match, urltype=args.url_type,
                                 on_gaps='ignore', host=args.server)
    out.seek(0)
    assert list(map(str.rstrip, out.readlines())) == URLS
def annual_admissions_booking_report(aadata):
    """Build a CSV report of annual admission bookings.

    Parameters
    ----------
    aadata : iterable of dict
        Booking records with 'id', 'customer' (first/last name), 'year',
        'status', 'annual_booking_period_group_name', an optional
        'sticker_no', and an optional 'details' dict of contact and
        vessel fields.

    Returns
    -------
    StringIO rewound to the beginning, one CSV row per booking.
    """
    strIO = StringIO()
    fieldnames = [
        'ID', 'First Name', 'Last Name', 'Mobile', 'Phone', 'Vessel Rego',
        'Vessel Length', 'Sticker No', 'Year', 'Status', 'Booking Period',
        'Postal Address 1', 'Postal Address 2', 'Suburb', 'Post Code',
        'State', 'Country'
    ]
    writer = csv.writer(strIO)
    writer.writerow(fieldnames)
    for o in aadata:
        # Default every optional field to an empty string.
        country = ''
        postal_address_line_1 = ''
        postal_address_line_2 = ''
        post_code = ''
        state = ''
        vessel_length = ''
        vessel_rego = ''
        phone = ''
        mobile = ''
        sticker_no = ''
        suburb = ''
        if o['sticker_no']:
            sticker_no = o['sticker_no']
        details = o['details']
        if details:
            phone = details['phone']
            mobile = details['mobile']
            vessel_length = details['vessel_length']
            vessel_rego = details['vessel_rego']
            country = details['country']
            postal_address_line_1 = details['postal_address_line_1']
            postal_address_line_2 = details['postal_address_line_2']
            state = details['state']
            # BUG FIX: the original tested `'postcode' in details` but then
            # read details['post_code'], so the value was silently dropped
            # (or raised KeyError).  Use .get on the key actually read.
            post_code = details.get('post_code', '')
            suburb = details.get('suburb', '')
        writer.writerow([
            'AA' + str(o['id']),
            o['customer']['first_name'],
            o['customer']['last_name'],
            mobile,
            phone,
            vessel_rego,
            vessel_length,
            sticker_no,
            o['year'],
            o['status'],
            o['annual_booking_period_group_name'],
            postal_address_line_1,
            postal_address_line_2,
            suburb,
            post_code,
            state,
            country
        ])
    strIO.flush()
    strIO.seek(0)
    return strIO
def __str__(self): stream = StringIO() pprint.pprint(self.extractors, stream) stream.seek(0) template_data = stream.read() if template_data: return "%s[\n%s\n]" % (self.__class__.__name__, template_data) return "%s[none]" % (self.__class__.__name__)
def test_ping(mping):
    """ping() reports on the supplied stream that the server is alive."""
    args = argparse.Namespace(server='test.datafind.com:443')
    stream = StringIO()
    main.ping(args, stream)
    assert mping.called_with(host=args.server)
    stream.seek(0)
    message = stream.read().rstrip()
    assert message == (
        'LDRDataFindServer at test.datafind.com:443 is alive')
def upload_prox_config(self, config_file, prox_config_dict):
    """Render the prox config and upload it under /tmp on the remote host.

    prox can't handle spaces around ' = ', so the custom writer is used
    instead of ConfigParser's own serialization.  Returns the remote path.
    """
    rendered = StringIO(self.write_prox_config(prox_config_dict))
    rendered.seek(0)
    remote_path = os.path.join("/tmp", config_file)
    self.ssh_helper.put_file_obj(rendered, remote_path)
    return remote_path
def test_show(self):
    """Establish that the show method will properly route to an
    alternate file.
    """
    stream = StringIO()
    error = TowerCLIError("Fe fi fo fum; I smell the blood of an Englishman.")
    error.show(file=stream)
    stream.seek(0)
    self.assertIn("Fe fi fo fum;", stream.read())
def test_show(self):
    """Establish that the show method will properly route to an
    alternate file.
    """
    capture = StringIO()
    exc = TowerCLIError('Fe fi fo fum; I smell the blood of an Englishman.')
    exc.show(file=capture)
    capture.seek(0)
    output = capture.read()
    self.assertIn('Fe fi fo fum;', output)
def __deepcopy__(self, memo):
    """Deep-copy this parser by serializing and re-parsing its contents.

    See http://stackoverflow.com/questions/23416370
    /manually-building-a-deep-copy-of-a-configparser-in-python-2-7
    """
    serialized = StringIO()
    self.write(serialized)
    serialized.seek(0)
    duplicate = self.__class__()
    duplicate.readfp(serialized)
    return duplicate
def print_ascii_graph(model_):
    """
    pip install img2txt.py

    python -c
    """
    from PIL import Image  # NOQA
    from six.moves import StringIO
    # import networkx as nx
    import copy
    # Work on a copy so the caller's graph attributes are not mutated.
    model = copy.deepcopy(model_)
    assert model is not model_
    # model.graph.setdefault('graph', {})['size'] = '".4,.4"'
    model.graph.setdefault('graph', {})['size'] = '".3,.3"'
    model.graph.setdefault('graph', {})['height'] = '".3,.3"'
    # Render the graph to a PNG byte string via graphviz.
    pydot_graph = nx.to_pydot(model)
    png_str = pydot_graph.create_png(prog='dot')
    # NOTE(review): writing PNG bytes into StringIO only works on Python 2;
    # on Python 3 this would need io.BytesIO.
    sio = StringIO()
    sio.write(png_str)
    sio.seek(0)
    pil_img = Image.open(sio)  # NOQA
    logger.info('pil_img.size = %r' % (pil_img.size, ))
    # def print_ascii_image(pil_img):
    #     img2txt = ut.import_module_from_fpath('/home/joncrall/venv/bin/img2txt.py')
    #     import sys
    #     pixel = pil_img.load()
    #     width, height = pil_img.size
    #     bgcolor = None
    #     #fill_string =
    #     # img2txt.getANSIbgstring_for_ANSIcolor(img2txt.getANSIcolor_for_rgb(bgcolor))
    #     fill_string = "\x1b[49m"
    #     fill_string += "\x1b[K"  # does not move the cursor
    #     sys.stdout.write(fill_string)
    #     img_ansii_str = img2txt.generate_ANSI_from_pixels(pixel, width, height, bgcolor)
    #     sys.stdout.write(img_ansii_str)
    def print_ascii_image(pil_img):
        # Downscale, collapse RGB, and map brightness onto a character ramp.
        # https://gist.github.com/cdiener/10491632
        SC = 1.0
        GCF = 1.0
        WCF = 1.0
        img = pil_img
        S = (int(round(img.size[0] * SC * WCF * 3)), int(round(img.size[1] * SC)))
        img = np.sum(np.asarray(img.resize(S)), axis=2)
        logger.info('img.shape = %r' % (img.shape, ))
        img -= img.min()
        chars = np.asarray(list(' .,:;irsXA253hMHGS#9B&@'))
        img = (1.0 - img / img.max())**GCF * (chars.size - 1)
        logger.info('\n'.join((''.join(r) for r in chars[img.astype(int)])))
    print_ascii_image(pil_img)
    pil_img.close()
    pass
def test_parsing_file_with_no_fields_throws(self):
    """4.5.3.4 Print error file has no relevant fields."""
    stream = StringIO("<PubmedArticleSet><PubmedArticle>"
                      "</PubmedArticle></PubmedArticleSet>")
    captured = StringIO()
    self.patch(sys, "stderr", captured)
    parsexml.parse_element_tree(parsexml.file_to_element_tree(stream))
    captured.seek(0)
    self.assertThat(captured.read(), Contains("skipping"))
def load_cookies_file(cookies_file):
    """Load a cookies file into an in-memory buffer.

    The buffer is prefixed with the Netscape magic header, which the
    cookie loader requires as the first line of a cookie file.

    Returns a StringIO rewound to the beginning.
    """
    logging.debug('Loading cookie file %s into memory.', cookies_file)
    cookies = StringIO()
    # BUG FIX: the header must end with a newline, otherwise the first line
    # of the cookie file is fused onto it and both lines are corrupted.
    cookies.write('# Netscape HTTP Cookie File\n')
    # 'rU' was removed in Python 3.11; 'r' already uses universal newlines.
    # `with` guarantees the handle is closed.
    with open(cookies_file, 'r') as handle:
        cookies.write(handle.read())
    cookies.flush()
    cookies.seek(0)
    return cookies
def read_write(model):
    """reads/writes the model as a StringIO

    The model is written to an in-memory BDF, cleared, re-read from that
    buffer, uncross-referenced, and finally written again in large-field
    (16-character) format.
    """
    stream = StringIO()
    model.write_bdf(out_filename=stream, close=False)
    model.clear_attributes()
    stream.seek(0)
    model.read_bdf(stream, punch=True)
    model.uncross_reference()
    stream.seek(0)
    model.write_bdf(out_filename=stream, size=16, close=False)
def generateOracleParserFile(oracle_codes):
    """Write non-zero oracle activity codes to an in-memory CSV.

    Parameters
    ----------
    oracle_codes : dict
        Maps activity codes to amounts; zero amounts are skipped.

    Returns
    -------
    StringIO rewound to the beginning.
    """
    strIO = StringIO()
    writer = csv.writer(strIO)
    writer.writerow(['Activity Code', 'Amount'])
    # Only codes with a non-zero amount make it into the report.
    rows = [(code, amount) for code, amount in oracle_codes.items()
            if amount != 0]
    writer.writerows(rows)
    strIO.flush()
    strIO.seek(0)
    return strIO
def test_show_observatories(mfindobs):
    """show_observatories writes one observatory code per line."""
    mfindobs.return_value = ['A', 'B', 'C']
    args = argparse.Namespace(
        server='test.datafind.com:443',
        match='test',
    )
    stream = StringIO()
    main.show_observatories(args, stream)
    stream.seek(0)
    assert mfindobs.called_with(host=args.server, match=args.match)
    assert [line.rstrip() for line in stream.readlines()] == ['A', 'B', 'C']
def generateOracleParserFile(oracle_codes):
    """Return an in-memory CSV of activity codes with non-zero amounts."""
    output = StringIO()
    writer = csv.writer(output)
    writer.writerow(['Activity Code', 'Amount'])
    for code, amount in oracle_codes.items():
        # Zero amounts are omitted from the report.
        if amount == 0:
            continue
        writer.writerow([code, amount])
    output.flush()
    output.seek(0)
    return output
def test_save_image_from_pil(self):
    """An image rendered with PIL can be saved through the afs filesystem."""
    from PIL import Image, ImageDraw

    # Draw an X across a white 256x256 canvas.
    size = (256, 256)
    im = Image.new("RGB", size, "white")
    draw = ImageDraw.Draw(im)
    draw.line((0, 0) + im.size, fill=128)
    draw.line((0, im.size[1], im.size[0], 0), fill=128)

    with afs.connect('localtmp') as fs:
        # Serialize the image to an in-memory PNG, then store it.
        buff = StringIO()
        im.save(buff, format="PNG")
        buff.seek(0)
        fs.save('imagen.png', buff)
def test_postprocess_cache_sft():
    """SFT frame types get their .gwf extensions rewritten to .sft."""
    args = argparse.Namespace(
        type='TEST_1800SFT',
        lal_cache=False,
        names_only=False,
        frame_cache=False,
        gaps=None,
    )
    stream = StringIO()
    main.postprocess_cache(URLS, args, stream)
    stream.seek(0)
    assert stream.read() == OUTPUT_URLS.replace('.gwf', '.sft')
def test_inject_yum_mirrors(orig_repos_cfg, mirrors_dict, expected_repos_cfg, expected_repos_proxied_cfg):
    """Mirrors are injected into repo configs, with and without proxies."""
    # Default invocation: proxies not allowed.
    result = StringIO()
    inject_yum_mirrors(mirrors_dict, StringIO(orig_repos_cfg), result)
    result.seek(0)
    assert expected_repos_cfg == result.read()

    # Same input, but proxies explicitly allowed.
    result = StringIO()
    inject_yum_mirrors(mirrors_dict, StringIO(orig_repos_cfg), result, True)
    result.seek(0)
    assert expected_repos_proxied_cfg == result.read()
def booking_bpoint_settlement_report(_date):
    """Build a CSV settlement report of BPOINT and cash payments for *_date*.

    BPOINT rows are successful (response_code=0) transactions whose CRN
    starts with '0019' (test CRNs excluded); cash rows exclude the
    move_in/move_out transfer types.  Returns a rewound StringIO.
    """
    try:
        bpoint, cash = [], []
        bpoint.extend([x for x in BpointTransaction.objects.filter(created__date=_date,response_code=0,crn1__startswith='0019').exclude(crn1__endswith='_test')])
        cash = CashTransaction.objects.filter(created__date=_date,invoice__reference__startswith='0019').exclude(type__in=['move_out','move_in'])

        strIO = StringIO()
        fieldnames = ['Payment Date','Settlement Date','Confirmation Number','Name','Type','Amount','Invoice']
        writer = csv.writer(strIO)
        writer.writerow(fieldnames)

        # BPOINT card transactions: resolve each CRN back to its invoice
        # and, when one exists, to the booking behind that invoice.
        for b in bpoint:
            booking, invoice = None, None
            try:
                invoice = Invoice.objects.get(reference=b.crn1)
                try:
                    booking = BookingInvoice.objects.get(invoice_reference=invoice.reference).booking
                except BookingInvoice.DoesNotExist:
                    pass
                if booking:
                    b_name = u'{} {}'.format(booking.details.get('first_name',''),booking.details.get('last_name',''))
                    # Report the payment time in Perth local time.
                    created = timezone.localtime(b.created, pytz.timezone('Australia/Perth'))
                    writer.writerow([created.strftime('%d/%m/%Y %H:%M:%S'),b.settlement_date.strftime('%d/%m/%Y'),booking.confirmation_number,b_name.encode('utf-8'),str(b.action),b.amount,invoice.reference])
                else:
                    # No booking: emit the row with blank customer columns.
                    writer.writerow([b.created.strftime('%d/%m/%Y %H:%M:%S'),b.settlement_date.strftime('%d/%m/%Y'),'','',str(b.action),b.amount,invoice.reference])
            except Invoice.DoesNotExist:
                # Orphan transaction with no matching invoice - skip it.
                pass

        # Cash transactions reference their invoice directly; the created
        # date doubles as the settlement date for these rows.
        for b in cash:
            booking, invoice = None, None
            try:
                invoice = b.invoice
                try:
                    booking = BookingInvoice.objects.get(invoice_reference=invoice.reference).booking
                except BookingInvoice.DoesNotExist:
                    pass
                if booking:
                    b_name = u'{} {}'.format(booking.details.get('first_name',''),booking.details.get('last_name',''))
                    created = timezone.localtime(b.created, pytz.timezone('Australia/Perth'))
                    writer.writerow([created.strftime('%d/%m/%Y %H:%M:%S'),b.created.strftime('%d/%m/%Y'),booking.confirmation_number,b_name.encode('utf-8'),str(b.type),b.amount,invoice.reference])
                else:
                    writer.writerow([b.created.strftime('%d/%m/%Y %H:%M:%S'),b.created.strftime('%d/%m/%Y'),'','',str(b.type),b.amount,invoice.reference])
            except Invoice.DoesNotExist:
                pass

        strIO.flush()
        strIO.seek(0)
        return strIO
    except:
        raise
def PrintReleaseNotesDiff(release_notes_url, current_version, latest_version):
    """Prints the release notes diff based on your current version.

    If any of the arguments are None, a generic message will be printed telling
    the user to go to the web to view the release notes.  If the
    release_notes_url is also None, it will print the developers site page for
    the SDK.

    Args:
      release_notes_url: str, The URL to download the latest release notes from.
      current_version: str, The current version of the SDK you have installed.
      latest_version: str, The version you are about to update to.
    """
    # Only attempt a diff when we know where to fetch notes from and which
    # two versions to compare.
    if release_notes_url and current_version and latest_version:
        notes = ReleaseNotes.FromURL(release_notes_url)
        if notes:
            release_notes_diff = notes.Diff(latest_version, current_version)
        else:
            release_notes_diff = None
    else:
        release_notes_diff = None

    if not release_notes_diff:
        # We failed to print the release notes.  Send people to a nice web page
        # with the release notes.
        log.status.write(
            'For the latest full release notes, please visit:\n  {0}\n\n'.
            format(config.INSTALLATION_CONFIG.release_notes_url))
        return

    # A huge diff is more noise than signal; point at the web page instead.
    if len(release_notes_diff) > ReleaseNotes.MAX_DIFF:
        log.status.Print("""\
A lot has changed since your last upgrade.  For the latest full release notes,
please visit:
  {0}
""".format(config.INSTALLATION_CONFIG.release_notes_url))
        return

    log.status.Print("""\
The following release notes are new in this upgrade.
Please read carefully for information about new features, breaking changes,
and bugs fixed.  The latest full release notes can be viewed at:
  {0}
""".format(config.INSTALLATION_CONFIG.release_notes_url))

    # Concatenate the changed sections and render them as a text document.
    full_text = StringIO()
    for _, text in release_notes_diff:
        full_text.write(text)
        full_text.write('\n')
    full_text.seek(0)
    render_document.RenderDocument('text', full_text, log.status)
    log.status.Print()
def test_throw_exception_if_input_data_invalid(self):
    """4.5.5.3 Throw exception if input data is invalid."""
    fake_stdin = StringIO("invalid")
    self.patch(sys, "stdin", fake_stdin)
    # json's parse-error type differs between Python 2 and Python 3.
    if sys.version_info.major <= 2:
        with ExpectedException(ValueError):
            load.main()
    elif sys.version_info.major >= 3:
        with ExpectedException(json.decoder.JSONDecodeError):
            load.main()
def log_traceback(log_fn, prefix=''):
    """Log the current exception's stack traceback via *log_fn*.

    log_fn: One of the logging.Logger logging functions, such as
        logging.Logger.warning.
    prefix: A string placed in front of each log entry.  Default is no
        string.
    """
    import traceback
    captured = StringIO()
    traceback.print_exc(file=captured)
    captured.seek(0)
    for line in captured:
        log_fn("%s%s", prefix, line)
def print_ascii_graph(model_):
    """
    pip install img2txt.py

    python -c
    """
    from PIL import Image
    from six.moves import StringIO
    #import networkx as netx
    import copy
    # NOTE(review): the `netx` import above is commented out, yet
    # netx.to_pydot is called below - presumably networkx is imported as
    # `netx` at module level; confirm.
    # Work on a copy so the caller's graph attributes are not mutated.
    model = copy.deepcopy(model_)
    assert model is not model_
    # model.graph.setdefault('graph', {})['size'] = '".4,.4"'
    model.graph.setdefault('graph', {})['size'] = '".3,.3"'
    model.graph.setdefault('graph', {})['height'] = '".3,.3"'
    # Render the graph to a PNG byte string via graphviz.
    pydot_graph = netx.to_pydot(model)
    png_str = pydot_graph.create_png(prog='dot')
    # NOTE(review): writing PNG bytes into StringIO only works on Python 2;
    # Python 3 would need io.BytesIO.
    sio = StringIO()
    sio.write(png_str)
    sio.seek(0)
    pil_img = Image.open(sio)
    print('pil_img.size = %r' % (pil_img.size,))
    #def print_ascii_image(pil_img):
    #    img2txt = ut.import_module_from_fpath('/home/joncrall/venv/bin/img2txt.py')
    #    import sys
    #    pixel = pil_img.load()
    #    width, height = pil_img.size
    #    bgcolor = None
    #    #fill_string =
    #    # img2txt.getANSIbgstring_for_ANSIcolor(img2txt.getANSIcolor_for_rgb(bgcolor))
    #    fill_string = "\x1b[49m"
    #    fill_string += "\x1b[K"  # does not move the cursor
    #    sys.stdout.write(fill_string)
    #    img_ansii_str = img2txt.generate_ANSI_from_pixels(pixel, width, height, bgcolor)
    #    sys.stdout.write(img_ansii_str)
    def print_ascii_image(pil_img):
        # Downscale, collapse RGB, and map brightness onto a character ramp.
        #https://gist.github.com/cdiener/10491632
        SC = 1.0
        GCF = 1.0
        WCF = 1.0
        img = pil_img
        S = (int(round(img.size[0] * SC * WCF * 3)), int(round(img.size[1] * SC)))
        img = np.sum( np.asarray( img.resize(S) ), axis=2)
        print('img.shape = %r' % (img.shape,))
        img -= img.min()
        chars = np.asarray(list(' .,:;irsXA253hMHGS#9B&@'))
        img = (1.0 - img / img.max()) ** GCF * (chars.size - 1)
        print( "\n".join( ("".join(r) for r in chars[img.astype(int)]) ) )
    print_ascii_image(pil_img)
    pil_img.close()
    pass
def load_cookies_file(cookies_file):
    """
    Loads the cookies file.

    We pre-pend the file with the special Netscape header because the cookie
    loader is very particular about this string.

    Returns a StringIO rewound to the beginning.
    """
    cookies = StringIO()
    # BUG FIX: the header must end with a newline, otherwise the first line
    # of the cookie file is fused onto it and both lines are corrupted.
    cookies.write('# Netscape HTTP Cookie File\n')
    # 'rU' mode was removed in Python 3.11; 'r' already gives universal
    # newlines.  `with` guarantees the handle is closed.
    with open(cookies_file, 'r') as handle:
        cookies.write(handle.read())
    cookies.flush()
    cookies.seek(0)
    return cookies
def exceptions_csv():
    """Flask view: download the aggregated locust exceptions as a CSV file."""
    data = StringIO()
    writer = csv.writer(data)
    writer.writerow(["Count", "Message", "Traceback", "Nodes"])
    # One row per distinct exception, with the slave nodes that raised it.
    for exc in six.itervalues(runners.locust_runner.exceptions):
        nodes = ", ".join(exc["nodes"])
        writer.writerow([exc["count"], exc["msg"], exc["traceback"], nodes])

    data.seek(0)
    response = make_response(data.read())
    # Timestamp the filename and force a download via Content-disposition.
    file_name = "exceptions_{0}.csv".format(time())
    disposition = "attachment;filename={0}".format(file_name)
    response.headers["Content-type"] = "text/csv"
    response.headers["Content-disposition"] = disposition
    return response
def test_inject_yum_mirrors(
    orig_repos_cfg, mirrors_dict, expected_repos_cfg,
    expected_repos_proxied_cfg
):
    """Mirror injection is verified with proxies disallowed and allowed."""
    # Default invocation: proxies not allowed.
    output = StringIO()
    inject_yum_mirrors(mirrors_dict, StringIO(orig_repos_cfg), output)
    output.seek(0)
    assert expected_repos_cfg == output.read()

    # Same input, but proxies explicitly allowed.
    output = StringIO()
    inject_yum_mirrors(
        mirrors_dict, StringIO(orig_repos_cfg), output, True
    )
    output.seek(0)
    assert expected_repos_proxied_cfg == output.read()
def load_cookies_file(cookies_file):
    """
    Loads the cookies file.

    We pre-pend the file with the special Netscape header because the cookie
    loader is very particular about this string.

    Returns a StringIO rewound to the beginning.
    """
    logging.debug("Loading cookie file %s into memory.", cookies_file)
    cookies = StringIO()
    # BUG FIX: a trailing newline is required so the file's first line is
    # not fused onto the magic header.
    cookies.write("# Netscape HTTP Cookie File\n")
    # "rU" was removed in Python 3.11; plain "r" gives universal newlines.
    # `with` guarantees the handle is closed.
    with open(cookies_file, "r") as handle:
        cookies.write(handle.read())
    cookies.flush()
    cookies.seek(0)
    return cookies
def deepcopy(self):
    """Returns a deep copy of config object

    :returns: a copy of the config object
    """
    # Round-trip through an in-memory buffer: serialize this config,
    # then parse it into a brand-new parser instance.
    buffered = StringIO()
    self.write(buffered)
    buffered.seek(0)
    clone = ExtendedConfigParser()
    clone.readfp(buffered)
    return clone
def _consume_stderr(self, chan, call_line_handler_func=False):
    """
    Try to consume stderr data from chan if it's receive ready.
    """
    out = bytearray()
    stderr = StringIO()
    if chan.recv_stderr_ready():
        # Drain the channel in CHUNK_SIZE reads until no more data is
        # receive-ready or an empty read signals the stream is done.
        data = chan.recv_stderr(self.CHUNK_SIZE)
        if six.PY3 and isinstance(data, six.text_type):
            # Normalize to bytes so the bytearray append works on Python 3.
            data = data.encode('utf-8')
        out += data
        while data:
            ready = chan.recv_stderr_ready()
            if not ready:
                break
            data = chan.recv_stderr(self.CHUNK_SIZE)
            if six.PY3 and isinstance(data, six.text_type):
                data = data.encode('utf-8')
            out += data
        stderr.write(self._get_decoded_data(out))
    if self._handle_stderr_line_func and call_line_handler_func:
        # Feed each non-empty line (newline re-appended) to the handler.
        data = strip_shell_chars(stderr.getvalue())
        lines = data.split('\n')
        lines = [line for line in lines if line]
        for line in lines:
            # Note: If this function performs network operating no sleep is
            # needed, otherwise if a long blocking operating is performed,
            # sleep is recommended to yield and prevent from busy looping
            self._handle_stderr_line_func(line=line + '\n')
    stderr.seek(0)
    return stderr
def to(cls, format, **options): """Serialize YANG container to the given output `format`. """ # pyang output plugins need an output stream stream = StringIO() plugin = PYANG_PLUGINS[format] # register plugin options according to pyang script optparser = OptionParser() plugin.add_opts(optparser) # pyang output plugins also need a pyang.Context ctx = pyang.Context(DummyRepository()) # which offers plugin-specific options (just take defaults) ctx.opts = optparser.parse_args([])[0] # ready to serialize! plugin.emit(ctx, [cls.to_statement(**options)], stream) # and return the resulting data stream.seek(0) return stream.read()
class TestProgressMeter(TestCase):
    """Checks ProgressMeter output captured from stderr."""

    def setUp(self):
        # Buffer that stands in for stderr during each test.
        self.buf = StringIO()

    def tearDown(self):
        del self.buf

    def _assert_in(self, output, string):
        """Assert *string* occurs in *output*, with a readable failure message."""
        assert_(string in output,
                "Output '{0}' does not match required format '{1}'.".format(
                    output.replace('\r', '\\r'), string))

    def test_default_ProgressMeter(self, n=101, interval=10):
        # Default meter format; intermediate lines end in \r.
        format = "Step %(step)5d/%(numsteps)d [%(percentage)5.1f%%]\r"
        with RedirectedStderr(self.buf):
            pm = MDAnalysis.lib.log.ProgressMeter(n, interval=interval)
            for frame in range(n):
                pm.echo(frame)
        self.buf.seek(0)
        output = "".join(self.buf.readlines())
        self._assert_in(output, format % {'step': 1, 'numsteps': n,
                                          'percentage': 100./n})
        # last line always has \n instead of \r!
        self._assert_in(output, format.replace('\r', '\n') %
                        {'step': n, 'numsteps': n, 'percentage': 100.})

    def test_custom_ProgressMeter(self, n=51, interval=7):
        # Custom format with an extra per-frame quantity (rmsd).
        format = "RMSD %(rmsd)5.2f at %(step)03d/%(numsteps)4d [%(percentage)5.1f%%]\r"
        with RedirectedStderr(self.buf):
            pm = MDAnalysis.lib.log.ProgressMeter(n, interval=interval,
                                                  format=format, offset=1)
            for frame in range(n):
                rmsd = 0.02 * frame * (n+1)/float(n)  # n+1/n correction for 0-based frame vs 1-based counting
                pm.echo(frame, rmsd=rmsd)
        self.buf.seek(0)
        output = "".join(self.buf.readlines())
        self._assert_in(output, format % {'rmsd': 0.0, 'step': 1,
                                          'numsteps': n, 'percentage': 100./n})
        # last line always has \n instead of \r!
        self._assert_in(output, format.replace('\r', '\n') %
                        {'rmsd': 0.02*n, 'step': n, 'numsteps': n,
                         'percentage': 100.0})
def _askYesNo(question=None): message = StringIO() while True: askString = "\r%s? (yes|no): " % (question) logging.debug("asking user: %s" % askString) message.write(askString) message.seek(0) raw = raw_input(message.read()) if not len(raw): continue answer = raw[0].lower() logging.debug("user answered read: %s" % (answer)) if answer not in 'yn': continue return answer == 'y'
def test_single_at(self):
    """A quality line that starts with '@' must not be mistaken for a
    new record header."""
    source = StringIO("""@header1
AGCTTTTT
+
@IIIB+++
@header2
AGCTTTTT
+
IIIIB+++
""")
    target1 = StringIO()
    srf2fastq.convert_single_to_fastq(source, target1)

    target1.seek(0)
    lines1 = target1.readlines()
    self.assertEqual(len(lines1), 8)
    # The first record survives intact, including its '@'-prefixed
    # quality line.
    expected = ['@header1', 'AGCTTTTT', '+', '@IIIB+++']
    for got, want in zip(lines1[:4], expected):
        self.assertEqual(got.rstrip(), want)
def push(name, api, domain):
    """Push the current repository to the named openshift app on a
    throwaway branch and return the app's URL.
    """
    repo = git.Repo(os.getcwd())
    # Short random suffix keeps the temporary branch name unique.
    branch = "temp-{}".format(str(uuid.uuid4())[:8])
    set_deploy_branch(name, branch, api, domain)
    remote = git_url(name, api, domain)

    if is_dirty():
        # NOTE(review): local modifications are discarded without asking.
        print("Nuking changes.")
        git.reset(repo, "hard")

    with TempBranch(branch, repo, delete=True):
        # Write the openshift marker files and commit them on the
        # temporary branch.
        for fname, file_info in openshift_files.items():
            with open(fname, 'w') as f:
                f.write(file_info.get("contents", ""))
            repo.stage(fname)
        repo.do_commit("Commit openshift files")

        push_out = StringIO()
        push_err = StringIO()
        print("Pushing to openshift (may take a few minutes)")
        git.push(repo, remote, "refs/heads/{}".format(branch),
                 outstream=push_out, errstream=push_err)

        push_out.seek(0)
        out = push_out.read()
        # Success is detected by scraping the push output text.
        if not re.match(r'^Push to .* successful.', out):
            # NOTE(review): the failure banner is printed twice, once
            # before STDERR and once after STDOUT — presumably intended
            # as framing, but worth confirming.
            print("There was a failure while pushing")
            print("---BEGIN STDERR---")
            push_err.seek(0)
            print(push_err.read())
            print("---BEGIN STDOUT---")
            print(out)
            print("There was a failure while pushing")
        # Remove the marker files from the index and the working tree.
        # NOTE(review): under Python 3 this `map` is lazy and would delete
        # nothing; it only works because this code runs on Python 2,
        # where map is eager.
        git.rm(repo, openshift_files.keys())
        map(os.remove, openshift_files.keys())
    return get_app(name, api, domain)['app_url']
class TestStreamStructure(TestInMemoryStructure):
    """Runs the in-memory structure suite against the JSON stream-backed
    read/write stores."""

    def json(self, head):
        """Build the serialized JSON document expected on the stream."""
        list_entry = (q(self.LIST_TOKEN), jl(*[q(item) for item in self.LIST]))
        dict_pairs = [(q(key), q(value)) for key, value in self.DICT.items()]
        head_pairs = [(q(key), q(value)) for key, value in self.HEAD.items()]
        body = jd(
            list_entry,
            (q(self.DICT_TOKEN), jd(*dict_pairs)),
            (q(self.HEAD_TOKEN), jd(*head_pairs)),
        )
        return jl(body, q(head))

    def get_read_store(self):
        """Create a read store backed by a pre-populated stream."""
        self.stream = StringIO(self.json(self.HEAD_TOKEN))
        return structure.JSONStreamReadStructure(self.stream)

    def get_write_store(self):
        """Create a write store backed by an initially empty stream."""
        self.stream = StringIO()
        return structure.JSONStreamWriteStructure(self.stream)

    def verify_dict(self, received, expect):
        tools.assert_equal(dict(expect), received)

    def verify_list(self, received, expect):
        tools.assert_equal(list(expect), received)

    def verify_write(self, store, head):
        # The stream must stay untouched until close() flushes everything.
        tools.assert_equal(0, self.stream.tell())
        tools.assert_equal('', self.stream.read())
        store.close()
        self.stream.seek(0)
        tools.assert_equal(self.json(head), self.stream.read())

    def test_no_close(self):
        # Read stores expose no close() attribute at all.
        store = self.get_read_store()
        tools.assert_raises(AttributeError, getattr, store, 'close')
def csv_content(self):
    """
    Build and return a StringIO holding the form's current queryset
    rendered as UTF-8 encoded CSV (header row first), rewound to the
    start.
    """
    if not hasattr(self, 'CSV_COLUMNS'):
        raise NotImplementedError('Child classes of CsvForm must implement the CSV_COLUMNS constant')

    # Split the column spec into accessor paths and encoded headings.
    columns = [spec['column'] for spec in self.CSV_COLUMNS]
    headers = [spec['title'].encode('utf-8') for spec in self.CSV_COLUMNS]

    # One encoded row per object in the queryset.
    rows = [
        [unicode(csv_getvalue(obj, column)).encode('utf-8')
         for column in columns]
        for obj in self.get_queryset()
    ]

    content = StringIO()
    writer = csv.writer(content)
    writer.writerow(headers)
    writer.writerows(rows)
    content.seek(0)
    return content
def json_stream(data):
    """Serialize *data* to JSON and return it as a StringIO positioned
    at the start, ready for reading."""
    return StringIO(json.dumps(data))
def _getInputFromUser(param):
    """
    Resolve a value for *param* and store it in controller.CONF.

    The value comes from (in priority order): the param's default when
    USE_DEFAULT is set, the command-line flags, or an interactive prompt.
    Prompted/flag values are processed and validated; the loop repeats
    until validation passes or the user accepts a loosely-validated
    value.  KeyboardInterrupt propagates; any other error is re-raised
    as a generic read-input Exception.
    """
    loop = True
    userInput = None

    try:
        if param.USE_DEFAULT:
            # Unconditionally take the default; mask() hides secrets in the log.
            logging.debug("setting default value (%s) for key (%s)" % (mask(param.DEFAULT_VALUE), param.CONF_NAME))
            controller.CONF[param.CONF_NAME] = param.DEFAULT_VALUE
        else:
            while loop:
                # If the value was not supplied by the command line flags
                if param.CONF_NAME not in commandLineValues:
                    # Build the prompt text; a fresh buffer each pass so
                    # the prompt never accumulates across retries.
                    message = StringIO()
                    message.write(param.PROMPT)

                    # Show the option list unless a regexp validator is in
                    # play (free-form input).
                    val_list = param.VALIDATORS or []
                    if(validators.validate_regexp not in val_list
                       and param.OPTION_LIST):
                        message.write(" [%s]" % "|".join(param.OPTION_LIST))

                    if param.DEFAULT_VALUE:
                        message.write(" [%s] " % (str(param.DEFAULT_VALUE)))

                    message.write(": ")
                    message.seek(0)

                    # Mask password or hidden fields (note: the masked
                    # prompt uses only PROMPT, without options/default).
                    if (param.MASK_INPUT):
                        userInput = getpass.getpass("%s :" % (param.PROMPT))
                    else:
                        userInput = raw_input(message.read())
                else:
                    userInput = commandLineValues[param.CONF_NAME]

                # If DEFAULT_VALUE is set and user did not input anything
                if userInput == "" and len(str(param.DEFAULT_VALUE)) > 0:
                    userInput = param.DEFAULT_VALUE

                # Param processing
                userInput = process_param_value(param, userInput)

                # If param requires validation
                try:
                    validate_param_value(param, userInput)
                    controller.CONF[param.CONF_NAME] = userInput
                    loop = False
                except ParamValidationError:
                    if param.LOOSE_VALIDATION:
                        # If validation failed but LOOSE_VALIDATION is true, ask user
                        answer = _askYesNo("User input failed validation, "
                                           "do you still wish to use it")
                        loop = not answer
                        if answer:
                            # User insists on the invalid value; accept it.
                            controller.CONF[param.CONF_NAME] = userInput
                            continue
                        else:
                            # Drop the flag-supplied value so the next pass
                            # prompts interactively instead.
                            if param.CONF_NAME in commandLineValues:
                                del commandLineValues[param.CONF_NAME]
                    else:
                        # Delete value from commandLineValues so that we
                        # will prompt the user for input
                        if param.CONF_NAME in commandLineValues:
                            del commandLineValues[param.CONF_NAME]
                        loop = True
    except KeyboardInterrupt:
        # add the new line so messages wont be displayed in the same line
        # as the question
        print("")
        raise
    except:
        # NOTE(review): bare except — wraps every other failure (including
        # programming errors) in a generic read-input Exception after
        # logging the traceback.
        logging.error(traceback.format_exc())
        raise Exception(output_messages.ERR_EXP_READ_INPUT_PARAM % (param.CONF_NAME))
class ResultCollector(object):
    """Collector for test results.

    This handles creating :class:`~.TestResult` instances and handing
    them off to the registered result output handlers.

    """

    # Temporary compatibility with unittest's runner
    separator2 = separator2

    def __init__(self, buffer=False, failfast=False):
        # When True, stdout/stderr are captured per-test (see
        # _setup_stdout / _restore_stdout).
        self.buffer = buffer
        # When True, the @failfast-decorated add* methods stop the run on
        # the first failure/error.
        self.failfast = failfast
        self._result_handlers = []
        # Lazily-sorted view of _result_handlers; invalidated on register.
        self._sorted_handlers = None
        self.testsRun = 0
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.skipped = []
        self.failures = []
        self.errors = []
        self.shouldStop = False
        self._successful = True
        # Set when a test fails/errors so captured output is mirrored to
        # the real streams on restore.
        self._mirror_output = False
        self._stderr_buffer = None
        self._stdout_buffer = None
        self._original_stderr = sys.stderr
        self._original_stdout = sys.stdout
        # Maps _testcase_to_key(test) -> start datetime, for durations.
        self._test_timing = {}

    @property
    def _handlers(self):
        # Sort lazily; import here to avoid a circular import at module load.
        if self._sorted_handlers is None:
            from .plugins.result_handler import sort_result_handlers
            self._sorted_handlers = sort_result_handlers(self._result_handlers)
        return self._sorted_handlers

    @staticmethod
    def _testcase_to_key(test):
        # A test is identified by its class plus its method name.
        return (type(test), test._testMethodName)

    def _setup_stdout(self):
        """Hook stdout and stderr if buffering is enabled.
        """
        if self.buffer:
            # Buffers are created once and reused (truncated on restore).
            if self._stderr_buffer is None:
                self._stderr_buffer = StringIO()
                self._stdout_buffer = StringIO()
            sys.stdout = self._stdout_buffer
            sys.stderr = self._stderr_buffer

    def _restore_stdout(self):
        """Unhook stdout and stderr if buffering is enabled.

        """
        if self.buffer:
            if self._mirror_output:
                # Replay captured output on the real streams for
                # failed/errored tests.
                output = sys.stdout.getvalue()
                error = sys.stderr.getvalue()
                if output:
                    if not output.endswith('\n'):
                        output += '\n'
                    self._original_stdout.write(STDOUT_LINE % output)
                if error:
                    if not error.endswith('\n'):
                        error += '\n'
                    self._original_stderr.write(STDERR_LINE % error)
            sys.stdout = self._original_stdout
            sys.stderr = self._original_stderr
            # Reset buffers for the next test.
            self._stdout_buffer.seek(0)
            self._stdout_buffer.truncate()
            self._stderr_buffer.seek(0)
            self._stderr_buffer.truncate()

    def printErrors(self):  # pragma: no cover
        # FIXME: Remove
        pass

    def add_result_handler(self, handler):
        """Register a new result handler.
        """
        self._result_handlers.append(handler)
        # Reset sorted handlers
        if self._sorted_handlers:
            self._sorted_handlers = None

    def startTest(self, test, start_time=None):
        """Indicate that an individual test is starting.

        Parameters
        ----------
        test : unittest.TestCase
            The test that is starting.
        start_time : datetime
            An internal parameter to allow the parallel test runner to
            set the actual start time of a test run in a subprocess.

        """
        if start_time is None:
            start_time = datetime.utcnow()
        self._test_timing[self._testcase_to_key(test)] = start_time
        self._mirror_output = False
        self._setup_stdout()
        self.testsRun += 1
        for handler in self._handlers:
            handler.start_test(test)

    def stopTest(self, test):
        """Indicate that an individual test has completed.

        Parameters
        ----------
        test : unittest.TestCase
            The test that has completed.

        """
        for handler in self._handlers:
            handler.stop_test(test)
        self._restore_stdout()
        self._mirror_output = False

    def startTestRun(self):
        """Indicate that the test run is starting.

        """
        for handler in self._handlers:
            handler.start_test_run()

    def stopTestRun(self):
        """Indicate that the test run has completed.

        """
        for handler in self._handlers:
            handler.stop_test_run()

    def add_result(self, result):
        """Add an already-constructed :class:`~.TestResult` to this
        :class:`~.ResultCollector`.

        This may be used when collecting results created by other
        ResultCollectors (e.g. in subprocesses).

        """
        for handler in self._handlers:
            handler(result)
        # Any non-successful status makes the whole run unsuccessful.
        if self._successful and result.status not in _successful_results:
            self._successful = False

    def _handle_result(self, test, status, exception=None, message=None):
        """Create a :class:`~.TestResult` and add it to this
        :class:`~ResultCollector`.

        Parameters
        ----------
        test : unittest.TestCase
            The test that this result will represent.
        status : haas.result.TestCompletionStatus
            The status of the test.
        exception : tuple
            ``exc_info`` tuple ``(type, value, traceback)``.
        message : str
            Optional message associated with the result (e.g. skip
            reason).

        """
        if self.buffer:
            stderr = self._stderr_buffer.getvalue()
            stdout = self._stdout_buffer.getvalue()
        else:
            stderr = stdout = None

        started_time = self._test_timing.get(self._testcase_to_key(test))
        if started_time is None and isinstance(test, ErrorHolder):
            # ErrorHolders (collection-time failures) never went through
            # startTest, so fabricate a start time.
            started_time = datetime.utcnow()
        elif started_time is None:
            raise RuntimeError(
                'Missing test start! Please report this error as a bug in '
                'haas.')

        completion_time = datetime.utcnow()
        duration = TestDuration(started_time, completion_time)
        result = TestResult.from_test_case(
            test,
            status,
            duration=duration,
            exception=exception,
            message=message,
            stdout=stdout,
            stderr=stderr,
        )
        self.add_result(result)
        return result

    @failfast
    def addError(self, test, exception):
        """Register that a test ended in an error.

        Parameters
        ----------
        test : unittest.TestCase
            The test that has completed.
        exception : tuple
            ``exc_info`` tuple ``(type, value, traceback)``.

        """
        result = self._handle_result(
            test, TestCompletionStatus.error, exception=exception)
        self.errors.append(result)
        self._mirror_output = True

    @failfast
    def addFailure(self, test, exception):
        """Register that a test ended with a failure.

        Parameters
        ----------
        test : unittest.TestCase
            The test that has completed.
        exception : tuple
            ``exc_info`` tuple ``(type, value, traceback)``.

        """
        result = self._handle_result(
            test, TestCompletionStatus.failure, exception=exception)
        self.failures.append(result)
        self._mirror_output = True

    def addSuccess(self, test):
        """Register that a test ended in success.

        Parameters
        ----------
        test : unittest.TestCase
            The test that has completed.

        """
        self._handle_result(test, TestCompletionStatus.success)

    def addSkip(self, test, reason):
        """Register that a test that was skipped.

        Parameters
        ----------
        test : unittest.TestCase
            The test that has completed.
        reason : str
            The reason the test was skipped.

        """
        result = self._handle_result(
            test, TestCompletionStatus.skipped, message=reason)
        self.skipped.append(result)

    def addExpectedFailure(self, test, exception):
        """Register that a test that failed and was expected to fail.

        Parameters
        ----------
        test : unittest.TestCase
            The test that has completed.
        exception : tuple
            ``exc_info`` tuple ``(type, value, traceback)``.

        """
        result = self._handle_result(
            test, TestCompletionStatus.expected_failure, exception=exception)
        self.expectedFailures.append(result)

    @failfast
    def addUnexpectedSuccess(self, test):
        """Register a test that passed unexpectedly.

        Parameters
        ----------
        test : unittest.TestCase
            The test that has completed.

        """
        result = self._handle_result(
            test, TestCompletionStatus.unexpected_success)
        self.unexpectedSuccesses.append(result)

    def wasSuccessful(self):
        """Return ``True`` if the run was successful.

        """
        return self._successful

    def stop(self):
        """Set the ``shouldStop`` flag, used by the test cases to
        determine if they should terminate early.

        """
        self.shouldStop = True
describe "mainline": it "catches DelfickError errors and prints them nicely": fle = StringIO() class MyApp(App): def execute(slf, args_obj, args_dict, extra_args, handler): raise DelfickError("Well this should work", blah=1, _errors=[DelfickError("SubError", meh=2), DelfickError("SubError2", stuff=3)]) try: MyApp().mainline([], print_errors_to=fle) assert False, "This should have failed" except SystemExit as error: self.assertEqual(error.code, 1) fle.flush() fle.seek(0) self.assertEqual(fle.read(), dedent(""" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Something went wrong! -- DelfickError \t"Well this should work"\tblah=1 errors: ======= \t"SubError"\tmeh=2 ------- \t"SubError2"\tstuff=3 ------- """)) it "Converts KeyboardInterrupt into a UserQuit": fle = StringIO()
def booking_refunds(start, end):
    """Build a CSV report (returned as a rewound StringIO) of all booking
    refunds between *start* and *end*.

    Covers both manual/cash refunds (CashTransaction) and card refunds
    (BpointTransaction via the invoice referenced by crn1), one row per
    matching refund line.

    Bug fixes vs. the previous version:
    - In the cash branch, ``invoice`` was only assigned inside the
      BookingInvoice lookup ``try``; on DoesNotExist it stayed None and
      ``invoice.order`` raised AttributeError.  The invoice now comes
      straight from the transaction.
    - Both "no booking" rows omitted the Reason column, emitting 8 values
      against the 9-column header and shifting Invoice into Reason.
    (The former ``try/except: raise`` wrapper was a no-op and is removed.)
    """
    bpoint = list(BpointTransaction.objects.filter(
        settlement_date__gte=start, settlement_date__lte=end,
        action='refund', response_code=0))
    cash = list(CashTransaction.objects.filter(
        created__gte=start, created__lte=end, type='refund'))

    strIO = StringIO()
    fieldnames = ['Confirmation Number', 'Name', 'Type', 'Amount',
                  'Oracle Code', 'Date', 'Refunded By', 'Reason', 'Invoice']
    writer = csv.writer(strIO)
    writer.writerow(fieldnames)

    def _refund_meta(track_type, refund_id):
        # Resolve who performed the refund and why, if it was tracked.
        try:
            track = TrackRefund.objects.get(type=track_type, refund_id=refund_id)
        except TrackRefund.DoesNotExist:
            return '', ''
        name = track.user.get_full_name() if track.user.get_full_name() else track.user.email
        return name, track.details

    def _booking_for(invoice):
        # Booking linked to the invoice, or None if there isn't one.
        try:
            return BookingInvoice.objects.get(invoice_reference=invoice.reference).booking
        except BookingInvoice.DoesNotExist:
            return None

    def _write_rows(invoice, booking, refund_kind, txn_id, created, track_type):
        # Emit one row per refund line on the invoice matching this transaction.
        for line in invoice.order.lines.all():
            for k, v in line.refund_details[refund_kind].items():
                if k != str(txn_id):
                    continue
                name, reason = _refund_meta(track_type, k)
                row_type = 'Manual' if refund_kind == 'cash' else 'Card'
                if booking:
                    b_name = '{} {}'.format(booking.details.get('first_name', ''),
                                            booking.details.get('last_name', ''))
                    writer.writerow([booking.confirmation_number, b_name, row_type, v,
                                     line.oracle_code, created.strftime('%d/%m/%Y'),
                                     name, reason, invoice.reference])
                else:
                    writer.writerow(['', '', row_type, v,
                                     line.oracle_code, created.strftime('%d/%m/%Y'),
                                     name, reason, invoice.reference])

    # Manual (cash) refunds — only for invoices belonging to system 0516.
    for e in cash:
        if e.invoice.system != '0516':
            continue
        invoice = e.invoice
        _write_rows(invoice, _booking_for(invoice), 'cash', e.id, e.created, 1)

    # Card refunds — resolved through the invoice referenced by crn1.
    for b in bpoint:
        try:
            invoice = Invoice.objects.get(reference=b.crn1)
        except Invoice.DoesNotExist:
            continue
        if invoice.system != '0516':
            continue
        _write_rows(invoice, _booking_for(invoice), 'card', b.id, b.created, 2)

    strIO.flush()
    strIO.seek(0)
    return strIO