def _dummy_rubric(self):
    """Randomly generate a rubric and pick the first option of each criterion.

    Returns:
        tuple: (rubric dict, options_selected dict mapping each criterion
        name to its chosen option name)
    """
    words = loremipsum.Generator().words
    criteria = []
    chosen = {}
    for crit_idx in range(self.NUM_CRITERIA):
        criterion = {
            'name': words[crit_idx],
            'prompt': " ".join(loremipsum.get_sentences(2)),
            'order_num': crit_idx,
            'options': [],
        }
        criterion['options'] = [
            {
                'order_num': opt_idx,
                'points': opt_idx,
                'name': words[opt_idx],
                'explanation': " ".join(loremipsum.get_sentences(2)),
            }
            for opt_idx in range(self.NUM_OPTIONS)
        ]
        criteria.append(criterion)
        # always select the first (zero-point) option
        chosen[criterion['name']] = criterion['options'][0]['name']
    return {'criteria': criteria}, chosen
def handle(self, *args, **options): amount = 1 if len(args) > 0: try: amount = int(args[0]) except TypeError: print 'Number of posts required' self.categories = self.generate_categories() for counter in xrange(amount): data = { 'title': ''.join(get_sentences(1)), 'content': ' '.join(get_sentences(random.randint(8, 16))).replace('.', ' ', 6), 'user': self.get_user(), } post = BlogPost.objects.create(**data) filedata = self.get_remote_image() file_path = 'uploads/blog/%s.jpg' % filedata.name uploadedfile = default_storage.save(file_path, filedata) post.featured_image = file_path for x in xrange(random.randint(1, 3)): cat = self.get_category() if cat: post.categories.add(cat) post.save()
def handle(self, *args, **options):
    """Reset the database and seed it with a host user, 12 events, and a
    top-level post plus 10 replies per event."""
    # Remove old test data
    call_command('resetdb')
    host = User(email='*****@*****.**', username='******')
    host.set_password('password')
    host.save()
    details = UserDetails(user=host, display_name="Sam")
    # BUG FIX: the UserDetails row was constructed but never persisted
    details.save()
    for i in range(0, 12):
        name = "Auto Generated Event {}".format(i)
        date = datetime.now() + timedelta(days=i)
        description = ''.join(loremipsum.get_sentences(15))
        event = Event(name=name, description=description, location="UNSW",
                      host=host, date=date)
        event.save()
        # Create a toplevel post
        post_text = ''.join(loremipsum.get_sentences(15))
        post = Post(author=host, eventID=event, date=date, message=post_text)
        post.save()
        # Generate some replies
        for j in range(0, 10):
            reply_text = ''.join(loremipsum.get_sentences(5))
            post = Post(author=host, eventID=event, date=date,
                        message=reply_text)
            post.save()
def _dummy_rubric(self):
    """Build a random rubric plus a selection dict that maps every
    criterion name to its first option's name.

    Returns:
        tuple: (rubric, options_selected)
    """
    word_pool = loremipsum.Generator().words
    rubric = {'criteria': []}
    options_selected = {}
    for c_num in range(self.NUM_CRITERIA):
        option_rows = []
        # one option per point value 0..NUM_OPTIONS-1
        o_num = 0
        while o_num < self.NUM_OPTIONS:
            option_rows.append({
                'order_num': o_num,
                'points': o_num,
                'name': word_pool[o_num],
                'explanation': " ".join(loremipsum.get_sentences(2)),
            })
            o_num += 1
        entry = {
            'name': word_pool[c_num],
            'prompt': " ".join(loremipsum.get_sentences(2)),
            'order_num': c_num,
            'options': option_rows,
        }
        rubric['criteria'].append(entry)
        options_selected[entry['name']] = option_rows[0]['name']
    return rubric, options_selected
def test_post_comment_pagination(self):
    """A very long body must be split across multiple reddit comments,
    each chained as a reply to the previous one."""
    comment = generate_comment()
    post = generate_submission(reply=comment)
    # Tons of text that needs paginating
    body = '\n\n'.join(
        [' '.join(loremipsum.get_sentences(20)) for _ in range(50)])
    # Also mock up having a _really_ long line
    body += '\n\n'
    body += ' '.join(loremipsum.get_sentences(1000))
    # TADA!! The main event:
    last_comment = post_comment(repliable=post, body=body)
    # Make sure we replied to the previous comment instead of having them
    # all reply as top-level comments
    comment.reply.assert_called_once()
    post.reply.assert_called_once()
    assert last_comment.kind == 't1', 'Return type is not Comment'
    # Find depth of comments by walking parent links; kind 't1' marks a
    # comment, anything else ends the chain at the submission
    ptr = last_comment
    num_comments = 0
    for i in range(100):  # hard cap guards against a cyclic parent chain
        num_comments = i
        if ptr.kind != 't1':
            break
        else:
            ptr = ptr.parent()
    assert num_comments > 1, "Comments did not need to be paginated"
def trap_random_page():
    """Render the random-content trap page with a random title and 1-5
    sentences of filler body text."""
    page_title = get_sentences(1, False)[0]
    page_body = get_sentences(random.randint(1, 5))
    return render_template(
        'traps/random.html',
        title=page_title,
        content=page_body,
        config=config,
    )
def create_entry(permitted_user_ids, private):
    """Build one random feed entry: text only, image only, or both.

    Returns a (sentence, filename) pair; filename is None when the entry
    carries no image.
    """
    # 3 different types of posts:
    # * text only
    # * image only
    # * text and image
    from models import FileWrapper, MAX_CONTENT_LENGTH

    def shorten(data):
        # truncate over-long text, marking the cut with '..'
        if len(data) > MAX_CONTENT_LENGTH:
            return data[:MAX_CONTENT_LENGTH] + '..'
        return data

    def random_text():
        return shorten(' '.join(get_sentences(random.randint(1, 4))))

    post_type = random.randint(0, 2)
    sample_images = ['panda.jpg', 'icon.pNg', 'panda.png', 'panda.jpeg']
    if post_type == 0:
        sentence, image = random_text(), None
    elif post_type == 1:
        sentence, image = '', random.choice(sample_images)
    else:
        sentence, image = random_text(), random.choice(sample_images)
    filename = None
    if image:
        image_path = os.path.join(app.root_path, '..', 'tests',
                                  'test_data', image)
        with open(image_path, 'rb') as fp:
            wrapper = FileWrapper.create(FileStorage(fp),
                                         permitted_user_ids, private)
            filename = wrapper.get_filename()
    return sentence, filename
def generate_categories(self):
    """Ensure at least `categories_amount` blog categories exist, creating
    top-level categories (each with 0-6 nested children) as needed.

    Returns a queryset of all categories.
    """
    categories = BlogCategory.objects.all()
    missing = self.categories_amount - categories.count()
    if missing <= 0:
        return categories
    for _ in xrange(missing):
        cat = BlogCategory.objects.create(
            title=''.join(get_sentences(1))[:15])
        cat.save()
        for _ in xrange(random.randint(0, 6)):
            nested = BlogCategory.objects.create(
                title=''.join(get_sentences(1))[:15])
            relation, _created = BlogCategoryParentRelation.objects.get_or_create(
                parent=cat)
            relation.children.add(nested)
    return BlogCategory.objects.all()
def step_impl(context, amount):
    """Type `amount` generated sentences into the nadej text field."""
    from loremipsum import get_sentences
    context.nadej.text(u"".join(get_sentences(int(amount))))
def __init__(self, work_queue):
    """Set up one randomly-pinned API client, a cluster ES client, the
    work queue, and a pre-generated pool of sentences."""
    super(Worker, self).__init__()
    # pick one ES host at random for the raw API client
    host = es_hosts[random.randint(0, len(es_hosts) - 1)].get('host')
    self.api_client = APIClient('http://%s:9200' % host)
    self.work_queue = work_queue
    self.es = Elasticsearch(es_hosts)
    # pre-generate text so the worker does not pay generation cost per task
    self.sentence_list = loremipsum.get_sentences(1000)
    # NOTE(review): '[A-z]' also matches [ \ ] ^ _ ` — presumably
    # '[A-Za-z]' was intended; kept as-is to preserve behavior.
    self.re_first_word = re.compile('([A-z]+)')
def trap_depth(current_path=""):
    """Render the depth-trap page, linking one level deeper each visit;
    redirect to the fail page once the configured max depth is exceeded."""
    # Starts at 2 because of initial depth: trap/depth/
    if current_path == "":
        current_depth = 2
        next_depth = str(current_depth + 1)
        next_path = "3/"
    else:
        current_depth = len(current_path.split("/")) + 1
        next_depth = str(current_depth + 1)
        next_path = current_path + next_depth + "/"
    max_depth = config.trap_depth_max_depth
    # guard clause: past the limit -> fail redirect (-1 means unlimited)
    if max_depth != -1 and current_depth > max_depth:
        message = "You have reached max depth (config.trap_depth_max_depth): " + str(
            max_depth)
        return redirect(url_for('fail', challenge="depth", message=message))
    title = "current depth: " + str(current_depth)
    if max_depth != -1:
        title += ", max_depth: " + str(max_depth)
    return render_template('traps/depth.html', title=title,
                           content=get_sentences(random.randint(1, 5)),
                           config=config, current_path=current_path,
                           next_path=next_path, next_depth=next_depth)
def buildNotes():
    '''After the users list is generated, Notes can be built.

    Notes are generated with random text; the user writing the note is
    chosen randomly, and the posted date is a random day within the
    configured month/year. Every other show gets between 1 and 4 notes.
    Appends fixture dicts to the module-level `note_json`.
    '''
    SENTENCECOUNT = 1250  # pool size; each note consumes 5 sentences
    #LINESPERPAGE = 10
    STEP = 2              # every other show gets notes
    NUMUSERS = 6          # user pks are 1..NUMUSERS
    RANDYEAR = 2018
    RANDMONTH = 4
    sentences_list = []
    sentences = get_sentences(SENTENCECOUNT, start_with_lorem=True)
    # this routine uses "from loremipsum import get_sentences"
    for sentence in sentences:
        # strip the b'...' bytes-literal artifacts from the generated text
        line1 = re.sub("b'", '', sentence)
        line2 = re.sub("'", '', line1)
        sentences_list.append(line2)
    recNum = 0
    for num in range(1, len(show_list), STEP):
        for innerNum in range(randint(1, 4)):
            recNum += 1
            rand = randint(1, NUMUSERS)
            pop1 = sentences_list.pop()  # one sentence for the title
            myDate = randomdate(RANDYEAR, RANDMONTH)
            #print(myDate)
            # four more sentences concatenated as the note body
            nText = sentences_list.pop() + sentences_list.pop() + sentences_list.pop() + sentences_list.pop()
            note_json.append({"model": "api.note", "pk": recNum,
                              "fields": {"show": num, "user": rand,
                                         "title": pop1, "text": nText,
                                         "posted_date": str(myDate)}})
def confirm(portfolioid, message):
    """Send a confirmation for a portfolio; rejected ('RECHAZADO')
    portfolios get 3-5 random 'reason' sentences attached."""
    reasons = []
    if message == "RECHAZADO":
        reasons = get_sentences(random.choice([3, 4, 5]))
    values = {
        "Message": message,
        "PortfolioID": portfolioid,
        "Reason": reasons,
        "Origin": args.university
    }
    url = "{0}/bob/confirmation".format(apps[args.app])
    print("Sending {0} confirmation for '{1}'...".format(
        message, portfolioid))
    response = send_request(url, values)
    try:
        print(response.content)
    except Exception:
        # was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; narrowed to Exception
        print("Not a JSON response")
        print("Failed.")
def loremipsum(line_count, para=True, classes='', style=''):
    """Return loremipsum paragraphs with line_count sentences.

    If line_count is iterable, returns multiple concatenated paragraphs.
    Optionally sets classes and style.

    .. sourcecode:: python

        LI(3)       # paragraph with 3 sentences
        LI((3, 5))  # paragraphs with 3 and 5 sentences

    :param line_count: number of sentences in paragraph
    :type line_count: int or long or iterable
    :param para: if True wrap output in a paragraph, default True
    :type para: bool
    :param classes: classes to add to output
    :type classes: str or unicode or DWidget
    :param style: styles to add to output
    :type style: str or unicode or DWidget
    :return: HTML for loremipsum
    :rtype: unicode
    """
    if isinstance(line_count, Iterable):
        # recurse once per count and concatenate the rendered paragraphs
        return ''.join(
            loremipsum(lc, para, classes, style) for lc in line_count)
    content = ' '.join(li.get_sentences(line_count))
    if para:
        template = '<p class="{classes}" style="{style}">{content}</p>'
    elif classes or style:
        template = '<span class="{classes}" style="{style}">{content}</span>'
    else:
        template = '{content}'
    return template.format(content=content, classes=classes, style=style)
def gen_projects(): # get the adjectives adjs = [] f = open('adjective-list.txt', 'r') for line in f: s = line.rstrip('\n') adjs.append(s) f.close() # get the nouns nouns = [] f = open('noun-list.txt', 'r') for line in f: s = line.rstrip('\n') nouns.append(s) f.close() # get sentences sentences_list = get_sentences(len(adjs)) print len(adjs), len(nouns), len(sentences_list) for i in adjs: t = i + nouns[random.randint(0, len(nouns) - 1)] d = sentences_list[random.randint(0, len(sentences_list) - 1)] p = Project(title=t, description=d) p.save()
def build(project, **kwargs):
    """Factory: create and stage a Build for `project`, numbered one past
    the project's current maximum build number."""
    defaults = {
        'label': get_sentences(1)[0][:128],
        'status': Status.finished,
        'result': Result.passed,
        'repository': project.repository,
        'duration': random.randint(10000, 100000),
        'target': uuid4().hex,
    }
    for key, value in defaults.items():
        kwargs.setdefault(key, value)
    # next build number = max existing number for this project + 1
    cur_no_query = db.session.query(
        coalesce(func.max(Build.number), 0)
    ).filter(
        Build.project_id == project.id,
    ).scalar()
    kwargs['project'] = project
    kwargs['repository_id'] = kwargs['repository'].id
    kwargs['project_id'] = kwargs['project'].id
    kwargs['author_id'] = kwargs['author'].id
    new_build = Build(number=cur_no_query + 1, **kwargs)
    db.session.add(new_build)
    return new_build
def __init__(self, work_queue):
    """Initialise the worker: an API client pinned to one random ES host,
    a cluster-wide ES client, and a shared sentence pool."""
    super(Worker, self).__init__()
    chosen = random.randint(0, len(es_hosts) - 1)
    self.api_client = APIClient(
        'http://%s:9200' % es_hosts[chosen].get('host'))
    self.work_queue = work_queue
    self.es = Elasticsearch(es_hosts)
    self.sentence_list = loremipsum.get_sentences(1000)
    # NOTE(review): '[A-z]' matches a few punctuation chars between 'Z'
    # and 'a' as well; kept verbatim for behavioral compatibility
    self.re_first_word = re.compile('([A-z]+)')
def trap_depth(current_path=""):
    """Depth-trap endpoint: each rendered page links one level deeper;
    past the configured maximum the visitor is redirected to 'fail'."""
    # Starts at 2 because of initial depth: trap/depth/
    if current_path == "":
        current_depth = 2
        next_depth = str(current_depth + 1)
        next_path = "3/"
    else:
        current_depth = len(current_path.split("/")) + 1
        next_depth = str(current_depth + 1)
        next_path = current_path + next_depth + "/"
    within_limit = (config.trap_depth_max_depth == -1
                    or current_depth <= config.trap_depth_max_depth)
    if within_limit:
        title_parts = ["current depth: " + str(current_depth)]
        if config.trap_depth_max_depth != -1:
            title_parts.append(
                "max_depth: " + str(config.trap_depth_max_depth))
        return render_template(
            'traps/depth.html',
            title=", ".join(title_parts),
            content=get_sentences(random.randint(1, 5)),
            config=config,
            current_path=current_path,
            next_path=next_path,
            next_depth=next_depth
        )
    message = "You have reached max depth (config.trap_depth_max_depth): " + str(
        config.trap_depth_max_depth)
    return redirect(url_for('fail', challenge="depth", message=message))
def create_files(number):
    """Create `number` files with random names and extensions, filled with
    a random count of generated sentences (one per line)."""
    for i in range(number):
        size = rng.randint(0, max_size)
        # NOTE(review): if `rng` is the stdlib `random` module, randint()
        # includes the upper bound, so idx/ext can equal len(...) and raise
        # IndexError; numpy's randint is exclusive. Confirm which rng this is.
        idx = rng.randint(0, len(file_names))
        ext = rng.randint(0, len(file_ext))
        with open("{}{}".format(file_names[idx], file_ext[ext]), 'w') as f:
            f.write('\n'.join(get_sentences(size)))
def gen_car_parts():
    """Create one Part per (distinct car model, part name) pair and fit it
    to every car of that model via CarPart rows."""
    print 'Generating parts and car fittings.'
    f = open('sample_data/parts.txt', 'r')
    data = []
    for line in f:
        data.append(line.rstrip('\n'))
    f.close()
    car_models = Car.objects.all().distinct('model')
    print car_models.count(), 'distinct models'
    # pool of unique part numbers; used entries are deleted so numbers
    # are never reused across parts
    part_nums = range(PART_NUM_RANGE)
    for car in car_models:
        for part_name in data:
            r = random.randint(0, len(part_nums) - 1)
            p_num = part_nums[r]
            del part_nums[r]  # don't reuse part nums
            # NOTE(review): assumes models*parts <= PART_NUM_RANGE; an
            # exhausted pool would make randint() fail — confirm sizing
            part = Part(num=p_num, name=part_name, desc=get_sentences(1)[0])
            part.save()
            cars = Car.objects.filter(model=car.model)
            for c in cars:
                car_part = CarPart(part=part, car=c)
                car_part.save()
        print 'Generated parts for', car.make.name, car.model
    print 'Parts in db', Part.objects.all().count()
    print 'Fittings in db', CarPart.objects.all().count()
def build(project, **kwargs):
    """Factory: create a Build plus a matching green_build Event, staging
    both on the session; the Build is returned."""
    defaults = (
        ('collection_id', uuid4().hex),
        ('label', get_sentences(1)[0][:128]),
        ('status', Status.finished),
        ('result', Result.passed),
        ('duration', random.randint(10000, 100000)),
        ('target', uuid4().hex),
    )
    for key, value in defaults:
        kwargs.setdefault(key, value)
    if 'source' not in kwargs:
        kwargs['source'] = source(project.repository)
    kwargs['project'] = project
    kwargs['project_id'] = kwargs['project'].id
    kwargs['author_id'] = kwargs['author'].id
    new_build = Build(**kwargs)
    db.session.add(new_build)
    event = Event(
        type=EventType.green_build,
        item_id=new_build.id,
        data={'status': 'success'}
    )
    db.session.add(event)
    return new_build
def step_impl(context, amount):
    """Fill the nadej field with `amount` generated sentences."""
    from loremipsum import get_sentences
    sentences = get_sentences(int(amount))
    joined = u"".join(sentences)
    context.nadej.text(joined)
def handle(self, *args, **options):
    """Seed ~60 random events, each with a Badge posted by a random
    existing 'user*' account and a random image from MEDIA_ROOT/badges."""
    # helpers for random identifiers
    rand_len = lambda: random.randint(4, 12)
    rand_string = lambda: ''.join(random.choice(string.ascii_lowercase)
                                  for i in range(rand_len()))
    # (removed a large commented-out user-creation block that duplicated
    # functionality and referenced redacted credentials)
    import os
    base_path = os.path.join(settings.MEDIA_ROOT, "badges")
    files = [f for f in os.listdir(base_path)
             if os.path.isfile(os.path.join(base_path, f))]
    # generate badges and events
    for i in range(1, 60):
        event = Eveniment.objects.create(nume=rand_string(),
                                         an=random.randint(1996, 2015))
        user = User.objects.filter(
            username__startswith="user").order_by('?')[0]
        amintire = " ".join(get_sentences(rand_len()))
        imagine = "badges/%s" % files[random.randint(0, len(files) - 1)]
        # (removed unused local `implicit_eveniment`; the literal True was
        # what was actually passed)
        Badge.objects.create(poster=user, amintire=amintire,
                             implicit_eveniment=True, eveniment=event,
                             imagine=imagine)
def xcom_choice():
    """Drive the dialog through an XCOM layer entry: pick a random combo
    item, type a random breach description, and accept."""
    dlg['Layer TypeComboBox'].Select('XCOM')
    combo = dlg['ComboBox0']
    combo.Click()
    combo.Select(random.randrange(0, combo.ItemCount()))
    combo.Click()
    for sentence in get_sentences(random.randrange(1, 4)):
        dlg['Breach DescriptionEdit'].TypeKeys(sentence)
    dlg['OKButton'].Click()
def main():
    """Insert one random event row per second, forever."""
    cur = conn.cursor()
    insert_sql = 'INSERT INTO events (timestamp, type, payload) VALUES (%s, %s, %s)'
    event_types = ['A', 'B', 'C', 'X', 'Y', 'Z']
    while True:
        row = (datetime.now(),
               random.choice(event_types),
               ' '.join(get_sentences(5)))
        cur.execute(insert_sql, row)
        conn.commit()
        sleep(1)
def create_account(email, university="upn"):
    """Create a Bob account for `email`, filling the name/password fields
    with random generated words and authenticating via a TOTP token."""
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/json",
        "BobUniversity": "upn",
        "BobToken": ""}
    t = pyotp.TOTP("R3A7PZLCUQIJFUGX", interval=90)
    headers["BobToken"] = str(t.now())

    def random_word():
        # first word of a freshly generated sentence (was duplicated 5x)
        return get_sentences(1)[0].split(" ")[0]

    values = {
        "EmailAddress": email,
        "FirstName": random_word(),
        "PaternalLastName": random_word(),
        "MaternalLastName": random_word(),
        "Password": random_word(),
        "PhoneNumber": random_word().upper(),
        "Agreement": True,
        "Origin": university,
        "PortfolioID": ""
    }
    url = "{0}/bob/createaccount".format(apps[args.app])
    print("Creating account for '{0}' from '{1}'...".format(email, university))
    response = requests.post(url, params=values, headers=headers)
    try:
        print("done.")
        print("Json response:")
        print(response.json())
    except ValueError:
        # was a bare `except:`; response.json() raises ValueError on
        # non-JSON bodies, which is the case being handled here
        print("Not a JSON response")
        print("Failed.")
def create_blog(number_of_posts, name=None):
    """Build the fixture payload for one blog: an editor user, the blog
    record itself, and `number_of_posts` posts wrapping freshly created
    items. Returns a flat list of resource dicts."""
    # synthetic id; dashes stripped from the uuid, then re-wrapped
    blog_id = "-%s-" % str(uuid.uuid4()).replace('-', '')
    posts = []
    for i in xrange(number_of_posts):
        items_ids, items = create_items(random.randint(1, 5))
        posts += items
        # the post references its items through the "main" group refs
        posts.append(
            {
                "resource": "posts",
                "username": "******",
                "data": {
                    "firstcreated": "2013-11-11T11:11:14+00:00",
                    "blog": blog_id,
                    "groups": [{
                        "id": "root",
                        "refs": [{
                            "idRef": "main"
                        }],
                        "role": "grpRole:NEP"}, {
                        "id": "main",
                        "refs": items_ids,
                        "role": "grpRole:Main"
                    }]
                }
            }
        )
    # editor user and blog header come first; posts are appended after
    blog = [
        {
            "resource": "users",
            "data": {
                "username": "******",
                "first_name": "Victor",
                "last_name": "the Editor",
                "role": "-id Editor role-",
                "user_type": "administrator",
                "password": "******",
                "email": "*****@*****.**",
                "sign_off": "eo"
            }
        },
        {
            "resource": "blogs",
            "username": "******",
            "data": {
                "title": name or get_sentences(1)[0],
                "versioncreated": "2014-02-03T19:34:00+0000",
                "description": "",
                "members": []
            },
            "id_name": blog_id
        }
    ]
    blog += posts
    return blog
def performance_test_1():
    """Stress test: create 50 bitbooks, each holding 100 notes, and attach
    every bitbook to one test user. Each note carries three bitfields:
    a blog post, a quote, and a rating."""
    u = User.objects.get(email='*****@*****.**')
    for x in range(50):
        bb = BitBook(title='A new bitbook number %s'%x,
                     description=get_sentences(1)[0])
        bb.save()
        for y in range(100):
            bn = BitNote()
            bn.save()
            bf1 = BlogPost(title='blog post 1', body=''.join(get_paragraphs(10)))
            bf2 = Quote(title='a fancy qoute', body=get_sentences(1)[0],
                        author='Author Aouthorski')
            bf3 = Rating(title='rating')
            bn.bitfields.append(bf1)
            bn.bitfields.append(bf2)
            bn.bitfields.append(bf3)
            bn.save()
            bn.save_to_bitbook(bitbook=bb)
        u.bitbooks.append(bb)
        u.save()
        print 'bb no %s'%x
def get(self, count, response_format=None):
    """Return up to 100 generated sentences in the requested format.

    Fix: `response_format` defaulted to None but was dereferenced with
    .lower() unconditionally, raising AttributeError when the caller
    omitted it; a missing format now falls through to the JSON response.
    """
    count = min(count, 100)  # cap to keep responses bounded
    items = get_sentences(count)
    fmt = (response_format or '').lower()
    if fmt == 'html':
        return output_html(items, 200)
    elif fmt == 'html_code':
        return output_html_code(items, 200)
    elif fmt == 'text':
        return output_text(items, 200)
    return {'sentences': items}
def handle(self, *args, **kwargs):
    """Create `amount` Experience records owned by user pk=1, each with
    random text fields, 1-4 inclusions/exclusions, and 2-6 images copied
    from the local image pool."""
    provider = User.objects.get(pk=1)
    for i in range(0, kwargs['amount']):
        location = CITIES[random.randint(0, len(CITIES) - 1)]
        experience = Experience.objects.create(
            user=provider,
            # 2-5 title-cased words from one generated sentence
            title=' '.join(
                get_sentence().split()[:random.randint(2, 5)]).title(),
            description='\n\n'.join(get_paragraphs(random.randint(1, 3))),
            location=location[0],
            coords=Point(location[1], location[2]),
            terms='\n'.join(get_paragraphs(random.randint(1, 2))),
            pax_adults=random.randint(1, 4),
            pax_children=random.randint(0, 3),
        )
        # 1-4 inclusion and exclusion bullet points, 2-4 words each
        for inc in get_sentences(random.randint(1, 4)):
            ExperienceInclusion.objects.create(
                experience=experience,
                name=' '.join(inc.split()[:random.randint(2, 4)]).title())
        for exc in get_sentences(random.randint(1, 4)):
            ExperienceExclusion.objects.create(
                experience=experience,
                name=' '.join(exc.split()[:random.randint(2, 4)]).title())
        images = self._get_images()
        num_images = random.randint(2, 6)
        for i in range(0, num_images):
            # draw without replacement so an experience never repeats an image
            image = images.pop(random.randint(0, len(images) - 1))
            with open(image, 'rb') as fh:
                path = os.path.join(settings.MEDIA_ROOT,
                                    ExperienceImage.image.field.upload_to,
                                    os.path.basename(image))
                # the first image becomes the default/cover image
                ei = ExperienceImage(experience=experience, default=i == 0)
                ei.image.save(path, ContentFile(fh.read()), True)
                ei.image.name = os.path.join(
                    ExperienceImage.image.field.upload_to,
                    os.path.basename(image))
                ei.save()
    print('Finished creating {} experiences'.format(kwargs['amount']))
def author(**kwargs):
    """Get-or-create an Author fixture keyed by email; the name defaults
    to the first two words of a generated sentence."""
    if 'name' not in kwargs:
        first_sentence = get_sentences(1)[0]
        kwargs['name'] = ' '.join(first_sentence.split(' ')[0:2])
    if 'email' not in kwargs:
        kwargs['email'] = '{0}@example.com'.format(slugify(kwargs['name']))
    existing = Author.query.filter_by(email=kwargs['email'])
    try:
        return existing[0]
    except IndexError:
        result = Author(**kwargs)
        db.session.add(result)
        return result
def getBlankFiller(page_elements):
    """Answer a fill-in-the-blank survey question with 2-6 random
    sentences of filler text."""
    question = page_elements['Question']
    # This text is completely random, so we can just fill it in right away
    filler = ' '.join(get_sentences(choice([2, 3, 4, 5, 6])))
    return {
        "Question": question['Question_Text'],
        "Type": question['Type'],
        "Answer": filler,
    }
async def write(message):
    """Delete the triggering message and post N generated sentences
    (default 3), stripping bytes-literal artifacts from each."""
    await message.delete()
    try:
        number = int(message.content.split("!write ")[1])
    except (IndexError, ValueError):
        # was a bare `except:`; only a missing argument (IndexError) or a
        # non-numeric one (ValueError) should fall back to the default
        number = 3
    sentences_list = get_sentences(number)
    for sentence in sentences_list:
        # the generator can emit b'...' repr artifacts; strip them
        sentence = sentence.replace("B'", "")
        sentence = sentence.replace("b'", "")
        sentence = sentence.replace("'", "")
        await message.channel.send(sentence)
def display_content(value):
    """Dash callback: return a two-series graph (Rest of world vs China)
    whose trace type cycles bar/scatter/box with `value`, followed by a
    paragraph of filler text."""
    data = [{
        'x': [
            1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
            2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
        ],
        'y': [
            219, 146, 112, 127, 124, 180, 236, 207, 236, 263, 350, 430,
            474, 526, 488, 537, 500, 439
        ],
        'name': 'Rest of world',
        'marker': {
            'color': 'rgb(55, 83, 109)'
        },
        # cycle through the three supported trace types
        'type': ['bar', 'scatter', 'box'][int(value) % 3]
    }, {
        'x': [
            1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
            2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
        ],
        'y': [
            16, 13, 10, 11, 28, 37, 43, 55, 56, 88, 105, 156, 270, 299,
            340, 403, 549, 499
        ],
        'name': 'China',
        'marker': {
            'color': 'rgb(26, 118, 255)'
        },
        'type': ['bar', 'scatter', 'box'][int(value) % 3]
    }]
    return html.Div([
        dcc.Graph(id='graph',
                  figure={
                      'data': data,
                      'layout': {
                          'margin': {
                              'l': 30,
                              'r': 0,
                              'b': 30,
                              't': 0
                          },
                          'legend': {
                              'x': 0,
                              'y': 1
                          }
                      }
                  }),
        # filler paragraph rendered below the chart
        html.Div(' '.join(get_sentences(10)))
    ])
def getBlankFiller(page_elements):
    """Build a survey answer object for a blank-filler question using a
    random amount (2-6 sentences) of generated text."""
    q = page_elements['Question']
    # This text is completely random, so we can just fill it in right away
    sentence_count = choice([2, 3, 4, 5, 6])
    random_text = ' '.join(get_sentences(sentence_count))
    survey_object = {"Question": q['Question_Text'],
                     "Type": q['Type'],
                     "Answer": random_text}
    return survey_object
def feature_choice():
    """Fill the Feature layer dialog: random combo selections, a random
    buffer value, a random comment, toggle every capture checkbox, accept."""
    dlg['Layer TypeComboBox'].Select('Feature')
    for name in ('Action TypeComboBox', 'GroupComboBox', 'FeatureComboBox'):
        combo = dlg[name]
        combo.Select(random.randrange(0, combo.ItemCount()))
    dlg['BufferComboBox'].TypeKeys(random.randrange(0, 20))
    for sentence in get_sentences(random.randrange(1, 4)):
        dlg['CommentEdit1'].TypeKeys(sentence)
    for box in ('GNSS Used', 'Remote Sensing', 'Aerial Photography',
                'MasterMapCheckBox'):
        dlg[box].Click()
    dlg['OKButton'].Click()
def _generate_random_note():
    """Render the list template with 0-10 fake notes; due date, image and
    hashtags each appear with ~2-in-11 probability."""
    from loremipsum import get_sentences

    notes = []
    for index in range(random.randint(0, 10)):
        notes.append({
            'preview': ''.join(get_sentences(random.randint(1, 2))),
            'id': index + 1,
            'age': str(random.randint(1, 10)) + "d",
            'formated_due': '2017-12-02' if (random.randint(0, 10) > 8) else None,
            'image': {'url': "https://lekvam.no/static/imgs/logo.png"} if (random.randint(0, 10) > 8) else None,
            'hashtags': ["tag"] if (random.randint(0, 10) > 8) else [],
            'text': "Her kan du endre og fikse ting"})
    return render_to_string('list.html', {'list': notes})
def boundary_choice():
    """Fill the Boundary layer dialog: random action, random comment,
    toggle the capture checkboxes, then accept."""
    # First case - Boundary
    dlg['Layer TypeComboBox'].Select('Boundary')
    action = random.randrange(0, dlg['Action TypeComboBox'].ItemCount())
    # BUG FIX: the item count was read from 'Action TypeComboBox' but the
    # selection was sent to 'ActionTypeComboBox' (no space) — use the one
    # key throughout, matching feature_choice()
    dlg['Action TypeComboBox'].Select(action)
    for l in get_sentences(random.randrange(1, 4)):
        dlg['CommentEdit1'].TypeKeys(l)
    dlg['GNSS Used'].Click()
    dlg['Remote Sensing'].Click()
    dlg['Aerial Photography'].Click()
    dlg['MasterMapCheckBox'].Click()
    dlg['LandCoverCheckBox'].Click()
    dlg['OKButton'].Click()
def reply():
    """Generate a canned reply for an incoming chat message, log the
    exchange to the chat table, and return the reply text."""
    message = request.args['message']
    botid = request.args['bot_id']
    doc_id = request.args['doc_id']
    # renamed from `reply` to avoid shadowing this view function
    answer = get_sentences(1)[0]
    cursor = mydb.cursor()
    sql = "INSERT INTO `chat`( `bot_name`, `document_id`, `user_id`, `message`, `reply`) VALUES (%s, %s, %s, %s, %s)"
    cursor.execute(sql, (botid, doc_id, 0, message, answer))
    mydb.commit()
    return answer
def create_logchunk(self, source, text=None, **kwargs):
    """Create, commit and return a LogChunk attached to `source`; text
    defaults to four generated sentences, one per line."""
    # TODO(dcramer): we should default offset to previous entry in LogSource
    kwargs.setdefault('offset', 0)
    kwargs['job'] = source.job
    kwargs['project'] = source.project
    body = '\n'.join(get_sentences(4)) if text is None else text
    logchunk = LogChunk(source=source, text=body, size=len(body), **kwargs)
    db.session.add(logchunk)
    db.session.commit()
    return logchunk
def logchunk(source, **kwargs):
    """Factory: stage (without committing) a LogChunk that inherits job
    and project from its LogSource."""
    # TODO(dcramer): we should default offset to previous entry in LogSource
    kwargs.setdefault('offset', 0)
    body = kwargs.pop('text', None) or '\n'.join(get_sentences(4))
    chunk = LogChunk(source=source,
                     job=source.job,
                     project=source.project,
                     text=body,
                     size=len(body),
                     **kwargs)
    db.session.add(chunk)
    return chunk
def entries_add_auto(mode, num):
    """Insert `num` auto-generated (title, text) rows into table `mode`.

    Fix: values were concatenated straight into the SQL string — an
    injection risk, and broken whenever the generated text contained a
    quote. They are now bound as parameters (matching the sibling
    implementation of this view). The table name cannot be bound, so
    `mode` is still interpolated; callers must pass a trusted value.
    """
    if not session.get('logged_in'):
        abort(401)
    db = get_db()
    for i in range(0, num):
        db.cursor().execute(
            "insert into " + mode + " (title, text) values (?, ?)",
            (str(get_sentences(1, False)[0].replace(".", "")),
             str(get_paragraphs(1, False)[0])))
    db.commit()
    flash('New ' + mode + ' automatic %d %s entrie%s successfully posted' % (num, mode, 's were' if (num > 1) else ' was'))
    return "OK"
def entries_add_auto(mode, num):
    """Insert `num` generated (title, text) rows into table `mode` and
    flash a summary message."""
    if not session.get('logged_in'):
        abort(401)
    db = get_db()
    statement = 'insert into ' + mode + ' (title, text) values (?, ?)'
    for i in range(0, num):
        title = get_sentences(1, False)[0].replace(".", "")
        body = get_paragraphs(1, False)[0]
        db.execute(statement, [title, body])
    db.commit()
    suffix = 's were' if (num > 1) else ' was'
    flash('New ' + mode + ' automatic %d %s entrie%s successfully posted' % (num, mode, suffix))
    return "OK"
def create_author(self, email=None, **kwargs):
    """Create and commit an Author; the name defaults to two generated
    words and the email to a slugified unique address."""
    if not kwargs.get('name'):
        kwargs['name'] = ' '.join(get_sentences(1)[0].split(' ')[0:2])
    if not email:
        email = '{0}-{1}@example.com'.format(slugify(kwargs['name']), uuid4().hex)
    # (removed dead `kwargs.setdefault('name', 'Test Case')` — the guard
    # above guarantees 'name' is always present by this point)
    author = Author(email=email, **kwargs)
    db.session.add(author)
    db.session.commit()
    return author
def create_author(self, email=None, **kwargs):
    """Create and commit an Author fixture.

    Name defaults to the first two words of a generated sentence; email
    defaults to a unique slugified address derived from the name.
    """
    if not kwargs.get('name'):
        kwargs['name'] = ' '.join(get_sentences(1)[0].split(' ')[0:2])
    if not email:
        email = '{0}-{1}@example.com'.format(
            slugify(kwargs['name']), uuid4().hex)
    # (removed dead `kwargs.setdefault('name', 'Test Case')` — unreachable,
    # since the guard above always sets 'name')
    author = Author(email=email, **kwargs)
    db.session.add(author)
    db.session.commit()
    return author
def generate_message(sender, recipient):
    """Build a multipart Lorem Ipsum email from `sender` to `recipient`.

    Raises ValueError when the loremipsum module is unavailable.
    """
    if li is None:
        raise ValueError("Could not import loremipsum to generate messages!")
    msg = MIMEMultipart()
    msg['From'] = sender
    msg['To'] = recipient
    msg['Subject'] = li.get_sentences(1)[0]
    paragraphs = li.get_paragraphs(random.randint(1, 12))
    msg.attach(MIMEText("\n\n".join(paragraphs), 'plain'))
    return msg
def gen_parts(num=0):
    """Create `num` Part rows with a random name, number and description.

    Fix: indexes were drawn with randint(1, len-1), so element 0 of both
    the part list and the sentence pool could never be selected; use
    random.choice so every entry is eligible.
    """
    partList = []
    # context manager replaces the unclosed-on-exception open/readlines
    with open('parts.txt', 'r') as f:
        for line in f:
            partList.append(line.rstrip('\n'))
    rand_sentences = get_sentences(len(partList))
    for i in range(num):
        part_name = random.choice(partList)
        part_num = random.randint(1, 10000)
        rand_desc = random.choice(rand_sentences)
        p = Part(number=part_num, name=part_name, description=rand_desc)
        p.save()
def gen_tasks():
    """Create 1-10 tasks per project, each with a random category.

    Fix: get_sentences(1) returns a *list*; the single sentence is now
    extracted with [0] so Task.description receives a string instead of a
    list repr (consistent with the other generators in this file).
    """
    cats = Category.objects.all()
    projs = Project.objects.all()
    for project in projs:
        num_tasks = random.randint(1, 10)
        for future_task in range(num_tasks):
            t = "TASK " + str(future_task + 1)
            d = get_sentences(1)[0]
            # NOTE(review): assumes at least 4 categories exist — confirm
            c = cats[random.randint(0, 3)]
            task = Task(title=t, description=d, order=future_task,
                        project=project, category=c)
            task.save()
def generate_message(sender, recipient):
    """Generate a Lorem Ipsum email addressed from `sender` to
    `recipient`; raises ValueError if loremipsum is unavailable."""
    if li is None:
        raise ValueError("Could not import loremipsum to generate messages!")
    msg = MIMEMultipart()
    msg['From'] = sender
    msg['To'] = recipient
    # one sentence for the subject, 1-12 paragraphs for the body
    msg['Subject'] = li.get_sentences(1)[0]
    body_text = "\n\n".join(li.get_paragraphs(random.randint(1, 12)))
    msg.attach(MIMEText(body_text, 'plain'))
    return msg
def _gen_fake_docs(count, author=None, source=None, project=None):
    """Yield `count` fake Documents; fields left as None are drawn at
    random from fixed candidate pools."""
    authors = ["Joe", "Jane", "Bill", "Mary"]
    sources = ["web", "twitter", "email"]
    projects = ["solr", "lucene", "elasticsearch"]

    def pick(value, choices):
        # honor an explicit override, otherwise choose randomly
        return value if value is not None else random.choice(choices)

    for i in range(count):
        yield Document(id="doc-%d" % i,
                       author=pick(author, authors),
                       source=pick(source, sources),
                       project=pick(project, projects),
                       content="".join(get_sentences(random.randint(1, 4))),
                       link="http://example.com/%d" % i,
                       created_at="2015-01-01T%02d:%02d:00Z" % (
                           random.randint(0, 23), random.randint(0, 59)))
def logchunk(source, **kwargs):
    """Build and stage a LogChunk inheriting job/project from its
    LogSource; text defaults to four generated sentences."""
    # TODO(dcramer): we should default offset to previous entry in LogSource
    kwargs.setdefault('offset', 0)
    text = kwargs.pop('text', None)
    if not text:
        text = '\n'.join(get_sentences(4))
    entry = LogChunk(
        source=source,
        job=source.job,
        project=source.project,
        text=text,
        size=len(text),
        **kwargs
    )
    db.session.add(entry)
    return entry
def build(project, **kwargs):
    """Factory: create and stage a Build for `project`, filling random
    defaults for any field the caller did not supply."""
    kwargs.setdefault('label', get_sentences(1)[0][:128])
    kwargs.setdefault('status', Status.finished)
    kwargs.setdefault('result', Result.passed)
    kwargs.setdefault('repository', project.repository)
    kwargs.setdefault('duration', random.randint(10000, 100000))
    kwargs.setdefault('target', uuid4().hex)
    # wire the relation ids from the resolved objects
    kwargs.update(
        project=project,
        repository_id=kwargs['repository'].id,
        project_id=project.id,
        author_id=kwargs['author'].id,
    )
    new_build = Build(**kwargs)
    db.session.add(new_build)
    return new_build
def create_test_forum(self):
    """Populate the forum with 3 sections x 4 boards x 11 threads x 14
    posts (plus 0-2 edits per post) for testing; commits once at the end."""
    s = self.db()
    for section_num in range(0, 3):
        section = ForumSection()
        section.title = u'Section {}'.format(section_num)
        section.description = u'Description for section {}'.format(section_num)
        section.sort_index = section_num
        s.add(section)
        s.flush()  # flush so section.id is available for the boards below
        for board_num in range(0, 4):
            board = ForumBoard()
            board.section = section.id
            board.title = u'Board {}-{}'.format(section_num, board_num)
            board.description = u'Description for board {}-{}'.format(section_num, board_num)
            # required access level mirrors the section number
            board.req_level = section_num
            board.sort_index = board_num
            s.add(board)
            s.flush()
            for thread_num in range(0, 11):
                # one of the three test users starts each thread
                thread_starter = self.test_users[random.randint(0, 2)].id
                thread = ForumThread()
                thread.board = board.id
                thread.user = thread_starter
                thread.title = u'Thread {}-{}-{}'.format(section_num, board_num, thread_num)
                s.add(thread)
                s.flush()
                for post_num in range(0, 14):
                    post = ForumPost()
                    post.thread = thread.id
                    # the first post always belongs to the thread starter
                    post.user = thread_starter if post_num == 0 else self.test_users[random.randint(0, 2)].id
                    post.message = u'\n'.join(loremipsum.get_sentences(random.randint(1, 8)))
                    s.add(post)
                    s.flush()
                    for edit_num in range(0, random.randint(0, 2)):
                        edit = ForumPostEdit()
                        edit.post = post.id
                        edit.user = post.user
                        edit.message = u'Edit for post {}'.format(post_num)
                        s.add(edit)
    s.commit()
def display_content(value):
    """Dash callback: rebuild the graph with two fixed series (Rest of
    world, China); `value` selects the plotly trace type, cycling through
    bar/scatter/box. A filler paragraph follows the chart."""
    data = [
        {
            'x': [1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
                  2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012],
            'y': [219, 146, 112, 127, 124, 180, 236, 207, 236, 263, 350,
                  430, 474, 526, 488, 537, 500, 439],
            'name': 'Rest of world',
            'marker': {
                'color': 'rgb(55, 83, 109)'
            },
            # trace type cycles with the incoming value
            'type': ['bar', 'scatter', 'box'][int(value) % 3]
        },
        {
            'x': [1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
                  2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012],
            'y': [16, 13, 10, 11, 28, 37, 43, 55, 56, 88, 105, 156, 270,
                  299, 340, 403, 549, 499],
            'name': 'China',
            'marker': {
                'color': 'rgb(26, 118, 255)'
            },
            'type': ['bar', 'scatter', 'box'][int(value) % 3]
        }
    ]
    return html.Div([
        dcc.Graph(
            id='graph',
            figure={
                'data': data,
                'layout': {
                    'margin': {
                        'l': 30,
                        'r': 0,
                        'b': 30,
                        't': 0
                    },
                    'legend': {'x': 0, 'y': 1}
                }
            }
        ),
        # filler paragraph below the chart
        html.Div(' '.join(get_sentences(10)))
    ])
def create_logchunk(self, source, text=None, **kwargs):
    """Create, commit and return a LogChunk attached to `source`; missing
    text becomes four generated sentences joined by newlines."""
    # TODO(dcramer): we should default offset to previous entry in LogSource
    kwargs.setdefault('offset', 0)
    kwargs['job'] = source.job
    kwargs['project'] = source.project
    if text is None:
        text = '\n'.join(get_sentences(4))
    chunk = LogChunk(source=source, text=text, size=len(text), **kwargs)
    db.session.add(chunk)
    db.session.commit()
    return chunk
def build(project, **kwargs):
    """Factory: stage and return a Build for `project` with randomized
    defaults for unspecified fields."""
    defaults = (
        ('label', get_sentences(1)[0][:128]),
        ('status', Status.finished),
        ('result', Result.passed),
        ('repository', project.repository),
        ('duration', random.randint(10000, 100000)),
        ('target', uuid4().hex),
    )
    for key, value in defaults:
        kwargs.setdefault(key, value)
    kwargs['project'] = project
    kwargs['repository_id'] = kwargs['repository'].id
    kwargs['project_id'] = kwargs['project'].id
    kwargs['author_id'] = kwargs['author'].id
    staged = Build(**kwargs)
    db.session.add(staged)
    return staged
def generate(self):
    """Generate loremipsum paragraph(s) with the configured sentence
    count(s), optionally wrapped in a <p> with classes/style."""
    line_count, para, classes, style = self.args
    if isinstance(line_count, (tuple, list)):
        # one rendered paragraph per requested count, concatenated
        return ''.join(LI(lc, para, classes, style) for lc in line_count)
    content = ' '.join(loremipsum.get_sentences(line_count))
    if classes:
        classes = 'class="{}" '.format(classes)
    if style:
        style = 'style="{}" '.format(style)
    if para:
        template = '<p {classes} {style}>' \
                   '{content}' \
                   '</p>'
        return template.format(content=content, classes=classes, style=style)
    return content