def test_BioProvDB():
    # Compare to TinyDB
    db_path = Path(bp.__file__).parent.joinpath("db.json")
    bp_db = BioProvDB(db_path)
    tinydb_ = TinyDB(db_path)
    assert len(bp_db) == len(tinydb_), "BioProvDB and TinyDB behaviour differs!"
    assert isinstance(
        bp_db, type(tinydb_)
    ), f"Type {type(bp_db)} should inherit or be an instance of {type(tinydb_)}"

    # Try a Query
    q = Query()
    slug, slug_ = generate_slug(2), generate_slug(4)
    results = bp_db.search(q[slug] == slug_)
    assert results == [], f"Query result should be an empty list! Results: {results}"

    # Create and erase database
    non_db_path = "./." + generate_slug(4) + ".json"
    non_db = BioProvDB(non_db_path)
    non_db.clear_db(confirm=True)
    assert len(non_db) == 0, f"Did not correctly erase the database at {non_db_path}"
    remove(non_db_path)
def main(argv):
    if sys.version_info[:2] < (3, 3):
        sys.stderr.write('This script requires Python 3.3+\n')
        return 1
    parser = argparse.ArgumentParser(description='Generate slug to stdout')
    parser.add_argument('length', default=None, nargs='?', type=int,
                        help='Number of words')
    parser.add_argument('-w', '--word', help='With particular substring')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Verbose output (with timing)')
    args = parser.parse_args(argv)
    generate_slug(args.length)  # for more precise timing
    if args.word:
        slug = None
        max_slugs = 100000
        for i in range(0, max_slugs):
            start_time = time.perf_counter()
            s = generate_slug(args.length)
            elapsed_time = time.perf_counter() - start_time
            if args.word in s:
                slug = s
                break
        if slug is None:
            print('Failed to generate in {} attempts'.format(max_slugs))
            return 1
    else:
        start_time = time.perf_counter()
        slug = generate_slug(args.length)
        elapsed_time = time.perf_counter() - start_time
    print(slug)
    if args.verbose:
        sys.stderr.write('Generated in {:0.06f} seconds\n'.format(elapsed_time))
    return 0
def test_Config():
    """
    Testing for the Config class
    :return:
    """
    config = Config()
    assert config.env.env_dict == dict(environ.items())
    assert config.user == config.env.user
    assert config.genomes.exists()
    assert config.data.exists()
    assert str(config) == f"BioProv Config class set in {bp.src.config.__file__}"
    assert type(len(config.db_all())) == int

    # ProvStore properties
    # getters
    if Path(config.provstore_file).is_file():  # no cover because default is the file not existing
        assert config.provstore_user is not None  # no cover
        assert config.provstore_token is not None  # no cover
    else:
        assert config._provstore_user is None
        assert config._provstore_token is None

    # setters
    config.provstore_user = None
    config.provstore_token = None
    with NamedTemporaryFile() as f:
        config.provstore_file = f.name
        config.create_provstore_file(user=generate_slug(2), token=generate_slug(4))
        config.read_provstore_file()
def genslug():
    global slugset
    slug = generate_slug(2)
    while slug in slugset:
        slug = generate_slug(2)
    slugset.add(slug)
    return "tp2-" + slug
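# A minimal usage sketch for the function above: `slugset` is assumed to be a
# module-level set tracking slugs already handed out, as the `global`
# statement implies. Repeated calls never return the same value twice.
slugset = set()
first, second = genslug(), genslug()
assert first.startswith("tp2-") and first != second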
def output(i):
    if with_index:
        return '{}-{}'.format(
            coolname.generate_slug(words),
            str(i + 1).zfill(num_of_leading_zero)
        )
    else:
        return '{}'.format(coolname.generate_slug(words))
def get_doc():
    return {
        '_id': str(bson.ObjectId()),
        'name': coolname.generate_slug(),
        'dt': datetime.fromtimestamp(random.randint(0, NOW_TS)),
        'count': random.randint(0, 5000),
        'city': random.choice(('Philly', 'Santa Fe', 'Reno')),
        'content': ' '.join(lorem.paragraph() for _ in range(3)),
        'percent': random.random(),
        'dict': {
            'name': coolname.generate_slug(),
            'count': random.randint(0, 5000),
            'elements': ['a', 'b', 'c'],
        },
    }
def test_random_default(self):
    # NOTE: four slugs in this test must be updated every time you change word lists
    # 1. Re-seed default generator
    random.seed(123)
    assert random.random() == 0.052363598850944326
    if six.PY2:
        # randrange returns different results in Python 2
        self.assertEqual(coolname.generate_slug(),
                         six.u('smooth-tuscan-limpet-of-opportunity'))
    else:
        self.assertEqual(coolname.generate_slug(),
                         'accelerated-salamander-of-enjoyable-abracadabra')
    # 2. Replace default generator
    rand = random.Random()
    rand.seed(456)
    assert rand.random() == 0.7482025358782363
    coolname.replace_random(rand)
    if six.PY2:
        self.assertEqual(coolname.generate_slug(),
                         six.u('resilient-ingenious-skunk-from-pluto'))
    else:
        self.assertEqual(coolname.generate_slug(),
                         'glorious-rose-mouflon-of-opportunity')
    # 3. Custom generator with custom Random
    config = {
        'all': {
            'type': 'cartesian',
            'lists': ['digits', 'digits']
        },
        'digits': {
            'type': 'words',
            'words': list(str(x) for x in range(10))
        }
    }
    generator = RandomGenerator(config)
    generator.random.seed(12)
    if six.PY2:
        self.assertEqual(generator.generate_slug(), '4-7')
    else:
        self.assertEqual(generator.generate_slug(), '6-0')
    generator.random = FakeRandom(33)
    # FakeRandom always yields 33, so the expectation is the same on Python 2
    # and 3; the original bare `== '3-3'` comparisons discarded their results,
    # so assert explicitly.
    self.assertEqual(generator.generate_slug(), '3-3')
def test_File_and_Directory():
    """
    Tests objects in the File module:
        - existing File instance
        - non-existing File instance
        - get_size(), convert_bytes()
    :return:
    """
    # Test existing file
    file, tag = bp.__file__, "Init file for BioProv."
    f = File(file, tag)
    d = Directory(f.path.parent)
    non_existing = generate_slug(2)
    nf = File("./" + non_existing)
    attributes = {
        # File class - existing file
        "path": f.path == Path(file).absolute(),
        "name": f.name == f.path.stem,
        "dir": f.directory == f.path.parent,
        "extension": f.extension == f.path.suffix,
        "tag": f.tag == tag,
        "exists": f.exists is True,
        "repr": f.__repr__() == str(f.path),
        "Directory_exists": d.exists,
        # Non existing file
        "non_existing": nf.exists is False,
        "no_size": nf.size == 0,
        "nf_repr": nf.__repr__() == str(nf.path),
        # get_size() function
        "get_size": f.size == utils.get_size(f.path),
        "raw_get_size": f.raw_size == utils.get_size(f.path, convert=False),  # get_size(convert=False)
        # Convert bytes function
        "convert_bytes": utils.convert_bytes(2 ** 10) == "1.0 KB",
    }
    for k, statement in attributes.items():
        assert statement, f"{k} did not pass!"

    d.replace_path("", "")

    # test hashes
    nf.exists = True
    nf.replace_path("", "", warnings=True)  # no cover
    nf.sha256 = generate_slug(2)
    # nf.replace_path(non_existing, bp.__file__)  # no cover
    _ = f.entity
    f.entity = ProvEntity(None, generate_slug(2))
def get_experiment_name(self, build: int) -> Optional[str]:
    experiment_dir_names = os.listdir(self.config.experiments_dir)
    for dir_name in experiment_dir_names:
        if dir_name.startswith("{}_".format(build)):
            experiment_name_start = dir_name.find("_") + 1
            return dir_name[experiment_name_start:]
    return coolname.generate_slug(2)
def on_create(init_state):
    """Create and join a watch room"""
    # Create unique token
    while True:
        room_token = generate_slug(3)
        if not db.hexists('rooms', room_token):
            break

    # Annotate state with timestamp
    state = add_current_time_to_state(init_state)
    state = add_set_time_to_state(state)

    # Create room in db
    room = {'state': state, 'count': 1}
    db.hset('rooms', room_token, json.dumps(room))

    # Join socket.io room
    join_room(room_token)

    # Publish init state
    emit('video_state_update', state, room=room_token)

    # Publish the room user count
    emit('room_user_count_update', {"users": 1}, room=room_token)

    # Return response
    return {'roomId': room_token, 'status_code': 200}, 200
def projects():
    """
    Render webpage to display existing projects for a user.

    The user can select a project from this page; we then set
    user.current_project_name to the selected project.
    Also handles requests to create new projects.
    """
    next_page = 'input_size'

    # process GET requests
    if request.method == 'GET':
        if 'projectID' in request.args:
            projectID = request.args.get('projectID')
            project_status = db.getProject(projectID)
            return render_template('projects.html',
                                   project_just_created=False,
                                   proj_description="",
                                   projectID=projectID,
                                   check_proj_status=True,
                                   project_status=project_status)
        else:
            return render_template('projects.html',
                                   project_just_created=False,
                                   proj_description="",
                                   check_proj_status=False,
                                   project_status=None)

    # process POST requests
    if request.method == 'POST':
        if 'command' in request.form and request.form['command'] == 'Create Project':
            # user's input of project description
            proj_description = request.form['p_desc']
            # keep trying until we find a unique projectID
            while True:
                # generate a random projectID
                projectID = coolname.generate_slug()
                # check if projectID already exists in DB
                if not db.projectExists(projectID):
                    break
            # save project to DB
            db.saveProject(projectID, proj_description)
            # re-render the page showing a "Start Input" button
            return render_template('projects.html',
                                   project_just_created=True,
                                   proj_description=proj_description,
                                   projectID=projectID,
                                   check_proj_status=False,
                                   project_status=None)
        elif 'command' in request.form and request.form['command'] == 'Start this project':
            projectID = request.form['projectID']
            return redirect(url_for(next_page, projectID=projectID))
        elif 'command' in request.form and request.form['command'] == 'Check project status':
            projectID = request.form['projectID']
            return redirect(url_for('projects', projectID=projectID))
        else:
            abort(400, 'Unknown request')
def generate_signed_url(self, expiration=datetime.timedelta(days=7)):
    """Generate a signed URL for the dataset. This is done by uploading a uniquely named metadata file
    containing signed URLs to the dataset's files and returning a signed URL to that metadata file.

    :param datetime.datetime|datetime.timedelta expiration: the amount of time or date after which the URL should expire
    :return str: the signed URL for the dataset
    """
    if not self.exists_in_cloud:
        raise CloudLocationNotSpecified(
            f"{self!r} must exist in the cloud for a signed URL to be generated for it."
        )

    signed_metadata = self.to_primitive()
    signed_metadata["files"] = [
        datafile.generate_signed_url(expiration=expiration) for datafile in self.files
    ]

    path_to_signed_metadata_file = storage.path.join(
        self.path, SIGNED_METADATA_DIRECTORY, coolname.generate_slug()
    )

    storage_client = GoogleCloudStorageClient()
    storage_client.upload_from_string(
        string=json.dumps(signed_metadata, cls=OctueJSONEncoder),
        cloud_path=path_to_signed_metadata_file,
    )

    return storage_client.generate_signed_url(
        cloud_path=path_to_signed_metadata_file, expiration=expiration
    )
def test_Program():
    """
    Testing for the Program class.
    :return:
    """
    _attributes = {
        "name": "prodigal",
        "params": {"-h": ""},
        "tag": "gene annotation",
        "found": True,
        "version": "Prodigal V2.6.3: February, 2016",
    }
    p = Program(name="prodigal")
    for attr, v in _attributes.items():
        if attr != "version":  # check that it gets the version automatically (won't set the version attribute)
            setattr(p, attr, v)
        assert getattr(p, attr) == v, f"{attr} attribute is wrong!"
    assert p.cmd == p.generate_cmd()
    slug = generate_slug(3)
    some_random_program = Program(slug)
    assert (
        not some_random_program.found
    ), f"You shouldn't have a program called {slug} lying around!"
def create_instance(url=None,
                    instance_class='db.t2.micro',
                    storage=10,
                    master_username='******',
                    backup_days=0,
                    feedback=None):
    rds = aws_client('rds')
    instance_name = generate_slug(2)
    password = random_ident(36)
    action = 'Use' if url else 'Provision'
    with feedback(f'{action} database instance {colored(instance_name, "blue")}'):
        if not url:
            rds.create_db_instance(
                DBName='master',
                DBInstanceIdentifier=instance_name,
                # pass the parameter through; the original hardcoded
                # 'db.t2.micro' here and ignored instance_class
                DBInstanceClass=instance_class,
                Engine='postgres',
                AllocatedStorage=storage,
                StorageType='gp2',
                MasterUsername=master_username,
                MasterUserPassword=password,
                BackupRetentionPeriod=backup_days,
                PubliclyAccessible=True,
                Tags=[{'Key': 'Builder', 'Value': 'Polecat'}],
            )
            waiter = rds.get_waiter('db_instance_available')
            waiter.wait(DBInstanceIdentifier=instance_name)
            address = get_rds_instance(instance_name)['Endpoint']['Address']
            url = get_db_url('master', password, address, 'master')
            set_parameter(f'/polecat/aws/db/instances/{instance_name}/url', url)
    return instance_name, url
def get_unique_faces(embeddings, face_cos_distance_threshold):
    logging.debug("finding unique faces ...")
    if len(embeddings) == 1:
        labels_clustered = np.array([0])
    elif len(embeddings) == 0:
        return None
    else:
        ac = AgglomerativeClustering(
            n_clusters=None,
            affinity="cosine",
            linkage="average",
            distance_threshold=face_cos_distance_threshold,
        )
        clustering = ac.fit(embeddings)
        labels_clustered = clustering.labels_

    labels_unique = np.unique(labels_clustered)
    # Regenerate until every cluster gets a distinct name. The original
    # compared len(labels_unique) == len(names_unique), which is always true
    # and never caught slug collisions; checking the set size does.
    while True:
        names_unique = [coolname.generate_slug(2) for _ in labels_unique]
        if len(set(names_unique)) == len(names_unique):
            break
    label2name = {lbl: nm for lbl, nm in zip(labels_unique, names_unique)}
    face_names = [label2name[lbl] for lbl in labels_clustered]
    return face_names
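# Hypothetical call sketch for the function above: N embeddings in, N
# human-readable names out, with embeddings whose cosine distance falls below
# the threshold sharing a name. The shapes and threshold here are made up.
import numpy as np

names = get_unique_faces(np.random.rand(5, 128), face_cos_distance_threshold=0.3)
assert len(names) == 5  # one name per input embedding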
def _create_nested_cloud_dataset(self, dataset_name=None):
    """Create a dataset in cloud storage with the given name containing a nested set of files.

    :param str|None dataset_name: the name to give the dataset; a random name is generated if none is given
    :return str: the cloud path for the dataset
    """
    cloud_storage_client = GoogleCloudStorageClient()
    dataset_path = storage.path.generate_gs_path(
        TEST_BUCKET_NAME, dataset_name or coolname.generate_slug(2)
    )

    cloud_storage_client.upload_from_string(
        "[1, 2, 3]", cloud_path=storage.path.join(dataset_path, "file_0.txt")
    )
    cloud_storage_client.upload_from_string(
        "[4, 5, 6]", cloud_path=storage.path.join(dataset_path, "file_1.txt")
    )
    cloud_storage_client.upload_from_string(
        "['a', 'b', 'c']",
        cloud_path=storage.path.join(dataset_path, "sub-directory", "sub_file.txt"),
    )
    cloud_storage_client.upload_from_string(
        "['blah', 'b', 'c']",
        cloud_path=storage.path.join(
            dataset_path, "sub-directory", "sub-sub-directory", "sub_sub_file.txt"
        ),
    )

    return dataset_path
def create_test():
    form = UploadForm()
    if request.method == 'POST' and form.validate_on_submit():
        f = form.doc.data
        filename = secure_filename(f.filename)
        f.save('questions/' + filename)
        test_id = generate_slug(2)
        with open('questions/' + filename) as csvfile:
            reader = csv.DictReader(csvfile, delimiter=',')
            cur = mysql.connection.cursor()
            for row in reader:
                cur.execute(
                    'INSERT INTO questions(test_id,qid,q,a,b,c,d,ans,marks) values(%s,%s,%s,%s,%s,%s,%s,%s,%s)',
                    (test_id, row['qid'], row['q'], row['a'], row['b'],
                     row['c'], row['d'], row['ans'], 1))
            cur.connection.commit()
        start_date = form.start_date.data
        end_date = form.end_date.data
        start_time = form.start_time.data
        end_time = form.end_time.data
        start_date_time = str(start_date) + " " + str(start_time)
        end_date_time = str(end_date) + " " + str(end_time)
        password = form.password.data
        subject = form.subject.data
        topic = form.topic.data
        cur.execute(
            'INSERT INTO teachers (username, test_id, start, end, password, subject, topic) values(%s,%s,%s,%s,%s,%s,%s)',
            (dict(session)['username'], test_id, start_date_time,
             end_date_time, password, subject, topic))
        cur.connection.commit()
        cur.close()
        flash(f'Test ID: {test_id}', 'success')
        return redirect(url_for('dashboard'))
    return render_template('create_test.html', form=form)
def store_meta(self, query, name="", generate_name=True, _is_current_time=True):
    """Takes a query and settings to generate the metadata dictionary."""
    if not self.check_required(query):
        raise AttributeError("Missing either 'type' or 'timestamp'")
    if not generate_name:
        if not self.check_name(query):
            raise AttributeError(
                "If you choose not to generate a name, please ensure that it exists and is valid"
            )
        query['filename'] = f"{query['name']}.cereal"
    else:
        name = generate_slug()
        query['name'] = name
        query['filename'] = f"{name}.cereal"
    if _is_current_time:
        query["timestamp"] = float(int(time.time()))
    return query
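# Hypothetical usage sketch (assumes `store` is an instance of the enclosing
# class): a query carrying the required 'type' and 'timestamp' keys comes back
# with a generated slug name, a matching .cereal filename, and the current
# timestamp attached.
meta = store.store_meta({"type": "dataframe", "timestamp": 0})
# e.g. {'type': 'dataframe', 'timestamp': 1700000000.0,
#       'name': 'ambitious-emu', 'filename': 'ambitious-emu.cereal'}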
def create_run_name(custom_name: str = None,
                    date: bool = True,
                    date_format: str = '%Y-%m-%d-%H.%M',
                    n_slugs: int = 2,
                    suffix: str = ""):
    """
    custom_name (str|None): custom name of the run, typically with a date
        added; if it is None, a generated slug is used instead.

    Example:
    >>> run_name = create_run_name(date=True, date_format="%Y-%m-%d", \
n_slugs=2)
    >>> len(run_name.split("_")) == 2
    True
    >>> run_name = create_run_name(date=False, n_slugs=2)
    >>> len(run_name.split("-")) >= 2
    True
    """
    from coolname import generate_slug
    if custom_name is None:
        name = generate_slug(n_slugs)
    else:
        name = custom_name
    if date:
        from datetime import datetime
        # use the date_format argument; the original hardcoded the default
        # format here, silently ignoring the parameter
        name = datetime.today().strftime(date_format) + "_" + name
    name += suffix
    return name
def event_create_file(min_dist: float, max_dist: float, num: int, name: str):
    """
    Generate a number of database entries within given radius bounds of set coordinates

    :param min_dist: min radius in m
    :param max_dist: max radius in m
    :param num: number of points to generate
    :param name: name of csv file
    :return: None
    """
    lon = 18.865644  # Longitude of origin point
    lat = -33.930755  # Latitude of origin point
    points = []
    for _ in range(num):
        # Create random coordinates within radius of distance provided
        lon2, lat2, _ = getEndpoint(lat, lon, min_dist, max_dist)
        # Generate random name
        event_name = generate_slug(2)
        points.append((event_name, lon2, lat2))
    with open('{}.csv'.format(name), 'w', newline='') as out:
        csv_out = csv.writer(out)
        csv_out.writerow(['Name', 'Longitude', 'Latitude'])
        for row in points:
            csv_out.writerow(row)
    return None
async def serve(self, socket, path):
    user = User(coolname.generate_slug(2), socket)
    logger.info("%s connected", user)
    while True:
        try:
            data = await user.socket.recv()
        except (
            websockets.exceptions.ConnectionClosedError,
            websockets.exceptions.ConnectionClosedOK,
        ) as e:
            logger.info("%s closed connection (%s)", user, e)
            await self.stage.on_disconnect(user)
            break
        message = messages.deserialize(data)
        logger.info("Received %s from %s", message, user)
        if isinstance(message, messages.AuthCode):
            await self.stage.on_auth_code(user, message)
        elif isinstance(message, messages.CountCode):
            await self.stage.on_count_code(user, message)
        elif isinstance(message, messages.QuestionAnswers):
            await self.stage.on_question_answers(user, message)
        else:
            raise NotImplementedError(f"Unsupported message {message}")
def create_reana_workflow():
    """Create a reana workflow by json."""
    _args = request.get_json()

    # try to fetch the deposit with the provided PID
    try:
        resolver = Resolver(pid_type='depid', object_type='rec', getter=lambda x: x)
        deposit, rec_uuid = resolver.resolve(_args.get('pid'))
    except PIDDoesNotExistError:
        abort(404, "You tried to create a workflow and connect"
                   " it with a non-existing record")

    # if the record exists, check if the user has 'deposit-update' rights
    with UpdateDepositPermission(deposit).require(403):
        token = get_reana_token(rec_uuid)
        name = _args.get('workflow_name')
        workflow_name = generate_slug(2)
        workflow_json = _args.get('workflow_json')

        try:
            resp = create_workflow(workflow_json, workflow_name, token)
        except ValidationError as e:
            return jsonify({'message': e.message}), 400
        except Exception:
            return jsonify({
                'message': 'An exception has occurred while creating '
                           'the workflow in REANA.'
            }), 400

        # create a workflow dict, which can be used to populate
        # the db, but is also used in the serializer
        _workflow = {
            'service': 'reana',
            'user_id': current_user.id,
            'name': name,
            'workflow_name': workflow_name,
            'name_run': resp['workflow_name'],
            'workflow_id': resp['workflow_id'],
            'rec_uuid': str(rec_uuid),
            'depid': _args.get('pid'),
            'status': 'created',
            'workflow_json': workflow_json,
        }

        # TOFIX: check for integrity errors
        workflow = ReanaWorkflow(**_workflow)
        db.session.add(workflow)
        db.session.commit()

        workflow_serialized = ReanaWorkflowSchema().dump(_workflow).data
        return jsonify(workflow_serialized)
async def create_upload_file(upload: bytes = File("upload")):
    name = generate_slug()
    path = app_path / "site" / "assets" / "upload" / f"{name}.jpeg"
    path.write_bytes(upload)
    # url = f"https://{meta.url}/images/model.png"
    return {
        "url": f"assets/upload/{name}.jpeg",
    }
def channel_token(number):
    """
    Encodes a phone number as a Slack channel name in the form of a two-word slug.
    Repeats the same slug for the same number.
    """
    random.seed("999" + str(number))
    return generate_slug(2)
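# Determinism sketch: coolname's default generator draws from the shared
# `random` module state (see test_random_default above), so re-seeding with
# the number makes the mapping stable across calls. The number is made up.
assert channel_token(5551234567) == channel_token(5551234567)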
def test_SeqFile():
    """
    Tests the SeqFile constructor.
    :return:
    """
    tag = "Synechococcus elongatus PCC 6301"
    genome = SeqFile(synechococcus_genome, tag, import_records=True)
    nf_genome, nf_tag = generate_slug(2), generate_slug(2)
    nf_genome = SeqFile(nf_genome, nf_tag)

    # Instance where file exists
    existing_instance = {
        "exists": genome.exists,
        "tag": genome.tag == tag,
        "class": type(genome) == SeqFile,
        "records": all((
            type(genome.records) == dict,
            len(genome.records),
            genome.records["NC_006576.1"],
        )),
    }
    for dict_ in (existing_instance,):
        for k, statement in dict_.items():
            assert statement, f"{k} did not pass!"

    # Testing generator property
    genome.generator = None
    _ = genome.generator

    # Testing seqstats property
    genome.seqstats = None
    _ = genome.seqstats

    # Test _calculate_seqstats args
    genome._calculate_seqstats(percentage=True, megabases=True)
    genome._calculate_seqstats(calculate_gc=False)

    # Test FileNotFound warning
    none = seqrecordgenerator(nf_genome.path, "fasta", warnings=True)
    assert none is None, f"{none} should be a NoneType object!"
def test_slug(self):
    # Basic test, to check that it doesn't crash.
    # Output of default generator is always unicode.
    items = coolname.generate()
    self.assertIsInstance(items[0], six.text_type)
    name = coolname.generate_slug()
    self.assertIsInstance(name, six.text_type)
    self.assertGreater(len(name), 10)
    self.assertIn('-', name)
def random_mail():
    domains = [
        'hotmail.com', 'gmail.com', 'yahoo.com',
        'mymail.com', 'myspace.com', 'euromail.eu'
    ]
    mail = coolname.generate_slug()
    mail += '@' + random.choice(domains)
    return mail
def main(argv):
    if sys.version_info[:2] < (3, 3):
        sys.stderr.write('This script requires Python 3.3+\n')
        return 1
    parser = argparse.ArgumentParser(description='Generate slug to stdout')
    parser.add_argument('length', default=None, nargs='?', type=int,
                        help='Number of words')
    parser.add_argument('-w', '--word', help='With particular substring')
    parser.add_argument('-a', '--attempts', type=int, default=100000,
                        help='Number of attempts before giving up')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Verbose output (with timing)')
    args = parser.parse_args(argv)
    generate_slug(args.length)  # for more precise timing
    if args.word:
        words = args.word.split(',')
        slug = None
        for i in range(0, args.attempts):
            start_time = time.perf_counter()
            s = generate_slug(args.length)
            elapsed_time = time.perf_counter() - start_time
            if any(x in s for x in words):
                slug = s
                break
        if slug is None:
            print('Failed to generate in {} attempts'.format(args.attempts))
            return 1
    else:
        start_time = time.perf_counter()
        slug = generate_slug(args.length)
        elapsed_time = time.perf_counter() - start_time
    print(slug)
    if args.verbose:
        sys.stderr.write(
            'Generated in {:0.06f} seconds\n'.format(elapsed_time))
    return 0
def get_arguments():
    """
    Pretty self-explanatory: gets arguments for training and adds them to config
    """
    parser = argparse.ArgumentParser(description='Train model for lofar-dev')
    parser.add_argument('training_data', metavar='-d', type=str, nargs=1,
                        help='a dataset in the format [x_train,x_test,y_train,y_test]')
    parser.add_argument('architecture', metavar='-a', type=str, nargs=1,
                        choices=['skip_mag_phase', 'skip_real_imag', 'vae_mag',
                                 'vae_phase', 'vae_real', 'vae_imag'],
                        help='the architecture type like vae or ae_tnse')
    parser.add_argument('-latent_dim', metavar='-l', type=str, nargs=1,
                        help='the dimension of the VAE embedding')
    parser.add_argument('-notes', metavar='-n', type=str, nargs=1,
                        help='a filter for the clustering model to be visualised')
    parser.add_argument('-project', metavar='-p', type=str, nargs=1,
                        help='The project name to be saved under in wandb')
    # the original declared type=str with integer choices=[0, 1], so a
    # supplied value could never match; parse as int instead
    parser.add_argument('-wandb', metavar='-w', type=int, nargs=1,
                        choices=[0, 1],
                        help='Flag to set whether the wandb environment is used')
    args = parser.parse_args()

    config['architecture'] = args.architecture[0]
    config['training_data'] = args.training_data[0]
    config['name'] = generate_slug()
    if args.notes is not None:
        config['Notes'] = args.notes[0]
    if args.latent_dim is not None:
        config['latent_dim'] = int(args.latent_dim[0])
    if args.wandb is not None:
        config['wandb'] = int(args.wandb[0])
    else:
        args.wandb = True
    if args.project is not None:
        config['project'] = args.project[0]
def create_session(packages, for_development):
    server = libtmux.Server()
    session_name = generate_slug(2)
    session = server.new_session(session_name)
    for package_name, package_settings in sort(packages):
        run(session, package_name, package_settings, for_development)
    time.sleep(1)
    session.list_windows()[0].kill_window()
    return session, session_name
def __init__(self, *args, **kwargs):
    existing_name = getattr(self, "name", None) or kwargs.pop("name", None)
    if existing_name:
        self.name = existing_name
        self._cool_named = False
    else:
        self.name = generate_slug(2)
        self._cool_named = True
    super().__init__(*args, **kwargs)
def create_rpm_or_srpm(model, i):
    name = coolname.generate_slug(2)
    unit, created = model.objects.get_or_create(
        name=name, epoch='epoch', version='version', release='release', arch='arch')
    num_repos_added = int(random.random() * to_create[platform.Repository]) + 1
    repos = platform.Repository.objects.all().order_by('?')[:num_repos_added]
    unit.add_repos(*repos)
    if not unit.files.all():
        filename = '{}.{}'.format(unit, unit.content_type)
        cuf = platform.ContentUnitFile()
        cuf.content = File(io.StringIO(filename), name=filename)
        cuf.unit = unit
        cuf.save()
    return unit
def populate_repository(model, i):
    slug = coolname.generate_slug(2)
    repo, created = platform.Repository.objects.get_or_create(slug=slug)
    return repo
# Measure average call time
number = 100000
print('generate() time: {:.6f}'.format(timeit(generate, number=number) / number))
print('generate_slug() time: {:.6f}'.format(timeit(generate_slug, number=number) / number))

# Total combinations count
print('Total combinations: {:,}'.format(get_combinations_count()))
print('Combinations(4): {:,}'.format(get_combinations_count(4)))
print('Combinations(3): {:,}'.format(get_combinations_count(3)))
print('Combinations(2): {:,}'.format(get_combinations_count(2)))

# Check probability of repeat if we have used 0.01% of the total namespace
# (combinations // 10000). It should be around 0.0001.
if arguments.all:
    combinations = get_combinations_count()
    items = set()
    items_count = combinations // 10000
    while len(items) < items_count:
        items.add(generate_slug())
    repeats = 0
    loops = 100000
    for i in range(loops):
        if generate_slug() in items:
            repeats += 1
    print('Repeat probability: {:.6f} (with {} names used)'.format(repeats / loops, len(items)))

# Dump tree
if arguments.dump:
    print()
    import coolname.impl
    print(coolname.impl._default._dump(sys.stdout, object_ids=True))
# minimum number of things to cram into the db
num_repos = 10
num_rpm = 100
num_srpm = 10

# not DRY at all...
count_repos = platform.Repository.objects.count()
count_rpm = rpm.RPM.objects.count()
count_srpm = rpm.SRPM.objects.count()

for i in range(num_repos):
    repo_ident = 'repo{}'.format(i)
    if i < count_repos:
        repo = platform.Repository.objects.all()[i]
    else:
        repo_name = coolname.generate_slug(2)
        repo, created = platform.Repository.objects.get_or_create(repo_id=repo_name)
        print('Repo {} created.'.format(repo_name), file=sys.stderr)
    globals()[repo_ident] = repo

for i in range(num_rpm):
    rpm_ident = 'rpm{}'.format(i)
    if i < count_rpm:
        unit = rpm.RPM.objects.all()[i]
    else:
        rpm_name = coolname.generate_slug(2)
        unit, created = rpm.RPM.objects.get_or_create(
            name=rpm_name, epoch='epoch', version='version',
            release='release', arch='arch')
        num_repos_added = int(random.random() * num_repos) + 1
        repos = platform.Repository.objects.all().order_by('?')[:num_repos_added]
        unit.repositories.add(*repos)