def alias(alias, id, all):
    gi, cnfg, aliases = _login()
    # print(aliases)
    if all:
        workflow_ids = [wf['id'] for wf in gi.workflows.get_workflows()]
        dataset_ids = [ds['id'] for ds in gi.histories.show_history(cnfg['hid'], contents=True)]
        for id in workflow_ids + dataset_ids:
            if id not in aliases.values():  # we do not overwrite if an alias already exists
                while True:
                    alias = namesgenerator.get_random_name()
                    # we can allow one id to have multiple aliases but NOT the reverse
                    if alias not in aliases:
                        break
                click.echo("Alias assigned to ID {}: ".format(id) + click.style(alias, bold=True))
                aliases[alias] = id
    else:
        if not alias:
            alias = namesgenerator.get_random_name()
        click.echo("Alias assigned to ID {}: ".format(id) + click.style(alias, bold=True))
        aliases[alias] = id
    f = _read_configfile()
    f['aliases'] = aliases
    _write_to_file(f)
def __create_blob_container(self, storage_acc_name):
    sms = self.__get_service_mgmt_object()
    # Retrieve the primary key of your storage account
    # Maybe the secondary key works too?
    storage_acc_key = None
    accounts = sms.list_storage_accounts()
    for account in accounts:
        if account.service_name == storage_acc_name:
            storageServiceObj = sms.get_storage_account_keys(account.service_name)
            storage_acc_key = storageServiceObj.storage_service_keys.primary
    # Create a container
    blob_service = BlobService(account_name=storage_acc_name, account_key=storage_acc_key)
    container_name = namesgenerator.get_random_name()
    container_name += "container"
    blob_service.create_container(container_name)
    # This is the url to the container we just created
    container_url_template = "http://{}.blob.core.windows.net/{}"
    container_url = container_url_template.format(storage_acc_name, container_name)
    # print "Created blob container with URL ", container_url
    return container_url
def random_name_subclass(of=object, **classdef):
    classdef['__module__'] = __name__
    new_typename = namesgenerator.get_random_name(sep=' ').title().replace(' ', '') + of.__name__
    new_type = type(new_typename, (of, ), classdef)
    globals()[new_typename] = new_type
    return new_type
def generate_sbi_config(num_pbs: int = 3) -> dict:
    """Generate a SBI configuration dictionary.

    Args:
        num_pbs (int, optional): Number of Processing Blocks (default = 3)

    Returns:
        dict, SBI configuration dictionary

    """
    utc_now = datetime.datetime.utcnow()
    date = utc_now.strftime('%Y/%m/%d %H:%M:%S')
    project = generate_project()
    sb_id = str(uuid4())[:8]
    sbi_pb_config = {}
    pb_ids = []
    for _ in range(num_pbs):
        pb_id = generate_pb_id()
        sbi_pb_config[pb_id] = generate_pb_config(pb_id)
        pb_ids.append(pb_id)
    sbi_config = dict(id=generate_sbi_id(utc_now, project, sb_id),
                      scheduling_block_id=sb_id,
                      sub_array_id=randint(-1, 15),
                      date=date,
                      status='',
                      project=project,
                      name=namesgenerator.get_random_name(),
                      processing_block_ids=pb_ids,
                      processing_block_data=sbi_pb_config)
    return sbi_config
def post(request):
    if 'name' in request.POST or 'code' in request.POST:
        if 'name' in request.POST:
            room = WatchRoom(name=request.POST['name'], owner=request.user)
            room.save()
        else:
            try:
                room = WatchRoom.objects.get(join_code=request.POST['code'])
                watcher = WatchRoomWatcher.objects.filter(room=room, watcher=request.user)
                if len(watcher) > 0:
                    return Response({'status': 'Already Joined'})
            except (WatchRoom.DoesNotExist, KeyError):
                return Response(status=status.HTTP_404_NOT_FOUND)
        relation = WatchRoomWatcher(
            room=room,
            watcher=request.user,
            color=f'#{"".join(random.choice(string.hexdigits) for _ in range(6))}',
            name=namesgenerator.get_random_name().replace('_', ' ').capitalize())
        relation.save()
        return Response({
            'room': {
                'id': room.id,
                'currentVideo': room.current_video,
                'joinCode': room.join_code,
                'owner': request.user.id == room.owner.id,
                'name': room.name,
                'memberCount': len(WatchRoomWatcher.objects.filter(room=room))
            }
        })
def send():
    """
    start sending messages on whatsapp

    :return: rendered form with the `Sending Messages!` alert
    """
    # wait till user is logged into whatsapp
    meow.wait_till_login(driver[session['username']])
    print(session['username'], "logged into whatsapp")

    # check if log number exists
    if not meow.check_if_number_exists(driver[session['username']], session['log_phone']):
        print(session['username'], "gave invalid phone number for logging")

        # Close driver
        driver[session['username']].close()
        del driver[session['username']]
        print("closed driver for", session['username'])

        # go back to form with error message
        return render_template('form.html', msg="Incorrect number given for logging", **session)

    # start thread that will send messages on whatsapp
    Thread(target=send_messages, kwargs=dict(session)).start()

    # go back to form with a success message
    session['username'] = get_random_name()
    print(session['username'], "logged in")
    return render_template('form.html', msg="Sending Messages!")
def form():
    """
    display message details form

    :return: rendered HTML page of the form
    """
    session['username'] = get_random_name()  # set username for session
    print(session['username'], "logged in")
    return render_template('form.html', **session)
def __init__(self, center, img):
    self.img = img
    self.center = np.array(center)
    self.size = np.array(img.shape[:2]).astype(np.float64)
    self.centerSpeed = np.zeros(2)
    self.sizeSpeed = np.zeros(2)
    self.timeNotVisible = 0
    self.timeSinceCreation = 0
    self.hash = binascii.hexlify(os.urandom(16))
    self.name = namesgenerator.get_random_name()
def add_single(id, alias):
    """
    Add an alias to a single ID.
    """
    gi, cnfg, aliases = utils._login()
    if not alias:
        alias = namesgenerator.get_random_name()
    click.echo("Alias assigned to ID {}: ".format(id) + click.style(alias, bold=True))
    aliases[alias] = id
    _update_aliases(aliases)
def __create_cloud_service(self):
    sms = self.__get_service_mgmt_object()
    # Create a cloud service
    # Because the name has to be unique in Their cloud :/
    hosted_service_name = namesgenerator.get_random_name()
    label = 'DevOps'
    desc = 'Service for basic nginx server'
    location = 'East US'
    sms.create_hosted_service(hosted_service_name, label, desc, location)
    # print "Created hosted service with name ", hosted_service_name
    return hosted_service_name
def __create_storage_service(self):
    sms = self.__get_service_mgmt_object()
    # Create a storage service
    storage_acc_name = namesgenerator.get_random_name(True)
    label = 'mystorageaccount'
    location = 'East US'
    desc = 'My storage account description.'
    sms.create_storage_account(storage_acc_name, desc, label, location=location)
    # print "Created storage service with name ", storage_acc_name
    return storage_acc_name
def cli(ctx, alias, obj, profile, **kwds):
    """
    Add an alias for a path or a workflow or dataset ID.

    Aliases are associated with a particular planemo profile.
    """
    if not alias:
        if not namesgenerator:
            raise ImportError(("Random generation of aliases requires installation of the namesgenerator package."
                               " Either install this, or specify the alias name with --alias."))
        alias = namesgenerator.get_random_name()
    exit_code = profiles.create_alias(ctx, alias, obj, profile)
    info("Alias {} created.".format(alias))
    ctx.exit(exit_code)
    return
def __init__(self, project: str = namesgenerator.get_random_name(),
             log_level: int = logging.INFO, save: bool = False):
    """Creation. The moons and the planets are there."""
    self.project: str = project
    self.project += "_%d" % (random.randint(100000, 999999))
    self._log: logging.Logger = gen_logger(self.NAME, log_level)
    self.output: bool = save
    self.folder: str = os.getcwd()
    self._config_bootstrap()
    self._processed: List = list()
    self.results: List = list()
    self.saved: List = list()
def new_run(cls, app, testcase: TestCase):
    run_id = namesgenerator.get_random_name()
    fs.ensure_dir_exists(cls._runs_dir(app))
    temp_dir = tempfile.mkdtemp(prefix='_{}'.format(run_id), dir=cls._runs_dir(app))
    cls._save_meta(temp_dir, testcase)
    # make a copy to make sure any alterations of the source won't affect us
    testcase.save(fs.join(temp_dir, 'testcase.yml'))
    # TODO prevent collisions
    os.rename(temp_dir, fs.join(cls._runs_dir(app), run_id))
    return cls(app, run_id)
def __init__(self, console=None, event_loop=None, name=None, thread_id=None, stack=None, memory=None):
    self.id = id(self) if thread_id is None else thread_id
    self.thread_name = namesgenerator.get_random_name() if name is None else name
    self.stack = [] if stack is None else stack
    self.memory = {} if memory is None else memory
    self.console = console
    # main_loop
    self.event_loop = event_loop
    self.event_loop.register_callback(EventID.MESSAGE, self.id, self.recv)
    self.event_loop.register_callback(EventID.ALARM, self.id, self.recv)
def get_queryset(self):
    parameter_from_url = self.kwargs[self.lookup_url_kwarg]
    pub_date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    likes = random.randint(0, 200)
    nick = namesgenerator.get_random_name()
    observers = random.randint(0, 800)
    observed = random.randint(0, 800)
    try:
        if Profile.objects.filter(name=parameter_from_url).exists():
            queryset = Profile.objects.filter(name=parameter_from_url).values()
        else:
            raise Profile.DoesNotExist
    except Profile.DoesNotExist:
        new_profile_urls = scrape_images(parameter_from_url)
        new_profile_urls_json = json.dumps(scrape_images(parameter_from_url))
        new_profile = Profile(name=parameter_from_url,
                              images_source=new_profile_urls_json,
                              pub_date=pub_date,
                              likes=likes,
                              nick=nick,
                              profile_img=new_profile_urls[0],
                              observers=observers,
                              observed=observed)
        new_post = Post(profile_img=new_profile_urls[0],
                        name=parameter_from_url,
                        image=new_profile_urls[-1],
                        pub_date=pub_date,
                        likes=likes,
                        nick=nick)
        new_profile.save()
        new_post.save()
        queryset = Profile.objects.filter(name=parameter_from_url).values()
    return queryset
def add_all():
    """
    Add randomly generated aliases to all workflows and datasets which do not currently have one.
    """
    gi, cnfg, aliases = utils._login()
    workflow_ids = [wf['id'] for wf in gi.workflows.get_workflows()]
    dataset_ids = [ds['id'] for ds in gi.histories.show_history(cnfg['hid'], contents=True)]
    for id in workflow_ids + dataset_ids:
        if id not in aliases.values():  # we do not overwrite if an alias already exists
            while True:
                alias = namesgenerator.get_random_name()
                # we can allow one id to have multiple aliases but NOT the reverse
                if alias not in aliases:
                    break
            click.echo("Alias assigned to ID {}: ".format(id) + click.style(alias, bold=True))
            aliases[alias] = id
    _update_aliases(aliases)
def PackageCompilationGCC(jmgr, os_target, sql, args):
    container_name = namesgenerator.get_random_name()
    # cmd = ['docker', 'run', '--rm', '-v', '/filer/bin:/filer', '-w',
    #        '/filer', '--network=host', 'aakshintala/ubuntu-compiler:gcc', '/filer/run-compile-gcc.sh']
    cmd = [
        'timeout', '-s', 'SIGKILL', '300', 'docker', 'run', '--rm', '--name',
        container_name, '-v', '/filer/bin:/filer', '-w', '/filer',
        '--network=host', 'aakshintala/ubuntu-compiler:gcc',
        '/filer/run-compile-gcc.sh'
    ]
    logging.error(container_name)
    p = subprocess.Popen(cmd + [args[0]], stdout=subprocess.PIPE, stderr=null_dev)
    (stdout, stderr) = p.communicate()
    if p.wait() == -9:
        kill_and_remove(container_name)
        logging.error("Cannot compile-timeout")
        raise Exception("Cannot compile-GCC timeout")
    if p.returncode != 0:
        print(stderr)
        logging.error(stderr)
        raise Exception("Cannot compile - GCC")
class Card(Base):
    __tablename__ = 'cards'

    id = Column(Integer, primary_key=True)
    # pass the callables (no parentheses) so a fresh default is generated for each row
    name = Column(String, default=namesgenerator.get_random_name)
    email = Column(String)
    rfid = Column(String, unique=True)
    credits = Column(Integer)
    email_token = Column(String)
    account_id = Column(Integer, ForeignKey('accounts.id'))
    created_at = Column(DateTime, default=datetime.datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.datetime.utcnow)

    account = relationship('Account', back_populates='cards')

    def to_json(self):
        return {
            'id': self.id,
            'name': self.name,
            'email': self.email,
            'rfid': self.rfid,
            'credits': self.credits,
            'email_token': self.email_token,
            'created_at': self.created_at,
            'updated_at': self.updated_at
        }

    def generate_email_token(self):
        self.email_token = uuid.uuid4()

    def send_signup_email(self):
        print('Email sent to %s with token %s' % (self.email, self.email_token))

    def __repr__(self):
        return "<Card(name='%s', email='%s', rfid='%s', credits='%s')>" % (
            self.name, self.email, self.rfid, str(self.credits))
# ########################### Retrieving Data ###################################
G = nx.Graph()
gi = nx.Graph()
fileNums = [3980, 686, 414, 348, 0]
group_center = [[48.891986, 2.319287], [48.878562, 2.360369],
                [48.843491, 2.351834], [48.858370, 2.294481],
                [48.864049, 2.331053]]
g = 0
for i in fileNums:
    fileName = "fb-edges/" + str(i) + ".edges"
    gi = nx.read_edgelist(fileName, nodetype=int)
    for j in gi.nodes():
        gi.add_node(j, group=g)
        gi.add_node(j, name=namesgenerator.get_random_name())
        gi.add_node(j, istrain=bool(random.getrandbits(1)))
        cen_lat = group_center[g][0]
        cen_lon = group_center[g][1]
        point = genpoint(cen_lon, cen_lat, 0.5 * len(gi.nodes()) / 60)
        gi.add_node(j, lat=point['lat'])
        gi.add_node(j, lon=point['lon'])
        gi.add_node(j, id=int(j))
    g += 1
    G = nx.compose(G, gi)
M = G
# ########################### Retrieving Data ###################################

# Write out initial graph data in JSON files
def test_namesgenerator(self):
    self.assertEqual(type(namesgenerator.get_random_name()), str)
if ex['name'] is None or len(ex['name']) == 0:
    raise ValueError(f'Experiment name is empty: {ex["name"]}')
if ex['tags'] is None:
    raise ValueError('Experiment tags is None')
if ex['model']['fn'] is None:
    raise ValueError('Model constructor function not defined')
if ex['optimizer']['fn'] is None:
    raise ValueError('Optimizer constructor function not defined')

validate_history(ex['history'])
validate_losses(ex['loss'])

ex['completed_epochs'] = sum((session['completed_epochs'] for session in ex['history']), 0)
ex['samples'] = sum((session['samples'] for session in ex['history']), 0)
ex['fullname'] = ex['fullname'].format(tags='_'.join(ex['tags']), rand=namesgenerator.get_random_name())

# These args are computed based on other stuff, the user should not provide a value,
# unless we are continuing training on top of a previous session.
if ex['completed_epochs'] == 0:
    model_kwargs = {'enc_in_nodes', 'enc_in_edges'}
    if not set.isdisjoint(set(ex['model'].keys()), model_kwargs):
        raise ValueError(f'Model config dict can not have any of {model_kwargs} in its arguments, '
                         f'found: {", ".join(set.intersection(set(ex["model"].keys()), model_kwargs))}')
    ex['model']['enc_in_nodes'] = sum((
        ex['model']['residue_emb_size'],
        23 if ex['data']['partial_entropy'] else 0,
        23 if ex['data']['self_information'] else 0,
        15 if ex['data']['dssp'] else 0,
    ))
    ex['model']['enc_in_edges'] = sum((
# class not necessary
import namesgenerator

namesgenerator.get_random_name()
def create_readable_name():
    "Docker-style random name from namesgenerator"
    return namesgenerator.get_random_name()
def _reset(self) -> None:
    """Reset some of the state in the class for multi-searches."""
    self.project: str = namesgenerator.get_random_name()
    self.project += "_%d" % (random.randint(100000, 999999))
    self._processed: List = list()
    self.results: List = list()
def plan(config, listing, autostart, container, noautostart, inputfile, start, stop, delete, delay, plan):
    """Create/Delete/Stop/Start vms from plan file"""
    vmprofiles = {key: value for key, value in config.profiles.iteritems()
                  if 'type' not in value or value['type'] == 'vm'}
    containerprofiles = {key: value for key, value in config.profiles.iteritems()
                         if 'type' in value and value['type'] == 'container'}
    k = config.get()
    if listing:
        vms = {}
        plans = PrettyTable(["Name", "Vms"])
        for vm in sorted(k.list(), key=lambda x: x[4]):
            vmname = vm[0]
            plan = vm[4]
            if plan in vms:
                vms[plan].append(vmname)
            else:
                vms[plan] = [vmname]
        for plan in sorted(vms):
            planvms = ','.join(vms[plan])
            plans.add_row([plan, planvms])
        print(plans)
        return
    if plan is None:
        plan = namesgenerator.get_random_name()
    if delete:
        networks = []
        if plan == '':
            click.secho("That would delete every vm...Not doing that", fg='red')
            return
        click.confirm('Are you sure about deleting plan %s' % plan, abort=True)
        found = False
        for vm in sorted(k.list()):
            name = vm[0]
            description = vm[4]
            if description == plan:
                vmnetworks = k.vm_ports(name)
                for network in vmnetworks:
                    if network != 'default' and network not in networks:
                        networks.append(network)
                k.delete(name)
                click.secho("VM %s deleted!" % name, fg='green')
                found = True
        if container:
            for cont in sorted(k.list_containers()):
                name = cont[0]
                container_plan = cont[3]
                if container_plan == plan:
                    k.delete_container(name)
                    click.secho("Container %s deleted!" % name, fg='green')
                    found = True
        for network in networks:
            k.delete_network(network)
            click.secho("Unused network %s deleted!" % network, fg='green')
            found = True
        if found:
            click.secho("Plan %s deleted!" % plan, fg='green')
        else:
            click.secho("Nothing to do for plan %s" % plan, fg='red')
        return
    if autostart:
        click.secho("Set vms from plan %s to autostart" % (plan), fg='green')
        for vm in sorted(k.list()):
            name = vm[0]
            description = vm[4]
            if description == plan:
                k.update_start(name, start=True)
                click.secho("%s set to autostart!" % name, fg='green')
        return
    if noautostart:
        click.secho("Preventing vms from plan %s to autostart" % (plan), fg='green')
        for vm in sorted(k.list()):
            name = vm[0]
            description = vm[4]
            if description == plan:
                k.update_start(name, start=False)
                click.secho("%s prevented to autostart!" % name, fg='green')
        return
    if start:
        click.secho("Starting vms from plan %s" % (plan), fg='green')
        for vm in sorted(k.list()):
            name = vm[0]
            description = vm[4]
            if description == plan:
                k.start(name)
                click.secho("VM %s started!" % name, fg='green')
        if container:
            for cont in sorted(k.list_containers()):
                name = cont[0]
                containerplan = cont[3]
                if containerplan == plan:
                    k.start_container(name)
                    click.secho("Container %s started!" % name, fg='green')
        click.secho("Plan %s started!" % plan, fg='green')
        return
    if stop:
        click.secho("Stopping vms from plan %s" % (plan), fg='green')
        for vm in sorted(k.list()):
            name = vm[0]
            description = vm[4]
            if description == plan:
                k.stop(name)
                click.secho("%s stopped!" % name, fg='green')
        if container:
            for cont in sorted(k.list_containers()):
                name = cont[0]
                containerplan = cont[3]
                if containerplan == plan:
                    k.stop_container(name)
                    click.secho("Container %s stopped!" % name, fg='green')
        click.secho("Plan %s stopped!" % plan, fg='green')
        return
    if inputfile is None:
        inputfile = 'kcli_plan.yml'
        click.secho("using default input file kcli_plan.yml", fg='green')
    inputfile = os.path.expanduser(inputfile)
    if not os.path.exists(inputfile):
        click.secho("No input file found nor default kcli_plan.yml.Leaving....", fg='red')
        os._exit(1)
    default = config.default
    with open(inputfile, 'r') as entries:
        entries = yaml.load(entries)
        vmentries = [entry for entry in entries
                     if 'type' not in entries[entry] or entries[entry]['type'] == 'vm']
        diskentries = [entry for entry in entries
                       if 'type' in entries[entry] and entries[entry]['type'] == 'disk']
        networkentries = [entry for entry in entries
                          if 'type' in entries[entry] and entries[entry]['type'] == 'network']
        containerentries = [entry for entry in entries
                            if 'type' in entries[entry] and entries[entry]['type'] == 'container']
        if networkentries:
            click.secho("Deploying Networks...", fg='green')
        for net in networkentries:
            profile = entries[net]
            if k.net_exists(net):
                click.secho("Network %s skipped!" % net, fg='blue')
                continue
            cidr = profile.get('cidr')
            nat = bool(profile.get('nat', True))
            if cidr is None:
                print "Missing Cidr for network %s. Not creating it..." % net
                continue
            dhcp = profile.get('dhcp', True)
            result = k.create_network(name=net, cidr=cidr, dhcp=dhcp, nat=nat)
            handle_response(result, net, element='Network ')
        if vmentries:
            click.secho("Deploying Vms...", fg='green')
        for name in vmentries:
            profile = entries[name]
            if k.exists(name):
                click.secho("VM %s skipped!" % name, fg='blue')
                continue
            if 'profile' in profile and profile['profile'] in vmprofiles:
                customprofile = vmprofiles[profile['profile']]
                title = profile['profile']
            else:
                customprofile = {}
                title = plan
            description = plan
            pool = next((e for e in [profile.get('pool'), customprofile.get('pool'), default['pool']] if e is not None))
            template = next((e for e in [profile.get('template'), customprofile.get('template')] if e is not None), None)
            numcpus = next((e for e in [profile.get('numcpus'), customprofile.get('numcpus'), default['numcpus']] if e is not None))
            memory = next((e for e in [profile.get('memory'), customprofile.get('memory'), default['memory']] if e is not None))
            disks = next((e for e in [profile.get('disks'), customprofile.get('disks'), default['disks']] if e is not None))
            disksize = next((e for e in [profile.get('disksize'), customprofile.get('disksize'), default['disksize']] if e is not None))
            diskinterface = next((e for e in [profile.get('diskinterface'), customprofile.get('diskinterface'), default['diskinterface']] if e is not None))
            diskthin = next((e for e in [profile.get('diskthin'), customprofile.get('diskthin'), default['diskthin']] if e is not None))
            guestid = next((e for e in [profile.get('guestid'), customprofile.get('guestid'), default['guestid']] if e is not None))
            vnc = next((e for e in [profile.get('vnc'), customprofile.get('vnc'), default['vnc']] if e is not None))
            cloudinit = next((e for e in [profile.get('cloudinit'), customprofile.get('cloudinit'), default['cloudinit']] if e is not None))
            reserveip = next((e for e in [profile.get('reserveip'), customprofile.get('reserveip'), default['reserveip']] if e is not None))
            reservedns = next((e for e in [profile.get('reservedns'), customprofile.get('reservedns'), default['reservedns']] if e is not None))
            nested = next((e for e in [profile.get('nested'), customprofile.get('nested'), default['nested']] if e is not None))
            start = next((e for e in [profile.get('start'), customprofile.get('start'), default['start']] if e is not None))
            nets = next((e for e in [profile.get('nets'), customprofile.get('nets'), default['nets']] if e is not None))
            iso = next((e for e in [profile.get('iso'), customprofile.get('iso')] if e is not None), None)
            keys = next((e for e in [profile.get('keys'), customprofile.get('keys')] if e is not None), None)
            cmds = next((e for e in [profile.get('cmds'), customprofile.get('cmds')] if e is not None), None)
            netmasks = next((e for e in [profile.get('netmasks'), customprofile.get('netmasks')] if e is not None), None)
            gateway = next((e for e in [profile.get('gateway'), customprofile.get('gateway')] if e is not None), None)
            dns = next((e for e in [profile.get('dns'), customprofile.get('dns')] if e is not None), None)
            domain = next((e for e in [profile.get('domain'), customprofile.get('domain')] if e is not None), None)
            ips = profile.get('ips')
            scripts = next((e for e in [profile.get('scripts'), customprofile.get('scripts')] if e is not None), None)
            if scripts is not None:
                scriptcmds = []
                for script in scripts:
                    script = os.path.expanduser(script)
                    if not os.path.exists(script):
                        click.secho("Script %s not found.Ignoring..." % script, fg='red')
                    else:
                        scriptlines = [line.strip() for line in open(script).readlines() if line != '\n']
                        if scriptlines:
                            scriptcmds.extend(scriptlines)
                if scriptcmds:
                    if cmds is None:
                        cmds = scriptcmds
                    else:
                        cmds = cmds + scriptcmds
            result = k.create(name=name, description=description, title=title, numcpus=int(numcpus),
                              memory=int(memory), guestid=guestid, pool=pool, template=template,
                              disks=disks, disksize=disksize, diskthin=diskthin,
                              diskinterface=diskinterface, nets=nets, iso=iso, vnc=bool(vnc),
                              cloudinit=bool(cloudinit), reserveip=bool(reserveip),
                              reservedns=bool(reservedns), start=bool(start), keys=keys, cmds=cmds,
                              ips=ips, netmasks=netmasks, gateway=gateway, dns=dns, domain=domain,
                              nested=nested)
            handle_response(result, name)
            if delay > 0:
                sleep(delay)
        if diskentries:
            click.secho("Deploying Disks...", fg='green')
        for disk in diskentries:
            profile = entries[disk]
            pool = profile.get('pool')
            vms = profile.get('vms')
            template = profile.get('template')
            size = int(profile.get('size', 10))
            if pool is None:
                print "Missing Key Pool for disk section %s. Not creating it..." % disk
                continue
            if vms is None:
                print "Missing or Incorrect Key Vms for disk section %s. Not creating it..." % disk
                continue
            if k.disk_exists(pool, disk):
                click.secho("Disk %s skipped!" % disk, fg='blue')
                continue
            if len(vms) > 1:
                shareable = True
            else:
                shareable = False
            newdisk = k.create_disk(disk, size=size, pool=pool, template=template, thin=False)
            click.secho("Disk %s deployed!" % disk, fg='green')
            for vm in vms:
                k.add_disk(name=vm, size=size, pool=pool, template=template,
                           shareable=shareable, existing=newdisk, thin=False)
        if containerentries:
            click.secho("Deploying Containers...", fg='green')
            label = "plan=%s" % (plan)
            for container in containerentries:
                if k.exists_container(container):
                    click.secho("Container %s skipped!" % container, fg='blue')
                    continue
                profile = entries[container]
                if 'profile' in profile and profile['profile'] in containerprofiles:
                    customprofile = containerprofiles[profile['profile']]
                else:
                    customprofile = {}
                image = next((e for e in [profile.get('image'), profile.get('template'),
                                          customprofile.get('image'), customprofile.get('template')]
                              if e is not None), None)
                nets = next((e for e in [profile.get('nets'), customprofile.get('nets')] if e is not None), None)
                ports = next((e for e in [profile.get('ports'), customprofile.get('ports')] if e is not None), None)
                volumes = next((e for e in [profile.get('volumes'), profile.get('disks'),
                                            customprofile.get('volumes'), customprofile.get('disks')]
                                if e is not None), None)
                cmd = next((e for e in [profile.get('cmd'), customprofile.get('cmd')] if e is not None), None)
                click.secho("Container %s deployed!" % container, fg='green')
                k.create_container(name=container, image=image, nets=nets, cmd=cmd,
                                   ports=ports, volumes=volumes, label=label)
parser.add_argument('-f', '--file', help='Compiled pipeline file [.tar.gz, .yaml, .zip]', required=True)
parser.add_argument('-e', '--experiment', help='Experiment name to run pipeline on', default='Default')
parser.add_argument('-r', '--run-name', help="Run name", default=None)
parser.add_argument('-k', '--kubeflow', help="Host, where Kubeflow instance is running", required=True)
args = parser.parse_args()

# Create client
client = kfp.Client(args.kubeflow)
run_name = 'mnist_' + namesgenerator.get_random_name() if not args.run_name else args.run_name
try:
    experiment_id = client.get_experiment(experiment_name=args.experiment).id
except:
    experiment_id = client.create_experiment(args.experiment).id

# Submit a pipeline run
result = client.run_pipeline(experiment_id, run_name, args.file, params={"drift_detector_steps": "500"})
print(result)
requests.post(sites_uri, json=site)

print("Checking on brands, creating if necessary")
brands = requests.get(brand_uri)
if brands.json() is None or len(brands.json()) < brand_count:
    if brands.json() is None:
        max_range = brand_count
    else:
        max_range = brand_count - len(brands.json())
    print(f'Creating {max_range} brands.')
    for i in range(0, max_range):
        brand = {'name': namesgenerator.get_random_name()}
        request_data = json.dumps(brand)
        response = requests.post(brand_uri, json=brand)
brands = requests.get(brand_uri)
brands = brands.json()

print('Checking on products, creating if necessary')
products = requests.get(products_uri)
if products.json() is None or len(products.json()) < 300:
    colors = [
        'white', 'black', 'red', 'blue', 'yellow', 'titanium', 'steel-grey',
def random_name():
    nice_name = namesgenerator.get_random_name()
    random_letters = "".join(chr(random.randint(ord("A"), ord("Z"))) for _ in range(4))
    return nice_name + "_" + random_letters
def generate_task_name():
    return namesgenerator.get_random_name()
parser.add_argument('-e', '--experiment', help='Experiment name to run pipeline on', default='MNIST Showreal')
parser.add_argument('-r', '--run-name', help="Run name", default=None)
parser.add_argument('-n', '--namespace', help="Namespace, where kubeflow and serving are running", required=True)
args = parser.parse_args()
arguments = args.__dict__

# Create client
client = kfp.Client(f"http://{arguments['namespace']}.kubeflow.odsc.k8s.hydrosphere.io")
run_name = namesgenerator.get_random_name() if not arguments["run_name"] else arguments["run_name"]
try:
    experiment_id = client.get_experiment(experiment_name=arguments["experiment"]).id
except:
    experiment_id = client.create_experiment(arguments["experiment"]).id

# Submit a pipeline run
result = client.run_pipeline(
    experiment_id, run_name, arguments["file"],
    {
        "hydrosphere-address": f"http://{arguments['namespace']}.serving.odsc.k8s.hydrosphere.io",
    })
def __create_actual_vm(self, container_url, hosted_service_name):
    # print("__create_actual_vm(%s, %s) " % (container_url, hosted_service_name))
    sms = self.__get_service_mgmt_object()
    image_name = self.get_property(self.section_name, "VM_Image_Name")
    blob_url = container_url + "/ubuntu.vhd"
    # Create the virtual hard disk. It basically creates a hard drive at blob_url with the image specified
    os_hd = OSVirtualHardDisk(image_name, blob_url)
    # Upload the certificate we'd created earlier.
    cert_path = os.getcwd() + self.get_property(self.section_name, "Cert_Upload_Path")
    with open(cert_path, "rb") as bfile:
        cert_data = base64.b64encode(bfile.read()).decode()  # decode to make sure this is a str and not a bstr
    cert_format = 'pfx'
    cert_password = ''
    cert_res = sms.add_service_certificate(service_name=hosted_service_name, data=cert_data,
                                           certificate_format=cert_format, password=cert_password)
    # Create a LinuxConfigurationSet for configuring Linux VMs, there's an equivalent Windows Set
    vm_name = namesgenerator.get_random_name()
    linux_config = LinuxConfigurationSet(hosted_service_name,
                                         self.get_property(self.section_name, "VM_Default_Username"),
                                         self.get_property(self.section_name, "VM_Default_Password"),
                                         True)
    SERVICE_CERT_THUMBPRINT = self.get_property(self.section_name, "Service_Certificate_Thumbprint")
    # Let's add the public keys to be uploaded
    pk = PublicKey(SERVICE_CERT_THUMBPRINT,
                   os.getcwd() + self.get_property(self.section_name, "Public_Key_Upload_Path"))
    pair = KeyPair(SERVICE_CERT_THUMBPRINT,
                   os.getcwd() + self.get_property(self.section_name, "Public_Key_Upload_Path"))
    linux_config.ssh = SSH()
    linux_config.ssh.key_pairs.key_pairs.append(pair)
    linux_config.ssh.public_keys.public_keys.append(pk)
    # Configure the VM to accept SSH connections on port 22
    endpoint_config = ConfigurationSet()
    endpoint_config.configuration_set_type = 'NetworkConfiguration'
    ssh_endpoint = ConfigurationSetInputEndpoint(name='ssh', protocol='tcp', port='22', local_port='22',
                                                 load_balanced_endpoint_set_name=None,
                                                 enable_direct_server_return=False)
    endpoint_config.input_endpoints.input_endpoints.append(ssh_endpoint)
    # Finally create the VM:
    sms.create_virtual_machine_deployment(service_name=hosted_service_name,
                                          deployment_name=hosted_service_name,
                                          deployment_slot='production',
                                          label=hosted_service_name,
                                          role_name=hosted_service_name,
                                          system_config=linux_config,
                                          network_config=endpoint_config,
                                          os_virtual_hard_disk=os_hd,
                                          role_size='Small')
    return (hosted_service_name, hosted_service_name + '.cloudapp.net',
            self.get_property(self.section_name, "VM_Default_Username"))
def random_scan_name():
    return "{}-{}".format(namesgenerator.get_random_name(sep="-"), str(uuid4()))
import namesgenerator

print(namesgenerator.get_random_name())
def create_instance(self):
    # print "creating a digital ocean instance using url: ", self.base_url
    name = namesgenerator.get_random_name()
    droplet_id = self.__create_droplet(name)
    # print droplet_id
    return (name, self.__check_if_droplet_is_up(droplet_id), "root")