def checkvalue(server, key, *value):
    """Validate values being assigned to a server setting.

    key is a tuple of key components; value holds the candidate value(s).
    Returns the accepted value for known keys, delegates 'backup.*' keys
    to the backups module, and raises ServerError for anything else.
    """
    head = key[0]
    if head == "TEST":
        return value[0]
    if key == ("exe_name",):
        if len(value) != 1:
            raise ServerError("Only one value supported for 'exe_name'")
        return value[0]
    if head == "backup":
        # backup sub-keys are validated by the backups module itself
        try:
            current = server.data.get("backup", {})
            return backups.checkdatavalue(current, key[1:], *value)
        except backups.BackupError as ex:
            raise ServerError(ex)
    raise ServerError("{} invalid key to set".format(".".join(str(k) for k in key)))
def train(request):
    """Store a labelled training example from the request body.

    Expects JSON containing 'gesture_data' plus a 'name' matching one of
    Classifier.classes.  Renders the gesture to an image under
    data/imgs/<name>/ and returns the stored gesture as a dict.
    Raises ServerError (HTTP 400) on missing or unknown class names.
    """
    payload = request.get_json()
    gd = fetch_gesture(payload)
    if 'name' not in payload:
        raise ServerError('No class provided', 400)
    label = payload['name']
    if label not in Classifier.classes:
        raise ServerError('Invalid class', 400)
    gd.draw_pos()
    image_path = gd.pos_to_image(dir='data/imgs/' + label,
                                 filename='img_' + random_str())
    gesture = Gesture(gd, image_path)
    return {'success': True, 'gesture': gesture.to_dict()}
def get_start_command(server):
    """Build the srcds launch command line for a CS:GO server.

    Returns (argv_list, working_directory) for the process launcher.
    Raises ServerError when the configured executable is missing.

    Sample command:
    ./srcds_run -game csgo -console -usercon +game_type 0 +game_mode 0
        +mapgroup mg_active +map de_dust2 -maxplayers 30
    """
    exe_name = server.data["exe_name"]
    # BUG FIX: the ServerError was previously constructed but never raised,
    # so a missing executable was silently ignored.  os.path.join also
    # handles a "dir" value with or without a trailing separator.
    if not os.path.isfile(os.path.join(server.data["dir"], exe_name)):
        raise ServerError("Executable file not found")
    if exe_name[:2] != "./":
        exe_name = "./" + exe_name
    steamcmd_dir = steamcmd.STEAMCMD_DIR
    steam_updatescript = steamcmd.get_autoupdate_script(
        server.name, server.data["dir"], steam_app_id)
    return [
        exe_name, "-game", "csgo", "-console", "-usercon",
        "+game_type", str(server.data["gametype"]),
        "+game_mode", str(server.data["gamemode"]),
        "+sv_pure", "1",
        "+ip", "0.0.0.0",
        "-secured", "-timeout 0", "-strictportbind",
        "-port", str(server.data["port"]),
        "+mapgroup", str(server.data["mapgroup"]),
        "+map", str(server.data["startmap"]),
        "-maxplayers", str(server.data["maxplayers"]),
        "-autoupdate",
        "-steam_dir", steamcmd_dir,
        "-steamcmd_script", steam_updatescript,
        "+sv_shutdown_timeout_minutes", "2"
    ], server.data["dir"]
def split_server_name(name):
    """
    This is used to select a server owned by a user

    name:
    - if name is a string without "/", then this represents the server
      name, and thus this server is owned by the current user
    - if name is a string with "/" we split it, the left hand side is
      the user name and the right hand side is the server owned by that
      user

    returns User, server
    if User = None, then we act upon the server owned by the current user
    """
    owner, sep, server = name.partition("/")
    if not sep:
        # no "/" at all: a bare server name owned by the current user
        return None, owner
    if "/" in server:
        # more than one "/" is ambiguous, so reject it
        raise ServerError("Invalid server name. Only one / allowed")
    return owner, server
def splitservername(name):
    """Split "user/server" into (user, server); a bare name yields (None, name).

    Raises ServerError when more than one "/" is present.
    """
    parts = name.split("/")
    count = len(parts)
    if count > 2:
        raise ServerError("Invalid server name. Only one / allowed")
    if count == 2:
        return parts[0], parts[1]
    return None, parts[0]
def install(server, *, eula=False):
    """Ensure the server directory exists and the configured jar is present.

    Raises ServerError when the jar named by 'exe_name' is not found in
    the server directory; otherwise persists the server data.
    """
    directory = server.data["dir"]
    if not os.path.isdir(directory):
        os.makedirs(directory)
    jar_path = os.path.join(directory, server.data["exe_name"])
    if not os.path.isfile(jar_path):
        raise ServerError(
            "Can't find server jar ({}). Please place the files in the directory and/or update the 'exe_name' then run setup again"
            .format(jar_path))
    server.data.save()
def configure(server, ask, port=None, dir=None, *,
              url=None, modpack_url=None,
              exe_name="Tekkit.jar", download_name="Tekkit.zip"):
    """Resolve the Tekkit download URL and delegate setup to van.configure.

    ask -- when truthy, interactively prompt the user to confirm or
           override the URL (reads from stdin)
    url / modpack_url -- explicit overrides; otherwise fall back to the
           values remembered in server.data from a previous configure
    Raises ServerError when no download URL can be determined.
    """
    if url == None:
        # fall back to the URL remembered from a previous configure
        if "url" in server.data and server.data["url"] is not None:
            url = server.data["url"]
    # attempt to find the download url
    if ask or url is None:
        if modpack_url == None:
            if "modpack_url" in server.data and server.data[
                    "modpack_url"] is not None:
                modpack_url = server.data["modpack_url"]
        try:
            # get the URL from a known keyword
            modpack_url = MODPACK_URL[modpack_url]
        except KeyError:
            # ok, lets just use this URL anyway
            pass
        server.data["modpack_url"] = modpack_url
        latest_url = get_file_url(modpack_url)
        if url is None:
            url = latest_url
        if ask:
            print(
                "Which url should we use to download tekkit?\nThe latest url is '{}'."
                .format(latest_url))
            inp = input(
                "Please enter the url to download tekkit from or 'latest' for the latest version: [{}] "
                .format(url)).strip()
            if inp != "":
                if inp.lower() == "latest":
                    url = latest_url
                else:
                    url = inp
    if url == None:
        raise ServerError("No download URL available")
    # tekkit run time updates so must have copied of everything so it can update them
    return van.configure(server, ask, port=port, dir=dir, eula=False,
                         version=None, url=url, exe_name=exe_name,
                         download_name=download_name,
                         download_data={
                             "linkdir": (),
                             "copy": (r"\.", )
                         })
def fetch_gesture(data):
    """Build a preprocessed GestureData from a request payload.

    Raises ServerError (HTTP 400) when 'gesture_data' is missing.
    """
    if 'gesture_data' not in data:
        raise ServerError('No gesture data provided', 400)
    gesture = GestureData(data['gesture_data'])
    # preprocessing pipeline: normalise, filter, then compute averages
    gesture.normalize()
    gesture.filter()
    gesture.find_average()
    return gesture
def dobackup(server, profile=None):
    """Back up the server directory, pausing world saves while it runs.

    If the server is running (a screen session exists), saving is turned
    off and flushed first, then re-enabled afterwards even if the backup
    fails.  Raises ServerError when the underlying backup errors.
    """
    if screen.check_screen_exists(server.name):
        # flush and suspend world saves so the files are stable on disk
        screen.send_to_server(server.name, "\nsave-off\nsave-all\n")
        time.sleep(30)
    try:
        backups.backup(server.data["dir"], server.data['backup'], profile)
    except backups.BackupError as ex:
        raise ServerError("Error backing up server: {}".format(ex))
    finally:
        # re-enable saving regardless of backup success
        if screen.check_screen_exists(server.name):
            screen.send_to_server(server.name, "\nsave-on\nsave-all\n")
def backup(server, profile=None, *, activate=None, when=None):
    """Run a backup now, or (de)activate a scheduled backup cron job.

    activate -- None: perform an immediate backup using `profile`;
                "none": remove this server's scheduled backup job;
                "daily"/"weekly"/"monthly"/"yearly": (re)schedule one.
    when -- extra time spec forwarded to _parsewhen (format defined there).
    Raises ServerError on invalid argument combinations or frequencies.
    """
    if activate is None:
        dobackup(server, profile)
    else:
        if profile is not None:
            raise ServerError("Can't specify a profile if activating. Edit the backup schedule to change what backups are done when")
        if activate not in ("weekly", "monthly", "yearly", "daily", "none"):
            raise ServerError("Invalid frequency for backups. Options are 'yearly', 'monthly', 'weekly' or 'daily'")
        # imported lazily so the crontab dependency is only needed here
        import crontab
        from core import program
        programpath = program.PATH
        ct = crontab.CronTab(user=True)
        # find this server's existing "<program> <server> backup" jobs
        jobs = ((job, job.command.split()) for job in ct
                if job.is_enabled() and job.command.startswith(programpath))
        jobs = [job for job, cmd in jobs
                if cmd[0] == programpath and server.name == cmd[1] and cmd[2:] == ["backup"]]
        if activate == "none":
            if len(jobs) == 0:
                raise ServerError("backups aren't active. Can't deactivate")
            else:
                for job in jobs:
                    ct.remove(job)
        else:
            # replace any existing schedule with a single fresh job
            for job in jobs:
                ct.remove(job)
            job = ct.new(command=programpath + " " + server.name + " backup")
            if not job.setall(*_parsewhen(activate, when)):
                print("Error parsing time spec")
            # fill any slices left unspecified with random values: minute
            # anywhere, hour in the quiet 2-6 window, then day-of-month,
            # month and weekday as the chosen frequency requires
            if job.slices[0].parts == []:
                job.slices[0].parse(random.randint(0, 59))
            if job.slices[1].parts == []:
                job.slices[1].parse(random.randint(2, 6))
            if activate in ("monthly", "yearly") and job.slices[2].parts == []:
                job.slices[2].parse(random.randint(1, 28))
            if activate == "yearly" and job.slices[3].parts == []:
                job.slices[3].parse(random.randint(1, 12))
            if activate == "weekly" and job.slices[4].parts == []:
                job.slices[4].parse(random.randint(0, 6))
            # re-parse each slice from its rendered form to normalise it
            for slice in job.slices:
                slice.parse(slice.render(True))
            print("Job schedule set to {}".format(job.slices))
        ct.write()
def install(server, *, eula=False):
    """Download the server files if needed, link them into the server
    directory, then run the customised-server install step.

    The download is skipped when 'current_url' matches the configured
    'url' and the executable already exists.  A plain download is
    symlinked as the jar; a ".zip" download is extracted and merged into
    the directory, with a ".~basetag" symlink recording the base tree so
    later updates can diff against it.
    Raises ServerError when the download fails.
    """
    if not os.path.isdir(server.data["dir"]):
        os.makedirs(server.data["dir"])
    mcjar = os.path.join(server.data["dir"], server.data["exe_name"])
    # redownload when the configured URL changed or the executable is missing
    # (FIX: removed unused 'mcdwl' local and a leftover debug print of the
    # download extension)
    if "current_url" not in server.data or server.data[
            "current_url"] != server.data["url"] or not os.path.isfile(mcjar):
        _, download_extension = os.path.splitext(
            server.data["download_name"])
        decompress = ()
        if download_extension == ".zip":
            decompress = ("zip", )
        try:
            downloadpath = downloader.getpath(
                "url",
                (server.data["url"], server.data["download_name"]) + decompress)
            if decompress == ():
                # plain file: (re)link the cached download as the server jar
                try:
                    os.remove(mcjar)
                except FileNotFoundError:
                    pass
                os.symlink(os.path.join(downloadpath, server.data["exe_name"]),
                           mcjar)
            else:
                # archive: merge the extracted tree into the server dir,
                # tracking the previous base via the .~basetag symlink
                basetagpath = os.path.join(server.data["dir"], ".~basetag")
                try:
                    oldpath = os.readlink(basetagpath)
                except FileNotFoundError:
                    oldpath = "/dev/null/INVALID"
                else:
                    os.remove(basetagpath)
                utils.updatefs.update(oldpath, downloadpath,
                                      server.data["dir"],
                                      server.data["download"]["linkdir"],
                                      server.data["download"]["copy"])
                os.symlink(downloadpath, basetagpath)
        except downloader.DownloaderError as ex:
            print("Error downloading minecraft_server.jar: ")
            # FIX: chain the original downloader error for diagnosability
            raise ServerError(
                "Error setting up server. Server file isn't already downloaded and can't download requested version"
            ) from ex
        server.data["current_url"] = server.data["url"]
    else:
        print("Skipping download")
    server.data.save()
    cust.install(server, eula=eula)
def install(server, *, eula=False):
    """Verify the vanilla server jar, then generate and patch its config.

    If server.properties (or, when eula is requested, eula.txt) does not
    exist yet, the server is started once (with a timeout) so it creates
    its default files; the port is then written into server.properties
    and, when `eula` is True, the EULA is accepted.
    Raises ServerError when the jar named by 'exe_name' is missing.
    """
    if not os.path.isdir(server.data["dir"]):
        os.makedirs(server.data["dir"])
    mcjar = os.path.join(server.data["dir"], server.data["exe_name"])
    if not os.path.isfile(mcjar):
        raise ServerError("Can't find server jar ({}). Please place the files in the directory and/or update the 'exe_name' then run setup again".format(mcjar))
    server.data.save()
    eulafile = os.path.join(server.data["dir"], "eula.txt")
    configfile = os.path.join(server.data["dir"], "server.properties")
    if not os.path.isfile(configfile) or (eula and not os.path.isfile(eulafile)):
        # use as flag for has the server created it's files
        print("Starting server to create settings")
        try:
            sp.check_call(["java", "-jar", server.data["exe_name"], "nogui"],
                          cwd=server.data["dir"], shell=False, timeout=20)
        except sp.CalledProcessError as ex:
            # BUG FIX: previously concatenated str + int (ex.returncode),
            # which raised TypeError inside this error handler
            print("Error running server. Java returned status: {}".format(ex.returncode))
        except sp.TimeoutExpired:
            print("Error running server. Process didn't complete in time")
    updateconfig(configfile, {"server-port": str(server.data["port"])})
    if eula:
        updateconfig(eulafile, {"eula": "true"})
def get_start_command(server):
    """Build the srcds launch command line for a Team Fortress 2 server.

    Returns (argv_list, working_directory) for the process launcher.
    Raises ServerError when the configured executable is missing.

    Example run: ./srcds_run -game tf -port 27015 +maxplayers 32 +map cf_2fort
    TODO define a map using the -m optional argument
    """
    exe_name = server.data["exe_name"]
    # BUG FIX: the ServerError was previously constructed but never raised,
    # so a missing executable was silently ignored.  os.path.join also
    # handles a "dir" value with or without a trailing separator.
    if not os.path.isfile(os.path.join(server.data["dir"], exe_name)):
        raise ServerError("Executable file not found")
    if exe_name[:2] != "./":
        exe_name = "./" + exe_name
    steam_updatescript = steamcmd.get_autoupdate_script(
        server.name, server.data["dir"], steam_app_id)
    steamcmd_dir = steamcmd.STEAMCMD_DIR
    return [
        exe_name, "-game", "tf",
        "-port", str(server.data["port"]),
        "+maxplayers", str(server.data["maxplayers"]),
        "+sv_pure", "1",
        "+ip", "0.0.0.0",
        "-secured", "-timeout 0", "-strictportbind",
        "+randommap",
        "-autoupdate",
        "-steam_dir", steamcmd_dir,
        "-steamcmd_script", steam_updatescript,
        "+sv_shutdown_timeout_minutes", "2"
    ], server.data["dir"]
def retrain(request):
    """Kick off a model retrain, rejecting the request while one is running.

    Raises ServerError (HTTP 400) if a retrain is already in progress.
    """
    already_running = classifier.retraining
    if already_running:
        raise ServerError('Model is retraining', 400)
    classifier.retrain()
    return {'success': True}
def _recursiveInstall(self, recursionLevel, manifest, product, version,
                      flavor, pkgroot, productRoot, updateTags=False,
                      alsoTag=None, opts=None, depends=DEPS_ALL,
                      noclean=False, noeups=False, searchDep=None,
                      setups=None, installed=None, tag=None, ances=None):
    """Install `product`/`version` and, depending on `depends`, its
    dependencies, recursing into each dependency's own manifest when it
    is not already installed.

    NOTE: legacy Python 2 code (`print >>`, `except E, e`, `has_key`).

    Parameters as used below:
      recursionLevel -- recursion depth, incremented on self-calls
      manifest       -- manifest whose getProducts() drives the loop
      depends        -- one of DEPS_ALL / DEPS_NONE / DEPS_ONLY
      searchDep      -- tri-state; None means decide per-distrib (PRUNE)
      setups / installed / ances -- accumulators shared down the recursion
    Returns True when short-circuiting a circular dependency; otherwise
    falls off the end (implicitly None) -- callers appear to treat a
    truthy return as success (TODO confirm against caller).
    """
    # default the mutable accumulators (shared across recursive calls)
    if installed is None:
        installed = []
    if ances is None:
        ances = []
    if setups is None:
        setups = []
    instflavor = flavor
    if instflavor == "generic":
        # "generic" resolves to the locally configured flavor
        instflavor = self.eups.flavor
    if alsoTag is None:
        alsoTag = []

    # a function for creating an id string for a product
    prodid = lambda p, v, f: " %s %s for %s" % (p, v, f)
    idstring = prodid(manifest.product, manifest.version, flavor)

    if self.verbose > 0:
        msg = None
        if depends == self.DEPS_NONE:
            msg = "Skipping dependencies for {0} {1}".format(product, version)
        elif depends == self.DEPS_ONLY:
            msg = ("Installing dependencies for {0} {1}, but not {0} itself"
                   .format(product, version))
        if msg is not None:
            print >> self.log, msg

    products = manifest.getProducts()
    if self.verbose >= 0 and len(products) == 0:
        print >> self.log, "Warning: no installable packages associated", \
            "with", idstring

    # check for circular dependencies:
    if idstring in ances:
        if self.verbose >= 0:
            print >> self.log, "Detected circular dependencies", \
                "within manifest for %s; short-circuiting." % idstring.strip()
            if self.verbose > 2:
                print >> self.log, "Package installation already in progress:%s" % "".join(ances)
        return True

    #
    # See if we should process dependencies
    #
    if searchDep is None:
        prod = manifest.getDependency(product, version, flavor)
        if prod and self.repos[pkgroot].getDistribFor(prod.distId, opts, flavor, tag).PRUNE:
            searchDep = False  # no, we shouldn't process them

    if searchDep:
        nprods = ""  # cannot predict the total number of products to install
    else:
        nprods = "/%-2s" % len(products)

    #
    # Process dependencies
    #
    defaultProduct = hooks.config.Eups.defaultProduct["name"]
    productRoot0 = productRoot  # initial value

    for at, prod in enumerate(products):
        pver = prodid(prod.product, prod.version, instflavor)

        # check for circular dependencies (disabled -- superseded by the
        # manifest-level check above; kept for reference):
        if False:
            if pver in ances:
                if self.verbose >= 0:
                    print >> self.log, "Detected circular dependencies", \
                        "within manifest for %s; short-circuiting." % idstring.strip()
                    if self.verbose > 2:
                        print >> self.log, "Package installation already in progress:%s" % "".join(ances)
                continue
            ances.append(pver)

        is_product = (prod.product == product and prod.version == version)
        # is_product==False => prod.product is a dependency
        if depends == self.DEPS_NONE and not is_product:
            continue
        elif depends == self.DEPS_ONLY and is_product:
            continue

        if pver in installed:
            # we've installed this via the current install() call
            continue

        productRoot = productRoot0

        thisinstalled = None
        if not noeups:
            thisinstalled = self.eups.findProduct(prod.product, prod.version, flavor=instflavor)

        shouldInstall = True
        if thisinstalled:
            msg = " [ %2d%s ] %s %s" % (at+1, nprods, prod.product, prod.version)
            if prod.product == defaultProduct:
                continue  # we don't want to install the implicit products
            if prod.version == "dummy":
                continue  # we can't reinstall dummy versions and don't want to install toolchain
            if manifest.mapping and manifest.mapping.noReinstall(prod.product, prod.version, flavor):
                msg += "; manifest.remap specified no reinstall"
                if self.eups.force:
                    msg += " (ignoring --force)"
                if self.verbose >= 0:
                    print >> self.log, msg
                continue
            if self.eups.force:
                # msg += " (forcing a reinstall)"
                msg = ''
            else:
                shouldInstall = False
                msg += " (already installed)"
            if self.verbose >= 0 and msg:
                print >> self.log, msg,
            productRoot = thisinstalled.stackRoot()  # now we know which root it's installed in

        if shouldInstall:
            recurse = searchDep
            if recurse is None:
                recurse = not prod.distId or prod.shouldRecurse
            if recurse and \
                    (prod.distId is None or (prod.product != product or prod.version != version)):
                # This is not the top-level product for the current manifest.
                # We are ignoring the distrib ID; instead we will search
                # for the required dependency in the repositories
                pkg = self.findPackage(prod.product, prod.version, prod.flavor)
                if pkg:
                    dman = self.repos[pkg[3]].getManifest(pkg[0], pkg[1], pkg[2])
                    thisinstalled = \
                        self._recursiveInstall(recursionLevel+1, dman, prod.product,
                                               prod.version, prod.flavor, pkg[3],
                                               productRoot, updateTags, alsoTag,
                                               opts, depends, noclean, noeups,
                                               searchDep, setups, installed, tag,
                                               ances)
                    if thisinstalled:
                        shouldInstall = False
                elif self.verbose > 0:
                    print >> self.log, \
                        "Warning: recursive install failed for", prod.product, prod.version
            elif not prod.distId:
                msg = "No source is available for package %s %s" % (prod.product, prod.version)
                if prod.flavor:
                    msg += " (%s)" % prod.flavor
                raise ServerError(msg)

        if shouldInstall:
            if self.verbose >= 0:
                if prod.flavor != "generic":
                    msg1 = " (%s)" % prod.flavor
                else:
                    msg1 = ""
                msg = " [ %2d%s ] %s %s%s" % (at+1, nprods, prod.product, prod.version, msg1)
                print >> self.log, msg, "...",
                self.log.flush()

            pkg = self.findPackage(prod.product, prod.version, prod.flavor)
            if not pkg:
                msg = "Can't find a package for %s %s" % (prod.product, prod.version)
                if prod.flavor:
                    msg += " (%s)" % prod.flavor
                raise ServerError(msg)

            # Look up the product, which may be found on a different pkgroot
            pkgroot = pkg[3]
            dman = self.repos[pkgroot].getManifest(pkg[0], pkg[1], pkg[2])
            nprod = dman.getDependency(prod.product)
            if nprod:
                prod = nprod

            self._doInstall(pkgroot, prod, productRoot, instflavor, opts,
                            noclean, setups, tag)
            if pver not in ances:
                ances.append(pver)

            if self.verbose >= 0:
                if self.log.isatty():
                    # overwrite the in-progress line on a terminal
                    print >> self.log, "\r", msg, " "*(70-len(msg)), "done. "
                else:
                    print >> self.log, "done."

        # Whether or not we just installed the product, we need to...
        # ...add the product to the setups
        setups.append("setup --just --type=build %s %s" % (prod.product, prod.version))

        # ...update the tags
        if updateTags:
            self._updateServerTags(prod, productRoot, instflavor,
                                   installCurrent=opts["installCurrent"])
        if alsoTag:
            if self.verbose > 1:
                print >> self.log, "Assigning Tags to %s %s: %s" % \
                    (prod.product, prod.version, ", ".join([str(t) for t in alsoTag]))
            for tag in alsoTag:
                try:
                    self.eups.assignTag(tag, prod.product, prod.version, productRoot)
                except Exception, e:
                    # report each distinct tagging failure only once
                    msg = str(e)
                    if not self._msgs.has_key(msg):
                        print >> self.log, msg
                        self._msgs[msg] = 1

        # ...note that this package is now installed
        installed.append(pver)
def checkvalue(server, key, value):
    """Validate a settable server data value; only 'exe_name' is writable.

    Raises ServerError for every other key.
    """
    if key != "exe_name":
        raise ServerError("All read only as not yet implemented")
    return value
def __init__(self, df: pd.DataFrame, n_groups: int, alphas: list,
             messages: dict, listener=None):
    """Validate the inputs and run the external group-allocation program.

    df        -- one row per user; optional 'Foreigner' column (cast to int32)
    n_groups  -- number of groups to allocate the users into
    alphas    -- three weights summing to 1; the third is dropped (and the
                 first two rescaled) when no foreigner data is present
    messages  -- dict with a 'notes' list used for user-facing remarks
    listener  -- optional callable receiving progress values parsed as
                 floats from the program's output (range presumed 0..1 --
                 TODO confirm against the external program)

    Sets self.alloc to the allocation array parsed from the program's
    final output line.  Raises UserError for invalid sizes/weights and
    ServerError when the external program fails or emits bad JSON.
    """
    assert len(alphas) == 3, 'Three weights required'
    self.df = df
    self.messages = messages
    if len(self.df) < n_groups:
        raise UserError(
            'There are not enough users available to fill the groups.')
    if not np.isclose(sum(alphas), 1, rtol=1.e-3):
        raise UserError('The alpha weights need to sum up to 1.')
    if 'Foreigner' in self.df:
        self.foreigners = self.df['Foreigner'].to_numpy().astype(np.int32)
        if len(self.foreigners) != len(self.df):
            raise UserError(
                'A foreigner state must be given for each user.')
    else:
        self.foreigners = None
        # We only need the first two weights when no foreigners are given
        alphas = alphas[:2]
        alphas_total = sum(alphas)
        if not np.isclose(alphas_total, 1):
            # Make sure they sum up to 1
            alphas[0] /= alphas_total
            alphas[1] /= alphas_total
            self.messages['notes'].append(
                fr'The weights were rescaled to \(\alpha_s = {alphas[0]:.2}\) and \(\alpha_m = {alphas[1]:.2}\) since they had not summed up to 1.'
            )
    self.alphas = alphas
    iterations = GroupSearch(n_groups, len(self.df)).total_iterations()
    if iterations > 50_000_000:
        # hard cap on the search-space size to keep run time bounded
        raise UserError(
            'The computation would take too long for this configuration. Please specify less groups and/or less users.'
        )
    # The algorithm is run as a separate process
    # NOTE(review): relative Windows-build path -- assumes the working
    # directory is the project root; confirm deployment layout.
    cmd = [
        'group_allocation_c++/x64/Release/group_allocation_c++.exe',
        '--n_groups',
        str(n_groups), '--n_users',
        str(len(self.df))
    ]
    if self.foreigners is not None:
        cmd.append('--foreigners')
        cmd.append(json.dumps(self.foreigners, cls=JSONNumpyEncoder))
    cmd.append('--alphas')
    cmd.append(json.dumps(self.alphas, cls=JSONNumpyEncoder))
    # Popen works asynchronously (approach inspired by https://stackoverflow.com/a/28319191)
    with subprocess.Popen(cmd,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE,
                          universal_newlines=True) as process:
        for line in process.stdout:
            try:
                # The first lines denote the progress
                progress = float(line)
                if listener is not None:
                    listener(progress)
            except ValueError:
                try:
                    # The last line contains the result of the algorithm
                    self.alloc = np.asarray(json.loads(line))
                except json.JSONDecodeError as error:
                    raise ServerError(ServerError.CODE_RESULT_PARSING,
                                      str(error))
        process.wait()
        if process.returncode != 0:
            raise ServerError(ServerError.CODE_EXTERNAL_PROGRAM,
                              process.stderr.read())