def _usagecapacity(service):
    """calculate the current usage of the service.

    Returns a 5-tuple:
      ("used_gpus (used_cpus)", queued_count,
       "capacity_gpus (capacity_cpus)", busy_resource_count, detail_dict)
    where detail_dict maps each resource name to its capacity, cpu counts,
    reservation, per-task usage strings and busy/availability info.

    NOTE(review): a later definition of `_usagecapacity` in this file
    (Capacity-based) shadows this one at import time.
    """
    usage_gpu = 0
    usage_cpu = 0
    capacity_gpus = 0
    capacity_cpus = 0
    busy = 0
    detail = {}
    servers = service.list_servers()
    for resource in service.list_resources():
        detail[resource] = {'busy': '', 'reserved': ''}
        # capacity per resource comes from the service definition
        r_capacity = service.list_resources()[resource]
        detail[resource]['capacity'] = r_capacity
        capacity_gpus += r_capacity
        detail[resource]['ncpus'] = servers[resource]['ncpus']
        capacity_cpus += servers[resource]['ncpus']
        reserved = redis.get("reserved:%s:%s" % (service.name, resource))
        if reserved:
            detail[resource]['reserved'] = reserved
        count_map_gpu = Counter()
        task_type = {}
        count_map_cpu = {}
        count_used_gpus = 0
        count_used_cpus = 0
        # each value of the gpu_resource hash is a task id occupying one GPU
        r_usage_gpu = redis.hgetall("gpu_resource:%s:%s" % (service.name, resource)).values()
        for t in r_usage_gpu:
            task_type[t] = redis.hget("task:%s" % t, "type")
            count_map_gpu[t] += 1
            count_used_gpus += 1
            # count a task's CPU reservation only once, on first sight
            if t not in count_map_cpu:
                count_map_cpu[t] = int(redis.hget("task:%s" % t, "ncpus"))
                count_used_cpus += count_map_cpu[t]
        # cpu_resource is a list of task ids running on CPU only
        r_usage_cpu = redis.lrange("cpu_resource:%s:%s" % (service.name, resource), 0, -1)
        for t in r_usage_cpu:
            task_type[t] = redis.hget("task:%s" % t, "type")
            if t not in count_map_cpu:
                count_map_cpu[t] = int(redis.hget("task:%s" % t, "ncpus"))
                # task uses no GPU; record 0 so the usage report can index it
                count_map_gpu[t] = 0
                count_used_cpus += count_map_cpu[t]
        detail[resource]['usage'] = ["%s %s: %d (%d)" % (task_type[k], k,
                                                         count_map_gpu[k],
                                                         count_map_cpu[k]) for k in count_map_gpu]
        detail[resource]['avail_cpus'] = int(redis.get("ncpus:%s:%s" % (service.name, resource)))
        detail[resource]['avail_gpus'] = r_capacity-count_used_gpus
        err = redis.get("busy:%s:%s" % (service.name, resource))
        if err:
            detail[resource]['busy'] = err
            busy = busy + 1
        usage_cpu += count_used_cpus
        usage_gpu += count_used_gpus
    queued = redis.llen("queued:"+service.name)
    return ("%d (%d)" % (usage_gpu, usage_cpu), queued,
            "%d (%d)" % (capacity_gpus, capacity_cpus), busy, detail)
def server_listconfig(service):
    """Return the active configuration name and all stored configurations
    for a service pool; requires the edit_config ability on the pool entity."""
    entity = service[0:2].upper()
    if not has_ability(flask.g, "edit_config", entity):
        abort(make_response(
            jsonify(message="insufficient credentials for edit_config (entity %s)" % entity),
            403))
    admin_key = "admin:service:%s" % service
    current = redis.hget(admin_key, "current_configuration")
    stored = redis.hget(admin_key, "configurations")
    return flask.jsonify({
        'current': current,
        'configurations': json.loads(stored)
    })
def _usagecapacity(service):
    """calculate the current usage of the service.

    Returns a 5-tuple:
      ("used_gpus (used_cpus)", queued_count,
       "capacity_gpus (capacity_cpus)", busy_resource_count, detail_dict).
    GPU/CPU pairs are tracked together through Capacity objects.
    """
    usage_xpu = Capacity()
    capacity_xpus = Capacity()
    busy = 0
    detail = {}
    for resource in service.list_resources():
        detail[resource] = {'busy': '', 'reserved': ''}
        # per-resource capacity is a Capacity (ngpus/ncpus) value
        r_capacity = service.list_resources()[resource]
        detail[resource]['capacity'] = r_capacity
        capacity_xpus += r_capacity
        reserved = redis.get("reserved:%s:%s" % (service.name, resource))
        if reserved:
            detail[resource]['reserved'] = reserved
        count_map_gpu = Counter()
        count_map_cpu = Counter()
        task_type = {}
        count_used_xpus = Capacity()
        # each value of the gpu_resource hash is a task id occupying one GPU
        r_usage_gpu = redis.hgetall("gpu_resource:%s:%s" % (service.name, resource)).values()
        for t in r_usage_gpu:
            if t not in task_type:
                task_type[t] = redis.hget("task:%s" % t, "type")
            count_map_gpu[t] += 1
            count_used_xpus.incr_ngpus(1)
        # each value of the cpu_resource hash is a task id occupying one CPU
        r_usage_cpu = redis.hgetall("cpu_resource:%s:%s" % (service.name, resource)).values()
        for t in r_usage_cpu:
            if t not in task_type:
                task_type[t] = redis.hget("task:%s" % t, "type")
            count_map_cpu[t] += 1
            count_used_xpus.incr_ncpus(1)
        # human-readable "type task: gpus (cpus)" summary per task
        detail[resource]['usage'] = [
            "%s %s: %d (%d)" % (task_type[t], t, count_map_gpu[t], count_map_cpu[t])
            for t in task_type
        ]
        detail[resource][
            'avail_gpus'] = r_capacity.ngpus - count_used_xpus.ngpus
        detail[resource][
            'avail_cpus'] = r_capacity.ncpus - count_used_xpus.ncpus
        err = redis.get("busy:%s:%s" % (service.name, resource))
        if err:
            detail[resource]['busy'] = err
            busy = busy + 1
        usage_xpu += count_used_xpus
    queued = redis.llen("queued:" + service.name)
    return ("%d (%d)" % (usage_xpu.ngpus, usage_xpu.ncpus), queued,
            "%d (%d)" % (capacity_xpus.ngpus, capacity_xpus.ncpus),
            busy, detail)
def github_authorized():
    """GitHub OAuth callback: persist the access token on the current user and
    cache profile, organization and repository data in Redis under the user id."""
    resp = GithubOAuth.github.authorized_response()
    session['github_token'] = (resp['access_token'], '')
    current_user.github_token = resp['access_token']
    db.session.add(current_user)
    db.session.commit()
    # cache the user's basic profile data
    redis.hset(
        current_user.id, 'github_data',
        json.dumps(GithubOAuth.github.get('user').data))
    # cache basic data about the organizations the user belongs to
    redis.hset(
        current_user.id, 'github_orgs_data',
        urllib2.urlopen(
            'https://api.github.com/user/orgs?access_token=%s&per_page=100' %
            resp['access_token']).read()
    )
    # cache the user's own repositories
    redis.hset(
        current_user.id, 'github_user_repos',
        urllib2.urlopen(
            'https://api.github.com/user/repos?access_token=%s&type=owner&per_page=100' %
            resp['access_token']).read()
    )
    # cache each organization's repository list
    github_orgs_data = redis.hget(current_user.id, 'github_orgs_data')
    if github_orgs_data is not None:
        # NOTE(review): eval() on data fetched from a remote API is unsafe;
        # json.loads would be the appropriate parser here.
        github_orgs_data = eval(github_orgs_data)
        for github_org_data in github_orgs_data:
            redis.hset(
                current_user.id,
                github_org_data['login'],
                urllib2.urlopen(github_org_data['repos_url'] + '?per_page=100').read()
            )
    # currently only caches all personal repositories (which include the
    # organizations' projects); the hash field is the repository name
    github_user_repos = redis.hget(
        current_user.id, 'github_user_repos'
    )
    if github_user_repos is not None:
        github_user_repos = eval(github_user_repos)
        for github_user_repo in github_user_repos:
            redis.hset(
                current_user.id,
                github_user_repo['name'],
                github_user_repo['clone_url']
            )
    return redirect(url_for('build_code_new'))
def _link_id_change(cls, old_field, new_field):
    """Move a hash field in {model}:{models}: the value stored under
    ``old_field`` is re-stored under ``new_field``."""
    key = rmkey(cls, cls.model)
    # renamed from `id` to avoid shadowing the builtin
    stored_id = redis.hget(key, old_field)
    redis.hdel(key, old_field)
    redis.hset(key, new_field, stored_id)
def go_to_next_filter(user_id, **kwargs):
    """Advance the user to the next filter, greet them for it, and persist
    the updated session state."""
    stored = redis.hget(user_id, 'next_filter')
    try:
        nxt = int(stored) + 1
    except TypeError:
        # hget returned None (no session yet): start from the first filter
        nxt = 0
    send_text_message(user_id, GREETINGS[nxt])
    update_session(user_id, next_filter=nxt, **kwargs)
def validate_credential(form):
    """Validate a username/password form against the Redis user store.

    Returns a (success, view_name) pair:
      (True, 'main')    valid credentials,
      (False, 'front')  missing field or wrong password,
      (False, 'signup') unknown user.

    Removed a block of paste-mangled commented-out error handling that was
    unreadable (credential-scrubber residue) and dead.
    """
    for field in ('username', 'password'):
        if not form.get(field):
            flash("Forgot to enter your " + field + "? :)")
            return False, 'front'
    username = form['username']
    password = form['password']
    correct_password = redis.hget("user::" + username, "password")
    if correct_password is None:
        # unknown user: redirect to sign-up
        return False, 'signup'
    # NOTE(review): passwords are stored and compared in plain text;
    # consider hashing before storage.
    if password == correct_password:
        return True, 'main'
    return False, 'front'
def _validate_credential(self, form):
    """Validate a login form against the Redis user store.

    On success stores the username in the session and returns (True, 'menu');
    returns (False, 'front') for a missing field or wrong password, and
    (False, 'signup') for an unknown user.

    Removed a block of paste-mangled commented-out error handling that was
    unreadable (credential-scrubber residue) and dead.
    """
    for field in ('username', 'password'):
        if not form.get(field):
            flash("Forgot to enter your " + field + "?")
            return False, 'front'
    username = form['username']
    password = form['password']
    correct_password = redis.hget(USER_PREFIX + username, "password")
    if correct_password is None:
        # unknown user: redirect to sign-up
        return False, 'signup'
    # NOTE(review): plain-text password comparison; consider hashed storage.
    if password == correct_password:
        session['username'] = username
        return True, 'menu'
    flash("Invalid credential, try again")
    return False, 'front'
def create_project():
    """Create a project record from the request body and clone/update its repo.

    Expects JSON with 'code' (Redis field under the user's hash holding the
    clone URL) and 'proName'. Rejects duplicate project names; otherwise
    stores the project and runs `git clone` (first time) or `git pull`
    (existing checkout) under CODE_FOLDER.
    """
    data = json.loads(request.get_data())
    code = str(data['code']).rstrip()
    proName = str(data['proName'])
    # random 32-char hex string used as the project's verification token
    verify = ''.join(map(lambda xx: (hex(ord(xx))[2:]), os.urandom(16)))
    code_address = redis.hget(current_user.id, code)
    application = ProjectModel.query.filter_by(proname=proName).first()
    if application is not None:
        # runtime string kept verbatim; it means "duplicate application name"
        return json.dumps({'msg': '应用名字重复'})
    else:
        project = ProjectModel(
            proname=proName,
            address=code_address,
            verify=verify
        )
        db.session.add(project)
        db.session.commit()
        if os.path.exists(app.config['CODE_FOLDER']+'/'+proName):
            # update an existing checkout
            c2 = subprocess.Popen(
                'git pull origin master',
                cwd=app.config['CODE_FOLDER']+'/'+proName,
                shell=True)
            subprocess.Popen.wait(c2)
        else:
            # first clone of the repository
            # NOTE(review): code_address is interpolated into a shell command
            # with shell=True — shell-injection risk if the URL is untrusted.
            c1 = subprocess.Popen(
                'git clone ' + code_address,
                cwd=app.config['CODE_FOLDER'],
                shell=True)
            subprocess.Popen.wait(c1)
        return json.dumps({'msg': 'ok', 'verify': project.verify})
def get_log():
    """Build the project's Docker image, streaming log lines to the client,
    or replay previously cached build logs if the project was already built.

    Python 2 code (`print line` statement).
    """
    logs = []
    json_data = json.loads(request.get_data())
    project = ProjectModel.query.filter_by(proname=json_data['data']).first()
    if not project.is_build():
        os.chdir(app.config['CODE_FOLDER'] + '/' + json_data['data'])
        cli = Client(base_url=HOST)
        # stream docker build output line by line to the client
        for line in cli.build(path=os.getcwd(), stream=True, decode=True,
                              tag=str(REGISTRY + json_data['data'])):
            send_log(line)
            logs.append(line)
        # push the image to the private registry; push logs are not forwarded
        for line in cli.push(REGISTRY + json_data['data'], stream=True):
            # print the upload log
            print line
            assert line
        # cache the build log under (project id, verification token)
        redis.hset(project.id, project.verify, logs)
        project.build = True
        # the final build line contains 'Successfully built' on success
        if 'Successfully built' in logs[-1]['stream']:
            project.success = 1
        else:
            project.success = 2
        db.session.add(project)
        db.session.commit()
    else:
        # already built: replay the cached log lines
        # NOTE(review): eval() on data read back from Redis — json would be safer.
        lines = eval(redis.hget(project.id, project.verify))
        for line in lines:
            send_log(line)
    return json.dumps({'msg': 'ok'})
def get_service(service):
    """Wrapper to fail on invalid service: abort with 404 when the service
    definition is absent, otherwise return the unpickled definition."""
    serialized = redis.hget("admin:service:"+service, "def")
    if serialized is None:
        abort(flask.make_response(
            flask.jsonify(message="invalid service name: %s" % service), 404))
    # NOTE: unpickles data from Redis; acceptable only because the store is
    # written by this application itself.
    return pickle.loads(serialized)
def get_user_last_interact_time(openid):
    """Return the user's last interaction time, or 0 when none is recorded.

    The source here was corrupted by a scrubber ('wechat:user:'******'…');
    reconstructed as concatenating the openid into the hash key — TODO confirm
    against the writer of this key.
    """
    last_time = redis.hget('wechat:user:' + openid, 'last_interact_time')
    # hget returns None for a missing field; map that (and empty) to 0
    return last_time if last_time else 0
def get_explore_post_count(pid):
    """
    Return the visit count stored for a post.
    :param pid: post id
    :return: the recorded visit count for that post
    """
    field = 'pid:%s' % pid
    return redis.hget('blog_explore_post_count', field)
def _check_code(uuid, code):
    """Validate a captcha answer: raise Forbidden on mismatch, otherwise mark
    the record as solved and give it a 60 second lifetime."""
    stored = rd.hget(uuid, 'code').decode('utf8')
    if stored != code:
        raise Forbidden('wrong captcha')
    rd.hmset(uuid, {'success': 1})
    rd.expire(uuid, 60)
def get_colors():
    """Load the color settings from Redis; fall back to the default palette
    when nothing is stored."""
    raw = redis.hget(opts_key(), 'colors')
    if raw:
        return json.loads(raw)
    return {'default': default_color, 'threads': {}}
def verify_token(json):
    """Check whether the token supplied in the request body matches the one
    stored for the given email. (The `json` parameter is unused and shadows
    the json module; kept for interface compatibility.)"""
    payload = request.get_json()
    stored_token = redis.hget(payload["email"], "field1")
    if stored_token == payload["token"]:
        return True
    return False
def process_prompt(next_prompt, user_id, message_text):
    """Store the user's answer for the current booking prompt and advance the
    flow; when all prompts are answered, book the table with the collected
    fields and send the result back to the user."""
    if next_prompt < len(PROMPTS):
        prompt = PROMPTS[next_prompt]
        # normalize the raw message through the prompt's formatter before storing
        value = prompt['formatter'](message_text)
        redis.hset(user_id, prompt['name'], value)
        redis.hset(user_id, 'next_prompt', next_prompt+1)
        # keep the whole session hash alive for the configured TTL
        redis.expire(user_id, app.config['EXPIRE'])
        if next_prompt < len(PROMPTS):
            # NOTE(review): this condition is identical to the outer one and so
            # is always true here; `next_prompt + 1 < len(PROMPTS)` may have been
            # intended — as written, PROMPT_GREETINGS must hold one more entry
            # than PROMPTS or this indexes out of range on the last prompt.
            send_text_message(user_id, PROMPT_GREETINGS[next_prompt+1])
            return
    res = book_a_table(redis.hget(user_id, 'id_rest'),
                      redis.hget(user_id, 'time'),
                      redis.hget(user_id, 'persons'),
                      redis.hget(user_id, 'firstName'),
                      redis.hget(user_id, 'phone'))
    send_text_message(user_id, str(res))
def get_locked_since(self, obj):
    """Return the lock's "timestamp" field for obj, or None when the post is
    not locked or the lock entry cannot be read/parsed."""
    if self.is_locked(obj):
        try:
            lock_data = json.loads(
                redis.hget(locked_posts_redis_key, obj.id))
            return lock_data["timestamp"]
        except Exception:
            # narrowed from a bare `except:`, which also trapped
            # SystemExit/KeyboardInterrupt; bad data still yields None
            pass
    return None
def get_lock_expires(self, obj):
    """Return the lock's "expires" field for obj, or None when the post is
    not locked or the lock entry cannot be read/parsed."""
    if self.is_locked(obj):
        try:
            lock_data = json.loads(
                redis.hget(locked_posts_redis_key, obj.id))
            return lock_data["expires"]
        except Exception:
            # narrowed from a bare `except:`, which also trapped
            # SystemExit/KeyboardInterrupt; bad data still yields None
            pass
    return None
def get_favorites():
    """Return the list of favorite threads stored in Redis ([] when unset)."""
    raw = redis.hget(opts_key(), 'fav_threads')
    return json.loads(raw) if raw else []
def get_id(cls, field):
    """
    Look up an id by one of the model's fields.

    e.g. resolve a username to its id via the key user:users that holds
    [username=4, username2=6 ...]
    """
    return redis.hget(rmkey(cls, cls.model), field)
def get_locked_by(self, obj):
    """Return serialized basic info (id, name, email) for the user holding the
    lock on obj, or None when unlocked or the lock entry cannot be resolved."""
    if self.is_locked(obj):
        try:
            lock_data = json.loads(
                redis.hget(locked_posts_redis_key, obj.id))
            user_schema = UserSchema(only=("id", "name", "email"))
            return user_schema.dump(User.query.get(
                lock_data["by_user_id"])).data
        except Exception:
            # narrowed from a bare `except:`, which also trapped
            # SystemExit/KeyboardInterrupt; lookup failures still yield None
            pass
    return None
def is_locked(self, obj):
    """Return True when obj holds an unexpired lock entry in Redis.

    Side effects: an expired lock is deleted and an "unlocked" broadcast is
    emitted. Any lookup/parsing error is treated as "not locked".
    """
    if redis.hexists(locked_posts_redis_key, obj.id):
        try:
            lock_data = json.loads(
                redis.hget(locked_posts_redis_key, obj.id))
            # the lock payload stores an ISO-8601 expiry; compare in UTC
            expires = datetime.fromisoformat(lock_data["expires"])
            if expires >= datetime.now(pytz.utc):
                return True
            else:
                redis.hdel(locked_posts_redis_key, obj.id)  # clean up expired locks
                PostBroadcast.unlocked(obj.id)
        except Exception:
            # best-effort: malformed lock data counts as unlocked
            pass
    return False
def post(self):
    """Issue (or re-issue) a login token for valid credentials.

    Returns the cached token when one exists for the email, otherwise
    generates a fresh URL-safe token, stores it with a 25 minute expiry and
    returns it. Invalid credentials — including an unknown email, which
    previously raised AttributeError on `user.verify_password` — yield a 400.
    """
    json_data = data_json()
    email = json_data["email"]
    password = json_data["password"]
    user = User.query.filter_by(email=email).first()
    # guard against unknown users (user is None) as well as bad passwords
    if user is None or not user.verify_password(password):
        return jsonify({"error": "password dont check"}), 400
    if redis.hexists(email, "field1"):
        # reuse the still-valid cached token
        return jsonify({"token": redis.hget(email, "field1")})
    min_expire_token = 25
    number_bytes = 256
    token = token_urlsafe(number_bytes)
    redis.hset(email, "field1", token)
    redis.expire(email, 60 * min_expire_token)
    return jsonify({"token": token})
def save_diffs(data, threadid, user_id):
    """Apply an incoming diff patch to the user's stored draft message for a
    thread and save the updated draft (with color/commenting state) back."""
    prw = redis.hget('dlg_%s' % threadid, 'usr_%s' % user_id)
    if prw:
        # NOTE(review): eval() on data read back from Redis — the value is a
        # repr'd dict written by this function; json would be safer.
        prw = eval(prw)
        text = str(prw['msg'])
    else:
        # no draft yet: patch against an empty document
        text = ''
    if 'color' in data:
        color = data['color']
    else:
        color = ''
    if 'isCommenting' in data:
        isCommenting = data['isCommenting']
    else:
        isCommenting = False
    # apply the serialized diff-match-patch patches to the previous text
    msg, nottext = dmp.patch_apply(dmp.patch_fromText(data['diffs']), text)
    pr_msg = {'userid': user_id, 'msg': msg, 'color': color,
              'isCommenting': isCommenting}
    redis.hset('dlg_%s' % threadid, 'usr_%s' % user_id, pr_msg)
def build_code_new(): org_repos = {} # github 中的个人数据 github_data = redis.hget(current_user.id, 'github_data') if github_data is not None: github_data = json.loads(github_data) # github个人的组织信息 github_orgs_data = redis.hget(current_user.id, 'github_orgs_data') if github_orgs_data is not None: github_orgs_data = eval(github_orgs_data) # 个人拥有的项目信息 github_user_repos = redis.hget(current_user.id, 'github_user_repos') if github_user_repos is not None: github_user_repos = eval(github_user_repos) # 组织拥有的项目信息 if github_orgs_data is not None: for github_org_data in github_orgs_data: repo = eval(redis.hget( current_user.id, github_org_data['login'] )) org_repos[github_org_data['login']] = repo '''以下是gitlab数据''' # gitlab 个人数据 gitlab_data = redis.hget(current_user.id, 'gitlab_data') if gitlab_data is not None: gitlab_data = json.loads(gitlab_data) # gitlab 个人项目 gitlab_user_repos = redis.hget(current_user.id, 'gitlab_user_repos') if gitlab_user_repos is not None: gitlab_user_repos = eval(gitlab_user_repos) return render_template( 'build-new.html', github_data=github_data, github_orgs_data=github_orgs_data, github_user_repos=github_user_repos, github_org_repos=org_repos, gitlab_data=gitlab_data, gitlab_user_repos=gitlab_user_repos )
def add_new_user(msg):
    """Register the chat in the user hash with a default nickname, unless it
    is already known."""
    chat_id = msg.chat.id
    if redis.hget(user_key(), chat_id):
        return
    profile = {'nickname': utils.get_nickname(msg.chat)}
    redis.hset(user_key(), chat_id, json.dumps(profile))
def _verification(uuid):
    """Raise Forbidden unless the captcha record is marked solved; consume
    the record on success."""
    solved = int(rd.hget(uuid, 'success').decode('utf8'))
    if not solved:
        raise Forbidden()
    rd.delete(uuid)
def update_markdown(post_id):
    """Re-render a post's markdown source and store the HTML back in Redis,
    then redirect to the post's detail view."""
    key = 'post:%s' % post_id
    source = redis.hget(key, 'content')
    rendered = markdown2.markdown(source, extras=['fenced-code-blocks'])
    redis.hset(key, 'content_markdown', rendered)
    return redirect(url_for('.detail', post_id=post_id))
def get_noteID(threadid):
    """Look up the note id associated with the given thread."""
    return redis.hget('noteID', threadid)
def get_thread_is_comment(threadid):
    """Return True when the thread's commenting flag is stored as 'True'."""
    flag = redis.hget(opts_key(), 'is_commenting_' + str(threadid))
    return flag == 'True' if flag else False
def update_session_inc(user_id, **kwargs):
    """Bump the stored next_filter counter by one and persist the session.
    (Unlike go_to_next_filter, a missing counter raises TypeError here.)"""
    current = int(redis.hget(user_id, 'next_filter'))
    update_session(user_id, next_filter=current + 1, **kwargs)
def get_last_thread():
    """Return the stored id of the most recent thread, or 0 when unset."""
    thread = redis.hget(opts_key(), 'threadid')
    return thread if thread else 0
def launch(service):
    """Launch one or more tasks on a service pool.

    Validates credentials and the request 'content' (docker image, options,
    resources), then builds a chain of task descriptors: an optional
    preprocess task, the main task (train/trans/prepr/relea/vocab/exec),
    plus optional translate, score and tuminer follow-up tasks. All tasks are
    created via task.create and their ids returned as JSON.
    """
    pool_entity = service[0:2].upper()
    if not has_ability(flask.g, "train", pool_entity):
        abort(make_response(jsonify(message="insufficient credentials for train "
                                            "(entity %s)" % pool_entity), 403))
    current_configuration_name = redis.hget("admin:service:%s" % service, "current_configuration")
    configurations = json.loads(redis.hget("admin:service:%s" % service, "configurations"))
    current_configuration = json.loads(configurations[current_configuration_name][1])
    content = flask.request.form.get('content')
    if content is not None:
        content = json.loads(content)
    else:
        abort(flask.make_response(flask.jsonify(message="missing content in request"), 400))
    # attach any uploaded files to the task by name
    files = {}
    for k in flask.request.files:
        files[k] = flask.request.files[k].read()
    service_module = get_service(service)
    content["service"] = service
    exec_mode = content.get('exec_mode', False)
    # derive the task type from the docker command (exec mode bypasses this)
    if not exec_mode:
        task_type = '????'
        if "train" in content["docker"]["command"]:
            task_type = "train"
        elif "trans" in content["docker"]["command"]:
            task_type = "trans"
        elif "preprocess" in content["docker"]["command"]:
            task_type = "prepr"
        elif "release" in content["docker"]["command"]:
            task_type = "relea"
        elif "buildvocab" in content["docker"]["command"]:
            task_type = "vocab"
    else:
        task_type = 'exec'
    if task_type == '????':
        abort(flask.make_response(flask.jsonify(message="incorrect task definition"), 400))
    elif task_type != "exec":
        task_suffix = task_type
    else:
        task_suffix = get_docker_action(content["docker"]["command"])
        if task_suffix is None:
            task_suffix = task_type
    # Sanity check on content.
    if 'options' not in content or not isinstance(content['options'], dict):
        abort(flask.make_response(flask.jsonify(message="invalid options field"), 400))
    if 'docker' not in content:
        abort(flask.make_response(flask.jsonify(message="missing docker field"), 400))
    if ('image' not in content['docker'] or 'registry' not in content['docker'] or
            'tag' not in content['docker'] or 'command' not in content['docker']):
        abort(flask.make_response(flask.jsonify(message="incomplete docker field"), 400))
    if content['docker']['registry'] == 'auto':
        content['docker']['registry'] = _get_registry(service_module, content['docker']['image'])
    elif content['docker']['registry'] not in service_module._config['docker']['registries']:
        abort(flask.make_response(flask.jsonify(message="unknown docker registry"), 400))
    resource = service_module.get_resource_from_options(content["options"])
    # iterations > 1 chains several training tasks (train only, not exec mode)
    iterations = 1
    if "iterations" in content:
        iterations = content["iterations"]
        if exec_mode:
            abort(flask.make_response(flask.jsonify(message="chain mode unavailable in exec mode"), 400))
        if (task_type != "train" and iterations != 1) or iterations < 1:
            abort(flask.make_response(flask.jsonify(message="invalid value for iterations"), 400))
    ngpus = 1
    if "ngpus" in content:
        ngpus = content["ngpus"]
    ncpus = content.get("ncpus")
    # check that we have a resource able to run such a request
    if not _find_compatible_resource(service_module, ngpus, ncpus, resource):
        abort(flask.make_response(
            flask.jsonify(message="no resource available on %s for %d gpus (%s cpus)" %
                          (service, ngpus, ncpus and str(ncpus) or "-")), 400))
    # pop the chained-mode fields out of content before it is duplicated per task
    if "totranslate" in content:
        if exec_mode:
            abort(flask.make_response(flask.jsonify(message="translate mode unavailable for exec cmd"), 400))
        totranslate = content["totranslate"]
        del content["totranslate"]
    else:
        totranslate = None
    if "toscore" in content:
        if exec_mode:
            abort(flask.make_response(flask.jsonify(message="score mode unavailable for exec cmd"), 400))
        toscore = content["toscore"]
        del content["toscore"]
    else:
        toscore = None
    if "totuminer" in content:
        if exec_mode:
            abort(flask.make_response(flask.jsonify(message="tuminer chain mode unavailable for exec cmd"), 400))
        totuminer = content["totuminer"]
        del content["totuminer"]
    else:
        totuminer = None
    docker_version = content['docker']['tag']
    if docker_version.startswith('v'):
        docker_version = docker_version[1:]
    # feature flags gated on the docker image version
    try:
        chain_prepr_train = (not exec_mode and not content.get("nochainprepr", False) and
                             task_type == "train" and
                             semver.match(docker_version, ">=1.4.0"))
        can_trans_as_release = semver.match(docker_version, ">=1.8.0")
        trans_as_release = (not exec_mode and not content.get("notransasrelease", False) and
                            semver.match(docker_version, ">=1.8.0"))
        content["support_statistics"] = semver.match(docker_version, ">=1.17.0")
    except ValueError as err:
        # could not match docker_version - not valid semver
        chain_prepr_train = False
        trans_as_release = False
    priority = content.get("priority", 0)
    (xxyy, parent_task_id) = shallow_command_analysis(content["docker"]["command"])
    parent_struct = None
    parent_task_type = None
    if not exec_mode and parent_task_id:
        (parent_struct, parent_task_type) = model_name_analysis(parent_task_id)
    # check that parent model type matches current command
    if parent_task_type:
        if (parent_task_type == "trans" or parent_task_type == "relea" or
                (task_type == "prepr" and parent_task_type != "train" and
                 parent_task_type != "vocab")):
            abort(flask.make_response(flask.jsonify(message="invalid parent task type: %s" %
                                                    (parent_task_type)), 400))
    task_ids = []
    task_create = []
    while iterations > 0:
        # optionally prepend a preprocess task before training
        if (chain_prepr_train and parent_task_type != "prepr") or task_type == "prepr":
            prepr_task_id, explicitname = build_task_id(content, xxyy, "prepr", parent_task_id)
            if explicitname:
                patch_config_explicitname(content, explicitname)
            # copy the command prefix (up to the action verb) for the preprocess task
            idx = 0
            prepr_command = []
            train_command = content["docker"]["command"]
            while train_command[idx] != 'train' and train_command[idx] != 'preprocess':
                prepr_command.append(train_command[idx])
                idx += 1
            # create preprocess command, don't push the model on the catalog,
            # and generate a pseudo model
            prepr_command.append("--no_push")
            prepr_command.append("preprocess")
            prepr_command.append("--build_model")
            content["docker"]["command"] = prepr_command
            content["ncpus"] = ncpus or \
                get_cpu_count(current_configuration, 0, "preprocess")
            content["ngpus"] = 0
            preprocess_resource = service_module.select_resource_from_capacity(
                resource, Capacity(content["ngpus"], content["ncpus"]))
            # launch preprocess task on cpus only
            task_create.append(
                (redis, taskfile_dir,
                 prepr_task_id, "prepr", parent_task_id, preprocess_resource, service,
                 _duplicate_adapt(service_module, content),
                 files, priority, 0, content["ncpus"], {}))
            task_ids.append("%s\t%s\tngpus: %d, ncpus: %d" % (
                "prepr", prepr_task_id, 0, content["ncpus"]))
            # the main task chains onto the preprocess task
            remove_config_option(train_command)
            change_parent_task(train_command, prepr_task_id)
            parent_task_id = prepr_task_id
            content["docker"]["command"] = train_command
        if task_type != "prepr":
            task_id, explicitname = build_task_id(content, xxyy, task_suffix, parent_task_id)
            if explicitname:
                patch_config_explicitname(content, explicitname)
            # map output files of a trans task to the task producing them
            file_to_transtaskid = {}
            if task_type == "trans":
                try:
                    idx = content["docker"]["command"].index("trans")
                    output_files = get_params(("-o", "--output"),
                                              content["docker"]["command"][idx+1:])
                    for ofile in output_files:
                        file_to_transtaskid[ofile] = task_id
                except Exception:
                    pass
            content["ncpus"] = ncpus or \
                get_cpu_count(current_configuration, ngpus, task_type)
            content["ngpus"] = ngpus
            # recent images can translate in release mode on CPU only
            if task_type == "trans" and can_trans_as_release:
                if "--as_release" not in content["docker"]["command"] and trans_as_release:
                    content["docker"]["command"].append("--as_release")
                    content["ngpus"] = ngpus = 0
            task_resource = service_module.select_resource_from_capacity(
                resource, Capacity(content["ngpus"], content["ncpus"]))
            task_create.append(
                (redis, taskfile_dir,
                 task_id, task_type, parent_task_id, task_resource, service,
                 _duplicate_adapt(service_module, content),
                 files, priority, content["ngpus"], content["ncpus"], {}))
            task_ids.append("%s\t%s\tngpus: %d, ncpus: %d" % (
                task_type, task_id, content["ngpus"], content["ncpus"]))
            parent_task_type = task_type[:5]
            remove_config_option(content["docker"]["command"])
            # chained translation tasks, batched by available gpus
            if totranslate:
                content_translate = deepcopy(content)
                content_translate["priority"] = priority + 1
                if trans_as_release:
                    content_translate["ngpus"] = 0
                else:
                    content_translate["ngpus"] = min(ngpus, 1)
                content_translate["ncpus"] = ncpus or \
                    get_cpu_count(current_configuration,
                                  content_translate["ngpus"], "trans")
                translate_resource = service_module.select_resource_from_capacity(
                    resource, Capacity(content_translate["ngpus"],
                                       content_translate["ncpus"]))
                if ngpus == 0 or trans_as_release:
                    file_per_gpu = len(totranslate)
                else:
                    file_per_gpu = (len(totranslate)+ngpus-1) / ngpus
                subset_idx = 0
                while subset_idx * file_per_gpu < len(totranslate):
                    content_translate["docker"]["command"] = ["trans"]
                    if trans_as_release:
                        content_translate["docker"]["command"].append("--as_release")
                    content_translate["docker"]["command"].append('-i')
                    subset_totranslate = totranslate[subset_idx*file_per_gpu:
                                                     (subset_idx+1)*file_per_gpu]
                    for f in subset_totranslate:
                        content_translate["docker"]["command"].append(f[0])
                    change_parent_task(content_translate["docker"]["command"], task_id)
                    trans_task_id, explicitname = build_task_id(content_translate, xxyy,
                                                                "trans", task_id)
                    content_translate["docker"]["command"].append('-o')
                    for f in subset_totranslate:
                        # output path templated on the producing model id
                        ofile = f[1].replace('<MODEL>', task_id)
                        file_to_transtaskid[ofile] = trans_task_id
                        content_translate["docker"]["command"].append(ofile)
                    task_create.append(
                        (redis, taskfile_dir,
                         trans_task_id, "trans", task_id, translate_resource, service,
                         _duplicate_adapt(service_module, content_translate),
                         (), content_translate["priority"],
                         content_translate["ngpus"], content_translate["ncpus"], {}))
                    task_ids.append("%s\t%s\tngpus: %d, ncpus: %d" % (
                        "trans", trans_task_id,
                        content_translate["ngpus"], content_translate["ncpus"]))
                    subset_idx += 1
            # chained scoring tasks, grouped by the translate task that
            # produced each output file
            if toscore:
                toscore_parent = {}
                for (ofile, rfile) in toscore:
                    ofile = ofile.replace('<MODEL>', task_id)
                    parent_task_id = file_to_transtaskid.get(ofile)
                    if parent_task_id:
                        if parent_task_id not in toscore_parent:
                            toscore_parent[parent_task_id] = {"output": [], "ref": []}
                        ofile_split = ofile.split(':')
                        if len(ofile_split) == 2 and ofile_split[0] == 'launcher':
                            ofile = 'launcher:../' + parent_task_id + "/" + ofile_split[1]
                        toscore_parent[parent_task_id]["output"].append(ofile)
                        toscore_parent[parent_task_id]["ref"].append(rfile)
                for parent_task_id, oref in six.iteritems(toscore_parent):
                    content_score = deepcopy(content)
                    content_score["priority"] = priority + 1
                    content_score["ngpus"] = 0
                    content_score["ncpus"] = 1
                    score_resource = service_module.select_resource_from_capacity(
                        resource, Capacity(0, 1))
                    image_score = "nmtwizard/score"
                    option_lang = []
                    if parent_struct is not None:
                        # score in the target language of the parent model
                        option_lang.append('-l')
                        option_lang.append(parent_struct['xxyy'][-2:])
                    content_score["docker"] = {
                        "image": image_score,
                        "registry": _get_registry(service_module, image_score),
                        "tag": "latest",
                        "command": ["score", "-o"] + oref["output"] + ["-r"] + oref["ref"] +
                                   option_lang + ['-f', "launcher:scores"]
                    }
                    score_task_id, explicitname = build_task_id(content_score, xxyy,
                                                                "score", parent_task_id)
                    task_create.append(
                        (redis, taskfile_dir,
                         score_task_id, "exec", parent_task_id, score_resource, service,
                         content_score,
                         (), priority+2, 0, 1, {}))
                    task_ids.append("%s\t%s\tngpus: %d, ncpus: %d" % (
                        "score", score_task_id, 0, 1))
            if totuminer:
                # tuminer can run in CPU only mode, but it will be very slow for large data
                ngpus_recommend = ngpus
                ncpus_recommend = ncpus
                totuminer_parent = {}
                for (ifile, ofile) in totuminer:
                    #ofile = ofile.replace('<MODEL>', task_id)
                    parent_task_id = file_to_transtaskid.get(ofile)
                    if parent_task_id:
                        if parent_task_id not in totuminer_parent:
                            totuminer_parent[parent_task_id] = {"infile": [], "outfile": [],
                                                                "scorefile": []}
                        ofile_split = ofile.split(':')
                        if len(ofile_split) == 2 and ofile_split[0] == 'launcher':
                            ofile = 'launcher:../' + parent_task_id + "/" + ofile_split[1]
                        totuminer_parent[parent_task_id]["infile"].append(ifile)
                        totuminer_parent[parent_task_id]["outfile"].append(ofile)
                        scorefile = ofile
                        if scorefile.endswith(".gz"):
                            scorefile = scorefile[:-3]
                        totuminer_parent[parent_task_id]["scorefile"].append(scorefile[:-3])
                for parent_task_id, in_out in six.iteritems(totuminer_parent):
                    content_tuminer = deepcopy(content)
                    content_tuminer["priority"] = priority + 1
                    content_tuminer["ngpus"] = ngpus_recommend
                    content_tuminer["ncpus"] = ncpus_recommend
                    tuminer_resource = service_module.select_resource_from_capacity(
                        resource, Capacity(ngpus_recommend, ncpus_recommend))
                    image_score = "nmtwizard/tuminer"
                    content_tuminer["docker"] = {
                        "image": image_score,
                        "registry": _get_registry(service_module, image_score),
                        "tag": "latest",
                        "command": ["tuminer", "--tumode", "score",
                                    "--srcfile"] + in_out["infile"] +
                                   ["--tgtfile"] + in_out["outfile"] +
                                   ["--output"] + in_out["scorefile"]
                    }
                    tuminer_task_id, explicitname = build_task_id(content_tuminer, xxyy,
                                                                  "tuminer", parent_task_id)
                    task_create.append(
                        (redis, taskfile_dir,
                         tuminer_task_id, "exec", parent_task_id, tuminer_resource, service,
                         content_tuminer,
                         (), priority+2, ngpus_recommend, ncpus_recommend, {}))
                    task_ids.append("%s\t%s\tngpus: %d, ncpus: %d" % (
                        "tuminer", tuminer_task_id, ngpus_recommend, ncpus_recommend))
        iterations -= 1
        # chain the next training iteration onto the task just created
        if iterations > 0:
            parent_task_id = task_id
            change_parent_task(content["docker"]["command"], parent_task_id)
    (task_ids, task_create) = post_function('POST/task/launch', task_ids, task_create)
    for tc in task_create:
        task.create(*tc)
    if len(task_ids) == 1:
        task_ids = task_ids[0]
    return flask.jsonify(task_ids)
def _get_airbnb_id(post_id):
    """Fetch the Airbnb listing id linked to a post."""
    return redis.hget('listings:%s' % post_id, 'airbnb')
def _get_vrbo_id(post_id):
    """Fetch the VRBO listing id linked to a post."""
    return redis.hget('listings:%s' % post_id, 'vrbo')
def get_passwd_update_time(name):
    """Return the stored password-update timestamp for the given user name."""
    return redis.hget(update_passwd_time_string, name)