def update(self, username=None, password=None, last_login=None, tel=None,
           email=None, enabled=None, _roles=None):
    """Update user fields; only non-None arguments are applied.

    Username and role changes are mirrored to the Kong consumer record
    before being stored locally.  Returns ``[state, msg]``.
    """
    if username is not None:
        # keep the Kong consumer record in sync with the local username
        Consumer.get(self.consumer_id).username = username
        self.username = username
    if password is not None:
        self.hashed_password = utils.hash_pass(password)
    if last_login is not None:
        self.last_login = last_login
    if tel is not None:
        self.tel = tel
    if email is not None:
        self.email = email
    if enabled is not None:
        self.enabled = enabled
    if _roles is not None:
        # Kong represents roles as consumer group names
        Consumer.get(
            self.consumer_id).groups = [role.role_name for role in _roles]
        self.roles = _roles
    app.logger.debug(
        utils.logmsg('user info update<%s:%s>' %
                     (self.username, self.user_id)))
    [state, msg] = self.save()
    if not state:
        app.logger.error(utils.logmsg(msg))
        # FIX: message typo -- was 'user update faild.'
        return [False, 'user update failed.']
    return [state, 'user updated.']
def build(self, buildflags=['-Cdf'], recursive=False):
    """Build this package (optionally its dependency subtree first).

    Returns True when a usable package exists (from repos, an installed
    copy, the cache, or a fresh build); False when any build step fails.
    """
    if recursive:
        # one dep fails, the entire branch fails immediately,
        # software will not be runnable
        for dep in self.deps:
            if not dep.build(buildflags=buildflags, recursive=True):
                return False
    if self.in_repos or (self.installed and not self.in_aur):
        return True
    if (self.installed and self.in_aur
            and self.version_installed == self.version_latest):
        return True
    cached = pkg_in_cache(self)
    if len(cached) > 0:
        # we only need one of them, not all, if multiple ones with
        # different extensions have been built
        self.built_pkgs.append(cached[0])
        return True
    utils.logmsg(self.ctx.v, 3, "building sources of {}".format(self.name))
    if self.srcpkg.built:
        return self.srcpkg.build_success
    if not self.srcpkg.build(buildflags=buildflags):
        utils.logerr(
            None,
            "Building sources of package {} failed, aborting this subtree".
            format(self.name))
        return False
    pkgext = os.environ.get('PKGEXT') or 'tar.xz'
    produced = os.listdir(self.srcpkg.srcdir)
    # makepkg emits either an arch-specific or an arch-independent package
    for arch in ('x86_64', 'any'):
        fullpkgname = "{}-{}-{}.pkg.{}".format(self.name,
                                               self.version_latest, arch,
                                               pkgext)
        if fullpkgname in produced:
            self.built_pkgs.append(fullpkgname)
            shutil.move(os.path.join(self.srcpkg.srcdir, fullpkgname),
                        self.ctx.cachedir)
            return True
    utils.logerr(
        None,
        "Package file {}-{}-{}.pkg.{} was not found in builddir, aborting this subtree"
        .format(self.name, self.version_latest, "{x86_64,any}", pkgext))
    return False
def _save_to_kong(self):
    """Push username and role-group membership to the Kong consumer.

    Returns [state, msg].
    """
    try:
        consumer = Consumer.get(self.consumer_id)
        consumer.username = self.username
        consumer.groups = [r.role_name for r in self.roles]
        msg = utils.logmsg('user name/groups save to kong.<%s:%s>' %
                           (self.username, self.user_id))
        return [True, msg]
    except KongError as e:
        msg = utils.logmsg('KongError: %s' % e)
        app.logger.error(msg)
        return [False, msg]
def _save_to_kong(self):
    """Push this role's member usernames to its Kong group.

    Returns [state, msg].
    """
    try:
        kong_group = Group(self.role_name)
        kong_group.usernames = [member.username for member in self.users]
        msg = utils.logmsg('role usernames save to kong.<%s:%s>' %
                           (self.role_name, self.role_id))
        return [True, msg]
    except KongError as e:
        msg = utils.logmsg('KongError: %s' % e)
        app.logger.error(msg)
        return [False, msg]
def calc_epoch_mse_loss(X, Y, nn_model, pprint=False):
    """Compute per-sample absolute error and a normalised L2 loss.

    Args:
        X: input samples, shape (N, features) -- indexed per sample.
        Y: target values, one per sample.
        nn_model: object exposing ``predict(x)`` returning a scalar.
        pprint: when True, log each sample's prediction and loss.

    Returns:
        (total_loss, loss_per_sample): total_loss is ``||Y_pred - Y|| / N``
        (a scalar tensor); loss_per_sample is a numpy array of absolute
        errors.
    """
    sample_num = X.shape[0]
    Y_pred = torch.zeros(sample_num)
    loss_per_sample = torch.zeros(sample_num)
    for i in range(sample_num):
        y_pred = nn_model.predict(X[i])
        Y_pred[i] = y_pred
        loss_per_sample[i] = abs(y_pred - Y[i])
        if pprint:
            # FIX: the format string has three placeholders but was given a
            # single argument, raising IndexError whenever pprint=True.
            logmsg("    prediced Y:{}, Real Y:{} loss={}".format(
                y_pred, Y[i], loss_per_sample[i]))
    total_loss = torch.norm((Y_pred - Y)) / sample_num
    return total_loss, loss_per_sample.detach().numpy()
def get(self):
    """Refresh the caller's token info and return their user record."""
    token = self._get_arg_check()
    user_id = JwtCred.get_user_id(token)
    users = User.get_users(user_id=user_id)
    if not users:
        msg = "cannot find user when authorization"
        app.logger.info(utils.logmsg(msg))
        raise utils.BadRequestError(msg)
    user = users[0]
    msg = "user token refresh.<username:%s>" % user.username
    payload = {"message": msg, "user_info": user.get_dict_info()}
    app.logger.debug(utils.logmsg(msg))
    return payload, 200
def _save_to_db(self):
    """Persist this role to the database.

    Rolls the session back on commit failure.  Returns [state, msg],
    matching the sibling User._save_to_db.
    """
    db.session.add(self)
    try:
        db.session.commit()
        msg = utils.logmsg('role saved<%s:%s>.' %
                           (self.role_name, self.role_id))
        app.logger.debug(msg)
        state = True
    # FIX: 'except Exception, e' is Python-2-only syntax; the sibling
    # User._save_to_db already uses the portable 'as e' form.
    except Exception as e:
        db.session.rollback()
        msg = utils.logmsg('exception saving role<%s:%s>: %s.' %
                           (self.role_name, self.role_id, e))
        app.logger.info(msg)
        state = False
    # FIX: the function previously fell off the end and returned None;
    # callers expect [state, msg] as with User._save_to_db.
    return [state, msg]
def _save_to_db(self):
    """Write this user to the DB, mirroring name/groups to Kong first.

    Returns [state, msg]; rolls the session back on commit failure.
    """
    self._save_to_kong()
    db.session.add(self)
    state = True
    try:
        db.session.commit()
        msg = utils.logmsg('user saved<%s:%s>.' %
                           (self.username, self.user_id))
    except Exception as e:
        db.session.rollback()
        state = False
        msg = utils.logmsg('exception: %s.' % e)
        app.logger.info(msg)
    return [state, msg]
def _post_arg_check(self):
    """Validate POST /user arguments.

    Returns [username, password, roles, tel, email]; raises
    ResourceNotFoundError for unknown roles and ConflictError for a
    taken username.
    """
    add_arg = self.reqparse.add_argument
    add_arg('username', type=str, location='json', required=True,
            help='user name must be string')
    add_arg('password', type=str, location='json', required=True,
            help='password must be string')
    add_arg('role_ids', type=list, location='json',
            help='role ids must be string list')
    add_arg('tel', type=str, location='json', help='tel must be str')
    add_arg('email', type=str, location='json', help='email must be str')
    args = self.reqparse.parse_args()
    username = args['username']

    roles = None
    if args['role_ids']:
        roles = []
        for rid in args['role_ids']:
            found = Role.get_roles(role_id=rid)
            if not found:
                msg = 'invalid role id:%s' % rid
                app.logger.debug(utils.logmsg(msg))
                raise utils.ResourceNotFoundError(msg)
            roles.append(found[0])

    if User.get_users(username=username):
        msg = 'user name<%s> in used.' % username
        app.logger.debug(utils.logmsg(msg))
        raise utils.ConflictError(msg)
    return [username, args['password'], roles, args['tel'], args['email']]
def _post_arg_check(self):
    """Validate POST /role arguments.

    Returns [role_name, description, users, api_ids].  Raises
    ConflictError when the role name is taken, ResourceNotFoundError for
    unknown user/api ids.
    """
    self.reqparse.add_argument('role_name', type=str, location='json',
                               required=True,
                               help='role name must be string')
    self.reqparse.add_argument('description', type=unicode, location='json',
                               help='description must be string')
    self.reqparse.add_argument('api_ids', type=list, location='json',
                               help='privilege id must be string list')
    self.reqparse.add_argument('user_ids', type=list, location='json',
                               help='user id must be list')
    args = self.reqparse.parse_args()
    role_name = args['role_name']
    description = args['description']
    api_ids = args['api_ids']
    user_ids = args['user_ids']
    roles = Role.get_roles(role_name=role_name)
    if roles:
        msg = 'role name is in used.'
        app.logger.debug(utils.logmsg(msg))
        # FIX: a name collision is a conflict (HTTP 409), not a missing
        # resource; matches the user POST check which raises ConflictError.
        raise utils.ConflictError(msg)
    users = list()
    if user_ids:
        for user_id in user_ids:
            got_users = User.get_users(user_id=user_id)
            if not got_users:
                msg = 'invalid user id<%s>' % user_id
                app.logger.debug(utils.logmsg(msg))
                raise utils.ResourceNotFoundError(msg)
            users.append(got_users[0])
    if api_ids:
        for api_id in api_ids:
            # NOTE(review): a constructor cannot normally return None, so
            # this check only fires if Api overrides __new__ -- confirm how
            # Api signals an unknown id.
            api = Api(api_id)
            if api is None:
                msg = 'api not found<%s>' % api_id
                app.logger.debug(utils.logmsg(msg))
                raise utils.ResourceNotFoundError(msg)
    return [role_name, description, users, api_ids]
def calc_epoch_mape_loss(X, Y, nn_model, pprint=False):
    """Compute the mean absolute percentage error over a sample set.

    Returns (total_mre_loss, mre_per_sample): the mean percentage error
    (scalar tensor) and a numpy array of per-sample percentage errors.
    """
    print("\ncalc mre loss...")
    n = X.shape[0]
    print("samplenum:" + str(n))
    Y_pred = torch.zeros(n)
    mre_per_sample = torch.zeros(n)
    for idx in range(n):
        prediction = nn_model.predict(X[idx])
        Y_pred[idx] = prediction
        mre_per_sample[idx] = abs((prediction - Y[idx]) / Y[idx]) * 100
        if pprint:
            logmsg("  prediced Y:{}, Real Y:{} loss={}".format(
                prediction, Y[idx], mre_per_sample[idx]))
    total_mre_loss = mre_per_sample.sum() / n
    return total_mre_loss, mre_per_sample.detach().numpy()
def __init__(self, name, ctx=None, firstparent=None, debug=False):
    """Build a Package node: query install/repo/AUR state and, for AUR
    packages, resolve dependency objects and fetch+extract the sources.

    name -- pacman package name
    ctx -- shared context (cachedir/builddir/logdir/verbosity); note it is
        dereferenced unconditionally below, so None would fail -- TODO
        confirm callers always pass one
    firstparent -- the dependent package that triggered this instantiation
    debug -- accepted but unused in this constructor
    """
    self.ctx = ctx
    self.name = name
    self.installed = pacman.is_installed(name)
    self.deps = []        # hard dependencies as Package objects
    self.makedeps = []    # build-time dependencies as Package objects
    self.optdeps = []     # optional dependencies kept as plain names
    self.parents = [firstparent] if firstparent else []
    self.built_pkgs = []
    self.version_installed = pacman.installed_version(
        name) if self.installed else None
    self.in_repos = pacman.in_repos(name)
    self.srcpkg = None
    utils.logmsg(self.ctx.v, 3,
                 "Instantiating package {}".format(self.name))
    self.pkgdata = utils.query_aur("info", self.name, single=True)
    # NOTE(review): this stores a truthy dict rather than a strict bool;
    # callers appear to use it only in boolean context.
    self.in_aur = not self.in_repos and self.pkgdata
    utils.logmsg(
        self.ctx.v, 4, 'Package details: {}; {}; {}'.format(
            name, "installed" if self.installed else "not installed",
            "in repos" if self.in_repos else "not in repos"))
    if self.in_aur:
        self.version_latest = self.pkgdata['Version']
        if "Depends" in self.pkgdata:
            for pkg in self.pkgdata["Depends"]:
                self.deps.append(parse_dep_pkg(pkg, self.ctx))
        if "MakeDepends" in self.pkgdata:
            for pkg in self.pkgdata["MakeDepends"]:
                # NOTE(review): uses `ctx` here while Depends uses
                # `self.ctx`; same value, but inconsistent style
                self.makedeps.append(parse_dep_pkg(pkg, ctx))
        if "OptDepends" in self.pkgdata:
            for pkg in self.pkgdata["OptDepends"]:
                self.optdeps.append(pkg)
        # fetch and unpack the source package immediately (network I/O)
        self.srcpkg = parse_src_pkg(self.pkgdata["PackageBase"],
                                    self.pkgdata["Version"],
                                    self.pkgdata["URLPath"],
                                    ctx=ctx)
        self.srcpkg.download()
        self.srcpkg.extract()
def put(self):
    """Modify a role with the validated PUT arguments."""
    [role, role_name, description, users, api_ids,
     enabled] = self._put_arg_check()
    # update user
    state, msg = role.update(role_name=role_name, users=users,
                             description=description, api_ids=api_ids,
                             enabled=enabled)
    if not state:
        app.logger.info(utils.logmsg(msg))
        raise utils.ServerError(msg)
    msg = 'role updated.<%s>' % role.role_id
    app.logger.info(utils.logmsg(msg))
    return {"message": msg, "role_id": role.role_id}, 200
def update(self, role_name=None, users=None, description=None,
           api_ids=None, enabled=None):
    """Apply the given (non-None) changes to this role.

    Enabling re-creates the matching Kong group from current members and
    apis; disabling deletes it.  api_ids are pushed to Kong directly.
    Returns [state, msg].
    """
    if users is not None:
        self.users = users
    if description is not None:
        self.description = description
    if enabled is not None:
        kong_group = Group(self.role_name)
        if self.enabled is False and enabled is True:
            # re-enabling: rebuild the Kong group from current state
            kong_group.usernames = [u.username for u in self.users]
            kong_group.api_ids = self.api_ids
        elif self.enabled is True and enabled is False:
            kong_group.delete()
        self.enabled = enabled
    if api_ids is not None:
        Group(self.role_name).api_ids = api_ids
    if role_name is not None:
        # renaming is rejected only when another role holds the name
        taken = self.get_roles(role_name=role_name)
        if taken and role_name != self.role_name:
            msg = 'role_name in used.<%s>' % role_name
            app.logger.info(utils.logmsg(msg))
            return [False, msg]
        self._update(role_name)
    app.logger.debug(
        utils.logmsg('role info update<%s:%s>.' %
                     (self.role_name, self.role_id)))
    [state, msg] = self.save()
    if not state:
        app.logger.info(utils.logmsg(msg))
        return [False, 'role update failed.']
    return [True, 'role updated.']
def _get_arg_check(self):
    """Parse an optional 'role_id'; return that Role, or None if absent."""
    self.reqparse.add_argument('role_id', type=str, location='args',
                               help='role_id must be string.')
    role_id = self.reqparse.parse_args()['role_id']
    if not role_id:
        return None
    found = Role.get_roles(role_id=role_id)
    if not found:
        msg = 'invalid role_id.'
        app.logger.debug(utils.logmsg(msg))
        raise utils.ResourceNotFoundError(msg)
    return found[0]
def _get_arg_check(self):
    """Parse an optional 'user_id'; return that User, or None if absent."""
    self.reqparse.add_argument('user_id', type=str, location='args',
                               help='user_id must be string.')
    user_id = self.reqparse.parse_args()['user_id']
    if not user_id:
        return None
    matches = User.get_users(user_id=user_id)
    if not matches:
        msg = 'invalid user_id<%s>' % user_id
        app.logger.debug(utils.logmsg(msg))
        raise utils.ResourceNotFoundError(msg)
    return matches[0]
def _put_arg_check(self):
    """Validate admin PUT /user arguments.

    Returns [target_user, username, password, roles, tel, email, enabled].
    """
    self.reqparse.add_argument('user_id', type=str, location='args',
                               required=True,
                               help='user name must be string')
    self.reqparse.add_argument('username', type=str, location='json',
                               help='user name must be string')
    self.reqparse.add_argument('password', type=str, location='json',
                               help='password must be string')
    self.reqparse.add_argument('role_ids', type=list, location='json',
                               help='role id must be string list')
    self.reqparse.add_argument('tel', type=str, location='json',
                               help='tel must be str')
    self.reqparse.add_argument('email', type=str, location='json',
                               help='email must be str')
    self.reqparse.add_argument('enabled', type=bool, location='json',
                               help='enabled must be boolean')
    args = self.reqparse.parse_args()
    # required args check
    user_id = args['user_id']
    target_users = User.get_users(user_id=user_id)
    if not target_users:
        msg = 'invalid user_id<%s>' % user_id
        app.logger.debug(utils.logmsg(msg))
        raise utils.ClientUnprocEntError(msg)
    target_user = target_users[0]
    # other args check
    role_ids = args['role_ids']
    roles = list()
    if role_ids:
        for role_id in role_ids:
            got_roles = Role.get_roles(role_id=role_id)
            if not got_roles:
                msg = 'invalid role id<%s>' % role_id
                app.logger.debug(utils.logmsg(msg))
                raise utils.ResourceNotFoundError(msg)
            roles.append(got_roles[0])
    password = args['password']
    tel = args['tel']
    email = args['email']
    username = args['username']
    if username:
        users = User.get_users(username=username)
        for user in users:
            if not user.user_id == user_id:
                msg = 'user name<%s> in used.' % username
                app.logger.debug(utils.logmsg(msg))
                # NOTE(review): the POST handler raises ConflictError for a
                # taken name; kept as-is here to avoid changing the API
                # contract -- consider aligning.
                raise utils.ResourceNotFoundError(msg)
    # FIX: was "username is ''" -- identity comparison against a string
    # literal relies on CPython interning (SyntaxWarning on modern
    # interpreters); use equality.
    elif username == '':
        msg = 'user name should not be empty string.'
        app.logger.debug(utils.logmsg(msg))
        raise utils.ResourceNotFoundError(msg)
    enabled = args['enabled']
    return [target_user, username, password, roles, tel, email, enabled]
def run(epochs, train_set_size, test_set_size, lr, batch_size, neuron_num,
        lamda):
    """Train ExperimentalNN on the configured dataset and plot losses.

    Loads `args.dataset`, splits the first train_set_size samples for
    training and the next test_set_size for testing, trains for `epochs`
    epochs, logs per-epoch root-MSE test loss, then reports the final
    test MRE and plots the loss curves.
    """
    data_set_file = args.dataset
    HCS_dataset = DataLoader(DATA_DIR, do_torch=True)
    HCS_dataset.load(data_set_file)

    # Init training and test data
    X = torch.zeros([train_set_size, HCS_dataset.num_features],
                    dtype=torch.float64)
    X_test = torch.zeros([test_set_size, HCS_dataset.num_features],
                         dtype=torch.float64)
    Y = torch.zeros([train_set_size, 1], dtype=torch.float64)
    Y_test = torch.zeros([test_set_size, 1], dtype=torch.float64)
    for i in range(train_set_size):
        X[i], Y[i] = HCS_dataset[i]
    for i in range(test_set_size):
        X_test[i], Y_test[i] = HCS_dataset[i + train_set_size]
    # standardise features (targets are left untouched)
    X = torch.as_tensor(preprocessing.scale(X), dtype=torch.float64)
    X_test = torch.as_tensor(preprocessing.scale(X_test),
                             dtype=torch.float64)

    # Init neural net model
    nn = ExperimentalNN(num_features=HCS_dataset.num_features,
                        neuron_num=neuron_num, lr=lr, lamda=lamda)

    # Train neural net with defined num of epochs, calc losses, plot data.
    epoch_step_num = int(math.ceil(train_set_size / batch_size))
    train_loss = np.zeros(epochs)
    test_loss = np.zeros(epochs)
    mse_loss_fn = torch.nn.MSELoss()
    logmsg("**")
    # FIX: previously called .split('.') on the format template itself,
    # logging the raw unfilled "{}"; format with the dataset file name
    # (extension stripped) instead -- TODO confirm intended name source.
    logmsg("** Starting run for HCS {} with:".format(
        data_set_file.split('.')[0]))
    # FIX: the "testset size" placeholder was fed train_set_size twice.
    logmsg("** dataset samples num={}, trainset size={}, testset size={},"
           " batch size={}, epochs={}".format(
               len(HCS_dataset), train_set_size, test_set_size, batch_size,
               epochs))
    logmsg("**\n")
    for epoch in range(epochs):
        # reshuffle the training set every epoch
        permutations = torch.randperm(train_set_size)
        shuffled_X = X[permutations]
        shuffled_Y = Y[permutations]
        train_loss[epoch] = \
            nn.train_net(X=shuffled_X, Y=shuffled_Y, plot=False,
                         save_train_data=False, batch_size=batch_size)
        Y_test_pred = nn.predict(X_test).detach()
        # root-MSE on the held-out set
        test_loss[epoch] = pow(
            mse_loss_fn(Y_test_pred, Y_test).detach().item(), 0.5)
        if epoch % 1 == 0:
            logmsg("epoch {} test loss: {}".format(epoch, test_loss[epoch]))
    final_test_mre_loss, _ = calc_epoch_mape_loss(X_test, Y_test, nn,
                                                  pprint=False)
    print("\n\n#####\n# Test MRE Loss:{}".format(final_test_mre_loss))
    plot_mse_loss(test_loss, train_loss, ylabel="Root of MSE Loss",
                  xlabel="Steps")
def _put_arg_check(self):
    """Validate PUT /role arguments and load the target role.

    Returns [role, role_name, description, users, api_ids, enabled].
    """
    add_arg = self.reqparse.add_argument
    add_arg('role_id', type=str, location='args', required=True,
            help='role_id must be string')
    add_arg('role_name', type=str, location='json',
            help='role name must be string')
    add_arg('description', type=unicode, location='json',
            help='description must be string')
    add_arg('user_ids', type=list, location='json',
            help='user id must be list')
    add_arg('api_ids', type=list, location='json',
            help='api ids must be list')
    add_arg('enabled', type=bool, location='json',
            help='enabled must be boolean')
    args = self.reqparse.parse_args()

    role_id = args['role_id']
    matched = Role.get_roles(role_id=role_id)
    if not matched:
        msg = 'role not found<%s>' % role_id
        app.logger.debug(msg)
        raise utils.ResourceNotFoundError(msg)
    role = matched[0]

    role_name = args['role_name']
    if role_name is not None:
        # a rename may only collide with the role itself
        same_named = Role.get_roles(role_name=role_name)
        if same_named and not role.role_id == same_named[0].role_id:
            msg = 'role name is in used<%s>' % role_name
            app.logger.debug(utils.logmsg(msg))
            raise utils.ConflictError(msg)

    # NOTE(review): when user_ids is omitted this returns [] (not None),
    # and Role.update treats a non-None users as "replace members" --
    # confirm an omitted user_ids is not meant to leave members unchanged.
    users = list()
    if args['user_ids']:
        for uid in args['user_ids']:
            found = User.get_users(user_id=uid)
            if not found:
                msg = 'invalid user id<%s>' % uid
                app.logger.debug(utils.logmsg(msg))
                raise utils.ResourceNotFoundError(msg)
            users.append(found[0])

    api_ids = args['api_ids']
    if api_ids:
        for api_id in api_ids:
            api = Api(api_id)
            if api is None:
                msg = 'api not found<%s>' % api_id
                app.logger.debug(utils.logmsg(msg))
                raise utils.ResourceNotFoundError(msg)

    return [role, role_name, args['description'], users, api_ids,
            args['enabled']]
def _put_arg_check(self):
    """Validate self-service PUT arguments for the token's own user.

    Returns [user, username, new_password, tel, email].  Changing the
    password requires the correct origin_password.
    """
    self.reqparse.add_argument('Authorization', type=str,
                               location='headers', required=True,
                               help='format as "Bearer <token>"')
    args = self.reqparse.parse_args()
    # FIX: was a bare 'except:'; catch only the failures a malformed
    # "Bearer <token>" header can produce instead of masking unrelated
    # errors (including SystemExit/KeyboardInterrupt).
    try:
        token = args['Authorization'].split(' ')[1]
    except (IndexError, AttributeError):
        raise utils.AuthenticationError('wrong Authorization token')
    try:
        user = User.get_users(user_id=JwtCred.get_user_id(token))[0]
    except Exception as e:
        app.logger.error(utils.logmsg(e))
        raise utils.BadRequestError('wrong token claims.')
    # check other argument
    self.reqparse.add_argument('username', type=str, location='json',
                               help='user name must be string')
    self.reqparse.add_argument('origin_password', type=str, location='json',
                               help='origin password must be string')
    self.reqparse.add_argument(
        'new_password', type=str, location='json',
        help=
        'U can modify password only if the CORRECT origin_password is given.'
    )
    self.reqparse.add_argument('tel', type=str, location='json',
                               help='tel must be str')
    self.reqparse.add_argument('email', type=str, location='json',
                               help='email must be str')
    args = self.reqparse.parse_args()
    origin_password = args['origin_password']
    new_password = args['new_password']
    if new_password is not None and not new_password == '':
        # a password change must prove knowledge of the current password
        if origin_password is None or origin_password == '':
            raise utils.BadRequestError('u need the origin_password.')
        if not utils.hash_pass(origin_password) == user.hashed_password:
            raise utils.BadRequestError('wrong origin_password.')
    else:
        new_password = None
    tel = args['tel']
    email = args['email']
    username = args['username']
    if username and not username == user.username:
        users = User.get_users(username=username)
        if users:
            msg = 'user name is in used.'
            raise utils.ConflictError(msg)
    # FIX: was "username is ''" -- identity comparison with a string
    # literal; use equality.
    elif username == '':
        msg = 'user name should not be empty string.'
        raise utils.BadRequestError(msg)
    return [user, username, new_password, tel, email]
args = parser.parse_args()
# lightweight shared context threaded through every Package/build call
Config = namedtuple('Context', ['cachedir', 'builddir', 'logdir', 'v'])
# process arguments if necessary
args.aur_local = os.path.abspath(os.path.expanduser(args.aur_local))
ctx = Config(cachedir=os.path.join(args.aur_local, 'cache'),
             builddir=os.path.join(args.aur_local, 'build'),
             logdir=os.path.join(args.aur_local, 'logs'),
             v=args.verbosity)
os.makedirs(ctx.cachedir, exist_ok=True)
os.makedirs(ctx.builddir, exist_ok=True)
os.makedirs(ctx.logdir, exist_ok=True)
utils.logmsg(ctx.v, 2, ("builddir: {}".format(ctx.builddir)))
utils.logmsg(ctx.v, 2, ("cachedir: {}".format(ctx.cachedir)))
utils.logmsg(ctx.v, 2, ("makepkg-logdir: {}".format(ctx.logdir)))
if args.buildonly:
    utils.logmsg(ctx.v, 0, "Sources can be found at {}".format(ctx.builddir))


# NOTE(review): this definition is truncated at the end of this chunk; a
# complete copy of build_packages_from_aur exists elsewhere in the file.
def build_packages_from_aur(package_candidates, install_as_dep=False):
    aurpkgs, repopkgs, notfoundpkgs = utils.check_in_aur(package_candidates)
    if repopkgs:
        utils.logmsg(
            ctx.v, 1,
            "Skipping: {}: packaged in repos".format(", ".join(repopkgs)))
    if notfoundpkgs:
def build_packages_from_aur(package_candidates, install_as_dep=False):
    """Resolve, review, build and (unless --buildonly) install AUR packages.

    package_candidates -- requested package names
    install_as_dep -- install built packages with the dependency flag
        (used when recursing to build AUR makedeps)
    """
    aurpkgs, repopkgs, notfoundpkgs = utils.check_in_aur(package_candidates)
    if repopkgs:
        utils.logmsg(
            ctx.v, 1,
            "Skipping: {}: packaged in repos".format(", ".join(repopkgs)))
    if notfoundpkgs:
        utils.logmsg(
            ctx.v, 1, "Skipping: {}: neither in repos nor AUR".format(
                ", ".join(notfoundpkgs)))
    packages = []
    skipped_packages = []
    utils.logmsg(
        ctx.v, 0,
        "Fetching information and files for dependency-graph for {} package{}".
        format(len(aurpkgs), '' if len(aurpkgs) == 1 else 's'))

    async def gen_package_obj(pkgnamelist, ctx):
        # instantiate Package objects concurrently (network-bound AUR work)
        pkgobj = []
        loop = asyncio.get_event_loop()
        futures = [
            loop.run_in_executor(None, Package, p, ctx) for p in pkgnamelist
        ]
        for p in await asyncio.gather(*futures):
            pkgobj.append(p)
        return pkgobj

    loop = asyncio.get_event_loop()
    packages = loop.run_until_complete(gen_package_obj(aurpkgs, ctx))
    for p in packages:
        if not p.review():
            utils.logmsg(ctx.v, 0,
                         "Skipping: {}: Did not pass review".format(p.name))
            skipped_packages.append(p)
    # drop all packages that did not pass review
    for p in skipped_packages:
        packages.remove(p)
    uninstalled_makedeps = set()
    skipped_due_to_missing_makedeps = []
    for p in packages:
        md = p.get_makedeps()
        # FIX: the comprehension variable previously shadowed the loop's
        # `p`; renamed for clarity (behaviour unchanged).
        md_not_found = [
            m for m in md
            if not m.installed and not m.in_repos and not m.in_aur
        ]
        if len(md_not_found) > 0:
            utils.logerr(
                None,
                "{}: cannot satisfy makedeps from either repos, AUR or local installed packages, skipping"
                .format(p.name))
            skipped_packages.append(p)
            skipped_due_to_missing_makedeps.append(p)
        md_available = set(
            [m for m in md if not m.installed and (m.in_repos or m.in_aur)])
        uninstalled_makedeps = uninstalled_makedeps.union(md_available)
    # drop all packages whose makedeps cannot be satisfied
    for p in skipped_due_to_missing_makedeps:
        packages.remove(p)
    md_aur = [p for p in uninstalled_makedeps if p.in_aur]
    if len(md_aur) > 0:
        utils.logmsg(
            ctx.v, 0, "Building makedeps from aur: {}".format(", ".join(
                p.name for p in md_aur)))
        # recurse: AUR makedeps are built and installed as dependencies
        build_packages_from_aur(md_aur, install_as_dep=True)
    repodeps = set()
    for p in packages:
        repodeps = repodeps.union(p.get_repodeps())
    md_repos = [p.name for p in uninstalled_makedeps if p.in_repos]
    repodeps_uninstalled = [p.name for p in repodeps if not p.installed]
    to_be_installed = set(repodeps_uninstalled).union(md_repos)
    if to_be_installed:
        utils.logmsg(ctx.v, 0,
                     "Installing dependencies and makedeps from repos")
        if not pacman.install_repo_packages(to_be_installed, asdeps=True):
            utils.logerr(0, "Could not install deps and makedeps from repos")
    for p in packages:
        success = p.build(buildflags=['-Cfd'], recursive=True)
        if success:
            od = p.get_optdeps()
            for name, optdeplist in od:
                # FIX: the format string was never given p.name, printing
                # the raw "{}" placeholder.
                print(" :: Package {} has optional dependencies:".format(
                    p.name))
                for odname in optdeplist:
                    print("    - {}".format(odname))
    built_pkgs = set()
    built_deps = set()
    for p in packages:
        built_pkgs = built_pkgs.union(set(p.built_pkgs))
        for d in p.deps:
            built_deps = built_deps.union(d.get_built_pkgs())
    os.chdir(ctx.cachedir)
    if args.buildonly:
        utils.logmsg(ctx.v, 1, "Packages have been built:")
        # FIX: was `utils.logmsg(ctx.v, 1, ", ", join(built_deps +
        # built_pkgs) or "None")` -- a stray comma instead of ".", and `+`
        # is unsupported between sets; join the union instead.
        utils.logmsg(ctx.v, 1, ", ".join(built_deps | built_pkgs) or "None")
    else:
        if built_deps:
            utils.logmsg(ctx.v, 0, "Installing package dependencies")
            if not pacman.install_package_files(built_deps, asdeps=True):
                utils.logerr(2,
                             "Failed to install built package dependencies")
        if built_pkgs:
            utils.logmsg(ctx.v, 0, "Installing built packages")
            if not pacman.install_package_files(built_pkgs,
                                                asdeps=install_as_dep):
                utils.logerr(2, "Failed to install built packages")
        else:
            utils.logmsg(ctx.v, 0, "No packages built, nothing to install")
    if uninstalled_makedeps:
        utils.logmsg(ctx.v, 0, "Removing previously uninstalled makedeps")
        if not pacman.remove_packages([
                p for p in uninstalled_makedeps if pacman.is_installed(p.name)
        ]):
            utils.logerr(None,
                         "Failed to remove previously uninstalled makedeps")
    if not args.keep_sources == "all":
        for p in packages:
            p.remove_sources()
    if args.keep_sources not in ["all", "skipped"]:
        for p in skipped_packages:
            p.remove_sources()
def review(self):
    """Decide whether this package (and its whole dep tree) may be built.

    Returns True when no interactive review is needed or a prior review
    passed; otherwise defers to the source package's review.
    """
    utils.logmsg(self.ctx.v, 3, "reviewing {}".format(self.name))
    # a single failing dependency sinks the entire subtree
    for dep in self.deps + self.makedeps:
        if not dep.review():
            return False
    if self.in_repos:
        utils.logmsg(self.ctx.v, 3,
                     "{} passed review: in_repos".format(self.name))
        return True
    if self.installed and not self.in_aur:
        utils.logmsg(
            self.ctx.v, 3,
            "{} passed review: installed and not in aur".format(self.name))
        return True
    if self.installed and self.version_installed == self.version_latest:
        utils.logmsg(
            self.ctx.v, 3,
            "{} passed review: installed in latest version".format(
                self.name))
        return True
    if self.srcpkg.reviewed:
        utils.logmsg(
            self.ctx.v, 3,
            "{} passed review due to positive pre-review".format(self.name))
        return self.srcpkg.review_passed
    if self.in_aur and len(pkg_in_cache(self)) > 0:
        utils.logmsg(self.ctx.v, 3,
                     "{} passed review: in cache".format(self.name))
        return True
    return self.srcpkg.review()
def train_net(self, X, Y, X_test=None, Y_test=None, save_train_data=True,
              calc_test_loss=False, plot=False, batch_size=8):
    """Run one training epoch over X/Y in mini-batches.

    Args:
        X, Y: training inputs/targets; Y is assumed shaped (N, 1).
        X_test, Y_test: optional held-out data, used when calc_test_loss.
        save_train_data: when True return (train_loss_for_step,
            test_loss_for_step); otherwise return the epoch's root-MSE
            over a fresh forward pass of X.
        calc_test_loss: record test loss after every step.
        plot: show a per-step training-loss plot.
        batch_size: mini-batch size; the last batch may be smaller.
    """
    samples_num = X.shape[0]
    steps_num = int(math.ceil(samples_num / batch_size))
    Y_pred = torch.zeros(samples_num)
    train_loss_for_step = torch.zeros(steps_num)
    test_loss_for_step = torch.zeros(steps_num) if calc_test_loss else None
    mse_loss_fn = torch.nn.MSELoss()
    # batch end-offsets; the trailing partial batch is always included
    batch_indexes = list(range(batch_size, samples_num, batch_size)) + [
        samples_num,
    ]
    prev_batch_end = 0
    for i, batch_end in enumerate(batch_indexes):
        batch_start = prev_batch_end
        loss_at_step, Y_pred[batch_start:batch_end] = \
            self.train_step(X[batch_start:batch_end, :],
                            Y[batch_start:batch_end, :])
        prev_batch_end = batch_end
        if save_train_data or plot:
            train_loss_for_step[i] = loss_at_step
        if calc_test_loss:
            Y_pred_test = self.net(X_test)
            test_loss_for_step[i] = mse_loss_fn(Y_pred_test, Y_test)
        # FIX: was `if i + 1 % 50 == 0` -- % binds tighter than +, so the
        # condition read `i + 1 == 0` and never fired.
        if (i + 1) % 50 == 0:
            logmsg("Done {} training steps".format(i + 1))
    Y_pred = Y_pred.unsqueeze(1)
    train_loss_for_step = train_loss_for_step.detach().numpy()
    if plot:
        fig, ax = plt.subplots(figsize=(12, 7))
        ax.set_title("Training loss progress")
        # FIX: the axis labels were swapped relative to the plotted data
        # (x is the step index, y the loss value).
        ax.set_xlabel('Steps')
        ax.set_ylabel('Loss')
        # FIX: x must have steps_num points to match the per-step losses;
        # np.arange(samples_num) raised a shape mismatch whenever
        # batch_size > 1.
        ax.plot(np.arange(steps_num), pow(train_loss_for_step, 0.5),
                color="g", lw="2")
        plt.show()
    if save_train_data:
        return train_loss_for_step, test_loss_for_step
    # otherwise report the epoch's root-mean-squared error on X
    Y_pred = self.net(X)
    return pow(mse_loss_fn(Y_pred, Y).detach().item(), 0.5)