def main():
    """Interactive UDP-client command loop.

    Starts the background ``receive`` thread, then reads slash-commands from
    stdin and sends them to the server as ``Package`` datagrams until "/bye".
    Relies on module-level ``upd_client``, ``server_ip_port``, ``cookie`` and
    ``running`` (shared with the receiver thread).
    """
    instructions = [
        "/login", "/shops", "/enter", "/goods", "/customers",
        "/buy", "/leave", "/addgoods", "/bye"
    ]
    # Asynchronous receiver so server pushes can arrive mid-prompt.
    th0 = threading.Thread(target=receive)
    th0.start()
    while True:
        msg = input('请输入您的指令:')
        text = msg.split()
        try:
            # Unknown commands are rejected via AssertionError below.
            assert text[0] in instructions
            if text[0] == "/bye":
                upd_client.sendto(Package("header#/bye").send(), server_ip_port)
                break
            msg = Package("header#" + text[0], cookie["user_id"], ",".join(text[1:]))
            upd_client.sendto(msg.send(), server_ip_port)
            # Small pause so the receiver thread can print the reply first.
            time.sleep(0.2)
        except AssertionError:
            print("没有该类型的指令!")
            print("正确的指令有:", instructions)
    # Signal the receiver thread to stop.
    running[0] = False
def get(self):
    """Task handler: walk the Package table in DB_STEPS-sized chunks, ordered
    by name descending, applying a model fix-up to each entity.

    The ``name`` query parameter is the cursor; the rendered template is
    expected to re-trigger the next chunk via ``next_url``.
    """
    import urllib
    from google.appengine.ext.webapp import template
    url_n_template = "update_models"
    name = self.request.get("name", None)
    if name is None:
        # First request, just get the first name out of the datastore.
        pkg = Package.gql("ORDER BY name DESC").get()
        name = pkg.name
    q = Package.gql("WHERE name <= :1 ORDER BY name DESC", name)
    items = q.fetch(limit=DB_STEPS)
    if len(items) > 1:
        # Continue from the last item of this chunk.
        next_name = items[-1].name
        next_url = "/tasks/%s?name=%s" % (url_n_template, urllib.quote(next_name))
    else:
        next_name = "FINISHED"
        next_url = ""  # Finished processing, go back to main page.
    for current_pkg in items:
        # modify the model if needed here
        # fix_equivalence(current_pkg)
        # current_pkg.py2only = False
        if current_pkg.name in ("pylint", "docutils"):
            current_pkg.force_green = True
        else:
            current_pkg.force_green = False
        current_pkg.put()
    # end of modify models
    context = {"current_name": name, "next_name": next_name, "next_url": next_url}
    self.response.out.write(template.render("%s.html" % url_n_template, context))
def fetch_packagenames_from_libraryio(package_count):
    """Fetch NPM package names from Libraries.io and persist them.

    Fetches page by page (currently only in multiples of 30, the API page
    size). If ``package_count == -1``, all packages are fetched; otherwise
    fetching stops once at least ``package_count`` names are retrieved.
    """
    keep_fetching = True
    page_no = 0
    count_retrieved = 0
    while keep_fetching:
        logging.info("Fetching page %d of package names", page_no)
        packages = make_request(
            libraries_io.search.get, q='', platforms='NPM', page=page_no
        )
        if packages is None:
            # Bug fix: the request failed — previously len(packages) below
            # raised TypeError on None. Stop fetching instead.
            break
        for p in packages:
            Package.get_or_create(
                name=p['name'],
                repository_url=p['repository_url'],
                page_no=page_no,
            )
        count_retrieved += len(packages)
        page_no += 1
        # Continue while the API still returns results and the quota (if any)
        # is not yet met.
        keep_fetching = (
            (len(packages) > 0) and
            (package_count == -1 or count_retrieved < package_count)
        )
def fetch_package_list(): res = make_request( default_requests_session.get, "https://pypi.python.org/pypi?%3Aaction=index", ) # Use BeautifulSoup to parse the HTML and find the package names. page = BeautifulSoup(res.content, 'html.parser') package_table = page.find('table') all_rows = package_table.findAll('tr') logging.info("====There are currently %d packages on PyPI.", len(all_rows) - 1) num_fetched = 0 # Each row represents a single PyPI package. for row in all_rows: # Each row has 2 columns, a name (hyperlinked) and a description. link = row.find('a') # Only fetch package if it has a link to its own page. if link is not None: # Reformat spacing in extracted name. package_name = link.text.replace(u'\xa0', ' ') Package.get_or_create(name=package_name) num_fetched += 1 if num_fetched % 10 == 0: logging.info("%d packages fetched.", num_fetched) logging.info("====Done fetching package list. There were %d packages.", num_fetched)
def update_list_of_packages():
    """Process the next DB_STEPS names from the memcached PyPI package list,
    inserting any that are missing from the datastore.

    Returns the updated index into the cached list, or -1 when every name has
    been processed.
    """
    package_names = memcache.get(PACKAGES_CACHE_KEY)
    package_index = memcache.get(PACKAGES_CHECKED_INDEX)
    if package_index is None:
        package_index = 0
    if package_names is None:
        # Cache miss: re-fetch the full list and keep it for 24 hours.
        package_names = pypi_parser.get_list_of_packages()
        memcache.add(PACKAGES_CACHE_KEY, package_names, 60 * 60 * 24)
    for name in package_names[package_index:package_index + DB_STEPS]:
        if name in TO_IGNORE:
            pass
        else:
            query = db.GqlQuery("SELECT __key__ FROM Package WHERE name = :name", name=name)
            if len(list(query)) == 0:
                p = Package(name=name)
                p.put()
        package_index += 1
        if package_index % 5 == 0:
            # Checkpoint progress so a request timeout can resume mid-batch.
            memcache.set(PACKAGES_CHECKED_INDEX, package_index, 60 * 60 * 24)
    if package_index == len(package_names):
        return -1
    return package_index
def update_list_of_packages():
    """Duplicate of the chunked package-list updater above.

    Processes the next DB_STEPS names from the memcached PyPI list, inserting
    missing ones into the datastore; returns the new index or -1 when done.
    """
    package_names = memcache.get(PACKAGES_CACHE_KEY)
    package_index = memcache.get(PACKAGES_CHECKED_INDEX)
    if package_index is None:
        package_index = 0
    if package_names is None:
        # Cache miss: re-fetch the full list and keep it for 24 hours.
        package_names = pypi_parser.get_list_of_packages()
        memcache.add(PACKAGES_CACHE_KEY, package_names, 60 * 60 * 24)
    for name in package_names[package_index : package_index + DB_STEPS]:
        if name in TO_IGNORE:
            pass
        else:
            query = db.GqlQuery("SELECT __key__ FROM Package WHERE name = :name", name=name)
            if len(list(query)) == 0:
                p = Package(name=name)
                p.put()
        package_index += 1
        if package_index % 5 == 0:
            # Checkpoint progress so a request timeout can resume mid-batch.
            memcache.set(PACKAGES_CHECKED_INDEX, package_index, 60 * 60 * 24)
    if package_index == len(package_names):
        return -1
    return package_index
def get_result():
    """Return the total number of Package rows, optionally filtered by the
    'pkgname' request argument."""
    args = request.args
    if 'pkgname' in args.keys():
        query = Package.selectBy(pkgname=args['pkgname'][0])
    else:
        query = Package.selectBy()
    return {'total': query.orderBy("-id").count()}
def fetch_package_list():
    """Download the full npm registry document and record every package id."""
    res = make_request(
        default_requests_session.get,
        "https://skimdb.npmjs.com/registry/_all_docs",
    )
    if res is None:
        # Request failed; nothing to record.
        return
    for row in res.json()['rows']:
        Package.get_or_create(name=row['id'])
def package(name):
    """Return the Package with the given name, creating it if absent.

    Bug fix: the original bare ``except:`` swallowed *every* error (including
    MultipleObjectsReturned and database failures) and silently created a new
    row; catch only the lookup miss.
    """
    try:
        pkg = Package.objects.get(name__exact=name)
    except Package.DoesNotExist:
        pkg = Package(name=name)
        pkg.save()
    return pkg
def populate(comps='comps-f16', do_dependencies=True):
    """Load a comps XML file into Group/Package/Category/Root models.

    Python 2 code (print statements). Optionally resolves package
    dependencies afterwards via ``add_dependencies``.
    """
    from yum.comps import Comps
    session = DBSession()
    c = Comps()
    c.add('comps/%s.xml' % comps)
    for group in c.groups:
        g = Group(id=group.groupid, name=group.name,
                  description=group.description)
        session.add(g)
        for package in group.packages:
            p = session.query(Package).filter_by(
                name=to_unicode(package)).first()
            if not p:
                p = Package(name=package)
                session.add(p)
            p.group = g
        session.flush()
    root = Root(name=u'Fedora')
    session.add(root)
    session.flush()
    for category in c.categories:
        # NOTE(review): 'c' is rebound here, shadowing the Comps object above.
        # Iteration is unaffected (the iterator was already created), but the
        # reuse is fragile.
        c = Category(id=category.categoryid, name=category.name,
                     description=category.description)
        session.add(c)
        root.categories.append(c)
        for group in category.groups:
            # NOTE(review): Group was created with id=..., yet queried here by
            # group_id — confirm the model exposes both attributes.
            g = session.query(Group).filter_by(
                group_id=to_unicode(group)).first()
            if not g:
                print "Cannot find group: %s" % group
            else:
                g.category = c
        session.flush()
    if do_dependencies:
        for package in session.query(Package).all():
            add_dependencies(package, session)
    session.commit()
def post(self):
    """Upsert a Package from the JSON request body, keyed by 'name', and
    respond with the serialized row. Python 2 (uses iteritems)."""
    data = self.request.json
    query = self.db.query(Package).filter_by(name=data["name"])
    pkg = None
    if query.count() == 0:
        # New package: create it from the posted fields.
        pkg = Package(**data)
        self.db.add(pkg)
        self.db.commit()
    else:
        # Existing package: overwrite each posted attribute.
        pkg = query.one()
        for key, value in data.iteritems():
            pkg.__setattr__(key, value)
        self.db.commit()
    self.finish(pkg.serialize())
def import_packages_to_hashtable(hashtable, packages, filename):
    """Import package data into a list and a hash table.

    Reads a CSV-like file (quotes stripped) with 8 columns per package:
    id, address, city, state, zip, deadline, weight, special notes.
    Each parsed Package is added to ``hashtable`` and appended to ``packages``.
    """
    with open(filename) as f:
        lines = f.readlines()
    # Read each line in the file, create a package item for it, and place
    # the package in the hashtable.
    for line in lines:
        line = line.strip()
        line = line.replace('"', '')
        line = line.split(",")
        # If the special notes column has commas in it, correct the multiple
        # comma separated columns to 1 column.
        special_notes = ""
        if len(line) > 8:
            special_notes = line[7]
            for i in range(8, len(line)):
                special_notes += ", " + line[i]
            line = line[:7] + [special_notes]
        # Create a package. Column 5 (deadline) is converted to minutes,
        # columns 0 and 6 (id, weight) to int.
        package = Package(int(line[0]), str(line[1]), str(line[2]),
                          str(line[3]), str(line[4]),
                          convert_standard_time_to_minutes(str(line[5])),
                          int(line[6]), str(line[7]))
        # Place the package into the hash table.
        hashtable.add(package)
        # Add the package to the provided queue.
        packages.append(package)
async def track(self, ctx, cod, *, tag = ""):
    """Register a track to a correios package

    Args:
        cod: Correios tracking code
        tag (optional): Tag to identify your package. Defaults to ""
    """
    member = ctx.message.author
    # Skip registration if the code is already tracked.
    result = session.query(Package).filter_by(id=cod).first()
    if not result:
        # Validate the code against the Correios API before persisting.
        res = await self.get_correios(cod)
        if len(res):
            session.add(Package(
                id = cod,
                user_id = member.id,
                guild_id = ctx.guild.id,
                tag = tag
            ))
            session.commit()
            logging.info(f"Successfully added tracking for package {cod}.")
            return await ctx.reply(f"Objeto cadastrado com sucesso.")
        logging.info(f"Package {cod} not found in database.")
        return await ctx.reply(f"Não encontramos esse objeto.")
    logging.info(f"Package {cod} was already in database.")
    return await ctx.reply(f"Objeto já cadastrado.")
def main():
    """Compute word and code-block counts for every package README (treated
    as reStructuredText) and upsert one ReadmeAnalysis row per package."""
    for p in Package.select().where(Package.readme != ''):
        # This is a heuristic for word-count.
        # It will be not be precisely correct, depending on your definition of word.
        # For example, a path like 'com.app.example' is split into three words here.
        word_count = len(re.findall('\w+', p.readme))
        # Another heuristic.
        # In reStructuredText (reST), code blocks are introduced by ending a paragraph
        # with a special marker ::. The block must be indented and separated from the
        # surrounding paragraphs by blank lines. Thus, there must be at least two new line
        # characters after the special marker ::.
        # This may prove to be a broken heuristic. In that case, consider using Sphinx:
        # http://www.sphinx-doc.org/en/stable/index.html.
        block_count = len(re.findall('::.*\\n\\n', p.readme))
        try:
            analysis = ReadmeAnalysis.get(ReadmeAnalysis.package == p)
        except ReadmeAnalysis.DoesNotExist:
            analysis = ReadmeAnalysis.create(
                package=p,
                code_count=block_count,
                word_count=word_count
            )
            logging.debug("Created README analysis for package %s", p.name)
        else:
            analysis.code_count = block_count
            analysis.word_count = word_count
            analysis.save()
            logging.debug("Updated README analysis for package %s", p.name)
    logging.info("Finished analyzing READMEs.")
def parse_inactive(self, lines, package_state):
    """Parse CLI output lines into a dict of inactive packages keyed by name.

    Names are the stripped lines following the first line that contains
    'package'; a blank line ends the section. Returns {} for None input.
    """
    result = {}
    if lines is None:
        return result
    in_section = False
    for raw in lines:
        if not in_section:
            # Skip everything until the 'package' header line.
            in_section = 'package' in raw
            continue
        pkg_name = raw.strip()
        if not pkg_name:
            # Blank line terminates the package section.
            break
        result[pkg_name] = Package(location=None, name=pkg_name,
                                   state=package_state)
    return result
def parse_active_and_committed(self, lines, package_state):
    """Parse trunk-grouped CLI output into a dict of 'ncs*' packages.

    Each Package records, via modules_package_state, every module (trunk)
    that lists it. Returns {} for None input or when no trunks are found.
    """
    packages_dict = {}
    if lines is None:
        return packages_dict
    trunks = self.get_trunks(lines)
    if len(trunks) > 0:
        # Collect all the packages (deduplicated, 'ncs...' names only).
        package_list = []
        for module in trunks:
            for package in trunks[module]:
                if not package in package_list and re.match(
                        "(ncs.*)", package):
                    package_list.append(package)
        for package_name in package_list:
            package = Package(name=package_name,
                              location=None,
                              state=package_state)
            # Check which module has this package
            for module in trunks:
                for line in trunks[module]:
                    if line == package_name:
                        package.modules_package_state.append(
                            ModulePackageState(
                                module_name=module,
                                package_state=package_state))
            packages_dict[package_name] = package
    return packages_dict
def main():
    """Compute word and code-block counts for every package README (treated
    as Markdown, rendered to HTML) and upsert one ReadmeAnalysis row each."""
    for p in Package.select().where(Package.readme != ''):
        readme_text = p.readme
        # Render Markdown to HTML so counts operate on visible text/markup.
        html = markdown.markdown(readme_text)
        soup = BeautifulSoup(html, 'html.parser')
        # This is a heuristic for word-count.
        # It will be not be precisely correct, depending on your definition of word.
        # For example, a path like 'com.app.example' is split into three words here.
        word_count = len(re.findall('\w+', soup.text))
        # Another heuristic. As it's typical that inline code examples occur in <pre>
        # blocks, especially in formatted markdown, we count code blocks based
        # on the appearance of <pre> tags.
        code_blocks = soup.find_all('pre')
        block_count = len(code_blocks)
        try:
            analysis = ReadmeAnalysis.get(ReadmeAnalysis.package == p)
        except ReadmeAnalysis.DoesNotExist:
            analysis = ReadmeAnalysis.create(
                package=p,
                code_count=block_count,
                word_count=word_count
            )
            logging.debug("Created README analysis for package %s", p.name)
        else:
            analysis.code_count = block_count
            analysis.word_count = word_count
            analysis.save()
            logging.debug("Updated README analysis for package %s", p.name)
def get_committed_packages(self, lines, package_state):
    """ lines contains the CLI outputs for 'show version running' """
    # Builds a dict of Package keyed by name; each Package records the
    # modules (trunks) that list it. Unlike the active/committed parser,
    # names are not deduplicated or filtered here.
    package_dict = {}
    if lines:
        trunks = self.get_trunks(lines.splitlines())
        if len(trunks) > 0:
            # Collect all the packages
            package_list = []
            for module in trunks:
                for package in trunks[module]:
                    package_list.append(package)
            for package_name in package_list:
                package = Package(name=package_name,
                                  location=None,
                                  state=package_state)
                # Check which module has this package
                for module in trunks:
                    for line in trunks[module]:
                        if line == package_name:
                            package.modules_package_state.append(
                                ModulePackageState(
                                    module_name=module,
                                    package_state=package_state))
                package_dict[package_name] = package
    return package_dict
def parseContents(self, lines, package_state):
    """Parse CLI output into a dict of packages keyed by name.

    Lines after the first line containing 'Packages' are read as either
    'location:name' or bare 'name'; a blank line ends the section.
    Returns {} for None input.
    """
    packages_dict = {}
    if lines is None:
        return packages_dict
    found = False
    lines = lines.splitlines()
    for line in lines:
        if found:
            line = line.strip()
            if ':' in line:
                # Bug fix: split only on the first ':' — a name containing
                # additional colons previously raised ValueError (too many
                # values to unpack).
                location, name = line.split(':', 1)
            else:
                location = ''
                name = line
            # skip anything after the blank line
            if len(line) == 0:
                break
            package = Package(location=location, name=name,
                              state=package_state)
            packages_dict[name] = package
        elif 'Packages' in line:
            found = True
    return packages_dict
def add_dependencies(package, session):
    """Resolve a package's yum dependencies and link them recursively.

    Python 2 code. Looks up the package in the yum sack, creates any missing
    dependency rows (recursing into their own dependencies), and appends each
    to ``package.dependencies``.
    """
    deps = set()
    pkg = yumobj.pkgSack.searchNevra(name=package.name)
    if not pkg:
        print "Cannot find package: %s" % package.name
        return
    deps_d = yumobj.findDeps([pkg[0]])
    # Flatten the nested dependency mapping into a set of names.
    for dep in deps_d.itervalues():
        for req in dep.itervalues():
            deps.add(req[0].name)
    for dep in deps:
        base_query = session.query(Package).filter_by(name=dep)
        if base_query.count() == 0:
            # Unknown dependency: create it and recurse to fill in its deps.
            _new_package = Package(name=dep)
            session.add(_new_package)
            session.flush()
            add_dependencies(_new_package, session)
        dep_as_package = base_query.one()
        if dep_as_package not in package.dependencies:
            package.dependencies.append(dep_as_package)
    print "package: %s has (%i/%i) deps" % (
        package.name, len(package.dependencies), len(deps))
    session.flush()
def stats():
    """JSON endpoint: overall package count plus download totals over the
    last day, week and month."""
    windows = {"day": 1, "week": 7, "month": 30}
    resp = {"count": Package.get_count()}
    for label, days in windows.items():
        resp[label] = Downloads.get_overall_downloads_count(timedelta(days=days))
    return jsonify(resp)
def get_result():
    """Delete the task named by 'taskid' in the JSON request body, removing
    its on-disk cache directory and all associated Job rows."""
    content = json.loads(request.content.read(),
                         object_pairs_hook=deunicodify_hook)
    id = content.get('taskid', 0)
    if Package.selectBy(id=id).count() != 0:
        package = Package.selectBy(id=id)[0]
        task_cache = os.path.join(config['cache']['tasks'], str(package.id))
        # Remove the task's cached build directory if present.
        if os.path.exists(task_cache):
            shutil.rmtree(task_cache)
        for job in Job.selectBy(packageID=package.id):
            job.destroySelf()
        package.destroySelf()
        return {'message': "task %s deleted" % str(id)}
    else:
        return {'message': 'no task %s found' % str(id)}
def package_update(request):
    """Handle a package-update POST and render the success page.

    NOTE(review): this view looks unfinished/debug-heavy — it constructs a
    Package directly from request.POST (positional dict, not keyword fields),
    never saves it, and leaves many debug prints. Confirm intent before use.
    """
    if request.method == 'POST':
        package = Package(request.POST)
        print(package)
        print("---------------------------------")
        # NOTE(review): "bundle_identifer" looks like a typo for
        # "bundle_identifier" — verify against the actual form field name.
        package.bundle_identifier = request.POST.get("bundle_identifer", None)
        print(package)
        print("package")
        print("package.bundle_identifier")
        print(package.bundle_identifier)
        print(package.id)
        form = UpdatePackageForm(request.POST)
        # pack = Package(form)
        # print(pack)
        return render(request, "Application/upload_success.html",
                      context_instance=RequestContext(request))
    else:
        return HttpResponse("FAIL")
def insert_packages(payload):
    """Create a new Package from the JSON body (name, duration, price).

    Aborts with 400 when any field is missing or empty; on success returns
    {'success': True, 'packages': [<formatted new package>]}.
    """
    body = request.json
    name = body.get('name', None)
    duration = body.get('duration', None)
    price = body.get('price', None)
    # Reject both absent fields and empty-string values.
    if any(arg is None for arg in [name, duration, price]) or '' in [
        name, duration, price
    ]:
        abort(400, 'name , duration and price are required fields.')
    new_package = Package(name=name, duration=duration, price=price)
    new_package.insert()
    return jsonify({
        'success': True,
        'packages': [Package.query.get(new_package.id).format()]
    })
def get_result():
    """Return one page (25 items) of packages, newest first, each with its
    jobs attached under 'tasks'.

    NOTE(review): 'page' is not defined in this function — presumably bound
    in an enclosing scope or module global; confirm before reuse.
    """
    limit = 25
    contexts = []
    if 'pkgname' in request.args.keys():
        pkgname = request.args['pkgname'][0]
        packages = Package.selectBy(
            pkgname=pkgname).orderBy("-id")[(page - 1) * limit:page * limit]
    else:
        packages = Package.selectBy().orderBy("-id")[(page - 1) * limit:page * limit]
    for package in packages:
        result = package.dict()
        jobs = Job.selectBy(packageID=package.id)
        result['tasks'] = [job.dict() for job in jobs]
        contexts.append(result)
    return {'data': contexts}
def get_result():
    """Re-queue a task for rebuild: bump its trigger count and reset any job
    that had started building back to WAIT."""
    content = json.loads(request.content.read(),
                         object_pairs_hook=deunicodify_hook)
    id = content.get('taskid', 0)
    if Package.selectBy(id=id).count() != 0:
        package = Package.selectBy(id=id)[0]
        package.triggered = package.triggered + 1
        for job in Job.selectBy(packageID=package.id):
            # Only jobs that had progressed to building (or beyond) need to
            # be reset; waiting jobs are left untouched.
            if job.status >= JobStatus.BUILDING:
                job.status = JobStatus.WAIT
        Log(section='task',
            message='rebuild %(pkgname)s %(pkgver)s to %(reponame)s' % package.dict())
        message = "package set rebuilded"
    else:
        message = "no package set rebuilded"
    return {'message': message}
def add_package(self, siq_path, user_id):
    """Validate an uploaded SIQ (zip) game package, extract its media, and
    register it in MongoDB plus the relational models.

    Returns the Mongo id of the newly inserted package, or None when the
    archive is corrupted, malformed, or already known.
    """
    db = mongo_client.test
    packages = db.packages
    user = User.get(id=user_id)
    with ZipFile(siq_path) as siq:
        # Broken zip check
        if siq.testzip():
            self.update_state(state='CORRUPTED')
            return
        siq_structure = siq.namelist()
        # Siq structure check
        if 'content.xml' not in siq_structure:
            self.update_state(state='BAD_STRUCTURE')
            return
        # Read content.xml bytes
        package_xml = siq.read('content.xml')
        package = GamePackage(package_xml)
        # Check gamepack existence in MongoDB
        if packages.find_one({'_id': package.id}):
            # Already imported: just link it to this user and clean up.
            package_model, status = Package.get_or_create(id=package.id)
            UserPackage.create(user=user, package=package_model)
            os.remove(siq_path)
            return
        media_package_path = os.path.join(config.MEDIA_FOLDER, package.id)
        # Extract only media folders
        allowed_members = [
            x for x in siq_structure
            if x.startswith(('Images/', 'Audio/', 'Video/'))
        ]
        siq.extractall(media_package_path, allowed_members)
        os.remove(siq_path)
        inserted_id = packages.insert_one(package.to_dict()).inserted_id
        package_model, status = Package.get_or_create(id=inserted_id)
        return package_model.id
def populate(comps='comps-f16', do_dependencies=True):
    """Duplicate of the comps loader above: load a comps XML file into
    Group/Package/Category/Root models (Python 2, print statements)."""
    from yum.comps import Comps
    session = DBSession()
    c = Comps()
    c.add('comps/%s.xml' % comps)
    for group in c.groups:
        g = Group(id=group.groupid, name=group.name, description=group.description)
        session.add(g)
        for package in group.packages:
            p = session.query(Package).filter_by(name=to_unicode(package)).first()
            if not p:
                p = Package(name=package)
                session.add(p)
            p.group = g
        session.flush()
    root = Root(name=u'Fedora')
    session.add(root)
    session.flush()
    for category in c.categories:
        # NOTE(review): 'c' is rebound here, shadowing the Comps object above;
        # iteration is unaffected but the reuse is fragile.
        c = Category(id=category.categoryid, name=category.name, description=category.description)
        session.add(c)
        root.categories.append(c)
        for group in category.groups:
            # NOTE(review): Group is created with id=... but queried by
            # group_id — confirm the model defines both attributes.
            g = session.query(Group).filter_by(group_id=to_unicode(group)).first()
            if not g:
                print "Cannot find group: %s" % group
            else:
                g.category = c
        session.flush()
    if do_dependencies:
        for package in session.query(Package).all():
            add_dependencies(package, session)
    session.commit()
def receive():
    """Background receiver loop for the UDP client.

    Runs until ``running[0]`` goes False; decodes each datagram into a
    Package, remembers the server-assigned user id in ``cookie``, and prints
    'reply' payloads line by line and 'msg' notifications.
    """
    while running[0]:
        data, server_ip = upd_client.recvfrom(1024)  # receive a datagram
        text = Package(data.decode("utf-8"))
        # The server echoes back our user id; cache it for outgoing commands.
        cookie["user_id"] = text.user_id
        if text.header == "reply":
            for each in text.reply.split("\n"):
                print(each)
        if text.header == "msg":
            print("您有一条新消息:", text.context)
def featured():
    """Return JSON for Atom's featured packages and themes.

    The list is trimmed so that, after appending the two hard-coded packages
    at the end, the total count is a multiple of three (for a 3-column grid).
    """
    package_list = requests.get("https://atom.io/api/packages/featured")
    theme_list = requests.get("https://atom.io/api/themes/featured")
    featured_list = package_list.json() + theme_list.json()
    # limit data to multiples of three
    # NOTE: relies on Python 2 integer division — rounds len up to the next
    # multiple of 3; two slots are left free for the appended items below.
    length = ((len(featured_list) + 2) / 3) * 3
    featured_list = featured_list[:(length - 2)]
    json_data = []
    for item in featured_list:
        obj = Package.get_package(item['name'])
        # Skip entries we have no local record for.
        if obj is not None:
            json_data.append(obj.get_json())
    # Always-featured picks; fills the total back to a multiple of three.
    for item in ["docblockr", "git-log"]:
        obj = Package.get_package(item)
        json_data.append(obj.get_json())
    return jsonify(results=json_data)
def finish_jobs(self):
    """Promote packages whose builds have all succeeded from UNKNOWN to WAIT
    (ready for upload)."""
    for package in Package.selectBy(upload_status=UploadStatus.UNKNOWN):
        jobs = Job.selectBy(packageID=package.id)
        # A package with no jobs also counts as "all OK", matching the
        # original accumulator logic.
        if all(job.status == JobStatus.BUILD_OK for job in jobs):
            package.upload_status = UploadStatus.WAIT
def get(self):
    """Look up a single Package by the 'name' query parameter, refresh its
    info, and dump it as the response."""
    name = self.request.get('name', None)
    matches = Package.gql('WHERE name = :1', name).fetch(limit=1)
    if not matches:
        self.response.out.write('did not find "%s"' % name)
        return
    refreshed = update_package_info(matches[0])
    self.response.out.write(str(refreshed))
def get(self):
    """Render a plain-text HTML listing of stored packages as GCS links.

    Bug fix: the anchor template previously contained literal '(unknown)'
    placeholders and never used its 'filename' format argument, producing
    identical dead links; interpolate the stored filename instead.
    """
    self.response.out.write(
        '<html><head><title>eggs</title></head><body><pre>')
    bucket = app_identity.get_default_gcs_bucket_name()
    files = Package.query()
    for file in files:
        self.response.out.write(
            '<a href="/gs/{bucket}/{filename}">{filename}</a>\n'.format(
                bucket=bucket,
                filename=file.content))
    self.response.out.write('</pre></body></html>')
def get(self):
    """Fetch the Package whose name matches the 'name' query parameter and
    write its refreshed representation; report when nothing matches."""
    name = self.request.get("name", None)
    results = Package.gql("WHERE name = :1", name).fetch(limit=1)
    if len(results) == 0:
        self.response.out.write('did not find "%s"' % name)
    else:
        pkg = update_package_info(results[0])
        self.response.out.write(str(pkg))
def doPackage(self):
    """Serve the blobstore blob for the package named by the request path;
    404 when the package or its blob is missing."""
    requested_package = self.request.path.strip('/')
    package = Package.query(Package.fullname == requested_package).get()
    # Require both the datastore record and a live blob behind it.
    if package and blobstore.get(package.blob):
        self.response.headers['Content-Type'] = 'application/octet-stream'
        self.send_blob(package.blob)
    else:
        self.error(404)
        self.response.write('That package does not exist.')
def Choose_Http():
    """Flask view: on POST, record an HTTP traffic Package row and launch the
    scapy HTTP sender; always renders the http form template."""
    if request.method == 'POST':
        host = request.form['host']
        port = request.form['port']
        delay = request.form['delay']
        num = request.form['num']
        nowTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')  # current time
        # flow estimate: 58 bytes per packet — NOTE(review): confirm this
        # constant matches the actual HTTP packet size used by scapy_http.
        p = Package(dst=host, num=int(num), protocol="HTTP",
                    flow=int(int(num) * 58), time=nowTime)
        db_session.add(p)
        db_session.commit()
        scapy_http(host=host, port=int(port), delay=int(delay), num=int(num))
    return render_template('http.html')
def packageReleaseCreate(packageId):
    """Create a new release for a package; requires a logged-in user with
    edit permission on the package."""
    user = g.currentUser
    if user is None:
        return api.jsonMissingActionPermissionFailure("create package releases")
    package = Package.getById(g.databaseSession, packageId)
    if package is None:
        return api.jsonFailure("The package does not exist or has been deleted.")
    if not user.canEditPackage(package):
        return api.jsonMissingActionPermissionFailure("create package releases")
    package.createRelease(g.databaseSession, flask.g.time)
    g.databaseSession.commit()
    return { "success": True }
def post(self):
    """Blobstore upload handler: create or update the Package record for an
    uploaded .pup file described by the name/arch/vers/sha1 form fields."""
    # OK, we can process the rest now.
    name = self.request.get('name')
    arch = self.request.get('arch')
    vers = self.request.get('vers')
    sha1 = self.request.get('sha1')
    if not (name and arch and vers and sha1):
        self.badrequest()
        return
    try:
        uploaded = self.get_uploads()[0]
        uploaded_key = uploaded.key()
    except:
        # NOTE(review): bare except — confirm which errors get_uploads can
        # raise before narrowing this.
        self.badrequest()
        return
    fullname = '%s-%s-%s.pup' % (name, vers, arch)
    # Do we already know of a package like this?
    known_package = Package.query(Package.fullname == fullname).get()
    if not known_package:
        # Now create the record.
        package = Package(fullname=fullname, package_name=name,
                          architecture=arch, version=vers, sha1=sha1,
                          blob=uploaded_key)
        package.put()
    else:
        # Wipe out the created item in blobstore, we don't need it.
        item = blobstore.get(known_package.blob)
        item.delete()
        # Update the package contents.
        known_package.sha1 = sha1
        known_package.blob = uploaded_key
        known_package.put()
    self.response.headers['Content-Type'] = 'text/plain'
    self.response.write('ok')
def Choose_Dns():
    """Flask view: on POST, record a DNS traffic Package row and launch the
    DNS packet sender; always renders the dns form template."""
    if request.method == 'POST':
        srchost = request.form['srchost']
        dsthost = request.form['dsthost']
        qdcount = request.form['qdcount']
        qname = request.form['qname']
        nowTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')  # current time
        # flow estimate: 56 bytes per query — NOTE(review): confirm this
        # constant matches the actual DNS packet size used by dns_packets.
        p = Package(dst=dsthost, num=int(qdcount), protocol="DNS",
                    flow=int(int(qdcount) * 56), time=nowTime)
        db_session.add(p)
        db_session.commit()
        dns_packets(srchost=srchost, dsthost=dsthost,
                    qdcount=int(qdcount), qname=qname)
    return render_template('dns.html')
def doIndex(self):
    """Render the index template with distinct architectures, the set of
    available dependency graphs, and the deduplicated package list."""
    self.response.headers['Content-Type'] = 'text/html'
    query = Package.query(projection=['architecture'], distinct=True)
    archs = [x.architecture for x in query]
    # Load graphs.
    graphs = {}
    for dep in DepsModel.query().iter():
        graphs[dep.deps_arch] = True
    packages = dedup_packages(Package.query().fetch(None))
    template_data = {
        # TODO(miselin): this should be figured out from datastore.
        'archs': archs,
        'packages': packages,
        'graphs': graphs,
    }
    template = JINJA_ENVIRONMENT.get_template('templates/index.html')
    self.response.write(template.render(template_data))
def main():
    """Admin console client: synchronous UDP request/reply command loop.

    Unlike the regular client there is no background receiver thread — each
    command waits for its reply (the original comment notes this prevents
    the admin from missing replies).
    """
    upd_client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    server_ip_port = ('127.0.0.1', 25365)
    instructions = ["/msg", "/opennewshop", "/enter", "/goods", "/customers",
                    "/shops", "/users", "/closeshop", "/bye"]
    while True:  # communication loop
        msg = input('请输入您的指令:')
        text = msg.split()
        try:
            # Unknown commands raise AssertionError and are reported below.
            assert text[0] in instructions
            if text[0] == "/bye":
                break
            # 999 is the admin user id — TODO confirm against the server.
            msg = Package("header#" + text[0], 999, ",".join(text[1:]))
            upd_client.sendto(msg.send(), server_ip_port)  # send to the server
            # Admin replies are read synchronously (no async receiver thread).
            data, server_ip = upd_client.recvfrom(1024)
            text = Package(data.decode("utf-8"))
            for each in text.reply.split("\n"):
                print(each)
        except AssertionError:
            print("没有该类型的指令!")
            print("正确的指令有:", instructions)
def post(self):
    """Handle a distutils-style 'file_upload' POST: store the uploaded file
    in the default GCS bucket and, when name and version are supplied,
    record a Package entity pointing at it.

    Bug fix: the GCS object path previously contained a literal '(unknown)'
    placeholder and never used its 'filename' format argument, so every
    upload was written to the same broken path; interpolate the filename.
    Also dropped a redundant f.close() inside the 'with' block.
    """
    name = self.request.get('name', None)
    version = self.request.get('version', None)
    action = self.request.get(':action', None)
    if action == 'file_upload':
        # Find the uploaded file among the form parameters.
        for key, value in self.request.params.items():
            if isinstance(value, cgi.FieldStorage):
                uploaded_file = value
        fname = "/{bucket}/{filename}".format(
            bucket=app_identity.get_default_gcs_bucket_name(),
            filename=uploaded_file.filename.strip())
        with cloudstorage.open(fname, 'w') as f:
            f.write(uploaded_file.file.read())
        if name is not None and version is not None:
            pack = Package(name=name, version=version,
                           content=uploaded_file.filename.strip())
            pack.put()
def get_committed_packages(self, lines, package_state):
    """ lines contains the CLI outputs for 'show version' """
    logging.warning("IOS.py get_committed_packages: lines = %s", lines)
    found = []
    # Every line naming an 'asr...bin' image becomes one Package entry.
    for text_line in lines.splitlines():
        hit = re.search(r'(asr.*bin)', text_line)
        if hit is None:
            continue
        found.append(Package(location=None,
                             name=hit.group(),
                             state=package_state))
    return found
def get(self):
    """Duplicate of the chunked model-fixup task handler above (single-quote
    style): walk Packages by name descending in DB_STEPS-sized chunks and
    apply the fix-up, passing the cursor via the 'name' parameter."""
    import urllib
    from google.appengine.ext.webapp import template
    url_n_template = 'update_models'
    name = self.request.get('name', None)
    if name is None:
        # First request, just get the first name out of the datastore.
        pkg = Package.gql('ORDER BY name DESC').get()
        name = pkg.name
    q = Package.gql('WHERE name <= :1 ORDER BY name DESC', name)
    items = q.fetch(limit=DB_STEPS)
    if len(items) > 1:
        next_name = items[-1].name
        next_url = '/tasks/%s?name=%s' % (url_n_template, urllib.quote(next_name))
    else:
        next_name = 'FINISHED'
        next_url = ''  # Finished processing, go back to main page.
    for current_pkg in items:
        # modify the model if needed here
        #fix_equivalence(current_pkg)
        #current_pkg.py2only = False
        if current_pkg.name in ('pylint', 'docutils'):
            current_pkg.force_green = True
        else:
            current_pkg.force_green = False
        current_pkg.put()
    # end of modify models
    context = {
        'current_name': name,
        'next_name': next_name,
        'next_url': next_url,
    }
    self.response.out.write(template.render('%s.html' % url_n_template, context))
def packageDeletePost(packageId):
    """Delete a package; requires a logged-in user with delete permission."""
    user = g.currentUser
    if user is None:
        return api.jsonMissingActionPermissionFailure("delete packages")
    package = Package.getById(g.databaseSession, packageId)
    if package is None:
        return api.jsonFailure("The package does not exist or has been deleted.")
    if not user.canDeletePackage(package):
        return api.jsonMissingActionPermissionFailure("delete packages")
    package.remove(g.databaseSession)
    g.databaseSession.commit()
    return {"success": True}
def get_inactive_packages(self, lines, package_state):
    """ lines contains the CLI outputs for 'cd flash:', then 'dir' """
    found = []
    # Only directory entries for .bin images are of interest.
    for text_line in lines.splitlines():
        if '.bin' not in text_line:
            continue
        hit = re.search(r'asr\S*', text_line)
        if hit:
            found.append(Package(location=None,
                                 name=hit.group(),
                                 state=package_state))
    return found
def packageReleases(type):
    """Return all packages as dictionaries, each with its latest releases
    attached under 'releases'."""
    packages = []
    packageDictionaries = {}
    for package in Package.getAll(g.databaseSession):
        packageDictionary = package.toDictionary()
        packageDictionary["releases"] = packageDictionary.get("releases", [])
        packageDictionaries[package.id] = packageDictionary
        packages.append(packageDictionary)
    packageReleases = PackageRelease.getLatestPackageReleases(g.databaseSession)
    for packageRelease in packageReleases:
        # Bug fix: indexing with [] raised KeyError for a release whose
        # package is not in the listing; the None-check below only works
        # with .get().
        packageDictionary = packageDictionaries.get(packageRelease.packageId)
        if packageDictionary is None:
            continue
        packageDictionary["releases"].append(packageRelease.toDictionary())
    return packages
def main(package_list_path):
    """Interactively open each listed package's README in a browser.

    Python 2 code (print statements, raw_input). Reads one package name per
    line from ``package_list_path``, writes its README to an HTML file under
    README_DIR, opens it, and waits for Enter between packages.
    """
    with open(package_list_path) as package_list:
        for line in package_list:
            package_name = line.strip()
            package = Package.get(Package.name == package_name)
            readme_path = os.path.join(README_DIR, package.name) + '.html'
            write_readme(readme_path, package.readme)
            print "====Showing README for package", package_name
            print "====Press Enter to continue."
            webbrowser.open("file://" + os.path.abspath(readme_path))
            raw_input()  # wait for user to type <Enter>
    print "You have viewed all packages!"
def upload_tasks(self):
    """Upload up to five WAITing packages to their repositories via
    tools/repo.py, recording success or failure in upload_status."""
    for package in Package.selectBy(upload_status=UploadStatus.WAIT)[:5]:
        package.upload_status = UploadStatus.UPLOADING
        repo_base = config['cache']['repos']
        # repo.py reads its target repo from the environment.
        env = os.environ.copy()
        env['REPOPATH'] = repo_base
        env['NAME'] = package.reponame
        task_cache = os.path.join(config['cache']['tasks'], str(package.id))
        command = "../tools/repo.py include --cache %(cache)s --base %(base)s" % {
            "cache": task_cache,
            "base": package.action
        }
        status, _ = functions.getstatusoutput(command, env=env)
        if status != 0:
            Log(status=False, section='task',
                message='upload tasks %(pkgname)s %(pkgver)s to %(reponame)s' % package.dict())
            package.upload_status = UploadStatus.UPLOAD_FAILED
        else:
            package.upload_status = UploadStatus.UPLOAD_OK
def doDatabase(self):
    """Emit a JSON index of all (deduplicated) packages, keyed 'name-arch'."""
    all_packages = dedup_packages(Package.query().iter())
    result = {}
    for package in all_packages:
        key = '%s-%s' % (package.package_name, package.architecture)
        result[key] = {
            'architecture': package.architecture,
            'version': package.version,
            'name': package.package_name,
            'sha1': package.sha1,
        }
    self.response.headers['Content-Type'] = 'application/json'
    self.response.write(json.dumps(result))
# Command-line entry point: choose between fetching the PyPI package list
# and fetching per-package data (READMEs and download counts).
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Download package stats for PyPI")
    parser.add_argument(
        '--package-list',
        action='store_true',
        help="fetch list of all packages on PyPI"
    )
    parser.add_argument(
        '--pypi-data',
        action='store_true',
        help="fetch PyPI data (READMES and downloads)"
    )
    parser.add_argument(
        '--update',
        action='store_true',
        help="only update existing data (currently only for --pypi-data)"
    )
    args = parser.parse_args()
    if args.package_list:
        create_tables()
        fetch_package_list()
    if args.pypi_data:
        if args.update:
            # Update pass: restrict to packages that already have a description.
            packages = Package.select().where(Package.description != '')
        else:
            # Initial pass: only packages whose README has not been fetched yet.
            packages = Package.select().where(Package.readme >> None)
        fetch_pypi_data(packages)
def get_instructions(pc_uid, update_data):
    """Record a PC's package report and return its pending jobs/config.

    This function will ask for new instructions in the form of a list of
    jobs, which will be scheduled for execution and executed upon receipt.
    These jobs will generally take the form of bash scripts.

    Parameters:
        pc_uid: unique identifier of the reporting PC (looked up via
            PC.objects.get).
        update_data: dict from the client; the keys 'updated_packages'
            and 'removed_packages' are read here.

    Returns:
        A dict with 'jobs' (instructions for Job rows that were NEW, now
        marked SUBMITTED), 'configuration', and optionally
        'do_send_package_info'.
        NOTE(review): the inactive-PC early exit returns the tuple
        ([], False) instead of a dict — callers apparently must handle
        both shapes; confirm before unifying.
    """
    pc = PC.objects.get(uid=pc_uid)
    # Every contact updates the liveness timestamp, even for inactive PCs.
    pc.last_seen = datetime.datetime.now()
    pc.save()
    if not pc.is_active:
        # Fail silently
        return ([], False)

    # --- Record packages the client reports as newly updated/installed ---
    update_pkgs = update_data.get('updated_packages', [])
    if len(update_pkgs) > 0:
        for pdata in update_pkgs:
            # Find or create the package in the global collection of packages
            try:
                p = Package.objects.get(
                    name=pdata['name'],
                    version=pdata['version']
                )
            except Package.DoesNotExist:
                p = Package(
                    name=pdata['name'],
                    version=pdata['version'],
                    description=pdata['description']
                )
                p.save()
            # Change or create the package status for the package/PC.
            # NOTE(review): the assignment captures the return value of
            # .delete(), not a status object; it is immediately rebound
            # below, so only the delete side effect matters here.
            p_status = pc.package_list.statuses.filter(
                package__name=pdata['name'],
            ).delete()
            p_status = PackageStatus(
                status='install',
                package=p,
                package_list=pc.package_list
            )
            p_status.save()
            # Immediately mark the just-recorded status as installed,
            # since the client reported the package as already updated.
            pc.package_list.statuses.filter(
                package__name=pdata['name'],
                package__version=pdata['version'],
            ).update(status='installed ok')

    # --- Drop statuses for packages the client reports as removed ---
    remove_pkgs = update_data.get('removed_packages', [])
    if len(remove_pkgs) > 0:
        pc.package_list.statuses.filter(package__name__in=remove_pkgs).delete()

    # Get list of packages to install and remove.
    to_install, to_remove = pc.pending_package_updates

    # Add packages that are pending update to the list of packages we want
    # installed, as apt-get will upgrade any package in the package list
    # for apt-get install.
    for p in pc.package_list.pending_upgrade_packages:
        to_install.add(p.name)

    # Make sure packages we just installed are not flagged for removal
    for name in [p['name'] for p in update_pkgs]:
        if name in to_remove:
            pc.custom_packages.update_package_status(name, True)
            to_remove.remove(name)

    # Make sure packages we just removed are not flagged for installation
    for name in remove_pkgs:
        if name in to_install:
            pc.custom_packages.update_package_status(name, False)
            to_install.remove(name)

    # Dispatch the system scripts that perform the actual apt operations.
    if len(to_remove):
        sc = Script.get_system_script('remove_packages.sh')
        sc.run_on_pc(pc, ','.join(to_remove))
    if len(to_install):
        sc = Script.get_system_script('install_or_upgrade_packages.sh')
        sc.run_on_pc(pc, ','.join(to_install))

    # Collect NEW jobs, marking each as SUBMITTED as it is handed out.
    jobs = []
    for job in pc.jobs.filter(status=Job.NEW):
        job.status = Job.SUBMITTED
        job.save()
        jobs.append(job.as_instruction)

    result = {
        'jobs': jobs,
        'configuration': pc.get_full_config(),
    }
    if pc.do_send_package_info:
        result['do_send_package_info'] = True
    return result
def packageJson(packageId, type):
    """Return the package as a nested dictionary, or None when it is missing.

    The recursive serialization is given a flag indicating whether the
    requesting user is the package's creator.
    """
    pkg = Package.getById(g.databaseSession, packageId)
    if pkg is None:
        return None
    requester_is_creator = g.currentUser == pkg.creatorUser
    return pkg.toDictionaryRecursive(requester_is_creator)
def test_setup(**kwargs): from django.contrib.auth.models import User from random import choice from desktopsite.apps.snapboard import chomsky from desktopsite.apps.repository.categories import REPOSITORY_CATEGORIES from models import Package, Rating, Version import datetime if not settings.DEBUG: return if Package.objects.all().count() > 0: # return, since there seem to already be threads in the database. return # ask for permission to create the test msg = """ You've installed Repository with DEBUG=True, do you want to populate the board with random users/packages/ratings to test-drive the application? (yes/no): """ populate = raw_input(msg).strip() while not (populate == "yes" or populate == "no"): populate = raw_input("\nPlease type 'yes' or 'no': ").strip() if populate == "no": return # create 10 random users users = ('john', 'sally', 'susan', 'amanda', 'bob', 'tully', 'fran' 'rick', 'alice', 'mary', 'steve', 'chris', 'becca', 'rob' 'peter', 'amy', 'bill', 'nick', 'dustin', 'alex', 'jesus') for u in users: user, created = User.objects.get_or_create(username=u) user.email = "%s@%s.com" % (u, u) user.set_password(u) user.save() # user.is_staff = True # create up to 30 posts tc = range(1, 20) words = chomsky.objects.split(' ') for i in range(0, 20): print 'package ', i, 'created' subj = words[i]+" "+words[i-4]+" "+words[i+2]+" "+words[i-3] package = Package( name=subj, sysname=subj.replace(" ", "_").replace(".", "").replace("(", "").replace(")", "").replace("\n", ""), category=choice(REPOSITORY_CATEGORIES)[0], description = '\n\n'.join([chomsky.chomsky() for x in range(0, choice(range(2, 5)))]), maintainer=choice(User.objects.all()), url="http://www.foo.com/", ) package.save() for j in range(0, choice(range(1, 10))): text = '\n\n'.join([chomsky.chomsky() for x in range(0, choice(range(2, 5)))]) v=Version( name="%s.%s.%s" % (choice(range(1, 5)), choice(range(1, 50)), choice(range(1, 170))), package=package, changelog=text, 
#package_url="http://www.foo.com/bar.lucid.zip", checksum= "".join([choice("abcdef0123456789") for x in range(1, 50)]), verified_safe=choice((True, False)), ) v.save() for adf in range(0, choice(tc)): rating = Rating( user=User.objects.get(pk=adf+1), version= v, score=choice((1,2,3,4,5)), ) rating.save()
def packageEditPost(packageId):
    """Create (packageId is None) or edit an existing package from posted JSON.

    Reads the request body via flask.request.get_json(); expects the keys
    "package", "codeDirectoryTree" and "resourcesDirectoryTree". Performs
    permission checks, field validation, a uniqueness check on the package
    name, then commits the package and both directory trees in one session
    commit. Returns a JSON-serializable dict; failures are reported through
    the api.json* helpers or {"success": False, ...} dicts.
    """
    # NOTE(review): local name shadows the stdlib `json` module; harmless
    # here since only this dict is used below.
    json = flask.request.get_json()

    # Verify permissions
    # First gate: an anonymous user can neither create nor edit.
    if packageId is None:
        if g.currentUser is None:
            return api.jsonMissingActionPermissionFailure("create packages")
    else:
        if g.currentUser is None:
            return api.jsonMissingActionPermissionFailure("edit packages")
        package = Package.getById(g.databaseSession, packageId)

    # Second gate: capability checks, plus existence for the edit path.
    if packageId is None:
        if not g.currentUser.canCreatePackages():
            return api.jsonMissingActionPermissionFailure("create packages")
        package = Package()
    else:
        if package is None:
            return api.jsonFailure("The package does not exist or has been deleted.")
        if not g.currentUser.canEditPackage(package):
            return api.jsonMissingActionPermissionFailure("edit packages")

    # Validate Package and DirectoryTrees
    valid, validationResult = Package.validateFormFieldsDictionary(json["package"])
    if not valid:
        return dict(validationResult, **{"success": False, "object": "package"})
    package.fromFormFieldsDictionary(g.databaseSession, json["package"])

    # Enforce name uniqueness (comparing against a *different* package only).
    namedPackage = Package.getByName(g.databaseSession, package.name)
    if namedPackage is not None and namedPackage != package:
        return {
            "success": False,
            "object": "package",
            "field": "name",
            "message": "A package with this name already exists!",
        }

    valid, validationResult = DirectoryTree.validateFormFieldsDictionary(json["codeDirectoryTree"])
    if not valid:
        return dict(validationResult, **{"success": False, "object": "package"})
    valid, validationResult = DirectoryTree.validateFormFieldsDictionary(json["resourcesDirectoryTree"])
    if not valid:
        return dict(validationResult, **{"success": False, "object": "package"})

    # Commit Package
    # A missing id means this is a brand-new package: record its creator
    # and add it to the session.
    if package.id is None:
        package.creatorUser = g.currentUser
        g.databaseSession.add(package)

    # Commit DirectoryTrees
    package.setCodeDirectoryTreeFromFormFieldsDictionary(g.databaseSession, json["codeDirectoryTree"])
    package.setResourcesDirectoryTreeFromFormFieldsDictionary(g.databaseSession, json["resourcesDirectoryTree"])
    g.databaseSession.commit()
    return {"success": True, "id": package.id}
def packageReleasesJson(packageId, type):
    """Return the release list of a package; empty list when it is unknown."""
    pkg = Package.getById(g.databaseSession, packageId)
    return [] if pkg is None else pkg.releases