Example #1
    def _get_releases_paginated(self, html):

        soup = BeautifulSoup(html, "lxml")
        items = soup.find_all('li', class_='release')
        rel_list = []

        for item in items:

            new_rel = Release()

            name = item.find('p', class_='release-title')

            link = name.find('a')
            link = link['href']

            name = name.text

            artists = item.find('p', class_='release-artists')
            # collapse all runs of whitespace (spaces and newlines) into single spaces
            artists = " ".join(artists.text.split())

            label = item.find('p', class_="release-label")
            label = label.text.strip()
            label = re.sub(' +', ' ', label)


            new_rel.Name = name
            new_rel.LabelName = label
            new_rel.InfoPageLink = 'http://pro.beatport.com' + link

            rel_list.append(new_rel)

        return rel_list
Example #2
    def test__release_page(self):

        with open('tests/files/releasePage.html', 'r') as f:
            html = f.read()

        rel = Release()

        rel.Name = 'ON THE EDGE VOL 1'
        rel.InfoPageLink = 'http://www.beatport.com/release/on-the-edge-vol-1/1164510'


        rel_page = BeatportWeb.ReleasePage(rel, True)

        catid = rel_page._get_catalog_id(html)

        self.assertEqual('SYNDROME13015', catid)


        tracks = rel_page._get_tracks(html)

        self.assertEqual(4, len(tracks))

        artwork = rel_page._get_artwork_link(html)

        self.assertEqual('http://geo-media.beatport.com/image_size/500x500/8195874.jpg', artwork)

        for t in tracks:
            print(t)
Example #3
    def test__get_key(self):
        with open('tests/files/track.html', 'r') as f:
            html = f.read()

        rel = Release()

        rel.Name = 'ON THE EDGE VOL 1'
        rel.InfoPageLink = 'http://www.beatport.com/release/on-the-edge-vol-1/1164510'


        rel_page = BeatportWeb.ReleasePage(rel, True)

        key = rel_page._get_key(html)

        self.assertEqual('hmoll', key)
Example #4
    def __CreateReleaseList__(self, results):
        for rel in results:
            cur_release = Release()

            if rel["name"]:
                cur_release.Name = rel["name"]

            if rel["catalogNumber"]:
                cur_release.Catid = rel["catalogNumber"]

            if rel["label"]["name"]:
                cur_release.LabelName = rel["label"]["name"]

            if rel["id"]:
                cur_release.InfoPageLink = rel["id"]

            self.__ReleaseList.append(cur_release)
Example #5
    def generate_data(self):
        self.requirement = [Requirement(i) for i in xrange(self.trequirements)]
        self.client = [
            Client(i, self.trequirements) for i in xrange(self.tclients)
        ]
        budget_release = int((sum(req.cost for req in self.requirement) *
                              (self.tbudget / 100)) / self.treleases)
        self.release = [
            Release(i, budget_release) for i in xrange(self.treleases)
        ]
        self.precedence = self.generate_precedence()
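The budget formula above takes a fixed percentage of the total requirement cost and spreads it evenly across releases. A worked example with made-up numbers (assuming tbudget is a float so the division does not truncate under Python 2, which the use of xrange suggests):

# Hypothetical values: 10 requirements costing 50 each, tbudget = 30.0 (percent), treleases = 3
# total requirement cost : 10 * 50 = 500
# overall budget         : 500 * (30.0 / 100) = 150.0
# budget per release     : int(150.0 / 3) = 50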
Example #6
    def getFull(self):
        if (not self.fieldsExists or not self.fileExists):
            raise StopIteration

        if (self.hash):
            return self.hash

        self.hash = {}
        self.client_name = ''

        try:
            while True:
                (client_name, client_version, client_timestamp,
                 client_previous_timestamp, dependency_name, dependency_type,
                 dependency_resolved_version,
                 dependency_resolved_version_change) = self.next()

                self.client_name = client_name
                # name, version, type
                dependency = Dependency(dependency_name,
                                        dependency_resolved_version,
                                        dependency_type,
                                        dependency_resolved_version_change)

                try:
                    # release.addDependency
                    release = self.hash[client_version]  # get the release

                    if release.client_previous_timestamp == '':  # previous timestamp not set yet
                        release.client_previous_timestamp = client_previous_timestamp

                    release.addDependency(dependency)
                except KeyError:  # if is first release in csv file
                    release = Release(
                        client_version, client_timestamp,
                        client_previous_timestamp)  # create new release
                    release.addDependency(dependency)  # add dependency
                    self.hash[client_version] = release  # insert in hash

        except StopIteration:
            return self.hash
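A minimal sketch of how the mapping returned by getFull() might be consumed; the instance name reader and the attribute access below are assumptions drawn from the code above, not part of the original:

# Hypothetical usage: getFull() returns a dict mapping client_version -> Release
releases_by_version = reader.getFull()  # 'reader' is an instance of the class above (name assumed)
for version, release in releases_by_version.items():
    dependencies = getattr(release, 'dependencies', None)  # how addDependency stores them is not shown
    print(version, release.client_previous_timestamp, dependencies)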
Example #7
    def accept_changeset(self, cs: Changeset):
        # This scheduler runs changesets in parallel but only releases from the front of
        # the queue, so a new changeset must contain all of the changes carried by the
        # changesets that are already queued. We therefore compile the union of all
        # changed modules. The test modules do not need to be unioned, however: only the
        # new changes have to be validated (previous changes were already validated by
        # previous changesets).
        mod_cs = Changeset(
            changed_modules=set.union(cs.changed_modules, *(r.changeset.changed_modules for r in self.active_q)),
            modules_to_test=cs.modules_to_test
        )
        logger.debug("Incoming changeset: %s, queued changeset: %s", cs, mod_cs)
        r = Release(name=f"R-{uuid.uuid4().hex}", changeset=mod_cs, queued_time=self.tick)
        self.active_q.append(r)
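The union step can be looked at in isolation; a minimal sketch with made-up module names (not taken from the original code):

# Hypothetical illustration of the set.union(...) pattern used above.
queued = [{"auth", "db"}, {"db", "ui"}]  # changed_modules of releases already in the queue
incoming = {"ui", "api"}                 # changed_modules of the incoming changeset
merged = set.union(incoming, *queued)    # {"api", "auth", "db", "ui"}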
Example #8
def main():

    import os, sys, glob, re, datetime, getpass
    from OSUT3AnalysisMonitor import OSUT3AnalysisMonitor
    from Release import Release
    from Test import Test

    cwd = os.path.realpath(os.getcwd())

    if os.path.basename(cwd) != "OSUT3AnalysisMonitor":
        print sys.argv[0] + " is running in \"" + cwd + "\"."
        print "Should be run in OSUT3AnalysisMonitor instead. Exiting..."
        sys.exit(1)

    sys.path.append(cwd)
    os.environ["PATH"] += ":."

    monitor = OSUT3AnalysisMonitor()
    monitor.addRelease(Release("slc6_amd64_gcc530", "CMSSW_8_0_30"))

    resultsDir = os.path.realpath(
        os.path.expanduser("~") + "/public_html/test")  # hart
    condorDir = "/data/users/" + getpass.getuser()
    now = datetime.datetime.now()
    dateDir = now.strftime("%Y_%m_%d/%H_%M_%S")

    for dir in glob.glob("Tests/*"):
        if "__init__.py" in dir:
            continue
        testPackage = os.path.basename(dir)
        for test in glob.glob(dir + "/*"):
            if "__init__.py" in test:
                continue
            testName = os.path.basename(test)
            testDir = testPackage + "/" + testName

            exec("from Tests." + testPackage + "." + testName + "." +
                 testPackage + "_" + testName + " import " + testPackage +
                 "_" + testName)
            test = locals()[testPackage + "_" + testName]
            test.setResultsDir(resultsDir)
            test.setCondorDir(condorDir)
            test.setDateDir(dateDir)
            test.setTestDir(testDir)

            monitor.addTest(test)

    monitor.runTests()
Example #9
def get_info(release_rows: list) -> list:
    """
    Gather all data about given releases from Apple's website.

    Input format (release row on the initial Security page):
    -----
    [
        [
            "<a href="https://support.apple.com/kb/HT213055">macOS Big Sur 11.6.3</a>",
            "macOS Big Sur",
            "26 Jan 2022"
        ]
    ]
    -----

    Return format:
    -----
    [
        Class Release (
            "name": "iOS and iPadOS 14.7",
            "emoji": ":iphone:",
            "security_content_link": "https://support.apple.com/kb/HT212623",
            "release_date": "26 Jan 2022",
            "num_of_bugs": 37,
            "num_of_zero_days": 3,
            "zero_days": {
                "CVE-2021-30761": "WebKit",
                "CVE-2021-30762": "WebKit",
                "CVE-2021-30713": "TCC"
            },
            "num_entries_added": 8,
            "num_entries_updated": 1
        )
    ]
    -----
    """

    releases = []

    for row in release_rows:
        releases.append(Release(row))

    return releases
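A minimal usage sketch, reusing the sample row from the docstring; the attributes printed at the end are assumptions taken from the documented return format, not from the original code:

# Hypothetical call with the docstring's sample row.
rows = [
    [
        '<a href="https://support.apple.com/kb/HT213055">macOS Big Sur 11.6.3</a>',
        "macOS Big Sur",
        "26 Jan 2022",
    ]
]
for release in get_info(rows):
    print(release.name, release.release_date)  # attribute names assumed from the docstring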
Example #10
        if apk_source_datetime is None or source_last_commit_datetime is None or release_datetime is None:
            new_release = True
        if new_release or apk_source_datetime > release_datetime or source_last_commit_datetime > release_datetime:
            new_release = True
    else:
        new_release = True
    if Config.OVERRIDE_RELEASE or new_release:
        if Config.TARGET_ANDROID_VERSION == 10 and "go" not in Config.BUILD_PACKAGE_LIST:
            Config.BUILD_PACKAGE_LIST.append("go")
        Constants.update_android_version_dependencies()
        today = datetime.now(pytz.timezone('Europe/London')).strftime("%a")
        if Config.RELEASE_TYPE.__eq__("canary"):
            Constants.update_sourceforge_release_directory("canary")
        else:
            Constants.update_sourceforge_release_directory("")
        Release.zip(package_list)
        if release_repo is not None:
            release_repo.git_push(
                str(android_version) + ": " + str(commit_message))

if Config.BUILD_CONFIG:
    if FileOp.dir_exists(Constants.config_directory):
        Constants.update_sourceforge_release_directory("config")
        zip_status = Release.zip(['config'])
    else:
        print(Constants.config_directory + " doesn't exist!")

website_repo = None
if FileOp.dir_exists(Constants.website_directory):
    if Config.GIT_CHECK:
        print("Updating the changelog to the website")
Example #11
    def getProjects(self):
        # Perhaps I should provide more API endpoints to make scraping easier...
        projectlist = self.curl_get(self.DOMAIN + "/projects/").getvalue()
        soup = BeautifulSoup(projectlist, "html.parser")
        links = soup.find("ul", "prjlistclass")
        projects = []
        for link in links.find_all("a"):
            project = Project()
            sourceType = None
            projectURL = self.DOMAIN + link.get("href")
            projectName = projectURL.split("/")[-2]

            projectpageHTML = self.curl_get(projectURL).getvalue()
            projectpageSoup = BeautifulSoup(projectpageHTML, "html.parser")

            sourceURL = projectpageSoup.find(name="a", string="Source").get("href")
            sourceSoup = BeautifulSoup(self.curl_get(self.DOMAIN + sourceURL).getvalue(), "html.parser")
            sourceSoupText = sourceSoup.get_text()

            # get source
            if "git clone" in sourceSoupText:
                project.repoType = REPO_TYPES.git
                project.repoURL = "git://beta.datanethost.net/" + projectName + ".git"
            elif "svn co" in sourceSoupText:
                project.repoType = REPO_TYPES.SVN
                project.repoURL = self.DOMAIN + "/svn/" + projectName + "/"
            else:
                project.repoType = REPO_TYPES.hg
                project.repoURL = self.DOMAIN + "/hg/" + projectName + "/"


            # get downloads
            project.releases = []
            downlaodsSoup = BeautifulSoup(self.curl_get(projectURL + "downloads/").getvalue(), "html.parser")
            downloadSection = downlaodsSoup.find("table", "uploads")
            if "No downloads were found." not in downlaodsSoup.get_text():
                downloadRows = downloadSection.find_all("tr")[1:]
                for downloadRow in downloadRows:
                    cols = downloadRow.find_all("td")
                    downloadTD = cols[0]
                    downloadURL = self.DOMAIN + "/p/" + projectName + "/downloads/get/" + downloadTD.a.text
                    fileName = downloadTD.a.text
                    release = Release()
                    release.fileURL = downloadURL
                    release.fileName = fileName
                    project.releases.append(release)

            # get issues
            project.issues = []
            issuesSoup = BeautifulSoup(self.curl_get(projectURL + "issues/").getvalue(), "html.parser")
            if "No issues were found." not in issuesSoup.get_text():
                issuesSection = issuesSoup.find("table", "recent-issues")
                for issueRow in issuesSection.find_all("tr")[1:]:
                    issue = Issue()
                    cols = issueRow.find_all("td")
                    issueId = cols[0].text
                    issueURL = projectURL + "issues/" + issueId + "/"
                    issueStatus = cols[2].text
                    issueSummary = cols[1].text
                    issueTitle = cols[1].find("a").text
                    issueAuthor = cols[3].text
                    issue.author = issueAuthor
                    issue.comments = []
                    issue.status = issueStatus
                    issue.summary = issueSummary
                    issue.title = issueTitle
                    issue.id = issueId
                    # we must go deeper to get comments
                    issueComments = BeautifulSoup(self.curl_get(issueURL).getvalue(), "html.parser")
                    for comment in issueComments.find_all("div", "issue-comment"):
                        author = comment.find("p").get_text().split("by")[1].split(",")[0]
                        date = comment.find("span").get_text()
                        commentText = comment.find("pre").get_text()
                        issueComment = IssueComment()
                        issueComment.date = date
                        issueComment.author = author
                        issueComment.summary = commentText
                        issue.comments.append(issueComment)

                    project.issues.append(issue)

            # get wiki pages
            project.wikis = []
            wikiSoup = BeautifulSoup(self.curl_get(projectURL + "doc/").getvalue(), "html.parser")
            if "No documentation pages were found." not in wikiSoup.get_text():
                wikiSection = wikiSoup.find("table", "recent-issues")
                for wikiRow in wikiSection.find_all("tr")[1:]:
                    wiki = Wiki()
                    cols = wikiRow.find_all("td")
                    wiki.pageName = cols[0].text
                    wiki.summary = cols[1].text
                    wiki.updated = cols[2].text
                    wikiURL = projectURL + "page/" + wiki.pageName + "/"
                    wikiPageSoup = BeautifulSoup(self.curl_get(wikiURL).getvalue(), "html.parser")
                    wikiContent = wikiPageSoup.find(id="wiki-content")
                    wiki.htmlContent = wikiContent.prettify()
                    wiki.textContent = wikiContent.get_text()
                    project.wikis.append(wiki)


            projects.append(project)

        return projects
Example #12
    def __CreateReleaseList__(self, stream):
        found_item = False
        found_artist = False
        found_title = False
        found_label = False
        found_catnum = False

        cur_artist = ""
        cur_title = ""
        cur_label = ""
        cur_catnum = ""

        for item in stream:
            # check for item found
            if ("name" in item) and (item["name"] == "div"):
                for attTupel in item["data"]:
                    if ("class" in attTupel) and ("item" in attTupel):
                        found_item = True

                        # create a new release object
                        cur_release = Release()
                        

            # check if artist field ends
            if found_artist == True:
                if ("name" in item) and (item["name"] == "h4") and (item["type"] == "EndTag"):
                    found_artist = False

            # check if label field ends
            if found_label == True:
                if ("name" in item) and (item["name"] == "span") and (item["type"] == "EndTag"):
                    found_label = False
                    cur_release.LabelName = cur_label.strip()

            # check if title field ends
            if found_title == True:
                if ("name" in item) and (item["name"] == "p") and (item["type"] == "EndTag"):
                    found_title = False

                    # add name to Release instance
                    cur_release.Name = cur_artist.strip() + " " + cur_title.strip()

            # check if catnum field ends
            if found_catnum == True:
                if ("name" in item) and (item["name"] == "span") and (item["type"] == "EndTag"):
                    found_catnum = False
                    
                    # add catnum to release
                    cur_release.Catid = cur_catnum.strip()

                    # because here is the item end, reset all data, and
                    # append the current release to the __ReleaseList
                    self.__ReleaseList.append(cur_release)
                    cur_artist =  ""
                    cur_title =  ""
                    cur_label = ""
                    cur_catnum = ""
                    found_item = False

            # check if label found
            if found_item == True:
                if ("name" in item) and (item["name"] == "span"):
                    for attTupel in item["data"]:
                        if ("class" in attTupel) and ("label" in attTupel):
                            found_label = True

            # check if artist found
            if found_item == True:
                if ("name" in item) and (item["name"] == "h4"):
                    for attTupel in item["data"]:
                        if ("class" in attTupel) and ("artist" in attTupel):
                            found_artist = True

            # check if title found
            if found_item == True:
                if ("name" in item) and (item["name"] == "p"):
                    for attTupel in item["data"]:
                        if ("class" in attTupel) and ("title" in attTupel):
                            found_title = True

            # find the infoPageLink
            if found_item == True and found_title:
                if ("name" in item) and (item["name"] == "a"):
                    for attTupel in item["data"]:
                        if attTupel[0] == "href":
                            cur_release.InfoPageLink = attTupel[1]

            # check if catnum found
            if found_item == True:
                if ("name" in item) and (item["name"] == "span"):
                    for attTupel in item["data"]:
                        if ("class" in attTupel) and ("catnum" in attTupel):
                            found_catnum = True

            # fetch artists
            if found_artist == True:
                if item["type"] == "SpaceCharacters":
                    cur_artist += " "
                if item["type"] == "Characters":
                    cur_artist += item["data"]

            # fetch label
            if found_label == True:
                if item["type"] == "SpaceCharacters":
                    cur_label += " "
                if item["type"] == "Characters":
                    cur_label += item["data"]

            # fetch title
            if found_title == True:
                if item["type"] == "SpaceCharacters":
                    cur_title += " "
                if item["type"] == "Characters":
                    cur_title += item["data"]

            # fetch catnum
            if found_catnum == True:
                if item["type"] == "SpaceCharacters":
                    cur_catnum += " "
                if item["type"] == "Characters":
                    cur_catnum += item["data"]
Example #13
    def getProject(self, projectName):
        project = Project()
        sourceType = None
        projectURL = self.DOMAIN + "/p/" + projectName + "/"

        projectpageHTML = self.curl_get(projectURL).getvalue()
        projectpageSoup = BeautifulSoup(projectpageHTML, "html.parser")

        sourceURL = projectpageSoup.find(name="a", string="Source").get("href")
        sourceSoup = BeautifulSoup(self.curl_get(self.DOMAIN + "/p/" + sourceURL).getvalue(), "html.parser")
        sourceSoupText = sourceSoup.get_text()

        # get source
        if "git clone" in sourceSoupText:
            project.repoType = REPO_TYPES.git
            project.repoURL = "https://code.google.com/p/" + projectName + "/"
        elif "svn co" in sourceSoupText:
            project.repoType = REPO_TYPES.SVN
            project.repoURL = "http://" + projectName + ".googlecode.com/svn/"
        else:
            project.repoType = REPO_TYPES.hg
            project.repoURL = "https://code.google.com/p/" + projectName + "/"


        # get downloads
        project.releases = []
        downlaodsSoup = BeautifulSoup(self.curl_get(projectURL + "downloads/list").getvalue(), "html.parser")
        downloadSection = downlaodsSoup.find("table", "results")
        if "Your search did not generate any results." not in downlaodsSoup.get_text():
            downloadRows = downloadSection.find_all("tr")[1:]
            for downloadRow in downloadRows:
                cols = downloadRow.find_all("td")
                downloadTD = cols[1]
                downloadURL = "https://" + projectName + ".googlecode.com/files/" + downloadTD.a.text.replace("\n", "").strip(" ")
                fileName = downloadTD.a.text.replace("\n", "").strip(" ")
                release = Release()
                release.fileURL = downloadURL
                release.fileName = fileName
                project.releases.append(release)

        # get issues
        project.issues = []
        issuesSoup = BeautifulSoup(self.curl_get(projectURL + "issues/list").getvalue(), "html.parser")
        if "Your search did not generate any results." not in issuesSoup.get_text():
            issuesSection = issuesSoup.find("table", "results")
            for issueRow in issuesSection.find_all("tr")[1:]:
                issue = Issue()
                cols = issueRow.find_all("td")
                issueId = cols[1].text.replace("\n", "").strip()
                issueURL = projectURL + "issues/detail?id=" + issueId
                issueStatus = cols[3].text.replace("\n", "").strip(" ")
                issueSummary = cols[8].text.replace("\n", "")
                issueTitle = cols[8].text.replace("\n", "")
                issueAuthor = cols[5].text.replace("\n", "")

                #issue.author = issueAuthor
                issue.comments = []
                issue.status = issueStatus.strip(" ")
                issue.summary = issueSummary.strip(" ")
                issue.title = issueTitle
                issue.id = issueId

                # we must go deeper to get comments
                issueComments = BeautifulSoup(self.curl_get(issueURL).getvalue(), "html.parser")
                for comment in issueComments.find_all("div", "vt"):
                    #author = comment.find(class_="author").find("a").text
                    author = (comment.find(class_="author").find_all("a")[-1]).contents
                    date = comment.find("span", "date")["title"]
                    commentText = comment.find("pre").get_text()
                    issueComment = IssueComment()
                    issueComment.date = date
                    issueComment.author = author
                    issueComment.summary = commentText
                    issue.comments.append(issueComment)

                project.issues.append(issue)

        # get wiki pages
        project.wikis = []
        wikiSoup = BeautifulSoup(self.curl_get(projectURL + "w/list").getvalue(), "html.parser")
        if "Your search did not generate any results." not in wikiSoup.get_text():
            wikiSection = wikiSoup.find("table", "results")
            for wikiRow in wikiSection.find_all("tr")[1:]:
                wiki = Wiki()
                cols = wikiRow.find_all("td")
                wiki.pageName = cols[1].text.replace("\n", "").strip(" ")
                wiki.summary = cols[2].text.replace("\n", "").strip(" ")
                wiki.updated = cols[3].text.replace("\n", "").strip(" ")
                wikiURL = projectURL + "wiki/" + wiki.pageName
                wikiPageSoup = BeautifulSoup(self.curl_get(wikiURL).getvalue(), "html.parser")
                wikiContent = wikiPageSoup.find(id="wikicontent")
                wiki.htmlContent = wikiContent.prettify()
                wiki.textContent = wikiContent.get_text()
                project.wikis.append(wiki)

        return project
Example #14
android_versions = [Config.TARGET_ANDROID_VERSION]
package_list = Config.BUILD_PACKAGE_LIST
if arg_len > 1:
    android_versions = sys.argv[1].split(',')
    if arg_len > 2:
        package_list = sys.argv[2].split(',')

print("Android Versions to build: " + str(android_versions))
print("---------------------------------------")
print("Packages to build: " + str(package_list))
print("---------------------------------------")
# # override when we don't want to execute anything
# android_versions = []

if PROJECT_MODE.__eq__("fetch"):
    pkg_list = Release.package(FETCH_PACKAGE)
    if pkg_list.__len__() > 0:
        message = "Packages Successfully Fetched"
        print(message)
    else:
        message = "Fetching Failed"
        print(message)
else:
    release_repo = None
    source_last_commit_datetime = None
    if Config.GIT_CHECK:
        if FileOp.dir_exists(Constants.release_history_directory):
            release_repo = Git(Constants.release_history_directory)
        else:
            print(Constants.release_history_directory + " doesn't exist!")
        source_repo = Git(Constants.cwd)
Example #15
                                               branch=branch, depth=1)
                    assert repo.__class__ is Repo  # clone an existing repository
                    assert Repo.init(repo_dir).__class__ is Repo
                except Exception as e:
                    print("Exception caught while cloning the repo: " + str(e))
                    try:
                        branch = "main"
                        print(f"git clone -b --depth=1 {branch} https://gitlab.com/nikgapps/{android_version}.git")
                        repo = git.Repo.clone_from(f"https://gitlab.com/nikgapps/{android_version}.git",
                                                   repo_dir,
                                                   branch=branch, depth=1)
                        assert repo.__class__ is Repo  # clone an existing repository
                        assert Repo.init(repo_dir).__class__ is Repo
                    except Exception as e:
                        print("Exception caught while cloning the repo: " + str(e))
                        continue
                Constants.end_of_function(start_time,
                                          f"Time taken to clone -b {branch} gitlab.com/nikgapps/{android_version}.git")
                Config.TARGET_ANDROID_VERSION = int(android_version)
                if FileOp.dir_exists(repo_dir):
                    Constants.update_sourceforge_release_directory("config")
                    zip_status = Release.zip(['config'])
                else:
                    print(f"{repo_dir} doesn't exist!")
            else:
                print(f"There is no config file in {config_folder}, cloning is not required!")
    else:
        print(Constants.config_directory + " doesn't exist!")

Constants.end_of_function(actual_start_time, "Total time taken by the program to build custom builds")
Example #16
    def accept_changeset(self, cs: Changeset):
        logger.debug("Accepting changeset: %s", cs)
        r = Release(name=f"R-{uuid.uuid4().hex}", changeset=cs, queued_time=self.tick)
        self.active_q.append(r)