Beispiel #1
0
 def __init__(self, headless=False):
     """
     Create a Chrome driver backed by the bundled chromedriver binary.
     :param headless: run the browser without a visible window when True
     """
     self.logger = Logger("PureDriver").get_logger()
     # chromedriver ships in <package parent>/resources
     resources_parent = pathlib.Path(os.path.dirname(__file__)).parent
     driver_path = os.path.join(resources_parent, "resources/chromedriver")
     options = Options()
     if headless:
         options.headless = True
     super().__init__(executable_path=driver_path, options=options)
     # implicit wait applies to every element lookup on this driver
     self.implicitly_wait(wait_seconds)
 def __init__(self, dbConn, dbSession, readOnly):
     """
     Capture database handles and build the artist/release factories.
     :param readOnly: when truthy, importing should not write to the database
     """
     self.dbConn = dbConn
     self.dbSession = dbSession
     self.readOnly = readOnly
     self.logger = Logger()
     self.artistFactory = ArtistFactory(dbConn, dbSession)
     self.releaseFactory = ReleaseFactory(dbConn, dbSession)
     # collects rows that could not be matched to database records
     self.notFoundEntryInfo = []
Beispiel #3
0
 def __init__(self, referer=None):
     """
     Build the aggregate searcher over all external music services.
     :param referer: HTTP referer passed to each service client; falls back
         to the project URL when missing or pointing at localhost
     """
     self.referer = referer
     if not self.referer or self.referer.startswith("http://localhost"):
         self.referer = "http://github.com/sphildreth/roadie"
     self.logger = Logger()
     self.allMusicSearcher = AllMusicGuide(self.referer)
     self.spotifySearcher = Spotify(self.referer)
     self.mbSearcher = MusicBrainz(self.referer)
     self.lastFMSearcher = LastFM(self.referer)
     self.iTunesSearcher = iTunes(self.referer)
     # Fix: previously an ImageSearcher() without the referer was constructed
     # here and immediately overwritten; keep only the referer-aware instance.
     self.imageSearcher = ImageSearcher(self.referer)
Beispiel #4
0
class PureDriver(Chrome):
    """
    Selenium Chrome driver wrapper for the Pure Yoga (pure360) booking site.

    Bundles a project logger, the packaged chromedriver binary, and helpers
    for signing in, reading the timetable grid, and booking lessons.
    NOTE(review): depends on module-level `wait_seconds` and the `xpath`
    mapping defined elsewhere in this file — confirm both exist.
    """
    def __init__(self, headless=False):
        """
        Create the Chrome driver.
        :param headless: run the browser without a visible window when True
        """
        self.logger = Logger("PureDriver").get_logger()
        # chromedriver is shipped in <package parent>/resources
        executable_path = os.path.join(
            pathlib.Path(os.path.dirname(__file__)).parent,
            "resources/chromedriver")
        options = Options()
        if headless:
            options.headless = True
        super().__init__(executable_path=executable_path, options=options)
        # implicit wait applies to every element lookup on this driver
        self.implicitly_wait(wait_seconds)

    def go_booking_with_location_id(self) -> None:
        """
        Open the booking page for the location id read from
        resources/condition.json (key 'location_id').
        :return: None; navigation errors are logged and swallowed
        """
        with open(
                os.path.join(os.path.dirname(os.path.dirname(__file__)),
                             "resources/condition.json")) as f:
            location_id = json.load(f)['location_id']
        url = f"https://pure360.pure-yoga.com/en/HK?location_id={location_id}"
        try:
            self.logger.info(f"Go to {url}")
            self.get(url=url)
        except Exception as e:
            self.logger.error(e)

    def sign_in(self):
        """
        Sign in to the booking system using credentials taken from the
        `username` / `password` environment variables.
        :return: None; any failure (including missing env vars) is logged
        """
        self.logger.info(f"[Start] sign in...")
        try:
            # missing env vars raise KeyError, caught and logged below
            username = os.environ['username']
            password = os.environ['password']
            time.sleep(1)
            sign_in_element = WebDriverWait(self, wait_seconds).until(
                EC.element_to_be_clickable(
                    (By.XPATH, xpath['sign_in_button'])))
            sign_in_element.click()
            time.sleep(1)
            self.find_element_by_xpath(
                xpath['username_input']).send_keys(username)
            self.find_element_by_xpath(
                xpath['password_input']).send_keys(password)
            self.find_element_by_xpath(xpath['login_input']).click()
            self.logger.info(f"[End] sign in...")
        except Exception as e:
            self.logger.error(e)

    def get_time_list(self) -> list:
        """
        Get vertical column time to list.
        :return: list of 'data-time' attribute strings, or None when the
            lookup fails (the error is logged)
        """
        self.logger.info(f"[Start] get time list...")
        try:
            WebDriverWait(self, wait_seconds).until(
                EC.visibility_of_all_elements_located(
                    (By.XPATH, xpath['time_tr'])))
            time_list_elements = self.find_elements_by_xpath(xpath['time_tr'])
            time_list = list(
                map(lambda element: element.get_attribute("data-time"),
                    time_list_elements))
            self.logger.info(f"[End] get time list...")
            return time_list
        except Exception as e:
            # NOTE(review): returns None here despite the -> list annotation
            self.logger.error(e)

    def get_date_list(self) -> list:
        """
        Get horizontal row date to list.
        :return: list of 'data-date' attribute strings, or None when the
            lookup fails (the error is logged)
        """
        self.logger.info(f"[Start] get date list...")
        try:
            date_list_elements = self.find_elements_by_xpath(xpath["date_tr"])
            date_list = list(
                map(lambda element: element.get_attribute("data-date"),
                    date_list_elements))
            self.logger.info(f"[End] get date list...")
            return date_list
        except Exception as e:
            self.logger.error(e)

    def go_next_week(self):
        """
        if sat and sun i need to click next week...
        since the date is on next week table
        :return: None
        """
        # only on Saturday/Sunday: the target date lives in next week's table
        if datetime.now().strftime("%a") in ["Sat", "Sun"]:
            self.logger.info(f"[Start] go next week...")
            try:
                self.find_element_by_xpath(xpath['week_span']).click()
                # sleeps give the week-selector dropdown time to render
                time.sleep(2)
                self.find_element_by_xpath(xpath['next_week_2li']).click()
                time.sleep(3)
                self.logger.info(f"[End] go next week...")
            except Exception as e:
                self.logger.error(e)
        else:
            pass

    def get_lesson_dict_list(self, xpaths):
        """
        Get the target column with class.
        :param xpaths: iterable of XPath strings, one per candidate lesson cell
        :return: list of lesson dicts (keys: date/time/lesson/teacher/
            duration/button), or None when the lookup fails (error is logged)
        """
        self.logger.info(f"[Start] go get lesson box...")
        try:
            # NOTE(review): the lambda parameter shadows the module-level
            # `xpath` mapping used elsewhere in this class
            lesson_elements = list(
                map(lambda xpath: self.find_element_by_xpath(xpath), xpaths))
            not_empty_lesson_element = list(
                filter(
                    lambda element: element.get_attribute("innerText") != "",
                    lesson_elements))
            lesson_dict_list = list(
                map(
                    lambda element: extract_lesson_tutor_name_mins_button(
                        element), not_empty_lesson_element))
            # the extractor returns False for cells it cannot parse; drop them
            lesson_dict_list = list(
                filter(lambda element: element != False, lesson_dict_list))
            self.logger.info(
                f"[End] There are {len(lesson_dict_list)} nice courses!")
            for lesson_dict in lesson_dict_list:
                self.logger.info(
                    f"[{lesson_dict['date']} {lesson_dict['time']}] {lesson_dict['lesson']} teached by {lesson_dict['teacher']} with {lesson_dict['duration']}mins"
                )
            return lesson_dict_list
        except Exception as e:
            self.logger.error(e)

    def click_book(self, lesson_dict):
        """
        Click book lesson.
        :param lesson_dict: dict with a clickable 'button' element plus
            date/time/lesson/teacher/duration entries used for logging
        :return: None
        """
        lesson_dict['button'].click()
        self.logger.info(
            f"Clicked Book [{lesson_dict['date']} {lesson_dict['time']}] {lesson_dict['lesson']} teached by {lesson_dict['teacher']} with {lesson_dict['duration']}mins]"
        )
Beispiel #5
0
 def __init__(self, config, dbConn, dbSession, readOnly):
     """
     Store database handles and delegate config/logger setup to the base class.
     :param readOnly: None is normalized to False; True suppresses writes
     """
     self.logger = Logger()
     self.conn = dbConn
     self.session = dbSession
     self.readOnly = readOnly or False
     super().__init__(config, self.logger)
Beispiel #6
0
class Validator(ProcessorBase):
    """
    Validates that an Artist's Releases and Tracks match the files in the
    music library, repairing file references where possible and marking each
    release 'Complete' or 'Incomplete'.
    """
    def __init__(self, config, dbConn, dbSession, readOnly):
        # None normalized to False; readOnly suppresses DB writes / file moves.
        self.readOnly = readOnly or False
        self.logger = Logger()
        self.conn = dbConn
        self.session = dbSession
        super().__init__(config, self.logger)

    def validateArtists(self):
        # Validate every artist currently in the database.
        for artist in self.session.query(Artist).all():
            self.validate(artist)

    def validate(self, artist, onlyValidateRelease=None):
        """
        Do sanity checks on given Artist
        :param onlyValidateRelease: Release - when supplied, only this release
            of the artist is validated; all others are skipped
        :param artist: Artist
        :return: None; commits on success unless readOnly (then rolls back)
        :raises RuntimeError: when artist or required configuration is missing
        """
        if not artist:
            raise RuntimeError("Invalid Artist")
        if not self.config:
            raise RuntimeError("Invalid Configuration")
        if not self.libraryFolder:
            raise RuntimeError("Invalid Configuration: Library Folder Not Set")
        # Single timestamp reused for every lastUpdated touched in this run.
        now = arrow.utcnow().datetime
        self.logger.info("Validating Artist [" + artist.name + "] Release Set [" + str(
            onlyValidateRelease is None) + "]")
        try:
            for release in artist.releases:
                issuesFound = False
                if onlyValidateRelease and release.roadieId != onlyValidateRelease.roadieId:
                    continue
                releaseFolder = self.albumFolder(artist, release.releaseDate.strftime('%Y'), release.title)
                try:
                    folderExists = os.path.exists(releaseFolder)
                except:
                    # NOTE(review): bare except — any error counts as "missing".
                    folderExists = False
                if not folderExists:
                    if not self.readOnly:
                        # Folder missing: try to locate each track elsewhere and
                        # move it into the library; otherwise clear its file info.
                        for media in release.media:
                            for track in media.tracks:
                                locatedTrackInfo = self.tryToFindFileForTrack(artist, track)
                                trackFullPath = track.fullPath()
                                if locatedTrackInfo and trackFullPath and not os.path.exists(trackFullPath):
                                    movedFile = self.moveToLibrary(artist, locatedTrackInfo['id3'],
                                                                   locatedTrackInfo['fileName'])
                                    if movedFile:
                                        self.logger.warn(
                                            "! Moved File From [" + locatedTrackInfo[
                                                'fileName'] + "] To [" + movedFile + "]")
                                        folderExists = True
                                else:
                                    track.filePath = None
                                    track.fileName = None
                                    track.fileSize = 0
                                    track.hash = None
                                track.lastUpdated = now
                    if not folderExists:
                        release.libraryStatus = 'Incomplete'
                        self.logger.warn(
                            "X Marking Release Missing [" + str(
                                release) + "] Missing Folder [" + releaseFolder + "] Not Found")
                        continue
                releaseTrackCount = 0
                # If release is not already complete, set to complete unless its found otherwise
                release.libraryStatus = 'Complete'
                releaseMediaWithTracks = []
                for releaseMedia in release.media:
                    releaseMediaTrackCount = 0
                    for track in sorted(releaseMedia.tracks, key=lambda tt: tt.trackNumber):
                        try:
                            trackFilename = self.pathToTrack(track)
                            isTrackFilePresent = False
                            if trackFilename:
                                try:
                                    isTrackFilePresent = os.path.isfile(trackFilename)
                                except:
                                    self.logger.exception()
                                    pass
                            if not isTrackFilePresent:
                                # See if track exists in another folder and title was renamed so folder no longer
                                #   matches what it is expected to be
                                if not self.readOnly:
                                    locatedTrackInfo = self.tryToFindFileForTrack(artist, track)
                                    if locatedTrackInfo and not isEqual(trackFilename, locatedTrackInfo['fileName']):
                                        movedFile = self.moveToLibrary(artist, locatedTrackInfo['id3'],
                                                                       locatedTrackInfo['fileName'])
                                        if movedFile:
                                            head, tail = os.path.split(movedFile)
                                            headNoLibrary = head.replace(self.config['ROADIE_LIBRARY_FOLDER'], "")
                                            trackHash = self.makeTrackHash(artist.roadieId, movedFile)
                                            track.fileName = tail
                                            track.filePath = headNoLibrary
                                            track.hash = trackHash
                                            track.fileSize = os.path.getsize(movedFile)
                                            track.lastUpdated = now
                                            self.logger.warn(
                                                "! Located Track [" + str(track.info(includePathInfo=True)) + "]")
                                            isTrackFilePresent = True
                                    else:
                                        # Track file truly missing: clear its file
                                        # metadata and mark the release incomplete.
                                        track.filePath = None
                                        track.fileName = None
                                        track.fileSize = 0
                                        track.hash = None
                                        track.lastUpdated = now
                                        self.logger.warn(
                                            "X Missing Track [" + str(
                                                track.info(includePathInfo=True)) + "] File [" + str(
                                                trackFilename) + "]")
                                        issuesFound = True
                                        release.libraryStatus = 'Incomplete'
                            if isTrackFilePresent:
                                releaseMediaTrackCount += 1
                                releaseTrackCount += 1
                                # Present files are expected to be numbered 1..n
                                # in order; any gap or duplicate is flagged.
                                if not isEqual(track.trackNumber, releaseMediaTrackCount):
                                    self.logger.warn("! Track Number Sequence Incorrect Is [" +
                                                     str(track.trackNumber) + "] Expected [" +
                                                     str(releaseMediaTrackCount) + "]")
                                    release.libraryStatus = 'Incomplete'
                                    issuesFound = True
                        except:
                            self.logger.exception()
                            issuesFound = True
                            pass
                    releaseMedia.trackCount = releaseMediaTrackCount
                    if releaseMedia.trackCount > 0:
                        releaseMediaWithTracks.append(releaseMedia)
                if not self.readOnly:
                    # Drop media that ended up with no tracks on disk.
                    release.media = releaseMediaWithTracks
                    release.mediaCount = len(releaseMediaWithTracks)
                    # Seems not likely that a release only has a single track; more likely missing unknown tracks
                    if releaseTrackCount > 1 and release.trackCount < 2:
                        release.trackCount = releaseTrackCount
                    release.lastUpdated = now
                self.logger.info("Validated Artist [" + str(artist) + "], " +
                                 "Release [" + str(release) + "], " +
                                 "IssuesFound [" + str(issuesFound) + "]")
            if not self.readOnly:
                self.session.commit()
            else:
                self.session.rollback()
        except:
            self.logger.exception("Validating Artist, Rolling Back Session Transactions")
            try:
                self.session.rollback()
            except:
                pass
class CollectionImporter(ProcessorBase):
    """
    Imports a positional list of releases (CSV) into a Collection.

    The CSV format string (e.g. "Position,Artist,Release") names which column
    holds which field; rows whose artist or release cannot be found in the
    database are recorded in `notFoundEntryInfo` and skipped.
    """
    # Per-import state populated by importFile()/importCollection().
    format = None
    positions = None
    filename = None
    collectionId = None
    collection = None

    def __init__(self, dbConn, dbSession, readOnly):
        self.logger = Logger()
        self.dbConn = dbConn
        self.dbSession = dbSession
        self.artistFactory = ArtistFactory(dbConn, dbSession)
        self.releaseFactory = ReleaseFactory(dbConn, dbSession)
        # Rows that could not be matched to database records.
        self.notFoundEntryInfo = []
        self.readOnly = readOnly

    def _findColumns(self):
        """
        Resolve the position/release/artist column indexes from self.positions.
        :return: True when all three columns were found, False otherwise
        """
        self.position = -1
        self.release = -1
        self.artist = -1
        for i, position in enumerate(self.positions):
            name = position.lower()
            if name == "position":
                self.position = i
            elif name in ("release", "album"):
                self.release = i
            elif name == "artist":
                self.artist = i
        if self.position < 0 or self.release < 0 or self.artist < 0:
            self.logger.critical("Unable To Find Required Positions")
            return False
        return True

    def importFile(self, collectionId, fileFormat, filename):
        """
        Import a CSV file into the collection identified by collectionId.
        :return: importCsvData() result, or None when the file is missing
        """
        self.collectionId = collectionId
        self.collection = self.dbSession.query(Collection).filter(Collection.id == collectionId).first()
        self.format = fileFormat
        self.positions = self.format.split(',')
        self.filename = filename
        if not os.path.exists(self.filename):
            self.logger.critical("Unable to Find CSV File [" + self.filename + "]")
        else:
            self.logger.debug("Importing [" + self.filename + "]")
            # Fix: close the file handle when done (it was leaked before).
            with open(self.filename) as csvFile:
                return self.importCsvData(csvFile)

    def importCollection(self, collection):
        """Import the CSV text stored on the collection record itself."""
        self.collectionId = collection.id
        self.collection = collection
        self.positions = collection.listInCSVFormat.split(',')
        self.importCsvData(io.StringIO(collection.listInCSV))

    def importCsvData(self, csvData):
        """
        Read CSV rows from the file-like `csvData` and rebuild the
        collection's release list.
        :return: True on success, False on any failure (session rolled back)
        """
        try:
            if not self.collection:
                # Fix: str() — collectionId may be an int, which would raise
                # on concatenation.
                self.logger.critical("Unable to Find Collection Id [" + str(self.collectionId) + "]")
                return False
            # Fix: abort when required columns are missing; previously the
            # False result was ignored and rows were indexed with -1.
            if not self._findColumns():
                return False
            reader = csv.reader(csvData)
            self.collection.collectionReleases = []
            for row in reader:
                csvPosition = int(row[self.position].strip())
                csvArtist = row[self.artist].strip()
                csvRelease = row[self.release].strip()
                artist = self.artistFactory.get(csvArtist, False)
                if not artist:
                    self.logger.warn(("Artist [" + csvArtist + "] Not Found In Database").encode('utf-8'))
                    self.notFoundEntryInfo.append(
                        {'col': self.collection.name, 'position': csvPosition, 'artist': csvArtist,
                         'release': csvRelease})
                    continue
                release = self.releaseFactory.get(artist, csvRelease, False)
                if not release:
                    self.logger.warn(
                        ("Not able to find Release [" + csvRelease + "], Artist [" + csvArtist + "]").encode(
                            'utf-8'))
                    self.notFoundEntryInfo.append(
                        {'col': self.collection.name, 'position': csvPosition, 'artist': csvArtist,
                         'release': csvRelease})
                    continue
                colRelease = CollectionRelease()
                colRelease.releaseId = release.id
                colRelease.listNumber = csvPosition
                colRelease.createdDate = arrow.utcnow().datetime
                colRelease.roadieId = str(uuid.uuid4())
                self.collection.collectionReleases.append(colRelease)
                self.logger.info(
                    "Added Position [" + str(csvPosition) + "] Release [" + str(release) + "] To Collection")
            self.collection.lastUpdated = arrow.utcnow().datetime
            self.dbSession.commit()
            return True
        except Exception:
            # Fix: narrowed from a bare except.
            self.logger.exception("Error Importing Collection [" + self.collection.name + "]")
            self.dbSession.rollback()
            return False
Beispiel #8
0
 def __init__(self, dbConn, dbSession):
     """Bind the database handles and create the shared artist searcher."""
     self.logger = Logger()
     self.searcher = ArtistSearcher()
     self.conn = dbConn
     self.session = dbSession
Beispiel #9
0
class ReleaseFactory(object):
    def __init__(self, dbConn, dbSession):
        """Bind the database handles and create the shared artist searcher."""
        self.logger = Logger()
        self.searcher = ArtistSearcher()
        self.conn = dbConn
        self.session = dbSession

    def getAllForArtist(self, artist, forceRefresh=False):
        """
        Query Database for a release with the given title, if not found search and if found save and return results
        :param artist: Artist
        :param forceRefresh: bool - re-search external engines even when the
            database already has releases
        :return: list of Release, or None when the artist is missing or the
            search finds nothing
        """
        if not artist:
            return None
        printableArtistName = artist.name.encode('ascii', 'ignore').decode('utf-8')
        releases = self._getAllFromDatabaseForArtist(artist)
        if not releases or forceRefresh:
            if not releases:
                self.logger.info("Releases For Artist [" + printableArtistName + "] Not Found")
            else:
                self.logger.info("Refreshing Releases For Artist [" + printableArtistName + "]")
            releases = []
            srList = self.searcher.searchForArtistReleases(artist, [])
            if not srList:
                self.logger.info("Releases For Artist [" + printableArtistName +
                                 "] Not Found")
                return None
            # Fix: removed the redundant `if srList:` check — the early return
            # above already guarantees srList is non-empty here.
            for sr in srList:
                release = self._createDatabaseModelFromSearchModel(artist, sr.title, sr)
                self.session.add(release)
                releases.append(release)
            self.session.commit()
        return releases

    def get(self, artist, title, doFindIfNotInDB=True, forceRefresh=False):
        """
        Query Database for a release with the given title, if not found search and if found save and return results
        :param forceRefresh: bool - re-search even when the release exists
        :param doFindIfNotInDB: bool - search external engines on a DB miss
        :rtype : Release
        :param artist: Artist
        :param title: str
        :return: Release, or None when not found / on error
        """
        try:
            if not title or not artist:
                return None
            startTime = arrow.utcnow().datetime
            printableTitle = title.encode('ascii', 'ignore').decode('utf-8')
            printableArtistName = artist.name.encode('ascii', 'ignore').decode('utf-8')
            release = self._getFromDatabaseByTitle(artist, title)
            # Parenthesized to make the intended precedence explicit:
            # search when missing (and allowed), or whenever forced.
            if (not release and doFindIfNotInDB) or forceRefresh:
                if not release:
                    self.logger.info("Release For Artist [" + printableArtistName +
                                     "] Not Found By Title [" + printableTitle + "]")
                else:
                    # Fix: log message was missing its closing bracket.
                    self.logger.info("Refreshing Release [" + printableTitle +
                                     "] For Artist [" + printableArtistName + "]")
                release = Release()
                # Signatures of the artist's existing release images let the
                # searcher recognize duplicates.
                artistReleaseImages = self.session.query(Image) \
                    .add_column(Image.signature) \
                    .join(Release) \
                    .filter(Release.artistId == artist.id).all()
                srList = self.searcher.searchForArtistReleases(artist, artistReleaseImages, title)
                if not srList:
                    self.logger.info("Release For Artist [" + printableArtistName +
                                     "] Not Found By Title [" + printableTitle + "]")
                    return None
                sr = srList[0]
                if sr:
                    release = self._createDatabaseModelFromSearchModel(artist, title, sr)
                self.session.add(release)
                self.session.commit()
            elapsedTime = arrow.utcnow().datetime - startTime
            self.logger.info(": ReleaseFactory get elapsed time [" + str(elapsedTime) + "]")
            return release
        except Exception:
            # Fix: narrowed from a bare except; errors are logged, None returned.
            self.logger.exception("ReleaseFactory: Error In get()")
        return None

    def _createDatabaseModelFromSearchModel(self, artist, title, sr):
        """
        Take the given SearchResult Release Model and create a Database Model
        :type artist: Artist
        :type title: str
        :type sr: searchEngines.models.Release.Release
        :return: an existing Release matched by external service ids (with
            `title` added to its alternate names and committed), or a newly
            populated, not-yet-committed Release
        """
        createDattabaseModelFromSearchModelRelease = Release()
        printableTitle = title.encode('ascii', 'ignore').decode('utf-8')
        # Prefer an existing release sharing any of the search result's
        # external service ids over creating a duplicate record.
        releaseByExternalIds = self._getFromDatabaseByExternalIds(sr.musicBrainzId,
                                                                  sr.iTunesId,
                                                                  sr.lastFMId,
                                                                  sr.amgId,
                                                                  sr.spotifyId)
        if releaseByExternalIds:
            if not releaseByExternalIds.alternateNames:
                releaseByExternalIds.alternateNames = []
            if title not in releaseByExternalIds.alternateNames:
                self.logger.debug("Found Title By External Ids [" +
                                  releaseByExternalIds.title.encode('ascii', 'ignore')
                                  .decode('utf-8') + "] Added [" +
                                  printableTitle + "] To AlternateNames")
                # NOTE(review): redundant — alternateNames was already
                # initialized a few lines above.
                if not releaseByExternalIds.alternateNames:
                    releaseByExternalIds.alternateNames = []
                releaseByExternalIds.alternateNames.append(title)
                releaseByExternalIds.lastUpdated = arrow.utcnow().datetime
                self.session.commit()
            return releaseByExternalIds
        # No match by external ids: copy the search result into a new Release.
        createDattabaseModelFromSearchModelRelease.artist = artist
        createDattabaseModelFromSearchModelRelease.roadieId = sr.roadieId
        createDattabaseModelFromSearchModelRelease.title = title
        createDattabaseModelFromSearchModelRelease.releaseDate = parseDate(sr.releaseDate)
        createDattabaseModelFromSearchModelRelease.trackCount = sr.trackCount
        createDattabaseModelFromSearchModelRelease.mediaCount = sr.mediaCount
        createDattabaseModelFromSearchModelRelease.thumbnail = sr.thumbnail
        createDattabaseModelFromSearchModelRelease.profile = sr.profile
        # Map the search-engine enum onto the database's string release type.
        if sr.releaseType == SearchReleaseType.Album:
            createDattabaseModelFromSearchModelRelease.releaseType = 'Album'
        elif sr.releaseType == SearchReleaseType.EP:
            createDattabaseModelFromSearchModelRelease.releaseType = 'EP'
        elif sr.releaseType == SearchReleaseType.Single:
            createDattabaseModelFromSearchModelRelease.releaseType = 'Single'
        createDattabaseModelFromSearchModelRelease.iTunesId = sr.iTunesId
        createDattabaseModelFromSearchModelRelease.amgId = sr.amgId
        createDattabaseModelFromSearchModelRelease.lastFMId = sr.lastFMId
        createDattabaseModelFromSearchModelRelease.lastFMSummary = sr.lastFMSummary
        createDattabaseModelFromSearchModelRelease.musicBrainzId = sr.musicBrainzId
        createDattabaseModelFromSearchModelRelease.spotifyId = sr.spotifyId
        # NOTE(review): amgId is assigned twice (also a few lines above).
        createDattabaseModelFromSearchModelRelease.amgId = sr.amgId
        createDattabaseModelFromSearchModelRelease.tags = sr.tags
        createDattabaseModelFromSearchModelRelease.alternateNames = sr.alternateNames

        createDattabaseModelFromSearchModelRelease.urls = sr.urls
        # Copy only images that actually carry image bytes.
        if sr.images:
            createDattabaseModelFromSearchModelReleaseimages = []
            for image in sr.images:
                if image.image:
                    i = Image()
                    i.roadieId = image.roadieId
                    i.url = image.url
                    i.caption = image.caption
                    i.image = image.image
                    i.signature = image.signature
                    createDattabaseModelFromSearchModelReleaseimages.append(i)
            createDattabaseModelFromSearchModelRelease.images = createDattabaseModelFromSearchModelReleaseimages
            self.logger.debug(
                "= Added [" + str(len(createDattabaseModelFromSearchModelRelease.images)) + "] Images to Release")

        # TODO
        # See if cover file found in Release Folder
        # coverFile = os.path.join(mp3Folder, "cover.jpg")
        # if os.path.isfile(coverFile):
        #     ba = self.readImageThumbnailBytesFromFile(coverFile)
        # else:
        #     coverFile = os.path.join(mp3Folder, "front.jpg")
        #     if os.path.isfile(coverFile):
        #         ba = self.readImageThumbnailBytesFromFile(coverFile)
        # # if no bytes found see if MusicBrainz has cover art
        # if not ba:
        #     coverArtBytes = mb.lookupCoverArt(release.MusicBrainzId)
        #     if coverArtBytes:
        #         try:
        #             img = Image.open(io.BytesIO(coverArtBytes))
        #             img.thumbnail(self.thumbnailSize)
        #             b = io.BytesIO()
        #             img.save(b, "JPEG")
        #             ba = b.getvalue()
        #         except:
        #             pass
        # Reuse existing Genre rows by name; create new ones otherwise.
        if sr.genres:
            createDattabaseModelFromSearchModelRelease.genres = []
            for genre in sr.genres:
                dbGenre = self.session.query(Genre).filter(Genre.name == genre.name).first()
                if not dbGenre:
                    g = Genre()
                    g.name = genre.name
                    g.roadieId = genre.roadieId
                    createDattabaseModelFromSearchModelRelease.genres.append(g)
                else:
                    createDattabaseModelFromSearchModelRelease.genres.append(dbGenre)
        # Reuse existing Label rows by name; create new ones otherwise, then
        # attach a ReleaseLabel association per search-result label.
        if sr.releaseLabels:
            createDattabaseModelFromSearchModelRelease.releaseLabels = []
            for srReleaseLabel in sr.releaseLabels:
                l = self._getLabelFromDatabase(srReleaseLabel.label.name)
                if not l:
                    l = Label()
                    l.roadieId = srReleaseLabel.label.roadieId
                    l.musicBrainzId = srReleaseLabel.label.musicBrainzId
                    l.beginDate = srReleaseLabel.label.beginDate
                    # NOTE(review): assigns attribute 'end' from endDate —
                    # confirm the Label model's field is really named 'end'.
                    l.end = srReleaseLabel.label.endDate
                    l.imageUrl = srReleaseLabel.label.imageUrl
                    l.tags = srReleaseLabel.label.tags
                    if srReleaseLabel.label.alternateNames:
                        # Alternate names are stored '|'-delimited elsewhere,
                        # so embedded '|' characters are replaced.
                        srLabelAlternateNames = []
                        for srLabelAn in srReleaseLabel.label.alternateNames:
                            srLabelAlternateNames.append(srLabelAn.replace("|", ","))
                        l.alternateNames = srLabelAlternateNames
                    l.sortName = srReleaseLabel.label.sortName
                    l.name = srReleaseLabel.label.name
                if l:
                    rl = ReleaseLabel()
                    rl.roadieId = srReleaseLabel.roadieId
                    rl.catalogNumber = srReleaseLabel.catalogNumber
                    rl.beginDate = parseDate(srReleaseLabel.beginDate)
                    rl.endDate = parseDate(srReleaseLabel.endDate)
                    rl.label = l
                    if rl not in createDattabaseModelFromSearchModelRelease.releaseLabels:
                        createDattabaseModelFromSearchModelRelease.releaseLabels.append(rl)
        # Copy media and tracks; media numbering is normalized to start at 1.
        if sr.media:
            createDattabaseModelFromSearchModelRelease.media = []
            for srMedia in sr.media:
                media = ReleaseMedia()
                media.roadieId = srMedia.roadieId
                media.releaseMediaNumber = int(srMedia.releaseMediaNumber)
                # The first media is release 1 not release 0
                if media.releaseMediaNumber < 1:
                    media.releaseMediaNumber = 1
                media.releaseSubTitle = srMedia.releaseSubTitle
                media.trackCount = srMedia.trackCount
                if srMedia.tracks:
                    media.tracks = []
                    for srTrack in srMedia.tracks:
                        track = Track()
                        track.roadieId = srTrack.roadieId
                        track.partTitles = srTrack.partTitles
                        track.musicBrainzId = srTrack.musicBrainzId
                        track.amgId = srTrack.amgId
                        track.spotifyId = srTrack.spotifyId
                        track.title = srTrack.title
                        track.trackNumber = srTrack.trackNumber
                        track.duration = srTrack.duration
                        track.tags = srTrack.tags
                        track.alternateNames = []
                        # Keep a cleaned variant of the title when it differs.
                        cleanedTitle = createCleanedName(srTrack.title)
                        if cleanedTitle != srTrack.title.lower().strip():
                            track.alternateNames.append(cleanedTitle)
                        media.tracks.append(track)
                createDattabaseModelFromSearchModelRelease.media.append(media)
            createDattabaseModelFromSearchModelRelease.mediaCount = len(
                createDattabaseModelFromSearchModelRelease.media)
        return createDattabaseModelFromSearchModelRelease

    def _getAllFromDatabaseForArtist(self, artist):
        """Return every Release for *artist* ordered by releaseDate, or None when no artist is given."""
        if not artist:
            return None
        releasesForArtist = self.session.query(Release) \
            .filter(Release.artistId == artist.id) \
            .order_by(Release.releaseDate)
        return releasesForArtist.all()

    def _getFromDatabaseByTitle(self, artist, title):
        """
        Find a Release for *artist* whose title or pipe-delimited
        alternateNames list matches *title* (case-insensitively) or its
        cleaned form.

        Fixes over the previous version:
        - guards against a missing artist (the sibling
          _getAllFromDatabaseForArtist already does this) instead of
          raising AttributeError on ``artist.id``;
        - escapes single quotes in the cleaned title before interpolating
          it into the raw SQL fragment (previously only the plain title
          was escaped, so a quote in the cleaned name broke the query).
        """
        if not artist or not title:
            return None
        title = title.lower().strip()
        escapedTitle = title.replace("'", "''")
        escapedCleanedTitle = createCleanedName(title).replace("'", "''")

        def _altNamesClause(value, lowerEquality):
            # Match value as the only, first, middle or last element of the
            # pipe-delimited alternateNames column. *value* must already be
            # SQL-escaped.
            col = "lower(alternateNames)" if lowerEquality else "alternateNames"
            return text("(" + col + " = '" + value + "'"
                        " OR alternateNames like '" + value + "|%'" +
                        " OR alternateNames like '%|" + value + "|%'" +
                        " OR alternateNames like '%|" + value + "')")

        stmt = or_(func.lower(Release.title) == title,
                   _altNamesClause(escapedTitle, True),
                   _altNamesClause(escapedCleanedTitle, False))
        return self.session.query(Release).filter(Release.artistId == artist.id).filter(stmt).first()

    def _getLabelFromDatabase(self, name):
        """Look up a Label by name or by an entry in its pipe-delimited alternateNames."""
        if not name:
            return None
        name = name.lower().strip()
        escaped = name.replace("'", "''")
        # alternateNames is stored as a pipe-delimited string; match the value
        # as the only, first, middle or last element.
        altNamesClause = text("(lower(alternateNames) = '" + escaped + "'"
                              " OR alternateNames like '" + escaped + "|%'" +
                              " OR alternateNames like '%|" + escaped + "|%'" +
                              " OR alternateNames like '%|" + escaped + "')")
        matchClause = or_(func.lower(Label.name) == name, altNamesClause)
        return self.session.query(Label).filter(matchClause).first()

    def _getFromDatabaseByExternalIds(self, musicBrainzId, iTunesId, lastFMId, amgId, spotifyId):
        """
        Find a Release by any of its external service ids.

        The previous version embedded Python booleans (``x is not None``)
        inside ``and_`` clauses, so a clause was built for every id even
        when it was None (``column == None`` renders as IS NULL). Build
        only the clauses for ids that were actually supplied, and skip
        the query entirely when none were.
        """
        clauses = []
        if musicBrainzId is not None:
            clauses.append(Release.musicBrainzId == musicBrainzId)
        if iTunesId is not None:
            clauses.append(Release.iTunesId == iTunesId)
        if lastFMId is not None:
            clauses.append(Release.lastFMId == lastFMId)
        if amgId is not None:
            clauses.append(Release.amgId == amgId)
        if spotifyId is not None:
            clauses.append(Release.spotifyId == spotifyId)
        if not clauses:
            # No ids given; the old query could never match anything either.
            return None
        return self.session.query(Release).filter(or_(*clauses)).first()

    def _getFromDatabaseByRoadieId(self, roadieId):
        """Return the Release with the given roadieId, or None when not found."""
        query = self.session.query(Release).filter(Release.roadieId == roadieId)
        return query.first()

    def create(self, artist, title, trackCount, releaseDate):
        """
        Build (but do not persist) a new Release for *artist*.

        Returns None when any required argument is missing. The cleaned
        title is recorded as an alternate name when it differs from the
        normalized title.
        """
        if not (artist and title and trackCount and releaseDate):
            return None
        release = Release()
        release.roadieId = str(uuid.uuid4())
        release.createdDate = arrow.utcnow().datetime
        release.artistId = artist.id
        release.title = title
        release.trackCount = trackCount
        release.releaseDate = parseDate(releaseDate)
        cleanedTitle = createCleanedName(title)
        if cleanedTitle != title.lower().strip():
            release.alternateNames = [cleanedTitle]
        else:
            release.alternateNames = []
        return release

    def add(self, release):
        """Persist *release* to the database immediately."""
        session = self.session
        session.add(release)
        session.commit()

    def delete(self, release, pathToTrack, deleteFiles=False):
        """
        Perform all steps necessary to delete a Release and, optionally,
        its track files on disk.

        :param release: Release to delete
        :param pathToTrack: callable returning the full path for a Track
        :type deleteFiles: bool
        :return: True when the release was deleted, False otherwise
        """
        if not release:
            return False
        try:
            if deleteFiles:
                # Best effort: remove each track file and prune the folder
                # if that leaves it empty. Missing files are not fatal.
                try:
                    for deleteReleaseMedia in release.media:
                        for track in deleteReleaseMedia.tracks:
                            trackPath = pathToTrack(track)
                            trackFolder = os.path.dirname(trackPath)
                            os.remove(trackPath)
                            # if the folder is empty then delete the folder as well
                            if trackFolder and not os.listdir(trackFolder):
                                os.rmdir(trackFolder)
                except OSError:
                    pass
            # Clear the genre association rows before deleting the release
            release.genres = []
            self.session.commit()
            self.session.delete(release)
            self.session.commit()
            return True
        except Exception:
            # was a bare except: — don't swallow SystemExit/KeyboardInterrupt
            self.session.rollback()
            self.logger.exception("Error Deleting Release")
        return False
Beispiel #10
0
class Solver(object):
    """Solver for training and testing.

    Wires together the data loader, a generator/discriminator pair, their
    Adam optimizers, and the periodic sampling, interpolation and
    checkpointing performed during training.
    """
    def __init__(self, cfg):
        """Initialize configurations."""

        self.cfg = cfg

        device_id = cfg.params.cuda_device_id

        # Fall back to CPU when the configured CUDA device is unavailable.
        if 'cuda' in device_id and torch.cuda.is_available():
            self.device = torch.device(device_id)
        else:
            self.device = torch.device('cpu')

        print("Training on device {}".format(self.device))

        self.data_loader, self.num_classes = get_loader(self.cfg)

        print("Dataset loaded with {} classes".format(self.num_classes))

        self.use_tensorboard = cfg.flags.USE_TENSORBOARD
        if self.use_tensorboard:
            self.build_tensorboard()

        self.build_model()
        # Pose conditioning is either a constant-valued "image" derived from
        # the pose number or an actual template image loaded from disk.
        if self.cfg.train_param.LOAD_NUMBER_POSE:
            self.load_pose_images_nums()
        else:
            self.load_pose_images()

        # Lazy-init flags for the fixed batches used by sample()/interpolate()/results().
        self.loaded_samples = False
        self.loaded_interp = False
        self.loaded_result = False

    def build_model(self):
        """Instantiate G and D from the configured names and set up their optimizers."""
        num_classes = self.num_classes

        img_size = self.cfg.model_param.img_size
        beta1 = self.cfg.optimizer_param.beta1
        beta2 = self.cfg.optimizer_param.beta2
        g_lr = self.cfg.optimizer_param.g_lr
        d_lr = self.cfg.optimizer_param.d_lr
        chosen_gen = self.cfg.model_param.gen
        chosen_dis = self.cfg.model_param.dis
        """Create a generator and a discriminator."""
        if chosen_gen == 'genb':
            ChosenGen = GenB
        elif chosen_gen == "gena":
            ChosenGen = GenA
        elif chosen_gen == "genc":
            ChosenGen = GenC
        elif chosen_gen == "genx":
            ChosenGen = GenX
        elif chosen_gen == "unet":
            ChosenGen = GeneratorUNet
        else:
            raise ValueError('Chosen generator not found.')

        if chosen_dis == "sn":
            ChosenDis = SNDisZ
        else:
            ChosenDis = DisZ

        # todo clean device in chosen gen
        self.G = ChosenGen(self.device)
        self.D = ChosenDis(img_size, num_classes)

        if self.cfg.flags.DATA_PARALLEL:
            device_ids = self.cfg.params.parallel_device_id
            device_ids_list = [
                'cuda:{}'.format(i) for i in device_ids.split(',')
            ]
            # NOTE(review): the DataParallel wrapper is discarded — its return
            # value is not assigned back to self.G, so data parallelism is
            # never actually applied. Confirm whether this is intentional.
            torch.nn.DataParallel(self.G,
                                  device_ids=device_ids_list,
                                  output_device=self.device)

        self.g_opt = torch.optim.Adam(self.G.parameters(), g_lr,
                                      [beta1, beta2])
        self.d_opt = torch.optim.Adam(self.D.parameters(), d_lr,
                                      [beta1, beta2])

        print_network(self.D, 'D')
        print_network(self.G, 'G')

        self.D.to(self.device)
        self.G.to(self.device)

        # Resume weights and optimizer state when restarting from an exported config.
        if self.cfg.flags.LOADED_CONFIG:
            self.restore_model()

    def restore_model(self):
        """Restore the trained generator and discriminator."""

        current_iter = self.cfg.train_param.global_iter

        print(
            'Loading the trained models from step {}...'.format(current_iter))

        # load models
        g_path = osp.join(self.cfg.dirs.model_save_dir,
                          '{}-G.ckpt'.format(current_iter))
        d_path = osp.join(self.cfg.dirs.model_save_dir,
                          '{}-D.ckpt'.format(current_iter))

        # map_location keeps checkpoints loadable on CPU-only machines.
        self.G.load_state_dict(
            torch.load(g_path, map_location=lambda storage, loc: storage))
        self.D.load_state_dict(
            torch.load(d_path, map_location=lambda storage, loc: storage))

        # load optimizers
        g_opt_path = osp.join(self.cfg.dirs.model_save_dir,
                              '{}-G_opt.pt'.format(current_iter))
        d_opt_path = osp.join(self.cfg.dirs.model_save_dir,
                              '{}-D_opt.pt'.format(current_iter))

        self.g_opt.load_state_dict(torch.load(g_opt_path))
        self.d_opt.load_state_dict(torch.load(d_opt_path))

    def save_current_state(self, itr):
        """Saves the current state of model training"""

        # save models; filenames use the 1-based iteration number
        g_path = osp.join(self.cfg.dirs.model_save_dir,
                          '{}-G.ckpt'.format(itr + 1))
        d_path = osp.join(self.cfg.dirs.model_save_dir,
                          '{}-D.ckpt'.format(itr + 1))

        torch.save(self.G.state_dict(), g_path)
        torch.save(self.D.state_dict(), d_path)

        # save optimizer
        g_opt_path = osp.join(self.cfg.dirs.model_save_dir,
                              '{}-G_opt.pt'.format(itr + 1))
        d_opt_path = osp.join(self.cfg.dirs.model_save_dir,
                              '{}-D_opt.pt'.format(itr + 1))

        torch.save(self.g_opt.state_dict(), g_opt_path)
        torch.save(self.d_opt.state_dict(), d_opt_path)

        # export config so restore_model() knows which step to resume from
        self.cfg.train_param.global_iter = itr + 1
        export_config(self.cfg)

        print('Saved training state')

    def build_tensorboard(self):
        """Build a tensorboard logger."""
        self.logger = Logger(self.cfg.dirs.log_dir)

    def update_lr(self, g_lr, d_lr):
        """Decay learning rates of the generator and discriminator."""
        for param_group in self.g_opt.param_groups:
            param_group['lr'] = g_lr
        for param_group in self.d_opt.param_groups:
            param_group['lr'] = d_lr

    def reset_grad(self):
        """Reset the gradient buffers."""
        self.g_opt.zero_grad()
        self.d_opt.zero_grad()

    def load_pose_images(self):
        """Load pose template images from disk, keyed by pose number.

        The pose number is parsed from the first three characters of each
        *.jpg filename in cfg.dirs.pose_img_dir.
        """
        img_size = self.cfg.model_param.img_size

        transform = T.Compose([T.Resize(img_size), T.ToTensor()])

        # Temporarily chdir so glob() sees the pose image directory.
        cwd = os.getcwd()
        os.chdir(self.cfg.dirs.pose_img_dir)

        self.pose_images = {}
        for filename in glob.glob("*.jpg"):
            pose = int(filename[:3])
            self.pose_images[pose] = transform(
                Image.open(osp.join(os.getcwd(), filename)))

        # print('poses', self.pose_images.keys())
        os.chdir(cwd)

    def load_pose_images_nums(self):
        """Build constant-valued pose "images" directly from the pose numbers."""
        img_size = self.cfg.model_param.img_size

        # NOTE(review): helper appears unused; rescale() below is used instead.
        def get_im_sized_pose(pose_num):
            return torch.ones(1, img_size, img_size) * pose_num

        train_poses = self.cfg.train_param.poses
        scale_fact = self.cfg.train_param.scale_fact_pose
        # NOTE(review): rescale is only bound when the terminal pose is 180 or
        # 90; any other configuration raises NameError in the loop below.
        if train_poses[-1] == 180:
            rescale = lambda x: (x / 180.0 * 2 - 1) / scale_fact
        elif train_poses[-1] == 90:
            rescale = lambda x: x / 90.0 / 10.0 / scale_fact
        self.pose_images = {}
        for train_pose in train_poses:
            self.pose_images[train_pose] = torch.ones(
                1, img_size, img_size) * rescale(train_pose)

        a = 0  # NOTE(review): leftover debug statement; has no effect

    def get_all_templates(self, batch_size):
        """Generate target domain labels for debugging and testing."""
        labels = []
        pose_values = list(self.pose_images.keys())
        pose_values.sort()
        print(pose_values)  # debug output of available pose numbers

        for pose in pose_values:
            if pose in self.cfg.train_param.poses:
                pose_img = self.pose_images[pose]
                # Repeat it across the batch!
                pose_img = pose_img.unsqueeze(0).repeat(batch_size, 1, 1,
                                                        1).to(self.device)
                labels.append(pose_img)

        return labels

    def print_log(self, loss, itr, start_time):
        """ Print Progress of training"""
        et = time.time() - start_time
        et = str(datetime.timedelta(seconds=et))[:-7]
        log = "Elapsed [{}], Iteration [{}/{}]".format(
            et, itr, self.cfg.train_param.max_iters)
        for tag, value in loss.items():
            log += ", {}: {:.4f}".format(tag, value)
        print(log)

        if self.use_tensorboard:
            for tag, value in loss.items():
                self.logger.scalar_summary(tag, value, itr)

    def calc_disc_loss(self, a_real, b_real, c_real, a_T, b_T, c_T, class_idx,
                       loss_log, loss_log_prefix):
        """Compute the discriminator loss (hinge adversarial + classification).

        The gradient-penalty term is currently disabled (d_loss_gp = 0);
        the commented block below is the WGAN-GP variant. Returns the total
        loss and the updated loss_log dict.
        """
        batch_size = self.cfg.train_param.batch_size
        # TODO: Wgan GP loss -> might have errors
        #

        # # d_loss_real = -out_real.mean()
        # # d_loss_fake = out_fake.mean()
        #
        # # Any point on the line for gradient penalty
        # eps = torch.rand(batch_size, 1, 1, 1).to(self.device)
        # b_hat = (eps * b_real.data + (1 - eps) * b_fake.data)
        # b_hat = Variable(b_hat, requires_grad=True)
        #
        # # GP for D, y_poseT is just a conditioning variable
        # out_src, _ = self.D(a_real, b_hat, c_real, a_T, b_T, c_T)
        #
        # d_loss_gp = self.gradient_penalty(out_src, b_hat, self.device)

        # a + c --> b
        d_out_real, out_cls_real = self.D(a_real, b_real, c_real, a_T, b_T,
                                          c_T)

        # Detach so D's loss does not backprop into G.
        b_fake = self.G(a_real, c_real, a_T, b_T, c_T)
        d_out_fake, _ = self.D(a_real, b_fake.detach(), c_real, a_T, b_T, c_T)

        d_loss_cls_real = self.ce_loss(out_cls_real, class_idx.long())

        # Hinge loss: relu(1 - D(real)) + relu(1 + D(fake)).
        d_loss_real = self.relu_loss(1.0 - d_out_real).mean()
        d_loss_fake = self.relu_loss(1.0 + d_out_fake).mean()

        d_loss_gp = 0

        lambda_gp = self.cfg.model_param.lambda_gp
        lambda_cls = self.cfg.model_param.lambda_cls

        # L_real + L_fake + l_GP + l_classification
        d_loss = d_loss_real + d_loss_fake + \
                 lambda_gp * d_loss_gp + lambda_cls * d_loss_cls_real

        # Logging.

        loss_log[loss_log_prefix + 'real'] = d_loss_real.item()
        loss_log[loss_log_prefix + 'fake'] = d_loss_fake.item()
        # loss_log[loss_log_prefix + 'lGP'] = lambda_gp * d_loss_gp.item()
        loss_log[loss_log_prefix + 'cls'] = lambda_cls * d_loss_cls_real.item()
        loss_log[loss_log_prefix + 'total'] = d_loss.item()

        return d_loss, loss_log

    def calc_gen_loss_ab(self, a_real, b_real, c_real, a_T, b_T, c_T,
                         class_idx, loss_log, loss_log_prefix):
        """Compute the generator loss: adversarial + classification
        + total-variation regularisation + L1 reconstruction.

        Returns the total loss and the updated loss_log dict.
        """
        # Original pose to target pose.
        # self.vis_tensor(a_real)
        # self.vis_tensor(c_real)
        # self.vis_tensor(b_T, denorm_ar=False)
        # self.vis_tensor(b_real)

        b_fake = self.G(a_real, c_real, a_T, b_T, c_T)

        # Fake Image conditioned on pose loss
        out_src, out_cls = self.D(a_real, b_fake, c_real, a_T, b_T, c_T)
        g_pose_loss_fake = -out_src.mean()

        # Classification loss
        g_cls_loss = self.ce_loss(out_cls, class_idx.long())

        # Total Variational Regularisation
        tvr_loss = torch.sum(torch.abs(b_fake[:, :, 1:, :] - b_fake[:, :, :-1, :])) + \
                   torch.sum(torch.abs(b_fake[:, :, :, 1:] - b_fake[:, :, :, :-1]))
        tvr_loss = tvr_loss.mean()

        # L1 loss
        # l1_loss = torch.mean((b_fake - b_real) ** 2)
        l1_loss = torch.mean(torch.abs(b_fake - b_real))
        # l2_loss = torch.mean((b_fake - b_real)**2) ; lambda_l1 = 100 * lambda_l1

        lambda_cls = self.cfg.model_param.lambda_cls
        lambda_tvr = self.cfg.model_param.lambda_tvr
        lambda_l1 = self.cfg.model_param.lambda_l1
        g_loss = g_pose_loss_fake + lambda_tvr * tvr_loss + lambda_cls * g_cls_loss + lambda_l1 * l1_loss

        # Logging.
        loss_log[loss_log_prefix + 'fake'] = g_pose_loss_fake.item()
        loss_log[loss_log_prefix + 'tvr'] = lambda_tvr * tvr_loss.item()
        loss_log[loss_log_prefix + 'cls'] = lambda_cls * g_cls_loss.item()
        loss_log[loss_log_prefix + 'l1'] = lambda_l1 * l1_loss.item()
        loss_log[loss_log_prefix + 'total'] = g_loss.item()

        return g_loss, loss_log

    def train(self):
        """Main training loop: alternate D and G updates with periodic
        logging, sampling, checkpointing and learning-rate decay."""

        print('Start training...')
        start_time = time.time()

        self.ce_loss = torch.nn.CrossEntropyLoss()
        self.relu_loss = torch.nn.ReLU()

        data_loader = self.data_loader
        data_iter = iter(data_loader)

        # Learning rate cache for decaying.
        g_lr = self.cfg.optimizer_param.g_lr
        d_lr = self.cfg.optimizer_param.d_lr

        cur_iter = self.cfg.train_param.global_iter
        max_iter = self.cfg.train_param.max_iters

        model_save_step = self.cfg.train_param.model_save_step
        sample_step = self.cfg.train_param.sample_step
        log_step = self.cfg.train_param.log_step
        lr_update_step = self.cfg.train_param.lr_update_step
        lr_decay_rate = self.cfg.optimizer_param.lr_decay_rate
        critic_train_no = self.cfg.train_param.critic_train_no

        for itr in range(cur_iter, max_iter):

            # =================================================================================== #
            #                             1. Preprocess input data                                #
            # =================================================================================== #

            # Restart the iterator when the epoch is exhausted.
            # NOTE(review): bare except — should be `except StopIteration:`.
            try:
                class_idx, x, x_T, y, y_T, z, z_T = next(data_iter)
            except:
                data_iter = iter(data_loader)
                class_idx, x, x_T, y, y_T, z, z_T = next(data_iter)

            x = x.to(self.device)
            y = y.to(self.device)
            z = z.to(self.device)

            # Look up the pose conditioning image for each pose number.
            x_T_im = [self.pose_images[p] for p in x_T.numpy()]
            y_T_im = [self.pose_images[p] for p in y_T.numpy()]
            z_T_im = [self.pose_images[p] for p in z_T.numpy()]

            x_T_im = torch.stack(x_T_im, 0).to(self.device)
            y_T_im = torch.stack(y_T_im, 0).to(self.device)
            z_T_im = torch.stack(z_T_im, 0).to(self.device)

            class_idx = class_idx.to(self.device)

            # =================================================================================== #
            #                             2. Train the discriminator                              #
            # =================================================================================== #
            requires_grad(self.G, False)
            requires_grad(self.D, True)

            ### Notation
            # x --> y
            # y --> z
            # x+z encoded --> y
            loss_log = {}

            d_loss, loss_log = self.calc_disc_loss(x, y, z, x_T_im, y_T_im,
                                                   z_T_im, class_idx, loss_log,
                                                   'D/')
            self.reset_grad()
            d_loss.backward(retain_graph=True)
            self.d_opt.step()

            # =================================================================================== #
            #                               3. Train the generator                                #
            # =================================================================================== #
            # G is updated once per critic_train_no D updates.
            if (itr + 1) % critic_train_no == 0:
                requires_grad(self.G, True)
                requires_grad(self.D, False)

                g_loss, loss_log = self.calc_gen_loss_ab(
                    x, y, z, x_T_im, y_T_im, z_T_im, class_idx, loss_log, 'G/')

                self.reset_grad()
                g_loss.backward()
                self.g_opt.step()

            # =================================================================================== #
            #                                 4. Miscellaneous                                    #
            # =================================================================================== #
            # Print out training information.
            if (itr + 1) % log_step == 0:
                self.print_log(loss_log, itr + 1, start_time)

            # Translate fixed images for debugging.
            if (itr + 1) % sample_step == 0:
                self.sample(itr)
                self.interpolate(itr)
                self.results(itr)

            # Save model checkpoints.
            if (itr + 1) % model_save_step == 0:
                self.save_current_state(itr)

            # Decay learning rates.
            if (itr + 1) % lr_update_step == 0:
                # TODO: check if this decay style is okay in GANs
                g_lr *= lr_decay_rate
                d_lr *= lr_decay_rate
                self.update_lr(g_lr, d_lr)
                print('Decayed learning rates, g_lr: {}, d_lr: {}.'.format(
                    g_lr, d_lr))

    def sample(self, itr):
        """Generate a fixed batch of images at every training pose and save
        them as one row-concatenated sheet for visual inspection."""
        batch_size = self.cfg.sample_param.num_samples
        train_poses = self.cfg.train_param.poses

        # Cache the fixed sample inputs the first time around.
        if not self.loaded_samples:
            self.sample_a, sample_a_pnum, self.sample_c, sample_c_pnum = \
                self.data_loader.dataset.get_sample_imgs(train_poses, batch_size, self.device)
            pose_templates = self.pose_images

            stack_dev = lambda t: torch.stack(t).to(self.device)

            self.sample_a_T = stack_dev(
                [pose_templates[pnum] for pnum in sample_a_pnum])
            self.sample_c_T = stack_dev(
                [pose_templates[pnum] for pnum in sample_c_pnum])

            self.loaded_samples = True

        with torch.no_grad():
            self.G.eval()

            sample_im = [self.sample_a]

            for i in range(len(train_poses)):
                cur_pose_degree = train_poses[i]
                sample_b_T = self.pose_images[cur_pose_degree].unsqueeze(
                    0).repeat(batch_size, 1, 1, 1).to(self.device)
                decoded_image = self.G(self.sample_a, self.sample_c,
                                       self.sample_a_T, sample_b_T,
                                       self.sample_c_T)

                sample_im.append(decoded_image)
            sample_im.append(self.sample_c)

        # Concatenate along width: [source | generated poses... | target].
        imgs = torch.cat(sample_im, dim=3)

        sample_path = osp.join(self.cfg.dirs.sample_dir,
                               '{}-images.jpg'.format(itr + 1))
        save_image(denorm(imgs.data.cpu()), sample_path, nrow=1, padding=0)
        self.G.train()

        print('Saved Sample images for {} into {}...'.format(itr, sample_path))

    def results(self, itr):
        """Generate per-pose result images for a fixed batch and save each
        one individually under result_dir/fake."""
        batch_size = self.cfg.sample_param.num_samples
        train_poses = self.cfg.train_param.poses
        result_dir = self.cfg.dirs.result_dir
        real_dir = osp.join(result_dir, 'real')
        fake_dir = osp.join(result_dir, 'fake')

        # Cache the fixed inputs and create output dirs the first time around.
        if not self.loaded_result:
            self.res_a, res_a_pnum, self.res_c, res_c_pnum = \
                self.data_loader.dataset.get_sample_imgs(train_poses, batch_size, self.device)
            pose_templates = self.pose_images

            stack_dev = lambda t: torch.stack(t).to(self.device)

            self.res_a_T = stack_dev(
                [pose_templates[pnum] for pnum in res_a_pnum])
            self.res_c_T = stack_dev(
                [pose_templates[pnum] for pnum in res_c_pnum])

            self.loaded_result = True

            ensure_dir_exists(real_dir)
            ensure_dir_exists(fake_dir)

        with torch.no_grad():
            self.G.eval()
            max_i = len(train_poses) - 1
            for i in range(max_i + 1):
                cur_pose_degree = train_poses[i]
                cur_pose_T = self.pose_images[cur_pose_degree].unsqueeze(
                    0).repeat(batch_size, 1, 1, 1).to(self.device)

                decoded_image = self.G(self.res_a, self.res_c, self.res_a_T,
                                       cur_pose_T, self.res_c_T)

                # Save only the first image of the batch, as HWC jpg.
                dec = denorm(decoded_image.data.cpu()).numpy()[0].transpose(
                    1, 2, 0)
                dec_im_name = osp.join(fake_dir,
                                       'itr{}-pose{}.jpg'.format(itr + 1, i))
                imsave(dec_im_name, dec)

        print('Saved results for {} into {}...'.format(itr, result_dir))

        self.G.train()

    def interpolate(self, itr):
        """Generate images sweeping through the configured interpolation
        poses between a fixed start and end image, saved as one sheet."""
        batch_size = self.cfg.sample_param.num_samples
        interp_poses = self.cfg.sample_param.interp_poses

        # Cache the fixed start/end images the first time around.
        if not self.loaded_interp:
            self.interp_sta, self.interp_end = self.data_loader.dataset.get_interp_imgs(
                interp_poses[0], interp_poses[-1], batch_size, self.device)

            self.interp_sta_T = self.pose_images[interp_poses[0]].unsqueeze(
                0).repeat(batch_size, 1, 1, 1).to(self.device)
            self.interp_end_T = self.pose_images[interp_poses[-1]].unsqueeze(
                0).repeat(batch_size, 1, 1, 1).to(self.device)

            self.loaded_interp = True

        with torch.no_grad():
            self.G.eval()

            max_i = len(interp_poses) - 1
            interp_im = [self.interp_sta]

            for i in range(max_i + 1):
                cur_pose_degree = interp_poses[i]
                cur_pose_T = self.pose_images[cur_pose_degree].unsqueeze(
                    0).repeat(batch_size, 1, 1, 1).to(self.device)
                decoded_image = self.G(self.interp_sta, self.interp_end,
                                       self.interp_sta_T, cur_pose_T,
                                       self.interp_end_T)

                interp_im.append(decoded_image)
            interp_im.append(self.interp_end)

        # Concatenate along width: [start | interpolations... | end].
        imgs = torch.cat(interp_im, dim=3)

        sample_path = osp.join(self.cfg.dirs.interpolate_dir,
                               '{}-images.jpg'.format(itr + 1))
        save_image(denorm(imgs.data.cpu()), sample_path, nrow=1, padding=0)
        self.G.train()

        print('Saved interpolated images for {} into {}...'.format(
            itr, sample_path))

    @staticmethod
    def gradient_penalty(y, x, device):
        """Compute gradient penalty: (L2_norm(dy/dx) - 1)**2."""
        weight = torch.ones(y.size()).to(device)
        dydx = torch.autograd.grad(
            outputs=y,
            inputs=x,
            grad_outputs=weight,
            # retain_graph=True,
            create_graph=True,
            only_inputs=True)[0]

        dydx = dydx.view(dydx.size(0), -1)
        dydx_l2norm = torch.sqrt(torch.sum(dydx**2, dim=1))
        return torch.mean((dydx_l2norm - 1)**2)
Beispiel #11
0
 def build_tensorboard(self):
     """Create the tensorboard Logger writing to the configured log dir."""
     log_dir = self.cfg.dirs.log_dir
     self.logger = Logger(log_dir)
Beispiel #12
0
 def setUpClass(cls) -> None:
     """Create one shared logger for every test in this class."""
     test_logger = Logger(name="test")
     cls.logger = test_logger.get_logger()
Beispiel #13
0
 def __init__(self, path, config=None):
     """Remember *path* and *config*, then load the file immediately."""
     self.logger = Logger()
     self.config = config
     self.filename = path
     self._load(path, config)
Beispiel #14
0
class ID3(object):
    """Wrapper around an MP3 file's ID3 tags, read and written via mutagen.

    Tag values are parsed into plain attributes on construction; helper
    methods validate, summarize and write tags back to disk.
    """
    # Path of the loaded file (set in __init__).
    filename = None
    # Optional configuration mapping (set in __init__).
    config = None

    def __init__(self, path, config=None):
        self.logger = Logger()
        self.filename = path
        self.config = config
        # Parse immediately so attributes are populated on construction.
        self._load(path, config)

    def isValid(self):
        """Return True when the minimum set of tags needed is present."""
        try:
            if self.artist and \
                    self.year and \
                    self.album and \
                    self.track and \
                    self.title and \
                    self.bitrate and \
                    self.length > 0:
                return True
            else:
                return False
        except Exception:
            # A missing attribute (load failure) simply means "not valid".
            return False

    def info(self):
        """Return a one-line human-readable summary of the loaded tags."""
        # NOTE(review): self.getArtist() is not defined on this class (only
        # getTrackArtist/getReleaseArtist are) -- presumably supplied by a
        # subclass; confirm before calling info().
        return "--- IsValid: [" + str(self.isValid()) + "] " + \
               "Artist [" + str(self.getArtist()) + "] " + \
               "HasTrackArtist [" + str(self.hasTrackArtist()) + "] " + \
               "Artist (TPE1) [" + str(self.artist) + "], " + \
               "Album Artist (TPE2) [" + str(self.albumArtist) + "], " + \
               "Year [" + str(self.year) + "], " + \
               "Album: [" + str(self.album) + "], " + \
               "Disc: [" + str(self.disc) + "], " + \
               "Track [" + str(self.track).zfill(2) + "], " + \
               "Title [" + str(self.title) + "], " + \
               "(" + str(self.bitrate) + "bps::" + str(self.length) + ")"

    def __str__(self):
        return str(self.artist) + "." + \
               str(self.year) + "." + \
               str(self.album) + "." + \
               str(self.track) + "." + \
               str(self.title) + "." + \
               str(self.bitrate) + "." + \
               str(self.length)

    def setCoverImage(self, image):
        """Replace the embedded cover art (APIC frame) with *image* bytes."""
        try:
            tags = mutagenID3(self.filename)
        except ID3NoHeaderError:
            # File has no ID3 header yet; start from an empty tag set.
            tags = mutagenID3()
        if self.config:
            if 'DoClearComments' in self.config:
                if self.config['DoClearComments'].lower() == "true":
                    tags.delall(u"COMM::'en'")
        tags.delall(u"APIC::'en'")
        tags.add(APIC(
                    encoding=3,
                    mime='image/jpeg',
                    type=3,
                    desc=u'Cover',
                    data=image
        ))
        tags.save(self.filename, v2_version=3) # this is for Windows Media Player compatibility

    def updateFromTrack(self, track):
        """
        Update the ID3 Track with the given Track values
        :param track: Track
        :return:
        """
        try:
            tags = mutagenID3(self.filename)
        except ID3NoHeaderError:
            tags = mutagenID3()
        tags["TIT2"] = TIT2(encoding=3, text=track.title)
        if track.artist:
            tags["TPE1"] = TPE1(encoding=3, text=track.artist.name)
        tags["TRCK"] = TRCK(encoding=3, text=str(track.trackNumber))
        if self.config:
            if 'DoClearComments' in self.config:
                if self.config['DoClearComments'].lower() == "true":
                    tags.delall(u"COMM::'en'")
        tags.save(self.filename)

    def updateFromRelease(self, release, track):
        """
        Update the given Track with loaded values
        :param release: Release
        :param track: Track
        :return:
        """
        try:
            tags = mutagenID3(self.filename)
        except ID3NoHeaderError:
            tags = mutagenID3()
        tags["TIT2"] = TIT2(encoding=3, text=track.title)
        tags["TALB"] = TALB(encoding=3, text=release.title)
        if track.artist:
            # Track has its own artist: the release artist goes to TPE2.
            tags["TPE2"] = TPE2(encoding=3, text=release.artist.name)
            tags["TPE1"] = TPE1(encoding=3, text=track.artist.name)
        else:
            tags["TPE1"] = TPE1(encoding=3, text=release.artist.name)
        tags["TRCK"] = TRCK(encoding=3, text=str(track.trackNumber))
        if release.releaseDate:
            year = release.releaseDate.strftime('%Y')
            if year:
                tags["TDRC"] = TDRC(encoding=3, text=year)
        if self.config:
            if 'DoClearComments' in self.config:
                if self.config['DoClearComments'].lower() == "true":
                    tags.delall(u"COMM::'en'")
        tags.save(self.filename)

    def getTrackArtist(self):
        """
        Return the artist to use for this track be it Artist ("TPE1") or Album Artist ("TPE2")
        :return: str
        """
        return (self.artist or '').strip()

    def getReleaseArtist(self):
        """
        Return the artist to use for this Release be it Artist ("TPE1") or Album Artist ("TPE2")
        :return: str
        """
        if self.hasTrackArtist():
            return (self.albumArtist or '').strip()
        return (self.artist or '').strip()

    def hasTrackArtist(self):
        """Return True when a distinct Album Artist (TPE2) is present."""
        # Artist is always set
        artist = (self.artist or '').strip()
        # Album Artist is sometimes set and most of the times when set its the same as the Artist
        albumArtist = (self.albumArtist or '').strip()
        if albumArtist and not isEqual(artist, albumArtist):
            return True
        return False

    def _load(self, filename, config):
        """Parse tags from *filename* into attributes; best-effort, never raises."""
        self.dirty = False
        self.artist = ''
        self.artists = []
        self.albumArtist = ''
        self.album = ''
        self.track = ''
        self.title = ''
        self.year = ''
        self.disc = -1
        self.bitrate = ''
        self.length = -1
        try:
            short_tags = full_tags = mutagen.File(filename)
            comments = []
            if isinstance(full_tags, mutagen.mp3.MP3):
                for key in short_tags:
                    if key[0:4] == 'COMM':
                        if short_tags[key].desc == '':
                            comments.append(short_tags[key].text[0])
                # Re-read with EasyID3 for normalized key access.
                short_tags = mutagen.mp3.MP3(filename, ID3=mutagen.easyid3.EasyID3)
            comments.append('')
            self.album = string.capwords(short_tags.get('album', [''])[0])
            self.artist = string.capwords(short_tags.get('artist', [''])[0])
            try:
                # id3v2.3.0, 4.2.1   TPE1    [#TPE1 Lead performer(s)/Soloist(s)]
                if self.artist and "/" in self.artist:
                    self.artists = []
                    for aa in self.artist.split("/"):
                        if aa:
                            self.artists.append(string.capwords(aa.strip()))
                    if len(self.artists) > 0:
                        self.artist = self.artists[0]
            except Exception:
                pass
            try:
                self.albumArtist = full_tags['TPE2'].text[0]
            except Exception:
                pass
            self.duration = "%u:%.2d" % (full_tags.info.length / 60, full_tags.info.length % 60)
            trackNumber = short_tags.get('tracknumber', [''])[0]
            self.track = 0
            try:
                if trackNumber and "/" in trackNumber:
                    # "3/12" style value: keep only the track part. (This was
                    # a plain "if" followed by another "if trackNumber", which
                    # re-parsed "3/12" and only kept the right value because
                    # the resulting ValueError was swallowed; now an elif,
                    # mirroring the discnumber parse below.)
                    self.track = int(trackNumber.split("/")[0])
                elif trackNumber:
                    self.track = int(trackNumber)
            except Exception:
                pass
            try:
                self.length = full_tags.info.length
            except Exception:
                pass
            try:
                self.bitrate = full_tags.info.bitrate
            except Exception:
                pass
            discNumber = short_tags.get('discnumber', [''])[0]
            self.disc = 0
            try:
                if discNumber and "/" in discNumber:
                    self.disc = int(discNumber.split("/")[0])
                elif discNumber:
                    self.disc = int(discNumber)
            except Exception:
                pass
            self.year = short_tags.get('date', [''])[0]
            if not self.year:
                # Fall back to the mpeg library's tag reader for the year.
                myfile = mpeg.Mpeg(filename)
                if myfile:
                    self.year = myfile.tag.year[:4]
            try:
                if not self.year:
                    # Last resort: read the raw TDRL (release time) frame.
                    self.year = full_tags.tags._DictProxy__dict['TDRL'].text[0].text
            except Exception:
                pass
            self.title = string.capwords(short_tags.get('title', [''])[0])
            if self.title and config:
                if 'TitleReplacements' in config:
                    for rpl in config['TitleReplacements']:
                        for key, val in rpl.items():
                            self.title = self.title.replace(key, val)
                    self.dirty = True
                self.title = string.capwords(self.title)
            if self.title and self.track:
                # Strip a leading "NN - " / "NN " / "- " track-number prefix.
                if self.title.startswith('%02d - ' % self.track):
                    self.title = self.title[5:]
                elif self.title.startswith('%02d ' % self.track):
                    self.title = self.title[3:]
                elif self.title.startswith('- '):
                    self.title = self.title[2:]
                self.title = string.capwords(self.title)
                self.dirty = True
            self.comment = string.capwords(comments[0])
            if self.comment and config:
                if 'DoClearComments' in config:
                    if config['DoClearComments'].lower() == "true":
                        self.comment = None
                        self.dirty = True
            self.genre = ''
            genres = short_tags.get('genre', [''])
            if len(genres) > 0:
                self.genre = genres[0]
            self.imageBytes = None
            try:
                if full_tags.tags and 'APIC:' in full_tags.tags:
                    self.imageBytes = full_tags.tags._DictProxy__dict['APIC:'].data
            except Exception:
                pass
        except Exception:
            # Was a bare "except: self.logger.exception()" with no message;
            # a message is passed now, matching logger usage elsewhere.
            self.logger.exception("Error loading ID3 [" + str(filename) + "]")
# Example #15
class ArtistSearcher(object):
    """
    Query Enabled Search Engines and Find Artist Information and aggregate results.
    """
    allMusicSearcher = None
    spotifySearcher = None
    mbSearcher = None
    lastFMSearcher = None
    iTunesSearcher = None
    # Was declared twice; a single declaration is kept.
    imageSearcher = None

    # Image-normalization sizes (width, height).
    artistThumbnailSize = 160, 160
    releaseThumbnailSize = 80, 80
    imageMaximumSize = 500, 500

    # NOTE(review): these caches are class-level, so they are shared by all
    # instances and unbounded -- presumably intentional for one long-lived
    # searcher; confirm if multiple instances are ever created.
    cache = dict()
    imageCache = dict()

    def __init__(self, referer=None):
        """
        :param referer: Referer URL handed to each search engine; empty or
                        localhost values fall back to the project URL.
        """
        self.referer = referer
        if not self.referer or self.referer.startswith("http://localhost"):
            self.referer = "http://github.com/sphildreth/roadie"
        self.logger = Logger()
        self.allMusicSearcher = AllMusicGuide(self.referer)
        self.spotifySearcher = Spotify(self.referer)
        self.mbSearcher = MusicBrainz(self.referer)
        self.lastFMSearcher = LastFM(self.referer)
        self.iTunesSearcher = iTunes(self.referer)
        # Was assigned twice (first without the referer, immediately
        # overwritten); only the referer-aware instance is kept.
        self.imageSearcher = ImageSearcher(self.referer)

    def searchForArtist(self, name):
        """
        Perform a search in all enabled search engines and return an aggregate Artist for the given Artist name
        :param name: String
                     Name of the Artist to find
        :return: Artist
                 Populated Artist or None if error or not found
        """
        if not name:
            return None
        if name in self.cache:
            return self.cache[name]
        try:
            startTime = arrow.utcnow().datetime
            artist = Artist(name=name)
            artist.roadieId = str(uuid.uuid4())
            if self.iTunesSearcher.IsActive:
                artist = artist.mergeWithArtist(self.iTunesSearcher.lookupArtist(name))
            if self.mbSearcher.IsActive:
                artist = artist.mergeWithArtist(self.mbSearcher.lookupArtist(name))
            if self.lastFMSearcher.IsActive:
                artist = artist.mergeWithArtist(self.lastFMSearcher.lookupArtist(name))
            if self.spotifySearcher.IsActive:
                artist = artist.mergeWithArtist(self.spotifySearcher.lookupArtist(name))
            if self.allMusicSearcher.IsActive:
                artist = artist.mergeWithArtist(self.allMusicSearcher.lookupArtist(name))
            if artist:
                # Fetch images with only urls, remove any with neither URL or BLOB
                if artist.images:
                    images = []
                    firstImageInImages = None
                    for image in artist.images:
                        if not image.image and image.url:
                            image.image = self.imageSearcher.getImageBytesForUrl(image.url)
                        if image.image:
                            # Resize to maximum image size and convert to JPEG.
                            img = Image.open(io.BytesIO(image.image)).convert('RGB')
                            # PIL's resize returns a new image; the original
                            # code discarded the result, so nothing was resized.
                            img = img.resize(self.imageMaximumSize)
                            b = io.BytesIO()
                            img.save(b, "JPEG")
                            image.image = b.getvalue()
                            firstImageInImages = firstImageInImages or image.image
                            image.signature = image.averageHash()
                            images.append(image)
                    if images:
                        # Dedupe by perceptual-hash signature.
                        dedupedImages = []
                        imageSignatures = []
                        for image in images:
                            if image.signature not in imageSignatures:
                                imageSignatures.append(image.signature)
                                dedupedImages.append(image)
                        artist.images = dedupedImages
                        if not artist.thumbnail and firstImageInImages:
                            try:
                                img = Image.open(io.BytesIO(firstImageInImages)).convert('RGB')
                                img.thumbnail(self.artistThumbnailSize)
                                b = io.BytesIO()
                                img.save(b, "JPEG")
                                artist.thumbnail = b.getvalue()
                            except Exception:
                                pass
                # Add special search names to alternate names
                if not artist.alternateNames:
                    artist.alternateNames = []
                if artist.name not in artist.alternateNames:
                    cleanedArtistName = createCleanedName(artist.name)
                    if cleanedArtistName != artist.name.lower().strip() and \
                                    cleanedArtistName not in artist.alternateNames:
                        artist.alternateNames.append(cleanedArtistName)
                if not artist.bioContext:
                    try:
                        artist.bioContext = wikipedia.summary(artist.name)
                    except Exception:
                        pass

                self.cache[name] = artist
            elapsedTime = arrow.utcnow().datetime - startTime
            printableName = name.encode('ascii', 'ignore').decode('utf-8')
            # Build the whole message, then sanitize it once. The original
            # applied encode/decode only to the final "]" literal (operator
            # precedence) and emitted a doubled closing bracket; it also
            # dereferenced artist attributes even when artist was None.
            message = ("searchForArtist Elapsed Time [" + str(elapsedTime) + "] Name [" + printableName +
                       "] Found [" + (artist.name if artist else "") +
                       "] MusicBrainzId [" + str(artist.musicBrainzId if artist else None) + "] " +
                       " iTunesId [" + str(artist.iTunesId if artist else None) + "] " +
                       " amgId [" + str(artist.amgId if artist else None) + "]" +
                       " spotifyId [" + str(artist.spotifyId if artist else None) + "]")
            self.logger.debug(message.encode('ascii', 'ignore').decode('utf-8'))
            return artist
        except Exception:
            self.logger.exception("Error In searchForArtist")
        return None

    def _mergeReleaseLists(self, left, right):
        """Merge two Release lists, combining entries that compare equal."""
        if left and not right:
            return left
        elif not left and right:
            return right
        elif not left and not right:
            return []
        else:
            mergeReleaseListsStart = arrow.utcnow()
            mergedReleases = left
            # Merge the right to the result
            for rRelease in right:
                foundRightInMerged = False
                for mRelease in mergedReleases:
                    if mRelease == rRelease:
                        mRelease.mergeWithRelease(rRelease)
                        foundRightInMerged = True
                        break
                if not foundRightInMerged:
                    mergedReleases.append(rRelease)
            mergedReleaseElapsed = arrow.utcnow() - mergeReleaseListsStart
            self.logger.debug("= MergeReleaseLists left size [" + str(len(left)) + "], right size [" + str(
                len(right)) + "] Elapsed Time [" + str(mergedReleaseElapsed) + "]")
            return mergedReleases

    def searchForArtistReleases(self, artist, artistReleaseImages, titleFilter=None):
        """
        Using the given populated Artist find all releases, with an optional filter

        :param artist: Artist
                       Artist to find releases for
        :param artistReleaseImages: list
                                    Collection if image signatures for Artist for deduping
        :param titleFilter: String
                            Optional filter of release Title to only include in results
        :return: iterable Release
                 Collection of releases found for artist
        """
        if not artist:
            return None
        try:
            startTime = arrow.utcnow().datetime
            releases = []
            if self.iTunesSearcher.IsActive:
                releases = self._mergeReleaseLists(releases, self.iTunesSearcher.searchForRelease(artist, titleFilter))
            if self.mbSearcher.IsActive:
                releases = self._mergeReleaseLists(releases, self.mbSearcher.searchForRelease(artist, titleFilter))
            if self.lastFMSearcher.IsActive and releases:
                # LastFM is looked up by MusicBrainz id, so collect ids first.
                mbIdList = []
                if not titleFilter:
                    mbIdList = [x.musicBrainzId for x in releases if x.musicBrainzId]
                else:
                    for x in releases:
                        if isEqual(x.title, titleFilter):
                            mbIdList.append(x.musicBrainzId)
                            break
                if mbIdList:
                    releases = self._mergeReleaseLists(releases,
                                                       self.lastFMSearcher.lookupReleasesForMusicBrainzIdList(artist,
                                                                                                              mbIdList))
            if self.spotifySearcher.IsActive:
                releases = self._mergeReleaseLists(releases, self.spotifySearcher.searchForRelease(artist, titleFilter))
            if releases:
                self.logger.debug(
                    "searchForArtistReleases Found [" + str(len(releases)) + "] For title [" + str(titleFilter) + "]")
                for searchForArtistRelease in releases:
                    if searchForArtistRelease.coverUrl:
                        coverImage = ArtistImage(searchForArtistRelease.coverUrl)
                        searchForArtistRelease.images.append(coverImage)
                    # Fetch images with only urls, remove any with neither URL or BLOB
                    if searchForArtistRelease.images:
                        images = []
                        for image in searchForArtistRelease.images:
                            if not image.image and image.url:
                                image.image = self.getImageForUrl(image.url)
                            if image.image:
                                # Resize to maximum image size and convert to JPEG.
                                img = Image.open(io.BytesIO(image.image)).convert('RGB')
                                # Same fix as in searchForArtist: resize
                                # returns a new image, it is not in-place.
                                img = img.resize(self.imageMaximumSize)
                                b = io.BytesIO()
                                img.save(b, "JPEG")
                                image.image = b.getvalue()
                                # Hash image for deduping
                                image.signature = image.averageHash()
                                if image.signature:
                                    images.append(image)
                        if not images:
                            searchForArtistRelease.images = []
                        else:
                            dedupedImages = []
                            imageSignatures = artistReleaseImages or []
                            for image in images:
                                if image.signature not in imageSignatures:
                                    imageSignatures.append(image.signature)
                                    dedupedImages.append(image)
                            searchForArtistRelease.images = dedupedImages
                            if not searchForArtistRelease.thumbnail:
                                try:
                                    firstImageInImages = None
                                    for image in searchForArtistRelease.images:
                                        firstImageInImages = firstImageInImages or image.image
                                        if firstImageInImages:
                                            break
                                    img = Image.open(io.BytesIO(firstImageInImages)).convert('RGB')
                                    img.thumbnail(self.releaseThumbnailSize)
                                    b = io.BytesIO()
                                    img.save(b, "JPEG")
                                    searchForArtistRelease.thumbnail = b.getvalue()
                                except Exception:
                                    pass
            if titleFilter and releases:
                # Keep only releases whose title (or cleaned alternate name)
                # matches the filter.
                filteredReleases = []
                cleanedTitleFilter = createCleanedName(titleFilter)
                for searchForArtistRelease in releases:
                    if isEqual(searchForArtistRelease.title,
                               titleFilter) or cleanedTitleFilter in searchForArtistRelease.alternateNames:
                        filteredReleases.append(searchForArtistRelease)
                releases = filteredReleases
            elapsedTime = arrow.utcnow().datetime - startTime
            self.logger.debug("searchForArtistReleases ElapseTime [" + str(elapsedTime) + "]")
            return releases
        except Exception:
            self.logger.exception("Error In searchForArtistReleases")
        return None

    def getImageForUrl(self, url):
        """Return image bytes for *url*, memoized in the shared imageCache."""
        if url not in self.imageCache:
            self.imageCache[url] = self.imageSearcher.getImageBytesForUrl(url)
            self.logger.debug("= Downloading Image [" + str(url) + "]")
        return self.imageCache[url]