Example #1
    def __init__( self, audio_file, file_ext, options ):
        self.file = audio_file
        self.filename = ""

        self.directory = os.path.dirname( self.file.filename )


        self.ext = file_ext
        self.file_data = metadata.Metadata()
        self.processed_data = metadata.Metadata()
        self.release = None
        self.ParseFileMetadata()
        self.errors = False
        self.matched_medium_track = None
Example #2
 def add(self, name, st, meta_ofs, hashgen = None):
     endswith = name.endswith('/')
     ename = pathsplit(name)
     basename = ename[-1]
     #log('add: %r %r\n' % (basename, name))
     flags = IX_EXISTS
     sha = None
     if hashgen:
         (gitmode, sha) = hashgen(name)
         flags |= IX_HASHVALID
     else:
         (gitmode, sha) = (0, EMPTY_SHA)
     if st:
         isdir = stat.S_ISDIR(st.st_mode)
         assert(isdir == endswith)
         e = NewEntry(basename, name, self.tmax,
                      st.st_dev, st.st_ino, st.st_nlink,
                      st.st_ctime, st.st_mtime, st.st_atime,
                      st.st_uid, st.st_gid,
                      st.st_size, st.st_mode, gitmode, sha, flags,
                      meta_ofs, 0, 0)
     else:
         assert(endswith)
         meta_ofs = self.metastore.store(metadata.Metadata())
         e = BlankNewEntry(basename, meta_ofs, self.tmax)
         e.gitmode = gitmode
         e.sha = sha
         e.flags = flags
     self._add(ename, e)
Example #3
    def open_metadata(self, path=None):
        if not path:
            path = self.cache_path(os.path.basename(METADATA_DOWNLOAD_FILE))
            path = self.cache.abspath(path)

        if self.cache.exists(path):
            return metadata.Metadata(self.pakfire, path)
Example #4
def correctImages(inputPath, outputPath):

    # Reads all tiff images in an input folder
    # Corrects the distortion
    # Then writes the undistorted image to the output folder
    # Also copies the metadata from the raw image to the undistorted image
    image_list = []
    for filename in glob.glob(inputPath + '/*.tif'):
        image_list.append(filename)

    for item in sorted(image_list):

        print(item)
        fileItem = os.path.basename(item)
        print(fileItem)
        print(outputPath + fileItem)

        meta = metadata.Metadata(item)
        flightImageRaw = cv2.imread(item,
                                    cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
        flightUndistorted = correct_lens_distortion(meta, flightImageRaw)
        resultPath = outputPath + fileItem
        tiff.imsave(resultPath, flightUndistorted)
        copyMetadata(item, resultPath)

    return
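
A minimal, hypothetical call for the function above (folder names are placeholders; the output folder must already exist and should end with a path separator, since the result path is built by plain string concatenation):

correctImages('flight_raw', 'flight_corrected/')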
Example #5
    def id(self, pathname, name, parent):
        stat = os.lstat(pathname)
        size = stat.st_size

        cached_info = self.file_db.find_one({'name': name, 'parent': parent})

        cf = cached_file.CachedFile.FromStat(self, None, stat)

        # hasChanged should be internal to file
        # file should provide a method that returns file_info
        # it should return cached if not hasChanged (and cache exists)
        # it should take a filename and a cached value and say if the
        # cached value is stale
        if cached_info and not cf.hasChanged(cached_info):
            return cached_info['_id']

        if cf.isRegularFile:
            mf = metadata.Metadata(pathname, size)
            my_metadata = mf.metadata()
            # the rest of this logic should be cachedFile updating
            # the cache
            try:
                cf.digest = my_metadata['digest']
            except KeyError:
                pass
            try:
                cf.tags = pymongo.binary.Binary(
                    pickle.dumps(my_metadata['tags'], pickle.HIGHEST_PROTOCOL))
            except KeyError:
                pass

        return self.add(cf, pathname)
Example #6
    def test_init(self):
        """
        The metadata object appears to need to be created while CWD is
        in the root of a git repo containing a file called '<name>.yml'.
        This file must contain a structure:
           {'distgit': {'namespace': '<value>'}}

        The metadata object requires:
          a type string <image|rpm>
          a Runtime object placeholder

        """
        mt = 'image'
        rt = MockRuntime(self.logger)
        name = 'test.yml'

        md = metadata.Metadata(mt, rt, name)

        #
        # Check the logs
        #
        logs = [l.rstrip() for l in open(self.test_file).readlines()]

        expected = 6
        actual = len(logs)
        self.assertEqual(
            expected, actual,
            "logging lines - expected: {}, actual: {}".format(
                expected, actual))
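
A rough sketch of the setup the docstring above describes; the file name, namespace value, and the use of MockRuntime here are assumptions for illustration, not taken from the project:

# Sketch only: write the '<name>.yml' structure the docstring mentions,
# then construct the object with a type string and a Runtime placeholder.
import logging
import yaml

with open('test.yml', 'w') as fh:
    yaml.safe_dump({'distgit': {'namespace': 'some-namespace'}}, fh)

md = metadata.Metadata('image', MockRuntime(logging.getLogger('test')), 'test.yml')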
Example #7
def merge_file_metadata(dup_dict, base_dir=''):
    """merge_file_metadata

    given a dictionary of file_names and file metadata
    of files that all have the same content,
    create a new file with that content and merged metadata,
    or raise an exception if there is a merge conflict
    """
    metadatas = dup_dict.values()
    old_file_name = dup_dict.keys()[0]
    try:
        merged_metadata_dict = merge(metadatas)
        if merged_metadata_dict is None:
            return None
    except MergeConflict as exc:
        print exc
        return None
    try:
        old_file = metadata.Metadata(old_file_name)
        new_file_name = os.path.join(base_dir, old_file.canonical_name)
    except file_with_metadata.NoCanonicalName:
        return
    try:
        os.makedirs(os.path.dirname(new_file_name))
    except OSError:
        pass
    shutil.copy(old_file_name, new_file_name)
    print "copying to ", new_file_name,
    replace_metadata(new_file_name, merged_metadata_dict)
    print " done."
Example #8
def _golevel(level, f, ename, newentry, metastore, tmax):
    # close nodes back up the tree
    assert(level)
    default_meta_ofs = metastore.store(metadata.Metadata())
    while ename[:len(level.ename)] != level.ename:
        n = BlankNewEntry(level.ename[-1], default_meta_ofs, tmax)
        n.flags |= IX_EXISTS
        (n.children_ofs,n.children_n) = level.write(f)
        level.parent.list.append(n)
        level = level.parent

    # create nodes down the tree
    while len(level.ename) < len(ename):
        level = Level(ename[:len(level.ename)+1], level)

    # are we in precisely the right place?
    assert(ename == level.ename)
    n = newentry or \
        BlankNewEntry(ename and level.ename[-1] or None, default_meta_ofs, tmax)
    (n.children_ofs,n.children_n) = level.write(f)
    if level.parent:
        level.parent.list.append(n)
    level = level.parent

    return level
Example #9
    def update_metadata(self, force=False, offline=False):
        filename = os.path.join(METADATA_DOWNLOAD_PATH, METADATA_DOWNLOAD_FILE)
        cache_filename = self.cache_path(os.path.basename(filename))

        # Check if the metadata is already recent enough...
        exists = self.cache.exists(cache_filename)

        if not exists and offline:
            raise OfflineModeError, _("No metadata available for repository %s. Cannot download any.") \
             % self.name

        elif exists and offline:
            # Repository metadata exists. We cannot update anything because of the offline mode.
            return

        if not force and exists:
            age = self.cache.age(cache_filename)
            if age and age < TIME_10M:
                log.debug(
                    "Metadata is recent enough. I don't download it again.")
                return

        # Going to download metadata.
        log.debug("Going to download repository metadata for %s..." %
                  self.name)
        assert not offline

        grabber = downloader.MetadataDownloader(self.pakfire)
        grabber = self.mirrors.group(grabber)

        while True:
            try:
                data = grabber.urlread(filename, limit=METADATA_DOWNLOAD_LIMIT)
            except urlgrabber.grabber.URLGrabError, e:
                if e.errno == 256:
                    raise DownloadError, _(
                        "Could not update metadata for %s from any mirror server"
                    ) % self.name

                grabber.increment_mirror(grabber)
                continue

            # Parse new metadata for comparison.
            md = metadata.Metadata(self.pakfire, metadata=data)

            if self.metadata and md < self.metadata:
                log.warning(
                    _("The downloaded metadata was less recent than the current one."
                      ))
                grabber.increment_mirror(grabber)
                continue

            # If the download went well, we write the downloaded data to disk
            # and break the loop.
            f = self.cache.open(cache_filename, "w")
            f.write(data)
            f.close()

            break
Example #10
 def __init__(self, parent):
     self.parent = parent
     self.phonon = self.parent.phonon
     self.playlist = self.parent.playlist
     self.media_object = self.parent.media_object
     self.metadata = metadata.Metadata()
     self.shuffle_mode = False
     self.repeat_mode = False
     self.mute = False
     self._current_artist = ""
     self._current_track = ""
Example #11
 def __init__(self, filepath=None):
     self._version = 1
     self.label = None
     self.created_at = None
     self.saved_at = None
     self.units = None
     self.metadata = metadata.Metadata()
     self.values = None
     self.kdtree = None
     if filepath is not None:
         self.load(filepath)
Example #12
 def metadata_error(self, error):
     with patch('io.open',
                mockOpen([id3v2_header, id3v1_trailer[:3],
                          '1234'])) as mock_open:
         with patch('mutagen.File', Mock(side_effect=error)):
             # with warnings.catch_warnings(record=True) as w:
             #     warnings.simplefilter("always")
             try:
                 f = metadata.Metadata(
                     'test_file',
                     len(id3v2_header + id3v1_trailer[:3] + '1234'))
                 info = f.metadata
             except TypeError:
                 print mock_open.mock_calls
Example #13
    def _initialize(self):
        # Create state directory.
        if not os.path.exists(K.STATE_DIR):
            os.mkdir(K.STATE_DIR)
        # Create upload thread (started in run() below).
        self._uploader = uploader.Uploader(self._main_cq,
                                           self._uploader_termination_event)
        # Create data sources.
        GPIO.setmode(GPIO.BCM)  # required before Wind()
        self._uploader.add_data_source(temperature.Temperature(), True)
        self._uploader.add_data_source(metadata.Metadata(), False)

        # Import modules only when enabled, since some again import optional libraries (e.g. DHT).
        # In demo mode, enable all and import demo instances.
        if True:  # TODO: Make wind optional.
            if C.DEMO_MODE_ENABLED():
                import demo.demo_wind as wind  # @UnusedImport
            else:
                import wind  # @Reimport
            self._uploader.add_data_source(wind.Wind(), True)
        if C.DHT_ENABLED() or C.DEMO_MODE_ENABLED():
            if C.DEMO_MODE_ENABLED():
                import demo.demo_dht as dht  # @UnusedImport
            else:
                import dht  # @Reimport
            self._uploader.add_data_source(dht.Dht(), True)
        if C.ADC_ENABLED() or C.DEMO_MODE_ENABLED():
            if C.DEMO_MODE_ENABLED():
                import demo.demo_spi_adc as spi_adc  # @UnusedImport
            else:
                import spi_adc  # @Reimport
            self._uploader.add_data_source(spi_adc.SpiAdc(), True)
        if C.HUAWEI_ENABLED() or C.DEMO_MODE_ENABLED():
            if C.DEMO_MODE_ENABLED():
                import demo.demo_huawei_status as huawei_status  # @UnusedImport
            else:
                import huawei_status  # @Reimport
            self._uploader.add_data_source(huawei_status.HuaweiStatus(), True)
        if C.DOOR_ENABLED() or C.DEMO_MODE_ENABLED():
            if C.DEMO_MODE_ENABLED():
                import demo.demo_door as door  # @UnusedImport
            else:
                import door  # @Reimport
            self._uploader.add_data_source(door.Door(), True)
        if C.PILOTS_ENABLED() or C.DEMO_MODE_ENABLED():
            if C.DEMO_MODE_ENABLED():
                import demo.demo_pilot_count as pilot_count  # @UnusedImport
            else:
                import pilot_count  # @Reimport
            self._uploader.add_data_source(pilot_count.PilotCount(), True)
Example #14
def correctRaReDe(inputPath, outputPath):

    # Reads all the tiff images in an input folder
    # Converts each image to radiance
    # Converts radiance to reflectance
    # Undistorts the reflectance image

    # Unfinished -- causes the undistorted image to be quite a bit darker
    # than it otherwise would be.

    image_list = []
    for filename in glob.glob(inputPath + '/*.tif'):
        image_list.append(filename)

    for item in sorted(image_list):

        print(item)
        fileItem = os.path.basename(item)
        print(fileItem)
        print(outputPath + fileItem)

        meta = metadata.Metadata(item)
        flightImageRaw = cv2.imread(item,
                                    cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
        flightUndistorted = correct_lens_distortion(meta, flightImageRaw)
        resultPath = outputPath + fileItem
        tiff.imsave(resultPath, flightUndistorted)
        copyMetadata(item, resultPath)

    return
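    # NOTE: everything below this point is unreachable because of the return
    # above; it appears to be the unfinished radiance/reflectance workflow that
    # the comments at the top of this function describe.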

    flightRadianceImage, _, _, _ = utils.raw_image_to_radiance(
        meta, flightImageRaw)
    plotutils.plotwithcolorbar(flightRadianceImage, 'Radiance Image')
    radianceToReflectance = utils.radiance_to_reflectance(
        meta.band_name(), flightRadianceImage, flightImageRaw)
    print(radianceToReflectance)
    flightReflectanceImage = flightRadianceImage * radianceToReflectance
    plotutils.plotwithcolorbar(flightReflectanceImage, 'Reflectance converted')
    flightUndistortedReflectance = utils.correct_lens_distortion(
        meta, flightReflectanceImage)
    fUR = flightUndistortedReflectance * 255

    #flightUndistortedReflectance
    #plt.imsave('Result.tiff', flightUndistortedReflectance)
    #plt.imsave("Result.tiff", fUR)
    plotutils.plotwithcolorbar(flightUndistortedReflectance,
                               'Reflectance converted and undistorted')
    tiff.imsave('RadReflUndis.tif', flightUndistortedReflectance)
Example #15
 def __init__(self, id_):
     self.songs = list()
     self.fetched = False
     self.valid = True
     self.art = None
     self.data = None
     self.processed_data = metadata.Metadata()
     self.fetch_attempts = 0
     Release.num_loaded += 1
     try:
         uuid.UUID(id_)
     except ValueError:
         utils.safeprint(u"Corrupt UUID in file.")
         self.valid = False
     else:
         self.id = id_
Example #16
    def _script_to_filename( self, format_string, options ):
        # Do format script replacing here.
        script_data = metadata.Metadata()
        script_data.copy( self.processed_data )

        filename = script.ScriptParser().eval( format_string, script_data, self )

        filename = filename.replace( "\x00", "" ).replace( "\t", "" ).replace( "\n", "" )

        # replace incompatible characters
        if options["windows_compatible_filenames"] or sys.platform == "win32":
            filename = utils.replace_win32_incompat( filename )

        if options["ascii_filenames"]:
            if isinstance( filename, unicode ):
                filename = utils.unaccent( filename )
            filename = utils.replace_non_ascii( filename )

        return filename
Example #17
  def Parse(self, string):
    """Parse a Pack2 definition string."""

    self.valid = True
    self.metadata = metadata.Metadata()
    self.errors = []
    try:
      self.parser.parse(string, tracking=True)
    except IndexError as e:
      # Due to a bug in PLY, an index error is caused if we raise a syntax
      # error.  If we've previously raised a syntax error, ignore it so that
      # we can raise a ParseError instead.
      if self.valid:
        raise e

    if not self.valid:
      raise ParseError('Parse Error', self.errors)

    return self.metadata
Example #18
    def __init__(self, config):
        traverseLogger.info('Setting up service...')
        global currentService
        currentService = self
        self.config = config
        self.proxies = dict()
        self.active = False

        config['configuri'] = ('https' if config.get('usessl', True) else 'http') + '://' + config['targetip']
        httpprox = config['httpproxy']
        httpsprox = config['httpsproxy']
        self.proxies['http'] = httpprox if httpprox != "" else None
        self.proxies['https'] = httpsprox if httpsprox != "" else None

        # Convert list of strings to dict
        self.chkcertbundle = config['certificatebundle']
        chkcertbundle = self.chkcertbundle
        if chkcertbundle not in [None, ""] and config['certificatecheck']:
            if not os.path.isfile(chkcertbundle) and not os.path.isdir(chkcertbundle):
                self.chkcertbundle = None
                traverseLogger.error('ChkCertBundle is not found, defaulting to None')
        else:
            config['certificatebundle'] = None

        ChkCert = config['certificatecheck']
        AuthType = config['authtype']

        self.currentSession = None
        if not config.get('usessl', True) and not config['forceauth']:
            if config['username'] not in ['', None] or config['password'] not in ['', None]:
                traverseLogger.warning('Attempting to authenticate on unchecked http/https protocol is insecure, if necessary please use ForceAuth option.  Clearing auth credentials...')
                config['username'] = ''
                config['password'] = ''
        if AuthType == 'Session':
            certVal = chkcertbundle if ChkCert and chkcertbundle is not None else ChkCert
            # no proxy for system under test
            self.currentSession = rfSession(config['username'], config['password'], config['configuri'], None, certVal, self.proxies)
            self.currentSession.startSession()
        self.metadata = md.Metadata(traverseLogger)
        self.active = True
Example #19
 def create(self, path, id, original_path):
     """
     create a new image package at the targeted path
     """
     real_path = validate_path(path, 'directory')
     os.makedirs(
         os.path.join(real_path, id, 'temp')
     )  # don't we need to destroy this when done? what is it for?
     self.path = os.path.join(real_path, id)
     self.id = id
     self.manifest = manifest.Manifest(os.path.join(self.path,
                                                    'manifest-sha1.txt'),
                                       create=True)
     self.__import_original__(original_path)
     self.master = self.__generate_master__()
     self.original = os.path.basename(original_path)
     self.make_derivatives()
     self.metadata = metadata.Metadata(os.path.join(self.path, 'meta.xml'),
                                       create=True,
                                       exiftool_json=os.path.join(
                                           self.path, 'original-exif.json'))
     self.make_overview()
     self.__append_event__(
         'created package at {path}'.format(path=self.path))
Example #20
    def open(self, path):
        """
        open an existing image package at the targeted path
        """
        logger = logging.getLogger(sys._getframe().f_code.co_name)
        self.path = validate_path(path, 'directory')
        self.id = os.path.basename(self.path)
        # verify original and master and metadata and checksums
        # TBD
        # open manifest and metadata
        self.manifest = manifest.Manifest(
            os.path.join(self.path, 'manifest-sha1.txt'))
        try:
            self.metadata = metadata.Metadata(
                os.path.join(self.path, 'meta.xml'))
        except IOError:
            logger.warning(
                "no meta.xml file was found in the package for this image ({0})"
                .format(self.id))

            raise
        # see if there is an original file yet
        filenames = self.manifest.get_all().keys()
        for filename in filenames:
            if 'original.' in filename:
                front, extension = os.path.splitext(filename)
                if 'sha1' not in extension:
                    self.original = filename
        try:
            o = self.original
        except AttributeError:
            logger.error(
                "no original image file was found in the package for this image ({0})"
                .format(self.id))
            raise
        self.make_overview()
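
Read together with Example #19, a package would typically be created once and reopened later. A hypothetical round trip, where the class name and paths are placeholders:

# 'ImagePackage' stands in for whatever class defines the create() and open() methods above.
pkg = ImagePackage()
pkg.create('/data/packages', 'img-0001', '/incoming/scan.tif')

reopened = ImagePackage()
reopened.open('/data/packages/img-0001')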
Example #21
import argparse

import metadata

parser = argparse.ArgumentParser(
    description='Get metadata value of query parameters')

parser.add_argument('--dir_path',
                    type=str,
                    help='Path to the directory of the metadata file',
                    required=True)

parser.add_argument(
    '--get',
    type=str,
    help='Query to execute. If the query parameter is found, '
         'it returns the value/dict of values')

args = parser.parse_args()

mtd = metadata.Metadata(args.dir_path)

mtd.get_cli(args.get)
Example #22
    def __init__(self, config, default_entries=[]):
        traverseLogger.info('Setting up service...')
        global currentService
        currentService = self
        self.config = config
        self.proxies = dict()
        self.active = False

        config['configuri'] = ('https' if config.get('usessl', True) else
                               'http') + '://' + config['targetip']
        httpprox = config['httpproxy']
        httpsprox = config['httpsproxy']
        self.proxies['http'] = httpprox if httpprox != "" else None
        self.proxies['https'] = httpsprox if httpsprox != "" else None

        # Convert list of strings to dict
        self.chkcertbundle = config['certificatebundle']
        chkcertbundle = self.chkcertbundle
        if chkcertbundle not in [None, ""] and config['certificatecheck']:
            if not os.path.isfile(chkcertbundle) and not os.path.isdir(
                    chkcertbundle):
                self.chkcertbundle = None
                traverseLogger.error(
                    'ChkCertBundle is not found, defaulting to None')
        else:
            config['certificatebundle'] = None

        ChkCert = config['certificatecheck']
        AuthType = config['authtype']

        self.currentSession = None
        if not config.get('usessl', True) and not config['forceauth']:
            if config['username'] not in [
                    '', None
            ] or config['password'] not in ['', None]:
                traverseLogger.warning(
                    'Attempting to authenticate on unchecked http/https protocol is insecure, if necessary please use ForceAuth option.  Clearing auth credentials...'
                )
                config['username'] = ''
                config['password'] = ''
        if AuthType == 'Session':
            certVal = chkcertbundle if ChkCert and chkcertbundle is not None else ChkCert
            # no proxy for system under test
            self.currentSession = rfSession(config['username'],
                                            config['password'],
                                            config['configuri'], None, certVal,
                                            self.proxies)
            self.currentSession.startSession()
        self.metadata = md.Metadata(traverseLogger)

        target_version = self.config.get('versioncheck')

        # get Version
        success, data, status, delay = self.callResourceURI('/redfish/v1')
        if not success:
            traverseLogger.warn('Could not get ServiceRoot')
        elif target_version in [None, '']:
            if 'RedfishVersion' not in data:
                traverseLogger.warn(
                    'Could not get RedfishVersion from ServiceRoot')
            else:
                traverseLogger.info('Redfish Version of Service: {}'.format(
                    data['RedfishVersion']))
                target_version = data['RedfishVersion']

        # with Version, get default and compare to user defined values
        default_config_target = defaultconfig_by_version.get(
            target_version, dict())
        override_with = {
            k: default_config_target[k]
            for k in default_config_target if k in default_entries
        }
        if len(override_with) > 0:
            traverseLogger.info(
                'CONFIG: RedfishVersion {} has augmented these tool defaults {}'
                .format(target_version, override_with))
        self.config.update(override_with)

        self.active = True
Example #23
    def createReportJson(self):
        c3dValObj = c3dValidation.c3dValidation(self.workingDirectory)

        self.measurementNames = c3dValObj.getValidC3dList(True)
        fileNames = c3dValObj.getValidC3dList(False)
        self.null = None

        if fileNames != []:  # empty list means c3d does not contain Plugin-Gait created 'LAnkleAngles' or there are no events
            self.frameRate, self.analogRate = getFrameAndAnalogRateFromC3D(
                fileNames[0])

            ts = self.getTimeseriesResults()
            print "--------------------Timeseries OK--------------------------------"
            mapProfile = map2.MAP(self.workingDirectory)
            gvs = self.getGVSResults(mapProfile)
            print "--------------------GVS OK--------------------------------"
            gps = self.getGPSResults(mapProfile)
            print "--------------------GPS OK--------------------------------"
            emgExp = self.getEMGResults()
            print "--------------------EMG--------------------------------"

            # Events
            ev = self.getEvents()

            print "--------------------events OK--------------------------------"

            # #MetaData
            metaDataObj = metadata.Metadata(self.workingDirectory,
                                            self.modelledC3dfilenames,
                                            self.subjectMetadata,
                                            self.creationDate)
            md = metaDataObj.medatadaInfo()
            print "--------------------metadata OK--------------------------------"

            # Subject
            sub = metaDataObj.subjectInfo()
            print "--------------------subjectInfo OK--------------------------------"
            # Project
            proj = metaDataObj.projectInfo()
            print "--------------------proj OK--------------------------------"

            # TSP
            tspObj = tsp.TSP(self.workingDirectory)
            tsparams = tspObj.export()
            print "--------------------TSP OK--------------------------------"

            # Measurements
            measObj = measurements.Measurements(self.workingDirectory)
            mea = measObj.measurementInfo(self.extra_settings)

            print "--------------------Measurements OK--------------------------------"

            # Create json
            root = {
                "results": ts + gvs + gps + emgExp + tsparams,
                "events": ev,
                "metadata": md,
                "measurements": mea,
                "clientId": self.clientId,
                "subject": sub,
                "project": proj
            }

            return root
        else:
            root = {}
            return root
Example #24
 def test_non_mp3_file_to_mp3_tags(self, mock_mutagen):
     mock_mutagen.return_value = None
     f = metadata.Metadata('test_file', 0)
     mp3_tags = f.tags
     self.assertEqual(mp3_tags, None)
Example #25
    def play(self):

        self.emit("preplay")

        self.player = gst.Pipeline("player")

        self.queue_video = gst.element_factory_make("queue", "queue_video")
        self.player.add(self.queue_video)

        self.input_type = 0

        # Source selection

        self.source_pads = {}
        self.audio_pads = {}
        self.pip_pads = {}

        self.output_bins = {}
        type = 0
        source_number = 0
        pip_number = 0

        self.pip = PictureInPicture()

        self.player.add(self.pip)

        for row in self.sources.get_store():
            (name, source) = row
            element = source.create()
            self.player.add(element)

            if element.does_audio():
                if not self.input_type & MEDIA_AUDIO:

                    # The pipeline has audio sources, and this is the first
                    # audio source we add

                    if self.audio_source is None:
                        self.emit("error", "You need to select an audio source")
                        self.emit("stopped")
                        return
                    self.input_type |= MEDIA_AUDIO
                    self.input_selector = gst.element_factory_make(
                            "input-selector", "audio-selector"
                    )
                    self.player.add(self.input_selector)

                audiobin = audioinputbin.AudioInputBin(source)
                self.player.add(audiobin)

                element.audio_pad.link(audiobin.get_static_pad("sink"))
                self.audio_pads[name] = \
                        self.input_selector.get_request_pad("sink%d")
                audiobin.src_pad.link(self.audio_pads[name])

            if element.does_video():
                self.input_type |= MEDIA_VIDEO

                self.source_pads[name] = source_number
                source_number = source_number + 1

                # Thumbnail preview

                tee = gst.element_factory_make("tee", None)
                self.player.add(tee)
                element.video_pad.link(tee.sink_pads().next())

                thumbnail_queue = gst.element_factory_make("queue", None)
                self.player.add(thumbnail_queue)
                self.thumbnails[name] = Preview(self)
                self.player.add(self.thumbnails[name])

                thumbnail_err = gst.element_link_many(
                    tee, thumbnail_queue, self.thumbnails[name]
                )
                if thumbnail_err == False:
                    self.emit("error", "Error conecting thumbnail preview.")

                # Picture in Picture

                self.pip_pads[name] = pip_number
                pip_number = pip_number + 1

                main_queue = gst.element_factory_make("queue", None)
                self.player.add(main_queue)
                pip_queue = gst.element_factory_make("queue", None)
                self.player.add(pip_queue)

                tee.link(main_queue)
                tee.link(pip_queue)
                main_queue.src_pads().next().link(self.pip.get_request_pad_A())
                pip_queue.src_pads().next().link(self.pip.get_request_pad_B())

            if name == self.video_source:
                type |= element.get_type()
            if name == self.audio_source:
                type |= element.get_type()

        self.watermark = gst.element_factory_make(
                "cairoimageoverlay", "cairoimageoverlay"
        )
        self.player.add(self.watermark)

        self.colorspace = gst.element_factory_make(
                "ffmpegcolorspace", "colorspace-imageoverlay-videobalance"
        )
        self.player.add(self.colorspace)

        self.videobalance = gst.element_factory_make(
                "videobalance", "videobalance"
        )
        self.player.add(self.videobalance)
        if self.videobalance_contrast:
            self.videobalance.set_property(
                    "contrast", self.videobalance_contrast
            )
        if self.videobalance_brightness:
            self.videobalance.set_property(
                    "brightness", self.videobalance_brightness
            )
        if self.videobalance_hue:
            self.videobalance.set_property(
                    "hue", self.videobalance_hue
            )
        if self.videobalance_saturation:
            self.videobalance.set_property(
                    "saturation", self.videobalance_saturation
            )

        gst.element_link_many(
                self.pip, self.watermark, self.colorspace, self.videobalance,
                self.queue_video
        )

        self._switch_source()
        self._switch_pip()

        if self.pip_position:
            self.pip.set_property("position", self.pip_position)

        self.effect[MEDIA_VIDEO] = effect.video_effect.VideoEffect(
                self.effect_name[MEDIA_VIDEO]
        )
        self.player.add(self.effect[MEDIA_VIDEO])

        self.overlay = gst.element_factory_make("textoverlay", "overlay")
        self.overlay.set_property("font-desc", self.overlay_font)
        self.overlay.set_property("halign", self.halign)
        self.overlay.set_property("valign", self.valign)
        self.player.add(self.overlay)

        gst.element_link_many(
                self.queue_video, self.effect[MEDIA_VIDEO], self.overlay
        )

        self.preview_tee = multeequeue.MulTeeQueue()
        self.player.add(self.preview_tee)

        self.overlay.link(self.preview_tee)

        if self.input_type & MEDIA_AUDIO:
            self.convert = gst.element_factory_make("audioconvert", "convert")
            self.player.add(self.convert)

            self.effect[MEDIA_AUDIO] = effect.audio_effect.AudioEffect(
                    self.effect_name[MEDIA_AUDIO]
            )
            self.player.add(self.effect[MEDIA_AUDIO])

            self.audio_tee = gst.element_factory_make("tee", "audio_tee")
            self.player.add(self.audio_tee)

            self.volume = volume.Volume()
            self.player.add(self.volume)

            gst.element_link_many(
                    self.input_selector, self.volume,
                    self.effect[MEDIA_AUDIO], self.convert, self.audio_tee
            )
            self.input_selector.set_property(
                    "active-pad", self.audio_pads[self.audio_source]
            )
        added_encoders = {}

        pip_width = 0
        pip_height = 0

        for row in self.outputs.get_store():
            (name, output) = row

            output_bin = outputbin.OutputBin(output)
            self.output_bins[name] = output_bin
            self.player.add(output_bin)

            encoder_name = output.get_config()["parent"]

            encoder_item = self.encoders.get_item(encoder_name)
            if encoder_item is None:
                self.emit("error", "Please, add an encoder.")
                break

            if added_encoders.has_key(encoder_name):
                tee = added_encoders[encoder_name]

                tee.link(output_bin)
            else:
                tee = gst.element_factory_make("tee", None)
                self.player.add(tee)

                converter_item = encoder_item.parent
                converter = converter_item.create()
                if converter_item.config["width"] > pip_width:
                    pip_width = converter_item.config["width"]
                if converter_item.config["height"] > pip_height:
                    pip_height = converter_item.config["height"]
                self.player.add(converter)

                encoder = encoder_item.factory.create(type)
                if encoder.vorbisenc:
                    self.metadata = metadata.Metadata(encoder.vorbisenc)
                    self.metadata.set_tags(self.taglist)
                encoder.config(encoder_item.config)
                self.player.add(encoder)

                added_encoders[encoder_name] = tee
                self.preview_tee.get_src_pad().link(
                        converter.sink_pads().next()
                )
                gst.element_link_many(
                        converter, encoder, tee, output_bin
                )

                if self.input_type & MEDIA_AUDIO:
                    audio_queue = gst.element_factory_make("queue", None)
                    self.player.add(audio_queue)

                    gst.element_link_many(self.audio_tee, audio_queue, encoder)

        self.preview = Preview(self)
        self.player.add(self.preview)
        self.preview_tee.get_src_pad().link(self.preview.sink_pads().next())

        if pip_width == 0:
            pip_width = 320
            pip_height = 240
        self.pip.set_property("width", int(pip_width))
        self.pip.set_property("height", int(pip_height))

        self.video_width = int(pip_width)
        self.video_height = int(pip_height)
        self._set_watermark(self.video_width, self.video_height)

        self.overlay.set_property("text", self.overlay_text)
        if self.volume_value is not None:
            self.volume.set_property("volume", self.volume_value)

        self.emit("pipeline-ready")

        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.enable_sync_message_emission()
        bus.connect("message", self.on_message)
        bus.connect("sync-message::element", self.on_sync_message)
        cr = self.player.set_state(gst.STATE_PLAYING)
        if cr == gst.STATE_CHANGE_SUCCESS:
            self.emit("playing")
        elif cr == gst.STATE_CHANGE_ASYNC:
            self.pending_state = gst.STATE_PLAYING
Example #26
    def save(self, path=None, algo="xz"):
        """
			This function saves the database and metadata to path so it can
			be exported to a remote repository.
		"""
        if not path:
            path = self.path

        # Create filenames
        metapath = os.path.join(path, METADATA_DOWNLOAD_PATH)
        db_path = os.path.join(metapath, METADATA_DATABASE_FILE)
        md_path = os.path.join(metapath, METADATA_DOWNLOAD_FILE)

        # Remove all pre-existing metadata.
        if os.path.exists(metapath):
            util.rm(metapath)

        # Create directory for metadata.
        os.makedirs(metapath)

        # Save the database to path and get the filename.
        self.index.write(db_path)

        # Make a reference to the database file so that it will get a unique
        # name and we won't get into any trouble with caching proxies.
        db_hash = util.calc_hash1(db_path)

        db_path2 = os.path.join(os.path.dirname(db_path),
                                "%s-%s" % (db_hash, os.path.basename(db_path)))

        # Compress the database.
        if algo:
            # Open input file and get filesize of input file.
            f = open(db_path)
            filesize = os.path.getsize(db_path)

            # Make a nice progress bar.
            p = util.make_progress(_("Compressing database..."), filesize)

            # Create compressing file handler.
            c = compress.compressobj(db_path2)

            try:
                size = 0
                while True:
                    buf = f.read(BUFFER_SIZE)
                    if not buf:
                        break

                    if p:
                        size += len(buf)
                        p.update(size)

                    c.write(buf)
            except:
                # XXX catch compression errors
                raise

            finally:
                f.close()
                c.close()
                if p:
                    p.finish()

                # Remove old database.
                os.unlink(db_path)

        else:
            shutil.move(db_path, db_path2)

        # Create a new metadata object and add our information to it.
        md = metadata.Metadata(self.pakfire)

        # Save name of the hashed database to the metadata.
        md.database = os.path.basename(db_path2)
        md.database_hash1 = db_hash
        md.database_compression = algo

        # Save metadata to repository.
        md.save(md_path)
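
A hypothetical call for the method above, assuming it lives on a repository object and that the destination directory already exists:

repo.save('/srv/pakfire/myrepo', algo='xz')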
Example #27
 def test_metadata_id3v1v2(self, mock_mutagen):
     mock_mutagen.return_value = {'tag': 'value'}
     f = metadata.Metadata('test_file',
                           len(id3v2_header + id3v1_trailer[:3] + '1234'))
     info = f.metadata
     self.assertEqual(info, {'digest': hash_1234, 'tags': {'tag': 'value'}})
Example #28
 def test_mp3_file_to_mp3_tags(self, mock_mutagen):
     mock_mutagen.return_value = {'tag': 'value'}
     f = metadata.Metadata('test_file', 0)
     mp3_tags = f.tags
     self.assertEqual(mp3_tags, {'tag': 'value'})
     self.assertTrue(mock_mutagen.called)
Example #29
 def test_malformed_mp3_file_to_mp3_tags(self, mock_mutagen):
     mock_mutagen.side_effect = mutagen.mp3.HeaderNotFoundError(
         'test error')
     f = metadata.Metadata('test_file', len(id3v1v2))
     mp3_tags = f.tags
     self.assertIsNone(mp3_tags)
Example #30
 def test_metadata_of_non_mp3_file(self, mock_mutagen):
     mock_mutagen.return_value = None
     f = metadata.Metadata('test_file', len('' + '' + '1234'))
     info = f.metadata
     self.assertEqual(info, {'digest': hash_1234})