# -*- coding: utf-8 -*-
# Python 2 code: urllib2 and dict.iteritems() are used below.
import json
import urllib2
from collections import OrderedDict


def qqvideo(vid):
    # _http() is an external helper defined elsewhere in the add-on; it fetches
    # a URL and returns the response body as a string.
    #html = _http(self.url)
    #vid = re.compile(r'vid:"([^"]+)"').search(html).group(1)
    vid = 'c001640aimv'  # hard-coded test vid, overrides the argument
    murl = 'http://vv.video.qq.com/'
    #vinfo = _http('%sgetvinfo?otype=json&vids=%s&platform=70202&utype=-1&appver=3.2.19.325'% (murl, vid) + '&ehost=http%3a%2f%2fcache.tv.qq.com%2fqqplayerout.swf%3fvid%3dc001640aimv' )
    vinfo = _http('%sgetinfo?otype=json&vids=%s&defaultfmt=fhd' % (murl, vid))
    # The JSON is wrapped in a 'name={...};' envelope: split on the first '='
    # and drop the trailing ';' before parsing.
    infoj = json.loads(vinfo.split('=', 1)[1][:-1])
    # Display name -> format code, best quality first
    # (1080P / super HD / HD / SD).
    qtyps = OrderedDict((
        ('1080P', 'fhd'), ('超清', 'shd'), ('高清', 'hd'), ('标清', 'sd')))
    # Format codes actually offered for this vid (format name -> numeric id).
    vtyps = {v['name']: v['id'] for v in infoj['fl']['fi']}
    qtypid = vtyps['sd']  # default to SD
    # Qualities we can offer, best first.
    sels = [k for k, v in qtyps.iteritems() if v in vtyps]
    #sel = dialog.select('清晰度', sels)  # quality-selection dialog (disabled)
    sel = 0  # no dialog: take the best available quality
    surls = []
    # CDN host to fetch the clips from (last mirror in the list).
    urlpre = infoj['vl']['vi'][0]['ul']['ui'][-1]['url']
    if sel != -1:  # -1 would mean the selection dialog was cancelled
        qtypid = vtyps[qtyps[sels[sel]]]
    # The video is split into fc clips, numbered 1..fc; fetch a play URL for
    # each clip of the chosen format.
    fc = int(infoj['vl']['vi'][0]['cl']['fc']) or 1  # treat fc == 0 as a single clip
    for i in range(1, fc + 1):
        # The clip filename embeds the format id modulo 10000.
        fn = '%s.p%s.%s.mp4' % (vid, qtypid % 10000, i)
        sinfo = _http(
            '{0}getkey?format={1}&filename={2}&vid={3}&otype=json'.format(
                murl, qtypid, fn, vid))
        skey = json.loads(sinfo.split('=', 1)[1][:-1])['key']
        # Follow any redirect to obtain the final clip URL.
        surl = urllib2.urlopen(
            '%s%s?vkey=%s' % (urlpre, fn, skey), timeout=30).geturl()
        if not surl:
            break
        surls.append(surl)
    # Kodi's stack:// protocol plays the clips back to back as a single item.
    movurl = 'stack://{0}'.format(' , '.join(surls))
    return movurl
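
A hypothetical usage sketch, not part of the original add-on: the stack:// URL built by qqvideo() is Kodi's convention for playing several files back to back, so the result would typically be handed to the player via setResolvedUrl. The plugin handle taken from sys.argv[1] follows the usual Kodi plugin calling convention, and xbmcgui/xbmcplugin are only importable inside a running Kodi add-on.

import sys
import xbmcgui      # Kodi Python API (available only inside Kodi)
import xbmcplugin

if __name__ == '__main__':
    handle = int(sys.argv[1])            # plugin handle passed by Kodi
    movurl = qqvideo('c001640aimv')      # same hard-coded test vid as above
    listitem = xbmcgui.ListItem(path=movurl)
    xbmcplugin.setResolvedUrl(handle, True, listitem)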
    def test_iterators(self):
        """ OrderedDict iteration follows insertion order in every view. """
        pairs = [("c", 1), ("b", 2), ("a", 3), ("d", 4), ("e", 5), ("f", 6)]
        shuffle(pairs)
        od = OrderedDict(pairs)
        self.assertEqual(list(od), [t[0] for t in pairs])
        self.assertEqual(od.keys()[:], [t[0] for t in pairs])
        self.assertEqual(od.values()[:], [t[1] for t in pairs])
        self.assertEqual(od.items()[:], pairs)
        self.assertEqual(list(od.iterkeys()), [t[0] for t in pairs])
        self.assertEqual(list(od.itervalues()), [t[1] for t in pairs])
        self.assertEqual(list(od.iteritems()), pairs)
        self.assertEqual(list(reversed(od)), [t[0] for t in reversed(pairs)])
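
A minimal standalone sketch of the insertion-order guarantee the test above asserts, assuming Python 2 (where keys(), values(), and items() return lists and the iter* methods exist):

from collections import OrderedDict

od = OrderedDict([('c', 1), ('b', 2), ('a', 3)])
print list(od)               # ['c', 'b', 'a'] -- keys in insertion order
print od.items()             # [('c', 1), ('b', 2), ('a', 3)]
print list(od.iteritems())   # same pairs, produced lazily
print list(reversed(od))     # ['a', 'b', 'c'] -- reverse insertion order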
Example #4
File: Job.py  Project: minnus/supremm
class Job(object):
    """ Contains the data for a job. """

    # pylint: disable=too-many-instance-attributes

    def __init__(self, job_pk_id, job_id, acct):
        # pylint: disable=too-many-arguments

        self.job_pk_id = job_pk_id
        self.job_id = job_id
        self.acct = acct
        self._nodecount = acct['nodes']
        self._start_datetime = datetimeconvert(acct['start_time'])
        self._end_datetime = datetimeconvert(acct['end_time'])

        # It is necessary to set the end time one second past the recorded
        # value because the time precision is only per-second
        self._end_datetime += datetime.timedelta(seconds=1)

        self.walltime = acct['end_time'] - acct['start_time']
        self._nodes = OrderedDict()

        self._data = {}
        self.jobdir = None
        self._nodebegin = {}
        self._nodeend = {}

        self._errors = {}

    def __str__(self):
        """ Return a summary string describing the job """
        return "jobid=%s nodes=%s walltime=%s" % (self.job_id, self._nodecount,
                                                  self.walltime)

    def setjobdir(self, jobdir):
        """
        Set job dir
        """
        self.jobdir = jobdir

    def addnodearchive(self, nodename, node_archive):
        """
        Add the path to the node archive to the list archives for the job
        """
        self._nodes[nodename].set_combinedarchive(node_archive)

    def set_rawarchives(self, node_ar_map):
        """
        Store the list of raw archives that comprise the node
        """
        for nodename, archivelist in node_ar_map.iteritems():
            self._nodes[nodename].set_rawarchives(archivelist)

    def rawarchives(self):
        """ iterator for the raw archives for the nodes in the job """
        for nodename, nodedata in self._nodes.iteritems():
            if len(nodedata.rawarchives) > 0:
                yield nodename, nodedata.rawarchives

    def nodearchives(self):
        """ iterator for the combined archives for the nodes in the job """
        for nodename, nodedata in self._nodes.iteritems():
            if nodedata.archive is not None:
                yield nodename, nodedata.nodeindex, nodedata.archive

    def has_any_archives(self):
        """ are there any archives for this job """

        for _, nodedata in self._nodes.iteritems():
            if len(nodedata.rawarchives) > 0:
                return True

        return False

    def has_enough_raw_archives(self):
        """ are there enough raw archives for this job to try pmlogextract"""

        num_archives = 0

        for _, nodedata in self._nodes.iteritems():
            if len(nodedata.rawarchives) > 0:
                num_archives += 1

        if float(num_archives) / float(self._nodecount) > 0.95:
            return True
        else:
            return False

    def has_enough_combined_archives(self):
        """ are there enough combined archives for this job to try summarization"""

        num_archives = 0

        for _, nodedata in self._nodes.iteritems():
            if nodedata.archive is not None:
                num_archives += 1

        if float(num_archives) / float(self._nodecount) > 0.95:
            return True
        else:
            return False

    def setnodebeginend(self, node, begin, end):
        """
        Set the begin and end times for the given node. If either
        begin or end is None then the default time from the accounting
        data is used
        """
        if begin is not None:
            self._nodebegin[node] = begin
        if end is not None:
            self._nodeend[node] = end

    def getnodebegin(self, node):
        """
        Get the start time for job data on the given node
        """
        if node in self._nodebegin:
            return self._nodebegin[node]
        else:
            return self.start_datetime

    def getnodeend(self, node):
        """
        Get end time for job data on the given node
        """
        if node in self._nodeend:
            return self._nodeend[node]
        else:
            return self.end_datetime

    @property
    def nodecount(self):
        """ Total number of nodes assigned to the job """
        return self._nodecount

    @property
    def start_datetime(self):
        """
        Gets a datetime object representing the job's start time, or None
        if the string representation can't be converted.

        Returns:
            A datetime object representing the job's start time, or None
            if the string representation can't be converted.
        """
        return self._start_datetime

    def set_nodes(self, nodelist):
        """ Set the list of nodes assigned to the job.  The First entry in the
        list should be the head node """
        for nodeid, node in enumerate(nodelist):
            self._nodes[node] = JobNode(node, nodeid)

    @property
    def end_datetime(self):
        """
        Gets a datetime object representing the job's end time, or None
        if the string representation can't be converted.

        Returns:
            A datetime object representing the job's end time, or None
            if the string representation can't be converted.
        """
        return self._end_datetime

    def get_errors(self):
        """ Return the list of processing errors """
        return self._errors.keys()

    def record_error(self, msg):
        """ record a processing error for the job """
        if msg in self._errors:
            self._errors[msg] += 1
        else:
            self._errors[msg] = 1

    def data(self):
        """ return all job metadata """
        return self._data

    def adddata(self, name, data):
        """ Add job metadata """
        self._data[name] = data

    def getdata(self, name):
        """ return job metadata for name """
        if name in self._data:
            return self._data[name]
        return None
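
The per-node data is kept in an OrderedDict so that iterating self._nodes (as rawarchives() and nodearchives() do) follows the order passed to set_nodes(), with the head node first. A minimal standalone sketch of that pattern, using a hypothetical stripped-down node record in place of the real JobNode:

from collections import OrderedDict

class _FakeNode(object):
    """ Hypothetical stand-in for JobNode: just remembers its name and index. """
    def __init__(self, nodename, nodeindex):
        self.nodename = nodename
        self.nodeindex = nodeindex

nodes = OrderedDict()
for nodeid, name in enumerate(['head01', 'cpn-02', 'cpn-03']):   # head node first
    nodes[name] = _FakeNode(name, nodeid)

for name, nodedata in nodes.iteritems():
    print name, nodedata.nodeindex   # head01 0, then cpn-02 1, cpn-03 2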
Example #5
File: Job.py  Project: iMurfyD/supremm
class Job(object):
    """ Contains the data for a job. """
    # pylint: disable=too-many-instance-attributes

    def __init__(self, job_pk_id, job_id, acct):
        # pylint: disable=too-many-arguments

        self.job_pk_id = job_pk_id
        self.job_id = job_id
        self.acct = acct
        self._nodecount = acct['nodes']
        self._start_datetime = datetimeconvert(acct['start_time'])
        self._end_datetime = datetimeconvert(acct['end_time'])

        # It is necessary to set the end time one second past the recorded
        # value because the time precision is only per-second
        self._end_datetime += datetime.timedelta(seconds=1)

        self.walltime = acct['end_time'] - acct['start_time']
        self._nodes = OrderedDict()

        self._data = {}
        self.jobdir = None
        self._nodebegin = {}
        self._nodeend = {}

        self._errors = {}

    def __str__(self):
        """ Return a summary string describing the job """
        return "jobid=%s nodes=%s walltime=%s" % (self.job_id, self._nodecount, self.walltime)

    def setjobdir(self, jobdir):
        """
        Set job dir
        """
        self.jobdir = jobdir

    def addnodearchive(self, nodename, node_archive):
        """
        Add the path to the node archive to the list archives for the job
        """
        self._nodes[nodename].set_combinedarchive(node_archive)

    def set_rawarchives(self, node_ar_map):
        """
        Store the list of raw archives that comprise the node
        """
        for nodename, archivelist in node_ar_map.iteritems():
            self._nodes[nodename].set_rawarchives(archivelist)

    def rawarchives(self):
        """ iterator for the raw archives for the nodes in the job """
        for nodename, nodedata in self._nodes.iteritems():
            if len(nodedata.rawarchives) > 0:
                yield nodename, nodedata.rawarchives

    def nodearchives(self):
        """ iterator for the combined archives for the nodes in the job """
        for nodename, nodedata in self._nodes.iteritems():
            if nodedata.archive is not None:
                yield nodename, nodedata.nodeindex, nodedata.archive

    def setnodebeginend(self, node, begin, end):
        """
        Set the begin and end times for the given node. If either
        begin or end is None then the default time from the accounting
        data is used
        """
        if begin is not None:
            self._nodebegin[node] = begin
        if end is not None:
            self._nodeend[node] = end

    def getnodebegin(self, node):
        """
        Get the start time for job data on the given node
        """
        if node in self._nodebegin:
            return self._nodebegin[node]
        else:
            return self.start_datetime

    def getnodeend(self, node):
        """
        Get end time for job data on the given node
        """
        if node in self._nodeend:
            return self._nodeend[node]
        else:
            return self.end_datetime

    @property
    def nodecount(self):
        """ Total number of nodes assigned to the job """
        return self._nodecount

    @property
    def start_datetime(self):
        """
        Gets a datetime object representing the job's start time, or None
        if the string representation can't be converted.

        Returns:
            A datetime object representing the job's start time, or None
            if the string representation can't be converted.
        """
        return self._start_datetime

    def set_nodes(self, nodelist):
        """ Set the list of nodes assigned to the job.  The First entry in the
        list should be the head node """
        for nodeid, node in enumerate(nodelist):
            self._nodes[node] = JobNode(node, nodeid)

    @property
    def end_datetime(self):
        """
        Gets a datetime object representing the job's end time, or None
        if the string representation can't be converted.

        Returns:
            A datetime object representing the job's end time, or None
            if the string representation can't be converted.
        """
        return self._end_datetime

    def get_errors(self):
        """ Return the list of processing errors """
        return self._errors.keys()

    def record_error(self, msg):
        """ record a processing error for the job """
        if msg in self._errors:
            self._errors[msg] += 1
        else:
            self._errors[msg] = 1

    def data(self):
        """ return all job metadata """
        return self._data

    def adddata(self, name, data):
        """ Add job metadata """
        self._data[name] = data

    def getdata(self, name):
        """ return job metadata for name """
        if name in self._data:
            return self._data[name]
        return None
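
A minimal sketch of the end-time padding done in __init__, assuming the accounting timestamps are Unix epoch seconds and that datetimeconvert (defined elsewhere in supremm) maps them to datetime objects; the acct record here is hypothetical:

import datetime

# Hypothetical accounting record with whole-second epoch timestamps.
acct = {'start_time': 1500000000, 'end_time': 1500003600, 'nodes': 4}

start = datetime.datetime.utcfromtimestamp(acct['start_time'])
end = datetime.datetime.utcfromtimestamp(acct['end_time'])
end += datetime.timedelta(seconds=1)   # pad, since precision is only per-second
walltime = acct['end_time'] - acct['start_time']
print start, end, walltime             # 2017-07-14 02:40:00  2017-07-14 03:40:01  3600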