Code Example #1
File: seriesinfo.py  Project: stampedeboss/DadVision
    def _tvdbGetInfo(self):

        try:
            _shows = self.db.search(self.series.titleBase, "en")
        except (TVDBAttributeError, TVDBIndexError, TVDBValueError,
                error.BadData):
            _an_error = traceback.format_exc()
            log.debug(
                traceback.format_exception_only(type(_an_error),
                                                _an_error)[-1])
            raise SeriesNotFound

        if len(_shows) == 0: raise SeriesNotFound
        if len(_shows) == 1:
            if matching(self.series.title.lower(),
                        self.decode(_shows[0].SeriesName).lower(),
                        factor=85):
                _shows[0].update()
                _series = Series(tvdb=_shows[0])

                self.series.update(_series)
                self.series.source = 'tvdb'
                self.series.tvdb_info = _series
                return
            else:
                raise SeriesNotFound

        _rankings = {'Continuing': {}, 'Ended': {}, 'Other': {}}
        for _show in _shows:
            _title_suffix = self._check_suffix.match(
                self.decode(_show.SeriesName))
            if _title_suffix:
                _score = matching(self.series.titleBase.lower(),
                                  _title_suffix.group('SeriesName').lower())
            else:
                _score = matching(self.series.titleBase.lower(),
                                  self.decode(_show.SeriesName).lower())
            if _score < 90:
                continue

            _show.update()
            _series = Series(tvdb=_show)
            if _score in _rankings[_series.status]:
                _rankings[_series.status][_score][_series.title] = _series
            else:
                _rankings[_series.status][_score] = {_series.title: _series}

        if not _rankings: raise SeriesNotFound

        self._reviewShowData(_rankings, 'tvdb')

        return
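For orientation, a minimal sketch of the shape `_rankings` takes after the matching loop above; the title, score, and placeholder value are hypothetical, only the status keys come from the snippet:

# Hypothetical shape of _rankings: status -> match score -> {series title: Series}
_rankings = {
    'Continuing': {95: {'Example Show': '<Series instance>'}},
    'Ended': {},
    'Other': {},
}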
Code Example #2
File: seriesinfo.py  Project: stampedeboss/DadVision
    def __init__(self, rtnDict=True):
        log.trace('SeriesInfo.__init__')

        super(SeriesInfo, self).__init__()

        seriesinfo_group = self.options.parser.add_argument_group(
            "Episode Detail Options", description=None)
        seriesinfo_group.add_argument("--sn",
                                      "--name",
                                      type=str,
                                      dest='series_name')
        seriesinfo_group.add_argument("--season", type=int, dest='season')
        seriesinfo_group.add_argument("--epno",
                                      type=int,
                                      action='append',
                                      dest='epno')
        seriesinfo_group.add_argument("--tvdb",
                                      dest="processes",
                                      action="append_const",
                                      const='tvdb',
                                      help="Information to come from TVDB")
        seriesinfo_group.add_argument("--tvrage",
                                      dest="processes",
                                      action="append_const",
                                      const='tvrage',
                                      help="Information to come from TVRage")
        seriesinfo_group.add_argument(
            "--MyTrakt",
            dest="processes",
            action="append_const",
            const='MyTrakt',
            help="Information to come from MyTrakt.tv")
        seriesinfo_group.add_argument(
            "--series-only",
            "--so",
            dest="get_episodes",
            action="store_false",
            default=False,
            help="Information to come from MyTrakt.tv")

        self.db = api.TVDB("959D8E76B796A1FB")

        self.trakt_user = None

        self._check_suffix = re.compile(
            '^(?P<SeriesName>.*)[ \._\-][\(](?P<Suffix>(?:19|20)\d{2}|[a-zA-Z]*)[\)].*$',
            re.I)

        self.last_series = Series()
        self.series = Series()
        self.rtnDict = rtnDict
Code Example #3
File: loadshape.py  Project: wuyou33/eetd-loadshape
    def cost(self, load_data=None, start_at=None, end_at=None, step_count=None):
        """calculate the cost of energy based on the provided tariff

        R script produces one output file:
        timestamp, previous-interval-cost, cumulative-previous-interval-cost

        [tariff.R command]
        ./tariff.R
            --loadFile=LOAD_FILE
            --tariffFile=TARIFF_FILE
            --outputTimestampFile=OUTPUT_TIMES_FILE
            --demandResponseFile=DEMAND_RESPONSE_DATES
            --outputFile=OUTPUT_FILE
        """
        if load_data == None: load_data = self.training_load_series
        
        if not isinstance(load_data, Series):
            raise Exception("load_data argument must be a Series object")
        if not isinstance(self.tariff, Tariff):
            raise Exception("cannot calculate cost - no tariff provided")

        output_times = self._build_output_time_series(start_at, end_at,
                                                      step_size=900,
                                                      step_count=step_count)

        # ----- write temporary files ----- #
        load_tmp            = load_data.write_to_tempfile(exclude=False)
        tariff_tmp          = self.tariff.write_tariff_to_tempfile()
        output_times_tmp    = output_times.write_to_tempfile()
        output_tmp          = tempfile.NamedTemporaryFile()

        # ----- build command ----- #
        cmd = path.join(self.model_dir, 'tariff.R')
        cmd += " --loadFile=%s"             % load_tmp.name
        cmd += " --tariffFile=%s"           % tariff_tmp.name
        cmd += " --outputTimestampFile=%s"  % output_times_tmp.name
        cmd += " --outputFile=%s"           % output_tmp.name

        if len(self.tariff.dr_periods) > 0:
            dr_periods_tmp = self.tariff.write_dr_periods_to_tempfile()
            cmd += " --demandResponseFile=%s" % dr_periods_tmp.name

        self._run_script(cmd)
        
        # ----- process results ----- #
        cost_series             = Series(output_tmp.name, self.timezone, data_column=1)
        cumulative_cost_series  = Series(output_tmp.name, self.timezone, data_column=2)

        return cost_series, cumulative_cost_series
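A hedged usage sketch for cost(); `ls` stands for an already-configured instance of the load shape class with training data and a Tariff attached, which this snippet does not show, and the timestamps are illustrative:

# Hypothetical call; 'ls' is an assumed, fully configured load shape object.
cost_series, cumulative_cost_series = ls.cost(
    start_at="2013-09-01 00:00:00",
    end_at="2013-09-02 00:00:00",
    step_count=96)  # 96 fifteen-minute output intervals for the day
# Both return values are Series read from the R script's output file:
# data_column=1 -> per-interval cost, data_column=2 -> cumulative cost.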
Code Example #4
File: style_frame.py  Project: Kapernikov/StyleFrame
 def __getitem__(self, item):
     if isinstance(item, pd.Series):
         return self.data_df.__getitem__(item).index
     elif isinstance(item, list):
         return StyleFrame(self.data_df.__getitem__(item))
     else:
         return Series(self.data_df.__getitem__(item))
Code Example #5
def GetMetaDataForList(commit_range, git_dir=None, count=None,
                       series = Series()):
    """Reads out patch series metadata from the commits

    This does a 'git log' on the relevant commits and pulls out the tags we
    are interested in.

    Args:
        commit_range: Range of commits to count (e.g. 'HEAD..base')
        git_dir: Path to git repository (None to use default)
        count: Number of commits to list, or None for no limit
        series: Series object to add information into. By default a new series
            is started.
    Returns:
        A Series object containing information about the commits.
    """
    params = ['git', 'log', '--no-color', '--reverse', '--no-decorate',
                    commit_range]
    if count is not None:
        params[2:2] = ['-n%d' % count]
    if git_dir:
        params[1:1] = ['--git-dir', git_dir]
    pipe = [params]
    stdout = command.RunPipe(pipe, capture=True).stdout
    ps = PatchStream(series, is_log=True)
    for line in stdout.splitlines():
        ps.ProcessLine(line)
    ps.Finalize()
    return series
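A hedged usage sketch for the function above; the commit range and count are illustrative, and `command`, `PatchStream`, and `Series` come from the surrounding patman code base:

# Hypothetical call: pull patch tags from the five commits on top of upstream/master.
series = GetMetaDataForList('upstream/master..HEAD', count=5)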
Code Example #6
File: info.py  Project: stampedeboss/DadVision
	def getShow(self):
		_series = Series(title=self.args.series_name)
		_series = self.getShowInfo(_series)

		for key, value in _series.__dict__.iteritems():
			log.info('Key: {}   Value: {}'.format(key, value))
		return
Code Example #7
File: patchstream.py  Project: thomaschandler/u-boot
def GetMetaDataForList(commit_range,
                       git_dir=None,
                       count=None,
                       series=None,
                       allow_overwrite=False):
    """Reads out patch series metadata from the commits

    This does a 'git log' on the relevant commits and pulls out the tags we
    are interested in.

    Args:
        commit_range: Range of commits to count (e.g. 'HEAD..base')
        git_dir: Path to git repository (None to use default)
        count: Number of commits to list, or None for no limit
        series: Series object to add information into. By default a new series
            is started.
        allow_overwrite: Allow tags to overwrite an existing tag
    Returns:
        A Series object containing information about the commits.
    """
    if not series:
        series = Series()
    series.allow_overwrite = allow_overwrite
    params = gitutil.LogCmd(commit_range,
                            reverse=True,
                            count=count,
                            git_dir=git_dir)
    stdout = command.RunPipe([params], capture=True).stdout
    ps = PatchStream(series, is_log=True)
    for line in stdout.splitlines():
        ps.ProcessLine(line)
    ps.Finalize()
    return series
Code Example #8
    def std(self,
            verbose=True,
            decode=True,
            passes=None,
            num_threads=1,
            apply_experimental_transforms=False):
        """ Eager operation to compute the standard deviation of the values in each column

        Parameters
        ----------
        verbose, decode, passes, num_threads, apply_experimental_transforms
            see LazyResult

        Returns
        -------
        Series

        """
        index = []
        data = []
        for column_name in self:
            index.append(column_name)
            # get as series
            series = self[str(column_name)]
            # apply the operation
            data.append(series.std().evaluate(verbose, decode, passes,
                                              num_threads,
                                              apply_experimental_transforms))

        return Series(np.array(data), np.dtype(np.float64),
                      Index(np.array(index).astype(np.str), np.dtype(np.str)))
Code Example #9
    def _aggregate(self,
                   operation,
                   verbose=True,
                   decode=True,
                   passes=None,
                   num_threads=1,
                   apply_experimental_transforms=False):
        assert isinstance(operation, (str, unicode))

        index = []
        data = []
        for column_name in self:
            index.append(column_name)
            # get as series
            series = self[str(column_name)]
            # apply the operation
            data.append(
                LazyResult(
                    weld_aggregate(series.expr, operation,
                                   series.weld_type), series.weld_type,
                    0).evaluate(verbose, decode, passes, num_threads,
                                apply_experimental_transforms))

        return Series(
            np.array(data).astype(np.float64), np.dtype(np.float64),
            Index(np.array(index).astype(np.str), np.dtype(np.str)))
Code Example #10
File: loadshape.py  Project: wuyou33/eetd-loadshape
    def diff(self, start_at=None, end_at=None, step_size=900, step_count=None):
        """calculate the difference between baseline and actual

        R script produces two output files:
        (1) diff:       timestamp,  kw_diff,    cumulative_kwh_diff
        (2) baseline:   timestamp,  kw_base,    cumulative_kwh_base

        [diff.R command]
        ./diff.R
            --loadFile=LOAD_FILE
            --baselineFile=BASELINE_LOAD_FILE
            --outputTimesFile=OUTPUT_TIMES_FILE
            --outputFile=OUTPUT_DIFF_FILE
            --predictedBaselineOutputFile=OUTPUT_BASE_FILE
        """
        if self.baseline_series == None: self.baseline()
        
        output_times = self._build_output_time_series(start_at, end_at,
                                                      step_size, step_count)

        # ----- write temporary files ----- #
        load_tmp            = self.training_load_series.write_to_tempfile(exclude=False)
        baseline_tmp        = self.baseline_series.write_to_tempfile()
        output_times_tmp    = output_times.write_to_tempfile()
        output_diff_tmp     = tempfile.NamedTemporaryFile()
        output_base_tmp     = tempfile.NamedTemporaryFile()
        
        # ----- build command ----- #
        cmd = path.join(self.model_dir, 'diff.R')
        cmd += " --loadFile=%s"                     % load_tmp.name
        cmd += " --baselineFile=%s"                 % baseline_tmp.name
        cmd += " --outputTimesFile=%s"              % output_times_tmp.name
        cmd += " --outputFile=%s"                   % output_diff_tmp.name
        cmd += " --predictedBaselineOutputFile=%s"  % output_base_tmp.name
        
        # ----- run script ----- #
        self._run_script(cmd)

        # ----- process results ----- #
        kw_diff = Series(output_diff_tmp.name, self.timezone, data_column=1)
        kw_base = Series(output_base_tmp.name, self.timezone, data_column=1)

        cumulative_kwh_diff = Series(output_diff_tmp.name, self.timezone, data_column=2)
        cumulative_kwh_base = Series(output_base_tmp.name, self.timezone, data_column=2)

        return kw_diff, kw_base, cumulative_kwh_diff, cumulative_kwh_base
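As with cost() in example #3, a hedged call sketch; `ls` is again an assumed, already-configured load shape instance, and the timestamps are illustrative:

# Hypothetical call: hourly output points over one week of data.
kw_diff, kw_base, kwh_diff_cum, kwh_base_cum = ls.diff(
    start_at="2013-09-01 00:00:00",
    end_at="2013-09-08 00:00:00",
    step_size=3600)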
Code Example #11
 def new_series(self, path, fields):
     if path not in self.series:
         # New segment
         self.series[path] = Series(self._generate_serial(), fields)
         # Unlike segments, for which the register is modified during commits,
         # creation and deletion of series are immediately recorded into the register
         register.record_series(path, self.series[path])
     else:
         raise KeyError('Series already exists')
Code Example #12
    def parse(self):
        series_tmp = []
        for serie in os.listdir(self.path):
            if not (serie.startswith(".")):
                series_tmp.append(serie)

        series = []
        for serie in sorted(series_tmp):
            series.append(Series(self.path + "/" + serie, serie))

        return series
Code Example #13
File: loadshape.py  Project: wuyou33/eetd-loadshape
 def _get_series(self, data):
     """returns a series built from the data arg
     - if the data arg is None: return None
     - if the data arg is a Series: return the Series
     - if the data arg is a string: attempt to build Series from file path
     - if the data arg is a List: attempt to build Series from list
     """
     if (isinstance(data, Series)) | (data == None):
         return data
     else:
         return Series(data, self.timezone, self.temp_units)
Code Example #14
File: patchstream.py  Project: thomaschandler/u-boot
def GetMetaDataForTest(text):
    """Process metadata from a file containing a git log. Used for tests

    Args:
        text: text of a captured git log to process, line by line
    """
    series = Series()
    ps = PatchStream(series, is_log=True)
    for line in text.splitlines():
        ps.ProcessLine(line)
    ps.Finalize()
    return series
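A hedged sketch of feeding this test helper; the capture file name is hypothetical:

# Hypothetical test usage: parse a saved 'git log' capture into a Series.
with open('test_git_log.txt') as fd:
    series = GetMetaDataForTest(fd.read())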
Code Example #15
File: block.py  Project: pont-us/c5-pmag-paper-data
    def apply(self, series, subseries=0):
        """Warp one of the series using the other as a template.

        :param series: the index of the ``Bseries`` to warp, 0 or 1
        :type series: ``int``
        :param subseries: for multi-series ('tandem') ``Bseries``, the index
        of the actual ``Series`` within the ``Bseries``
        :return: the series, warped to fit the other series
        :rtype: ``Series``
        :raise ValueError: when ``series`` is not 0 or 1
        """

        if series not in (0, 1):
            raise ValueError("series must be 0 or 1.")

        warpee = series
        target = 1 - series
        warpee_bs = self.series[warpee]
        target_bs = self.series[target]

        # Make a copy of the warpee's data. We'll keep the
        # y values but go through it overwriting the x values
        # with their warped counterparts.
        new_data = self.series[warpee].series[subseries].data.copy()

        pos_w, pos_t = 0, 0  # warpee, target
        for run_n in range(len(self.runs[0])):
            len_w = self.runs[warpee][run_n]
            len_t = self.runs[target][run_n]

            # get the endpoints of the warpee and target runs
            w0, w1 = warpee_bs.get_xrange(pos_w, pos_w + len_w)
            t0, t1 = target_bs.get_xrange(pos_t, pos_t + len_t)
            wx0 = warpee_bs.series[subseries].data[0][w0]
            wx1 = warpee_bs.series[subseries].data[0][w1 - 1]
            tx0 = target_bs.series[subseries].data[0][t0]
            tx1 = target_bs.series[subseries].data[0][t1 - 1]

            # calculate scaling between warpee-x and target-x
            scale = (tx1 - tx0) / (wx1 - wx0)

            # apply the mapping to the warpee data within the run
            w0 = warpee_bs.get_block_start(pos_w)
            w1 = warpee_bs.get_block_start(pos_w + len_w)
            for i in xrange(w0, w1):
                new_data[0][i] = (new_data[0][i] - wx0) * scale + tx0

            # update block positions
            pos_w += len_w
            pos_t += len_t

        return Series(new_data, warpee_bs.series[subseries].name + "-wp")
Code Example #16
File: info.py  Project: stampedeboss/DadVision
	def _processFile(self, pathname):

		_file_details = self.parser.getFileDetails(pathname)
		for key, value in _file_details.iteritems():
			log.info('Key: {}   Value: {}'.format(key, value))

		redirect_response = raw_input('Press any key to continue:  ')
		_series = Series(**_file_details)
		_series = self.getShowInfo(_series)

		for key, value in _series.__dict__.iteritems():
			log.info('Key: {}   Value: {}'.format(key, value))

		return
Code Example #17
File: library.py  Project: Mc-Beton/7_4
    def add_series(self, i, j, k, h, n):
        self.video_lib.append(
            Series(title=i, year=j, gener=k, season=h, episode=n))
        with open('series_lib.csv', 'a', newline='') as csvfile:
            fieldnames = ['Title', 'Year', 'Type', 'Seasons', 'Episode']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

            writer.writerow({
                'Title': i,
                'Year': j,
                'Type': k,
                'Seasons': h,
                'Episode': n
            })
Code Example #18
def create_new_series(file):
    from tvdb_client import ApiV2Client
    api_client = ApiV2Client(TVDB_LOGIN['username'], TVDB_LOGIN['api_key'], TVDB_LOGIN['account_identifier'])
    api_client.login()
    show = api_client.get_series(file.tvdb_id)
    premiere = ''
    if 'data' in show:
        premiere = show['data']['firstAired']
    print(get_base_path(file))
    print(get_base_path(file).rsplit(os.sep, 1))
    base_path = get_base_path(file).rsplit(os.sep, 1)[0]
    SHOWS.update({file.series_name: Series(series_name=file.series_name, status=file.status, tvdb_id=file.tvdb_id,
                                           name_needed=file.name_needed, location=base_path, premiere=premiere)})
    file.report['info'].append('Series created')
Code Example #19
File: seriesinfo.py  Project: stampedeboss/DadVision
    def _tvrageGetInfo(self):

        _shows = feeds.search(self.series.titleBase)
        if not _shows: raise SeriesNotFound
        if len(_shows) == 1:
            _series = Series(tvrage=etree_to_dict(_shows[0])['show'])
            if matching(self.series.title.lower(),
                        _series.title.lower(),
                        factor=85):
                _series = Series(tvrage=_shows[0])
                self.series.update(_series)
                self.series.source = 'tvrage'
                self.series.tvrage_info = _series
                return
            else:
                raise SeriesNotFound

        _rankings = {}
        for _show in _shows:
            _series = Series(tvrage=etree_to_dict(_show)['show'])
            _score = matching(self.series.title.lower(),
                              self.decode(_series.titleBase.lower()))
            if _score < 85:
                continue

            if _score in _rankings[_series.status]:
                _rankings[_series.status][_score][_series.title] = _series
            else:
                _rankings[_series.status][_score] = {_series.title: _series}

        if not _rankings:
            raise SeriesNotFound

        self._reviewShowData(_rankings, 'tvrage')

        return
Code Example #20
File: study.py  Project: jond01/contrib-pydicom
 def add_dataset(self, dataset):
     try:
         if self.dicom_dataset.StudyInstanceUID == dataset.StudyInstanceUID:
             for x in self.series:
                 try:
                     x.add_dataset(dataset)
                     logger.debug("Part of this series")
                     break
                 except Exception as e:
                     logger.debug("Not part of this series")
             else:
                 self.series.append(Series(dicom_dataset=dataset))
         else:
             raise KeyError("Not the same StudyInstanceUIDs")
     except Exception as e:
         logger.debug("trouble adding series to study", exc_info=e)
         raise KeyError("Not the same StudyInstanceUIDs")
Code Example #21
File: nyaa_mission.py  Project: Neraste/nyaa-mission
    def set_series(self, config):
        """ Set series from config

            Args:
                config (configparser.SectionProxy): dictionary of series,
                    containing their parameters.
        """
        for name, section in config.items():
            if name == 'DEFAULT':
                continue

            self.series.append(Series(
                name,
                directory_local_prefix=self.directory_local,
                directory_server_prefix=self.directory_server,
                **section
                ))
Code Example #22
    def _element_wise_operation(self, other, operation):
        if not isinstance(other, (str, unicode, int, long, float, bool)):
            raise TypeError('can only compare with scalars')

        assert isinstance(operation, (str, unicode))

        new_data = {}
        for column_name in self:
            # get as series
            series = self[str(column_name)]
            # apply the operation
            new_data[column_name] = Series(
                weld_element_wise_op(series.expr, other, operation,
                                     series.weld_type), series.dtype,
                self.index, series.name)

        return DataFrame(new_data, self.index)
Code Example #23
    def add_series(self):
        data_mgr = DatabaseManager(Config().database_name, None)
        series_args = {}
        for i in range(self.add_series_table.rowCount()):
            try:
                if self.add_series_table.item(i, 1).background() == Qt.red:
                    return
            except AttributeError:
                pass

            curr_heading = self.add_series_table.item(i, 0).text()
            try:
                curr_text = self.add_series_table.item(i, 1).text()
            except AttributeError:  # is_completed
                curr_text = (self.add_series_table.cellWidget(i, 1)
                             .currentText())

            if curr_heading == "Name":
                series_args['name'] = curr_text
                if series_args['name'] in ["", "Unknown"]:
                    self.add_series_table.item(i, 1).setBackground(Qt.red)
                    return
            elif curr_heading == "Alt. Names":
                series_args['alt_names'] = curr_text
            elif curr_heading == "Author":
                series_args['author'] = curr_text
            elif curr_heading == "Volumes Owned":
                if curr_text in ["None", "0", ""]:
                    series_args['volumes_owned'] = generate_volumes_owned("")
                else:
                    series_args['volumes_owned'] = generate_volumes_owned(
                        curr_text)
            elif curr_heading == "Publisher":
                series_args['publisher'] = curr_text
            elif curr_heading == "Completed":
                status = curr_text
                series_args['is_completed'] = 1 if status == "Yes" else 0

        new_series = Series(**series_args)

        if new_series.add_series_to_database(data_mgr):
            cur = data_mgr.query("SELECT rowid FROM series WHERE name='%s'"
                                 % series_args['name'].replace("'", "''"))
            self.added = cur.fetchone()[0]
            self.close()
Code Example #24
File: patchstream.py  Project: dyg540/uboot_2440
def GetMetaData(start, count):
    """Reads out patch series metadata from the commits

    This does a 'git log' on the relevant commits and pulls out the tags we
    are interested in.

    Args:
        start: Commit to start from: 0=HEAD, 1=next one, etc.
        count: Number of commits to list
    """
    pipe = [['git', 'log', '--reverse', 'HEAD~%d' % start, '-n%d' % count]]
    stdout = command.RunPipe(pipe, capture=True)
    series = Series()
    ps = PatchStream(series, is_log=True)
    for line in stdout.splitlines():
        ps.ProcessLine(line)
    ps.Finalize()
    return series
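A hedged call sketch based on the documented arguments; the count is illustrative:

# Hypothetical call: metadata for the five commits ending at HEAD (start=0 means HEAD).
series = GetMetaData(0, 5)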
Code Example #25
File: seriesinfo.py  Project: stampedeboss/DadVision
    def _getInfoFromProviders(self, processOrder):

        if self.last_series.title == self.series.title:
            self.series.copyShow(self.last_series)
            return
        else:
            self.last_series = Series(title=self.series.title)

        options = {
            'tvdb': self._tvdbGetInfo,
            'MyTrakt': self._traktGetInfo,
            'tvrage': self._tvrageGetInfo
        }

        try:
            for service in processOrder:
                try:
                    options[service]()
                    if self.series.keysFound:
                        #						if not self.series.tvdb_id and 'tvdb' in processOrder:
                        #							self.series.title = re.sub(' and ', ' & ', self.series.title)
                        #							options['tvdb']()
                        raise GetOutOfLoop
                except SeriesNotFound:
                    sys.exc_clear()
                except GetOutOfLoop:
                    raise GetOutOfLoop
                except:
                    _an_error = traceback.format_exc()
                    log.debug(
                        traceback.format_exception_only(
                            type(_an_error), _an_error)[-1])
                    raise SeriesNotFound
            if self.series.keysFound:
                raise GetOutOfLoop
            self.last_request = {'LastRequestName': ''}
            raise SeriesNotFound('ALL: Unable to locate series: {}'.format(
                self.series.title))
        except GetOutOfLoop:
            sys.exc_clear()

        self.last_series.copyShow(self.series)

        return
Code Example #26
def entry_to_series(entry):
    """
    entry_to_series()
    Takes a single row from a database query and converts it
    into a series.
    """
    if not entry:
        return None

    series = Series(
        name=str(entry[SI.NAME]),  # Series Name
        volumes_owned=str(entry[SI.VOL_OWNED]),  # Volumes Owned
        is_completed=entry[SI.IS_COMPLETED],  # Is Completed
        next_volume=entry[SI.NEXT_VOLUME],  # Next Volume
        publisher=str(entry[SI.PUBLISHER]),  # Publisher
        author=str(entry[SI.AUTHOR]),  # Author
        alt_names=str(entry[SI.ALT_NAMES]),  # Alternate Names
        rowid=entry[SI.ROWID])  # Row ID in db
    return series
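A hedged sketch of the intended call pattern; how the row is fetched is an assumption, and its column order would have to match the SI.* indices used above:

# Hypothetical usage: 'row' is one record fetched from the series table,
# with columns ordered to match the SI.* indices; an empty row maps to None.
series = entry_to_series(row)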
Code Example #27
def prep(data):
    base = create_location(data[SERIES_NAME], data['anime'])
    if not base:
        return False
    show = Series(series_name=data[SERIES_NAME],
                  location=base,
                  tvdb_id=data[TVDB_ID],
                  premiere=data[PREMIERE],
                  final=data[FINAL],
                  status=data[STATUS],
                  name_needed=data[NAME_NEEDED])

    REPORT['info'].append('Series Name: ' + show.series_name)
    REPORT['info'].append('Status: ' + show.status)

    for f in data['files']:
        if not f['s_nr'] or not f['e_nr']:
            continue

        f = File(old_location=os.path.join(FILE_DIR, f['location']),
                 series_name=show.series_name,
                 s_nr=f['s_nr'],
                 e_nr=f['e_nr'],
                 title=f['title'],
                 title2=f['title2'],
                 title3=f['title3'],
                 episode_option=f['episode_option'],
                 subs=f['sub'],
                 anime=show.anime)

        folder = make_season_folder(f.s_nr, show.location)
        if not folder:
            return False

        name = Episode.compile_file_name(None, file=f)
        if f.subs:
            f.location = os.path.join(SUB_DIR, name)
        else:
            f.location = os.path.join(folder, name)
        QUEUE.append(f)

    return show
Code Example #28
def load_files(top):
    shows = {}
    len_top = len(top.split(sep))
    for root, dirs, _ in walk(top):

        for name in dirs:
            if root == top:
                shows[name] = Series(location=path.join(root, name),
                                     series_name=name)
                continue

            show = path.basename(root)

            if len(root.split(sep)) - len_top > 1:
                continue
            if 'Special' in name:
                continue

            season = shows[show].add_season(location=path.join(root, name))
            season.update_episodes(reload_metadata=False)

    return shows
Code Example #29
    def head(self,
             n=10,
             verbose=True,
             decode=True,
             passes=None,
             num_threads=1,
             apply_experimental_transforms=False):
        """ Eagerly evaluates the DataFrame

        This operation has no consequences, unlike getitem.

        Parameters
        ----------
        n : int, optional
            how many rows to return
        verbose, decode, passes, num_threads, apply_experimental_transforms
            see LazyResult

        Returns
        -------
        DataFrame
            the output of evaluate on the sliced DataFrame

        """
        new_index = self.index.head(n)

        new_data = {}
        for column_name in self:
            # making series because Series has the proper method to slice something; re-use the code above
            series = self[str(column_name)]

            new_data[column_name] = Series(series.head(n), series.dtype,
                                           series.index, series.name)

        return DataFrame(new_data,
                         new_index).evaluate(verbose, decode, passes,
                                             num_threads,
                                             apply_experimental_transforms)
Code Example #30
File: loadshape.py  Project: wuyou33/eetd-loadshape
    def _build_output_time_series(self, start_at=None, end_at=None,
                                  step_size=900, step_count=None):
        """assemble prediction series:
        - this is the series of timestamps for which baseline values will be calculated
        - the prediction series is stored in a Series object to take advantage of some of the Series features
        - default start_at/end is training_load_series.start_at/end_at
        - default prediction step is 900s
        - step_count will trump step_size
        """
        if start_at == None: start_at = self.training_load_series.start_at()
        if end_at == None: end_at = self.training_load_series.end_at()
        
        start_at = utils.read_timestamp(start_at, self.timezone)
        end_at = utils.read_timestamp(end_at, self.timezone)

        if step_count != None:
            duration = end_at - start_at
            step_size = int(float(duration) / step_count)

        p_data = range(start_at, end_at+1, step_size)
        p_data = [(v, 0) for v in p_data]
        
        return Series(p_data, self.timezone)
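A small worked illustration of the "step_count will trump step_size" rule above; the numbers are hypothetical:

# With a one-day window and step_count=96, step_size is recomputed and any
# caller-supplied step_size is ignored:
duration = 86400                        # end_at - start_at, in seconds
step_size = int(float(duration) / 96)   # -> 900 (fifteen-minute steps)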