Example No. 1
    @property
    def tmpdir(self):
        if not hasattr(self, '_tmpdir'):
            self._tmpdir = tempfile.mkdtemp(prefix="rpmspectool_")
            log_debug("Created temporary directory '{}'".format(self._tmpdir))
            if not self.args.debug:
                atexit.register(self._rm_tmpdir)
        return self._tmpdir
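The property lazily creates a temporary directory on first access and, unless debugging, registers a cleanup handler for interpreter exit. A self-contained sketch of the same pattern using only the standard library (the class and prefix names here are illustrative, not from the project):

import atexit
import logging
import shutil
import tempfile


class Workspace:
    """Lazily creates a temp directory and removes it at interpreter exit."""

    @property
    def tmpdir(self):
        if not hasattr(self, '_tmpdir'):
            self._tmpdir = tempfile.mkdtemp(prefix="workspace_")
            logging.debug("Created temporary directory %r", self._tmpdir)
            atexit.register(shutil.rmtree, self._tmpdir, ignore_errors=True)
        return self._tmpdir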
Example No. 2
    def on_input_status_event(self, signal, value):
        log_debug('update source selection')
        source = self._model.props.sources[value]
        for row in self._source_list:
            if row.value == source.props.val:
                self._current = row.value
                self._source_list.select_row(row)
Example No. 3
def download_files(url):
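    # NOTE: `expr` (the link-matching regex) and `download_path` are presumably
    # module-level names in the source project; they are not defined in this snippet.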
    website = urllib2.urlopen(url)
    html = website.read()
    links = re.findall(expr, html)
    files = []
    for link in links:
        log_debug("downloading {}".format(link))
        files.append(url_download(link, download_path))
    return files
Example No. 4
def url_download(url, download_path):
    data = urllib2.urlopen(url).read()
    parsed_url = urlparse.urlparse(url)
    file_name = path.basename(parsed_url.path)
    file_path = path.join(download_path, file_name)
    # binary mode: urlopen().read() returns raw bytes
    with open(file_path, 'wb') as open_file:
        open_file.write(data)
        log_debug("wrote {} to {}".format(url, file_path))
    return file_path
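Examples No. 3 and No. 4 target Python 2 (`urllib2`, `urlparse`). For reference, a rough Python 3 equivalent of `url_download`, as a minimal sketch (the function name is illustrative):

from os import path
from urllib.parse import urlparse
from urllib.request import urlopen


def url_download_py3(url, download_path):
    """Download `url` into `download_path` and return the file path."""
    data = urlopen(url).read()  # bytes
    file_name = path.basename(urlparse(url).path)
    file_path = path.join(download_path, file_name)
    with open(file_path, 'wb') as open_file:
        open_file.write(data)
    return file_path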
Example No. 5
    def __init__(self, coremodel):
        log_debug('initialize power widget')
        super().__init__()
        self._coremodel = coremodel
        self._corepower = coremodel.props.corepower
        self._corepower.bind_property('network_player_active',
                                      self._network_player_power_switch,
                                      'active')
        self._coremodel.emit('network-player-power-get-status-event')
        log_debug('power widget initialized')
Example No. 6
    def LoadResource(self, resourceName):
        name = os.path.join(data_dir, resourceName)
        if not name.endswith('.png'):
            name += '.png'
        try:
            image = pyglet.image.load(name)
        except Exception as ex:
            log_debug('Cannot load image: ' + name)
            log_debug('Raising: ' + str(ex))
            raise
        return image
Example No. 7
    def LoadResource(self, resourceName):
        name = os.path.join(data_dir, resourceName)
        if not name.endswith('.py'):
            name += '.py'
        try:
            glbs = {}
            execfile(name, glbs)  # Python 2 built-in
        except Exception as ex:
            log_debug('Cannot load triggers: ' + name)
            log_debug('Raising: ' + str(ex))
            raise
        return glbs
Example No. 8
    def __init__(self, coremodel: CoreModel):
        log_debug('initialize source widget')
        super().__init__()

        self._coremodel = coremodel
        self._model: CoreSource = coremodel.props.coresource
        self._coremodel.connect('network-player-input-status-event',
                                self.on_input_status_event)
        self._source_list.bind_model(self._model, self._create_row)
        self._coremodel.emit('network-player-input-get-status-event')
        self.show_all()
        log_debug('source widget initialized')
Example No. 9
    def Update(self, series_meta, provider_wrapper, database_manager):
        """
        Procedure to handle updates. The default behaviour is to not update; just retrieve from the
        database.

        :param series_meta: SeriesMetaData
        :param provider_wrapper: ProviderWrapper
        :param database_manager: DatabaseManager
        :return:
        """
        log_debug('Fetching {0} from {1}'.format(series_meta.ticker_full,
                                                 database_manager.Name))
        return database_manager.Retrieve(series_meta)
Example No. 10
def fetch(ticker, database='Default', dropna=True):
    """
    Fetch a series from database; may create series and/or update as needed.

    :param ticker: str
    :param database: str
    :param dropna: bool
    :return: pandas.Series
    """
    # NOTE: This will get fancier, but don't over-design for now...
    if database.lower() == 'default':
        database = PlatformConfiguration["Database"]["Default"]
    database_manager: DatabaseManager = Databases[database]
    series_meta = database_manager.Find(ticker)
    series_meta.AssertValid()
    provider_code = series_meta.series_provider_code
    try:
        provider_manager: ProviderWrapper = Providers[provider_code]
    except:
        raise KeyError('Unknown provider_code: ' + provider_code)

    if series_meta.Exists:
        # Return what is on the database.
        global UpdateProtocolList
        # TODO: Allow for choice of protocol.
        return UpdateProtocolList["NOUPDATE"].Update(series_meta,
                                                     provider_manager,
                                                     database_manager)
    else:
        if provider_manager.IsExternal:
            _hook_fetch_external(provider_manager, ticker)
        log_debug('Fetching %s', ticker)
        if Providers.EchoAccess:
            print('Going to {0} to fetch {1}'.format(provider_manager.Name,
                                                     ticker))
        ser_list = provider_manager.fetch(series_meta)
        if dropna:
            ser_list = [x.dropna() for x in ser_list]
        if len(ser_list) > 1:
            # Not sure how more than one series will work with the SeriesMetaData
            raise NotImplementedError(
                'More than one series in a fetch not supported')
        log('Writing %s', ticker)
        ser = ser_list[0]
        database_manager.Write(ser, series_meta)
    return ser_list[0]
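A hypothetical call, for illustration only (the ticker string is made up; real ticker formats depend on the configured providers and databases):

ser = fetch('TEST_TICKER', database='Default', dropna=True)
print(ser.tail())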
Example No. 11
    def FetchAndWrite(self, ticker, series_meta, provider_manager,
                      database_manager):
        """
        Fetch a series from an external provider, and write it to the database. Should not
        need to override this method.

        :param ticker: str
        :param series_meta: econ_platform_core.SeriesMetaData
        :param provider_manager: econ_platform_core.ProviderWrapper
        :param database_manager: econ_platform_core.DatabaseManager
        :return:
        """
        if provider_manager.IsExternal:
            _hook_fetch_external(provider_manager, ticker)
        if provider_manager.PushOnly:
            raise PlatformError(
                'Series {0} does not exist on {1}. Its ticker indicates that it is a push-only series.'
                .format(ticker, database_manager.Code)) from None
        log_debug('Fetching %s', ticker)
        # Force this to False, so that ProviderManager extension writers do not need to
        # remember to do so.
        provider_manager.TableWasFetched = False
        if econ_platform_core.Providers.EchoAccess:
            print('Going to {0} to fetch {1}'.format(provider_manager.Name,
                                                     ticker))
        try:
            out = provider_manager.fetch(series_meta)
        except TickerNotFoundError:
            # If the table was fetched, write the table, even if the specific series was not there...
            if provider_manager.TableWasFetched:
                self.WriteTable(provider_manager, database_manager)
            raise
        if type(out) is not tuple:
            ser = out
        else:
            ser, series_meta = out
        ser = ser.dropna()
        log_debug('Writing %s', ticker)
        if not provider_manager.TableWasFetched:
            database_manager.Write(ser, series_meta)
            if not database_manager.SetsLastUpdateAutomatically:
                database_manager.SetLastUpdate(series_meta.ticker_full)
        else:
            self.WriteTable(provider_manager, database_manager)
        return ser
Example No. 12
    def Update(self, ticker, series_meta, provider_wrapper, database_manager):
        """
        Procedure to handle updates. The default behaviour is to not update; just retrieve from the
        database.

        :param ticker: str
        :param series_meta: econ_platform_core.SeriesMetadata
        :param provider_wrapper: econ_platform_core.ProviderWrapper
        :param database_manager: econ_platform_core.DatabaseManager
        :return:
        """
        last_refresh = series_meta.last_refresh
        ticker_str = ticker
        if last_refresh is None:
            last_refresh = database_manager.GetLastRefresh(series_meta.ticker_full)
        # If the developer is too lazy to parse strings...
        if type(last_refresh) is str:
            last_refresh = dateutil.parser.parse(last_refresh)
        nnow = datetime.datetime.now()
        if self.NumHours is None:
            self.NumHours = econ_platform_core.PlatformConfiguration['UpdateProtocol'].getint('SimpleHours')
        age = math.floor(((nnow - last_refresh).total_seconds()) / (60 * 60))
        if age < self.NumHours:
            log_debug('Series {0} not stale, going to {1}'.format(ticker_str, database_manager.Code))
            return database_manager.Retrieve(series_meta)
        else:
            # The adventure begins!
            # For now, refresh entire series.
            try:
                return self.FetchAndWrite(ticker, series_meta, provider_wrapper, database_manager)
            except NoDataError:
                log_debug('Series {0} has no new data; marking refreshed'.format(ticker_str))
                database_manager.SetLastRefresh(series_meta.ticker_full)
                return database_manager.Retrieve(series_meta)
            except PlatformError as ex:
                # Unable to fetch; just retrieve from the database
                # This is perhaps too broad, but we can use whatever is in the database.
                econ_platform_core.log_last_error(just_info=True)
                print('Could not fetch from provider; using database')
                print('Explanation: ' + str(ex))
                return database_manager.Retrieve(series_meta)
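The heart of this protocol is the staleness test: the series' age in whole hours is compared against a configured threshold. A standalone sketch of that computation (the threshold and timestamp are made up for illustration):

import datetime
import math

import dateutil.parser

NUM_HOURS = 24  # illustrative; the real value comes from PlatformConfiguration
last_refresh = dateutil.parser.parse('2019-06-01T08:00:00')
age = math.floor((datetime.datetime.now() - last_refresh).total_seconds() / (60 * 60))
is_stale = age >= NUM_HOURS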
Example No. 13
    def __init__(self, application, title):
        log_debug('initialize window')
        super().__init__(application=application, title=title)
        self.set_default_size(800, 600)
        self._app = application

        self.hb = HeaderBar(self._app.props.coremodel)
        self.set_titlebar(self.hb)
        self._all = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        self._power = Power(self._app.props.coremodel)
        self._power.props.visible = True
        self._all.pack_start(self._power, False, True, 0)
        self._command = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
        self.source = Source(self._app.props.coremodel)
        self.source.props.visible = True
        self._command.pack_start(self.source, False, True, 0)
        self.menu = Menu(self._app.props.coremodel)
        self.menu.props.visible = True
        self._command.pack_start(self.menu, True, True, 0)
        self._command.props.visible = True
        self._all.pack_start(self._command, True, True, 0)
        self.add(self._all)
        self._all.show()
        log_debug('window initialized')
Example No. 14
def log_extension_status():
    """
    After the fact logging of what extensions were loaded. Useful for R
    :return:
    """
    log_debug('Successful Extension Initialisation')
    for e in LoadedExtensions:
        log_debug(e)
    if len(DecoratedFailedExtensions) == 0:
        log_debug('No extension loads failed.')
        return
    log_warning('Failed Extension Initialisation')
    for f, warn in DecoratedFailedExtensions:
        log_warning('Extension_File\t{0}\tMessage:\t{1}'.format(f, warn))
Example No. 15
    def eval_specfile(self, definitions=None):
        log_debug("eval_specfile()")

        log_debug("writing parsed file '{}'".format(self.out_specfile_path))

        cmdline = (self.rpmcmd, "--eval")

        for macro in self.rpm_cmd_macros:
            self.out_specfile.write(
                "%undefine {macro}\n%define {macro} ".format(
                    macro=macro).encode('utf-8'))
            with Popen(
                    cmdline + ("%{}\n".format(macro),), stdin=DEVNULL,
                    stdout=PIPE, stderr=DEVNULL, close_fds=True) as rpmpipe:
                self.out_specfile.write(rpmpipe.stdout.read())
        self.out_specfile.write(b"\n")

        # guard: `definitions` defaults to None
        for definition in definitions or ():
            self.out_specfile.write(
                "%define {}\n".format(definition).encode('utf-8'))

        if self.need_conditionals_quirk:
            self._write_conditionals_quirk()

        preamble = []
        group_seen = False
        conditional_depth = 0

        for l in self.in_specfile.readlines():
            m = self.macro_re.search(l)
            if m:
                name = m.group('name')
                if name in self.section_names:
                    # unwind open conditional blocks
                    unwinding_lines = [b"%endif\n"] * conditional_depth
                    preamble.extend(unwinding_lines)
                    self.out_specfile.write(b"".join(unwinding_lines))

                    # we're only interested in the preamble
                    break
                elif name in self.conditional_names:
                    conditional_depth += 1
                elif name == b'endif':
                    conditional_depth -= 1

            # ignore arch specifics
            if self.archstuff_re.search(l):
                continue

            # replace legacy tags
            l = self.copyright_re.sub(rb"License", l)
            l = self.serial_re.sub(rb"Epoch", l)

            preamble.append(l)
            self.out_specfile.write(l)

            if self.group_re.search(l):
                group_seen = True

        self.in_specfile.close()

        if not group_seen:
            preamble.append(b"Group: rpmspectool\n")

        eof = b"EOF"

        while eof in preamble:
            eof += b"_EOF"

        preamble_bytes = b"".join(preamble)

        self.out_specfile.write(
            b"%description\n%prep\ncat << " + eof + b"\n" +
            preamble_bytes + b"\nSrcDir: %{_sourcedir}\n" + eof)

        self.out_specfile.close()

        cmdline = [self.rpmbuildcmd]

        for macro in self.rpm_cmd_macros:
            cmdline.extend(("--define", "{} {}".format(macro, self.tmpdir)))

        cmdline.extend(("--nodeps", "-bp", self.out_specfile_path))

        ret_dict = defaultdict(dict)

        with Popen(
                cmdline, stdin=DEVNULL, stdout=PIPE, stderr=PIPE,
                close_fds=True) as rpm:
            stdout, stderr = rpm.communicate()
            if rpm.returncode:
                raise RPMSpecEvalError(
                    self.out_specfile_path, rpm.returncode, stderr)

            for l in stdout.split(b"\n"):
                l = l.strip()
                m = self.source_patch_re.search(l)
                if m:
                    if m.group('sourcepatch').lower() == b'source':
                        log_debug("Found source: {!r}".format(l))
                        spdict = ret_dict['sources']
                    else:
                        log_debug("Found patch: {!r}".format(l))
                        spdict = ret_dict['patches']
                    try:
                        index = int(m.group('index'))
                    except TypeError:
                        index = 0
                    spdict[index] = m.group('fileurl').decode('utf-8')
                m = self.srcdir_re.search(l)
                if m:
                    ret_dict['srcdir'] = m.group('srcdir').decode('utf-8')

        return ret_dict
Example No. 16
def fetch(ticker, database='Default', dropna=True):
    """
    Fetch a series from database; may create series and/or update as needed.

    (May create a "fetchmany()" for fancier fetches that take a slice of the database.)

    :param ticker: str
    :param database: str
    :param dropna: bool
    :return: pandas.Series
    """
    # Default handling is inside the database manager...
    # if database.lower() == 'default':
    #     database = PlatformConfiguration["Database"]["Default"]
    database_manager: DatabaseManager = Databases[database]
    series_meta = database_manager.Find(ticker)
    series_meta.AssertValid()
    provider_code = series_meta.series_provider_code
    # noinspection PyPep8
    try:
        provider_manager: ProviderWrapper = Providers[provider_code]
    except:
        raise KeyError('Unknown provider_code: ' + str(provider_code))

    if series_meta.Exists:
        # Return what is on the database.
        global UpdateProtocolList
        # TODO: Allow for choice of protocol.
        return UpdateProtocolList["NOUPDATE"].Update(series_meta,
                                                     provider_manager,
                                                     database_manager)
    else:
        if provider_manager.IsExternal:
            _hook_fetch_external(provider_manager, ticker)
        log_debug('Fetching %s', ticker)
        # Force this to False, so that ProviderManager extension writers do not need to
        # remember to do so.
        provider_manager.TableWasFetched = False
        if Providers.EchoAccess:
            print('Going to {0} to fetch {1}'.format(provider_manager.Name,
                                                     ticker))
        try:
            out = provider_manager.fetch(series_meta)
        except TickerNotFoundError:
            # If the table was fetched, write the table, even if the specific series was not there...
            if provider_manager.TableWasFetched:
                for k in provider_manager.TableSeries:
                    t_ser = provider_manager.TableSeries[k]
                    t_meta = provider_manager.TableMeta[k]
                    # Don't write the single series again..
                    if str(t_meta.ticker_full) == str(series_meta.ticker_full):
                        continue
                    database_manager.Write(t_ser, t_meta)
            raise
        if type(out) is not tuple:
            ser = out
        else:
            ser, series_meta = out
        if dropna:
            ser = ser.dropna()
        log('Writing %s', ticker)
        database_manager.Write(ser, series_meta)
        if provider_manager.TableWasFetched:
            for k in provider_manager.TableSeries:
                t_ser = provider_manager.TableSeries[k]
                t_meta = provider_manager.TableMeta[k]
                # Don't write the single series again..
                if str(t_meta.ticker_full) == str(series_meta.ticker_full):
                    continue
                database_manager.Write(t_ser, t_meta)

    return ser
Example No. 17
    def main(self):
        argparser = self.get_arg_parser()
        args = self.args = argparser.parse_args(sys.argv[1:])

        if args.debug:
            logging.basicConfig(level=logging.DEBUG)

        log_debug("args: {}".format(args))

        if not getattr(args, 'cmd', None):
            argparser.print_usage()
        elif args.cmd == 'version':
            print("{prog} {version}".format(prog=sys.argv[0], version=version))
        else:
            parsed_spec_path = os.path.join(
                self.tmpdir,
                "rpmspectool-" + os.path.basename(self.args.specfile.name))
            spechandler = RPMSpecHandler(self.tmpdir, args.specfile,
                                         parsed_spec_path)

            try:
                specfile_res = spechandler.eval_specfile(self.args.define)
            except RPMSpecEvalError as e:
                specpath, returncode, stderr = e.args
                if args.debug:
                    errmsg = _(
                        "Error parsing intermediate spec file '{specpath}'.")
                else:
                    errmsg = _("Error parsing intermediate spec file.")
                print(errmsg.format(specpath=specpath), file=sys.stderr)
                if args.verbose:
                    print(_("RPM error:\n{stderr}").format(stderr=stderr),
                          file=sys.stderr)
                sys.exit(2)

            sources, patches = self.filter_sources_patches(
                args, specfile_res['sources'], specfile_res['patches'])

            if args.cmd == 'list':
                for prefix, what in (("Source", sources), ("Patch", patches)):
                    for i in sorted(what):
                        print("{}{}: {}".format(prefix, i, what[i]))
            elif args.cmd == 'get':
                if getattr(args, 'sourcedir'):
                    where = specfile_res['srcdir']
                else:
                    where = getattr(args, 'directory')
                for what in sources, patches:
                    for i in sorted(what):
                        url = what[i]
                        if is_url(url):
                            try:
                                download(url,
                                         where=where,
                                         dry_run=args.dry_run,
                                         insecure=args.insecure,
                                         force=args.force)
                            except DownloadError as e:
                                log_error(e.args[0])
                                return 1
                            except FileExistsError as e:
                                log_error("{}: {}".format(
                                    e.args[1],
                                    getattr(e, 'filename2', e.filename)))
                                return 1

        return 0
Example No. 18
def fetch_basic(url, user_agent, results_location, job_id=None):
    """function to use for basic fetch

    :param url: url to fetch information from
    :param user_agent: user agent string that is used by the minion in making the fetch
    :param results_location: the location to where the results are stored
    :param job_id: id of the job that is used to differentiate if from any other jobs that may be run
    :return: results_data - a dictionary of metadata on the fetch

    Before anything else, the url that is specified is cleaned so as to not throw any errors. After that, a Requests
    session is started. It is from this session that we make the HTTP request from the minion that performs the
    fetch. We perform a GET on the url. If there is no connection error, raise_for_status() checks the HTTP status
    code. If the fetch succeeded, we write the page data to a file. After that, we write some metadata about the
    fetch to the results_data dictionary, such as cookies and any info about redirects. Then, we return the
    results_data dictionary.
    """
    log_debug("fetch_basic", "Entering fetch_basic")
    headers = {'user-agent': user_agent}

    # Clean up any loose hanging non-printable characters.
    # In the future consider moving this out to its own method
    # with additional checks.
    url_clean = url.lstrip()

    results_data = {'requested_url': url,
                    'actual_url': url_clean,
                    'remote_job_id': str(job_id)}

    log_debug("fetch_basic", "Starting Fetch of: " + url_clean)

    session = requests.Session()

    try:
        # Do the actual fetch. We are OK with non-valid SSL certs.
        fetch_result = session.get(url_clean, headers=headers, verify=False)

    except requests.ConnectionError:
        results_data['connection_success'] = False
        log_debug("fetch_basic", "Connection Failed for Fetch: " + url_clean)
        return results_data

    except Exception as e:
        log_debug("fetch_basic", "Unexpected Exception while fetching site. " + str(type(e)) + " : " + str(e))
        return results_data

    try:
        fetch_result.raise_for_status()

        results_data['fetch_success'] = True

        log_debug("fetch_basic", "Succesfully Fetched: " + url_clean)

        try:
            log_debug("fetch_basic", "Opening: " + str(results_location) + " for: " + url_clean)

            fetch_file = open(results_location, 'wb')

            for chunk in fetch_result.iter_content(100000):
                fetch_file.write(chunk)

                log_debug("fetch_basic",
                          "Writing " + str(len(chunk)) + " bytes to: " + str(results_location) + " for: " + str(
                              url_clean))

            fetch_file.close()

            log_debug("fetch_basic", "Closing: " + str(results_location) + " for: " + str(url_clean))

            results_data['fetch_object_success'] = True
            log_debug("fetch_basic", "got here")
        except:
            results_data['fetch_object_success'] = False

    except Exception as e:
        log_debug("fetch_basic", "Unexpected Exception while handlin fetch data. " + str(type(e)) + " : " + str(e))
        results_data['fetch_success'] = False

    finally:
        results_data['connection_success'] = True
        results_data['server_info'] = dict(fetch_result.headers)
        results_data['response_code'] = fetch_result.status_code

        if len(fetch_result.cookies) > 0:
            results_data['cookies'] = fetch_result.cookies.get_dict()

        if len(fetch_result.history) > 0:
            # There was some sort of redirect... Might as well capture everything
            results_data['redirects'] = []
            for item in fetch_result.history:
                history_item = {'headers': dict(item.headers),
                                'response_code': item.status_code}
                results_data['redirects'].append(history_item)

        return results_data
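A hypothetical invocation of `fetch_basic`, assuming the module above is importable and the paths shown (which are made up) are writable:

results = fetch_basic(url='http://example.com/',
                      user_agent='Mozilla/5.0 (compatible; minion-fetch)',
                      results_location='/tmp/fetch_result.html',
                      job_id=42)
if results.get('fetch_success'):
    print('Fetched {} (HTTP {})'.format(results['actual_url'],
                                        results['response_code']))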
Example No. 19
def fetch_website(url, user_agent, results_location_dir):
    """function to use for website fetch

    :param url: url to fetch information from
    :param user_agent: user agent string that is used by the minion in making the fetch
    :param results_location_dir: the location to where the results are stored
    :return: results_data - a dictionary of metadata on the fetch

    This method uses a different library than the basic fetch method, Ghost.py (documentation at
    http://ghost-py.readthedocs.io/en/latest/#). After cleaning the url, a session is opened with the user agent string
    passed in. Then the specific web page is opened and all the resources of the web page are collected. After that, a
    screen-shot of the web page is collected. Then, the page data is written to a file that is named from
    the session id. Then each resource gathered during the fetch is written to a file, and these are placed in the same
    directory as the page data. Beyond that, miscellaneous metadata is written to the results_data dictionary.
    """
    log_debug("fetch_website", "Entering fetch_website")

    # clean the url
    url_clean = url.lstrip()

    log_debug("fetch_website", "Starting Fetch of: " + url_clean)

    # start a Ghost.py session
    session = Ghost().start(user_agent=user_agent)

    results_data = {'requested_url': url,
                    'actual_url': url_clean,
                    'remote_job_id': str(session.id)}
    try:
        # open the web page and gather all the page's resources
        page, resources = session.open(address=url_clean, user_agent=user_agent)

    # catch a TimeoutError
    except (ghost.TimeoutError, ghost.Error):
        results_data['connection_success'] = False
        log_debug("fetch_website", "Connection Failed for Fetch: " + url_clean)
        return results_data

    except Exception as e:
        print(type(e))
        print(str(e))
        return results_data

    # if page is None and there are no resources, that means that a connection to the page failed
    if page is None and len(resources) == 0:
        log_debug("fetch_website", "")
        results_data['connection_success'] = False

    else:
        netloc = urlparse(url_clean).netloc
        log_debug("fetch_website", "Attempting to capture screenshot of {}".format(netloc))

        try:
            # capture a screen-shot of the web page
            session.capture_to("{}/{}.png".format(results_location_dir, netloc))

            log_debug("fetch_website", "Successful capture of screenshot of {}".format(netloc))

        except Exception as e:
            log_debug("fetch_website", "Failed to capture screenshot of {}".format(netloc))

            print(type(e))
            print(str(e))

        try:
            log_debug("fetch_website", "Opening: {}/{} for: {}".format(results_location_dir, session.id, url_clean))
            fetch_file = open("{}/{}".format(results_location_dir, session.id), 'w')

            log_debug("fetch_website", "writing page content to file")

            # write page content to file
            fetch_file.write(page.content)

            log_debug("fetch_website", "closing {}".format(session.id))
            fetch_file.close()

            # write the data of each resource to different files
            # enumerate avoids resources.index(), which rescans the list on
            # every lookup and returns the wrong index for repeated resources
            for i, resource in enumerate(resources):
                log_debug("fetch_website", "opening {}/resource{} for: {}".format(results_location_dir,
                                                                                  i, url_clean))
                data_file = open("{}/resource{}".format(results_location_dir, i), "w")

                log_debug("fetch_website", "writing content to resource{}".format(i))
                data_file.write(resource.content)

                log_debug("fetch_website", "closing resource{}".format(i))
                data_file.close()

            results_data['fetch_object_success'] = True

        except:
            results_data['fetch_object_success'] = False

        finally:
            # collect more metadata
            results_data['connection_success'] = True
            results_data['server_info'] = dict(page.headers)
            results_data['response_code'] = page.http_status

            if page.http_status in [400, 404, 403, 401]:
                results_data["fetch_success"] = False

            if len(session.cookies) > 0:
                results_data['cookies'] = [x.value().data() for x in session.cookies]

            return results_data
Example No. 20
# Adds a stream catcher for display and a memory handler for saving.
log_stream = StreamCatch()
logger = getLogger()
display_handler = StreamHandler(stream=log_stream)
display_handler.setLevel(LOGLEVEL)
display_handler.setFormatter(Formatter(LOGFORMATTER))
display_handler.name = "StreamCatch"
logger.addHandler(display_handler)

memory_handler = MemoryHandler(MEMBUFFER)
memory_handler.setLevel(LOGLEVEL)
memory_handler.setFormatter(Formatter(LOGFORMATTER))
memory_handler.name = "MemoryLog"
logger.addHandler(memory_handler)

log_debug("Started logging")

def make_log_file(log_path, mode='a'):
    """Points memory handler at a particular file to save the log."""
    file_handler = FileHandler(filename=log_path, mode=mode)
    file_handler.setLevel(LOGLEVEL)
    file_handler.setFormatter(Formatter(LOGFORMATTER))
    memory_handler.setTarget(file_handler)

def remove_log_file():
    """Closes the log file and stops memory_handler from pointing at it."""
    if memory_handler.target:
        old_log_file_path = memory_handler.target.baseFilename
        memory_handler.target.flush()
        memory_handler.target.close()
        memory_handler.target = None
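A minimal usage sketch, assuming the `LOGLEVEL`, `LOGFORMATTER`, and `MEMBUFFER` constants and the `StreamCatch` class from the surrounding module (the log path is illustrative):

make_log_file('/tmp/session.log')   # point the memory handler at a file
log_debug("Doing some work")        # buffered by memory_handler
remove_log_file()                   # flush, close, and detach the file target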
Example No. 21
    def _on_network_player_power_activated(self, switch, gparam):
        log_debug('on_network_player_power_activated : {}'.format(
            switch.get_active()))
        self._coremodel.emit('network-player-power-event', switch.get_active())