Code example #1
def main():
    """Start point of the program"""

    # Parse arguments from command line
    cl_args = CLI.parse()

    # Print the rss_reader version and the current working directory
    if cl_args.get('version'):
        print(f"RSS-Reader {version} from {os.getcwd()}")
        sys.exit()

    # Allow the logger to print logs to the command line
    Logger().set_stream_logging(cl_args.get('verbose'))

    # Create a named logger via the Logger helper
    logger = Logger().get_logger("rss_reader")

    data = RSSDataHandler(*rss_handler(cl_args.get('source')),
                          cl_args.get('json'), cl_args.get('limit'))

    output = OutputHandler(data, cl_args.get('colorize'))
    if cl_args.get('json'):
        pprint(unescape(output.format_to_json_string()))
    else:
        for text in output.format_news():
            print(text, end="\n\n")
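
The JSON branch above pipes the formatted string through unescape() before pretty-printing. Assuming that unescape is the standard library's html.unescape (the snippet's imports are not shown), it turns HTML entities left over in feed text back into plain characters, as in this standalone illustration:

from html import unescape
from pprint import pprint

# Entities that survive feed parsing become readable characters again.
pprint(unescape('{"title": "Tom &amp; Jerry &#8211; Ep. 1"}'))
# -> '{"title": "Tom & Jerry – Ep. 1"}'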
Code example #2
File: zoom_scraper.py Project: malpais/zoomgrab
    def __init__(self,
                 url,
                 output_dir=None,
                 output_format=None,
                 username_format='full',
                 domain='',
                 gophish_url=None,
                 gophish_api_key=None):
        self.url = url
        self.scraper = cfscrape.create_scraper(delay=10)
        try:
            self.tokens, self.user_agent = cfscrape.get_tokens(url,
                                                               proxies=proxies,
                                                               verify=False)
        except Exception as e:
            click.secho(
                f'[!] failed to retrieve scrape page ({e})... exiting.',
                fg='red')
            sys.exit(-1)
        self.output_dir = output_dir
        self.output_format = output_format
        self.username_format = username_format
        self.domain = domain
        self.output_handler = OutputHandler(output_dir, domain,
                                            username_format, output_format,
                                            gophish_url, gophish_api_key)
Code example #3
File: thermostat.py Project: boatski/pi-stat
    def __init__(self, sensor, weather):
        # Register exitCleanup() to run when the program exits, normally or after a crash
        atexit.register(self.exitCleanup)

        self.sensor = sensor
        self.weather = weather
        self.schedule = Schedule()
        self.output = OutputHandler()
        self.updateSetpoints()
        self.checkSchedule()
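
The atexit.register() call above ties cleanup to interpreter shutdown: the registered function runs on normal exit, including after an unhandled exception, but not after os._exit() or a fatal signal. A standalone illustration (the cleanup body is a placeholder, not pi-stat's actual teardown):

import atexit

def cleanup():
    # Placeholder for hardware teardown such as releasing GPIO pins.
    print('cleaning up...')

atexit.register(cleanup)
print('doing work')
# On normal exit this prints 'doing work', then 'cleaning up...'.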
Code example #4
    def __init__(self):
        # Build the encryption chain: key -> file -> encoder -> printer
        en_key = KeyHandler()
        en_file = FileHandler()
        en_coder = EncodingHandler()
        en_printer = OutputHandler()
        en_key.set_next(en_file)
        en_file.set_next(en_coder)
        en_coder.set_next(en_printer)

        # Build the decryption chain: key -> file -> decoder -> printer
        de_key = KeyHandler()
        de_file = FileHandler()
        de_coder = DecodingHandler()
        de_printer = OutputHandler()
        de_key.set_next(de_file)
        de_file.set_next(de_coder)
        de_coder.set_next(de_printer)

        # Each chain is entered through its KeyHandler
        self.encryption_start_handler = en_key
        self.decryption_start_handler = de_key
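
This example wires two Chain of Responsibility pipelines, one for encryption and one for decryption, each entered through its KeyHandler. The handler base class is not shown; here is a minimal sketch of what set_next() and a dispatch method might look like, assuming each handler transforms a request and forwards it (the Handler class and its handle()/process() methods are hypothetical, inferred from the wiring above):

class Handler:
    """Hypothetical base class for the chain links wired above."""

    def __init__(self):
        self._next = None

    def set_next(self, handler):
        # Link the next handler; returning it allows fluent chaining.
        self._next = handler
        return handler

    def handle(self, request):
        # Transform the request, then pass the result along the chain.
        request = self.process(request)
        if self._next is not None:
            return self._next.handle(request)
        return request

    def process(self, request):
        # Default: pass through unchanged; concrete handlers override this.
        return request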
Code example #5
File: main.py Project: AliGhezalAG/utt_board_handler
def main():
    scaleModes = {
        "50": "Normal mode",
        "46": "Real-time mode",
    }

    board_handler = BoardHandler()
    data_handler = DataHandler(scaleModes)
    output_handler = OutputHandler()
    gui = GuiHandler(board_handler, data_handler, output_handler)
    gui.initWindow()
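
The scaleModes table maps the scale's raw mode codes to display names. If DataHandler looks codes up with dict.get() and a fallback, unknown codes degrade gracefully instead of raising KeyError; an illustrative snippet (DataHandler's real lookup is not shown):

scaleModes = {"50": "Normal mode", "46": "Real-time mode"}

def describe_mode(code):
    # Unknown codes fall back to a label instead of raising KeyError.
    return scaleModes.get(code, f"Unknown mode ({code})")

print(describe_mode("50"))  # Normal mode
print(describe_mode("99"))  # Unknown mode (99)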
Code example #6
File: zoom_scraper.py Project: malpais/zoomgrab
class ZoomScraper():
    url = ''
    scraper = None
    tokens = {}
    user_agent = ''
    domain = ''
    username_format = 'full'
    pages = []
    page_count = 1
    current_page = None
    output_dir = None
    output_format = None
    output_handler = None
    """
    Instantiate ZoomScraper

    param: url - URL of target
    
    Initializes a cfscrape scraper object and performs an initial GET request
    to the target URL to acquire cloudflare-specific tokens for future use
    """
    def __init__(self,
                 url,
                 output_dir=None,
                 output_format=None,
                 username_format='full',
                 domain='',
                 gophish_url=None,
                 gophish_api_key=None):
        self.url = url
        self.scraper = cfscrape.create_scraper(delay=10)
        try:
            self.tokens, self.user_agent = cfscrape.get_tokens(url,
                                                               proxies=proxies,
                                                               verify=False)
        except Exception as e:
            click.secho(
                f'[!] failed to retrieve scrape page ({e})... exiting.',
                fg='red')
            sys.exit(-1)
        self.output_dir = output_dir
        self.output_format = output_format
        self.username_format = username_format
        self.domain = domain
        self.output_handler = OutputHandler(output_dir, domain,
                                            username_format, output_format,
                                            gophish_url, gophish_api_key)

    """
    Scrape page at URL

    param url: URL of target (optional)

    Performs a GET of an initialized URL or a new URL from the user. Cloudflare
    tokens retrieved in __init__() are re-used during scraping. Finally, parse 
    any scraped HTML with BeautifulSoup and return the bs4 object.
    """

    def scrape(self, url='', store_pagecount=False):
        if not url:
            url = self.url

        response = self.scraper.get(url,
                                    cookies=self.tokens,
                                    proxies=proxies,
                                    verify=False)
        if response.status_code != 200:
            click.secho(
                f'[!] failed to retrieve scrape page, received HTTP {response.status_code}... exiting.',
                fg='red')
            sys.exit(-1)
        self.current_page = BeautifulSoup(response.content, 'html.parser')
        if store_pagecount:
            self._get_pagecount(self.current_page)
            click.secho(f'[+] scraping page 1/{self.page_count}...',
                        fg='green')

        # Extract data from scraped page and save results if requested.
        person_results = self._get_data_from_page(self.username_format,
                                                  self.domain)
        self.output_handler._save_results(person_results)

    """
    Loops through the total number of zoom pages and scrape()-s employee data. Will print
    results to stdout upon completion.
    """

    def scrape_pages(self):
        for page in [
                f'{self.url}?pageNum={x}'
                for x in range(2, self.page_count + 1)
        ]:
            click.secho(
                f'[+] scraping page {page.split("=")[-1]}/{self.page_count}...',
                fg='green')
            try:
                self.scrape(page)

            except Exception:
                click.secho(f'[!] failed to scrape {page}', fg='red')

        self.output_handler._print_results()
        if self.output_handler.gophish_api:
            self.output_handler._import_into_gophish()

    """
    Determine result counts from Zoominfo

    param page_content: BeautifulSoup page object of a zoom result

    return int: Total contacts found across a count of zoom pages
    """

    def _get_pagecount(self, page_content):
        # Regex to match the counter text in the first page of results
        zoom_total_contacts_pattern = re.compile(
            r'(?P<num_contacts>\d+) results')
        total_search_pages = page_content.find(
            'h2', {
                'class': 'page_searchResults_numberOfResults',
            })
        # Matches section of page that shows number of total results
        # "1-25 of 1,742 Contacts"
        # Replace commas to get a number value for number of contacts
        zoom_total_contacts = zoom_total_contacts_pattern.search(
            total_search_pages.text.replace(',', '')).group('num_contacts')
        zoom_page_count = math.ceil(int(zoom_total_contacts) / 25)

        click.secho(
            f'[+] found {zoom_total_contacts} records across {zoom_page_count} pages of results...',
            fg='green')
        click.secho(
            f'[+] starting scrape of {zoom_page_count} pages. scraping cloudflare sites can be tricky, be patient!',
            fg='green')

        self.page_count = zoom_page_count

    """
    Convert HTML into person data

    param row_element: BeautifulSoup row object containing person data
    param email_format_string: User-provided string to determine the output format of parsed username
    param domain: User-provided string to append to converted username data

    return dict: Dictionary value of parsed person data from HTML row.
    """

    def _parse_employee_info(self,
                             row_element,
                             email_format_string='',
                             domain=''):
        # Find relevant elements for personnel data in the bs4 row object
        name_selector = row_element.find('div',
                                         {'class': 'tableRow_personName'})
        title_selector = row_element.find('div', {'class': 'dynamicLink'})
        location_selector = row_element.findAll('a', {'class': 'dynamicLink'})

        # Pull text values for data if available, falling back to defaults if absent
        person_name = name_selector.text if name_selector else None
        person_title = title_selector.text if title_selector else ''
        person_location = ', '.join([
            field.text for field in location_selector
        ]) if location_selector else 'Unknown'
        username = ''

        if person_name:
            # Split up a name into parts for parsing, trimming special characters
            #
            # 'Joe Z. Dirt' -> ['Joe', 'Z', 'Dirt']
            # 'Mary Skinner' -> ['Mary', 'Skinner']
            name_parts = person_name.replace('.', '').replace('\'',
                                                              '').split(' ')

            # Switch on `email_format_string` to chop up name_parts
            # based on user-defined format string. Special care given
            # to names with middle names.
            if email_format_string == 'firstlast':
                username = f'{name_parts[0]}{name_parts[-1]}'
            elif email_format_string == 'firstmlast':
                if len(name_parts) > 2:
                    username = f'{name_parts[0]}{name_parts[1][:1]}{name_parts[-1]}'
                else:
                    username = f'{name_parts[0]}{name_parts[-1]}'
            elif email_format_string == 'flast':
                username = f'{name_parts[0][:1]}{name_parts[-1]}'
            elif email_format_string == 'lastf':
                username = f'{name_parts[-1]}{name_parts[0][:1]}'
            elif email_format_string == 'first.last':
                username = f'{name_parts[0]}.{name_parts[-1]}'
            elif email_format_string == 'first_last':
                username = f'{name_parts[0]}_{name_parts[-1]}'
            elif email_format_string == 'fmlast':
                if len(name_parts) > 2:
                    username = f'{name_parts[0][:1]}{name_parts[1][:1]}{name_parts[-1]}'
                else:
                    username = f'{name_parts[0][:1]}{name_parts[-1]}'
            else:
                # default to 'full'
                username = ''.join(name_parts)
        return {
            'Full Name': person_name,
            'Title': person_title,
            'Location': person_location,
            'Email': f'{username.lower()}@{domain}',
        }

    """
    Iterate through a scraped page and extract employee data from the HTML

    param username_format: Which format should zoomgrab format the employee email addresses, specified in cli options
    param domain: Domain to use for the generated employee email addresses

    return list: List of parsed employee data
    """

    def _get_data_from_page(self, username_format, domain):
        person_results = []
        for row in self.current_page.findAll('tr', {'class': 'tableRow'})[1:]:
            person_results.append(
                self._parse_employee_info(row, username_format, domain))
        return person_results
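
Two notes on the class above. First, the page-count math in _get_pagecount(): a results line such as "1-25 of 1,742 Contacts" yields 1742 once the comma is stripped, and ceil(1742 / 25) = 70 pages. Second, the email_format_string ladder in _parse_employee_info() could equally be written as a dispatch table; a minimal sketch (the names below are illustrative, not part of zoomgrab):

# Hypothetical refactor: map each format string to a username builder.
# `p` is the split name, e.g. ['Joe', 'Z', 'Dirt'] or ['Mary', 'Skinner'].
USERNAME_BUILDERS = {
    'firstlast': lambda p: f'{p[0]}{p[-1]}',
    'firstmlast': lambda p: (f'{p[0]}{p[1][:1]}{p[-1]}' if len(p) > 2
                             else f'{p[0]}{p[-1]}'),
    'flast': lambda p: f'{p[0][:1]}{p[-1]}',
    'lastf': lambda p: f'{p[-1]}{p[0][:1]}',
    'first.last': lambda p: f'{p[0]}.{p[-1]}',
    'first_last': lambda p: f'{p[0]}_{p[-1]}',
    'fmlast': lambda p: (f'{p[0][:1]}{p[1][:1]}{p[-1]}' if len(p) > 2
                         else f'{p[0][:1]}{p[-1]}'),
    'full': lambda p: ''.join(p),
}

def build_username(name_parts, email_format_string):
    # Unknown format strings fall back to 'full', matching the ladder's else.
    builder = USERNAME_BUILDERS.get(email_format_string,
                                    USERNAME_BUILDERS['full'])
    return builder(name_parts)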
Code example #7
File: thermostat.py Project: boatski/pi-stat
class Thermostat(object):
    # SQLite
    con = None

    # Indoor info
    indoorTemperature = 0
    indoorHumidity = 0
    prevIndoorTemperature = 0
    prevIndoorHumidity = 0

    # Outdoor info
    outdoorTemperature = 0
    outdoorHumidity = 0

    offset = 1

    scheduleIsOn = False

    def __init__(self, sensor, weather):
        # Register exitCleanup() to run when the program exits, normally or after a crash
        atexit.register(self.exitCleanup)

        self.sensor = sensor
        self.weather = weather
        self.schedule = Schedule()
        self.output = OutputHandler()
        self.updateSetpoints()
        self.checkSchedule()

    """
	Used to determine what outputs should be switched on and what outputs should be switched off.
	** Minimum on times for Heat/Cool and a Fan delay needs to be implemented. **
	"""

    def control(self):
        self.updateSetpoints()
        self.checkSchedule()

        # Get the sensor readings
        self.sensorData = self.sensor.getSensorData()
        self.weatherData = self.weather.getWeatherData()

        # Only update indoor readings if the dictionary is not empty
        if self.sensorData:
            self.indoorTemperature = self.sensorData["Temp"]
            self.indoorHumidity = self.sensorData["Hum"]

        # Only update outdoor readings if the dictionary is not empty
        if self.weatherData:
            self.outdoorTemperature = self.weatherData["current_observation"]["temp_f"]
            self.outdoorHumidity = self.weatherData["current_observation"]["relative_humidity"]

        if self.indoorTemperature != self.prevIndoorTemperature or self.indoorHumidity != self.prevIndoorHumidity:
            self.prevIndoorTemperature = self.indoorTemperature
            self.prevIndoorHumidity = self.indoorHumidity

        self.setOutput()
        self.output.printOutputStatus()
        # print "Schedule: " + str(self.scheduleIsOn)

    """
	Toggles the outputs based on the current temperature and relative setpoints.
	"""

    def setOutput(self):
        if (
            self.outdoorTemperature >= self.outdoorLockout.getSetpoint()
        ):  # If outdoorTemp >= outdoorLockout, do not heat
            if self.scheduleIsOn:
                if self.indoorTemperature > (self.occupiedCool.getSetpoint() + self.offset):  # Turn on cool
                    self.output.enableCool()
                else:
                    self.output.disableCool()
            else:  # Schedule is off
                if self.indoorTemperature > (self.unoccupiedCool.getSetpoint() + self.offset):  # Turn on cool
                    self.output.enableCool()
                else:
                    self.output.disableCool()
        elif (
            self.outdoorTemperature < self.outdoorLockout.getSetpoint()
        ):  # If outdoorTemp < outdoorLockout, do not cool
            if self.scheduleIsOn:
                if self.indoorTemperature < (self.occupiedHeat.getSetpoint() - self.offset):  # Turn on heat
                    self.output.enableHeat()
                else:
                    self.output.disableHeat()
            else:  # Schedule is off
                if self.indoorTemperature < (self.unoccupiedHeat.getSetpoint() - self.offset):  # Turn on heat
                    self.output.enableHeat()
                else:
                    self.output.disableHeat()

    """
	Grabs the current setpoints from the database.
	"""

    def updateSetpoints(self):
        # Grab setpoint data from the sqlite database
        try:
            con = lite.connect("../../db/pi-stat.db")
            cur = con.cursor()
            cur.execute("SELECT * FROM Thermostat;")

            data = cur.fetchone()

            # Setpoints
            self.occupiedCool = Setpoint(data[0])
            self.unoccupiedCool = Setpoint(data[1])
            self.occupiedHeat = Setpoint(data[2])
            self.unoccupiedHeat = Setpoint(data[3])
            self.outdoorLockout = Lockout(data[4])

        except lite.Error as e:
            print("Error %s:" % e.args[0])
            sys.exit(1)

        finally: