def qq_captcha_pass():
    # load the captcha background and the slider piece that qq_login() saved
    big_image = cv.imread(GlobalParam.get_test_image_path() +
                          'test_qq_mail_big.png')
    small_image = cv.imread(GlobalParam.get_test_image_path() +
                            'test_qq_mail_small.png')
    # preview only the slider piece for now; big_image is loaded but not yet used
    cv.imshow('1', small_image)
    cv.waitKey(0)
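# A minimal sketch (not part of the original stub) of how the slider offset
# could be computed from the two images above with OpenCV template matching;
# any scaling between page pixels and image pixels is an assumption left out here.
def qq_captcha_offset(big_image, small_image):
    big_gray = cv.cvtColor(big_image, cv.COLOR_BGR2GRAY)
    small_gray = cv.cvtColor(small_image, cv.COLOR_BGR2GRAY)
    result = cv.matchTemplate(big_gray, small_gray, cv.TM_CCOEFF_NORMED)
    _, _, _, max_loc = cv.minMaxLoc(result)
    # max_loc is the top-left corner of the best match; its x coordinate is
    # the horizontal distance the slider has to travel
    return max_loc[0]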
def qq_login(qq_driver, user_name, user_pass):
    element = find_element_by_id(qq_driver, 'qqLoginTab')
    element.click()
    qq_driver.switch_to.frame('login_frame')
    element = find_element_by_id(qq_driver, 'u')
    element.click()
    element.send_keys(user_name)
    element = find_element_by_id(qq_driver, 'p')
    element.click()
    element.send_keys(user_pass)
    element = find_element_by_id(qq_driver, 'login_button')
    element.click()
    wait_for_frame_and_switch_to_frame(qq_driver, 'tcaptcha_iframe')
    img_element = find_element_by_id(qq_driver, 'slideBg')
    wait_for_element_appeared(qq_driver, img_element)
    big = img_element.get_attribute('src')
    request_download_file_by_url(
        big,
        GlobalParam.get_test_image_path() + 'test_qq_mail_big.png')
    img_element = find_element_by_id(qq_driver, 'slideBlock')
    wait_for_element_appeared(qq_driver, img_element)
    small = img_element.get_attribute('src')
    request_download_file_by_url(
        small,
        GlobalParam.get_test_image_path() + 'test_qq_mail_small.png')
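# A hedged follow-up (not in the original function): once qq_captcha_offset()
# returns a horizontal distance, the slider could be dragged with Selenium's
# ActionChains; the element id 'tcaptcha_drag_thumb' and a 1:1 mapping between
# image pixels and page pixels are assumptions.
def qq_drag_slider(qq_driver, offset):
    from selenium.webdriver import ActionChains
    slider = find_element_by_id(qq_driver, 'tcaptcha_drag_thumb')
    ActionChains(qq_driver).click_and_hold(slider).move_by_offset(
        offset, 0).release().perform()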
Example #3
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=GlobalParam.get_ml_ch2_housing_data()):
    # download and extract the housing tarball only when the data directory
    # does not exist yet
    if not os.path.isdir(housing_path):
        os.makedirs(housing_path)
        tgz_path = os.path.join(housing_path, "housing.tgz")
        request.urlretrieve(housing_url, tgz_path)
        housing_tgz = tarfile.open(tgz_path)
        housing_tgz.extractall(path=housing_path)
        housing_tgz.close()
def android_aapt_install():
    # push the standalone aapt binary to the device and make it executable
    print(
        WindowsOsUtil.get_command_output(' '.join(
            ('adb push', GlobalParam.get_aapt_path(), '/data/local/tmp'))))
    print(
        WindowsOsUtil.get_command_output(
            'adb shell chmod 0755 /data/local/tmp/aapt-arm-pie'))
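# android_aapt_get_app_info() is used below but not shown in this snippet; a
# minimal version (an assumption, mirroring the binary name pushed above)
# could simply run "aapt dump badging" on the given APK path:
def android_aapt_get_app_info(apk_path):
    return WindowsOsUtil.get_command_output(
        'adb shell /data/local/tmp/aapt-arm-pie dump badging ' + apk_path)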
def android_all_package_list():
    msg = filter(
        None,
        WindowsOsUtil.get_shell_output(['adb', 'shell'],
                                       'pm list packages -3 -f').split('\r\n'))
    package_list = []
    for ms in msg:
        # each entry looks like "package:/data/app/.../base.apk=com.example.app";
        # slice out the APK path, run aapt "dump badging" on it, and split the
        # badging output on single quotes so each value follows its key
        ms = android_aapt_get_app_info(
            ms[ms.index('/'):ms.rindex('=')]).replace('\r\n', '').split('\'')
        if ms.count('package: name=') == 1:
            pkg = ms[ms.index('package: name=') + 1]
        else:
            pkg = ''
        if ms.count('application-label-zh-CN:') == 1:
            name = ms[ms.index('application-label-zh-CN:') + 1]
        elif ms.count('application-label:') == 1:
            name = ms[ms.index('application-label:') + 1]
        elif ms.count('application: label=') == 1:
            name = ms[ms.index('application: label=') + 1]
        else:
            name = 'no label name'
        if ms.count('launchable-activity: name=') == 1:
            launch = ms[ms.index('launchable-activity: name=') + 1]
        else:
            launch = android_search_app_activity(pkg)
        package_list.append(','.join((pkg, name, launch)))

    write_string_to_file(
        GlobalParam.get_android_apk_list().replace(
            'android_apk_list',
            android_product_info() + '_android_apk_list'), package_list,
        'utf8')
    return package_list
def android_search_package_by_name(app_name):
    read_list = list(
        read_file(
            GlobalParam.get_android_apk_list().replace(
                'android_apk_list',
                android_product_info() + '_android_apk_list'),
            'utf8').strip('][').replace('\'', '').split(', '))
    for rl in read_list:
        if app_name in rl:
            parts = rl.split(',')
            return parts[0], parts[2]
Example #7
def split_gif_to_images(gif_path):
    image_object = Image.open(gif_path)
    if image_object.is_animated:
        for frame in range(0, image_object.n_frames):
            image_object.seek(frame)
            image_object.save(fp=GlobalParam.get_gif_import() +
                              str(frame) + '.png',
                              format='PNG')


# fp_out = GlobalParam.get_gif_export() + 'out.gif'
# split_gif_to_images(fp_out)

# fp_in = GlobalParam.get_gif_import()
# fp_out = GlobalParam.get_gif_export() + 'out.gif'
# ImageUtils.combine_images_to_gif(fp_in, 'png', fp_out)
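# A hedged sketch of what ImageUtils.combine_images_to_gif could look like
# (the real helper lives elsewhere in this repo, so the name and the 100 ms
# frame duration are assumptions): collect the frames in numeric file-name
# order and let Pillow write the animated GIF.
import glob
import os
def combine_images_to_gif_sketch(fp_in, ext, fp_out):
    paths = sorted(glob.glob(fp_in + '*.' + ext),
                   key=lambda p: int(os.path.splitext(os.path.basename(p))[0]))
    frames = [Image.open(p) for p in paths]
    # save_all + append_images turns the frame list into a looping GIF
    frames[0].save(fp_out, save_all=True, append_images=frames[1:],
                   duration=100, loop=0)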
Example #8
def visualizing_geographical_data(train_set_data):
    california_img = mpimg.imread(GlobalParam.get_ml_ch2_housing_image())
    visual_housing = train_set_data.copy()
    visual_housing.plot(kind="scatter", x="longitude", y="latitude",
                        figsize=(10, 7),
                        s=visual_housing['population'] / 100,
                        label="Population",
                        c="median_house_value", cmap=plt.get_cmap("jet"),
                        colorbar=False, alpha=0.4)
    plt.imshow(california_img, extent=[-124.55, -113.80, 32.45, 42.05], alpha=0.5,
               cmap=plt.get_cmap("jet"))
    plt.ylabel("Latitude", fontsize=14)
    plt.xlabel("Longitude", fontsize=14)
    prices = visual_housing["median_house_value"]
    tick_values = np.linspace(prices.min(), prices.max(), 11)
    cbar = plt.colorbar()
    cbar.ax.set_yticklabels(["$%dk" % round(v / 1000) for v in tick_values], fontsize=14)
    cbar.set_label('Median House Value', fontsize=16)

    plt.legend(fontsize=16)
    plt.show()
Example #9
from opencv.cvutils import *
from python_common.global_param import GlobalParam

# writing the video is not working yet; frames need to be resized before they are written to the video file
play_and_save_Video(GlobalParam.get_video_input(),
                    GlobalParam.get_video_output(), 1, 1)

# screen record: show the OpenCV window and write a video file
screen_record(0, 0, 2560, 1600, 'avc1', 'mp4', 10, 0.5)

# screen record: no OpenCV window, write a video file, and stop after a timeout
screen_record(0, 0, 640, 400, 'avc1', 'mp4', 10, 0.5, 30)

# screen record: show the OpenCV window and write a video file, using a screen offset
screen_record(100, 200, 640, 400, 'avc1', 'mp4', 10, 0.5)
Example #10
from opencv.cvutils import *
from python_common.global_param import GlobalParam

import os

'''
clean up the character images when the characters folder isn't empty
'''
if len(os.listdir(GlobalParam.get_character_output())) > 0:
    for i in os.listdir(GlobalParam.get_character_output()):
        os.remove(GlobalParam.get_character_output() + i)

'''
detect the text area and write it to a file
'''
input = cv.imread(GlobalParam.get_image_input(), cv.IMREAD_UNCHANGED)
# showAllContours(input, detectTextAreaFromImage(input), 10, 5)
input = findTextAreaContours(input, detectTextAreaFromImage(input),
                             7, 10, 0, 0, 7, 12)
cv.imwrite(GlobalParam.get_image_output(), input)


'''
split the text area image into single-character images, then collect all the
image names and sort them by name
'''
getLastWordsContour(GlobalParam.get_image_output(),
                    GlobalParam.get_character_output(), 50, 50,
                    GlobalParam.get_sentence_output())
char_image_list = []
for i in os.listdir(GlobalParam.get_character_output()):
    char_image_list.append(int(i.replace('.png', '')))
char_length = len(char_image_list)
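
# The fragment below follows the structure of the OpenCV face-detection
# tutorial; its function header is missing from this snippet, so a minimal
# detectAndDisplay wrapper is assumed here: convert the frame to grayscale,
# equalize it, and run the face cascade before the drawing loop that follows.
def detectAndDisplay(face_frame):
    frame_gray = cv.cvtColor(face_frame, cv.COLOR_BGR2GRAY)
    frame_gray = cv.equalizeHist(frame_gray)
    # -- Detect faces
    faces = face_cascade.detectMultiScale(frame_gray)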
    for (x, y, w, h) in faces:
        center = (x + w // 2, y + h // 2)
        face_frame = cv.ellipse(face_frame, center, (w // 2, h // 2), 0, 0, 360, (255, 0, 255), 4)

        face_roi = frame_gray[y:y + h, x:x + w]
        # -- In each face, detect eyes
        eyes = eyes_cascade.detectMultiScale(face_roi)
        for (x2, y2, w2, h2) in eyes:
            eye_center = (x + x2 + w2 // 2, y + y2 + h2 // 2)
            radius = int(round((w2 + h2) * 0.25))
            face_frame = cv.circle(face_frame, eye_center, radius, (255, 0, 0), 4)

    cv.imshow('Capture - Face detection', face_frame)


face_cascade_name = GlobalParam.get_face_detect_face_xml()
eyes_cascade_name = GlobalParam.get_face_detect_eyes_xml()

face_cascade = cv.CascadeClassifier()
eyes_cascade = cv.CascadeClassifier()

# 1. Load the cascades
if not face_cascade.load(cv.samples.findFile(face_cascade_name)):
    print('--(!)Error loading face cascade')
    exit(0)
if not eyes_cascade.load(cv.samples.findFile(eyes_cascade_name)):
    print('--(!)Error loading eyes cascade')
    exit(0)

camera_device = 0
# 2. Read the video stream
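# The capture loop below is not in the original fragment; it follows the
# standard OpenCV object-detection tutorial structure this snippet is based on.
cap = cv.VideoCapture(camera_device)
if not cap.isOpened():
    print('--(!)Error opening video capture')
    exit(0)
while True:
    ret, frame = cap.read()
    if frame is None:
        print('--(!) No captured frame -- Break!')
        break
    detectAndDisplay(frame)
    if cv.waitKey(10) == 27:  # stop on Esc
        break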
Example #12
from selenium.common.exceptions import NoAlertPresentException
from selenium.common.exceptions import TimeoutException
from file_and_system.file_utils import write_binary_to_file
from file_and_system.windows_os_utils import WindowsOsUtil
from python_common.global_param import GlobalParam
from selenium_test.selenium_utils import *

WindowsOsUtil.kill_process_by_name('MicrosoftWebDriver.exe')
driver = init_driver('edge', GlobalParam.get_edge_driver_path())
open_browser_single_tab(driver, 'https://www.12306.cn/index/')

# wait until the DOM reports the 'complete' readyState
wait_for_page_full_loaded(driver)
loading_element = find_element_by_xpath(driver, '//div[@id="page-loading"]')
wait_for_element_disappeared(driver, loading_element)
try:
    wait_for_element_to_be_clickable(driver, '//a[text()="登录"]')
    wait_for_page_full_loaded(driver)
    wait_for_element_to_be_clickable(driver, '//a[text()="账号登录"]')

    # grab the captcha picture (a base64 data URL) and save it to a file
    img_text = find_element_by_xpath(driver,
                                     '//img[@id="J-loginImg"]').get_attribute(
                                         'src')[len('data:image/jpg;base64,'):]
    write_binary_to_file(GlobalParam.get_image_input(), img_text)

    find_element_by_xpath(driver,
                          '//input[@id="J-userName"]').send_keys('username')
    find_element_by_xpath(driver,
                          '//input[@id="J-password"]').send_keys('password')
Example #13
def sort_by_target(mnist):
    # fetch_openml() returns MNIST unsorted, unlike the retired fetch_mldata();
    # re-order the train and test halves by label so the split matches the
    # book's original dataset
    reorder_train = np.array(
        sorted([(target, i)
                for i, target in enumerate(mnist.target[:60000])]))[:, 1]
    reorder_test = np.array(
        sorted([(target, i)
                for i, target in enumerate(mnist.target[60000:])]))[:, 1]
    mnist.data[:60000] = mnist.data[reorder_train]
    mnist.target[:60000] = mnist.target[reorder_train]
    mnist.data[60000:] = mnist.data[reorder_test + 60000]
    mnist.target[60000:] = mnist.target[reorder_test + 60000]


mnist = fetch_openml('mnist_784',
                     data_home=GlobalParam.get_ml_ch3_sklearn_data_home(),
                     version=1,
                     cache=True)
mnist.target = mnist.target.astype(np.int8)
sort_by_target(mnist)

X, y = mnist["data"], mnist["target"]
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]
shuffle_index = np.random.permutation(60000)
X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]
y_train_5 = (y_train == 5)
y_test_5 = (y_test == 5)
sgd_clf = SGDClassifier(max_iter=5, tol=-np.infty, random_state=42)
sgd_clf.fit(X_train, y_train_5)
some_digit = X[36000]
# y_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3)
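# A short follow-up (not in the original snippet): predict the single digit
# selected above and estimate accuracy with 3-fold cross-validation.
from sklearn.model_selection import cross_val_score
print(sgd_clf.predict([some_digit]))
print(cross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring="accuracy"))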
Example #14
from file_and_system.windows_os_utils import WindowsOsUtil
from selenium_test.selenium_utils import init_driver, open_browser_single_tab
from python_common.global_param import GlobalParam

WindowsOsUtil.kill_process_by_name('MicrosoftWebDriver.exe')
WindowsOsUtil.kill_process_by_name('chromedriver.exe')
WindowsOsUtil.kill_process_by_name('IEDriverServer.exe')
chrome_driver = init_driver('chrome', GlobalParam.get_chrome_driver_path())
open_browser_single_tab(chrome_driver, 'https://www.baidu.com')
chrome_driver.quit()

edge_driver = init_driver('edge', GlobalParam.get_edge_driver_path())
open_browser_single_tab(edge_driver, 'https://www.baidu.com')
edge_driver.quit()

ie_driver = init_driver('ie', GlobalParam.get_ie_driver_path())
open_browser_single_tab(ie_driver, 'https://www.baidu.com')
ie_driver.quit()



Example #15
# create the movie table in both MariaDB and PostgreSQL
create_table_sql = '''
        CREATE TABLE movie
        (
            id               INT PRIMARY KEY NOT NULL,
            name             VARCHAR(100)    NOT NULL,
            chnname          VARCHAR(50),
            main_cast        VARCHAR(50),
            year             VARCHAR(10)     NOT NULL,
            region           VARCHAR(20)     NOT NULL,
            type             VARCHAR(20),
            viewed           VARCHAR(5)      NOT NULL,
            want_to_review   VARCHAR(5)      NOT NULL
        );
      '''
maria_connection = connect_to_databases(GlobalParam.get_mariadb_url(),
                                        GlobalParam.get_mariadb_user(),
                                        GlobalParam.get_mariadb_password())
execute_sql(maria_connection, create_table_sql, False)
maria_connection.close()
postgresql_connection = connect_to_databases(GlobalParam.get_pgsql_url(),
                                             GlobalParam.get_pgsql_user(),
                                             GlobalParam.get_pgsql_password())
execute_sql(postgresql_connection, create_table_sql, False)
postgresql_connection.close()

# read the Excel sheet dataset, then insert the rows into the database table
# (a sketch of that step follows the SQL below)
insert_many_sql = '''
        INSERT INTO movie (id, name, chnname, main_cast, year, region, type, viewed, want_to_review)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)
      '''
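# A hedged sketch of the missing tail of this example (the snippet stops after
# the INSERT statement): read the sheet with pandas and bulk-insert the rows
# through the DB-API executemany call. The sheet name 'movie', the use of
# pandas instead of this repo's read_excel helper, and the cursor handling are
# assumptions; values are stringified so both drivers can adapt them.
import pandas as pd
df = pd.read_excel(GlobalParam.get_excel_datasets(), sheet_name='movie')
rows = [tuple(str(v) for v in row) for row in df.itertuples(index=False)]
maria_connection = connect_to_databases(GlobalParam.get_mariadb_url(),
                                        GlobalParam.get_mariadb_user(),
                                        GlobalParam.get_mariadb_password())
cursor = maria_connection.cursor()
cursor.executemany(insert_many_sql, rows)
maria_connection.commit()
maria_connection.close()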
Example #16
from selenium_test.selenium_utils import *
from file_and_system.windows_os_utils import WindowsOsUtil
from python_common.global_param import GlobalParam
import time

WindowsOsUtil.kill_process_by_name('MicrosoftWebDriver.exe')

# open multiple tabs, switch to one of them, then close the driver
driver = init_driver('edge', GlobalParam.get_edge_driver_path())
url_list = ['tieba.baidu.com', 'www.baidu.com', 'cn.bing.com']
open_browser_multi_tab(driver, url_list)
time.sleep(2)
switch_to_tab(driver, 'www.baidu.com')
time.sleep(5)
close_driver(driver)
Example #17
from opencv.cvutils import *
from python_common.global_param import GlobalParam

#------------examples----------------#
'''
image read, modify, show and write functions
'''
input = cv.imread(GlobalParam.get_image_input(), cv.IMREAD_UNCHANGED)
# each call below demonstrates one utility on the original image independently,
# so `output` only keeps the result of the last call (the rotation)
output = resizeImage(input, 2, 1, 3)
output = reverse_color_image(input)
output = denoiseImage(input)
output = rotateImage(input, 20)
cv.imshow('12', output)
cv.waitKey(0)
cv.imwrite(GlobalParam.get_image_output(), output)
'''
combine two images
'''
input1 = cv.imread(GlobalParam.get_image_input(), cv.IMREAD_UNCHANGED)
input2 = cv.imread(GlobalParam.get_image_output(), cv.IMREAD_UNCHANGED)
output = resizeImage(combineTwoImages(input1, input2, 'portrait'), 1, 1, 2)
'''
write text on an image
'''
# this helper can only write ASCII text
input = cv.imread(GlobalParam.get_sentence_output(), cv.IMREAD_UNCHANGED)
output = writeTextOnImageAscii(input, '12444', (20, 20),
                               cv.FONT_HERSHEY_SIMPLEX, 1, (209, 80, 0, 255),
                               3)

# write Unicode text, such as Chinese or Japanese
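# A hedged sketch of the Unicode branch (this snippet is cut off here): the
# usual approach is to round-trip through Pillow, since cv.putText handles
# only ASCII. The font file path is an assumption, and a 3-channel BGR image
# is assumed for the colour conversion.
import numpy as np
from PIL import Image, ImageDraw, ImageFont
pil_image = Image.fromarray(cv.cvtColor(output, cv.COLOR_BGR2RGB))
draw = ImageDraw.Draw(pil_image)
font = ImageFont.truetype('simhei.ttf', 32)  # assumed CJK-capable font file
draw.text((20, 60), '你好，世界', font=font, fill=(209, 80, 0))
output = cv.cvtColor(np.array(pil_image), cv.COLOR_RGB2BGR)
cv.imshow('unicode', output)
cv.waitKey(0)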
Example #18
def load_housing_data(housing_path=GlobalParam.get_ml_ch2_housing_data()):
    csv_path = os.path.join(housing_path, "housing.csv")
    return pd.read_csv(csv_path)
Example #19
from python_common.global_param import GlobalParam
from datasets.datasets_utils import read_excel

# read an Excel sheet; the converters mapping decides each column's type.
# If every column should be read as str, the call below is equivalent to
# read_excel(excel_datasets, 'movie', True, dtype=str)
result = read_excel(GlobalParam.get_excel_datasets(),
                    'movie_maria',
                    True,
                    converters={
                        'id': int,
                        'name': str,
                        'chnname': str,
                        'main_cast': str,
                        'year': str,
                        'region': str,
                        'type': str,
                        'viewed': str,
                        'want_to_review': str
                    })
print(result)