Example No. 1
def run_framework(test_args=None, test_runner=None, test_collector=None):
    """Run the functest framework"""
    if test_args is None:
        test_args = []
    if test_runner is None:
        test_runner = runner.CLIRunner()
    runner.test_runner = test_runner

    if test_collector is None:
        test_collector = collector.Collector()
    collector.test_collector = test_collector

    tests = []
    test_runner.start()

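    # normalize the given test paths to absolute paths; default to the
    # current directory when no arguments were passed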
    test_args = [os.path.abspath(arg) for arg in test_args]

    if not test_args:
        test_args.append(os.path.abspath(os.path.curdir))

    for arg in test_args:
        module_chain = test_collector.create_module_chain(arg)
        tests.append([test_collector.create_test_module(arg), module_chain])

    runner.test_runner.wrap_stdout(global_settings.wrap_stdout,
                                   global_settings.wrap_stderr)
    global_settings.test_runner = test_runner
    totals = frame.execute(tests)
    stdout_wrap = test_runner.get_stdout_wrap()
    if stdout_wrap is None:
        stdout_wrap = ''
    sys.stdout, sys.stderr = sys.__stdout__, sys.__stderr__
    reports.report_summary(totals, stdout_wrap)
    runner.test_runner.summary(totals)
    reports.report_final(totals)
    runner.test_runner.final(totals)
    sleep(.5)
Example No. 2
def testgomarks():
    resData = {}
    ext_uname = ''
    ext_pw = ''
    if request.method == 'GET':
        try:
            if request.args.get('username') and request.args.get('password'):
                ext_uname = request.args.get('username')
                ext_pw = request.args.get('password')
            else:
                return render_template('slcmgo_get_response.html',
                                       bois_ip=request.remote_addr)
        except Exception:
            resData["code"] = "200"
            return jsonify(resData)

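    # response codes set below: 100 = login error, 101 = collection error,
    # 102 = extraction error, 666 = success (marks data included)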
    col = collector.Collector(ext_uname, ext_pw)
    col.makeReq()
    if col.loginError:
        resData["code"] = "100"
        return jsonify(resData)
    if col.collectionError:
        resData["code"] = "101"
        return jsonify(resData)
    if col.errorDuringExtraction:
        resData["code"] = "102"
        return jsonify(resData)
    resData["code"] = "666"
    resData["data"] = col.marksData
    return jsonify(resData)
Example No. 3
def collect(pkg_name):
    """collect the needed package file from system
    create a zip file of these files

    :param pkg_name: the name of needed package
    """
    collect_deb = collector.Collector(pkg_name)
    collect_deb.extract_installed_package()
    collect_deb.package_exist()
    collect_deb.collector_file()  # /usr/...

    # /DEBIAN/...
    control_info = collect_deb.get_control()
    if control_info is not None:
        os.mkdir("donut/DEBIAN")
        with open("donut/DEBIAN/control", 'w') as file:
            file.write(control_info)
        collect_deb.fix_control("donut/DEBIAN/control")
    collect_deb.extract_md5()

    time_setter = archive.Archive("{}/donut".format(os.getcwd()), pkg_name,
                                  "zip", "1 Jan 18")
    time_setter.set_default_time()  # time-sync
    time_setter.pack_it()  # zip them; the archive should be identical on any machine
    collect_deb.clean()
Example No. 4
def build_deb(pkg_name):
    """unzip the packed file and build deb package
    """
    zip_mag = archive.Archive("{}/donut".format(os.getcwd()), pkg_name, "zip", "1 Jan 18")
    zip_mag.un_zip("{}/Coding/gcc-8-base_donut_decoded.zip".format(os.getcwd()),
                   "{}/donut".format(os.getcwd()))
    collect_deb = collector.Collector(pkg_name)
    collect_deb.build("{}/donut".format(os.getcwd()), "{}/{}_donut.deb".format(os.getcwd(), pkg_name))
Example No. 5
    def setUp(self):
        self.cc = collector.Collector()
        self.a = self.cc.add_symbol("a", "a", type=collector.TYPE_FUNCTION, stack_size=1)
        self.b = self.cc.add_symbol("b", "b", type=collector.TYPE_FUNCTION, stack_size=10)
        self.c = self.cc.add_symbol("c", "c", type=collector.TYPE_FUNCTION, stack_size=100)
        self.d = self.cc.add_symbol("d", "d", type=collector.TYPE_FUNCTION, stack_size=1000)
        self.e = self.cc.add_symbol("e", "e", type=collector.TYPE_FUNCTION, stack_size=10000)
        self.f = self.cc.add_symbol("f", "f", type=collector.TYPE_FUNCTION)
        self.cc.enhance_call_tree()
        self.cc.add_function_call(self.a, self.b)
        self.cc.add_function_call(self.a, self.c)
        self.cc.add_function_call(self.b, self.a)
        self.cc.add_function_call(self.c, self.b)
        self.cc.add_function_call(self.c, self.d)
        self.cc.add_function_call(self.d, self.e)
        self.cc.add_function_call(self.d, self.f)
        self.h = BacktraceHelper(self.cc)
Example No. 6
def add():
    """Endpoint for adding gif urls manually."""
    if request.method == 'GET':
        return render_template(
            'add.html',
        )

    post_data = request.form
    gif_url = post_data.get('url', '')
    if not gif_url:
        return redirect(url_for('add'))

    c = collector.Collector()
    c.download_gifs([gif_url])
    args = request.view_args.copy()

    return redirect(url_for('index', **args))
Example No. 7
def main():
    global driver

    print("processing!")
    driver = webdriver.Chrome("D:/chromedriver.exe")
    l = login.Login(driver, username, password)
    l.signin()
    driver.get('https://www.instagram.com/unitytechnologies/')

    col = collector.Collector(driver)
    col.get_followers()
    print("followers: ", col.get_num_of_followers())
    print("post: ", col.get_num_of_post())
    print("following: ", col.get_num_of_following())

    followButton = interaction.Interaction(driver)
    followButton.follow()

    time.sleep(60)
Example No. 8
def main():

    abus = Bus()
    bbus = Bus()
    cbus = Bus()

    ct = collector.Collector(abus)
    rt = recorderdb.RecorderDB(abus, bbus, cbus)
    ut = uploader.Uploader(bbus, cbus)
    ht = hearbeat.Hearbeat()

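    # start every worker thread, then block until they all finish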
    ct.start()
    rt.start()
    ut.start()
    ht.start()

    ct.join()
    rt.join()
    ut.join()
    ht.join()
Example No. 9
    def callback(self):
        location = str(self.combo_ent.get()).strip()
        min_price = ["min_price=", str(self.ent_min.get()).strip().replace(" ", "")]
        max_price = ["max_price=", str(self.ent_max.get()).strip().replace(" ", "")]
        look_for_craig = ["auto_make_model=", str(self.combosub_ent.get()).strip().replace(" ", "+")]
        model_year = ["min_auto_year=", str(self.ent_mod.get()).strip().replace(" ", "")]
        odometer = ["max_auto_miles=", str(self.ent_odo.get()).strip().replace(" ", "")]

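        # build the Craigslist search query from the non-empty fields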
        cquery = []
        qlist = [min_price, max_price, look_for_craig, model_year, odometer]
        for q in qlist:
            if q[1] != "":
                cquery.append(q[0] + q[1])
        url = f"https://{location}.craigslist.org/search/cta?" + ("&".join(cquery[0:]))
        print(url)

        open_collect = collector.Collector(url)
        count_car = open_collect.totalcount_factor()
        run_scraper = open_collect.collect()
        print(count_car)
        print(run_scraper)
        hugo = DictatorWS.Dictator(run_scraper)
Example No. 10
def _run():
    try:
        curSettings = settings.Settings(_getWorkDir() + "/conf.txt")
    except settings.ReadFailed:
        logging.warning("settings read failed")
        return

    curCollector = collector.Collector()
    curCollector.addSource(musicbrainz_source.MusicbrainzSource())

    curStorage = storage.Storage()
    try:
        with curStorage.connect(_getWorkDir() + "/save.dat"):
            curController = controller.Controller(curStorage, curCollector,
                                                  curSettings)

            atClient = at_client.AtClient(curSettings)
            atClient.setStorage(curStorage)

            curController.run()

    except InternalStorageError:
        logging.error("storage connect failed")
        raise
    # extension = ".session"
    # extension = '.win'
    extension = '.adr'
    url_regex_simple_firefox = '"url":"([^"]*)","'
    #url_regex_all_urls = """'(?i)\b((?:https?:(?:/{1,3}|[a-z0-9%])|[a-z0-9.\-]+[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)/)(?:[^\s()<>{}\[\]]+|\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\))+(?:\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’])|(?:(?<!@)[a-z0-9]+(?:[.\-][a-z0-9]+)*[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)\b/?(?!@)))'"""
    url_regex_http_https = '.*(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+).*'

    def __init__(self):
        self.collection = set()

    def parse(self, file):
        # pattern = re.compile(SimpleTextParser.url_regex_simple_firefox)
        pattern = re.compile(SimpleTextParser.url_regex_http_https)
        for i, line in enumerate(file):
            for match in re.finditer(pattern, line):
                print('Found on line {0}: {1}'.format(i + 1, match.groups()))
                self.collection.add(match.group(1))


if __name__ == "__main__":
    arg_parser = collector.setup_parser('txt')
    argsDict = collector.create_args_dict(arg_parser)
    collector = collector.Collector(
        SimpleTextParser, argsDict['srcdir'], argsDict['destdir'],
        argsDict.get('write_separate_result_files'))
    if 'diff' in argsDict:
        collector.make_diff(argsDict['diff'])
    else:
        collector.collect_all()
        collector.create_result_file()
Example No. 12
import collector as coll

# Hamilton Bradford
# This is my test file

x = coll.Collector()
x.add(5)
x.add(10)
print("Total amount of numbers in the list: ", x.count())
print("Sum is ", x.sum())
print("Sum of the squares is ", x.sum_squares())
print("The mean is ", x.average())
print("The variance is ", x.variance())
print("The standard deviation is ", x.standard_deviation())
print("The numbers in the list are: ")
x.weprint()


Example No. 13
def main():
    col = collector.Collector()
    col.run()
    col.data_store.data_store
Example No. 14
        if tag == 'a':
            pocket_tags = dict(attrs).get('tags')
            if not pocket_tags:
                pocket_tags = 'without_tag'

            url = dict(attrs).get('href')
            self.collection.add(url)
            if pocket_tags not in self.urls_by_category:
                self.urls_by_category[pocket_tags] = set()
            self.urls_by_category[pocket_tags].add(url)

    def handle_endtag(self, tag):
        pass
        # print("Encountered an end tag :", tag)

    def handle_data(self, data):
        pass
        # print("Encountered some data  :", data)


if __name__ == "__main__":
    arg_parser = collector.setup_parser('HTML')
    argsDict = collector.create_args_dict(arg_parser)
    collector = collector.Collector(HTMLPocketExportParser, argsDict['srcdir'],
                                    argsDict['destdir'],
                                    argsDict['write_separate_result_files'])
    if 'diff' in argsDict:
        collector.make_diff(argsDict['diff'])
    else:
        collector.collect_all()
        collector.create_result_file()
Example No. 15
def main():
    for extension in constants.SUPPORTED_EXTENSIONS:
        collector_obj = collector.Collector(extension)
        collector_obj.create_work_folder()
        collector_obj.scan_for_files()
Example No. 16
            for g in getNavigableStrings(c):
                yield g


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--proxy_host",help="set Tor proxy host")
    parser.add_argument("--proxy_port",help="set Tor proxy port")
    parser.add_argument("--filepath",help="set output filepath")
    args = parser.parse_args()

    host = args.proxy_host
    port = args.proxy_port
    filepath = args.filepath

    cl = collector.Collector(host,port)
    hs_lst = cl.get_link()

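    # fetch each collected hidden-service URL and parse its HTML content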
    for hs_url in hs_lst:
        try:
            output = ""
            content = cl.get_content(hs_url)
            soup = BeautifulSoup(content,"html.parser")

            """
            for comment in soup(text=lambda x:isinstance(x,Comment)):
                comment.extract()

            for script in soup.find_all('script',src=False):
                script.decompose()
Example No. 17
def generate_vs_project(repo_dir, source_dir, build_dir, solution_path):
    c = collector.Collector(source_dir)
    c.run()
    g = generator.VS2019Generator(c, repo_dir, build_dir, solution_path)
    g.run()
Example No. 18
        self.collection = set()

    def parse(self, file):
        self.feed(file.read())
        self.close()

    def handle_starttag(self, tag, attrs):
        # print("Encountered a start tag:", tag)
        if tag == 'a':
            self.collection.add(dict(attrs).get('href'))

    def handle_endtag(self, tag):
        pass
        # print("Encountered an end tag :", tag)

    def handle_data(self, data):
        pass
        # print("Encountered some data  :", data)


if __name__ == "__main__":
    arg_parser = collector.setup_parser('HTML')
    argsDict = collector.create_args_dict(arg_parser)
    collector = collector.Collector(MyHTMLParser, argsDict['srcdir'],
                                    argsDict['destdir'],
                                    argsDict['write_separate_result_files'])
    if 'diff' in argsDict:
        collector.make_diff(argsDict['diff'])
    else:
        collector.collect_all()
        collector.create_result_file()
Example No. 19
        if utils.is_jobs_killed(jobs):
            report.update_jobs(jobs)
            report.run()
            report.to_csv('%s/%s-case1-pass%d.csv' %
                          (outdir, JOB_NAME, PASSE_NUM))
            break


def usage():
    print('Usage: python main.py [run_case1|run_case2|wait_for_finished|wait_for_cleaned]')


if __name__ == "__main__":
    if len(sys.argv) != 2:
        usage()
        exit(0)

    c = collector.Collector()
    if sys.argv[1] == 'run_case1':
        run_case1(c)
    elif sys.argv[1] == 'run_case2':
        run_case2(c)
    elif sys.argv[1] == 'wait_for_finished':
        wait_for_finished(c)
    elif sys.argv[1] == 'wait_for_cleaned':
        wait_for_cleaned(c)
    elif sys.argv[1] == 'merge_case1_reports':
        merge_case_one_reports(JOB_NAME, PASSES)
    else:
        usage()
Example No. 20
#  Kirsty Russell
#  CS 4720
#  Assignment 1 Test
#  Professor Setzer
#  January 22, 2019

import collector as col

a = col.Collector()
a.add(1)
a.add(5)
a.add(6)
a.add(10)
a.add(20)
print("Sum:                 ", a.sum())
print("Count:               ", a.count())
print("Sum of Squares:      ", a.sum_squares())
print("Average:             ", a.average())
print("Variance:            ", a.variance())
print("Standard Deviation:  ", a.standard_deviation())
Example No. 21
                r'/')[-1] and event.key[2] is False:
            logging.info('Start reloading the custom unsplittable-word dictionary')
            lock.acquire()
            collector_service.reload_dict()
            lock.release()
            logging.info('Finished reloading the custom unsplittable-word dictionary')
        # Warning! Reloading the custom dictionary has been found to raise errors.
        # When /hanlp.properties is modified, reload the HanLP segmentation dictionary:
        # if event.key[0] == 'modified' and 'hanlp' in event.key[1] and event.key[2] is False:
        #     logging.info('Start reloading the HanLP custom dictionary')
        #     collector_service.segmentor.reload_custom_dictionry()
        #     logging.info('Finished reloading the HanLP custom dictionary')


if __name__ == "__main__":
    collector_service = collector.Collector()

    lock = threading.Lock()

    t = WatchFileThread()
    t.start()

    t_d = update_dic.UpdateDictThread()
    t_d.start()

    settings = {
        'template_path': 'views',  # HTML template files
        'static_path': 'statics',  # static files (css, js, img)
        'static_url_prefix': '/statics/',  # URL prefix for static files
        'cookie_secret': 'adm',  # secret string used to sign cookies
    }
Example No. 22
    def __init__(self, methodName):
        super().__init__(methodName)
        self.av = collector.Collector()
        with open("tests/sample_data.json", "r") as self.sample_data_file:
            self.sample_data = json.loads(self.sample_data_file.readline())