Example No. 1
def main(args):
    host = typ.Host()
    runner = typ.Runner(host)
    parser = ArgumentParser(host)
    parser.prog = os.path.basename(sys.argv[0])
    parser.description = __doc__
    parser.formatter_class = argparse.RawDescriptionHelpFormatter
    runner.parse_args(
        parser=parser,
        argv=args,
        isolate=['installer_test.*'],  # InstallerTest must be serialized.
        top_level_dir=CUR_DIR,
        retry_limit=3,  # Retry failures by default since the tests are flaky.
    )
    if parser.exit_status is not None:
        return parser.exit_status

    # Stuff args into environment vars for use by child procs.
    _prepare_env_for_subprocesses(parser, runner.args)

    try:
        return runner.run()[0]
    except KeyboardInterrupt:
        host.print_("interrupted, exiting", stream=host.stderr)  # `self` does not exist here; use the local typ Host
        return 130
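Example 1 forwards its parsed arguments to child processes through environment variables, which Example No. 14 later reads back (the CMI_* names). The helper itself is not shown; a minimal sketch of what _prepare_env_for_subprocesses might look like, with assumed attribute names on `args`, could be:

import os

def _prepare_env_for_subprocesses(parser, args):
    # Hypothetical sketch only: mirror parsed arguments into environment
    # variables so child test processes can recover them. The CMI_* names
    # match those consumed in Example No. 14; the attribute names on
    # `args` are assumptions.
    if getattr(args, 'force_clean', False):
        os.environ['CMI_FORCE_CLEAN'] = '1'
    if getattr(args, 'output_dir', None):
        os.environ['CMI_OUTPUT_DIR'] = args.output_dir
    if getattr(args, 'installer_path', None):
        os.environ['CMI_INSTALLER_PATH'] = args.installer_path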
Example No. 2
    def init_parser(self):
        parser = ArgumentParser()

        admin_branch, help_branch, course_branch = \
            parser.add_argument('branch', branching=[('admin', lambda x: x == 'admin'), ('help', lambda x: x == 'help'),
                                                     ('course', lambda x: True)],
                                help='select course, admin, or help')
        course_branch.help = 'Enter your course to submit files to it.'
        course_branch.add_argument(
            'assignment', help='specify which assignment you are submitting')
        course_branch.add_argument(
            'files',
            multiple=0,
            help='specify the files which you are submitting')

        admin_branch.add_argument(
            'admin-select',
            help='choose one of: add-admin, remove-admin, create-course, '
                 'delete-course, display-admins, display-courses')
        admin_branch.add_argument('names', optional=True, multiple=0)
        # Disabled branching configuration for admin-select:
        # branching=[('add-admin', lambda x: x == 'add-admin'),
        #            ('remove-admin', lambda x: x == 'remove-admin'),
        #            ('create-course', lambda x: x == 'create-course'),
        #            ('delete-course', lambda x: x == 'delete-course'),
        #            ('display-admins', lambda x: x == 'display-admins'),
        #            ('display-courses', lambda x: x == 'display-courses')],

        return parser
Example No. 3
def __init__(self, argv):
    arg_parser = ArgumentParser()
    arg = arg_parser.parse(argv)  # use the argv passed in rather than re-reading sys.argv
    self.nb_person = arg[0]
    self.simulation_type = arg[1]
    self.metrics_enable = arg[2]
    self.scenario = None
Example No. 4
def main():
    """Main function for calling others"""
    parsing = ArgumentParser()
    rooms_file, students_file = parsing.args_info()
    example = FileReader()
    rooms = example.file_reader(rooms_file)
    students = example.file_reader(students_file)
    new_info = Distribution(rooms, students).student_distribution()
    result = JsonExporter(new_info).unloading()
    print(result)
Example No. 5
def __init__(self):
    args = ArgumentParser()
    self.count = args.text_start
    self.urls = url_manager.UrlManager()
    self.downloader = html_downloader.HtmlDownloader()
    self.parser = html_parser.HtmlParser()
    self.outputer = outputer.HtmlOutputer()
Example No. 6
def main():
    arguments = ArgumentParser().arguments

    Utils.make_dir(Config.log_dirpath)
    logger = Logger(filename=Config.log_filename, is_debug=arguments.debug)

    logger.info("Start crawling... (migrate={}, debug={})".format(
        arguments.migrate, arguments.debug))

    news_list, df = Crawler(arguments.migrate, logger).execute()

    if len(news_list) > 0 and not arguments.migrate:
        slack_bot = SlackBot(Config.channel, Config.debug_channel,
                             Config.slack_api_token)
        for news in news_list:
            message = Message.make_message(news)
            is_success, status = slack_bot.post_message(
                message, is_debug=arguments.debug)
            if is_success:
                logger.info(status)
                df.to_csv(Config.database_filename, index=False)
            else:
                logger.error(status)

    logger.info("Stop crawling... (migrate={}, debug={})".format(
        arguments.migrate, arguments.debug))
Example No. 7
def __init__(self):
    self.args = ArgumentParser()
    self.file_count = 0
    self.log_file = self.args.update_html_log_path + datetime.today().strftime("%Y-%m-%d") + ".txt"
    if not os.path.exists(self.log_file):
        os.mknod(self.log_file)
    self.log_file_handler = open(self.log_file, "a+", encoding='utf-8')
Example No. 8
def __init__(self):
    args = ArgumentParser()
    keyword_start = args.keyword_start
    keyword_end = args.keyword_end
    self.new_urls = set()
    for index in range(keyword_start, keyword_end):
        url = "https://baike.baidu.com/view/" + str(index)
        self.new_urls.add(url)
    self.old_urls = set()
    self.fail_urls = set()
    self.fail_url_mark = True
Example No. 9
def main(args):
    a = ArgumentParser()
    args = a.parse_args(args)

    s = Searcher()
    b = Booker()

    try:
        # 'from' and 'return' are keywords, need to use vars() :-(
        token = s.search(
            args.date,
            vars(args)['from'],
            args.to,
            cheapest=not args.fastest,
            length_of_stay=(0 if args.one_way else vars(args)['return'])
        )
        pnr = b.book(token, args.bags)
        print(pnr)
    except BookFlightError as e:
        print(e, file=sys.stderr)
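The vars() workaround above deserves a note: argparse will happily define options whose dest is a Python keyword, but attribute access such as args.from is a syntax error, so the value must be fetched via vars() or getattr(). A minimal self-contained demonstration:

import argparse

p = argparse.ArgumentParser()
p.add_argument('--from')             # dest becomes 'from', a Python keyword
p.add_argument('--return', type=int)
ns = p.parse_args(['--from', 'PRG', '--return', '3'])
print(vars(ns)['from'])              # 'PRG'; ns.from would be a SyntaxError
print(getattr(ns, 'return'))         # 3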
Example No. 10
def main():
    argument_parser = ArgumentParser()
    # Run both when neither flag or both flags are specified.
    is_both = not argument_parser.arguments.crawl ^ argument_parser.arguments.scrape

    if is_both:
        Crawler.run()
        Scraper.run()
    else:
        if argument_parser.arguments.crawl:
            Crawler.run()
        elif argument_parser.arguments.scrape:
            Scraper.run()
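The XOR trick in Example No. 10 reads tersely: since ^ binds tighter than not, `not crawl ^ scrape` is `not (crawl ^ scrape)`, which is true exactly when both flags or neither flag is set. A quick truth-table check:

for crawl in (False, True):
    for scrape in (False, True):
        print(crawl, scrape, not crawl ^ scrape)
# Prints True for (False, False) and (True, True), False otherwise.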
Example No. 11
def main():
    level = ArgumentParser().start()

    if level is not None:
        LogHandler(log_level=level)
    else:
        LogHandler()

    process_data = ProcessData()

    #process_data.test_jitbit()
    process_data.start()
Example No. 12
def __init__(self):
    args = ArgumentParser()
    text_start = args.text_start
    text_end = args.text_end
    self.new_urls = set()
    # Read the keyword list inside a context manager so the file is closed.
    with open(args.key_word_path, 'r', encoding='UTF-8') as f:
        key_words = [line.strip() for line in f]
    for index in range(text_start, text_end):
        url = "https://baike.baidu.com/item/" + quote(key_words[index])
        self.new_urls.add(url)
    key_words.clear()
    self.old_urls = set()
    self.fail_urls = set()
    self.fail_url_mark = True
Example No. 13
    def __init__(self, interrupt=False):
        self.args = ArgumentParser()
        self.count = self.args.keyword_start
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = outputer.HtmlOutputer()
        self.title_to_id = dict()
        self.interrupt = interrupt
        # Initialize the Elasticsearch client.
        # self.es = Elasticsearch(hosts=self.args.es_url, timeout=120)

        # After an interrupted update, reload the titles crawled previously.
        if self.interrupt:
            self.interrupt_previous_titles = set()
            self.load_interrupt_titles()
            print("Load %d titles." % len(self.interrupt_previous_titles))
Example No. 14
def _initialize():
    """Initializes the InstallerTest class.

    This entails setting the class attributes and adding the configured test
    methods to the class.
    """
    args = ArgumentParser().parse_args()

    log_level = (logging.ERROR if args.quiet else
                 logging.DEBUG if args.verbose else logging.INFO)
    LOGGER.setLevel(log_level)
    handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter(
            fmt='[%(asctime)s:%(filename)s(%(lineno)d)] %(message)s',
            datefmt='%m%d/%H%M%S'))
    LOGGER.addHandler(handler)

    # Pull args from the parent proc out of the environment block.
    if os.environ.get('CMI_FORCE_CLEAN', False):
        global _force_clean
        _force_clean = True
    InstallerTest._output_dir = os.environ.get('CMI_OUTPUT_DIR')
    installer_path = GetAbsoluteExecutablePath(
        os.environ.get('CMI_INSTALLER_PATH', 'mini_installer.exe'))
    previous_version_installer_path = GetAbsoluteExecutablePath(
        os.environ.get('CMI_PREVIOUS_VERSION_INSTALLER_PATH',
                       'previous_version_mini_installer.exe'))
    chromedriver_path = GetAbsoluteExecutablePath(
        os.environ.get('CMI_CHROMEDRIVER_PATH', 'chromedriver.exe'))
    config_path = GetAbsoluteConfigPath(
        os.environ.get('CMI_CONFIG', 'config.config'))

    InstallerTest._variable_expander = VariableExpander(
        installer_path, previous_version_installer_path, chromedriver_path,
        args.quiet, InstallerTest._output_dir)
    InstallerTest._config = ParseConfigFile(config_path,
                                            InstallerTest._variable_expander)

    # Add a test_Foo function to the InstallerTest class for each test in
    # the config file.
    run_test_fn = getattr(InstallerTest, 'run_test')
    for test in InstallerTest._config.tests:
        setattr(InstallerTest, 'test_' + test['name'], run_test_fn)
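The closing loop of Example No. 14 generates one test_<name> method per config entry by re-binding a single shared function. A minimal standalone sketch of the same technique, using unittest's standard _testMethodName attribute to recover the case name:

import unittest

class DataDriven(unittest.TestCase):
    def run_case(self):
        # Shared body; the generated method's name identifies the case.
        case = self._testMethodName[len('test_'):]
        self.assertIn(case, ('alpha', 'beta'))

for _name in ('alpha', 'beta'):
    setattr(DataDriven, 'test_' + _name, DataDriven.run_case)

if __name__ == '__main__':
    unittest.main()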
Example No. 15
def __get_argparse_configuration__():
    parser = ArgumentParser(description="Graphics argument parser",
                            usage=argparse.SUPPRESS)
    parser.add_argument("--shape",
                        "-s",
                        dest="shape",
                        choices=["Circle", "Rectangle"],
                        required=True)
    parser.add_argument("--color",
                        "-c",
                        type=check_color_variable,
                        nargs=3,
                        dest="color")
    parser.add_argument("--area",
                        "-a",
                        type=int,
                        nargs=4,
                        dest="area",
                        required=True)
    return parser
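Example No. 15 passes a custom callable, check_color_variable, as the type for --color, but its body is not shown. A plausible sketch, assuming it validates one RGB component, would be:

import argparse

def check_color_variable(value):
    # Hypothetical implementation: the real one is not in the snippet above.
    ivalue = int(value)
    if not 0 <= ivalue <= 255:
        raise argparse.ArgumentTypeError(
            "%s is not a valid color component (expected 0-255)" % value)
    return ivalue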
Example No. 16
def main():
    args = ArgumentParser().args
    image_path = args.image_path

    address = host + ":" + str(port)
    # task_resize_addr = address + '/api/new_task'
    one_resize_addr = address + api_image_resize_endpoint

    if not is_valid_image(image_path):
        print("Error: Invalid image path {}".format(image_path), file=sys.stderr)
        sys.exit(1)

    new_image_name, image_extension = get_new_image_name(image_path, args.new_name)
    files = prepare_image_package_to_send(image_path, args.new_width, args.new_height)
    response = requests.post(one_resize_addr, files=files)
    nparr = np.frombuffer(response.content, np.uint8)
    new_sized_image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    file_drop_loc = args.drop_loc + new_image_name + "." + image_extension
    save_new_resized_image(new_sized_image, file_drop_loc)
Example No. 17
def __init__(self):
    args = ArgumentParser()
    index_start = 1
    try:
        with open(args.index_end_path, 'r', encoding='utf-8') as f:
            index_end = int(f.readline().strip('\n'))
    except Exception as e:
        print(e)
        sys.exit(-1)
    self.new_urls = set()
    print("Adding all urls ...")
    for index in range(index_start, index_end):
        url = "https://baike.baidu.com/view/" + str(index)
        self.new_urls.add(url)
    print("Done.")
    self.old_urls = set()
    self.fail_urls = set()
    self.fail_url_mark = True
    self.downloader = HtmlDownloader()
Example No. 18
def __get_argparse_configuration__():
    parser = ArgumentParser(description="Log argument parser",
                            usage=argparse.SUPPRESS)
    parser.add_argument("--level",
                        "-l",
                        dest="level",
                        default="Debug",
                        required=False,
                        choices=['Warn', 'Info', 'Debug', 'Error'])
    parser.add_argument("--message",
                        "-m",
                        nargs='+',
                        dest="message",
                        required=True)
    return parser
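Assuming the ArgumentParser in Example No. 18 is argparse-compatible, the parser it returns can be exercised like this; note that nargs='+' collects the message as a list of words:

parser = __get_argparse_configuration__()
args = parser.parse_args(['-l', 'Info', '-m', 'disk', 'almost', 'full'])
print(args.level, ' '.join(args.message))  # Info disk almost full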
Example No. 19
def main():
    args = ArgumentParser.parse_arguments()
    covid_records = FileParser.fetch_covid_cases_stats(args.file_path)
    safety_measures = FileParser.fetch_covid_safety_measures(
        covid_records, args.file_path)
    calculator = Calculator(covid_records=covid_records,
                            safety_measure_records=safety_measures)

    if args.a:
        print('Recover over total ratio is {}'.format(
            calculator.get_recovered_over_total_ratio(args.a)))

    if args.b:
        print('Death Average is {}'.format(calculator.get_death_average(
            args.b)))

    if args.c:
        measures = calculator.get_safety_measures_efficiency()
        for measure in measures:
            print('measure: {} efficiency: {}'.format(measure[MEASURE],
                                                      measure[EFFICIENCY]))
    if args.d:
        calculator.plot_measure_efficiency()
Example No. 20
            self.tweet_handler.stop_streaming()

    '''
    This is the root method for generating reports from the collected
    data; it runs periodically via the scheduler.
    '''

    def generate_reports(self):
        try:
            print('\n')
            if len(self.tweet_handler.twitter_data_store) == 0:
                pass
                # print('Zero')
            else:
                self.tweet_processor.tweet_data = self.tweet_handler.twitter_data_store
                self.tweet_processor.print_uniq_user_count()
                self.tweet_processor.print_domain_related_result()
                self.tweet_processor.print_unique_words()
        finally:
            # Re-schedule this method to run again in 60 seconds.
            self.scheduler.enter(60, 1, self.generate_reports)
        self.scheduler.run()


if __name__ == '__main__':
    argument_parser = ArgumentParser()
    args = argument_parser.parse_args()
    query = args.query
    tweet__base = TweetBase(query)
    tweet__base.listen_tweet_and_generate_reports()
Example No. 21
import matplotlib.pyplot as plt
from numpy import flip
from timeit import default_timer as timer

from argument_parser import ArgumentParser
from classifiers.bayes import Bayes
from classifiers.decision_tree import DecisionTree
from classifiers.k_neighbors import KNeighbors
from classifiers.neural_network import NeuralNetwork
from classifiers.svm import SVM
from utils import factorize, load_dataset, learning_curve_plot, print_basic_stats

args = ArgumentParser().get_arguments()

data = load_dataset(args.dataset)
labels = data[data.columns[-1]].unique()
data = data.apply(factorize)

fraction = args.training_fraction
class_args = args.class_args

# Compare ints with ==, not `is` (identity comparison is a bug here).
if args.classifier == 0:
    classifiers = [DecisionTree(data, labels, fraction, class_args),
                   Bayes(data, labels, fraction, class_args),
                   SVM(data, labels, fraction, class_args),
                   KNeighbors(data, labels, fraction, class_args),
                   NeuralNetwork(data, labels, fraction, class_args)]
elif args.classifier == 1:
    classifiers = [DecisionTree(data, labels, fraction, class_args)]
elif args.classifier == 2:
    classifiers = [Bayes(data, labels, fraction, class_args)]
Example No. 22
from argument_parser import ArgumentParser
from data_loader import load_data
from drift_detectors.drift_adwin import DriftADWIN
from drift_detectors.drift_ddm import DriftDDM
from drift_detectors.drift_eddm import DriftEDDM
from drift_detectors.drift_page_hinkley import DriftPageHinkley
from knn import KNN
from util import *

argument_parser = ArgumentParser()
filename = argument_parser.get_filename()
adwin_delta = argument_parser.get_delta()
knn_parameters = argument_parser.get_knn_parameters()

data, labels = load_data(filename)

classifier = KNN(data, labels, knn_parameters[0], knn_parameters[1])

classifier.train()
classifier.test()

prediction_table = classifier.get_prediction_table()

print(f'{"Accuracy:":<15}{classifier.get_accuracy() * 100:4.2f} %\n')

algorithms = [
    DriftDDM(),
    DriftEDDM(),
    DriftADWIN(adwin_delta),
    DriftPageHinkley()
]
Example No. 23
                # else:
                #     self.outputer.collect_data(html_cont, item_url, title)
                if self.interrupt and title in self.interrupt_previous_titles:
                    self.urls.add_old_url(page_url)
                    self.count += 1
                    print("Keyword already exist %d Url: %s" % (self.count, unquote(page_url)))
                    continue
                if update_time >= '2018-01-01 00:00:00':
                    self.outputer.collect_data(html_cont, item_url, title)
                self.urls.add_old_url(page_url)
                self.count += 1
                # if self.count % 1000000 == 0:
                #     logging.debug('Visited %d items.' % self.count)
                print("Keyword Success %d Url:%s" % (self.count, unquote(page_url)))
            except Exception:
                self.urls.add_fail_url(page_url)
                print("Failed")


if __name__ == '__main__':
    args = ArgumentParser()
    logging.basicConfig(
        level=logging.INFO,
        filename=args.update_output_log_path,
        filemode='a',
        format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'
    )
    spider = SpiderMain(interrupt=True)
    # logging.info("The number of entities with baike urls is {}.".format(len(spider.url_to_update_time)))
    spider.craw()
    spider.outputer.log_file_handler.close()
            "INSERT INTO ReferenceObjects(uuid, eid, name, ref_type, x, y, z, pitch, yaw) VALUES (?,?,?,?,?,?,?,?,?)",
            self.memory.self_memid,
            p.entityId,
            p.name,
            "player",
            p.pos.x,
            p.pos.y,
            p.pos.z,
            p.look.pitch,
            p.look.yaw,
        )


if __name__ == "__main__":
    base_path = os.path.dirname(__file__)
    parser = ArgumentParser("Minecraft", base_path)
    opts = parser.parse()

    # set up stdout logging
    sh = logging.StreamHandler()
    sh.setLevel(logging.DEBUG if opts.verbose else logging.INFO)
    sh.setFormatter(log_formatter)
    logging.getLogger().addHandler(sh)
    logging.info("Info logging")
    logging.debug("Debug logging")

    # Check that models and datasets are up to date
    rc = subprocess.call([opts.verify_hash_script_path, "craftassist"])

    set_start_method("spawn", force=True)
Example No. 25
from matplotlib import pyplot
from argument_parser import ArgumentParser
from clusterers.agglomerative import Agglomerative
from clusterers.density_based import DensityBased
from clusterers.expectation_maximization import ExpectationMaximization
from clusterers.k_means import Kmeans
from clusterers.Optic_Clustering import optics
from sklearn.metrics import calinski_harabasz_score, davies_bouldin_score, silhouette_score
from utils import clear, load_dataset, factorize
from gap import optimalK
from sklearn.cluster import KMeans


argument_parser = ArgumentParser()

setup = {
    "dataset": argument_parser.get_dataset_path(),
    "dataset_name": argument_parser.get_dataset_name(),
    "simple_name": argument_parser.get_simple_dataset_name(),
    "algorithm": argument_parser.get_algorithm(),
    "clusters": argument_parser.get_number_of_clusters(),
    "class_args": argument_parser.get_classifier_arguments()
}

if argument_parser.is_n_clusters_fixed():
    setup['clusters'] = argument_parser.get_fixed_n_clusters()

data = load_dataset(setup['dataset'])
data = data.apply(factorize)

if not argument_parser.is_elbow_method_run():
Example No. 26
import sys

from argument_parser import ArgumentParser, ArgumentGroup, MutexArgumentParser, StaticArgument

args = ArgumentParser.parse_argv([
    ArgumentParser("cache_dir", "-cd", 1, "./verser",
                   lambda params: params[0]),
    ArgumentParser("result_dir", "-rd", 1, "./verser",
                   lambda params: params[0]),
    ArgumentParser("processor_id", "-p", 1, "ocr", lambda params: params[0]),
    MutexArgumentParser("files", [
        ArgumentParser(
            "files", "-fseq", 3, None, lambda params: {
                "type": "sequence",
                "format": params[0],
                "offset": int(params[1]),
                "count": int(params[2])
            }),
        ArgumentGroup("files", [
            StaticArgument("type", "sequence"),
            ArgumentParser("offset", "-o", 1, 1,
                           lambda params: int(params[0])),
            ArgumentParser(
                "format", "-furl", 1,
                "https://hosting.softagent.se/upps-personverser/PictureLoader?Antialias=ON&ImageId=%s&Scale=1",
                lambda params: params[0]),
            ArgumentParser("count", "-c", 1, 108953,
                           lambda params: int(params[0])),
        ])
    ])
], sys.argv[1:])

files = args["files"]
Example No. 27
def __init__(self):
    self.args = ArgumentParser()
    text_start = self.args.text_start
    self.file_count = int(text_start / 300000)
    self.datas = []
Example No. 28
from argument_parser import ArgumentParser
from data_loader import load_data
from drift_detectors.drift_adwin import DriftADWIN
from drift_detectors.drift_ddm import DriftDDM
from drift_detectors.drift_eddm import DriftEDDM
from drift_detectors.drift_page_hinkley import DriftPageHinkley
from classifiers.bayes import Bayes
from classifiers.neural_network import NeuralNetwork
from classifiers.knn import KNN
from classifiers.svm import SVM
from util import *

argument_parser = ArgumentParser()
filename = argument_parser.get_filename()
adwin_delta = argument_parser.get_delta()
training_set_ratio = argument_parser.get_training_set_ratio()
neighbors_number = argument_parser.get_neighbors_number()
kernel = argument_parser.get_kernel()
regulation = argument_parser.get_regulation()
max_iters = argument_parser.get_iterations()
n_of_hidden = argument_parser.get_n_of_hidden_layers()
algorithm = argument_parser.get_algorithm()
printing = argument_parser.is_printing()

data, labels = load_data(filename)

classifiers = {
    'bayes': Bayes(data, labels, training_set_ratio),
    'knn': KNN(data, labels, training_set_ratio, neighbors_number),
    'nn': NeuralNetwork(data, labels, training_set_ratio, n_of_hidden,
                        max_iters),
Example No. 29
File: main.py Project: heekim1/MH
            reads_processed, short_reads = self.mh_dict[mh_name].get_alleles(
                mh_number_of_bases_targeted)
            self.total_reads_count += reads_processed
            self.short_reads_count += short_reads
            self.mh_dict[mh_name].calculate_avg_mapping_quality()
            self.mh_dict[mh_name].calculate_total_coverage()
            self.mh_dict[mh_name].filter_alleles_and_set_analytical_threshold(
                self.filters_obj)
            self.mh_dict[mh_name].calculate_number_of_alleles()
            self.mh_dict[mh_name].calculate_number_of_contributors()
            if self.info:
                self.info_obj.add_info(self.mh_dict[mh_name])

    def _get_number_of_contributors_overall(self):
        self.number_of_contributors_overall = max(
            self.mh_dict[mh_name].number_of_contributors
            for mh_name in self.mh_dict)


if __name__ == '__main__':
    args_obj = ArgumentParser()
    mh_obj = Microhaplotyper(bam=args_obj.args.bam_file_path,
                             bed=args_obj.args.bed_file_path,
                             info=args_obj.args.info_file_path,
                             out=args_obj.args.out_file_path,
                             analytical_threshold=args_obj.args.min_coverage)
    mh_obj.get_microhaplotypes()
    output_obj = OutputMicrohaplotypes(mh_obj)
    output_obj.output_microhaplotypes()
Example No. 30
from argument_parser import ArgumentParser  # assumed project-local module, matching the other examples
from log_utils import LogUtils
from merge_utils import MergeUtils

if __name__ == "__main__":
    # Initialize the argument parser.
    a_parser = ArgumentParser([{
        "short_name": '-f1',
        "long_name": "--folder1",
        "required": True,
        "help": "Path to first folder"
    }, {
        "short_name": '-f2',
        "long_name": "--folder2",
        "required": True,
        "help": "Path to second folder"
    }, {
        "short_name": '-l',
        "long_name": "--log",
        "required": False,
        "help": "Export log as CSV"
    }, {
        "short_name": '-m',
        "long_name": '--merge',
        "required": False,
        "help": "Merge both folders in one"
    }])
    args = a_parser.get_args()

    image_paths = []
    image_paths.append(get_image_paths(args['folder1']))
    print("[INFO] Found " + str(len(image_paths[-1])) +