Code Example #1
def run_bert_finetuning(datadir: str,
                        filename: str,
                        train_size: float,
                        k: int = 0):
    util.init_logging(logging.DEBUG, "relationModel_bert_" + str(k))
    if k > 0:
        k_fold_split, data_df = dataset.prepare_data_cross_validation(
            datadir + filename, True, k)
        for train_indices, eval_indices in k_fold_split:
            # split feature vectors into train and eval
            train_df = data_df.loc[train_indices, :]
            eval_df = data_df.loc[eval_indices, :]

            # run BERT fine-tuning (training and evaluation)
            classifier_bert.model_with_simpletransformers(
                train_df, eval_df,
                filename.rsplit('.')[0])
    elif train_size == 1.0:
        # run BERT fine-tuning (100% training)
        train_df = dataset.edit_class_labels_df(
            dataset.read_in_data(datadir + filename, True))
        classifier_bert.model_with_simpletransformers(train_df, None,
                                                      filename.rsplit('.')[0])
    else:
        # split data into train and eval
        train_df, eval_df = dataset.prepare_data_train_test(
            datadir + filename, train_size, True)
        # run BERT fine-tuning (training and evaluation)
        classifier_bert.model_with_simpletransformers(train_df, eval_df,
                                                      filename.rsplit('.')[0])
Code Example #2
 def __init__(self, host=None, port=None, uri=None, **kwargs):
     init_logging()
     cnf = loadConfig()
     self.host = cnf.getHost()
     self.port = cnf.getPort()
     con = ''
     if kwargs:
         for key in kwargs:
             uriStr = ''
             uriStr = '&' + key + "=" + kwargs[key]
             con += uriStr
     self.uri = uri + con
     httpClient = None
     try:
         httpClient = httplib.HTTPConnection(self.host,
                                             self.port,
                                             timeout=30)
         httpClient.request('GET', self.uri)
         tmp = httpClient.getresponse().read()
         buf = json.loads(tmp)
         self.result = buf
     except:
         logging.debug('get data failed.....')
     finally:
         if httpClient:
             httpClient.close()
Code Example #3
File: meteor.py Project: weehowe-z/flask-weather
 def __init__(self, host=None, port=None, uri=None, **kwargs):
     init_logging()
     cnf = loadConfig()
     self.host = cnf.getHost()
     print self.host
     self.port = cnf.getPort()
     con = ''
     if kwargs:
         for key in kwargs:
             uriStr = ''
             uriStr = '&' + key + "=" + kwargs[key]
             con += uriStr
     self.uri = uri + con
     httpClient = None
     try:
         httpClient = httplib.HTTPConnection(
             self.host, self.port, timeout=30)
         httpClient.request('GET', self.uri)
         tmp = httpClient.getresponse().read()
         buf = json.loads(tmp)
         self.result = buf
     except:
         logging.debug('get data failed.....')
     finally:
         if httpClient:
             httpClient.close()
Code Example #4
def handle_args():
    """handle cmdline arguments
    usual procedure:
    1. -f  collect facts
    2. --ensure-all-change-types  post-process so that jgrok does not throw exception
    3. -s  run grok
    4. -v  verify
    5.1 --count-class  results in TeX, number of test classes
    5.2.1 --count-method-json  results in json, number of methods
    5.2.2 --count-method-tex   results in TeX, number of methods, in comparison with existing tools
    optional. --resume  skip existing
    optional. --debug  run with a small subset of projects
    optional. -l  specify log level
    :return: arguments
    """
    parser = argparse.ArgumentParser(description="Test selection on Defects4j")
    parser.add_argument("-l", metavar="LOG_LEVEL", type=str)
    parser.add_argument("-f", action="store_true", help="Collect facts")
    parser.add_argument(
        "--resume",
        action="store_true",
        help=
        "Resume process, according to the existence of files/dirs. Works with -f, -s"
    )
    parser.add_argument(
        "--ensure-all-change-types",
        action="store_true",
        help=
        "Ensure that all change types exist in diff facts by adding NONE facts"
    )
    parser.add_argument("--debug",
                        action="store_true",
                        help="Use one bug pair for testing, default: True")
    parser.add_argument("-s",
                        metavar="GROK_SCRIPT",
                        help="Run grok with specified grok script")
    parser.add_argument("-v", action="store_true", help="Verify grok results")
    parser.add_argument("--count-class",
                        metavar="OUTPUT_TEX_SRC",
                        help="Output TeX source for a results table")
    parser.add_argument(
        "--count-method-json",
        metavar="OUTPUT_JSON",
        help="Count test methods of affected test classes and write OUTPUT_JSON"
    )
    parser.add_argument("--count-method-tex",
                        nargs=2,
                        metavar=("JSON_FILE", "OUTPUT_TEX_SRC"),
                        help="Read JSON and write TeX table")
    parser.add_argument("--percent",
                        metavar="JSON_FILE",
                        help="Calculate percentage")
    args = parser.parse_args()
    init_logging(args.l)
    logger.debug(args)
    return args
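For orientation, the sketch below shows one way the numbered procedure from the docstring could be driven by the parsed flags. Every helper besides handle_args (collect_facts, add_none_facts, run_grok, verify_results) is hypothetical and only illustrates the intended order of the steps.

if __name__ == "__main__":
    args = handle_args()
    if args.f:
        collect_facts(resume=args.resume)     # step 1: collect facts (hypothetical helper)
    if args.ensure_all_change_types:
        add_none_facts()                      # step 2: add NONE facts so jgrok does not throw (hypothetical helper)
    if args.s:
        run_grok(args.s, resume=args.resume)  # step 3: run grok with the given script (hypothetical helper)
    if args.v:
        verify_results()                      # step 4: verify grok results (hypothetical helper)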
Code Example #5
 def setUp(self):
     self.test_dir = tempfile.TemporaryDirectory()
     self.conf = util.init_conf([])
     self.conf.steam_dir = self.test_dir.name
     self.conf.ts_root = self.test_dir.name
     self.conf.aoe2_usr_dir = 'replays'
     self.conf.player = 'mr_monkey'
     os.mkdir(os.path.join(self.test_dir.name, self.conf.aoe2_usr_dir))
     util.add_derived_fields(self.conf)
     util.init_logging(self.conf)
Code Example #6
File: config.py Project: weehowe-z/flask-weather
 def __init__(self):
     init_logging()
     self.path = os.path.split(os.path.realpath(__file__))[0] + "/config.json"
     with open(self.path) as f:
         configInfor = json.load(f)
     if configInfor is not None:
         self.host = configInfor["host"]
         self.port = configInfor["port"]
     else:
         logging.debug("load config failed.....")
Code Example #7
def main():
    for i in range(len(Stats.n)):
        for c in range(Stats.count[i]):
            print("i: " + str(i) + ", c: " + str(c))

            max_circle_matrix = create_max_circle_matrix(Stats.n[i])

            timer = datetime.now()
            result_max_circle_1 = tmg.make_relation_matrix(
                np.copy(max_circle_matrix))
            Stats.max_circle_tmg_time[i] += datetime.now() - timer
            print(
                str(datetime.now()) + " max 1 " +
                str(Stats.max_circle_tmg_time[i]))

            #timer = datetime.now()
            #result_max_circle_2 = bilp.make_relation_matrix(np.copy(max_circle_matrix))
            #Stats.max_circle_bilp_time[i] += datetime.now() - timer
            #print(str(datetime.now()) + " max 2 " + str(Stats.max_circle_bilp_time[i]))

            no_circle_matrix = create_no_circle_matrix(result_max_circle_1)

            timer = datetime.now()
            result_no_circle_1 = tmg.make_relation_matrix(
                np.copy(no_circle_matrix))
            Stats.no_circle_tmg_time[i] += datetime.now() - timer
            print(
                str(datetime.now()) + " no 1 " +
                str(Stats.no_circle_tmg_time[i]))

            #timer = datetime.now()
            #result_no_circle_2 = bilp.make_relation_matrix(np.copy(no_circle_matrix))
            #Stats.no_circle_bilp_time[i] += datetime.now() - timer
            #print(str(datetime.now()) + " no 2 " + str(Stats.no_circle_bilp_time[i]))

            single_circle_matrix = create_single_circle_matrix(Stats.n[i])

            timer = datetime.now()
            result_single_circle_1 = tmg.make_relation_matrix(
                np.copy(single_circle_matrix))
            Stats.single_circle_tmg_time[i] += datetime.now() - timer
            print(
                str(datetime.now()) + " single 1 " +
                str(Stats.single_circle_tmg_time[i]))

            #timer = datetime.now()
            #result_single_circle_2 = bilp.make_relation_matrix(np.copy(single_circle_matrix))
            #Stats.single_circle_bilp_time[i] += datetime.now() - timer
            #print(str(datetime.now()) + " single 2 " + str(Stats.single_circle_bilp_time[i]))

    util.init_logging(logging.DEBUG,
                      "GenerateTreeTiming_" + time.strftime("%Y%m%d-%H%M%S"))
    Stats.log_results()
    exit(0)
Code Example #8
def main():
    # logging
    util.init_logging(logging.INFO, "PredictRelationTiming_" + time.strftime("%Y%m%d-%H%M%S"))

    # command line interface
    parser = argparse.ArgumentParser()
    parser.add_argument('MajorClaim', type=str, help='topic of the discussion')
    parser.add_argument('--search', type=int,
                        help='number of sentences to further process from the search results (if not given all sentences retrieved are used)')
    parser.add_argument('--classify', nargs='+', type=str,
                        help='multiple sentences (group sentences with ""), a text or an url to be used as a source to collect arguments from')
    parser.add_argument('-svm', action='store_true', help='change classifier for estimating relation probabilities from BERT to SVM')
    parser.add_argument('-bilp', action='store_true',
                        help='change from generate tree approach tmg (Traversing and Modifying Graphs) to bilp (Binary Linear Integer Programming)')
    parser.add_argument('--cluster', nargs=2, type=float,
                        help='cluster arguments before processing them (relation only possible within cluster) -> first arg: similarity threshold, second arg: min_cluster_size')
    args = parser.parse_args()
    logging.info(args)

    # search engine
    search_engine = ArgumenText("userID", "apiKey")
    if args.classify is None:
        sentences = search_engine.query_search_api(args.MajorClaim)
        if args.search is not None and args.search < len(sentences):
            stance_pro = [a for a in sentences if a["stanceLabel"] == 'pro']
            stance_con = [a for a in sentences if a["stanceLabel"] == 'contra']
            stance_pro.sort(key=lambda s: s["argumentConfidence"]*s["stanceConfidence"], reverse=True)
            stance_con.sort(key=lambda s: s["argumentConfidence"]*s["stanceConfidence"], reverse=True)
            pro_len = min(int(args.search/2), len(stance_pro))
            con_len = min(args.search - pro_len, len(stance_con))
            diff = args.search - pro_len - con_len
            pro_len += diff
            sentences = stance_pro[:pro_len]
            sentences.extend(stance_con[:con_len])
    else:
        if len(args.classify) == 1:
            args.classify = args.classify[0]
        sentences = search_engine.query_classify_api(args.MajorClaim, args.classify)
    arguments = ArgumentList(args.MajorClaim, sentences)

    # clustering
    if args.cluster is not None:
        clusters = search_engine.query_cluster_api([s["sentenceOriginal"] for s in sentences], args.cluster[0],
                                               args.cluster[1])
        logging.debug(clusters)
        arguments.apply_clusters(clusters)

    # relation processing
    relation_processing = RelationProcessor(args.svm, args.bilp)
    relation_processing.generate_relation_matrix(arguments)

    Stats.log_results()

    exit(0)
Code Example #9
File: count_facts.py Project: d-fact/CSlicer
def handle_args():
    parser = argparse.ArgumentParser(description="compute difference of two generated list of commits")

    parser.add_argument("-p", required=True, metavar="FACTS_DIR",
                        help="Path to the directory of generated facts")
    parser.add_argument("-o", metavar="OUTPUT_FILE", type=str)
    parser.add_argument("-l", metavar="LOG_LEVEL", type=str)
    args = parser.parse_args()
    init_logging(args.l)
    logger.debug(args)
    return args
Code Example #10
def handle_args():
    parser = argparse.ArgumentParser(
        description="Read ground truth from DoSC dataset")
    parser.add_argument("-f",
                        required=True,
                        metavar="FUNCTIONALITY",
                        help="List slice for a specific functionality")
    parser.add_argument("-l", metavar="LOG_LEVEL", type=str)
    args = parser.parse_args()
    init_logging(args.l)
    logger.debug(args)
    return args
Code Example #11
File: benchmark.py Project: d-fact/CSlicer
def handle_args():
    parser = argparse.ArgumentParser(description="compute difference of two generated list of commits")
    parser.add_argument("-s", metavar="SEL_FILE", type=str, required=True)
    parser.add_argument("-c", metavar="ORIG_CFG_DIR", type=str)
    parser.add_argument("--os", metavar="OUT_NAMES_JSON", type=str, help="output json file for store benchmark names")
    parser.add_argument("--og", metavar="OUT_GROUP_JSON", type=str, help="output json file for store benchmark groups")
    parser.add_argument("-l", metavar="LOG_LEVEL", type=str)
    parser.add_argument("--all-benchmark", action="store_true")
    parser.add_argument("--existing-config", action="store_true")
    args = parser.parse_args()
    init_logging(args.l)
    logger.debug(args)
    return args
Code Example #12
def run_sklearn_classifiers(datadir: str,
                            filename: str,
                            train_size: float,
                            k: int = 0,
                            feature_range=range(0, 15),
                            specific_model=None):
    # read in data
    data_df = dataset.read_in_data(datadir + filename, True)

    for counter in feature_range:
        util.init_logging(
            logging.DEBUG,
            "relationModel_sklearn_" + str(counter) + "_" + str(k))
        # generate feature vectors from data
        data_feature, data_label = features.make_featurevector(
            data_df, 'features/' + filename + '/bert/sentence_pair.joblib',
            'features/' + filename + '/bert/sentence.joblib', counter)

        if k > 0:
            k_fold_split, data_label = dataset.prepare_feature_cross_validation(
                data_feature, data_label, k)
            for train_indices, eval_indices in k_fold_split:
                # split feature vectors into train and eval
                train_feature, train_label = data_feature[
                    train_indices], data_label[train_indices]
                eval_feature, eval_label = data_feature[
                    eval_indices], data_label[eval_indices]
                # run sklearn classifier bench (training and evaluation)
                classifier_sklearn.models_with_sklearn(train_feature,
                                                       train_label,
                                                       eval_feature,
                                                       eval_label,
                                                       filename.rsplit('.')[0],
                                                       specific_model)
        elif train_size == 1.0:
            # run sklearn classifier bench (100% training)
            data_label = dataset.edit_class_label_np(data_label)
            classifier_sklearn.models_with_sklearn(data_feature, data_label,
                                                   None, None,
                                                   filename.rsplit('.')[0],
                                                   specific_model)
        else:
            # split feature vectors into train and eval
            train_feature, train_label, eval_feature, eval_label = dataset.prepare_feature_train_test(
                data_feature, data_label, train_size)
            # run sklearn classifier bench (training and evaluation)
            classifier_sklearn.models_with_sklearn(train_feature, train_label,
                                                   eval_feature, eval_label,
                                                   filename.rsplit('.')[0],
                                                   specific_model)
Code Example #13
File: confLoad.py Project: zhounanshu/smart-campaus
 def __init__(self):
     init_logging()
     self.path = os.path.split(
         os.path.realpath(__file__))[0] + '/config.json'
     with open(self.path) as f:
         configInfor = json.load(f)
     if configInfor is not None:
         self.host = configInfor['host']
         self.port = configInfor['port']
         self.user = configInfor['user']
         self.passwd = configInfor['passwd']
         self.db = configInfor['db']
         self.url = configInfor['url']
     else:
         logging.debug("load config failed......")
Code Example #14
def main():
    # values
    datadir = "dataset/"
    filename = "MyDataset_balanced"
    train_size = 0.8
    logfile_name = "relationModel_MyDatasetBalanced"

    # init logging
    util.init_logging(logging.DEBUG, logfile_name)

    # sklearn classifiers
    run_sklearn_classifiers(datadir, filename, train_size, k=5)

    # bert fine-tuning
    run_bert_finetuning(datadir, filename, train_size)
Code Example #15
File: main.py Project: aogonevskiy/benchbase
def main(argv=sys.argv):
    """Main test"""
    global USAGE
    parser = OptionParser(USAGE, formatter=TitledHelpFormatter(),
                          version="benchbase %s" % get_version())
    parser.add_option("-v", "--verbose", action="store_true",
                      help="Verbose output")
    parser.add_option("-l", "--logfile", type="string",
                      default=os.path.expanduser(DEFAULT_LOG),
                      help="Log file path")
    parser.add_option("-d", "--database", type="string",
                      default=os.path.expanduser(DEFAULT_DB),
                      help="SQLite db path")
    parser.add_option("-m", "--comment", type="string",
                      help="Add a comment")
    parser.add_option("-j", "--jmeter", action="store_true",
                      default=True,
                      help="JMeter input file")
    parser.add_option("-f", "--funkload", action="store_true",
                      default=False,
                      help="FunkLoad input file")
    parser.add_option("--rmdatabase", action="store_true",
                      default=False,
                      help="Remove existing database")
    parser.add_option("-o", "--output", type="string",
                      help="Report output directory")
    parser.add_option("-H", "--host", type="string",
                      help="Host name when adding sar report")
    parser.add_option("-r", "--runningavg", type="int",
                      default=5,
                      help="Number of second to compute the running average.")
    parser.add_option("--chart-width", type="int",
                      default=800,
                      help="Width of charts in report.")
    parser.add_option("--chart-height", type="int",
                      default=768,
                      help="Heigth of charts in report.")
    parser.add_option("--period", type="int",
                      help="Resolution in second")

    options, args = parser.parse_args(argv)
    init_logging(options)
    if len(args) == 1:
        parser.error("Missing command")
    cmd = args[1]
    fn = globals()['cmd_' + cmd]
    ret = fn(args[2:], options)
    return ret
Code Example #16
def main():
    conf = util.init_conf()
    util.init_logging(conf)
    if not conf.subcommand:
        logging.error("No subcommand specified\n%s", conf.usage)
        return
    subcmd_func = {
        'find_new': find_new_replay_and_ts,
        'list': list_replay_in_repo,
        'plot_ts': plot_ts_file,
        'plot_last_ts': plot_most_recent_ts_in_repo,
        'plot_replay': plot_replay_by_idx,
        'plot_last_replay': plot_last_replay_in_repo,
    }
    lazy_import_matplotlib()
    subcmd_func[conf.subcommand[0]](conf)
Code Example #17
def handle_args():
    parser = argparse.ArgumentParser(
        description="compute difference of two generated list of commits")
    parser.add_argument(
        "--cslicer",
        metavar="NAMES",
        help="Run cslicer (old way) on a list of project names")
    parser.add_argument("--prepare",
                        metavar="NAMES",
                        help="Preparing work for a list of project names")
    parser.add_argument("--fact",
                        metavar="GROUPS",
                        help="Run cslicer with facts collection")
    parser.add_argument("--verify", action="store_true", help="verify results")
    parser.add_argument("-l", metavar="LOG_LEVEL", type=str)
    args = parser.parse_args()
    init_logging(args.l)
    logger.debug(args)
    return args
Code Example #18
File: main.py Project: bdelbosc/jenkviz
def main(argv=sys.argv):
    """Main test"""
    global USAGE
    parser = OptionParser(USAGE, formatter=TitledHelpFormatter(),
                          version="jenkviz %s" % get_version())
    parser.add_option("-v", "--verbose", action="store_true",
                      help="Verbose output")
    parser.add_option("-l", "--logfile", type="string",
                      default=os.path.expanduser(DEFAULT_LOG),
                      help="Log file path")
    parser.add_option("-d", "--database", type="string",
                      default=os.path.expanduser(DEFAULT_DB),
                      help="SQLite db path")
    parser.add_option("-o", "--output", type="string",
                      help="SVG output file")
    parser.add_option("--from-file", type="string",
                      help="Use html files in the the FROM_FILE directory instead of querying jenkins server.")
    parser.add_option("--to-file", type="string",
                      help="Save jenkins page into the TO_FILE directory.")
    parser.add_option("-r", "--reverse", action="store_true",
                      default=False,
                      help="Reverse crawl")
    parser.add_option("--direct", action="store_true",
                      default=False,
                      help="Display only direct upstream dependencies")
    parser.add_option("--explore", action="store_true",
                      default=False,
                      help="Display downstream build with external upstream")
    parser.add_option("-u", "--update", action="store_true",
                      default=False,
                      help="Always fetch build from server (update local database)")

    options, args = parser.parse_args(argv)
    if options.explore:
        options.direct = False
    init_logging(options)
    if len(args) == 1:
        parser.error("Missing command")
    cmd = args[1]
    fn = globals()['cmd_' + cmd]
    ret = fn(args[2:], options)
    return ret
Code Example #19
def main():
    logger = init_logging()

    parser = argparse.ArgumentParser(
        description=
        "This script analyzes a Redshift cluster and outputs a summary report with statistics"
        "its performance.")

    parser.add_argument('-b',
                        '--bucket',
                        nargs=1,
                        type=str,
                        help='location of replay outputs')
    parser.add_argument('-r1',
                        '--replay_id1',
                        nargs='?',
                        type=str,
                        default='',
                        help='replay id 1')
    parser.add_argument('-r2',
                        '--replay_id2',
                        nargs='?',
                        type=str,
                        default='',
                        help='replay id 2, required for '
                        'comparison')
    parser.add_argument('-s', '--sql', action='store_true', help='sql')

    args = parser.parse_args()

    if not (args.bucket or args.replay_id1 or args.replay_id2):
        print("Find work location")
    elif args.bucket and not (args.replay_id1 or args.replay_id2):
        list_replays(args.bucket[0])
    elif args.bucket and args.replay_id1 and not args.replay_id2:
        if args.sql:
            list_sql(args.bucket[0], args.replay_id1)
        else:
            analysis_summary(args.bucket[0], args.replay_id1)
    elif args.bucket and args.replay_id1 and args.replay_id2:
        if args.replay_id1 == args.replay_id2:
            logger.error(
                "Cannot compare same replay, please choose two distinct replay ids."
            )
            exit(-1)
        else:
            print(f"Compare replays {args.replay_id1} and {args.replay_id2}.")
            run_comparison_analysis(args.bucket[0], args.replay_id1,
                                    args.replay_id2)
    else:
        print("Please enter valid arguments.")
        exit(-1)
Code Example #20
    def __init__(self, config, cluster_name, clock, limits_timeout=300):
        self.config = config
        self.cluster_name = cluster_name
        self.clock = clock
        self.limits_timeout = limits_timeout
        # initialize in constructor so that cyclecloud_provider can initialize this
        # with the proper log_level. In tests, this will use the default.
        self.logger = init_logging()

        default_dir = os.getenv('HF_WORKDIR', '/var/tmp')
        self.db_dir = config.get('symphony.hostfactory.db_path', default_dir)
        self.requests_db = JsonStore('azurecc_requests.json', self.db_dir)
        self.capacity_db = JsonStore('azurecc_capacity.json', self.db_dir)
Code Example #21
def handle_args():
    parser = argparse.ArgumentParser(
        description="compute difference of two generated list of commits")

    parser.add_argument("-p",
                        required=True,
                        metavar="FACTS_DIR",
                        help="Path to the directory of generated facts")
    parser.add_argument(
        "-g",
        required=True,
        metavar="GROUP_FILE",
        help="A JSON file listing all groups for easy processing")
    parser.add_argument("-s",
                        required=True,
                        metavar="GROK_SCRIPT",
                        help="Path of grok script")
    parser.add_argument("-o", metavar="OUTPUT_FILE", type=str)
    parser.add_argument("-l", metavar="LOG_LEVEL", type=str)
    args = parser.parse_args()
    init_logging(args.l)
    logger.debug(args)
    return args
Code Example #22
def main():
    conf = util.init_conf()
    util.init_logging(conf)
    game_details = write_game_details_internal(conf, conf.replay_file)
Code Example #23
File: sln.py Project: alin1popa/HackPrague2019
 def __init__(self):
     self.driver = webdriver.Chrome()
     self.driver.get(url='http://www.google.com?hl=en')
     self.log = init_logging(__name__)
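Several snippets in this list use the per-module pattern above, where init_logging(__name__) returns a ready-to-use logger. A minimal sketch of such a variant follows; the handler setup and format string are assumptions.

import logging


def init_logging(name: str, level: int = logging.INFO) -> logging.Logger:
    # Hypothetical sketch: return a named logger with a single console handler.
    logger = logging.getLogger(name)
    if not logger.handlers:  # avoid attaching duplicate handlers on repeated calls
        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(message)s"))
        logger.addHandler(handler)
    logger.setLevel(level)
    return logger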
Code Example #24
File: api.py Project: marlier/domino
import simplejson as json
from werkzeug.contrib.atom import AtomFeed
from datetime import date

import mysql as Mysql
import domino_twilio as Twilio
import user as User
import alert as Alert
import team as Team
import email as Email
import rule as Rule
import notification as Notification
import util as Util

conf = Util.load_conf()
Util.init_logging("api")

class Api():
    '''
    This API class is designed to return json via http request
    '''
    def __init__(self, **data):
        '''
        Initialize the api class
        '''
        # set some default attribute values
        
        # default option vars
        self.limit = 25
        self.offset = 0
        self.search = ''
Code Example #25
import json

import requests as req

from constants import ID, CODE, DEFAULT_PLACES_RADIUS, POI_CATEGORIES
from sln import GScraper
from util import init_logging

LOG = init_logging(__name__)


def _get(url):
    param_connector = "&" if "?" in url else "?"
    creds = "{}app_id={}&app_code={}".format(param_connector, ID, CODE)
    url = url + creds
    LOG.debug(url)
    r = req.get(url)
    r.raise_for_status()
    return r.content


def _get_flow(params):
    url = 'https://traffic.api.here.com/traffic/6.2/flow{}'.format(params)
    return _get(url)


def _get_places(params):
    url = 'https://places.cit.api.here.com/places/v1/browse?{}'.format(params)
    return _get(url)

Code Example #26
    get_name,
    add_user,
    hash_uid,
    init_groups,
    restricted,
    group_chat_only,
    selected_groups_only,
    selected_messages_only,
)
from config import TELEGRAM_BOT_TOKEN, BOT_VERSION, PUB_IP, CERT, PRIV_KEY

# Debug Mode default Off
DEBUG = False

# Init logging
LOGGER = init_logging()


@selected_groups_only
@selected_messages_only
def process_message(update, context):
    """Process every new update."""
    # pp = pprint.PrettyPrinter(indent=4)
    # pp.pprint(update.to_dict())

    group_id = update.effective_chat.id
    user_id = update.effective_user.id
    user_name = get_name(update)
    msg_type = effective_message_type(update)
    text = update.effective_message.text
    if text:
Code Example #27
File: context.py Project: AntonKozlov/embox-1

def why_inviable_instance_is_disabled(outcome, *_):
    node, value = outcome
    assert not value
    fmt = '{node} is disabled because of an error: {node.error}'
    return fmt.format(**locals())


def resolve(initial_module):
    return Context().resolve(initial_module)


if __name__ == '__main__':
    import util
    util.init_logging('%s.log' % __name__)

    from pprint import pprint

    from mybuild.binding.pydsl import *

    @module
    def conf(self):
        self._constrain(m1(bar=17))
        # self._constrain(m3)
        self.sources = 'test.c'

    @module
    def m1(self, bar=42):
        self._constrain(m2(foo=bar))
Code Example #28
        else:
            group_counts = calculate_cm_counts(group, target_col, binary=False)
        confusion_analysis(group_counts, albreakdown_output_path, experiment_tag, name + "_peralsource", None, loss_al, acc_al, params, source=group_name)
    with open(output_path + "/" + name + "_predictionsal.pkl", 'wb') as outfile:
        pickle.dump(al_selection_df, outfile)


#if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--log", dest="log_path", default=None)
parser.add_argument("--temp", dest="temp", default=True)
parser.add_argument("--experiment", dest="experiment_path", default=None)
parser.add_argument("--row", dest="experiment_row", type=int, default=None)
args = parser.parse_args()

util.init_logging(args.log_path)

if args.experiment_path is not None:
    logging.info("=====================================================")
    if args.experiment_row is not None:
        logging.info("Experiment %s %i started...",args.experiment_path, args.experiment_row)
    else:
        logging.info("Experiment %s started...",args.experiment_path)
    logging.info("=====================================================")

    with open(args.experiment_path, 'r') as infile:
        paramset = json.load(infile)

    util.TMP_PATH = args.temp
    
    if args.experiment_row is not None:
Code Example #29
File: clustering.py Project: ArniBoy/Feedback
__author__ = 'Arne Recknagel'

import logging
from math import sqrt
import codecs

import numpy as np

from sklearn.pipeline import Pipeline
from sklearn.cluster import DBSCAN
from sklearn import metrics

from feature_extractors import DataSeparator
from preprocessing import parse, POS, NEG, NEU
from util import svm_pipeline, k_means_pipeline, get_corpus, init_logging, root, get_feature_union
init_logging()


def get_train_data():
    classes = POS | NEU | NEG
    train_loc = root+'Data/twitterData/train_alternative.tsv'
    dev_loc = root+'Data/twitterData/dev_alternative.tsv'
    test_loc = root+'Data/twitterData/test_alternative.tsv'
    train_labels, train_tweets, train_pos = parse(
        train_loc, classes
    )
    dev_labels, dev_tweets, dev_pos = parse(
        dev_loc, classes
    )
    test_labels, test_tweets, test_pos = parse(
        test_loc, classes
Code Example #30
import signal

from tornado import ioloop
from tornado.web import Application
import tornado.httpserver

from tcp_server import CtrlServer, MyTCPServer
from zmq_server import zmq_server
import conf
from conn import Conn
import util

if __name__ == "__main__":
    if conf.DAEMONIZE:
        util.daemonize()
    util.init_logging("log/gate_server.log", colorfy=True)

    handlers = [
        (r'/', Conn),
        (r'/snappy/', Conn, dict(compress="snappy", binary=True,
                                 conn_tag='SP')),
        (r'/msgpack-snappy-skip/', Conn,
         dict(msgpack=True,
              compress="snappy",
              skip_size=512,
              binary=True,
              conn_tag='MP-SP-skip')),
        (r'/msgpack-lz4-skip/', Conn,
         dict(msgpack=True,
              compress="lz4",
              skip_size=512,
Code Example #31
File: client.py Project: Darthone/atto
                    client.send(p.encode())
                    logging.debug("Waiting for ack")
                    message = client.recv()
                    logging.info("Recieved ack")
                time.sleep(self.config['client']['sleep']/1000)
                self.check_config()
            except zmq.error.ZMQError as e:
                logger.critical("ZMQError, Exiting: %s", e)
                exit()


if __name__ == '__main__':
    if zmq.zmq_version_info() < (4,0):
        raise RuntimeError("Security is not supported in libzmq version < 4.0. libzmq version {0}".format(zmq.zmq_version()))
    config = util.load_yaml_file(util.config.CLIENT["config"])
    util.init_logging(**config['logging'])
    daemon = Client(config['pid_file'], config_file=util.config.CLIENT["config"])
    daemon.run()
    if len(sys.argv) == 2:
        if 'start' == sys.argv[1]:
            daemon.start()
        elif 'stop' == sys.argv[1]:
            daemon.stop()
        elif 'restart' == sys.argv[1]:
            daemon.restart()
        else:
            print "Unknown command"
            sys.exit(2)
        sys.exit(0)
    else:
        print "usage: %s start|stop|restart" % sys.argv[0]
Code Example #32
import getpass
import os
from os.path import split, join, exists, isdir, expanduser
import glob
from shutil import copy2, copytree
import traceback
import time
from subprocess import Popen, CalledProcessError

import yaml

import util

# systemctl should log this print
print('trying to start logging')
l = util.init_logging(__file__)

evar = 'DRIVE_SYSTEMCTL_UNIT'
try:
    # This should be set in the invokation of the service by systemd
    service_unit = os.environ[evar]
    l.info(f'service_unit: {service_unit}')
except KeyError as e:
    l.error(f'failed to lookup {evar}')
    raise

root = '/'.join([''] + service_unit[:-len('.mount')].split('-'))
l.info(f'mount_point: {root}')
'''
evar = 'DRIVE_MOUNT_POINT'
try:
Code Example #33
                kv_d = kv_d - mx.nd.array(u.getA())

            # exchange with kvstore
            util.update_param(kvstore,
                              kv_d,
                              kv_x,
                              pull_only=util.need_restart())
            kv_d = mx.nd.zeros((dim, 1))
            x = kv_x.asnumpy()

            if not util.need_restart():
                start, end = end, min(end + args.batch_size, size)
                if start == end:
                    break
            util.reset_cancel()

        # compute objective
        loss = size / dim * np.dot((lambda_ / 2 * x - b).T, x)
        for i in range(*interval):
            loss -= (A[i] * x)**2 / 2
        logging.info('Epoch[{}] loss={}'.format(epoch, np.sum(loss) + 2))


if __name__ == '__main__':
    util.init_logging()
    logging.info('Start executing train_eigen.py')
    args = parse_args()
    logging.info('Start preparing data.')
    data = prepare_data()
    train(data, args)
Code Example #34
File: evaluate.py Project: yiyouls/LISA
    required=True,
    help='Comma-separated list of paths to layer configuration json.')
arg_parser.add_argument(
    '--attention_configs',
    help='Comma-separated list of paths to attention configuration json.')
arg_parser.add_argument(
    '--combine_test_files',
    action='store_true',
    help='Whether to combine list of test files into a single score.')

arg_parser.set_defaults(debug=False)
arg_parser.set_defaults(combine_test_files=False)

args, leftovers = arg_parser.parse_known_args()

util.init_logging(tf.logging.INFO)

if not os.path.isdir(args.save_dir):
    util.fatal_error("save_dir not found: %s" % args.save_dir)

# Load all the various configurations
# todo: validate json
data_config = train_utils.load_json_configs(args.data_config)
model_config = train_utils.load_json_configs(args.model_configs)
task_config = train_utils.load_json_configs(args.task_configs, args)
layer_config = train_utils.load_json_configs(args.layer_configs)
attention_config = train_utils.load_json_configs(args.attention_configs)

# attention_config = {}
# if args.attention_configs and args.attention_configs != '':
#   attention_config =
Code Example #35
File: context.py Project: vloginova/mybuild
    return fmt.format(**locals())

def why_inviable_instance_is_disabled(outcome, *_):
    node, value = outcome
    assert not value
    fmt = '{node} is disabled because of an error: {node.error}'
    return fmt.format(**locals())


def resolve(initial_module):
    return Context().resolve(initial_module)


if __name__ == '__main__':
    import util
    util.init_logging('%s.log' % __name__)

    from pprint import pprint

    from mybuild.binding.pydsl import *

    @module
    def conf(self):
        self._constrain(m1(bar=17))
        # self._constrain(m3)
        self.sources = 'test.c'

    @module
    def m1(self, bar=42):
        self._constrain(m2(foo=bar))
Code Example #36
from flask import Blueprint, make_response
from jwcrypto import jwk

from util import init_logging

logger = init_logging(__name__)


def create_blueprint():
    jwk_bp = Blueprint('jwk_blueprint', __name__, template_folder='templates')

    @jwk_bp.route('/jwk')
    def get_jwk():
        with open("public.pem", "rb") as f:
            key = jwk.JWK.from_pem(f.read())
            response = make_response(key.export(private_key=False), 200)
            response.headers['Content-Type'] = 'application/json'
            return response

    return jwk_bp
Code Example #37
        def m2(self):
            self._constrain(m3(a=True))
            self._constrain(m3(a=False))

        @module
        def m3(self, a=False):
            pass

        modules = resolve(conf)

        self.assertIn(conf, modules)
        self.assertIn(m1, modules)
        self.assertNotIn(m2, modules)
        self.assertIn(m3, modules)


def suite(wafctx_):
    class WafCtxBoundTestCase(SolverTestCase):
        wafctx = wafctx_
    return unittest.TestLoader().loadTestsFromTestCase(WafCtxBoundTestCase)


if __name__ == '__main__':
    import util, sys, logging
    # util.init_logging(filename='%s.log' % __name__)
    util.init_logging(sys.stderr,
                      level=logging.DUMP)

    unittest.main()

Code Example #38
    """Load a data file and generate and upload VolcView
    images for any defined VolcView sectors covered by the data."""
    start = time.time()
    # Convert volcview sector definitions to our "native" format
    _gen_sector_bounds(config.VOLCVIEW_SECTORS)  # "converts" in-place.

    logging.info("Generating images")
    file_processor = DataFile(data_file)
    file_processor.use_spawn = use_spawn
    file_processor.process_data()
    logging.info("Completed run in %d seconds", time.time() - start)
    return


if __name__ == "__main__":
    init_logging()
    parser = argparse.ArgumentParser(
        description="SO2 data file interface to VolcView")
    parser.add_argument(
        "files",
        nargs="*",
        default=[],
        help="SO2 data files to generate and upload VolcView images for")
    parser.add_argument(
        "-c",
        "--check",
        dest="check",
        action='store_const',
        help=
        "Check VolcView servers for the required bands/types, creating if needed",
        const=True,
Code Example #39
def main():
    global logger
    logger = init_logging(logging.INFO)

    global g_config

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "config_file",
        type=argparse.FileType("r"),
        help="Location of extraction config file.",
    )
    args = parser.parse_args()

    g_config = {}
    with args.config_file as stream:
        try:
            g_config = yaml.safe_load(stream)
        except yaml.YAMLError as exception:
            logger.error(f"Failed to parse extraction config yaml file: {exception}")
            exit(-1)

    validate_config_file(g_config)

    level = logging.getLevelName(g_config.get('log_level', 'INFO').upper())
    set_log_level(level)

    if g_config.get("logfile_level") != "none":
        level = logging.getLevelName(g_config.get('logfile_level', 'DEBUG').upper())
        log_file = 'extract.log'
        add_logfile(log_file, level=level, preamble=yaml.dump(g_config), backup_count=g_config.get("backup_count", 2))

    # print the version 
    log_version()

    interface = load_driver()
    if not interface:
        logger.error("Failed to load driver.")
        exit(-1)

    if g_config["source_cluster_endpoint"]:
        extraction_name = f'Extraction_{g_config["source_cluster_endpoint"].split(".")[0]}_{datetime.datetime.now().replace(tzinfo=datetime.timezone.utc).isoformat()}'
    else:
        extraction_name = f"Extraction_{datetime.datetime.now().replace(tzinfo=datetime.timezone.utc).isoformat()}"

    if g_config.get("start_time"):
        start_time = dateutil.parser.parse(g_config["start_time"]).astimezone(
            dateutil.tz.tzutc()
        )
    else:
        start_time = ""

    if g_config.get("end_time"):
        end_time = dateutil.parser.parse(g_config["end_time"]).astimezone(
            dateutil.tz.tzutc()
        )
    else:
        end_time = ""

    # read the logs
    if g_config.get("log_location"):
        log_location = g_config["log_location"]
    elif g_config.get("source_cluster_endpoint"):
        log_location  = get_cluster_log_location(g_config["source_cluster_endpoint"])
    else:
        logger.error("Either log_location or source_cluster_endpoint must be specified.")
        exit(-1)

    (connections, audit_logs, databases, last_connections) = get_logs(log_location, start_time, end_time)

    logger.debug(f"Found {len(connections)} connection logs, {len(audit_logs)} audit logs")

    if(len(audit_logs) == 0 or len(connections) == 0):
        logger.warning("No audit logs or connections logs found. Please verify that the audit log location or cluster endpoint is correct. Note, audit logs can take several hours to start appearing in S3 after logging is first enabled.")
        exit(-1)

    if g_config["source_cluster_endpoint"]:
        logger.info(f'Retrieving info from {g_config["source_cluster_endpoint"]}')
        source_cluster_urls = get_connection_string(
            g_config["source_cluster_endpoint"],
            g_config["master_username"],
            g_config["odbc_driver"],
        )

        source_cluster_statement_text_logs = retrieve_source_cluster_statement_text(
            source_cluster_urls, databases, start_time, end_time, interface,
        )

        combine_logs(audit_logs, source_cluster_statement_text_logs)

        if (
            g_config["source_cluster_system_table_unload_location"]
            and g_config["unload_system_table_queries"]
            and g_config["source_cluster_system_table_unload_iam_role"]
        ):
            logger.info(
                f'Exporting system tables to {g_config["source_cluster_system_table_unload_location"]}'
            )

            unload_system_table(
                source_cluster_urls,
                g_config["odbc_driver"],
                g_config["unload_system_table_queries"],
                g_config["source_cluster_system_table_unload_location"] + "/" + extraction_name,
                g_config["source_cluster_system_table_unload_iam_role"],
            )

            logger.info(
                f'Exported system tables to {g_config["source_cluster_system_table_unload_location"]}'
            )

    save_logs(
        audit_logs,
        last_connections,
        g_config["workload_location"] + "/" + extraction_name,
        connections,
        start_time,
        end_time
    )