from args import parser, get_config_module
import hashlib
import hmac
import sys
import requests
import urllib

parser.add_argument("path", metavar="P", type=str, help="Path to query metadata of")
vals = parser.parse_args()
config = get_config_module(vals.config)
path = vals.path

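# Load the publisher secret key used to sign the metadata request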
try:
    from config import app_config
    key = app_config["PUBLISHER_SECRET_KEY"]
    root = app_config.get("APPLICATION_ROOT", "")
except (ImportError, KeyError):
    print("Unable to retrieve secret key for metadata request")
    sys.exit(1)

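# Work out which host and port the publisher is listening on, defaulting to localhost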
try:
    from config import host_config
    if "host" in host_config:
        host = host_config["host"]
    else:
        host = "127.0.0.1"

    if "port" in host_config:
        port = host_config["port"]
    else:
        port = 8000
Example #2
from args import parser, get_config_module
import hashlib
import hmac
import sys
import requests
import urllib

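# Register the group, path and key needed to expose a file through the publisher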
parser.add_argument("group", metavar="G", type=str, help="Group to associate file with")
parser.add_argument("path", metavar="P", type=str, help="Path to expose")
parser.add_argument("key", metavar="K", type=str, help="Key for path")

vals = parser.parse_args()
config = get_config_module(vals.config)
path = vals.path

try:
    from config import app_config
    key = app_config["PUBLISHER_SECRET_KEY"]
    root = app_config.get("APPLICATION_ROOT", "")
except (ImportError, KeyError):
    print("Unable to retrieve secret key for metadata request")
    sys.exit(1)

try:
    from config import host_config
    if "host" in host_config:
        host = host_config["host"]
    else:
        host = "127.0.0.1"

    if "port" in host_config:
Example #3
    except Exception as e:
        log.error('Error while running query', exc_info=e)
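        # Tear down any query/server processes left running after the failure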
        if runner.query_process is not None:
            kill_family(runner.query_process.pid, signal.SIGKILL)
        if runner.server_process is not None:
            kill_family(runner.server_process.pid, signal.SIGKILL)

        return None
    finally:
        runner.clean()
        if db is not None:
            db.close()


if __name__ == '__main__':
    parser.add_argument('-v', '--verbose', action='store_true', default=False)
    args = parser.parse_args()

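    # A flip fault needs --mean-runtime in single-injection mode, or --flip-rate otherwise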
    if args.fault == 'flip' and args.single and args.mean_runtime is None:
        parser.error(
            '--mean-runtime is required when --fault is set to flip and --single is given'
        )

    if args.fault == 'flip' and args.flip_rate is None and not args.single:
        parser.error(
            '--flip-rate is required when --fault is set to flip and --single is not given'
        )

    check_injector(args.debug)
    init_pool(args.threads)
Example #4
import os
import subprocess
from collect import collect
from args import parser
from util import get_dir_name

if __name__ == '__main__':
    parser.add_argument('-n', '--nodes', type=str, required=True)
    parser.add_argument('-o', '--output-dir', type=str, required=True)
    parser.add_argument('-v', '--values', nargs='+', required=True)
    args = parser.parse_args()

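    # Run one experiment per (database, flip-rate value) combination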
    for db in args.database:
        for val in args.values:
            val = float(val)
            exp_name = get_dir_name(
                database=db,
                query=args.query,
                fault=args.fault,
                inject_to_heap=args.heap,
                inject_to_anon=args.anon,
                inject_to_stack=args.stack,
                flip_rate=val,
                random_flip_rate=args.random_flip_rate,
                suffix=args.suffix
            )

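            # Build the clush command that starts the orchestrator for this run on every node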
            clush_command = ['clush', '-v', '-w', args.nodes, 'cd', 'chaos_db', '&&', 'PYTHONPATH=./orchestrator',
                             'python3', 'orchestrator/orchestrator.py', '-d', db, '-q', args.query, '-w',
                             args.working_directory, '-i', str(args.iterations), '-t', str(args.threads), '-fr',
                             str(val)]
Example #5
import logging

import pandas as pd
from torch import nn
import torch.nn.functional as F

from model import get_model_optimizer
from loops import train_loop, evaluate, infer
from dataset import cross_validation_split, get_test_dataset, BucketingSampler, make_collate_fn
from transformers import BertTokenizer, AlbertTokenizer
from torch.utils.data import DataLoader, Dataset
from evaluation import target_metric
from misc import target_columns, input_columns

from args import parser

# parser = argparse.ArgumentParser()

parser.add_argument("--checkpoint", type=str, required=True)
parser.add_argument("--dataframe", type=str, required=True)
parser.add_argument("--output_dir", type=str, required=True)

args = parser.parse_args()

logging.getLogger("transformers").setLevel(logging.ERROR)

test_df = pd.read_csv(args.dataframe)

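# Lowercase input only when the pretrained model name indicates an uncased checkpoint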
tokenizer = BertTokenizer.from_pretrained(args.bert_model,
                                          do_lower_case=("uncased"
                                                         in args.bert_model))

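# Build the test dataset and a DataLoader for batched inference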
test_set = get_test_dataset(args, test_df, tokenizer)
test_loader = DataLoader(