Code example #1
def check_args(args):
    if args.load_model:
        if not path.isdir(args.load_model):
            sys.exit('Error: directory {} does not exist'.format(
                args.load_model))

        for i in range(Model.FUTURE_CHUNKS):
            model_path = path.join(args.load_model, 'py-{}.pt'.format(i))
            if not path.isfile(model_path):
                sys.exit(
                    'Error: Python model {} does not exist'.format(model_path))

    if args.save_model:
        make_sure_path_exists(args.save_model)

        for i in range(Model.FUTURE_CHUNKS):
            model_path = path.join(args.save_model, 'py-{}.pt'.format(i))
            if path.isfile(model_path):
                sys.exit(
                    'Error: Python model {} already exists'.format(model_path))

            model_path = path.join(args.save_model, 'cpp-{}.pt'.format(i))
            if path.isfile(model_path):
                sys.exit(
                    'Error: C++ model {} already exists'.format(model_path))

            meta_path = path.join(args.save_model, 'cpp-meta-{}.pt'.format(i))
            if path.isfile(meta_path):
                sys.exit('Error: meta {} already exists'.format(meta_path))

    if args.inference:
        if not args.load_model:
            sys.exit('Error: need to load model before inference')

        if args.tune or args.save_model:
            sys.exit('Error: cannot tune or save model during inference')
    else:
        if not args.save_model:
            sys.stderr.write('Warning: model will not be saved\n')

    # want to tune hyperparameters
    if args.tune:
        if args.save_model:
            sys.stderr.write('Warning: the model should be trained with a '
                             'validation dataset\n')

        global TUNING
        TUNING = True

    # set device to CPU or GPU
    if args.enable_gpu:
        if not torch.cuda.is_available():
            sys.exit('Error: --enable-gpu is set but no CUDA is available')

        global DEVICE
        DEVICE = torch.device('cuda')
        torch.backends.cudnn.benchmark = True
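
These snippets assume module-level imports from their original projects (sys, os.path, torch) plus a make_sure_path_exists helper that is not shown on this page. A minimal sketch of that helper, under the usual reading of its name (each project defines its own version), is:

import errno
import os

def make_sure_path_exists(path):
    # Create the directory (and any missing parents); ignore the error if it
    # already exists, re-raise anything else.
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
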
Code example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--bandwidth',
                        metavar='Mbps',
                        required=True,
                        help='constant bandwidth (Mbps)')
    parser.add_argument('--output-dir',
                        default="/home/hesy/pro/pan/pantheon/src/experiments",
                        metavar='DIR',
                        help='directory to output trace')
    args = parser.parse_args()
    """
    # number of packets in 60 seconds
    num_packets = int(float(args.bandwidth) * 5000) 
    ts_list = np.linspace(0, 60000, num=num_packets, endpoint=False)
    """
    # build a 48-second trace whose rate alternates between the full and half
    # bandwidth, switching every 12 s (4 periods), so that each segment's
    # packet count works out to an integer
    duration = 12 * 1000
    begin, end = 0, 0 + duration
    ts_list = []
    for _ in range(2):
        num_packets = int(float(args.bandwidth) * (5000 / 5))
        ts_list.extend(np.linspace(begin, end, num=num_packets,
                                   endpoint=False))
        begin += duration
        end += duration

        num_packets_ = int(float(args.bandwidth) / 2 * (5000 / 5))
        ts_list.extend(
            np.linspace(begin, end, num=num_packets_, endpoint=False))
        begin += duration
        end += duration

    # trace path
    make_sure_path_exists(args.output_dir)
    trace_path = path.join(args.output_dir,
                           '%smbps_jitter.trace' % args.bandwidth)

    # write timestamps to trace
    with open(trace_path, 'w') as trace:
        for ts in ts_list:
            trace.write('%d\n' % ts)
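
A quick way to sanity-check the generated trace is to bucket its timestamps into 1-second bins and convert each bin to Mbps. The helper below is hypothetical (not part of the project) and assumes the mahimahi convention of one 1500-byte packet delivery per line:

from collections import Counter

def rate_per_second(trace_path, packet_bytes=1500):
    # Bucket millisecond timestamps into 1-second bins and convert each
    # bin's packet count to Mbps.
    with open(trace_path) as f:
        buckets = Counter(int(line) // 1000 for line in f if line.strip())
    return {sec: n * packet_bytes * 8 / 1e6 for sec, n in sorted(buckets.items())}

For the script above, the result should alternate between roughly the requested bandwidth and half of it every 12 seconds.
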
Code example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--bandwidth',
                        metavar='Mbps',
                        required=True,
                        help='constant bandwidth (Mbps)')
    parser.add_argument('--output-dir',
                        metavar='DIR',
                        required=True,
                        help='directory to output trace')
    args = parser.parse_args()

    num_packets = int(float(args.bandwidth) * 5000)
    ts_list = np.linspace(0, 60000, num=num_packets, endpoint=False)

    # trace path
    make_sure_path_exists(args.output_dir)
    trace_path = path.join(args.output_dir, '%smbps.trace' % args.bandwidth)
    with open(trace_path, 'w') as trace:
        for ts in ts_list:
            trace.write('%d\n' % ts)
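
The constant 5000 is consistent with 1500-byte packets over 60 seconds, assuming the mahimahi-style trace format (one packet-delivery timestamp in milliseconds per line) that these scripts appear to target:

# At 1 Mbps, a 1500-byte (12000-bit) packet is delivered every 12 ms,
# i.e. about 83.33 packets per second, or 5000 packets in 60 seconds.
PACKET_BITS = 1500 * 8
packets_per_mbps_in_60s = 1e6 / PACKET_BITS * 60   # == 5000.0
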
Code example #4
def check_args(args):
    if args.load_model:
        if not path.isdir(args.load_model):
            sys.exit('Error: directory {} does not exist'
                     .format(args.load_model))

        for i in range(Model.FUTURE_CHUNKS):
            model_path = path.join(args.load_model, 'py-{}.pt'.format(i))
            if not path.isfile(model_path):
                sys.exit('Error: Python model {} does not exist'
                         .format(model_path))

    if args.save_model:
        make_sure_path_exists(args.save_model)

        for i in range(Model.FUTURE_CHUNKS):
            model_path = path.join(args.save_model, 'py-{}.pt'.format(i))
            if path.isfile(model_path):
                sys.exit('Error: Python model {} already exists'
                         .format(model_path))

            model_path = path.join(args.save_model, 'cpp-{}.pt'.format(i))
            if path.isfile(model_path):
                sys.exit('Error: C++ model {} already exists'
                         .format(model_path))

            meta_path = path.join(args.save_model, 'cpp-meta-{}.pt'.format(i))
            if path.isfile(meta_path):
                sys.exit('Error: meta {} already exists'.format(meta_path))

    if args.inference:
        if not args.load_model:
            sys.exit('Error: need to load model before inference')

        if args.tune or args.save_model:
            sys.exit('Error: cannot tune or save model during inference')
    else:
        if not args.save_model:
            sys.exit('Error: specify a folder to save models')

    # want to tune hyperparameters
    if args.tune:
        if args.save_model:
            sys.stderr.write('Warning: the model should be trained with a '
                             'validation dataset\n')

        global TUNING
        TUNING = True

    # set device to CPU or GPU
    if args.enable_gpu:
        if not torch.cuda.is_available():
            sys.exit('Error: --enable-gpu is set but no CUDA is available')

        global DEVICE
        DEVICE = torch.device('cuda')
        torch.backends.cudnn.benchmark = True

    # continual learning
    if args.cl:
        if not args.load_model or not args.save_model:
            sys.exit('Error: pass --load-model and --save-model to perform '
                     'continual learning')

        if args.time_start or args.time_end:
            sys.exit('Error: --cl conflicts with --from and --to; it has its '
                     'own strategy to sample data from specific durations')

        if args.inference:
            sys.exit('Error: cannot perform inference with --cl turned on')

        global NUM_EPOCHS
        NUM_EPOCHS = 200
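
For context, a minimal argparse setup producing the attributes that check_args() reads might look like the sketch below. --load-model, --save-model, --enable-gpu, --cl, --from and --to are named in the error messages above; the spellings of --inference and --tune are assumptions reconstructed from the attribute names:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--load-model', metavar='DIR')       # -> args.load_model
parser.add_argument('--save-model', metavar='DIR')       # -> args.save_model
parser.add_argument('--inference', action='store_true')
parser.add_argument('--tune', action='store_true')
parser.add_argument('--enable-gpu', action='store_true')
parser.add_argument('--cl', action='store_true')
parser.add_argument('--from', dest='time_start')         # -> args.time_start
parser.add_argument('--to', dest='time_end')             # -> args.time_end

args = parser.parse_args()
check_args(args)
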
Code example #5
File: qqublish.py  Project: ischurov/qqublish
def do_update_github(builder: BookBuilder) -> None:
    """

    :param builder: BookBuilder
    :return:
    """

    make_sure_path_exists(builder.repodir())

    try:
        lock = zc.lockfile.LockFile(builder.lockfile())
    except zc.lockfile.LockError:
        print(f"lockfile exists: {builder}")
        return

    git = app.config["GIT"]
    with open(builder.logfile(), "w") as log:

        def log_and_check_output(commands, **kwargs):
            print(" ".join(commands), file=log)
            output = subprocess.check_output(commands,
                                             stderr=subprocess.STDOUT,
                                             **kwargs).decode("utf-8")
            print(output, file=log)
            log.flush()

        clonedir = builder.clonedir()

        make_sure_path_exists(clonedir)
        try:
            try:
                log_and_check_output([git, "pull"], cwd=clonedir)
            except subprocess.CalledProcessError:
                try:
                    log_and_check_output(
                        [git, "clone", builder.repo_url(), "./"], cwd=clonedir)
                except subprocess.CalledProcessError as e:
                    if "already exists" in e.output:
                        shutil.rmtree(str(clonedir))
                        make_sure_path_exists(clonedir)
                        log_and_check_output(
                            [git, "clone",
                             builder.repo_url(), "./"],
                            cwd=clonedir)

            commands = [
                "docker",
                "run",
                "--rm",
                "--init",
                "--net",
                "none",
                "-v",
                str(clonedir) + ":/home/user/thebook",
                "ischurov/qqmbr:latest",
                "build",
                "--base-url",
                builder.base_url,
                "--copy-mathjax",
                "--template_options",
                json.dumps({"footer": mkfooter(builder.repo_url())}),
            ]

            print("Let's try")
            proc = subprocess.Popen(commands,
                                    stdout=log,
                                    stderr=log,
                                    cwd=str(clonedir))
            retcode = proc.wait()
            if retcode:
                raise Exception("FAILED: Process terminated with "
                                "non-zero status")

            copy_tree(str(clonedir / "build"), str(builder.outputdir()))
            print("SUCCESS", file=log)
        except Exception as e:
            print("FAILED: Something went wrong:", e, file=log)
        finally:
            print("Removing lock")
            lock.close()
            builder.lockfile().unlink()
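
The concurrency guard in this function reduces to a small zc.lockfile idiom; the sketch below uses an illustrative lock path rather than builder.lockfile():

import zc.lockfile

try:
    lock = zc.lockfile.LockFile('/tmp/build.lock')   # illustrative path
except zc.lockfile.LockError:
    print('lockfile exists: another build is already running')
else:
    try:
        pass  # work that must not run concurrently goes here
    finally:
        lock.close()   # the function above also unlinks the lock file
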
Code example #6
File: settings.py  Project: sibyl229/data-mixin
'''path to several useful folders'''
import os
from helpers import make_sure_path_exists

PROJECT_ROOT = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '../'))
RAW_INPUT_PATH = os.path.join(PROJECT_ROOT, 'raw_inputs/')
CLEAN_INPUT_PATH = os.path.join(PROJECT_ROOT, 'input/')
FEATURE_FILE_PATH = os.path.join(PROJECT_ROOT, 'input/features')
#NORMALIZED_FEATURE_FILE_PATH = os.path.join(PROJECT_ROOT, 'input/normalized_features')
FIG_PATH = os.path.join(PROJECT_ROOT, 'fig')

for p in [RAW_INPUT_PATH, CLEAN_INPUT_PATH, FIG_PATH]:
    make_sure_path_exists(p)
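
Since the directories are created when the module is imported, a consumer (hypothetical, not part of the project) can join paths without further checks:

import os
from settings import FIG_PATH

# FIG_PATH already exists at import time, so writing a figure here cannot
# fail because of a missing directory.
figure_file = os.path.join(FIG_PATH, 'histogram.png')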