Example #1
def Main(*argv):
    parse_args(*argv)

    stats_before = ParseStats()
    stats_before.read_file()
    stats_before.parse()
    stats_before.get_max_size()

    sys.stdout.write(term.CLEAR_SCREEN)
    for i in range(term.LINES):
        sys.stdout.write(term.DOWN)
    sys.stdout.write(term.HIDE_CURSOR)
    sys.stdout.flush()

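    # A background thread watches for keypresses so the refresh loop below can
    # keep redrawing the terminal without blocking on input.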
    getkey_thread = threading.Thread(
        group=None,
        target=getkey,
        name="keythread"
    )
    getkey_thread.start()

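    # Refresh loop: recompute the stats on a worker thread, wait Config.timewait
    # seconds, then pull the updated columns back through the queue.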
    while True:
        queue = Queue.Queue()
        thread_ = threading.Thread(
            group=None,
            target=process,
            name="Thread1",
            args=(stats_before.columns, queue)
        )

        thread_.start()

        time.sleep(Config.timewait)
        stats_before.columns = queue.get()
Example #2
def update_run_log(fx_app, new_data=None):
    meta = {'run_id': IrisCore.get_run_id(),
            'fx_version': fx_app.version,
            'fx_build_id': fx_app.build_id,
            'platform': Settings.get_os(),
            'config': '%s, %s-bit, %s' % (Platform.OS_VERSION, Platform.OS_BITS, Platform.PROCESSOR),
            'channel': fx_app.channel,
            'locale': fx_app.locale,
            'args': ' '.join(sys.argv),
            'params': vars(parse_args()),
            'log': os.path.join(IrisCore.get_current_run_dir(), 'iris_log.log')}

    repo = git.Repo(IrisCore.get_module_dir())
    meta['iris_version'] = 1.0
    meta['iris_repo'] = repo.working_tree_dir
    if not parse_args().headless_run:
        meta['iris_branch'] = repo.active_branch.name
        meta['iris_branch_head'] = repo.head.object.hexsha

    if new_data is None:
        logger.debug('Updating run.json with initial run data.')
        meta['total'] = 0
        meta['passed'] = 0
        meta['failed'] = 0
        meta['skipped'] = 0
        meta['errors'] = 0
        meta['start_time'] = 0
        meta['end_time'] = 0
        meta['total_time'] = 0
        tests = []
    else:
        logger.debug('Updating run.json with completed run data.')
        meta['total'] = new_data['total']
        meta['passed'] = new_data['passed']
        meta['failed'] = new_data['failed']
        meta['failed_tests'] = new_data['failed_tests']
        meta['skipped'] = new_data['skipped']
        meta['errors'] = new_data['errors']
        meta['start_time'] = new_data['start_time']
        meta['end_time'] = new_data['end_time']
        meta['total_time'] = new_data['total_time']
        tests = new_data['tests']

    run_file = os.path.join(IrisCore.get_current_run_dir(), 'run.json')
    run_file_data = {'meta': meta, 'tests': tests}
    with open(run_file, 'w') as f:
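        # indent=True is treated as indent=1 (bool is an int subclass),
        # so the file is written with one-space indentation.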
        json.dump(run_file_data, f, sort_keys=True, indent=True)
Example #3
def main():
    import configspark
    sc = configspark.SPARK_CONTEXT

    args = parse_args(HELP_PROMPT, sys.argv)
    filename = args.get("filename")
    user_id = args.get("user_id")

    # Load the Users/Songs ID maps
    full_text = sc.textFile(config.MSD_DATA)
    full_raw = full_text.map(msd_parse.parse_line)
    users, songs, songs_reverse_map = msd_parse.get_user_song_maps(full_raw)

    # Load the new ratings (if any) and replace raw IDs with int IDs
    user_id, raw_plays = get_training_data(sc, filename, user_id, users, songs)
    converted_user_id = users.get(user_id)
    ratings_train = raw_plays.map(msd_parse.rating_convert)

    user_songs_unheard = unheard_songs(converted_user_id, ratings_train,
                                       songs, songs_reverse_map)

    model = prepare_model(sc, filename, converted_user_id, ratings_train)

    make_recommendations(user_id, model, sc.parallelize(user_songs_unheard),
                         songs_reverse_map)

    sc.stop()
Example #4
def run() -> None:
    values: Tuple[int, int, int, int, int, bool] = parse_args()
    N: int = values[0]
    M: int = values[1]
    E: int = values[3]
    fill: bool = values[-1]
    nrepeat: int = 1000
    print(f"Total size S = {N * M} N = {N} M = {M} E = {E}")

    w = Workload(N, M, E, fill)
    p = pk.TeamPolicy(E, "auto", 32, pk.get_default_space())

    timer = pk.Timer()

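    # Repeat the yAx team reduction nrepeat times; only the last result is
    # kept for the correctness check below.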
    for i in range(nrepeat):
        result = pk.parallel_reduce(p, w.yAx)

    timer_result = timer.seconds()

    print(f"Computed result for {N} x {M} x {E} is {result}")
    solution: float = N * M * E

    if result != solution:
        pk.printf("Error: result (%lf) != solution (%lf)\n", result, solution)

    print(
        f"N({N}) M({M}) E({E}) nrepeat({nrepeat}) problem(MB) time({timer_result}) bandwidth(GB/s)"
    )
Example #5
def run() -> None:
    values: Tuple[int, int, int, int, int, bool] = parse_args()
    N: int = values[0]
    M: int = values[1]
    nrepeat: int = 1 
    print(f"Total size S = {N * M} N = {N} M = {M}")

    y = pk.View([N], pk.double)
    x = pk.View([M], pk.double)
    A = pk.View([N * M], pk.double)

    p = pk.RangePolicy(pk.get_default_space(), 0, N)
    pk.parallel_for(p, y_init, y=y)
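    # The same y_init kernel is reused to fill x; both views end up all ones,
    # which the solution check (result == N * M) below relies on.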
    pk.parallel_for(pk.RangePolicy(pk.get_default_space(), 0, M), y_init, y=x)
    pk.parallel_for(p, matrix_init, M=M, A=A)

    timer = pk.Timer()

    for i in range(nrepeat):
        result = pk.parallel_reduce(p, yAx, M=M, y=y, x=x, A=A)

    timer_result = timer.seconds()

    print(f"Computed result for {N} x {M} is {result}")
    solution = N * M

    if result != solution:
        pk.printf("Error: result (%lf) != solution (%lf)\n", result, solution)

    print(f"N({N}) M({M}) nrepeat({nrepeat}) problem(MB) time({timer_result}) bandwidth(GB/s)")
Example #6
def run() -> None:
    values: Tuple[int, int, int, int, int, bool] = parse_args()
    N: int = values[0]
    M: int = values[1]
    nrepeat: int = 100
    print(f"Total size S = {N * M} N = {N} M = {M}")

    p = pk.RangePolicy(pk.get_default_space(), 0, N)
    w = Workload(N, M)
    pk.parallel_for(p, w.y_init)
    pk.parallel_for(pk.RangePolicy(pk.get_default_space(), 0, M), w.x_init)
    pk.parallel_for(p, w.matrix_init)

    timer = pk.Timer()

    for i in range(nrepeat):
        result = pk.parallel_reduce(p, w.yAx)

    timer_result = timer.seconds()

    print(f"Computed result for {N} x {M} is {result}")
    solution = N * M

    if result != solution:
        pk.printf("Error: result (%lf) != solution (%lf)\n", result, solution)

    print(f"N({N}) M({M}) nrepeat({nrepeat}) problem(MB) time({timer_result}) bandwidth(GB/s)")
Example #7
def get_image_debug_path():
    """Returns the root directory where a test's debug images are located."""
    parent, test = IrisCore.parse_module_path()
    path = os.path.join(parse_args().workdir, 'runs',
                        IrisCore.get_run_id(), parent, test,
                        'debug_images')
    return path
Example #8
def main():
    temp = parse_args()
    if temp == (0, 0):
        return
    board_filename = os.path.join("boards", "%s-%s.txt" % temp)
    board = read_board(board_filename)
    print_board(board)
    pieces = parse_board(board)

    # Get starting and ending pieces
    # The starting piece is denoted by a capital letter
    starting_piece = next((x for x in pieces if x.kind.isupper()), None)
    if starting_piece is None:
        print("Could not find starting piece!")
        return
    else:
        print("Starting piece:")
        starting_piece.display_piece()

    # The ending piece is the king k
    ending_piece = next((x for x in pieces if x.kind == "k"), None)
    if ending_piece is None:
        print("Could not find ending piece!")
        return
    else:
        print("Ending piece:")
        ending_piece.display_piece()

    solve_board(starting_piece, pieces)
Example #9
def main():
    temp = parse_args()
    if temp == (0, 0):
        return
    board_filename = os.path.join('boards', '%s-%s.txt' % temp)
    board = read_board(board_filename)
    print_board(board)
    pieces = parse_board(board)

    # Get starting and ending pieces
    # The starting piece is denoted by a capital letter
    starting_piece = next((x for x in pieces if x.kind.isupper()), None)
    if starting_piece is None:
        print("Could not find starting piece!")
        return
    else:
        print("Starting piece:")
        starting_piece.display_piece()

    # The ending piece is the king k
    ending_piece = next((x for x in pieces if x.kind == 'k'), None)
    if ending_piece is None:
        print("Could not find ending piece!")
        return
    else:
        print("Ending piece:")
        ending_piece.display_piece()

    solve_board(starting_piece, pieces)
Example #10
def test_directory_returns_value(self):
    args = parse_args.parse_args([
        "--directory", "/some", "--database", "file.db", "--config-file",
        "file.yml"
    ])
    result = args.directory
    expect = "/some"
    self.assertEqual(expect, result)
Example #11
def create_run_directory():
    IrisCore.create_working_directory()
    master_run_directory = os.path.join(parse_args().workdir, 'runs')
    if not os.path.exists(master_run_directory):
        os.mkdir(master_run_directory)
    run_directory = os.path.join(master_run_directory, IrisCore.get_run_id())
    if not os.path.exists(run_directory):
        os.mkdir(run_directory)
Example #12
def main():
    args = parse_args()
    csv_filename = args.data
    result_csv = args.outfile
    data_dict = load_data(csv_filename)
    calculate(data_dict)
    write_csv(data_dict, result_csv)
    write_sql(result_csv, args=args)
Example #13
def __init__(self, *w, **kw):
    if not sys.stdin.isatty():
        self.input = sys.stdin.read()
    else:
        self.input = ''
    self.__dict__.update(self._default_fields)
    self.__dict__.update(parse_args())
    self.banzai(*w, **kw)
Example #14
def main():
    args = parse_args(infer=True)
    if args.gpu:
        dev = '/gpu:0'
    else:
        dev = '/cpu:0'

    with tf.device(dev):
        infer(args)
Example #15
def main():
    args = parse_args()

    tic = time.time()
    remote_thr(args.url, args.count, args.size, args.poll, args.copy)
    toc = time.time()

    if (toc - tic) < 3:
        print ("For best results, tests should take at least a few seconds.")
Example #16
def main():
    """The big tent."""
    args = parse_args(sys.argv[1:])
    repo = Repo(args.repo)
    cpw = commits_per_week(repo)
    if args.cumulative:
        cpw = cums(cpw)
    for commits in cpw:
        print(commits)
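
Since this snippet only reads args.repo and args.cumulative, a compatible parse_args could be as small as the following sketch (a hypothetical reconstruction, not the project's actual parser):

import argparse

def parse_args(argv):
    """Hypothetical parser matching the attribute accesses above."""
    parser = argparse.ArgumentParser(description='Print commits per week for a git repository')
    parser.add_argument('repo', help='path to the repository')
    parser.add_argument('--cumulative', action='store_true',
                        help='print running totals instead of weekly counts')
    return parser.parse_args(argv)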
Example #17
def main():
    args = parse_args()

    print ("Running program...")
    tic = time.time()
    local_thr(args.url, args.count, args.size, args.poll, args.copy)
    toc = time.time()

    if (toc - tic) < 3:
        print ("For best results, tests should take at least a few seconds.")
Example #18
def create_working_directory():
    # Parse once up front rather than re-invoking parse_args() for every path.
    args = parse_args()
    if not os.path.exists(args.workdir):
        logger.debug('Creating working directory %s' % args.workdir)
        os.makedirs(args.workdir)
    if not os.path.exists(os.path.join(args.workdir, 'data')):
        os.makedirs(os.path.join(args.workdir, 'data'))

    if args.clear:
        master_run_directory = os.path.join(args.workdir, 'runs')
        if os.path.exists(master_run_directory):
            shutil.rmtree(master_run_directory, ignore_errors=True)
        run_file = os.path.join(args.workdir, 'data', 'all_runs.json')
        if os.path.exists(run_file):
            os.remove(run_file)
        cache_builds_directory = os.path.join(args.workdir, 'cache')
        if os.path.exists(cache_builds_directory):
            shutil.rmtree(cache_builds_directory, ignore_errors=True)
Example #19
def main():
    """
    Export video files
    """

    event_ids, series_ids = parse_args()
    digest_login = DigestLogin(user=config.digest_user,
                               password=config.digest_pw)

    # get events from all series
    if series_ids:
        print("Getting events for series.")
        events = []
        for series_id in series_ids:
            try:
                events_of_series = get_events_of_series(
                    config.url, digest_login, series_id)
                events += events_of_series
            except Exception as e:
                print("Events of series {} could not be requested: {}".format(
                    series_id, str(e)))

        if not events:
            __abort_script("No events found.")

        event_ids = [get_id(event) for event in events]

    print("Starting export process.")
    for event_id in event_ids:
        try:
            print("Exporting videos of media package {}".format(event_id))

            mp_xml = get_media_package(config.url, digest_login, event_id)

            mp = parse_manifest_from_endpoint(mp_xml, event_id, False, True)

            if config.create_series_dirs and mp.series_id:
                mp_dir = os.path.join(config.target_directory, mp.series_id,
                                      mp.id)
            else:
                mp_dir = os.path.join(config.target_directory, mp.id)
            export_videos(mp, mp_dir, config.url, digest_login,
                          config.export_archived, config.export_publications,
                          config.export_mimetypes, config.export_flavors,
                          config.stream_security)

        except Exception as e:
            print(
                "Tracks of media package {} could not be exported: {}".format(
                    event_id, str(e)))

    print("Done.")
Example #20
def update_run_index(fx_app, new_data=None):
    current_run = {
        'id': IrisCore.get_run_id(),
        'version': fx_app.version,
        'build': fx_app.build_id,
        'channel': fx_app.channel,
        'locale': fx_app.locale
    }

    if new_data is None:
        logger.debug('Updating runs.json with initial run data.')
        current_run['total'] = '*'
        current_run['failed'] = '*'
    else:
        logger.debug('Updating runs.json with completed run data.')
        current_run['total'] = new_data['total']
        current_run['failed'] = new_data['failed']

    old_js_folder = os.path.join(parse_args().workdir, 'js')
    if os.path.exists(old_js_folder):
        shutil.rmtree(old_js_folder, ignore_errors=True)

    run_file = os.path.join(parse_args().workdir, 'data', 'all_runs.json')

    if os.path.exists(run_file):
        logger.debug('Updating run file: %s' % run_file)
        with open(run_file, 'r') as f:
            run_file_data = json.load(f)
        # Removing items from a list while iterating over it skips elements;
        # rebuild the list without any stale entry for this run instead.
        run_file_data['runs'] = [run for run in run_file_data['runs']
                                 if run['id'] != IrisCore.get_run_id()]
        run_file_data['runs'].append(current_run)
    else:
        logger.debug('Creating run file: %s' % run_file)
        run_file_data = {'runs': [current_run]}

    with open(run_file, 'w') as f:
        json.dump(run_file_data, f, sort_keys=True, indent=True)
Example #21
def run() -> None:
    values: Tuple[int, int, int, int, int, bool] = parse_args()
    N: int = values[0]
    M: int = values[1]
    fill: bool = values[-1]
    nrepeat: int = 100
    print(f"Total size S = {N * M} N = {N} M = {M}")

    pk.set_default_space(pk.ExecutionSpace.Cuda)

    y: pk.View1D = pk.View([N], pk.double)
    x: pk.View1D = pk.View([M], pk.double)
    A: pk.View2D = pk.View([N, M], pk.double)

    p = pk.RangePolicy(pk.get_default_space(), 0, N)
    pk.parallel_for(p, y_init, y=y)
    pk.parallel_for(pk.RangePolicy(pk.get_default_space(), 0, M), y_init, y=x)
    pk.parallel_for(p, matrix_init, M=M, A=A)

    # if fill:
    #     y.fill(1)
    #     x.fill(1)
    #     A.fill(1)
    # else:
    #     for i in range(N):
    #         y[i] = 1

    #     for i in range(M):
    #         x[i] = 1

    #     for j in range(N):
    #         for i in range(M):
    #             A[j][i] = 1

    timer = pk.Timer()

    for i in range(nrepeat):
        result = pk.parallel_reduce(p, yAx, M=M, y=y, x=x, A=A)

    timer_result = timer.seconds()

    print(f"Computed result for {N} x {M} is {result}")
    solution: float = N * M

    if result != solution:
        pk.printf("Error: result (%lf) != solution (%lf)\n", result, solution)

    print(
        f"N({N}) M({M}) nrepeat({nrepeat}) problem(MB) time({timer_result}) bandwidth(GB/s)"
    )
Example #22
def run():
    """Runs dilated residual network model in either train or predict mode"""

    config = parse_args()

    if not os.path.isdir(config.logs):
        os.makedirs(config.logs)

    if config.mode == 'train':
        train(config)
    elif config.mode == 'predict':
        predict(config)
    else:
        ValueError("Mode must be either train or predict")
Example #23
def verify_test_compat(test, browser):
    if browser.channel is None or browser.version is None:
        return False

    exclude = [test.exclude] if isinstance(test.exclude, str) else list(test.exclude)
    not_excluded = not any(item in browser.channel or item in get_os() or item in browser.locale
                           for item in exclude)
    correct_version = test.fx_version == '' or check_version(browser.version, test.fx_version)
    correct_channel = browser.channel in test.channel
    correct_locale = parse_args().locale in test.locale
    correct_platform = get_os() in test.platform
    return (correct_platform and correct_version and correct_channel
            and correct_locale and not_excluded)
Example #24
def run() -> None:
    values: Tuple[int, int, int, int, int, bool] = parse_args()
    N: int = values[0]
    M: int = values[1]
    E: int = values[3]
    fill: bool = values[-1]
    nrepeat: int = 1000
    print(f"Total size S = {N * M} N = {N} M = {M} E = {E}")

    y: pk.View2D = pk.View([E, N], pk.double, layout=pk.Layout.LayoutRight)
    x: pk.View2D = pk.View([E, M], pk.double, layout=pk.Layout.LayoutRight)
    A: pk.View3D = pk.View([E, N, M], pk.double, layout=pk.Layout.LayoutRight)

    if fill:
        y.fill(1)
        x.fill(1)
        A.fill(1)
    else:
        for e in range(E):
            for i in range(N):
                y[e][i] = 1

            for i in range(M):
                x[e][i] = 1

            for j in range(N):
                for i in range(M):
                    A[e][j][i] = 1

    p = pk.TeamPolicy(E, "auto", 32, pk.get_default_space())

    timer = pk.Timer()

    for i in range(nrepeat):
        result = pk.parallel_reduce(p, yAx, N=N, M=M, y=y, x=x, A=A)

    timer_result = timer.seconds()

    print(
        f"Computed result for {N} x {M} x {E} is {result}")
    solution: float = N * M * E

    if result != solution:
        pk.printf("Error: result (%lf) != solution (%lf)\n",
                  result, solution)

    print(f"N({N}) M({M}) E({E}) nrepeat({nrepeat}) problem(MB) time({timer_result}) bandwidth(GB/s)")
Example #25
def main():
    debug('started!')

    args = parse_args()

    status_callback = "http://{}:{}/status".format(args.host, args.port)
    debug(status_callback)
    callmgr = Callmgr(status_callback)
    # Keep a reference to every worker so all of them can be stopped later,
    # not just the last one created.
    tasks = []
    for _ in range(15):
        t = CallTask(callmgr, queue)
        t.start()
        tasks.append(t)

    app.run(host='0.0.0.0', port=args.port, debug=True)
    # app.run(host='127.0.0.1', debug=True)

    queue.join()
    for t in tasks:
        t.stop()
Example #26
def create_arg_json():
    arg_data = {'email': {'type': 'bool', 'value': ['true', 'false'], 'default': 'false', 'label': 'Email results'},
                'firefox': {'type': 'str', 'value': ['local', 'latest', 'latest-esr', 'latest-beta', 'nightly'],
                            'default': 'latest-beta', 'label': 'Firefox'},
                'highlight': {'type': 'bool', 'value': ['true', 'false'], 'default': 'false',
                              'label': 'Debug using highlighting'},
                'locale': {'type': 'str', 'value': Settings.LOCALES, 'default': 'en-US', 'label': 'Locale'},
                'mouse': {'type': 'float', 'value': ['0.0', '0.5', '1.0', '2.0'], 'default': '0.5',
                          'label': 'Mouse speed'},
                'override': {'type': 'bool', 'value': ['true', 'false'], 'default': 'false',
                             'label': 'Run disabled tests'},
                'port': {'type': 'int', 'value': ['2000'], 'default': '2000', 'label': 'Local web server port'},
                'report': {'type': 'bool', 'value': ['true', 'false'], 'default': 'false',
                           'label': 'Create TestRail report'},
                'save': {'type': 'bool', 'value': ['true', 'false'], 'default': 'false',
                         'label': 'Save profiles to disk'}}

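    # Persist the argument schema to disk, presumably for Iris's local web UI
    # to render a control for each option.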
    arg_log_file = os.path.join(parse_args().workdir, 'data', 'all_args.json')
    with open(arg_log_file, 'w') as f:
        json.dump(arg_data, f, sort_keys=True, indent=True)
Example #27
def init_config():
    args = parse_args()
    # below automatically set
    args.in_H, args.in_W = [int(x) for x in args.inSize.split("x")]
    args.depth_H, args.depth_W = [int(x) for x in args.depthSize.split("x")]
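    # e.g. inSize="128x128" gives in_H = 128, in_W = 128 (same pattern for depthSize)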

    args.datalist_path = os.path.join(args.dataset_path, args.datalist_path)
    args.pcgt_path = os.path.join(args.dataset_path, args.pcgt_path)
    args.rendering_path = os.path.join(args.dataset_path, args.rendering_path)

    if args.cat == 1:
        args.cat_list = CAT_LIST_1
        args.test_iter = 2000
    elif args.cat == 13:
        args.cat_list = CAT_LIST_13
        args.test_iter = 5000
    else:
        raise NotImplementedError

    args.gpu_id = str(args.gpu)

    # below constant
    args.K = CAMERA_INTRINSIC
    return args
Example #28
def main():
    args = parse_args()
    glue_process(args)
Example #29
def delete_run_directory():
    master_run_directory = os.path.join(parse_args().workdir, 'runs')
    run_directory = os.path.join(master_run_directory,
                                 IrisCore.get_run_id())
    if os.path.exists(run_directory):
        shutil.rmtree(run_directory, ignore_errors=True)
Example #30
import multiprocessing as mp

import cv2

from libs.config import load_config
from libs.timer import Timer
from parse_args import parse_args
import libs.utils as utils
import libs.font_utils as font_utils
from textrenderer.corpus.corpus_utils import corpus_factory
from textrenderer.renderer import Renderer
from tenacity import retry

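# Shared state for the render workers: a lock-guarded counter and a sentinel
# value used to tell consumers to stop.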
lock = mp.Lock()
counter = mp.Value('i', 0)
STOP_TOKEN = 'kill'

flags = parse_args()
cfg = load_config(flags.config_file)

fonts = font_utils.get_font_paths_from_list(flags.fonts_list)
bgs = utils.load_bgs(flags.bg_dir)

corpus = corpus_factory(flags.corpus_mode, flags.chars_file, flags.corpus_dir,
                        flags.length)

renderer = Renderer(corpus,
                    fonts,
                    bgs,
                    cfg,
                    height=flags.img_height,
                    width=flags.img_width,
                    clip_max_chars=flags.clip_max_chars)
Example #31
def main():
    (args, kw) = parse_args('error_thresh pareto_layers rank_top subsample filespec'.split(), usage)
    error_thresh = float(kw.get('error_thresh', '0.5'))
    pareto_layers = int(kw.get('pareto_layers', '4'))
    rank_top = kw.get('rank_top', '')
    subsample = int(kw.get('subsample', '1'))
    filespec = kw.get('filespec', '*')
    
    if len(args) < 1:
        usage()
    
    outdir = args[0]
    pareto_full = ''
    if len(args) > 1:
        pareto_full = args[1]
    
    T0 = time.time()
    L = []
    avg_errorL = []
    suffix = 'pareto_full.txt'
    filenameL = glob.glob(os.path.join(outdir, filespec + suffix))
    filename_successL = []
    for filename in filenameL:
        try:
            obj_current = json.loads(open(filename, 'rt').read())
            trace_filename = filename[:len(filename)-len(suffix)] + 'trace.json'
            # print(trace_filename)
            trace_current = json.loads(open(trace_filename, 'rt').read())
            for obj in obj_current:
                obj['filename'] = filename
            L.append(obj_current)
            avg_errorL.append(trace_current[-1]['avg_time20'])
            filename_successL.append(filename)
        except Exception:
            print('Warning: could not load %s' % filename)
    print('Loaded %d Pareto frontiers in %f secs' % (len(L), time.time() - T0))
    
    object_L = []
    
    error_L = []
    time_L = []
    index_L = []
    for i in range(len(L)):
        for obj in L[i]:
            error_L.append(obj['error'])
            time_L.append(obj['time'])
            filename = os.path.split(filenameL[i])[1]
            prefix = filename.split('_')[0]
            obj['filename'] = os.path.abspath(filenameL[i])
            obj['name'] = prefix + '_' + obj['name']
            object_L.append(obj)
            index_L.append(len(object_L)-1)
    error_L = numpy.array(error_L)
    time_L = numpy.array(time_L)
    index_L = numpy.array(index_L)
    
    print('Number of scattered points: %d' % len(error_L))
    
    ans = []
    
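    # Peel off pareto_layers successive frontiers: collect each frontier's points,
    # delete them from the arrays, and recompute the frontier on what remains.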
    for it in range(pareto_layers):
        paretoL = get_pareto(error_L, time_L)
        for idx in paretoL:
            ans.append(object_L[index_L[idx]])
        error_L = numpy.delete(error_L, paretoL)
        time_L = numpy.delete(time_L, paretoL)
        index_L = numpy.delete(index_L, paretoL)
    
    ans = [obj for obj in ans if obj['error'] <= error_thresh]
    if subsample > 1:
        ans = ans[::subsample]
        
    if len(rank_top):
        collect_count = collections.defaultdict(lambda: 0)
        for obj in ans:
            collect_count[obj['filename']] += 1
        count_filename = sorted([(count, filename) for (filename, count) in collect_count.items()], reverse=True)
        
        with open(rank_top, 'wt') as f:
            for (count, filename) in count_filename:
                print(filename, file=f)

    print('Number of Pareto points written: %d' % len(ans))
    
    if len(pareto_full):
        with open(pareto_full, 'wt') as f:
            f.write(json.dumps(ans))
Example #32
def get_base_local_web_url():
    return 'http://127.0.0.1:%s' % parse_args().port
Example #33
def get_current_run_dir():
    """Returns the directory inside the working directory of the active run."""
    IrisCore.create_run_directory()
    return os.path.join(parse_args().workdir, 'runs',
                        IrisCore.get_run_id())
Example #34
def get_working_dir():
    """Returns the path to the root of the directory where local data is stored."""
    IrisCore.create_working_directory()
    return parse_args().workdir
Example #35
def main():
    (args, kw) = parse_args('population_size generations frac_elitism frac_mutate prob_modify_consts tournament_size finalize_only finalize_filter loop_perf_only run_remotely_on_tablet training_images_directory cross_validation_directory resume_previous_search sample_importance_only sample_grid_only run_on_fir init_pop run_final_generation use_adaptive reconstruct_hints reduce_space'.split(), usage)
    if len(args) < 2:
        usage()
    
    program_name = args[0]
    result_dir = args[1]
    makedirs(result_dir)
    
    start_gen_index = 0
    
    d = {}
    # init_pop arrives as a hex-encoded Python literal; decode it, then evaluate.
    exec("init_pop = " + bytes.fromhex(kw.get('init_pop', '[]'.encode().hex())).decode(), globals(), d)
    init_pop = d['init_pop']
    pop_size0 = int(kw.get('population_size', '30'))
    if len(init_pop) > pop_size0: 
        random.shuffle(init_pop)
        init_pop = init_pop[:pop_size0]
    
    p = AttrDict(population_size=pop_size0,
                 generations=int(kw.get('generations', '25')),
                 frac_elitism=float(kw.get('frac_elitism', '0.3')),
                 frac_mutate=float(kw.get('frac_mutate', '0.3')),
                 tournament_size=int(kw.get('tournament_size', '8')),
                 loops=get_loops(program_name),
                 program_name=program_name,
                 result_dir=os.path.abspath(result_dir),
                 prob_modify_consts=0.3,
                 seen=set(),
                 all_rank0=init_pop,
                 finalize_only=int(kw.get('finalize_only', '0')),
                 finalize_filter=kw.get('finalize_filter', ''),
                 loop_perf_only=int(kw.get('loop_perf_only', '0')),
                 run_remotely_on_tablet=int(kw.get('run_remotely_on_tablet', '0')),
                 training_images_directory=kw.get('training_images_directory', os.path.abspath('../images/train/')),
                 cross_validation_directory=kw.get('cross_validation_directory', os.path.abspath('../images/train/')),
                 resume_previous_search=int(kw.get('resume_previous_search', '0')),
                 sample_importance_only=int(kw.get('sample_importance_only', '0')),
                 sample_grid_only=int(kw.get('sample_grid_only', '0')), 
                 run_on_fir=int(kw.get('run_on_fir', '0')), 
                 run_final_generation=int(kw.get('run_final_generation', '0')),
                 use_adaptive=int(kw.get('use_adaptive', '1')),
                 reconstruct_hints=int(kw.get('reconstruct_hints', '0')),
                 reduce_space=int(kw.get('reduce_space', '1')))

    set_sample_lines(p)

    print('\n\n')
    print('Parameters:')
    print('    population_size:', p['population_size'])
    print('    generations:', p['generations'])
    print('    frac_elitism:', p['frac_elitism'])
    print('    frac_mutate:', p['frac_mutate'])
    print('    tournament_size:', p['tournament_size'])
    # print('    loops:', p['loops'])
    print('    program_name:', p['program_name'])
    print('    result_dir:', p['result_dir'])
    print('    prob_modify_consts:', p['prob_modify_consts'])
    print('    seen:', p['seen'])
    print('    finalize_only:', p['finalize_only'])
    print('    finalize_filter:', p['finalize_filter'])
    print('    loop_perf_only:', p['loop_perf_only'])
    print('    run_remotely_on_tablet:', p['run_remotely_on_tablet'])
    print('    training_images_directory:', p['training_images_directory'])
    print('    cross_validation_directory:', p['cross_validation_directory'])
    print('    resume_previous_search:', p['resume_previous_search'])
    print('    sample_importance_only:', p['sample_importance_only'])
    print('    sample_grid_only:', p['sample_grid_only'])
    print('    run_on_fir:', p['run_on_fir'])
    print('    run_final_generation:', p['run_final_generation'])
    print('    use_adaptive:', p['use_adaptive'])
    print('    reconstruct_hints:', p['reconstruct_hints'])
    print('    reduce_space:', p['reduce_space'])
    print('\n\n')
    
    if p['sample_importance_only'] and p['sample_grid_only']:
        print('Cannot have both sample_importance_only and sample_grid_only be true.', file=sys.stderr)
        sys.exit(1)
    
    global foreach_lines
    if p['sample_importance_only']:
        new_foreach_lines = []
        for e in foreach_lines:
            if 'importance' in e[0]:
                new_foreach_lines.append(e)
        foreach_lines = new_foreach_lines
    
    if p['sample_grid_only']:
        new_foreach_lines = []
        for e in foreach_lines:
            if 'sample_grid(' in e[0]:
                new_foreach_lines.append(e)
        foreach_lines = new_foreach_lines
        
    if p['resume_previous_search']:
        gen_name_list = [int(e[3:]) for e in os.listdir(result_dir) if 'gen' in e]
        if 100000 in gen_name_list:
            print('Cannot resume previous search since previous search completed. '
                  'Utilize the -finalize_only option instead.', file=sys.stderr)
            sys.exit(1)
        start_gen_index = max(gen_name_list) if gen_name_list else 0
    
    if p['run_remotely_on_tablet']:
        print "Running timings remotely on the tablet."
        os.system("ssh phablet@"+TABLET_IP+" '(rm -rf "+TABLET_INPUT_IMAGES_DIRECTORY+")'")
        os.system("ssh phablet@"+TABLET_IP+" '(mkdir "+TABLET_INPUT_IMAGES_DIRECTORY+")'")
        os.system('scp '+p['training_images_directory']+'/*.png phablet@'+TABLET_IP+':'+TABLET_INPUT_IMAGES_DIRECTORY)
    
    curve_area_list = []
    
    if not p.finalize_only:
        if os.path.exists(result_dir) and not os.path.samefile(result_dir, '.') and not p['resume_previous_search']:
            shutil.rmtree(result_dir, ignore_errors=True)
        for generation in range(start_gen_index, p.generations):
            T0 = time.time()
            
            if generation == 0:
                pop = initial_population(p)
            elif generation == start_gen_index:
                # Update current generation
                gen_dirname = os.path.join(p.result_dir, 'gen%06d' % generation)
                d = {}
                s = open(os.path.join(gen_dirname, 'current_gen.py'), 'rt').read()
                exec(s, globals(), d)
                pop = d['current_gen']
                
#                if not p['run_on_fir']:
                if True:
                    # Make sure curve_area_list is updated
                    list_of_dirs_with_convergence_values = sorted([ f for f in list_dir_abs(p.result_dir) if 'gen' in path_leaf(f)], key=lambda x: int(path_leaf(x)[3:]))[:-1]
                    
                    for i,e in enumerate(list_of_dirs_with_convergence_values):        
                        curve_area_list.append( (i,float(open(os.path.join(e, 'pareto_over_all_approximations_so_far/area_under_pareto_frontier.txt'),'r').read())) )
                        
                        # Recalculate area_under_pareto_frontier_for_all_generations_so_far.csv for each generation in case we decided to change curve_area_alpha
                        current_generation_dirname = os.path.join(p.result_dir, 'gen%06d' % i)
                        current_generation_pareto_dirname = os.path.join(current_generation_dirname, 'pareto_over_all_approximations_so_far')
                        area_under_pareto_frontier_for_all_generations_so_far_csv_location = os.path.join(current_generation_pareto_dirname, 'area_under_pareto_frontier_for_all_generations_so_far.csv')
                        with open(area_under_pareto_frontier_for_all_generations_so_far_csv_location, 'wt') as f:
                            smoothed_curve_area_list = [ curve_area_list[0][1] ]
                            if len(curve_area_list)>1:
                                for i_0 in range(len(curve_area_list) - 1):
                                    i = i_0+1
                                    smoothed_curve_area_list.append( curve_area_alpha*curve_area_list[i][1] + (1-curve_area_alpha)*smoothed_curve_area_list[i-1] )
                            f.write('Generation, Area Under Curve, Smoothed Area Under Curve (alpha='+str(curve_area_alpha)+') \n')
                            for i,e in enumerate(curve_area_list):
                                f.write(str(e[0])+', '+str(e[1])+', '+str(smoothed_curve_area_list[i])+'\n')
                        # Graph area_under_pareto_frontier_for_all_generations_so_far.csv data for Area Under Curve and Smoothed Area Under Curve
                        commands = '''
set datafile separator ","
set title "Area Under Pareto Curve as Generations Pass"
set ylabel "Area Under Curve"
set xlabel "Generation"
#set ytic 0.1
#set xtic 1
set xr [0:*]
set yr [0:*]

set terminal png size 1200,900
'''
                        plot_command_area_under_curve = '"'+area_under_pareto_frontier_for_all_generations_so_far_csv_location+'" using 1:2 with linespoints title "Area Under Curve" '
                        plot_command_smoothed_area_under_curve = '"'+area_under_pareto_frontier_for_all_generations_so_far_csv_location+'" using 1:3 with linespoints title "Smoothed Area Under Curve" '
                        gnuplot_script_location = os.path.join(current_generation_pareto_dirname, 'script.gnuplot')
                        with open(gnuplot_script_location, 'wt') as f:
                            f.write( commands + '\nset output "area_under_curve.png" \n' + '\nplot '+ plot_command_area_under_curve + '\n')
                        os.system("(cd "+current_generation_pareto_dirname+" && cat "+gnuplot_script_location+" | gnuplot)")
                        with open(gnuplot_script_location, 'wt') as f:
                            f.write( commands + '\nset output "area_under_curve_smoothed.png" \n' + '\nplot '+ plot_command_smoothed_area_under_curve + '\n')
                        os.system("(cd "+current_generation_pareto_dirname+" && cat "+gnuplot_script_location+" | gnuplot)")
                        with open(gnuplot_script_location, 'wt') as f:
                            f.write( commands + '\nset output "area_under_curve_both.png" \n' + '\nplot '+ plot_command_area_under_curve + ', ' +plot_command_smoothed_area_under_curve + '\n')
                        os.system("(cd "+current_generation_pareto_dirname+" && cat "+gnuplot_script_location+" | gnuplot)")
                    assert len(curve_area_list) == start_gen_index, "Did not recompute curve_area_list correctly from .txt files"
                
                # Update p.all_rank0
                load_past_run(p)
            else:
                pop = next_generation(p, pop)
            
            print_generation(generation)
            print('Generated new population')
            sys.stdout.flush()
            
            get_time_error(p, pop, generation)
            print_population(pop)
            track_rank0(p, pop)
            
            # Save Pareto frontier over all the approximations we've explored so far
            dirname = os.path.join(p.result_dir, 'gen%06d' % generation)
            makedirs(dirname)
            pareto_dirname = os.path.join(dirname, 'pareto_over_all_approximations_so_far')
            makedirs(pareto_dirname)
            pareto_frontier = [ p.all_rank0[i] for (i,rank) in enumerate(get_pareto_rank(p.all_rank0)) if rank==0 ] 
            pareto_values = []
            #if not p['run_on_fir']:
            f_pareto_csv = open(os.path.join(pareto_dirname,'pareto.csv'), 'wt')
            f_pareto_csv.write('Approx File, Time, Mean Lab \n')
            for (i,indiv) in enumerate(pareto_frontier):
                filename = os.path.join(pareto_dirname, individual_filename(i))
#                if not p['run_on_fir']:
                f_pareto_csv.write(individual_filename(i)+', '+str(indiv.time)+', '+str(indiv.error)+'\n')
                pareto_values.append((indiv.time,indiv.error))
                with open(filename, 'wt') as f_approx_file:
                    f_approx_file.write(str(indiv)) 
            with open(os.path.join(pareto_dirname,'pareto_so_far.py'), 'wt') as f:
                f.write('pareto_so_far = ' + repr(pareto_frontier))
#            if not p['run_on_fir']:
            f_pareto_csv.close()
#            if not p['run_on_fir']:
            if True:
                # Record current convergence
                CONVERGENCE_MEASURE_TIME_LOWER_BOUND = 0.6
                CONVERGENCE_MEASURE_TIME_UPPER_BOUND = 1.0
                TIME_INDEX = 0
                ERROR_INDEX = 1
                pareto_values_for_area = sorted(pareto_values, key=lambda x:x[TIME_INDEX])
                area_under_pareto_frontier = 0
                if CONVERGENCE_MEASURE_TIME_LOWER_BOUND < pareto_values_for_area[0][TIME_INDEX]:
                    area_under_pareto_frontier = float('inf')
                else:
                    # Clip the right hand side
                    while CONVERGENCE_MEASURE_TIME_UPPER_BOUND <= pareto_values_for_area[-1][TIME_INDEX]:
                        pareto_values_for_area.pop(-1)
                    pareto_values_for_area.append( (1.0,0.0) )
                    # Clip the left hand side
                    while CONVERGENCE_MEASURE_TIME_LOWER_BOUND >= pareto_values_for_area[1][TIME_INDEX]:
                        pareto_values_for_area.pop(0)
                    first_point = pareto_values_for_area[0]
                    second_point = pareto_values_for_area[1]
                    frac = (CONVERGENCE_MEASURE_TIME_LOWER_BOUND-first_point[TIME_INDEX])/(second_point[TIME_INDEX]-first_point[TIME_INDEX])
                    new_first_point = (first_point[TIME_INDEX]+frac*(second_point[TIME_INDEX]-first_point[TIME_INDEX]),first_point[ERROR_INDEX]+frac*(second_point[ERROR_INDEX]-first_point[ERROR_INDEX]))
                    pareto_values_for_area[0] = new_first_point
                    # Calculate area under curve
                    for i in range(len(pareto_values_for_area) - 1):
                        point_a = pareto_values_for_area[i]
                        point_b = pareto_values_for_area[i+1]
                        area = 0.5*(point_a[ERROR_INDEX]+point_b[ERROR_INDEX])*(point_b[TIME_INDEX]-point_a[TIME_INDEX])
                        area_under_pareto_frontier += area
                print('Area Under Pareto Frontier:', area_under_pareto_frontier, '\n')
                # Write pareto area into .txt file
                with open(os.path.join(pareto_dirname, 'area_under_pareto_frontier.txt'), 'wt') as f:
                    f.write(str(area_under_pareto_frontier))
                curve_area_list.append( (generation,area_under_pareto_frontier) )
                # Add pareto area into area_under_pareto_frontier_for_all_generations_so_far.csv
                area_under_pareto_frontier_for_all_generations_so_far_csv_location = os.path.join(pareto_dirname, 'area_under_pareto_frontier_for_all_generations_so_far.csv')
                with open(area_under_pareto_frontier_for_all_generations_so_far_csv_location, 'wt') as f:
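                    # Exponential smoothing: s[i] = alpha * a[i] + (1 - alpha) * s[i-1]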
                    smoothed_curve_area_list = [ curve_area_list[0][1] ]
                    if len(curve_area_list)>1:
                        for i_0 in range(len(curve_area_list) - 1):
                            i = i_0+1
                            smoothed_curve_area_list.append( curve_area_alpha*curve_area_list[i][1] + (1-curve_area_alpha)*smoothed_curve_area_list[i-1] )
                    f.write('Generation, Area Under Curve, Smoothed Area Under Curve (alpha='+str(curve_area_alpha)+') \n')
                    for i,e in enumerate(curve_area_list):
                        f.write(str(e[0])+', '+str(e[1])+', '+str(smoothed_curve_area_list[i])+'\n')
#            if not p['run_on_fir']: 
            if True:
                # Graph area_under_pareto_frontier_for_all_generations_so_far.csv data for Area Under Curve and Smoothed Area Under Curve
                commands = '''
set datafile separator ","
set title "Area Under Pareto Curve as Generations Pass"
set ylabel "Area Under Curve"
set xlabel "Generation"
#set ytic 0.1
#set xtic 1
set xr [0:*]
set yr [0:*]

set terminal png size 1200,900
'''
                plot_command_area_under_curve = '"'+area_under_pareto_frontier_for_all_generations_so_far_csv_location+'" using 1:2 with linespoints title "Area Under Curve" '
                plot_command_smoothed_area_under_curve = '"'+area_under_pareto_frontier_for_all_generations_so_far_csv_location+'" using 1:3 with linespoints title "Smoothed Area Under Curve" '
                gnuplot_script_location = os.path.join(pareto_dirname, 'script.gnuplot')
                with open(gnuplot_script_location, 'wt') as f:
                    f.write( commands + '\nset output "area_under_curve.png" \n' + '\nplot '+ plot_command_area_under_curve + '\n')
                os.system("(cd "+pareto_dirname+" && cat "+gnuplot_script_location+" | gnuplot)")
                with open(gnuplot_script_location, 'wt') as f:
                    f.write( commands + '\nset output "area_under_curve_smoothed.png" \n' + '\nplot '+ plot_command_smoothed_area_under_curve + '\n')
                os.system("(cd "+pareto_dirname+" && cat "+gnuplot_script_location+" | gnuplot)")
                with open(gnuplot_script_location, 'wt') as f:
                    f.write( commands + '\nset output "area_under_curve_both.png" \n' + '\nplot '+ plot_command_area_under_curve + ', ' +plot_command_smoothed_area_under_curve + '\n')
                os.system("(cd "+pareto_dirname+" && cat "+gnuplot_script_location+" | gnuplot)")
                
                # Copy rank0.py for each generation
                shutil.copyfile( os.path.join(p.result_dir, 'rank0.py') , os.path.join(dirname, 'rank0_gen%06d.py' % generation) )
            if p['reduce_space']:
                reduce_space_generation(p, pop, generation)

            print('Tuning time for generation %d: %f secs' % (generation, time.time() - T0))
            print()
            sys.stdout.flush()
    else:
        load_past_run(p)
    
    if p['run_final_generation']:
        generation = FINAL_GENERATION_INDEX
        print('=' * 80)
        print('Computing Time and Error on Cross Validation Images')
        print('=' * 80)
        if (p['run_remotely_on_tablet']):
            os.system("ssh phablet@"+TABLET_IP+" '(rm -rf "+TABLET_INPUT_IMAGES_DIRECTORY+")'")
            os.system("ssh phablet@"+TABLET_IP+" '(mkdir "+TABLET_INPUT_IMAGES_DIRECTORY+")'")
            os.system('scp '+p['cross_validation_directory']+'/*.png phablet@'+TABLET_IP+':'+TABLET_INPUT_IMAGES_DIRECTORY)
    #    get_time_error(p, p.all_rank0, FINAL_GENERATION_INDEX, False) # uncomment this line if we want our GA to get the pareto frontier of all approximations explored by our search using our testing/cross validation images
        rank = get_pareto_rank(p.all_rank0)
        p.all_rank0 = [x for (i, x) in enumerate(p.all_rank0) if rank[i] == 0]
        p.all_rank0 = sorted(p.all_rank0, key=lambda x: x.error)
        print_generation(generation, ' (Pareto frontier over all generations, size is %d)' % len(p.all_rank0))
        get_time_error(p, p.all_rank0, FINAL_GENERATION_INDEX, False)
    print "\n\nGA complete."
Example #36
		print(e)
	return p.pid

# start the attack by spawning instances for each client on the network that's not in the whitelist
def start_attack(output, iface, bssid):
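	# Re-scan every 5 seconds and spawn an attack against any newly seen client
	# that is neither the AP itself nor on the whitelist.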
	while True:
		clients = get_clients(output)
		diff = list(set(clients) - set(attack_clients))
		for item in diff:
			if item != bssid and item not in args.whitelist:
				pid = spawn_attack(iface, item, bssid)
				attack_pids.append(pid)
		time.sleep(5)

if __name__ == "__main__":
	args = parse_args()
	print("Bringing up interface \"" + args.interface + "\"...")
	initialise_interface()
	print("> OK")

	print("Finding BSSID for \"" + args.essid + "\"...")
	info = get_ap_bssid()
	if not info:
		print("Failed to find BSSID for \"" + args.essid + "\"! Check that the ESSID is correct and you're in range.")
		sys.exit()
	print("> Found \"" + args.essid + "\": BSSID=" + info.bssid + ", Channel=" + str(info.channel) + ", Power=" + str(info.power))

	print("Creating monitoring interface on channel " + str(info.channel) + "...")
	mon = create_monitor_interface(info.channel)
	if not mon:
		print("Failed to create monitoring interface!")
Example #37
def create_test_json(master_test_list):
    test_log_file = os.path.join(parse_args().workdir, 'data',
                                 'all_tests.json')
    with open(test_log_file, 'w') as f:
        json.dump(master_test_list, f, sort_keys=True, indent=True)