Example #1
def main(data_dir, config_dir, output_dir):
    try:
        config, msg = validate(config_dir)
        if config is None:
            write_status(output_dir, False, msg)
            return 1

        summary = {}
        for model in sorted(config['models']):
            summary[model] = []
            # the script will not be run if there is an error
            cmd_id = 0
            for _, _, exp_config in parse_commands(model, config):
                for combo in unfold_settings(exp_config):
                    stats, msg = parse_data_file(exp_config['type'], model,
                                                 config, combo, data_dir,
                                                 cmd_id)
                    if stats is None:
                        write_status(output_dir, False, msg)
                        return 1
                    stats['command_id'] = cmd_id
                    summary[model].append(stats)
                cmd_id += 1
        write_json(output_dir, 'data.json', summary)
        write_status(output_dir, True, 'success')
    except Exception as e:
        write_status(output_dir, False, render_exception(e))
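
The examples in this listing rely on a handful of I/O helpers (write_json, read_json, write_status, render_exception) whose definitions are not included. Below is a minimal sketch of the variant used by the dashboard-style examples (directory plus filename arguments); the signatures vary in some entries (e.g. common.write_json takes the data first in Examples #7, #8 and #11), so treat this as an assumption rather than the actual implementation.

import json
import os
import traceback

def write_json(dir_path, filename, data):
    # Serialize `data` as JSON into dir_path/filename.
    os.makedirs(dir_path, exist_ok=True)
    with open(os.path.join(dir_path, filename), 'w') as f:
        json.dump(data, f, indent=2)

def read_json(dir_path, filename):
    # Return the parsed JSON file, or None if it does not exist.
    path = os.path.join(dir_path, filename)
    if not os.path.exists(path):
        return None
    with open(path) as f:
        return json.load(f)

def write_status(output_dir, success, message):
    # Each pipeline stage reports its outcome through a status.json file.
    write_json(output_dir, 'status.json', {'success': success, 'message': message})

def render_exception(e):
    # Turn an exception into a readable string for the status message.
    return ''.join(traceback.format_exception(type(e), e, e.__traceback__))
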
Example #2
def report_results(model_name, i, config, specific_params, num_retries,
                   out_file, use_dtr, trial_run, trial_run_outfile,
                   results_queue):
    """
    Given a queue of results, do all the necessary reporting
    """
    measurements = []
    while not results_queue.empty():
        measurements.append(results_queue.get())

    # all we care about for a trial run is max memory usage
    if trial_run:
        write_json(
            os.getcwd(), trial_run_outfile,
            {'mem': max(map(lambda data: data['total_mem'], measurements))})
        return

    memory_budget = specific_params.get('memory_budget', -1)
    dry_run = config['dry_run']
    save_log = use_dtr and specific_params.get(
        'save_logs', config['save_logs']) and i == config['n_inputs'] - 1

    if save_log:
        save_trial_log(config['log_dest'],
                       config.get('simrd_config', None),
                       model_name,
                       specific_params,
                       is_baseline=(memory_budget == -1))

    # clean up after ourselves
    delete_logs()

    with open(out_file, 'a', newline='') as csvfile:
        from pt_trial_util import create_csv_writer
        writer = create_csv_writer(csvfile, specific_params)
        for j in range(len(measurements)):
            data = measurements[j]
            # do unit conversions now: times in ms,
            # memory in MB
            entry = {
                'num_retries': num_retries,
                'time': data['time'] * 1e3,
                'sync_time': data['sync_time'] * 1e3,
                # pytorch's cuda elapsed time is already in ms
                'gpu_time': float(data['gpu_time']),
                # 'cuda_time' : float(data['cuda_time']) * 1e-6,
                'input_mem': data['input_mem'] * 1e-6,
                'model_mem': data['model_mem'] * 1e-6,
                'total_mem': data['total_mem'] * 1e-6,
                'memory_budget': memory_budget,
                # profiling (reported in nanoseconds)
                'base_compute_time': data['base_compute_time'] * 1e-6,
                'remat_compute_time': data['remat_compute_time'] * 1e-6,
                'search_time': data['search_time'] * 1e-6,
                'cost_time': data['cost_time'] * 1e-6,
                'rep': j - dry_run,
                'input': i,
                **specific_params
            }
            writer.writerow(entry)
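
For reference, the entries report_results drains from results_queue are plain dicts; their fields and pre-conversion units can be read off the conversion block above. A hypothetical producer might fill the queue as sketched here (the values are illustrative only, not taken from the original code):

from multiprocessing import Queue

results_queue = Queue()
# One entry per measured repetition. Units before conversion:
# times in seconds, memory in bytes, profiling counters in nanoseconds,
# gpu_time already in milliseconds.
results_queue.put({
    'time': 0.123,
    'sync_time': 0.130,
    'gpu_time': 120.0,
    'input_mem': 8.4e6,
    'model_mem': 4.2e7,
    'total_mem': 9.1e8,
    'base_compute_time': 1.1e8,
    'remat_compute_time': 2.0e7,
    'search_time': 5.0e6,
    'cost_time': 3.0e6,
})
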
Example #3
def main(config_dir, output_dir):
    """Run the experiment."""
    config, msg = validate(config_dir)
    if config is None:
        write_status(output_dir, False, msg)
        return 1

    try:
        # the experiment involves RPC calls that could potentially hang so we have a timeout on our end too
        killswitch = Timer(config.get('timeout', 300),
                           lambda: timeout_failure(output_dir))
        killswitch.start()

        result = {}
        config_iter = itertools.product(config['models'], config['targets'],
                                        config['devices'])
        for (model, target, device) in config_iter:
            # TODO(weberlo): There has to be some idiom to get rid of this boilerplate.
            if model not in result:
                result[model] = {}
            if target not in result[model]:
                result[model][target] = {}
            if device not in result[model][target]:
                result[model][target][device] = {}
            result[model][target][device] = run_single(model, target, device,
                                                       config)

        killswitch.cancel()
    except Exception as e:
        write_status(output_dir, False,
                     'Exception encountered:\n' + render_exception(e))
        return 1

    write_json(output_dir, 'data.json', result)
    write_status(output_dir, True, 'success')
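
The killswitch in Example #3 presumes a timeout_failure helper that records a failed run if the RPC calls hang past the timeout. A plausible sketch, assuming the write_status helper outlined after Example #1 and that the process should be terminated outright (the main thread may be blocked inside a hung RPC call):

import os

def timeout_failure(output_dir):
    # Runs on the Timer thread once the time budget is exhausted.
    write_status(output_dir, False, 'Experiment timed out')
    os._exit(1)  # hard exit, since the main thread may never return
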
Example #4
def main(argv):
    args = parse_arguments(argv)

    if args.save_as is None:
        out_dir = os.path.join(args.out_dir, base64_encode(args.keyword))
    else:
        out_dir = os.path.join(args.out_dir, base64_encode(args.save_as))
    downloader = NiconicoDownloader(out_dir)

    auth = read_json(args.auth_json)
    auth = dict() if auth is None else auth
    while True:
        if 'niconico' not in auth:
            auth['niconico'] = {
                'username': input('Username >> '),
                'password': getpass('Password >> '),
            }

        try:
            downloader.authenticate(**auth['niconico'])
            write_json(args.auth_json, auth)
            break
        except NoSuchElementException:
            logger.error('Failed to login.')
            del auth['niconico']

    downloader(args.keyword)
Example #5
def main(config_dir, output_dir):
    """Run the experiment."""
    config, msg = validate(config_dir)
    if config is None:
        write_status(output_dir, False, msg)
        return 1

    try:
        result = {}
        config_iter = itertools.product(
                config['models'],
                config['targets'],
                config['devices'])
        for (model, target, device) in config_iter:
            # TODO(weberlo): There has to be some idiom to get rid of this boilerplate.
            if model not in result:
                result[model] = {}
            if target not in result[model]:
                result[model][target] = {}
            if device not in result[model][target]:
                result[model][target][device] = {}
            result[model][target][device] = run_single(model, target, device, config)
    except Exception as e:
        write_status(output_dir, False, 'Exception encountered:\n' + render_exception(e))
        return 1

    write_json(output_dir, 'data.json', result)
    write_status(output_dir, True, 'success')
Example #6
def analyze_experiment(info, experiments_dir, tmp_data_dir,
                       date_str, exp_name):
    exp_dir = os.path.join(experiments_dir, exp_name)

    exp_data_dir = os.path.join(tmp_data_dir, exp_name)
    tmp_analysis_dir = os.path.join(exp_data_dir, 'analysis')
    idemp_mkdir(tmp_analysis_dir)

    analyzed_data_dir = info.exp_data_dir(exp_name)
    if not os.path.exists(analyzed_data_dir):
        idemp_mkdir(analyzed_data_dir)

    subprocess.call([os.path.join(exp_dir, 'analyze.sh'),
                     info.exp_config_dir(exp_name), exp_data_dir, tmp_analysis_dir],
                    cwd=exp_dir)

    status = validate_status(tmp_analysis_dir)

    # read the analyzed data, append a timestamp field, and copy over to the permanent data dir
    if status['success']:
        data_exists = check_file_exists(tmp_analysis_dir, 'data.json')
        if not data_exists:
            status = {'success': False, 'message': 'No data.json file produced by {}'.format(exp_name)}
        else:
            # collect data to dump to data_*.json
            dump_data = {
                'timestamp'  : date_str,
            }
            dump_data.update(read_json(tmp_analysis_dir, 'data.json'))
            # fetch time spent on the experiment
            dump_data.update(get_timing_info(info, exp_name))
            write_json(analyzed_data_dir, 'data_{}.json'.format(date_str), dump_data)
    
    info.report_exp_status(exp_name, 'analysis', status)
    return status['success']
Example #7
def add_value_to_node():
    nodes = common.read_json(OUTPUT_DIR, NODE_FILE_NAME)
    edges = common.read_json(OUTPUT_DIR, EDGE_FILE_NAME)
    for node in nodes:
        count = len(list(filter(lambda x: x['from'] == node['id'], edges)))
        count += len(list(filter(lambda x: x['to'] == node['id'], edges)))
        node['value'] = count
    common.write_json(nodes, OUTPUT_DIR, NODE_FILE_NAME)
Example #8
def add_position_to_node():
    nodes = common.read_json(OUTPUT_DIR, NODE_FILE_NAME)
    positions = common.read_json(OUTPUT_DIR, POSITION_FILE_NAME)
    for node in nodes:
        position = positions[str(node['id'])]
        node['x'] = position['x']
        node['y'] = position['y']
    common.write_json(nodes, OUTPUT_DIR, NODE_FILE_NAME)
Example #9
def munge_queries(set_id):
    print "Munging set_id:", set_id
    common.use_set(set_id)
    model = munger.munge_model(common.read_most_recent('model'))
    state = munger.munge_state(common.read_most_recent('state'))

    results = {"state":state, "model":model}
    common.write_json(results, "results.json")
Example #10
def main(data_dir, config_dir, output_dir):
    config, msg = validate(config_dir)
    if config is None:
        write_status(output_dir, False, msg)
        return 1

    # No further analysis is required beyond the raw stats reported by the VTA
    # simulator, so we just propagate the data to the next stage of the
    # pipeline.
    data = read_json(data_dir, 'data.json')
    write_json(output_dir, 'data.json', data)
    write_status(output_dir, True, 'success')
Example #11
def make_topology():
    entries = common.read_json(OUTPUT_DIR, ENTRIES_FILE_NAME)

    nodes = []
    edges = []

    for entry in entries:
        nodes.append(make_node(entry))
        edges += make_edges(entry, entries)

    common.write_json(nodes, OUTPUT_DIR, NODE_FILE_NAME)
    common.write_json(edges, OUTPUT_DIR, EDGE_FILE_NAME)
Example #12
def main(data_dir, config_dir, output_dir):
    try:
        config, msg = validate_trials_config(config_dir)
        if config is None:
            write_status(output_dir, False, msg)
            return 1

        summary = {}

        baseline_dict = {}

        for model in sorted(config['models']):
            summary[model] = []
            baseline_dict[model] = {}
            # the script will not be run if there is an error
            cmd_id = 0
            for _, _, exp_config in parse_commands(model, config):
                baseline_params = None
                for specific_params in unfold_settings(exp_config):
                    batch_size = specific_params['batch_size']
                    if specific_params['type'] == 'baseline':
                        baseline_dict[model][batch_size] = {
                            'type': 'baseline',
                            'specific_params': specific_params,
                            'cmd_id': cmd_id
                        }

                    # if there is a corresponding baseline,
                    # let's match using the dict
                    baseline_params = None
                    if (batch_size in baseline_dict[model]
                            and specific_params['type'] != 'baseline'):
                        baseline_params = baseline_dict[model][batch_size]

                    stats, msg = parse_data_file(
                        exp_config['type'],
                        model,
                        config,
                        specific_params,
                        data_dir,
                        cmd_id,
                        baseline_params=baseline_params)
                    if stats is None:
                        write_status(output_dir, False, msg)
                        return 1
                    stats['command_id'] = cmd_id
                    summary[model].append(stats)
                cmd_id += 1
        write_json(output_dir, 'data.json', summary)
        write_status(output_dir, True, 'success')
    except Exception as e:
        write_status(output_dir, False, render_exception(e))
Example #13
def main(argv):
    args = parse_arguments(argv)

    dirname = os.path.basename(args.input_dir)
    valid_dir = os.path.join(args.output_dir, dirname, 'valid')
    invalid_dir = os.path.join(args.output_dir, dirname, 'invalid')
    os.makedirs(valid_dir, exist_ok=True)
    os.makedirs(invalid_dir, exist_ok=True)
    removed_json = os.path.join(args.output_dir, dirname, '.cache.json')

    names = get_filenames(args.input_dir)
    valid_names = get_filenames(valid_dir)
    invalid_names = get_filenames(invalid_dir)
    removed_names = read_json(removed_json)
    removed_names = [] if removed_names is None else removed_names
    names = sorted(
        set(names) - set(valid_names) - set(invalid_names) -
        set(removed_names))

    # Instruction
    sys.stdout.write('Key input instructions:\n'
                     'j: Accept current image\n'
                     'k: Reject current image\n'
                     'u: Undo recent validation\n'
                     'd: Exclude image \n'
                     'q: Quit validation\n')

    i = 0
    while i < len(names):
        path = os.path.join(args.input_dir, names[i])
        key = show_image(path, args.size)

        if key == KeyStatus.UNDO and i > 1:
            i -= 1
            if os.path.exists(os.path.join(valid_dir, names[i])):
                os.remove(os.path.join(valid_dir, names[i]))
            elif os.path.exists(os.path.join(invalid_dir, names[i])):
                os.remove(os.path.join(invalid_dir, names[i]))
            else:
                removed_names.pop()
        elif key == KeyStatus.OK:
            shutil.copyfile(path, os.path.join(valid_dir, names[i]))
            i += 1
        elif key == KeyStatus.FAIL:
            shutil.copyfile(path, os.path.join(invalid_dir, names[i]))
            i += 1
        elif key == KeyStatus.REMOVE:
            removed_names.append(names[i])
            write_json(removed_json, removed_names)
            i += 1
        else:
            exit()
Example #14
def process_score(info, score_metric, data_dir, graph_dir, timestamp):
    data = score_metric.compute_score(info)
    data['timestamp'] = timestamp
    write_json(data_dir, 'data_{}.json'.format(timestamp), data)

    # graphs failing is not a fatal error, just an inconvenience
    try:
        score_metric.score_graph(data, graph_dir)
        all_data = sort_data(data_dir)
        score_metric.longitudinal_graphs(all_data, graph_dir)
    except Exception as e:
        print(render_exception(e))
    finally:
        return score_metric.score_text(data)
Example #15
def main():
    print "Processing"

    movies = common.read_json("tunefind.json")

    pool = Pool(5)
    results = [pool.apply_async(process_movie, [m]) for m in movies]

    updated_movies = []
    for w in results:
        w.wait()
        updated_movies.append(w.get())

    common.write_json("musicbrainz.json", updated_movies)
Example #16
def main(argv):
    args = parse_arguments(argv)
    config = load_config(args.config)

    ctime = time.strftime('%y%m%d_%H%M')
    dst_dir = os.path.join(args.output_dir, '%s_%s2%s' % (ctime, *args.labels))
    os.makedirs(dst_dir, exist_ok=True)
    write_json(os.path.join(dst_dir, 'config.json'), config)

    datasets = setup_dataset(config, args.labels[0], args.labels[1])
    models = setup_model(config)
    trainer = setup_trainer(config, dst_dir, args.gpu,
                            datasets, models, args.labels)
    trainer.run(1000, args.max_epoch)
Example #17
def delete_duplication():
    entries = common.read_json(OUTPUT_DIR, ENTRIES_FILE_NAME)
    fixed_entries = []
    titles = []
    index = 1
    for entry in entries:
        title = entry['title']
        if title in titles: continue
        fixed = entry
        fixed['id'] = index
        fixed_entries.append(fixed)
        titles.append(title)
        index += 1

    common.write_json(fixed_entries, OUTPUT_DIR, 'entries_fixed.json')
Example #18
def log_error(experiment_name, model_name, specific_params, inp, err_msg,
              path_prefix):
    err_info = {'input': inp, 'msg': err_msg}

    logged_errors = {}
    if check_file_exists(path_prefix, 'errors.json'):
        logged_errors = read_json(path_prefix, 'errors.json')
    if experiment_name not in logged_errors:
        logged_errors[experiment_name] = {}
    if model_name not in logged_errors[experiment_name]:
        logged_errors[experiment_name][model_name] = []
    logged_errors[experiment_name][model_name].append({
        'err_info': err_info,
        **specific_params
    })
    write_json(path_prefix, 'errors.json', logged_errors)
Example #19
def load_from_web():
    print "Loading from Web"

    movies = common.read_json(JSON_IN_FILE)

    pool = Pool(5)
    worker = [pool.apply_async(process_movie, [m]) for m in movies]

    imdb_movies = []
    for w in worker:
        w.wait()
        result = w.get()
        if result is not None:
            imdb_movies.append(result)

    common.write_json(JSON_OUT_FILE, imdb_movies)
Example #20
def load_from_web():
    print "Loading from Web"

    movies = common.read_json(JSON_IN_FILE)

    pool = Pool(5)
    worker = [pool.apply_async(process_movie, [m]) for m in movies]

    imdb_movies = []
    for w in worker:
        w.wait()
        result = w.get()
        if result is not None:
            imdb_movies.append(result)

    common.write_json(JSON_OUT_FILE, imdb_movies)
Example #21
def get_video_info(video_id):
    video_info_list = []
    items = youtube.videos().list(part="snippet, contentDetails",
                                  id=video_id).execute()["items"][0]
    resp_save_dest = "data/resp/yt/v/"
    timestamp = common.now_iso(1)  # UTC+0
    common.make_dir(resp_save_dest)
    common.write_json(resp_save_dest + timestamp + " " + video_id + ".json",
                      items)
    video_info_list.append(items["snippet"]["channelTitle"])  # 0
    video_info_list.append(items["snippet"]["channelId"])  # 1
    video_info_list.append(items["snippet"]["publishedAt"])  # 2
    video_info_list.append(items["snippet"]["title"])  # 3
    video_info_list.append(items["snippet"]["description"])  # 4
    video_info_list.append(items["contentDetails"]["duration"])  # 5
    return video_info_list  # Throw video_info_list to fileproc.
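
Example #21 assumes an already-constructed youtube client. With the google-api-python-client library it would typically be built as sketched below (the API key name is a placeholder, not taken from the original code):

from googleapiclient.discovery import build

YOUTUBE_API_KEY = 'YOUR_API_KEY'  # placeholder credential
youtube = build('youtube', 'v3', developerKey=YOUTUBE_API_KEY)
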
Example #22
def main(config_dir, home_dir, output_dir):
    info = DashboardInfo(home_dir)
    conf = read_config(config_dir)

    data_dir = os.path.join(output_dir, 'data')
    graph_dir = os.path.join(output_dir, 'graphs')
    idemp_mkdir(data_dir)
    idemp_mkdir(graph_dir)

    timestamp = get_timestamp()

    score_confs = conf['score_confs']
    metrics = set(score_confs.keys())
    metrics = metrics.intersection(set(SCORE_METRICS.keys()))

    if not metrics:
        write_status(output_dir, True, 'No scores to report')
        return 0

    score_data = {}
    score_reports = {}
    for metric in metrics:
        score_metric = SCORE_METRICS[metric](score_confs[metric])
        valid, msg = check_prerequisites(info, score_metric.prereq())
        if not valid:
            write_status(output_dir, False, msg)
            return 1

        score_data_dir = os.path.join(data_dir, metric)
        score_graph_dir = os.path.join(graph_dir, metric)
        idemp_mkdir(score_data_dir)
        idemp_mkdir(score_graph_dir)

        try:
            report = process_score(info, score_metric, score_data_dir,
                                   score_graph_dir, timestamp)
            score_reports[metric] = report
        except Exception as e:
            write_status(
                output_dir, False,
                'Encountered exception while scoring {}:\n{}'.format(
                    metric, render_exception(e)))
            return 1

    report = {'title': 'Metric Scores', 'value': format_scores(score_reports)}
    write_json(output_dir, 'report.json', report)
    write_status(output_dir, True, 'success')
Example #23
def crawl():
    segs = get_entry_urls()

    logger.write_log("start crawling")

    try:
        common.create_dir(OUTPUT_DIR)
        entries = []
        for entry in crawl_entries(segs):
            if (not entry):
                logger.write_log("error occurred")
                return
            entries.append(entry)
            common.write_json(entries, OUTPUT_DIR, ENTRIES_FILE_NAME)
    except Exception:
        logger.write_log("error occurred")
        logger.write_error(traceback.format_exc())
    finally:
        logger.write_log("end crawling")
Example #24
def load_from_web():
    print ""
    print "Loading from Web..."

    request = urllib2.Request(EP_TUNEFIND_MOVIES)
    add_header(request)
    moviedata = parse_response(urllib2.urlopen(request))

    count = 0
    movies = []
    for m in moviedata["movies"]:
        count += 1
        print "#%02d: %s" % (count, m["name"])

        movie = {"title": m["name"], "soundtrack": get_songs(m['id'])}
        movies.append(movie)

        # One request per second
        sleep(1.1)

    common.write_json(JSON_OUT_FILE, movies)
Example #25
def load_from_web():
    print "Loading from Web..."

    network = pylast.LastFMNetwork(api_key=API_KEY, api_secret=API_SECRET)
    movies = common.read_json(JSON_IN_FILE)

    song_chunks = []
    for m in movies:
        if len(m["soundtrack"]) > 0:
            song_chunks.append(m["soundtrack"])

    pool = Pool(5)
    worker = [pool.apply_async(process_songs, [chunk, network]) for chunk in song_chunks]

    lastfm_songs = []
    for w in worker:
        w.wait()
        for s in w.get():
            lastfm_songs.append(s)

    common.write_json(JSON_OUT_FILE, lastfm_songs)
Example #26
def load_from_web():
    print ""
    print "Loading from Web..."

    request = urllib2.Request(EP_TUNEFIND_MOVIES)
    add_header(request)
    moviedata = parse_response(urllib2.urlopen(request))

    count = 0
    movies = []
    for m in moviedata["movies"]:
        count += 1
        print "#%02d: %s" % (count, m["name"])

        movie = {"title": m["name"], "soundtrack": get_songs(m['id'])}
        movies.append(movie)

        # One request per second
        sleep(1.1)

    common.write_json(JSON_OUT_FILE, movies)
Example #27
def match():
    import deepmatching_wrapper as dm
    import cv2

    candidate_matching_database = common.load_pickle(Path("temp/candidate_matching_database.pickle"))

    common.prepare_clean_dir(Path("output/"))
    common.prepare_clean_dir(Path("output/images/"))

    output = {}
    for query_file, candidates in candidate_matching_database.items():
        query_name = Path(query_file).stem
        matching_result = []
        for target_class_name, target_images in candidates.items():
            for i, (target_path, similarity) in enumerate(target_images):
                print("Matching", query_file, "with target image", target_path)

                matches, name1, name2, qw, qh, tw, th, img1, img2 = dm.match(query_file, target_path)
                src_pts = np.float32([[m[0], m[1]] for m in matches])
                dst_pts = np.float32([[m[2], m[3]] for m in matches])

                i = 0
                inlier = []

                M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, setting.RANSAC_THRESHOLD)
                for index, m in enumerate(mask):
                    if np.isclose(m, 1):
                        i += 1
                        inlier.append(matches[index])

                output_name = "%s_%s_%02d.jpg" % (query_name, target_class_name, i)
                dm.draw(img1, img2, inlier, Path("output/images/") / output_name)

                matching_result.append({
                    "class_name": target_class_name,
                    "inlier": len(inlier)
                })
        output[query_file.name] = sorted(matching_result, key=lambda x: x["inlier"], reverse=True)

    common.write_json(Path("output/result.json"), output)
Example #28
    def extract_face(self, img, basename):
        filename = '%s_%s.jpg' % (basename, '%02d')
        rects = self._detector(img)
        self._rects[basename] = list()

        if len(rects) > 0:
            logger.info('Extract face from "%s"', basename)
        src_h, src_w = img.shape[:2]
        for i, (x, y, w, h) in enumerate(rects):
            cnt_x, cnt_y = x + w // 2, y + h // 2
            cnt_d = int(max(w, h) * (1.0 + self._margin) * 0.5)
            x0, y0 = max(cnt_x - cnt_d, 0), max(cnt_y - cnt_d, 0)
            x1, y1 = min(cnt_x + cnt_d, src_w), min(cnt_y + cnt_d, src_h)

            if x1 - x0 < cnt_d * 2:
                if x0 > 0:
                    x0 = max(x1 - cnt_d * 2, 0)
                elif x1 < src_w:
                    x1 = min(x0 + cnt_d * 2, src_w)
            if y1 - y0 < cnt_d * 2:
                if y0 > 0:
                    y0 = max(y1 - cnt_d * 2, 0)
                elif y1 < src_h:
                    y1 = min(y0 + cnt_d * 2, src_h)

            if x1 - x0 < cnt_d * 2 and x1 - x0 < y1 - y0:
                new_d = x1 - x0
                y0 = cnt_y - new_d // 2
                y1 = y0 + new_d
            elif y1 - y0 < cnt_d * 2 and x1 - x0 > y1 - y0:
                new_d = y1 - y0
                x0 = cnt_x - new_d // 2
                x1 = x0 + new_d

            self._save_extract_face(img[y0:y1, x0:x1, :], filename % i)
            self._rects[basename].append([int(x0), int(x1), int(y0), int(y1)])
        write_json(os.path.join(self.dst_dir, self._CACHE_FILENAME),
                   self._rects)
Example #29
def main(data_dir, config_dir, output_dir):
    config, msg = validate_config(config_dir)
    if config is None:
        write_status(output_dir, False, msg)
        return 1

    devs = config['devices']
    listing_settings = generate_listing_settings(config)

    ret = {}
    for dev in devs:
        ret[dev] = {}
        for listing, settings in listing_settings.items():
            if not use_networks:
                query = generate_data_query(config, dev, settings)
                summary, success, msg = trials_stat_summary(
                    data_dir, *query)
                if not success:
                    write_status(output_dir, False, msg)
                    return 1

                ret[dev][listing] = summary['mean']
                add_detailed_summary(ret, summary, dev, listing)
                continue

            ret[dev][listing] = {}
            for network in config['networks']:
                query = generate_data_query(config, dev, network, settings)
                summary, success, msg = trials_stat_summary(
                    data_dir, *query)
                if not success:
                    write_status(output_dir, False, msg)
                    return 1

                ret[dev][listing][network] = summary['mean']
                add_detailed_summary(ret, summary, dev, listing, network)
    write_json(output_dir, 'data.json', ret)
    write_status(output_dir, True, 'success')
Example #30
def process_telemetry_statistics(info,
                                 exp_name,
                                 output_dir,
                                 time_str,
                                 cpu_stat_parser=parse_cpu_stat,
                                 gpu_stat_parser=parse_gpu_stat):
    '''
    Collect telemetry statistics and write them to the results directory.
    Note: the parsing logic in this file is specialized for telemetry
          collected on pipsqueak and is not guaranteed to work on other
          platforms.
    '''
    telemetry_output_dir = info.subsys_telemetry_dir(exp_name)
    if not os.path.exists(telemetry_output_dir):
        idemp_mkdir(telemetry_output_dir)
    data_dir = os.path.join(output_dir, f'telemetry/{exp_name}')
    cpu_telemetry_dir = os.path.join(data_dir, 'cpu')
    gpu_telemetry_dir = os.path.join(data_dir, 'gpu')
    write_json(os.path.join(telemetry_output_dir, 'gpu'),
               f'gpu-{time_str}.json',
               gpu_stat_parser(gpu_telemetry_dir, time_str))
    write_json(os.path.join(telemetry_output_dir, 'cpu'),
               f'cpu-{time_str}.json',
               cpu_stat_parser(cpu_telemetry_dir, time_str))
Example #31
def load_from_web():
    print "Loading from Web..."

    start_date = datetime.strptime(STARTDATE, "%d.%m.%Y")
    end_date = datetime.strptime(ENDDATE, "%d.%m.%Y")

    date_delta = timedelta(days=7)
    current_date = start_date

    days = []
    while current_date < end_date:
        days.append(current_date)
        current_date = current_date + date_delta

    pool = Pool(5)
    results = [pool.apply_async(get_charts, [d]) for d in days]

    charts = []
    for w in results:
        w.wait()
        charts.append(w.get())

    common.write_json(JSON_OUT_FILE, charts)
Example #32
def extend_simrd_config(dest_dir, sim_conf_filename, model_name,
                        specific_params, log_name):
    if not check_file_exists(dest_dir, sim_conf_filename):
        prepare_out_file(dest_dir, sim_conf_filename)
        write_json(dest_dir, sim_conf_filename, dict())

    conf = read_json(dest_dir, sim_conf_filename)
    if model_name not in conf:
        conf[model_name] = []
    conf[model_name].append({
        'name': model_util.get_model_family(model_name),
        'batch_size': str(specific_params['batch_size']),
        'layers': specific_params.get('layers',
                                      model_util.get_model_layers(model_name)),
        'type': model_util.get_model_type(model_name),
        'log': log_name,
        'has_start': True
    })
    write_json(dest_dir, sim_conf_filename, conf)
Example #33
def extend_simrd_config(dest_dir, sim_conf_filename, model_name,
                        specific_params, log_name):
    import model_util
    if not check_file_exists(dest_dir, sim_conf_filename):
        prepare_out_file(dest_dir, sim_conf_filename)
        write_json(dest_dir, sim_conf_filename, dict())

    conf = read_json(dest_dir, sim_conf_filename)
    if model_name not in conf:
        conf[model_name] = []
    name = model_util.format_model_name(model_name, specific_params)
    conf[model_name].append({
        'name': name,
        'title': name,
        'desc': model_util.format_input_description(model_name, specific_params),
        'log': log_name,
        'has_start': True
    })
    write_json(dest_dir, sim_conf_filename, conf)
Example #34
def load_from_web():
    print "Loading from Web..."

    network = pylast.LastFMNetwork(api_key=API_KEY, api_secret=API_SECRET)
    movies = common.read_json(JSON_IN_FILE)

    song_chunks = []
    for m in movies:
        if len(m["soundtrack"]) > 0:
            song_chunks.append(m["soundtrack"])

    pool = Pool(5)
    worker = [
        pool.apply_async(process_songs, [chunk, network])
        for chunk in song_chunks
    ]

    lastfm_songs = []
    for w in worker:
        w.wait()
        for s in w.get():
            lastfm_songs.append(s)

    common.write_json(JSON_OUT_FILE, lastfm_songs)
Example #35
def write_tasklist(path, taglist, tasklist):
    out = []
    for x in tasklist:
        x['tags'] = list(x['tags'])
        out.append(x)
    write_json(path, {'tags': list(taglist), 'tasks': out})
Example #36
def save_reply(prefix, parameters):
    job_id = parameters['ids']['jobId']
    project_id = parameters['ids']['projectId']
    filename = "%s-%s.json" % (project_id, job_id)
    common.write_json(parameters, "%s/%s" % (prefix, filename))