Example #1
0
    def get(self):
        """Collect the union of all run params and metric names across the repo."""
        project = Project()

        if not project.exists():
            return make_response(jsonify({}), 404)

        repo = project.repo
        metric_names = set()
        merged_params = {}

        for experiment in repo.list_branches():
            for commit_hash in repo.list_branch_commits(experiment):
                run = repo.select_run_metrics(experiment, commit_hash)
                if run is None:
                    continue
                run_params = copy.deepcopy(run.params)
                if not run_params:
                    continue
                # Internal metrics bookkeeping is not a user param.
                run_params.pop('__METRICS__', None)
                dump_dict_values(run_params, {})
                merged_params = deep_merge(merged_params, run_params)
                metric_names.update(run.metrics.keys())

        dump_dict_values(merged_params, True)

        return jsonify({
            'params': merged_params,
            'metrics': list(metric_names),
        })
Example #2
0
    def get(self):
        """Return the list of experiment branches of the project repo."""
        project = Project()
        if not project.exists():
            return make_response(jsonify({}), 404)

        branches = project.repo.list_branches()
        return jsonify({'branches': branches})
Example #3
0
File: views.py — Project: mihran113/aimui
    def post(self, experiment, commit_hash):
        """Toggle the archived flag of the given run and report the new state."""
        project = Project()
        if not project.exists():
            return make_response(jsonify({}), 404)

        repo = project.repo
        was_archived = repo.is_archived(experiment, commit_hash)
        if was_archived:
            repo.unarchive(experiment, commit_hash)
        else:
            repo.archive(experiment, commit_hash)

        # The new state is simply the opposite of the previous one.
        return jsonify({'archived': not was_archived})
Example #4
0
File: views.py — Project: mihran113/aimui
    def get(self):
        """Search runs matching the query expression `q`.

        Returns 404 if the project does not exist, 403 with parse details
        when the expression is invalid, otherwise the matching runs
        serialized as dicts.
        """
        # Get project
        project = Project()
        if not project.exists():
            return make_response(jsonify({}), 404)

        # `q` may be absent from the request; default to '' instead of
        # crashing with AttributeError on None.strip().
        raw_expression = request.args.get('q', '').strip()

        # Hide archived runs unless the query mentions the flag explicitly.
        if 'run.archived' not in raw_expression:
            default_expression = 'run.archived is not True'
        else:
            default_expression = None

        if raw_expression:
            try:
                parser = Expression()
                parser.parse(raw_expression)
            except Diagnostic as d:
                # Report the column of the last ERROR-severity notification
                # so the UI can highlight it; otherwise a bare 403.
                parser_error_logs = d.logs or []
                for error_log in reversed(parser_error_logs):
                    if not isinstance(error_log, Notification):
                        continue
                    if error_log.severity != Severity.ERROR:
                        continue
                    error_location = error_log.location
                    if error_location:
                        return make_response(
                            jsonify({
                                'type': 'parse_error',
                                'statement': raw_expression,
                                'location': error_location.col,
                            }), 403)
                return make_response(jsonify({}), 403)
            except Exception:
                return make_response(jsonify({}), 403)

        runs = project.repo.select_runs(raw_expression, default_expression)

        return jsonify({
            'runs': [run.to_dict() for run in runs],
        })
Example #5
0
    def get(self):
        """Return project activity stats: experiment and run counts plus a
        per-day activity map of run start dates in the client's timezone.
        """
        project = Project()

        if not project.exists():
            return make_response(jsonify({}), 404)

        # Latest sync point: the max of the newest start/close timestamps.
        last_synced_run = db.session\
            .query(func.max(Commit.session_started_at),
                   func.max(Commit.session_closed_at))\
            .first()
        last_synced_run_time = max(last_synced_run[0] or 0, last_synced_run[1]
                                   or 0)

        # Pull any runs modified since the last sync into the DB table.
        modified_runs = project.get_modified_runs(last_synced_run_time)
        upgrade_runs_table(project, modified_runs)

        all_runs = db.session\
            .query(Commit.hash,
                   Commit.experiment_name,
                   Commit.session_started_at)\
            .filter(Commit.session_started_at > 0)\
            .all()

        experiments = {r.experiment_name for r in all_runs}

        # Fall back to GMT when the client timezone is missing or invalid.
        # Catch Exception (not a bare except) so SystemExit and
        # KeyboardInterrupt still propagate.
        try:
            timezone = pytz.timezone(request.tz)
        except Exception:
            timezone = None
        if not timezone:
            timezone = pytz.timezone('gmt')

        # Count runs per calendar day (in the client's timezone).
        activity_counter = Counter([
            datetime.fromtimestamp(r.session_started_at,
                                   timezone).strftime('%Y-%m-%d')
            if r.session_started_at > 0 else 0 for r in all_runs
        ])

        return jsonify({
            'num_experiments': len(experiments),
            'num_runs': len(all_runs),
            'activity_map': dict(activity_counter),
        })
Example #6
0
    def get(self):
        """Expose basic project metadata for the UI."""
        project = Project()

        if not project.exists():
            return make_response(jsonify({}), 404)

        # Telemetry defaults to enabled ('1') when the env var is unset.
        telemetry_enabled = os.getenv('AIM_UI_TELEMETRY_ENABLED') or '1'

        return jsonify({
            'name': project.name,
            'path': project.path,
            'tf_enabled': project.tf_enabled,
            'description': project.description,
            'branches': project.repo.list_branches(),
            'telemetry_enabled': telemetry_enabled,
        })
Example #7
0
    def get(self, experiment_name, commit_id, file_path):
        """Read a stored artifact log (up to 500 records) and return the
        JSON-decoded records; '+' in `file_path` encodes path separators.
        """
        project = Project()

        if not project.exists():
            return make_response(jsonify({}), 404)

        objects_dir_path = os.path.join(os.getcwd(), '.aim', experiment_name,
                                        commit_id, 'objects')

        # The client uses '+' as a path separator inside `file_path`.
        file_path = os.path.join(*file_path.split('+')) + '.log'
        dist_abs_path = os.path.join(objects_dir_path, file_path)

        if not os.path.isfile(dist_abs_path):
            return make_response(jsonify({}), 404)

        # Read file specified by found path. Best-effort: an unreadable or
        # corrupt log yields an empty list. Catch Exception, not a bare
        # except, so SystemExit/KeyboardInterrupt still propagate.
        try:
            obj_data_content = read_artifact_log(dist_abs_path, 500)
            return [json.loads(record) for record in obj_data_content]
        except Exception:
            return []
Example #8
0
    def get(self, experiment_name, commit_id):
        """Return the full detail payload for one commit (run) of an
        experiment: commit metadata, sibling commits, and the metric /
        model / dir / map artifacts listed in the commit's meta file.

        `commit_id` may be the literal string 'latest' to select the most
        recent commit by its 'date' field.
        """
        project = Project()

        if not project.exists():
            return make_response(jsonify({}), 404)

        dir_path = os.path.join(os.getcwd(), '.aim', experiment_name)

        # Check if experiment exists
        if not os.path.isdir(dir_path):
            return jsonify({
                'init': True,
                'branch_init': False,
            })

        # Get commits
        commits = get_branch_commits(dir_path)

        # Get specified commit
        commit = None
        if commit_id == 'latest':
            # Pick the commit with the greatest 'date' value.
            for commit_item, config in commits.items():
                if commit is None or config['date'] > commit['date']:
                    commit = config
        else:
            commit = commits.get(commit_id)

        if not commit:
            return jsonify({
                'init': True,
                'branch_init': True,
                'branch_empty': True,
            })

        # Attach elapsed/total run time from the process info: a
        # still-running process gets time-so-far, a finished one gets the
        # start-to-finish duration.
        if 'process' in commit.keys():
            if not commit['process']['finish']:
                if commit['process'].get('start_date'):
                    duration = time.time() - commit['process']['start_date']
                    commit['process']['time'] = duration
                else:
                    commit['process']['time'] = None
            elif commit['process'].get('start_date') is not None \
                    and commit['process'].get('finish_date') is not None:
                commit['process']['time'] = commit['process']['finish_date'] \
                                            - commit['process']['start_date']

        objects_dir_path = os.path.join(dir_path, commit['hash'], 'objects')
        meta_file_path = os.path.join(objects_dir_path, 'meta.json')

        # Read meta file content. Best-effort: a missing or corrupt meta
        # file yields no artifacts.
        try:
            with open(meta_file_path, 'r+') as meta_file:
                meta_file_content = json.loads(meta_file.read())
        except:
            meta_file_content = {}

        # Get all artifacts(objects) listed in the meta file
        metric_objects = []
        model_objects = []
        dir_objects = []
        map_objects = []
        stats_objects = []

        # Limit distributions
        for obj_key, obj in meta_file_content.items():
            if obj['type'] == 'dir':
                dir_objects.append({
                    'name': obj['name'],
                    'cat': obj['cat'],
                    'data': obj['data'],
                    'data_path': obj['data_path'],
                })
            elif obj['type'] == 'models':
                # Include the model checkpoint's on-disk size.
                model_file_path = os.path.join(objects_dir_path, 'models',
                                               '{}.aim'.format(obj['name']))
                model_file_size = os.stat(model_file_path).st_size
                model_objects.append({
                    'name': obj['name'],
                    'data': obj['data'],
                    'size': model_file_size,
                })
            elif (obj['type'] == 'metrics'
                  and obj['data_path'] != '__AIMRECORDS__') or \
                    ('map' in obj['type'] or obj['type'] == 'map'):
                # obj['type'] == 'distribution':
                # Get object's data file path (used by the map branch below).
                obj_data_file_path = os.path.join(objects_dir_path,
                                                  obj['data_path'], obj_key)

                # Incompatible version: legacy .json artifacts are rejected.
                if obj_key.endswith('.json'):
                    return make_response(jsonify({}), 501)

            if obj['type'] == 'metrics':
                # Downsample each metric trace to roughly `steps` points.
                steps = 200
                run = project.repo.select_run_metrics(experiment_name,
                                                      commit['hash'],
                                                      obj['name'])
                if run is not None and run.metrics.get(obj['name']) \
                        and len(run.metrics[obj['name']].traces):
                    metric = run.metrics[obj['name']]
                    run.open_storage()
                    metric.open_artifact()
                    traces = []
                    for trace in metric.traces:
                        num = trace.num_records
                        step = (num // steps) or 1
                        for r in trace.read_records(slice(0, num, step)):
                            base, metric_record = MetricRecord.deserialize(r)
                            if unsupported_float_type(metric_record.value):
                                continue
                            trace.append((
                                base.step,  # 0 => step
                                metric_record.value,  # 1 => value
                            ))
                        # Always include the final record when the stride
                        # did not land on it.
                        if (num - 1) % steps != 0:
                            for r in trace.read_records(num - 1):
                                base, metric_record = MetricRecord.deserialize(
                                    r)
                                if unsupported_float_type(metric_record.value):
                                    continue
                                trace.append((
                                    base.step,  # 0 => step
                                    metric_record.value,  # 1 => value
                                ))
                        traces.append(trace.to_dict())
                    metric.close_artifact()
                    run.close_storage()
                else:
                    traces = []

                metric_objects.append({
                    'name': obj['name'],
                    'mode': 'plot',
                    'traces': traces,
                })
            elif 'map' in obj['type'] or obj['type'] == 'map':
                # Map (params) artifacts: the first log line is a
                # JSON-encoded dict. Best-effort: unreadable maps skipped.
                try:
                    params_str = read_artifact_log(obj_data_file_path, 1)
                    if params_str:
                        map_objects.append({
                            'name':
                            obj['name'],
                            'data':
                            json.loads(params_str[0]),
                            'nested':
                            'nested_map' in obj['type']
                        })
                except:
                    pass

        # Return found objects
        return jsonify({
            'init': True,
            'branch_init': True,
            'branch_empty': False,
            'commit': commit,
            'commits': commits,
            'metrics': metric_objects,
            'models': model_objects,
            'dirs': dir_objects,
            'maps': map_objects,
            'stats': stats_objects,
        })
Example #9
0
File: views.py — Project: mihran113/aimui
    def get(self):
        """Execute a full run search: parse the `q` select statement, query
        aim (and optionally TensorFlow) runs, downsample metric traces to
        roughly `p` points, and stream the results back as
        newline-delimited JSON.
        """
        # Number of points to downsample each trace to (default 50).
        try:
            steps_num = int(request.args.get('p').strip())
        except:
            steps_num = 50

        # Get project
        project = Project()
        if not project.exists():
            return make_response(jsonify({}), 404)

        search_statement = request.args.get('q').strip()

        # Parse statement
        try:
            parser = Statement()
            parsed_stmt = parser.parse(search_statement.strip())
        except Diagnostic as d:
            # Surface the column of the last ERROR-severity notification so
            # the UI can highlight it; otherwise return a bare 403.
            parser_error_logs = d.logs or []
            for error_log in reversed(parser_error_logs):
                if not isinstance(error_log, Notification):
                    continue
                if error_log.severity != Severity.ERROR:
                    continue
                error_location = error_log.location
                if error_location:
                    return make_response(
                        jsonify({
                            'type': 'parse_error',
                            'statement': search_statement,
                            'location': error_location.col,
                        }), 403)
            return make_response(jsonify({}), 403)
        except Exception:
            return make_response(jsonify({}), 403)

        statement_select = parsed_stmt.node['select']
        statement_expr = parsed_stmt.node['expression']

        # Split the selected fields into aim metrics/params vs TF log paths.
        aim_select, tf_logs = separate_select_statement(statement_select)

        # Hide archived runs unless the query mentions the flag explicitly.
        if 'run.archived' not in search_statement:
            default_expression = 'run.archived is not True'
        else:
            default_expression = None

        aim_select_result = project.repo.select(aim_select, statement_expr,
                                                default_expression)

        (
            aim_selected_runs,
            aim_selected_params,
            aim_selected_metrics,
        ) = (aim_select_result.runs, aim_select_result.get_selected_params(),
             aim_select_result.get_selected_metrics_context())

        # Newest runs first.
        aim_selected_runs.sort(key=lambda r: r.config.get('date'),
                               reverse=True)

        response = {
            'runs': [],
            'params': [],
            'agg_metrics': {},
            'meta': {
                'tf_selected': False,
                'params_selected': False,
                'metrics_selected': False,
            },
        }

        retrieve_traces = False
        retrieve_agg_metrics = False

        # Params selected -> aggregated metrics; metrics/TF-only select ->
        # raw traces.
        if len(aim_selected_params):
            response['meta']['params_selected'] = True
            response['params'] = aim_selected_params
            if len(aim_selected_metrics):
                response['meta']['metrics_selected'] = True
                response['agg_metrics'] = aim_selected_metrics
                retrieve_agg_metrics = True
        elif len(aim_selected_metrics) or len(tf_logs):
            response['meta']['metrics_selected'] = True
            retrieve_traces = True

        runs = []

        if aim_selected_runs and len(aim_selected_runs):
            runs += aim_selected_runs
        if len(tf_logs) > 0:
            if not retrieve_traces:
                # TODO: aggregate tf logs and return aggregated values
                response['meta']['tf_selected'] = True
                pass
            else:
                # Best-effort: failures reading TF summaries are ignored and
                # tf_selected stays False.
                try:
                    tf_runs = select_tf_summary_scalars(
                        tf_logs, statement_expr)
                    if tf_runs and len(tf_runs):
                        runs += tf_runs
                except:
                    pass
                else:
                    response['meta']['tf_selected'] = True

        if retrieve_traces:
            for run in runs:
                if is_tf_run(run):
                    # TF runs are plain dicts; subsample each trace's data
                    # list down to roughly steps_num points.
                    for metric in run['metrics']:
                        for trace in metric['traces']:
                            trace_scaled_data = []
                            for i in range(
                                    0, trace['num_steps'],
                                    trace['num_steps'] // steps_num or 1):
                                trace_scaled_data.append(trace['data'][i])
                            trace['data'] = trace_scaled_data
                else:
                    # Aim runs: read records from storage with a stride so
                    # each trace yields about steps_num points.
                    run.open_storage()
                    for metric in run.metrics.values():
                        try:
                            metric.open_artifact()
                            for trace in metric.traces:
                                step = (trace.num_records // steps_num) or 1
                                trace_steps = slice(0, trace.num_records, step)
                                for r in trace.read_records(trace_steps):
                                    base, metric_record = MetricRecord.deserialize(
                                        r)
                                    if unsupported_float_type(
                                            metric_record.value):
                                        continue
                                    trace.append((
                                        metric_record.value,
                                        base.step,
                                        (base.epoch
                                         if base.has_epoch else None),
                                        base.timestamp,
                                    ))
                                # Always include the final record when the
                                # stride did not land on it.
                                if (trace.num_records - 1) % step != 0:
                                    for r in trace.read_records(
                                            trace.num_records - 1):
                                        base, metric_record = MetricRecord.deserialize(
                                            r)
                                        if unsupported_float_type(
                                                metric_record.value):
                                            continue
                                        trace.append((
                                            metric_record.value,
                                            base.step,
                                            (base.epoch
                                             if base.has_epoch else None),
                                            base.timestamp,
                                        ))
                        except:
                            pass

                        # Close the artifact even if reading failed above.
                        try:
                            metric.close_artifact()
                        except:
                            pass
                    run.close_storage()

        if retrieve_agg_metrics:
            # TODO: Retrieve and return aggregated metrics
            pass

        def runs_resp_generator():
            # Stream the header first, then one JSON line per run (NDJSON),
            # inside the Flask app context required by serialization.
            with App.api.app_context():
                yield json.dumps({
                    'header': response,
                }).encode() + '\n'.encode()
                for run in runs:
                    if not is_tf_run(run):
                        yield json.dumps({
                            'run':
                            run.to_dict(
                                include_only_selected_agg_metrics=True),
                        }).encode() + '\n'.encode()
                    else:
                        yield json.dumps({
                            'run': run,
                        }).encode() + '\n'.encode()

        return Response(runs_resp_generator(), mimetype='application/json')