def admin_storage():
    """Render the storage admin page with per-path disk usage statistics."""
    image_count = db.get_sample_count()
    annotation_count = db.get_human_annotation_count()
    monitored_paths = (
        ('server_heatmap', config.get_server_heatmap_path()),
        ('server_image', config.get_server_image_path()),
        ('cnn', config.get_cnn_path()),
        ('caffe', config.get_caffe_path()),
        ('plot', config.get_plot_path()),
        ('train_data', config.get_train_data_path()),
    )
    path_data = []
    for label, folder in monitored_paths:
        fs = os.statvfs(folder)
        # 'train_data' is reported as '?' instead of being measured —
        # presumably too large to walk recursively on every page load.
        if label == 'train_data':
            used = '?'
        else:
            used = bytes_humanfriendly(get_recursive_folder_size(folder))
        path_data.append({
            'name': label,
            'path': folder,
            'disk_total': bytes_humanfriendly(fs.f_frsize * fs.f_blocks),
            'disk_avail': bytes_humanfriendly(fs.f_frsize * fs.f_bavail),
            'used': used,
        })
    return render_template('admin_storage.html',
                           num_images=image_count,
                           num_human_annotations=annotation_count,
                           path_data=path_data,
                           error=pop_last_error())
def admin_page():
    """Render the admin page: counts, datasets, both work queues and status."""
    image_count = db.get_sample_count()
    annotation_count = db.get_human_annotation_count()
    datasets = db.get_datasets()
    primary_queue = db.get_unprocessed_samples()
    models = db.get_models(details=True)
    id_to_name = {m['_id']: m['name'] for m in models}
    # Build (model name, target name, queue item id) rows for the secondary
    # queue; placeholder strings mark items whose references cannot be resolved.
    secondary_queue = []
    for entry in db.get_queued_samples():
        model_name = id_to_name.get(entry['model_id'], '???')
        if 'sample_id' in entry:
            # Item targets a single sample.
            target = db.get_sample_by_id(entry['sample_id'])['filename']
        elif 'validation_model_id' in entry:
            # Item targets a validation run against another model.
            target = id_to_name.get(entry['validation_model_id'], '?!?')
        else:
            target = '!!!'
        secondary_queue.append((model_name, target, str(entry['_id'])))
    secondary_queue.sort(key=lambda row: row[0])
    status = [(name, db.get_status(sid)) for name, sid in status_ids]
    return render_template('admin.html',
                           num_images=image_count,
                           num_human_annotations=annotation_count,
                           datasets=datasets,
                           enqueued=primary_queue,
                           status=status,
                           error=pop_last_error(),
                           models=models,
                           enqueued2=secondary_queue)
def admin_datasets():
    """Render the dataset admin list, backfilling missing access timestamps."""
    datasets = db.get_datasets()
    for ds in datasets:
        # Older records may lack 'date_accessed'; stamp them now and persist
        # the access so the value is present on the next load.
        if ds.get('date_accessed') is None:
            ds['date_accessed'] = datetime.now()
            db.access_dataset(ds['_id'])
    return render_template('admin_datasets.html', datasets=datasets,
                           error=pop_last_error())
def user_datasets_page():
    """Render the current user's dataset list, claiming unowned datasets first."""
    # Claim any datasets recorded in the 'datasets' cookie before rendering.
    # Best-effort: a missing cookie or malformed JSON is silently ignored.
    # Fix: the original bare 'except:' also swallowed KeyboardInterrupt and
    # SystemExit; 'except Exception' keeps the best-effort behavior without that.
    try:
        added_datasets = json.loads(unquote_plus(request.cookies['datasets']))
        for added_dataset in added_datasets:
            do_claim_dataset(added_dataset, ignore_errors=True)
    except Exception:
        pass
    # Render page, which will reset the unowned datasets variable
    user_id = get_current_user_id()
    datasets = [] if user_id is None else db.get_datasets_by_user(user_id)
    return render_template('user_datasets.html', datasets=datasets,
                           error=pop_last_error())
def validation_page(val_id_s):
    """Render validation results for one train/validation model pair."""
    results = dict(db.get_validation_results_by_id(ObjectId(val_id_s)))
    trained = db.get_model_by_id(results['train_model_id'])
    validated = db.get_model_by_id(results['validation_model_id'])
    validation_label = '%s (%s)' % (validated['name'], results['image_subset'])
    return render_template('validation.html',
                           val_id=results['validation_model_id'],
                           train_name=trained['name'],
                           val_name=validation_label,
                           worst_predictions=results['worst_predictions'],
                           error=pop_last_error())
def annotate(sid):
    """Render the annotation editor for one sample.

    With the 'differential' query arg set, pre-fills the editor with machine
    annotations when no human annotations exist yet.
    """
    # Query info from DB
    sample_id = ObjectId(sid)
    sample_entry = db.get_sample_by_id(sample_id)
    # Fix: validate the entry BEFORE dereferencing it — the original read
    # sample_entry['dataset_id'] first and crashed on unknown ids.
    if sample_entry is None:
        return error_redirect('Unknown entry: "%s".' % str(sample_id))
    dataset_id = sample_entry['dataset_id']
    name = sample_entry['name']
    readonly = db.is_readonly_dataset_id(dataset_id)
    sample_index, sample_count, prev_sample_id, next_sample_id = \
        db.get_sample_index(dataset_id, sample_id)
    # Determine data
    image_filename = 'images/' + sample_entry['filename']
    info_string = ''
    # Differential? Then load machine annotations, unless there are human
    # annotations already.
    annotations = db.get_human_annotations(sample_id)
    is_differential = request.args.get('differential')
    if is_differential and not annotations:
        annotations = db.get_machine_annotations(sample_id)
    if annotations:
        annotations_json = annotations[0]['positions']
        # Machine annotations store positions as [x, y] pairs; convert them
        # to the {'x': .., 'y': ..} dicts the template expects.
        if len(annotations_json) and isinstance(annotations_json[0], list):
            annotations_json = [{'x': a[0], 'y': a[1]} for a in annotations_json]
    else:
        annotations_json = []
    return render_template("annotate.html", id=sid,
                           image_filename=image_filename,
                           info_string=info_string,
                           error=pop_last_error(),
                           height=sample_entry['size'][1],
                           width=sample_entry['size'][0],
                           annotations=annotations_json,
                           margin=96,
                           dataset_id=str(dataset_id),
                           sample_index=sample_index,
                           sample_count=sample_count,
                           prev_id=str(prev_sample_id),
                           next_id=str(next_sample_id),
                           is_differential=is_differential,
                           readonly=readonly,
                           name=name)
def _confusion_accuracy(cm):
    """Format a 2x2 confusion matrix as an accuracy percentage string ('%.1f')."""
    correct = cm[0][0] + cm[1][1]
    total = correct + cm[1][0] + cm[0][1]
    return '%.1f' % (100.0 * correct / total)


def model_page(model_id_s):
    """Render the model detail page: parameters, train log and validations."""
    model_id = ObjectId(model_id_s)
    model = dict(db.get_model_by_id(model_id))
    # Fix: items() instead of the Python-2-only iteritems(); the sorted
    # result is identical and this also works on Python 3.
    model_data = [(k, v) for k, v in sorted(model.items())]
    exec_base_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__), 'cnn'))
    exec_path = os.path.join(exec_base_path, str(model_id))
    log_filename = os.path.join(exec_path, 'train.log')
    if os.path.isfile(log_filename):
        # Fix: 'with' closes the handle; the original leaked the file object.
        with open(log_filename, 'rt') as log_file:
            log_data = log_file.read()
    else:
        log_data = 'logfile not found: %s' % log_filename
    # Get validation results by model (this model was the trained model)
    validation_results = db.get_all_validation_results(train_model_id=model_id)
    for vr in validation_results:
        vr['image_set'] = '(%s) %s' % (
            vr['image_subset'],
            db.get_model_by_id(vr['validation_model_id'])['name'])
        vr['accuracy'] = _confusion_accuracy(vr['confusion_matrix'])
    validation_results = sorted(validation_results,
                                key=lambda f: f['image_set'])
    # Get validation results by dataset (this model supplied the images)
    validation_results_ds = db.get_all_validation_results(
        validation_model_id=model_id)
    for vr in validation_results_ds:
        vr['model_name'] = '(%s) %s' % (
            vr['image_subset'],
            db.get_model_by_id(vr['train_model_id'])['name'])
        vr['accuracy'] = _confusion_accuracy(vr['confusion_matrix'])
    validation_results_ds = sorted(validation_results_ds,
                                   key=lambda f: f['model_name'])
    return render_template('model.html', model_id=model_id_s,
                           name=model['name'], model_data=model_data,
                           log=log_data,
                           validation_results=validation_results,
                           validation_results_ds=validation_results_ds,
                           error=pop_last_error())
def dataset_info(dataset_id_str): print 'request.method', request.method if dataset_id_str == 'new' and request.method == 'POST': dataset_id = None dataset_info = None new_dataset_zoom = request.form['size'] else: dataset_id = ObjectId(dataset_id_str) dataset_info = db.get_dataset_by_id(dataset_id) new_dataset_zoom = None if dataset_info is None: return render_template("404.html") if request.method == 'POST': # File upload if dataset_info is not None: if db.is_readonly_dataset(dataset_info): set_error('Dataset is protected.') return redirect('/dataset/' + dataset_id_str) return upload_file(dataset_id, image_zoom=new_dataset_zoom) enqueued = db.get_unprocessed_samples(dataset_id=dataset_id) finished = db.get_processed_samples(dataset_id=dataset_id) for i, sample in enumerate(finished): sample['machine_distance'] = 1.0 / max( [0.001, sqrt(float(sample['machine_position_count']))]) sample['index'] = i errored = db.get_error_samples(dataset_id=dataset_id) # Get request data return render_template("dataset.html", dataset_name=dataset_info['name'], dataset_id=dataset_id_str, enqueued=enqueued, finished=finished, errored=errored, status=db.get_status('worker'), readonly=db.is_readonly_dataset(dataset_info), error=pop_last_error(), dataset_user=dataset_info.get('user'), image_zoom=dataset_info.get('image_zoom', 'default'))
def admin_models():
    """Render the model administration page."""
    return render_template('admin_models.html',
                           models=db.get_models(details=True),
                           error=pop_last_error())
def admin_overview():
    """Render the plain admin overview page."""
    return render_template(
        'admin.html',
        error=pop_last_error(),
    )
def show_info(sid):
    """Render the info page for one sample, listing all annotation passes."""
    # Query info from DB
    sample_id = ObjectId(sid)
    sample_entry = db.get_sample_by_id(sample_id)
    # Fix: validate the entry BEFORE dereferencing it — the original passed
    # sample_entry['dataset_id'] to get_sample_index first and crashed on
    # unknown ids.
    if sample_entry is None:
        return error_redirect('Unknown entry: "%s".' % str(sample_id))
    sample_index, sample_count, prev_sample_id, next_sample_id = \
        db.get_sample_index(sample_entry['dataset_id'], sample_id)
    # Allow fixing of machine annotation? Only if not human-annotated and
    # there is a machine annotation with stomata.
    can_annotate_diff = (not sample_entry.get('annotated')) and \
        (sample_entry.get('machine_position_count'))
    # Determine data
    refresh = False
    filename = sample_entry['filename']
    name = sample_entry['name']
    dataset_id = sample_entry['dataset_id']
    readonly = db.is_readonly_dataset_id(dataset_id)
    info_table = []
    for info_name, info_key in info_table_entries:
        info_value = sample_entry.get(info_key)
        if info_value is not None:
            info_table.append((info_name, info_value))
    annotations = []
    if sample_entry['error']:
        info_string = Markup('Error: <pre>' + sample_entry['error_string'] + '</pre>')
    elif not sample_entry['processed']:
        info_string = 'Processing...'
        refresh = True  # template reloads the page until processing finishes
    else:
        info_string = 'Processed.'
        annotation_data = db.get_machine_annotations(sample_id) + \
            db.get_human_annotations(sample_id)
        has_image_output = False
        for ad in annotation_data:
            model_id = ad.get('model_id')
            an = {'info_string': ''}
            if model_id is not None:
                # Machine annotation: show model info and the heatmap image.
                model_data = db.get_model_by_id(model_id)
                if model_data is None:
                    an['info_string'] += '??? Unknown model ???'
                    an['title'] = 'Unknown'
                else:
                    an['title'] = model_data['name']
                    an['info_string'] += 'Margin: %d' % (ad.get('margin') or model_data['margin'])
                an['image_filename'] = 'heatmaps/' + ad['heatmap_image_filename']
                has_image_output = True
            else:
                # Human annotation.
                an['title'] = 'By user %s' % ad.get('user_name')
            if 'scale' in ad:
                an['info_string'] += ' - Scale: %.1f' % ad['scale']
            positions = ad['positions']
            if positions is not None:
                an['info_string'] += ' - %d stomata' % len(positions)
            annotations += [an]
        # Newest annotation first.
        annotations = list(reversed(annotations))
        if not has_image_output:
            # No heatmap available: fall back to showing the raw input image.
            annotations += [{'image_filename': 'images/' + filename,
                             'title': 'Input image', 'info_string': ''}]
    return render_template("info.html", id=sid, name=name,
                           dataset_id=str(dataset_id),
                           info_string=info_string,
                           annotations=annotations,
                           error=pop_last_error(), refresh=refresh,
                           sample_index=sample_index,
                           sample_count=sample_count,
                           prev_id=str(prev_sample_id),
                           next_id=str(next_sample_id),
                           can_annotate_diff=can_annotate_diff,
                           readonly=readonly,
                           info_table=info_table)
def dataset_info(dataset_id_str): print 'request.method', request.method new_dataset_threshold_prob = None new_allow_reuse = False if dataset_id_str == 'new': print 'Creating new dataset' if request.method != 'POST': return redirect('/') dataset_id = None dataset_info = None new_dataset_zoom = request.form['size'] print 'Threshold prob:' print request.form['threshold'] try: v = float(request.form['threshold']) new_dataset_threshold_prob = min(max(v, 0.5), 1.0) print 'Specified thresh prob:', new_dataset_threshold_prob except ValueError: print 'Invalid threshold. Ignored.' try: new_allow_reuse = bool(request.form.get('reuseCheck')) print 'Specified allow reuse:', request.form.get( 'reuseCheck'), new_allow_reuse except ValueError: print 'Invalid reuse setting. Ignored.' else: dataset_id = ObjectId(dataset_id_str) db.access_dataset(dataset_id) dataset_info = db.get_dataset_by_id(dataset_id) new_dataset_zoom = None if dataset_info is None: return render_template("404.html") if request.method == 'POST': # File upload if dataset_info is not None: if db.is_readonly_dataset(dataset_info): set_error('Dataset is protected.') return redirect('/dataset/' + dataset_id_str) return upload_file(dataset_id, image_zoom=new_dataset_zoom, threshold_prob=new_dataset_threshold_prob, allow_reuse=new_allow_reuse) enqueued = db.get_unprocessed_samples(dataset_id=dataset_id) finished = db.get_processed_samples(dataset_id=dataset_id) for i, sample in enumerate(finished): sample['machine_distance'] = 1.0 / max( [0.001, sqrt(float(sample['machine_position_count']))]) sample['index'] = i errored = db.get_error_samples(dataset_id=dataset_id) threshold_prob = round(dataset_info.get('threshold_prob') or fc8_to_prob(default_prob_threshold), ndigits=3) # Get request data return render_template("dataset.html", dataset_name=dataset_info['name'], dataset_id=dataset_id_str, enqueued=enqueued, finished=finished, errored=errored, status=db.get_status('worker'), 
readonly=db.is_readonly_dataset(dataset_info), error=pop_last_error(), dataset_user=dataset_info.get('user'), image_zoom=dataset_info.get('image_zoom', 'default'), threshold_prob=threshold_prob)