コード例 #1
0
    def delete_range_row(self):
        """Remove the currently selected row from the 'ranges' data source.

        When the last row is deleted, the source is wiped and every range
        widget is reset to its default.  Otherwise the remaining rows are
        renumbered and the row selector options are rebuilt.
        """
        if not self.range_row.value:
            return

        data = self.sources.ranges.data
        row_index = int(self.range_row.value) - 1
        remaining_rows = len(self.sources.ranges.data['category']) - 1

        if not remaining_rows:
            # Last row removed: clear the source and restore widget defaults
            clear_source_data(self.sources, 'ranges')
            self.range_row.options = ['']
            self.range_row.value = ''
            self.group_range.active = [0]
            self.range_not_operator_checkbox.active = []
            self.select_category.value = options.SELECT_CATEGORY_DEFAULT
            self.text_min.value = ''
            self.text_max.value = ''
        else:
            # Drop the selected row from every column of the source
            for column in list(data):
                data[column].pop(row_index)

            # Shift the displayed row numbers down for rows after the deleted one
            for j in range(row_index, remaining_rows):
                data['row'][j] -= 1

            self.range_row.options = [str(j + 1) for j in range(remaining_rows)]
            if self.range_row.value not in self.range_row.options:
                self.range_row.value = self.range_row.options[-1]
            self.sources.ranges.data = data

        clear_source_selection(self.sources, 'ranges')
コード例 #2
0
ファイル: dvhs.py プロジェクト: mchamberland/DVH-Analytics
    def delete_ep_row(self):
        """Delete the selected endpoint-definition row and renumber the rest.

        Clears the whole 'endpoint_defs' source when the final row is removed,
        then re-runs the endpoint calculations and clears the selection.
        """
        if not self.ep_row.value:
            return

        data = self.sources.endpoint_defs.data
        row_index = int(self.ep_row.value) - 1
        remaining_rows = len(self.sources.endpoint_defs.data['output_type']) - 1

        if not remaining_rows:
            # No rows left: empty the source and blank out the selector
            clear_source_data(self.sources, 'endpoint_defs')
            self.ep_row.options = ['']
            self.ep_row.value = ''
        else:
            # Remove the row from each column, then renumber trailing rows
            for column in list(data):
                data[column].pop(row_index)

            for j in range(row_index, remaining_rows):
                data['row'][j] -= 1

            self.ep_row.options = [str(j + 1) for j in range(remaining_rows)]
            if self.ep_row.value not in self.ep_row.options:
                self.ep_row.value = self.ep_row.options[-1]
            self.sources.endpoint_defs.data = data

        self.update_source_endpoint_calcs()  # not efficient, but still relatively quick
        clear_source_selection(self.sources, 'endpoint_defs')
コード例 #3
0
 def plot_tv(self):
     """Show target-volume contours for the active slice, if available.

     If the TV source already holds data (or the slice has no TV contour),
     the source is cleared instead.
     """
     self.update_tv_data()
     slice_key = self.slice_select.value
     if slice_key in list(self.tv_data) and not self.sources.tv.data['x']:
         self.sources.tv.data = self.tv_data[slice_key]
     else:
         clear_source_data(self.sources, 'tv')
コード例 #4
0
 def slice_ticker(self, attr, old, new):
     """Refresh all five ROI viewer sources for the newly selected slice.

     Bokeh ticker callback signature (attr/old/new are unused directly).
     ROIs without data on this slice get their source cleared; the TV
     source is always cleared so it can be re-plotted on demand.
     """
     slice_key = self.slice_select.value
     for roi_number in ('1', '2', '3', '4', '5'):
         source_name = 'roi%s_viewer' % roi_number
         if slice_key in list(self.data[roi_number]):
             getattr(self.sources, source_name).data = self.data[roi_number][slice_key]
         else:
             clear_source_data(self.sources, source_name)
     clear_source_data(self.sources, 'tv')
コード例 #5
0
 def ticker(self, attr, old, new):
     """Update this ROI's viewer source when its selection changes.

     ROI 1 drives the slice selector, so changing it triggers a full slice
     update; other ROIs only refresh (or clear) their own viewer source.
     """
     self.update_roi_viewer_data()
     if self.roi_number == '1':
         self.update_slice()
     else:
         slice_key = self.slice_select.value
         source_name = 'roi%s_viewer' % self.roi_number
         roi_slices = self.roi_viewer_data[self.roi_number]
         if slice_key in list(roi_slices):
             getattr(self.sources, source_name).data = roi_slices[slice_key]
         else:
             clear_source_data(self.sources, source_name)
コード例 #6
0
    def update_histograms(self):
        """Recompute each group's histogram source from its time-series y data.

        Bin count comes from the histogram bin slider.  When the radio group
        is set to relative mode (active == 1), counts are normalized by the
        tallest bin and the y-axis label switches accordingly.  When no
        y-axis variable is selected, the histogram sources are cleared.
        """
        if self.y_axis.value != '':
            # Update Histograms
            bin_size = int(self.histogram_bin_slider.value)
            width_fraction = 0.9  # leave a small gap between adjacent bars

            for n in GROUP_LABELS:
                hist, bins = np.histogram(getattr(self.sources, 'time_%s' % n).data['y'], bins=bin_size)
                if self.histogram_radio_group.active == 1:
                    # np.float was deprecated in NumPy 1.20 and removed in 1.24;
                    # the builtin float is the documented replacement
                    hist = np.divide(hist, float(np.max(hist)))
                    self.histograms.yaxis.axis_label = "Relative Frequency"
                else:
                    self.histograms.yaxis.axis_label = "Frequency"
                width = [width_fraction * (bins[1] - bins[0])] * bin_size
                center = (bins[:-1] + bins[1:]) / 2.  # bin midpoints for bar x-positions
                getattr(self.sources, 'histogram_%s' % n).data = {'x': center,
                                                                  'top': hist,
                                                                  'width': width}
        else:
            for n in GROUP_LABELS:
                clear_source_data(self.sources, 'histogram_%s' % n)
コード例 #7
0
    def plot_update_trend(self):
        """Rebuild trend/bound/patch sources and statistics for the time plot.

        For each group: gathers the selected points (or all points when no
        selection exists), collapses them into per-day values, computes a
        moving average plus a percentile band centered on the median, and
        writes the corresponding sources.  Also updates the histogram x-axis
        label and the normality / t-test / rank-sum result labels.  Clears
        everything when no y-axis variable is selected, then always refreshes
        the histograms.
        """
        if self.y_axis.value:

            # Fall back to all points when a group has no active selection
            selected_indices = {n: getattr(self.sources, 'time_%s' % n).selected.indices for n in GROUP_LABELS}
            for n in GROUP_LABELS:
                if not selected_indices[n]:
                    selected_indices[n] = range(len(getattr(self.sources, 'time_%s' % n).data['x']))

            group = {n: {'x': [], 'y': []} for n in GROUP_LABELS}

            for n in GROUP_LABELS:
                for i in range(len(getattr(self.sources, 'time_%s' % n).data['x'])):
                    if i in selected_indices[n]:
                        for v in ['x', 'y']:
                            group[n][v].append(getattr(self.sources, 'time_%s' % n).data[v][i])

            # Widget text may be empty/None or non-numeric; default on parse
            # failure only (bare except replaced with the specific exceptions)
            try:
                avg_len = int(self.look_back_distance.value)
            except (TypeError, ValueError):
                avg_len = 1

            try:
                percentile = float(self.plot_percentile.value)
            except (TypeError, ValueError):
                percentile = 90.

            # average daily data and keep track of points per day, calculate moving average

            group_collapsed = {n: [] for n in GROUP_LABELS}
            for n in GROUP_LABELS:
                if group[n]['x']:
                    group_collapsed[n] = collapse_into_single_dates(group[n]['x'], group[n]['y'])
                    if self.look_back_units.value == "Dates with a Sim":
                        x_trend, moving_avgs = moving_avg(group_collapsed[n], avg_len)
                    else:
                        x_trend, moving_avgs = moving_avg_by_calendar_day(group_collapsed[n], avg_len)

                    # Percentile band is centered on the median (50th percentile)
                    y_np = np.array(group[n]['y'])
                    upper_bound = float(np.percentile(y_np, 50. + percentile / 2.))
                    average = float(np.percentile(y_np, 50))
                    lower_bound = float(np.percentile(y_np, 50. - percentile / 2.))
                    getattr(self.sources, 'time_trend_%s' % n).data = {'x': x_trend,
                                                                       'y': moving_avgs,
                                                                       'mrn': ['Avg'] * len(x_trend)}
                    getattr(self.sources, 'time_bound_%s' % n).data = {'x': group[n]['x'],
                                                                       'mrn': ['Bound'] * len(group[n]['x']),
                                                                       'upper': [upper_bound] * len(group[n]['x']),
                                                                       'avg': [average] * len(group[n]['x']),
                                                                       'lower': [lower_bound] * len(group[n]['x'])}
                    getattr(self.sources, 'time_patch_%s' % n).data = {'x': [group[n]['x'][0], group[n]['x'][-1],
                                                                             group[n]['x'][-1], group[n]['x'][0]],
                                                                       'y': [upper_bound, upper_bound, lower_bound, lower_bound]}
                else:
                    for v in ['trend', 'bound', 'patch']:
                        clear_source_data(self.sources, 'time_%s_%s' % (v, n))

            x_var = str(self.y_axis.value)
            if x_var.startswith('DVH Endpoint'):
                self.histograms.xaxis.axis_label = x_var.split("DVH Endpoint: ")[1]
            elif x_var == 'EUD':
                self.histograms.xaxis.axis_label = "%s (Gy)" % x_var
            elif x_var == 'NTCP/TCP':
                self.histograms.xaxis.axis_label = "NTCP or TCP"
            else:
                if self.range_categories[x_var]['units']:
                    self.histograms.xaxis.axis_label = "%s (%s)" % (x_var, self.range_categories[x_var]['units'])
                else:
                    self.histograms.xaxis.axis_label = x_var

            # Normal Test
            s, p = {n: '' for n in GROUP_LABELS}, {n: '' for n in GROUP_LABELS}
            for n in GROUP_LABELS:
                if group[n]['y']:
                    s[n], p[n] = normaltest(group[n]['y'])
                    p[n] = "%0.3f" % p[n]

            # t-Test and Rank Sums (require data in both groups)
            pt, pr = '', ''
            if group['1']['y'] and group['2']['y']:
                st, pt = ttest_ind(group['1']['y'], group['2']['y'])
                sr, pr = ranksums(group['1']['y'], group['2']['y'])
                pt = "%0.3f" % pt
                pr = "%0.3f" % pr

            self.histogram_normaltest_1_text.text = "Group 1 Normal Test p-value = %s" % p['1']
            self.histogram_normaltest_2_text.text = "Group 2 Normal Test p-value = %s" % p['2']
            self.histogram_ttest_text.text = "Two Sample t-Test (Group 1 vs 2) p-value = %s" % pt
            self.histogram_ranksums_text.text = "Wilcoxon rank-sum (Group 1 vs 2) p-value = %s" % pr

        else:
            for n in GROUP_LABELS:
                for k in ['trend', 'bound', 'patch']:
                    clear_source_data(self.sources, "time_%s_%s" % (k, n))

            self.histogram_normaltest_1_text.text = "Group 1 Normal Test p-value = "
            self.histogram_normaltest_2_text.text = "Group 2 Normal Test p-value = "
            self.histogram_ttest_text.text = "Two Sample t-Test (Group 1 vs 2) p-value = "
            self.histogram_ranksums_text.text = "Wilcoxon rank-sum (Group 1 vs 2) p-value = "

        self.update_histograms()
コード例 #8
0
    def update_plot(self):
        """Rebuild the time-series sources (time_1/time_2) for the selected y-axis.

        Resolves the value/uid/mrn columns for the chosen variable, maps each
        value to its plan's sim study date, assigns a group color based on the
        current DVH groups, sorts the points chronologically, and splits them
        into the group 1 and group 2 sources.  Clears both sources when no
        y-axis variable is selected.  Always refreshes the trend plot at the
        end.
        """
        import numbers  # local import: version-agnostic numeric type check below

        new = str(self.y_axis.value)
        if new:
            clear_source_selection(self.sources, 'time_1')
            clear_source_selection(self.sources, 'time_2')

            # Pick the value/uid/mrn columns for the selected variable
            if new.startswith('DVH Endpoint: '):
                y_var_name = new.split(': ')[1]
                y_source_values = self.sources.endpoint_calcs.data[y_var_name]
                y_source_uids = self.sources.endpoint_calcs.data['uid']
                y_source_mrns = self.sources.endpoint_calcs.data['mrn']
            elif new == 'EUD':
                y_source_values = self.sources.rad_bio.data['eud']
                y_source_uids = self.sources.rad_bio.data['uid']
                y_source_mrns = self.sources.rad_bio.data['mrn']
            elif new == 'NTCP/TCP':
                y_source_values = self.sources.rad_bio.data['ntcp_tcp']
                y_source_uids = self.sources.rad_bio.data['uid']
                y_source_mrns = self.sources.rad_bio.data['mrn']
            else:
                y_source = self.range_categories[new]['source']
                y_var_name = self.range_categories[new]['var_name']
                y_source_values = y_source.data[y_var_name]
                y_source_uids = y_source.data['uid']
                y_source_mrns = y_source.data['mrn']

            self.update_y_axis_label()

            sim_study_dates = self.sources.plans.data['sim_study_date']
            sim_study_dates_uids = self.sources.plans.data['uid']

            x_values = []
            skipped = []
            colors = []
            for v in range(len(y_source_values)):
                uid = y_source_uids[v]
                try:
                    sim_study_dates_index = sim_study_dates_uids.index(uid)
                    current_date_str = sim_study_dates[sim_study_dates_index]
                    if current_date_str == 'None':
                        current_date = datetime.now()
                    else:
                        current_date = datetime(int(current_date_str[0:4]),
                                                int(current_date_str[5:7]),
                                                int(current_date_str[8:10]))
                    x_values.append(current_date)
                    skipped.append(False)
                except (TypeError, ValueError):
                    # uid missing from plans (.index raises ValueError) or a
                    # malformed/None date string; skip this point
                    skipped.append(True)

                # Get group color
                if not skipped[-1]:
                    if new.startswith('DVH Endpoint') or new in {'EUD', 'NTCP/TCP'} \
                            or self.range_categories[new]['source'] == self.sources.dvhs:
                        # ROI-level variables: match on both uid and roi name
                        if new in {'EUD', 'NTCP/TCP'}:
                            roi = self.sources.rad_bio.data['roi_name'][v]
                        else:
                            roi = self.sources.dvhs.data['roi_name'][v]

                        found = {'Group 1': False, 'Group 2': False}

                        color = None

                        if self.current_dvh_group['1']:
                            r1, r1_max = 0, len(self.current_dvh_group['1'].study_instance_uid)
                            while r1 < r1_max and not found['Group 1']:
                                if self.current_dvh_group['1'].study_instance_uid[r1] == uid and \
                                        self.current_dvh_group['1'].roi_name[r1] == roi:
                                    found['Group 1'] = True
                                    color = options.GROUP_1_COLOR
                                r1 += 1

                        if self.current_dvh_group['2']:
                            r2, r2_max = 0, len(self.current_dvh_group['2'].study_instance_uid)
                            while r2 < r2_max and not found['Group 2']:
                                if self.current_dvh_group['2'].study_instance_uid[r2] == uid and \
                                        self.current_dvh_group['2'].roi_name[r2] == roi:
                                    found['Group 2'] = True
                                    if found['Group 1']:
                                        color = options.GROUP_1_and_2_COLOR
                                    else:
                                        color = options.GROUP_2_COLOR
                                r2 += 1

                        colors.append(color)
                    else:
                        # Plan-level variables: membership by uid alone
                        if self.current_dvh_group['1'] and self.current_dvh_group['2']:
                            if uid in self.current_dvh_group['1'].study_instance_uid and \
                                    uid in self.current_dvh_group['2'].study_instance_uid:
                                colors.append(options.GROUP_1_and_2_COLOR)
                            elif uid in self.current_dvh_group['1'].study_instance_uid:
                                colors.append(options.GROUP_1_COLOR)
                            else:
                                colors.append(options.GROUP_2_COLOR)
                        elif self.current_dvh_group['1']:
                            colors.append(options.GROUP_1_COLOR)
                        else:
                            colors.append(options.GROUP_2_COLOR)

            y_values = []
            y_mrns = []
            for v in range(len(y_source_values)):
                if not skipped[v]:
                    y_values.append(y_source_values[v])
                    y_mrns.append(y_source_mrns[v])
                    # numbers.Number covers int, float, and py2's long alike;
                    # the former (int, long, float) tuple is a NameError on py3
                    if not isinstance(y_values[-1], numbers.Number):
                        y_values[-1] = 0

            # Sort all parallel lists chronologically
            sort_index = sorted(range(len(x_values)), key=lambda k: x_values[k])
            x_values_sorted, y_values_sorted, y_mrns_sorted, colors_sorted = [], [], [], []

            for s in range(len(x_values)):
                x_values_sorted.append(x_values[sort_index[s]])
                y_values_sorted.append(y_values[sort_index[s]])
                y_mrns_sorted.append(y_mrns[sort_index[s]])
                colors_sorted.append(colors[sort_index[s]])

            # Split into group sources; points in both groups go to both
            source_time_1_data = {'x': [], 'y': [], 'mrn': [], 'date_str': []}
            source_time_2_data = {'x': [], 'y': [], 'mrn': [], 'date_str': []}
            for i in range(len(x_values_sorted)):
                if colors_sorted[i] in {options.GROUP_1_COLOR, options.GROUP_1_and_2_COLOR}:
                    source_time_1_data['x'].append(x_values_sorted[i])
                    source_time_1_data['y'].append(y_values_sorted[i])
                    source_time_1_data['mrn'].append(y_mrns_sorted[i])
                    source_time_1_data['date_str'].append(x_values_sorted[i].strftime("%Y-%m-%d"))
                if colors_sorted[i] in {options.GROUP_2_COLOR, options.GROUP_1_and_2_COLOR}:
                    source_time_2_data['x'].append(x_values_sorted[i])
                    source_time_2_data['y'].append(y_values_sorted[i])
                    source_time_2_data['mrn'].append(y_mrns_sorted[i])
                    source_time_2_data['date_str'].append(x_values_sorted[i].strftime("%Y-%m-%d"))

            self.sources.time_1.data = source_time_1_data
            self.sources.time_2.data = source_time_2_data
        else:
            clear_source_data(self.sources, 'time_1')
            clear_source_data(self.sources, 'time_2')

        self.plot_update_trend()
コード例 #9
0
    def update_dvh_data(self, dvh):
        """Build sources.dvhs.data from *dvh*, adding stat and review rows.

        Queries group 1/2 DVHs per the current constraints and writes their
        patch/stats sources, extends *dvh* in place with six statistic rows
        (Max/Q3/Median/Mean/Q1/Min) per active group plus one review-DVH row,
        then refreshes the beam, plan, and rx sources.  Returns
        {'1': dvh_group_1, '2': dvh_group_2}; a group is [] when it has no
        constraints.

        NOTE(review): itertools.izip is Python 2 only -- confirm the project
        still targets py2, since this raises AttributeError on py3.
        """
        dvh_group_1, dvh_group_2 = [], []
        group_1_constraint_count, group_2_constraint_count = group_constraint_count(
            self.sources)

        # Six statistic rows are appended per active group (see loop below)
        if group_1_constraint_count and group_2_constraint_count:
            extra_rows = 12
        elif group_1_constraint_count or group_2_constraint_count:
            extra_rows = 6
        else:
            extra_rows = 0

        print(str(datetime.now()), 'updating dvh data', sep=' ')
        # One line color per DVH plus per stat row (izip truncates to shorter)
        line_colors = [
            color
            for j, color in itertools.izip(range(dvh.count +
                                                 extra_rows), self.colors)
        ]

        # Dose axis in 0.01-unit steps, offset by half a bin
        x_axis = np.round(
            np.add(np.linspace(0, dvh.bin_count, dvh.bin_count) / 100., 0.005),
            3)

        print(str(datetime.now()), 'beginning stat calcs', sep=' ')

        # Dose/volume scales follow the two radio groups on the DVH tab
        if self.dvhs.radio_group_dose.active == 1:
            stat_dose_scale = 'relative'
            x_axis_stat = dvh.get_resampled_x_axis()
        else:
            stat_dose_scale = 'absolute'
            x_axis_stat = x_axis
        if self.dvhs.radio_group_volume.active == 0:
            stat_volume_scale = 'absolute'
        else:
            stat_volume_scale = 'relative'

        print(str(datetime.now()), 'calculating patches', sep=' ')

        if group_1_constraint_count == 0:
            # No group 1 constraints: clear its patch/stats sources
            self.uids['1'] = []
            clear_source_data(self.sources, 'patch_1')
            clear_source_data(self.sources, 'stats_1')
        else:
            print(str(datetime.now()), 'Constructing Group 1 query', sep=' ')
            self.uids['1'], dvh_query_str = self.get_query(group=1)
            dvh_group_1 = DVH(uid=self.uids['1'], dvh_condition=dvh_query_str)
            self.uids['1'] = dvh_group_1.study_instance_uid
            stat_dvhs_1 = dvh_group_1.get_standard_stat_dvh(
                dose_scale=stat_dose_scale, volume_scale=stat_volume_scale)

            if self.dvhs.radio_group_dose.active == 1:
                x_axis_1 = dvh_group_1.get_resampled_x_axis()
            else:
                x_axis_1 = np.add(
                    np.linspace(0, dvh_group_1.bin_count,
                                dvh_group_1.bin_count) / 100., 0.005)

            # Closed polygon between Q1 and Q3 (x out, then reversed back)
            self.sources.patch_1.data = {
                'x_patch':
                np.append(x_axis_1, x_axis_1[::-1]).tolist(),
                'y_patch':
                np.append(stat_dvhs_1['q3'], stat_dvhs_1['q1'][::-1]).tolist()
            }
            self.sources.stats_1.data = {
                'x': x_axis_1.tolist(),
                'min': stat_dvhs_1['min'].tolist(),
                'q1': stat_dvhs_1['q1'].tolist(),
                'mean': stat_dvhs_1['mean'].tolist(),
                'median': stat_dvhs_1['median'].tolist(),
                'q3': stat_dvhs_1['q3'].tolist(),
                'max': stat_dvhs_1['max'].tolist()
            }
        if group_2_constraint_count == 0:
            # No group 2 constraints: clear its patch/stats sources
            self.uids['2'] = []
            clear_source_data(self.sources, 'patch_2')
            clear_source_data(self.sources, 'stats_2')

        else:
            print(str(datetime.now()), 'Constructing Group 2 query', sep=' ')
            self.uids['2'], dvh_query_str = self.get_query(group=2)
            dvh_group_2 = DVH(uid=self.uids['2'], dvh_condition=dvh_query_str)
            self.uids['2'] = dvh_group_2.study_instance_uid
            stat_dvhs_2 = dvh_group_2.get_standard_stat_dvh(
                dose_scale=stat_dose_scale, volume_scale=stat_volume_scale)

            if self.dvhs.radio_group_dose.active == 1:
                x_axis_2 = dvh_group_2.get_resampled_x_axis()
            else:
                x_axis_2 = np.add(
                    np.linspace(0, dvh_group_2.bin_count,
                                dvh_group_2.bin_count) / 100., 0.005)

            # Same Q1..Q3 patch construction as group 1
            self.sources.patch_2.data = {
                'x_patch':
                np.append(x_axis_2, x_axis_2[::-1]).tolist(),
                'y_patch':
                np.append(stat_dvhs_2['q3'], stat_dvhs_2['q1'][::-1]).tolist()
            }
            self.sources.stats_2.data = {
                'x': x_axis_2.tolist(),
                'min': stat_dvhs_2['min'].tolist(),
                'q1': stat_dvhs_2['q1'].tolist(),
                'mean': stat_dvhs_2['mean'].tolist(),
                'median': stat_dvhs_2['median'].tolist(),
                'q3': stat_dvhs_2['q3'].tolist(),
                'max': stat_dvhs_2['max'].tolist()
            }

        print(str(datetime.now()), 'patches calculated', sep=' ')

        # Per-row scale tags and axis labels (+1 accounts for the review row)
        if self.dvhs.radio_group_dose.active == 0:
            x_scale = ['Gy'] * (dvh.count + extra_rows + 1)
            self.dvhs.plot.xaxis.axis_label = "Dose (Gy)"
        else:
            x_scale = ['%RxDose'] * (dvh.count + extra_rows + 1)
            self.dvhs.plot.xaxis.axis_label = "Relative Dose (to Rx)"
        if self.dvhs.radio_group_volume.active == 0:
            y_scale = ['cm^3'] * (dvh.count + extra_rows + 1)
            self.dvhs.plot.yaxis.axis_label = "Absolute Volume (cc)"
        else:
            y_scale = ['%Vol'] * (dvh.count + extra_rows + 1)
            self.dvhs.plot.yaxis.axis_label = "Relative Volume"

        # new_endpoint_columns = [''] * (dvh.count + extra_rows + 1)

        # Curve data per DVH in the selected dose/volume scales
        x_data, y_data = [], []
        for n in range(dvh.count):
            if self.dvhs.radio_group_dose.active == 0:
                x_data.append(x_axis.tolist())
            else:
                x_data.append(np.divide(x_axis, dvh.rx_dose[n]).tolist())
            if self.dvhs.radio_group_volume.active == 0:
                y_data.append(
                    np.multiply(dvh.dvh[:, n], dvh.volume[n]).tolist())
            else:
                y_data.append(dvh.dvh[:, n].tolist())

        y_names = ['Max', 'Q3', 'Median', 'Mean', 'Q1', 'Min']

        # Determine Population group (blue (1) or red (2))
        dvh_groups = []
        for r in range(len(dvh.study_instance_uid)):

            current_uid = dvh.study_instance_uid[r]
            current_roi = dvh.roi_name[r]

            if dvh_group_1:
                for r1 in range(len(dvh_group_1.study_instance_uid)):
                    if dvh_group_1.study_instance_uid[
                            r1] == current_uid and dvh_group_1.roi_name[
                                r1] == current_roi:
                        dvh_groups.append('Group 1')

            if dvh_group_2:
                for r2 in range(len(dvh_group_2.study_instance_uid)):
                    if dvh_group_2.study_instance_uid[
                            r2] == current_uid and dvh_group_2.roi_name[
                                r2] == current_roi:
                        # Already tagged Group 1 for this row: upgrade in place
                        if len(dvh_groups) == r + 1:
                            dvh_groups[r] = 'Group 1 & 2'
                        else:
                            dvh_groups.append('Group 2')

            if len(dvh_groups) < r + 1:
                dvh_groups.append('error')

        dvh_groups.insert(0, 'Review')

        # Append the six statistic rows for each active group
        for n in range(6):
            if group_1_constraint_count > 0:
                dvh.mrn.append(y_names[n])
                dvh.roi_name.append('N/A')
                x_data.append(x_axis_stat.tolist())
                current = stat_dvhs_1[y_names[n].lower()].tolist()
                y_data.append(current)
                dvh_groups.append('Group 1')
            if group_2_constraint_count > 0:
                dvh.mrn.append(y_names[n])
                dvh.roi_name.append('N/A')
                x_data.append(x_axis_stat.tolist())
                current = stat_dvhs_2[y_names[n].lower()].tolist()
                y_data.append(current)
                dvh_groups.append('Group 2')

        # Adjust dvh object to include stats data
        attributes = [
            'rx_dose', 'volume', 'surface_area', 'min_dose', 'mean_dose',
            'max_dose', 'dist_to_ptv_min', 'dist_to_ptv_median',
            'dist_to_ptv_mean', 'dist_to_ptv_max', 'dist_to_ptv_centroids',
            'ptv_overlap', 'cross_section_max', 'cross_section_median',
            'spread_x', 'spread_y', 'spread_z'
        ]
        if extra_rows > 0:
            dvh.study_instance_uid.extend(['N/A'] * extra_rows)
            dvh.institutional_roi.extend(['N/A'] * extra_rows)
            dvh.physician_roi.extend(['N/A'] * extra_rows)
            dvh.roi_type.extend(['Stat'] * extra_rows)
        if group_1_constraint_count > 0:
            for attr in attributes:
                getattr(dvh,
                        attr).extend(calc_stats(getattr(dvh_group_1, attr)))

        if group_2_constraint_count > 0:
            for attr in attributes:
                getattr(dvh,
                        attr).extend(calc_stats(getattr(dvh_group_2, attr)))

        # Adjust dvh object for review dvh (placeholder row at index 0)
        dvh.dvh = np.insert(dvh.dvh, 0, 0, 1)
        dvh.count += 1
        dvh.mrn.insert(0, self.dvhs.select_reviewed_mrn.value)
        dvh.study_instance_uid.insert(0, '')
        dvh.institutional_roi.insert(0, '')
        dvh.physician_roi.insert(0, '')
        dvh.roi_name.insert(0, self.dvhs.select_reviewed_dvh.value)
        dvh.roi_type.insert(0, 'Review')
        dvh.rx_dose.insert(0, 0)
        dvh.volume.insert(0, 0)
        dvh.surface_area.insert(0, '')
        dvh.min_dose.insert(0, '')
        dvh.mean_dose.insert(0, '')
        dvh.max_dose.insert(0, '')
        dvh.dist_to_ptv_min.insert(0, 'N/A')
        dvh.dist_to_ptv_mean.insert(0, 'N/A')
        dvh.dist_to_ptv_median.insert(0, 'N/A')
        dvh.dist_to_ptv_max.insert(0, 'N/A')
        dvh.dist_to_ptv_centroids.insert(0, 'N/A')
        dvh.ptv_overlap.insert(0, 'N/A')
        dvh.cross_section_max.insert(0, 'N/A')
        dvh.cross_section_median.insert(0, 'N/A')
        dvh.spread_x.insert(0, 'N/A')
        dvh.spread_y.insert(0, 'N/A')
        dvh.spread_z.insert(0, 'N/A')
        line_colors.insert(0, options.REVIEW_DVH_COLOR)
        x_data.insert(0, [0])
        y_data.insert(0, [0])

        # anonymize ids
        self.anon_id_map = {mrn: i for i, mrn in enumerate(list(set(dvh.mrn)))}
        anon_id = [self.anon_id_map[dvh.mrn[i]] for i in range(len(dvh.mrn))]

        print(str(datetime.now()), "writing sources.dvhs.data", sep=' ')
        self.sources.dvhs.data = {
            'mrn': dvh.mrn,
            'anon_id': anon_id,
            'group': dvh_groups,
            'uid': dvh.study_instance_uid,
            'roi_institutional': dvh.institutional_roi,
            'roi_physician': dvh.physician_roi,
            'roi_name': dvh.roi_name,
            'roi_type': dvh.roi_type,
            'rx_dose': dvh.rx_dose,
            'volume': dvh.volume,
            'surface_area': dvh.surface_area,
            'min_dose': dvh.min_dose,
            'mean_dose': dvh.mean_dose,
            'max_dose': dvh.max_dose,
            'dist_to_ptv_min': dvh.dist_to_ptv_min,
            'dist_to_ptv_mean': dvh.dist_to_ptv_mean,
            'dist_to_ptv_median': dvh.dist_to_ptv_median,
            'dist_to_ptv_max': dvh.dist_to_ptv_max,
            'dist_to_ptv_centroids': dvh.dist_to_ptv_centroids,
            'ptv_overlap': dvh.ptv_overlap,
            'cross_section_max': dvh.cross_section_max,
            'cross_section_median': dvh.cross_section_median,
            'spread_x': dvh.spread_x,
            'spread_y': dvh.spread_y,
            'spread_z': dvh.spread_z,
            'x': x_data,
            'y': y_data,
            'color': line_colors,
            'x_scale': x_scale,
            'y_scale': y_scale
        }

        print(str(datetime.now()),
              'begin updating beam, plan, rx data sources',
              sep=' ')
        self.update_beam_data(dvh.study_instance_uid)
        self.update_plan_data(dvh.study_instance_uid)
        self.update_rx_data(dvh.study_instance_uid)
        print(str(datetime.now()), 'all sources set', sep=' ')

        return {'1': dvh_group_1, '2': dvh_group_2}