Example No. 1
    def plot_input_and_hull_points(self, save, show, plot_img=False):
        fig, ax = plot_utils.setup_figure_1ax(shrink_ax=False)
        ax.scatter(self.input_points['x'], self.input_points['y'], s=2)
        ax.scatter(self.hull_points['x'], self.hull_points['y'], s=10)

        if plot_img:
            image = cv2.imread(self.input_img_path)
            ax.imshow(np.flipud(image), origin='lower', alpha=0.3)

        plot_utils.show_and_or_save(save=save,
                                    show=show,
                                    save_file_path=self.save_path)
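
The helpers used above (plot_utils.setup_figure_1ax, plot_utils.show_and_or_save) and the attributes self.input_points / self.hull_points are project-specific. As a rough, self-contained sketch of the same plot, the snippet below scatters a point cloud and its hull vertices with plain matplotlib; scipy's ConvexHull only stands in for the project's own hull algorithms and is an assumption, not part of the original code.

# Self-contained sketch of the input-vs-hull scatter plot above.
# scipy's ConvexHull replaces the project's own hull computation (assumption).
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import ConvexHull

points = np.random.rand(500, 2)          # stand-in for self.input_points
hull = ConvexHull(points)                # indices of the hull vertices
hull_points = points[hull.vertices]      # stand-in for self.hull_points

fig, ax = plt.subplots()
ax.scatter(points[:, 0], points[:, 1], s=2)
ax.scatter(hull_points[:, 0], hull_points[:, 1], s=10)
plt.show()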
Example No. 2
    def plot_time_distributions(self, save=False, show=False, file_name=None):
        fig, ax = plot_utils.setup_figure_1ax(size=(15, 9),
                                              x_label='Run time [s]',
                                              y_label='Density')

        assert len(self.all_data_dict.keys()) == 1

        run_times = self.all_data_dict['0']['run_times']

        median = np.median(run_times)

        # confidence interval assuming a Gaussian distribution (currently disabled)
        '''
        n = len(run_times)
        se = scipy.stats.sem(run_times)
        confidence = 0.95
        h = se * scipy.stats.t.ppf((1 + confidence) / 2., n - 1)
        ci_1 = median - h
        ci_2 = median + h
        ax.errorbar(median, max(n) / 2., xerr=np.array([[ci_1, ci_2]]).T, linewidth=1, color='black', alpha=0.5, capsize=3, fmt='o')
        '''

        # quantile lines
        q5 = np.percentile(run_times, 5)
        q95 = np.percentile(run_times, 95)

        weights = np.ones_like(run_times) / float(len(run_times))
        n, bins, patches = ax.hist(run_times,
                                   bins=2000,
                                   facecolor='cyan',
                                   alpha=0.75,
                                   weights=weights)
        ax.plot([q5, q5], [0.001, 0.15], linewidth=3, color='blue', alpha=0.5)
        ax.plot([median, median], [0.001, 0.12],
                linewidth=3,
                color='red',
                alpha=0.5)
        ax.plot([q95, q95], [0.001, 0.09],
                linewidth=3,
                color='blue',
                alpha=0.5)
        # ax.set_xscale('log')

        self.evaluate_save_show(save, show, file_name)
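
The histogram above is normalised by giving every sample the weight 1/N, so the bar heights sum to one, and the 5th percentile, median and 95th percentile are marked with vertical lines. The stand-alone sketch below reproduces that idea with synthetic run times; axvline replaces the hand-drawn line segments and is not part of the original code.

# Weight-normalised run-time histogram with median and 5th/95th percentile markers.
# run_times is synthetic; the original reads it from self.all_data_dict.
import numpy as np
import matplotlib.pyplot as plt

run_times = np.random.lognormal(mean=0.0, sigma=0.3, size=10000)

q5, median, q95 = np.percentile(run_times, [5, 50, 95])
weights = np.ones_like(run_times) / len(run_times)   # bar heights sum to 1

fig, ax = plt.subplots(figsize=(15, 9))
ax.hist(run_times, bins=200, weights=weights, facecolor='cyan', alpha=0.75)
for x, color in [(q5, 'blue'), (median, 'red'), (q95, 'blue')]:
    ax.axvline(x, linewidth=3, color=color, alpha=0.5)
ax.set_xlabel('Run time [s]')
ax.set_ylabel('Density')
plt.show()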
Example No. 3
    def plot_speedup_vs_cores(self, save=False, show=False, file_name=None):
        """
        Plots the speedup vs number of cores for all algorithms in the run.
        """
        if len(self.run_config['run_params']['n_points']) > 1 or len(self.run_config['run_params']['sub_size']) > 1 \
                or len(self.run_config['run_params']['img_files']) > 1:
            raise NotImplementedError(
                "Only varying the number of cores and the algorithms is supported here."
            )

        fig, ax = plot_utils.setup_figure_1ax(size=(15, 9),
                                              x_label='# cores',
                                              y_label='Speedup')

        # set up empty data container holding n cores, run times, speedup of max n cores
        algo_t_core = {
            algorithm: {
                'n_cores': [],
                'run_times': [],
                'time_1core': 0.0,
                'last_speedup': 0.0,
                'errors': []
            }
            for algorithm in self.run_config['run_params']['algorithms']
        }

        # sorting wrt increasing n_cores
        for _, data in self.all_data_dict.items():
            algo = data['algorithm']
            algo_t_core[algo]['n_cores'].append(data['n_cores'])
            algo_t_core[algo]['run_times'].append(data['median_run_times'])
            algo_t_core[algo]['errors'].append(data['errors'])
            if data['n_cores'] == 1:
                algo_t_core[algo]['time_1core'] = data['median_run_times']

        max_n_core = 1
        last_speedups = []
        for algorithm, algo_data in algo_t_core.items():
            n_cores, run_times = plot_utils.sort_two_lists_wrt_first(
                algo_data['n_cores'], algo_data['run_times'])
            speedups = [
                algo_data['time_1core'] / run_times[n]
                for n in range(len(run_times))
            ]
            # keep the error bars aligned with the cores sorted above
            sort_indices = np.argsort(algo_data['n_cores'])
            errors = [algo_data['errors'][i] for i in sort_indices]
            errors = [[err[0] for err in errors], [err[1] for err in errors]]

            if max(n_cores) > max_n_core:
                max_n_core = max(n_cores)
            last_speedups.append(speedups[-1])

            ax.plot(n_cores, speedups, linewidth=3, label=algorithm, alpha=0.5)
            ax.plot(n_cores,
                    speedups,
                    'o',
                    linewidth=3,
                    alpha=0.3,
                    color='black',
                    markeredgecolor='none')
            ax.errorbar(n_cores,
                        speedups,
                        yerr=errors,
                        fmt='none',
                        alpha=0.5,
                        color='black',
                        capsize=3)

            print(algorithm)
        ax.plot([1, max_n_core], [1.0, max_n_core],
                'k--',
                linewidth=1,
                alpha=0.5,
                label='linear speedup')

        order = np.argsort(last_speedups)[::-1]
        plot_utils.ordered_legend(ax, order)
        self.evaluate_save_show(save, show, file_name)
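
The quantity plotted above is the strong-scaling speedup S(p) = T(1) / T(p), compared against the ideal line S(p) = p. Below is a hedged, self-contained sketch of that computation and plot with invented timings; the original pulls median run times and asymmetric errors from self.all_data_dict.

# Strong-scaling speedup per algorithm vs the ideal linear-speedup line.
# All timings below are invented placeholders.
import matplotlib.pyplot as plt

n_cores = [1, 2, 4, 8, 16]
run_times = {'quickhull': [8.0, 4.2, 2.3, 1.4, 0.9],
             'jarvis':    [12.0, 6.5, 3.6, 2.1, 1.5]}   # seconds

fig, ax = plt.subplots(figsize=(15, 9))
for algo, times in run_times.items():
    speedups = [times[0] / t for t in times]            # S(p) = T(1) / T(p)
    ax.plot(n_cores, speedups, 'o-', linewidth=3, alpha=0.5, label=algo)
ax.plot([1, max(n_cores)], [1, max(n_cores)], 'k--', alpha=0.5, label='linear speedup')
ax.set_xlabel('# cores')
ax.set_ylabel('Speedup')
ax.legend()
plt.show()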
Example No. 4
    def plot_shape_comparison(self, save=False, show=False, file_name=None):
        """
        Plots the absolute runtime vs input size for all algorithms in the run.

        For now, the runtimes are the median over all iterations for a run configuration.
        """

        if len(self.run_config['run_params']['n_cores']) > 1 or len(self.run_config['run_params']['sub_size']) > 1 \
                or len(self.run_config['run_params']['n_points']) > 1:
            raise NotImplementedError(
                "Only varying the input images and the algorithms is supported here."
            )

        fig, ax = plot_utils.setup_figure_1ax(size=(13, 9),
                                              y_label='Run time [s]')

        # set up empty data container
        algo_runtimes = {
            algorithm: {
                'input_imgs': [],
                'run_times': [],
                'errors': []
            }
            for algorithm in self.run_config['run_params']['algorithms']
        }

        for _, data in self.all_data_dict.items():
            algo_runtimes[data['algorithm']]['input_imgs'].append(data['img'])
            algo_runtimes[data['algorithm']]['run_times'].append(
                data['median_run_times'])
            algo_runtimes[data['algorithm']]['errors'].append(data['errors'])

        img_names = self.run_config['run_params']['img_files']
        img_names = [img_name.split('.')[0] for img_name in img_names]
        n_images = len(img_names)
        ind_pos = np.arange(n_images)

        width = 0.2
        offset = {}
        algo_idx = 0

        for algorithm in algo_runtimes.keys():
            offset[algorithm] = algo_idx * width
            algo_idx += 1

        # sort wrt to JARVIS algorithm
        sort_indices = np.argsort(
            np.array(algo_runtimes['jarvis']['run_times']))

        for algorithm, algo_data in algo_runtimes.items():
            run_times = [algo_data['run_times'][i] for i in sort_indices]
            img_names = [algo_data['input_imgs'][i] for i in sort_indices]
            errors = [algo_data['errors'][i] for i in sort_indices]
            errors = [[err[0] for err in errors], [err[1] for err in errors]]

            print('')
            print('{:10} {}'.format('--- algo: ', algorithm))
            print('{:10} {}'.format('images: ', img_names))
            print('{:10} {}'.format('times: ',
                                    [round(t, 3) for t in run_times]))

            ax.bar(ind_pos + offset[algorithm],
                   run_times,
                   width,
                   label=algorithm,
                   alpha=0.7)
            ax.errorbar(ind_pos + offset[algorithm],
                        run_times,
                        yerr=errors,
                        fmt='.',
                        capsize=3,
                        alpha=0.7)

        ax.xaxis.set_ticks_position('none')
        ax.set_xticks(np.arange(len(img_names)))
        ax.set_xticklabels(img_names, rotation=45, ha="right")
        ax.set_title("Input size: {n_points} points".format(
            n_points=self.run_config['run_params']['n_points'][0]))
        # plt.xticks(ha='left')  # disabled: would override the ha='right' alignment set above

        ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), frameon=False)

        self.evaluate_save_show(save, show, file_name)
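
The bars above are grouped per input image by shifting each algorithm's bars by algo_idx * width. Below is a minimal sketch of that layout with invented timings; the algorithm and image names are placeholders.

# Grouped bar chart: one group per input image, one offset slot per algorithm.
# All numbers are invented placeholders.
import numpy as np
import matplotlib.pyplot as plt

img_names = ['circle', 'square', 'gaussian']
run_times = {'quickhull': [0.8, 1.1, 0.6],
             'jarvis':    [2.4, 3.0, 1.9]}   # seconds

width = 0.2
ind_pos = np.arange(len(img_names))

fig, ax = plt.subplots(figsize=(13, 9))
for algo_idx, (algo, times) in enumerate(run_times.items()):
    ax.bar(ind_pos + algo_idx * width, times, width, label=algo, alpha=0.7)
ax.set_xticks(ind_pos + width / 2)
ax.set_xticklabels(img_names, rotation=45, ha='right')
ax.set_ylabel('Run time [s]')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), frameon=False)
plt.show()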
Example No. 5
    def plot_weak_scaling(self, save=False, show=False, file_name=None):
        """
        Plots the speedup vs number of cores for all algorithms in the run.
        """

        fig, ax = plot_utils.setup_figure_1ax(size=(15, 9),
                                              x_label='# cores',
                                              y_label='Efficiency')

        # set up empty data container holding n cores, run times, speedup of max n cores
        algo_t_core = {
            algorithm: {
                'n_cores': [],
                'run_times': [],
                'time_1core': 0.0,
                'last_speedup': 0.0,
                'errors': [],
                'n_points': 0
            }
            for algorithm in self.run_config['run_params']['algorithms']
        }

        # sorting wrt increasing n_cores
        for _, data in self.all_data_dict.items():
            algo = data['algorithm']
            algo_t_core[algo]['n_cores'].append(data['n_cores'])
            algo_t_core[algo]['run_times'].append(data['median_run_times'])
            algo_t_core[algo]['errors'].append(data['errors'])
            algo_t_core[algo]['n_points'] = data['n_points']
            if data['n_cores'] == 1:
                algo_t_core[algo]['time_1core'] = data['median_run_times']

        max_n_core = 1
        last_speedups = []
        for algorithm, algo_data in algo_t_core.items():
            n_cores, run_times = plot_utils.sort_two_lists_wrt_first(
                algo_data['n_cores'], algo_data['run_times'])
            n_points = algo_data['n_points']
            is_weak_scale_combo = (n_points / np.array(n_cores)) == 50000
            speedups = [
                algo_data['time_1core'] / run_times[n]
                for n in range(len(run_times)) if is_weak_scale_combo[n]
            ]
            errors = algo_data['errors']
            errors = [[err[0] for err in errors], [err[1] for err in errors]]

            if max(n_cores) > max_n_core:
                max_n_core = max(n_cores)
            last_speedups.append(speedups[-1])
            # cores actually used in the weak-scaling runs, in increasing order
            cores_set = sorted(set(np.array(n_cores)[is_weak_scale_combo]))

            efficiency = [
                speedups[i] / cores_set[i] for i in range(len(speedups))
            ]

            ax.plot(cores_set,
                    efficiency,
                    linewidth=3,
                    label=algorithm,
                    alpha=0.5)
            ax.plot(cores_set,
                    efficiency,
                    'o',
                    linewidth=3,
                    alpha=0.3,
                    color='black',
                    markeredgecolor='none')
            # ax.errorbar(cores_set, efficiency, yerr=errors, fmt='none', alpha=0.5, color='black', capsize=3)

            print(algorithm)
        ax.plot([1, max_n_core], [1.0, 1.0],
                'k--',
                linewidth=1,
                alpha=0.5,
                label='ideal efficiency')

        order = np.argsort(last_speedups)[::-1]
        plot_utils.ordered_legend(ax, order)
        self.evaluate_save_show(save, show, file_name)
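
For the weak-scaling plot only the runs whose problem size grows proportionally with the core count are kept (50000 points per core in the code above, selected with a boolean mask). The sketch below illustrates that selection with invented run data; unlike the bookkeeping above, n_points is given per run here as a simplification.

# Selecting the weak-scaling combinations: keep only runs with a fixed
# per-core work load. All run data below is invented.
import numpy as np

n_cores = np.array([1, 2, 2, 4, 8])
n_points = np.array([50000, 100000, 80000, 200000, 400000])

points_per_core = n_points / n_cores
is_weak_scale_combo = points_per_core == 50000   # fixed per-core work load

print(n_cores[is_weak_scale_combo])    # the weak-scaling runs: 1, 2, 4, 8 cores
print(n_points[is_weak_scale_combo])   # their problem sizes: 50k, 100k, 200k, 400k points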
Example No. 6
    def plot_runtimes_comparison(self, save=False, show=False, file_name=None):
        """
        Plots the absolute runtime vs input size for all algorithms in the run.

        For now, the runtimes are the median over all iterations for a run configuration.
        """

        if len(self.run_config['run_params']['n_cores']) > 1 or len(self.run_config['run_params']['sub_size']) > 1 \
                or len(self.run_config['run_params']['img_files']) > 1:
            raise NotImplementedError(
                "Only varying the input size and the algorithms is supported here."
            )

        fig, ax = plot_utils.setup_figure_1ax(
            size=(13, 9),
            x_label='Input size [number of points]',
            y_label='Run time [s]')

        # set up empty data container
        algo_runtimes = {
            algorithm: {
                'input_sizes': [],
                'run_times': [],
                'errors': []
            }
            for algorithm in self.run_config['run_params']['algorithms']
        }

        for _, data in self.all_data_dict.items():
            algo_runtimes[data['algorithm']]['input_sizes'].append(
                data['n_points'])
            algo_runtimes[data['algorithm']]['run_times'].append(
                data['median_run_times'])
            algo_runtimes[data['algorithm']]['errors'].append(data['errors'])

        last_runtimes = []
        for algorithm, algo_data in algo_runtimes.items():
            sort_indices = np.argsort(np.array(algo_data['input_sizes']))

            input_sizes = [algo_data['input_sizes'][i] for i in sort_indices]
            run_times = [algo_data['run_times'][i] for i in sort_indices]

            errors = [algo_data['errors'][i] for i in sort_indices]
            errors = [[err[0] for err in errors], [err[1] for err in errors]]

            last_runtimes.append(run_times[-1])
            ax.plot(input_sizes,
                    run_times,
                    linewidth=3,
                    label=algorithm,
                    alpha=0.5)
            ax.errorbar(input_sizes,
                        run_times,
                        yerr=errors,
                        fmt='none',
                        alpha=0.5,
                        color='black',
                        capsize=3)
            # ax.plot(input_sizes, run_times, 'o', linewidth=3, alpha=0.3, color='black', markeredgecolor='none')

        ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), frameon=False)

        order = np.argsort(last_runtimes)[::-1]
        plot_utils.ordered_legend(ax, order)
        self.evaluate_save_show(save, show, file_name)
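
The per-run errors above are stored as [lower, upper] pairs and reshaped into the 2xN sequence that matplotlib's errorbar expects. A minimal sketch of that run-time-vs-input-size plot with invented numbers:

# Run time vs input size with asymmetric error bars.
# All numbers below are invented placeholders.
import matplotlib.pyplot as plt

input_sizes = [10000, 100000, 1000000]
run_times = [0.05, 0.6, 7.2]                       # seconds
errors = [[0.01, 0.02], [0.05, 0.08], [0.4, 0.9]]  # [lower, upper] per run

# reshape [lower, upper] pairs into the 2xN form errorbar expects
yerr = [[err[0] for err in errors], [err[1] for err in errors]]

fig, ax = plt.subplots(figsize=(13, 9))
ax.plot(input_sizes, run_times, linewidth=3, alpha=0.5, label='quickhull')
ax.errorbar(input_sizes, run_times, yerr=yerr, fmt='none', color='black',
            alpha=0.5, capsize=3)
ax.set_xlabel('Input size [number of points]')
ax.set_ylabel('Run time [s]')
ax.legend(frameon=False)
plt.show()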