Example #1
def test_labeled_reporter_object_warning(self):
    label = "some_label_here"
    reporter = Reporter(label)
    msg = "testing labeled Reporter warning"
    reporter.warn(msg)
    self.assertOutputContains(msg)
    self.assertStartsWith(label + '.WARNING')
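The Reporter under test is not shown here; a minimal sketch that would satisfy the assertions above, assuming warn() writes the message with a '<label>.WARNING' prefix to stdout (the class layout and message format are assumptions, not the library's actual code):

import sys

class Reporter:
    # Minimal sketch of a labeled reporter: warnings carry a '<label>.WARNING' prefix.
    def __init__(self, label=None):
        self.label = label

    def warn(self, msg):
        prefix = (self.label + '.WARNING') if self.label else 'WARNING'
        sys.stdout.write('{}: {}\n'.format(prefix, msg))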
Example #2
def main(opt, dataloader_train, dataloader_val, path=None):
    # basic settings
    torch.backends.cudnn.enabled = False
    os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpu_ids)[1:-1]

    if torch.cuda.is_available():
        device = "cuda"
        torch.backends.cudnn.benchmark = False
    else:
        device = "cpu"
    #####################  Create Baseline Model  ####################
    net = ModelWrapper(opt)
    if path is not None:
        load(net, path)
    #net.load_checkpoint()
    #net=torch.load('/root/Desktop/res50_flop73_0.752.pth')
    net = net.to(device)
    net.parallel(opt.gpu_ids)
    net.get_compress_part().train()
    ##################### Fine-tuning #########################
    lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(net.optimizer,
                                                        50,
                                                        eta_min=5e-6)
    #lr_scheduler=optim.lr_scheduler.StepLR(net.optimizer,10,0.8)
    reporter = Reporter(opt)
    #best_acc = net.get_eval_scores(dataloader_val)["accuracy"]
    best_acc = 0
    net._net.train()
    for epoch in range(1, opt.epoch + 1):
        reporter.log_metric("lr", net.optimizer.param_groups[0]["lr"], epoch)
        train_loss = train_epoch(net, dataloader_train, net.optimizer)
        reporter.log_metric("train_loss", train_loss, epoch)
        lr_scheduler.step()
        scores = net.get_eval_scores(dataloader_val)
        print("==> Evaluation: Epoch={} Acc={}".format(epoch, str(scores)))
        reporter.log_metric("eval_acc", scores["accuracy"], epoch)
        if scores["accuracy"] > best_acc:
            best_acc = scores["accuracy"]
        reporter.log_metric("best_acc", best_acc, epoch)
        save_checkpoints(
            scores["accuracy"],
            net._net,
            reporter,
            opt.exp_name,
            epoch,
        )
        print("==> Training epoch %d" % epoch)
Example #3
class Application(object):
    def __init__(self, file_name):
        self._file_name = file_name
        self._recorder = Recorder(self._file_name)
        self._reporter = Reporter(self._recorder)

    def start(self):
        self.start_services()
        self.print_report()

    def _make_app(self):
        pass

    def start_services(self):
        self._recorder.start()

    def print_report(self):
        self._reporter.display_report()
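A usage sketch for this wrapper (the file name is illustrative; Recorder is assumed to accept it directly):

app = Application('session.log')
app.start()  # starts the recorder, then prints the report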
Example #4
def init(models: Iterable[Model], opt_config,
         additional_debugging_names: Optional[List[str]] = None):
    # debugger and logging
    if additional_debugging_names is None:
        additional_debugging_names = []
    debugger = Reporter([model.name for model in models] + additional_debugging_names)
    logging_fh = logging.FileHandler(debugger.file_path('logs.log'), 'w')
    logging_fh.setLevel(logging.DEBUG)
    logging_fh.setFormatter(logging.Formatter('[%(asctime)s][%(name)s][%(levelname)s] %(message)s'))
    logging.getLogger('').addHandler(logging_fh)
    # initialize plotter
    plotter = Plotter()
    # parse config
    if opt_config is not None:
        if isinstance(opt_config, str) and os.path.isfile(opt_config):
            with open(opt_config) as config_file:
                opt_config = yaml.safe_load(config_file)
                opt_config['sampling_count'] = tuple(opt_config['sampling_count'])
    logging.debug('optimization config parse complete, config:' + repr(opt_config))
    return debugger, opt_config, plotter
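A hypothetical call site, assuming models is an iterable of Model instances (each exposing a .name attribute) and the optimization config lives in a YAML file:

debugger, opt_config, plotter = init(models, 'opt_config.yml',
                                     additional_debugging_names=['debug'])
logging.info('log file at ' + debugger.file_path('logs.log'))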
Example #5
def math_cut(drive_model: Model,
             cart_drive: np.ndarray,
             reporter: Reporter,
             plotter: Optional[Plotter],
             animation=False,
             center_point: Optional[Tuple[float, float]] = None):
    center = center_point or drive_model.center_point
    polar_math_drive = toExteriorPolarCoord(Point(center[0], center[1]),
                                            cart_drive, drive_model.sample_num)
    polar_math_driven, center_distance, phi = compute_dual_gear(
        polar_math_drive, k=drive_model.k)

    if animation:
        plot_sampled_function((polar_math_drive, polar_math_driven), (phi, ),
                              reporter.get_math_debug_dir_name(),
                              100,
                              0.001, [(0, 0), (center_distance, 0)], (8, 8),
                              ((-0.5, 1.5), (-1.1, 1.1)),
                              plotter=plotter)

    # save figures (plotter is Optional in the signature, but must be non-None past this point)
    plotter.draw_contours(
        reporter.file_path('math_drive.png'),
        [('math_drive', toCartesianCoordAsNp(polar_math_drive, 0, 0))], None)
    plotter.draw_contours(
        reporter.file_path('math_driven.png'),
        [('math_driven', toCartesianCoordAsNp(polar_math_driven, 0, 0))], None)
    plotter.draw_contours(
        reporter.file_path('math_results.png'),
        [('math_drive', toCartesianCoordAsNp(polar_math_drive, 0, 0)),
         ('math_driven',
          np.array(
              rotate(
                  list(
                      toCartesianCoordAsNp(polar_math_driven, center_distance,
                                           0)), phi[0],
                  (center_distance, 0))))], [(0, 0), (center_distance, 0)])

    logging.info('math rotate complete')
    logging.info(f'Center Distance = {center_distance}')

    return center_distance, phi, polar_math_drive, polar_math_driven
Example #6
def generate_3d_mesh(debugger: Reporter, filename: str, contour: np.ndarray,
                     thickness: float):
    """
    generate a 3D mesh of the given contour with the given thickness
    :param debugger: the debugger to provide directory for obj to be stored
    :param filename: filename (excluding the extension)
    :param contour: the contour to create 3d object with
    :param thickness: the thickness of 3d object mesh
    :return: None
    """
    if not filename.endswith('.obj'):
        filename = filename + '.obj'
    destination = debugger.file_path(filename)
    with open(destination, 'w') as obj_file:
        point_to_vertex = {}
        for index, point in enumerate(contour):
            point_to_vertex[tuple(point)] = (index * 2 + 1, index * 2 + 2)
            print(f'v {point[0]} {point[1]} 0', file=obj_file)
            print(f'v {point[0]} {point[1]} {thickness}', file=obj_file)

        contour_poly = Polygon(contour)
        triangles = triangulate(contour_poly)
        for triangle in triangles:
            triangle_bound = LineString(triangle.exterior)
            if not triangle_bound.within(contour_poly):
                continue
            *points, _ = triangle.exterior.coords
            face_1, face_2 = zip(*[point_to_vertex[point] for point in points])
            for face in (face_1[::-1], face_2):
                print('f ' + ' '.join([str(i) for i in face]), file=obj_file)
        for index, point in enumerate(contour):
            lower_point, upper_point = point_to_vertex[tuple(point)]
            lower_prev, upper_prev = point_to_vertex[tuple(contour[index - 1])]
            print('f ' + ' '.join([
                str(point) for point in (upper_prev, lower_point, upper_point)
            ]),
                  file=obj_file)
            print('f ' + ' '.join([
                str(point) for point in (upper_prev, lower_prev, lower_point)
            ]),
                  file=obj_file)
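A quick usage sketch: extruding a unit square into a prism. Here debugger is any Reporter whose file_path() maps a file name into the run directory; the contour and thickness are illustrative:

import numpy as np

square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
generate_3d_mesh(debugger, 'square_prism', square, thickness=0.5)
# writes square_prism.obj: two triangulated caps plus two side triangles per contour edge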
Example #7
def gen_rndchk_models(raw_dataset_folder, random_dataset_folder, minimum,
                      maximum, result_dir):
    raw_dset = Dataset.new_from_folders(raw_dataset_folder)
    raw_dset = raw_dset.filter_min_max(minimum, maximum)

    rnd_dset = Dataset.new_from_folders(random_dataset_folder)
    rnd_dset = rnd_dset.filter_min_max(minimum, maximum)

    r = Reporter()
    for cat, tset, vset in datasets_X_random(raw_dset, rnd_dset):
        print(cat)
        model = models.C64_16_2pr_C32_4_2pr_C64_32_2pr_F_D(
            2, 8, 'softmax', 'categorical_crossentropy')
        result = Trainer(model).train(tset, vset)
        h5_path = os.path.join(result_dir, '%s_random.h5' % cat)
        model.save(h5_path)
        r.add(
            result,
            category=cat,
            **report.report_epochs(**result._asdict()),
            **report.report_elapsed(**result._asdict()),
            **report.report_metrics(**result._asdict()),
        )
    r.save_report(result_dir + "/experiments.tsv")
Example #8
def sampling_optimization(drive_contour: np.ndarray, driven_contour: np.ndarray, k: int,
                          sampling_count: Tuple[int, int], keep_count: int,
                          resampling_accuracy: int, comparing_accuracy: int, debugger: Reporter,
                          max_sample_depth: int = 5, max_iteration: int = 1,
                          smoothing: Tuple[int, int] = (0, 0),
                          visualization: Optional[Dict] = None, draw_tar_functions: bool = False) \
        -> List[Tuple[float, float, float, float, float, np.ndarray, np.ndarray]]:
    """
    perform sampling optimization for drive contour and driven contour
    :param drive_contour: the driving gear's contour
    :param driven_contour: the driven gear's contour
    :param k: drive/driven ratio
    :param sampling_count: the number of samples in each dimension
    :param keep_count: the count of samples kept
    :param resampling_accuracy: count of points in the sampling procedure
    :param comparing_accuracy: count of samples during comparison
    :param debugger: the debugger for storing data
    :param max_sample_depth: maximum depth for the sampling optimization to use
    :param max_iteration: maximum number of times to swap drive/driven and iterate
    :param smoothing: smoothing level to be taken by uniform re-sampling
    :param visualization: None for no figure, otherwise for visualization configuration
    :param draw_tar_functions: True to draw TAR functions in debug windows (affects performance)
    :return: a list of tuples (total score, score, center_x, center_y, center_distance, drive contour, driven contour)
    """
    drive_contour = counterclockwise_orientation(drive_contour)
    driven_contour = counterclockwise_orientation(driven_contour)
    drive_polygon = Polygon(drive_contour)
    driven_polygon = Polygon(driven_contour)
    drive_polar = toExteriorPolarCoord(drive_polygon.centroid, drive_contour,
                                       resampling_accuracy)
    driven_polar = toExteriorPolarCoord(driven_polygon.centroid,
                                        driven_contour, resampling_accuracy)
    drive_smoothing, driven_smoothing = smoothing
    drive_contour = getUniformContourSampledShape(drive_contour,
                                                  resampling_accuracy,
                                                  drive_smoothing > 0)
    driven_contour = getUniformContourSampledShape(driven_contour,
                                                   resampling_accuracy,
                                                   driven_smoothing > 0)
    visualize_config = {
        'fig_size': (16, 9),
    }
    subplots = None
    if visualization is not None:
        visualize_config.update(visualization)
        plt.ion()
        fig, subplots = plt.subplots(3, 2)
        fig.set_size_inches(*visualize_config['fig_size'])
        update_polygon_subplots(drive_contour, driven_contour, subplots[0])

    debugging_root_directory = debugger.get_root_debug_dir_name()
    results = []
    # following two variables change during iteration
    drive = drive_contour
    driven = driven_contour
    for iteration_count in range(max_iteration):
        debug_directory = os.path.join(debugging_root_directory,
                                       f'iteration_{iteration_count}')
        os.makedirs(debug_directory, exist_ok=True)
        drive = counterclockwise_orientation(drive)
        new_res = sample_drive_gear(
            drive, driven_contour, k, sampling_count, keep_count,
            comparing_accuracy, max_sample_depth, debug_directory,
            subplots[1] if subplots is not None else None)
        results += [(None, score, *center, center_distance, drive, driven)
                    for score, *center, center_distance, driven in new_res]
        for index, result in enumerate(results):
            total_score, score, *center, center_distance, this_drive, driven = result
            if subplots is not None:
                update_polygon_subplots(
                    drive_contour, driven_contour,
                    subplots[0])  # so that the two subplots can iterate
                update_polygon_subplots(this_drive, driven, subplots[1])
                subplots[1][0].scatter(center[0], center[1], 3)
                subplots[1][0].text(0, 0, str(center))
                subplots[1][1].text(0, 0, str(score))
                subplots[1][1].scatter(0, 0, 3)
                if draw_tar_functions:
                    tars = [
                        triangle_area_representation(contour,
                                                     comparing_accuracy)
                        for contour in (this_drive, driven)
                    ]
                    for subplot, tar in zip(subplots[2], tars):
                        tar = tar[:, 0]
                        subplot.clear()
                        subplot.plot(range(len(tar)), tar, color='blue')
                if total_score is None:
                    total_score = score + shape_difference_rating(
                        this_drive,
                        drive_contour,
                        comparing_accuracy,
                        distance_function=trivial_distance)
                    results[index] = (total_score, *result[1:])
                score_str = "%.8f" % total_score
                plt.savefig(
                    os.path.join(debug_directory,
                                 f'final_result_{index}_{score_str}.png'))
                save_contour(
                    os.path.join(debug_directory,
                                 f'final_result_{index}_drive.dat'),
                    this_drive)
                save_contour(
                    os.path.join(debug_directory,
                                 f'final_result_{index}_driven.dat'), driven)
        *_, drive, driven = results[-1]  # get the last result
        drive_contour, driven_contour = driven_contour, drive_contour
        drive_polygon, driven_polygon = driven_polygon, drive_polygon
        drive_polar, driven_polar = driven_polar, drive_polar
        drive, driven = driven, drive
        drive_smoothing, driven_smoothing = driven_smoothing, drive_smoothing
        # drive_poly = Polygon(drive)
        # drive = shape_average(drive_polar, toExteriorPolarCoord(Polygon(drive).centroid, drive, resampling_accuracy),
        #                       drive_polygon.area, drive_poly.area)
        drive = phi_average.shape_average(
            drive_polar,
            toExteriorPolarCoord(
                Polygon(drive).centroid, drive, resampling_accuracy))
        drive = toCartesianCoordAsNp(drive, 0, 0)
        drive = getUniformContourSampledShape(drive, resampling_accuracy,
                                              drive_smoothing > 0)
        if subplots is not None:
            for subplot in subplots[2]:
                subplot.clear()
    return results
Example #9
def main(opt):
    # basic settings
    os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpu_ids)[1:-1]

    if torch.cuda.is_available():
        device = "cuda"
        torch.backends.cudnn.benchmark = True
    else:
        device = "cpu"
    ##################### Get Dataloader ####################
    dataloader_train, dataloader_val = custom_get_dataloaders(opt)
    # dummy_input is sample input of dataloaders
    if hasattr(dataloader_val, "dataset"):
        dummy_input = dataloader_val.dataset[0][0].unsqueeze(0)
    else:
        # for imagenet dali loader
        dummy_input = torch.rand(1, 3, 224, 224)

    #####################  Create Baseline Model  ####################
    net = ModelWrapper(opt)
    net.load_checkpoint(opt.checkpoint)
    flops_before, params_before = model_summary(net.get_compress_part(),
                                                dummy_input)

    #####################  Load Pruning Strategy ###############
    compression_scheduler = distiller.file_config(net.get_compress_part(),
                                                  net.optimizer,
                                                  opt.compress_schedule_path)

    channel_config = get_channel_config(opt.search_result,
                                        opt.strategy_id)  # pruning strategy

    compression_scheduler = random_compression_scheduler(
        compression_scheduler, channel_config)

    ###### Adaptive-BN-based Candidate Evaluation of Pruning Strategy ###
    thinning(net, compression_scheduler, input_tensor=dummy_input)

    flops_after, params_after = model_summary(net.get_compress_part(),
                                              dummy_input)
    ratio = flops_after / flops_before
    print("FLOPs ratio:", ratio)
    net = net.to(device)
    net.parallel(opt.gpu_ids)
    net.get_compress_part().train()
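    # Adaptive-BN evaluation: with the network in train() mode but gradients
    # disabled, the ~100 forward passes below recalibrate the BatchNorm running
    # statistics of the pruned model before it is scored on the validation set.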
    with torch.no_grad():
        for index, sample in enumerate(tqdm(dataloader_train, leave=False)):
            _ = net.get_loss(sample)
            if index > 100:
                break

    strategy_score = net.get_eval_scores(dataloader_val)["accuracy"]

    print("Result file:{}, Strategy ID:{}, Evaluation score:{}".format(
        opt.search_result, opt.strategy_id, strategy_score))

    ##################### Fine-tuning #########################
    lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(
        net.optimizer, opt.epoch)
    reporter = Reporter(opt)
    best_acc = 0
    net._net.train()
    for epoch in range(1, opt.epoch + 1):
        reporter.log_metric("lr", net.optimizer.param_groups[0]["lr"], epoch)
        train_loss = train_epoch(
            net,
            dataloader_train,
            net.optimizer,
        )
        reporter.log_metric("train_loss", train_loss, epoch)

        lr_scheduler.step()

        scores = net.get_eval_scores(dataloader_val)
        print("==> Evaluation: Epoch={} Acc={}".format(epoch, str(scores)))

        reporter.log_metric("eval_acc", scores["accuracy"], epoch)

        if scores["accuracy"] > best_acc:
            best_acc = scores["accuracy"]
        reporter.log_metric("best_acc", best_acc, epoch)

        save_checkpoints(
            scores["accuracy"],
            net._net,
            reporter,
            opt.exp_name,
            epoch,
        )

        print("==> Training epoch %d" % epoch)
Example #10
def __init__(self, file_name):
    self._file_name = file_name
    self._recorder = Recorder(self._file_name)
    self._reporter = Reporter(self._recorder)
Example #11
def train_gcn(seed, epochs, num_splits):
    # imports are intentionally local (lazy loading) so that argument-parsing
    # errors surface faster, before the heavy libraries are imported
    from typing import Generator
    import random
    import os
    import sys
    import datetime as dt
    from copy import copy, deepcopy

    import numpy as np
    import pandas as pd
    import networkx as nx
    from matplotlib import pyplot
    import matplotlib
    import seaborn as sns

    import torch
    import torch_geometric as tg

    from sklearn.decomposition import PCA
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.datasets import load_iris
    from sklearn.model_selection import train_test_split
    from sklearn.naive_bayes import GaussianNB

    from mpl_proc import MplProc, ProxyObject

    from gf_dataset import GasFlowGraphs
    from locations import Coordinates
    from models import MyNet3, MyNet2, MyNet, cycle_loss, cycle_dst2
    from report import FigRecord, StringRecord, Reporter

    from seed_all import seed_all

    from animator import Animator

    class LineDrawer:
        def __init__(self, *, ax: matplotlib.axes.Axes, kw_reg, kw_min,
                     kw_train, kw_test):
            self.min_diff = float('inf')
            self.ax = ax
            self.kw_reg = kw_reg
            self.kw_min = kw_min
            self.kw_train = kw_train

            class FakeHline:
                def set(self, *args, **kwargs):
                    pass

            self.kw_test = kw_test
            self.min_train_hline, self.min_test_hline = FakeHline(), FakeHline()

        def append(self, *, train_loss: float, test_loss: float):
            crt_diff = abs(test_loss - train_loss)
            if crt_diff < self.min_diff:
                self.min_diff = crt_diff
                self.min_train_hline.set(**self.kw_reg)
                self.min_test_hline.set(**self.kw_reg)
                self.min_train_hline = self.ax.hlines(**self.kw_train,
                                                      **self.kw_min,
                                                      y=train_loss)
                self.min_test_hline = self.ax.hlines(**self.kw_test,
                                                     **self.kw_min,
                                                     y=test_loss)
            else:
                self.ax.hlines(**self.kw_reg, **self.kw_train, y=train_loss)
                self.ax.hlines(**self.kw_reg, **self.kw_test, y=test_loss)

    print("[ Using Seed : ", seed, " ]")
    seed_all(seed)

    mpl_proc = MplProc()

    animator = Animator(mpl_proc)
    graph_dataset = GasFlowGraphs()
    lines = LineDrawer(ax=mpl_proc.proxy_ax,
                       kw_min=dict(),
                       kw_reg=dict(linewidth=0.3, color='gray'),
                       kw_train=dict(linestyle=':', xmin=300, xmax=400),
                       kw_test=dict(xmin=400, xmax=500))

    for seed in range(num_splits):
        # torch.manual_seed(seed)
        train_graphs, test_graphs = torch.utils.data.random_split(
            graph_dataset, (len(graph_dataset) - 20, 20))

        decision_tree = DecisionTreeClassifier(min_samples_leaf=6,
                                               max_depth=4,
                                               max_leaf_nodes=12)
        X = np.concatenate([g.edge_attr.T for g in train_graphs])
        y = np.concatenate([g.y for g in train_graphs])[:, 1]
        decision_tree.fit(X, y)
        predicted = decision_tree.predict(
            np.concatenate([g.edge_attr.T for g in test_graphs]))
        target = np.array([g.y[0, 1].item() for g in test_graphs])

        test_loss = cycle_loss(target, predicted)
        train_loss = cycle_loss(y, decision_tree.predict(X))

        if abs(test_loss - train_loss) < lines.min_diff:
            train_loader = tg.data.DataLoader(train_graphs,
                                              batch_size=len(train_graphs))
            test_loader = tg.data.DataLoader(test_graphs,
                                             batch_size=len(test_graphs))

        lines.append(test_loss=test_loss, train_loss=train_loss)

    lines = LineDrawer(ax=mpl_proc.proxy_ax,
                       kw_min=dict(),
                       kw_reg=dict(linewidth=0.3, color='gray'),
                       kw_train=dict(linestyle=':', xmin=100, xmax=200),
                       kw_test=dict(xmin=200, xmax=300))

    for seed in range(num_splits):
        train_graphs, test_graphs = torch.utils.data.random_split(
            graph_dataset, (len(graph_dataset) - 20, 20))
        gnb = GaussianNB()
        X = np.concatenate([g.edge_attr.T for g in train_graphs])
        y = np.concatenate([g.y for g in train_graphs])[:, 1]
        gnb.fit(X, y)
        predicted = gnb.predict(
            np.concatenate([g.edge_attr.T for g in test_graphs]))
        target = np.array([g.y[0, 1].item() for g in test_graphs])

        lines.append(test_loss=cycle_loss(target, predicted),
                     train_loss=cycle_loss(y, gnb.predict(X)))

    mynet = MyNet3()

    # seed_all(seed)
    train_graphs, test_graphs = torch.utils.data.random_split(
        graph_dataset, (len(graph_dataset) - 20, 20))
    train_loader = tg.data.DataLoader(train_graphs,
                                      batch_size=len(train_graphs))
    test_loader = tg.data.DataLoader(test_graphs, batch_size=len(test_graphs))

    optimizer = torch.optim.Adam(mynet.parameters(), lr=0.001)

    # lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
    # torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)

    def train_epochs():
        for epoch in range(epochs):
            train_loss = 0
            for batch in train_loader:
                # criterion = torch.nn.MSELoss()
                predicted = mynet(batch)

                loss = cycle_loss(predicted.flatten(), batch.y[:, 1].float())
                # loss = criterion(predicted, batch.y.float())
                train_loss += loss.item()

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                # lr_scheduler.step()
            train_loss /= len(train_loader)
            yield train_loss

    class IntersectionFinder:
        def __init__(self):
            self.old = (None, None)

        def intersects(self, a: float, b: float) -> bool:
            old_a, old_b = self.old
            self.old = a, b
            if old_a is None:
                return False
            if a == b:
                return True
            return (old_a > old_b) != (a > b)

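    # e.g. IntersectionFinder().intersects(1.0, 2.0) is False (no history yet),
    # and a following intersects(2.0, 1.0) returns True: the curves crossed in between.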
    intersections = IntersectionFinder()

    min_test_loss = float('inf')
    min_test_epoch = -1
    for epoch_no, train_loss in enumerate(train_epochs()):
        with torch.no_grad():
            test_loss = 0.0
            for batch in test_loader:
                predicted = mynet(batch)

                loss = cycle_loss(predicted.flatten(), batch.y[:, 1].float())
                test_loss += loss.item()
            test_loss /= len(test_loader)
            if test_loss < min_test_loss:
                min_test_loss = test_loss
                best = deepcopy(mynet)
                min_test_epoch = epoch_no
            if intersections.intersects(train_loss, test_loss):
                mpl_proc.proxy_ax.scatter(epoch_no,
                                          train_loss,
                                          s=100,
                                          marker='x',
                                          color='#3d89be')

        animator.add(train_loss, test_loss)

    fig: matplotlib.figure.Figure
    ax1: matplotlib.axes.Axes
    ax2: matplotlib.axes.Axes
    ax3: matplotlib.axes.Axes
    ax4: matplotlib.axes.Axes
    fig, ((ax1, ax2), (ax3, ax4)) = pyplot.subplots(ncols=2,
                                                    nrows=2,
                                                    sharey=True)

    def draw_tables(ax: matplotlib.axes.Axes, net: torch.nn.Module,
                    data: tg.data.DataLoader):
        table = np.full((13, 12), np.nan)
        for batch in data:
            predicted = net(batch)
            Y = batch.y[:, 0] - 2008
            M = batch.y[:, 1]
            table[Y, M] = cycle_dst2(M.float(),
                                     predicted.flatten().detach().numpy())**.5

        mshow = ax.matshow(table, vmin=0, vmax=6)
        ax.set(yticks=range(13), yticklabels=range(2008, 2021))
        return mshow

    mshow = draw_tables(ax1, mynet, train_loader)
    draw_tables(ax2, mynet, test_loader)
    draw_tables(ax3, best, train_loader)
    draw_tables(ax4, best, test_loader)

    fig.subplots_adjust(right=0.8)
    cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
    fig.colorbar(mshow, cax=cbar_ax)

    ax1.title.set_text('last')
    ax3.title.set_text(f'best {min_test_epoch}')

    def nxt_num() -> int:
        return sum(1 for n in os.listdir('experiments')
                   if n.startswith('exp-1')) + 1

    N = nxt_num()

    reporter = Reporter('report4.md')
    reporter.append(StringRecord(f'# {N}'))
    reporter.append(StringRecord(f'''
    ```
    {mynet}
    ```
    '''))
    reporter.append(FigRecord(fig, 'exp-2', f'experiments/exp-2-{N}.png'))
    reporter.append(
        FigRecord(mpl_proc.proxy_fig, 'exp-1', f'experiments/exp-1-{N}.png'))

    reporter.write()

    pyplot.show()
Example #12
		'qual':read.qual,'cigar':read.alignedRead.cigarstring}
		observedReadsDB.insert(post)



errorCounter = counter(ref,opt,samfile=opt.samfile,makeDB=opt.force)
errorCounter.INSTransitionStats()
errorCounter.DELTransitionStats()

errorCounter.SNPTransitionStats()
errorCounter.summary()
errorCounter.plotHist()
## Do some meta and summary statistics
summ = db_summary(opt)
summ.errorDistribution()
summ.qualDistribution()
summ.qScoreCalibrationTest('SNP')
summ.qScoreCalibrationTest('Insertion')
summ.qScoreCalibrationTest('Deletion')

report = Reporter(opt=opt,counter=errorCounter,outfileDir= opt.outDir ,latexTemplate='../data/template.tex')
report.generatePdfReport()

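# note: time.clock() was deprecated in Python 3.3 and removed in 3.8;
# time.perf_counter() is the modern replacement for timing a run like this.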
end = time.clock()
logging.info("errorStats.py took %f seconds to run" % (end-start))


Example #13
    # Check that the configuration files are complete
    check_config_file(src_dir)
    check_config_file(src_dir, 'SubSourceCrawlerConfig.xml')
    check_config_file(src_dir, 'webForumConfiguration.xml')

    main = MainProcess(src_name, src_dir, thread_num)
    main.prepare()
    try:
        if flag in ('y', 'Y', 'yes', 'YES'):
            main.copy_subsource_resource()
            main.run_subsourcecrawler()
            main.read_finished_xml()
        else:
            check_config_file(src_dir, 'finished.xml')
            main.read_finished_xml(True)
        main.run_ingentia()

        print 'Generating error report...'
        rpt = Reporter(src_name)
        rpt.gen()
    except OSError as e:
        print e
    except KeyboardInterrupt as e:
        print e
    finally:
        #main.rm_temp()
        send_gtalk("Testing [%s] finished " % src_name)


Example #14
def rotate_and_cut(drive_polygon: Polygon,
                   center_distance,
                   phi,
                   k=1,
                   debugger: Reporter = None,
                   replay_animation: bool = False,
                   plot_x_range: Tuple[float, float] = (-1.5, 3),
                   plot_y_range: Tuple[float, float] = (-2.25, 2.25),
                   save_rate: int = 4,
                   plotter: Optional[Plotter] = None):
    # save_rate: save 1 frame per save_rate frames
    from shapely.affinity import translate, rotate
    driven_polygon = to_polygon([center_distance] * len(phi))
    delta_theta = 2 * pi / len(phi) * k
    driven_polygon = translate(driven_polygon, center_distance)
    complete_phi = phi + [phi[0]]  # so that it rotates back
    phi_incremental = [0.0] + [
        complete_phi[i] - complete_phi[i - 1]
        for i in range(1, len(complete_phi))
    ]
    assert isclose(sum(phi_incremental) % (2 * pi), 0, abs_tol=1e-5)  # abs_tol is needed: rel_tol alone can never match 0
    angle_sum = 0

    fig, subplot = plt.subplots(figsize=(7, 7))

    subplot.set_title('Dual Shape(Cut)')
    subplot.axis('equal')

    plt.ion()
    for index, angle in enumerate(phi_incremental):
        angle_sum = delta_theta * index
        _drive_polygon = rotate(drive_polygon,
                                angle_sum,
                                use_radians=True,
                                origin=(0, 0))
        driven_polygon = rotate(driven_polygon,
                                angle,
                                use_radians=True,
                                origin=(center_distance, 0))
        driven_polygon = driven_polygon.difference(_drive_polygon)
        _plot_polygon((_drive_polygon, driven_polygon),
                      plot_x_range + plot_y_range)
        plt.scatter((0, center_distance), (0, 0), s=100, c='b')
        if debugger is not None and index % save_rate == 0:
            file_path = os.path.join(debugger.get_cutting_debug_dir_name(),
                                     f'before_cut_{index // save_rate}.png')
            if plotter is None:
                fig.savefig(file_path)
            else:
                plotter.draw_contours(
                    file_path,
                    polygon_to_contour('carve_drive', _drive_polygon) +
                    polygon_to_contour('carve_driven', driven_polygon),
                    [(center_distance, 0), (0, 0)])
        plt.pause(0.00001)
    assert isclose(angle_sum, 2 * pi * k, rel_tol=1e-5)
    plt.ioff()

    driven_polygon = rotate(driven_polygon, -complete_phi[-1],
                            use_radians=True,
                            origin=(center_distance, 0))  # de-rotate to match phi

    if replay_animation:
        # replay the animation
        plt.ion()
        for index, angle in enumerate(phi):
            theta = delta_theta * index
            _drive_polygon = rotate(drive_polygon, theta, (0, 0), True)
            _driven_polygon = rotate(driven_polygon, angle,
                                     (center_distance, 0), True)
            _plot_polygon((_drive_polygon, _driven_polygon),
                          plot_x_range + plot_y_range)
            plt.scatter((0, center_distance), (0, 0), s=100, c='b')
            if debugger is not None and index % save_rate == 0:
                file_path = os.path.join(
                    debugger.get_cutting_debug_dir_name(),
                    f'after_cut_{index // save_rate}.png')
                if plotter is None:
                    fig.savefig(file_path)
                else:
                    plotter.draw_contours(
                        file_path,
                        polygon_to_contour('carve_drive', _drive_polygon) +
                        polygon_to_contour('carve_driven', _driven_polygon),
                        [(center_distance, 0), (0, 0)])
            plt.pause(0.001)
        plt.ioff()

    driven_polygon = translate(driven_polygon, -center_distance)
    return driven_polygon, fig, subplot
Example #15
def main(opt, channel_config, dataloader_train, dataloader_val, path):
    # basic settings
    torch.backends.cudnn.enabled = False
    os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpu_ids)[1:-1]

    if torch.cuda.is_available():
        device = "cuda"
        torch.backends.cudnn.benchmark = False
    else:
        device = "cpu"
    ##################### Get Dataloader ####################

    # dummy_input is sample input of dataloaders
    if hasattr(dataloader_val, "dataset"):
        dummy_input = dataloader_val.dataset[0][0].unsqueeze(0)
    else:
        # for imagenet dali loader
        dummy_input = torch.rand(1, 3, 224, 224)

    #####################  Create Baseline Model  ####################
    net = ModelWrapper(opt)
    load(net, path)
    #net.load_checkpoint(opt.checkpoint)
    #####################  Load Pruning Strategy ###############
    compression_scheduler = distiller.file_config(net.get_compress_part(),
                                                  net.optimizer,
                                                  opt.compress_schedule_path)
    compression_scheduler = setCompressionScheduler(compression_scheduler,
                                                    channel_config)
    ###### Adaptive-BN-based Candidate Evaluation of Pruning Strategy ###
    thinning(net, compression_scheduler, input_tensor=dummy_input)
    flops_after, params_after = model_summary(net.get_compress_part(),
                                              dummy_input)
    net = net.to(device)
    net.parallel(opt.gpu_ids)
    net.get_compress_part().train()
    t = tqdm(dataloader_train, leave=False)
    with torch.no_grad():
        for index, sample in enumerate(t):
            _ = net.get_loss(sample)
            if index > 100:
                break
    strategy_score = net.get_eval_scores(dataloader_val)["accuracy"]
    old = strategy_score
    print("Evaluation score:{}".format(strategy_score))
    ##################### Fine-tuning #########################
    lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(net.optimizer,
                                                        100,
                                                        eta_min=5e-5)
    #lr_scheduler=optim.lr_scheduler.StepLR(net.optimizer,5,0.9)
    reporter = Reporter(opt)
    best_acc = strategy_score
    best_kappa = 0
    net._net.train()
    for epoch in range(1, opt.epoch + 1):
        net.confusion_matrix.reset()
        reporter.log_metric("lr", net.optimizer.param_groups[0]["lr"], epoch)
        train_loss = train_epoch(
            net,
            dataloader_train,
            net.optimizer,
        )
        reporter.log_metric("train_loss", train_loss, epoch)
        lr_scheduler.step()
        scores = net.get_eval_scores(dataloader_val)
        kappa = CaluKappa(net.confusion_matrix)
        print("==> Evaluation: Epoch={} Acc={}".format(epoch, str(scores)))
        reporter.log_metric("eval_acc", scores["accuracy"], epoch)
        reporter.log_metric("kappa", kappa, epoch)
        if scores["accuracy"] > best_acc:
            best_acc = scores["accuracy"]
            best_kappa = kappa
            save_checkpoints(
                scores["accuracy"],
                net._net,
                reporter,
                opt.exp_name,
                epoch,
            )
        reporter.log_metric("best_acc", best_acc, epoch)
        save_checkpoints(
            scores["accuracy"],
            net._net,
            reporter,
            opt.exp_name,
            epoch,
        )
        print("==> Training epoch %d" % epoch)
    """将模型转换为torch script保存"""
    ckpt_name = "{}_best.pth".format(opt.exp_name)
    load(net, os.path.join(reporter.ckpt_log_dir, ckpt_name))
    net._net.eval()
    traced_script_module = torch.jit.trace(net._net,
                                           torch.rand(1, 3, 256, 256))
    traced_script_module.save(os.path.join(reporter.log_dir, "model.pt"))
    del net
    return old, best_acc, best_kappa, flops_after, params_after