def get_default_args():
    """Build and parse the command line for the post-training-quantization
    search script.

    Extends the standard classifier-compression parser with the
    quantization-evaluation (``--qe-*``) options used by the search.

    Returns:
        argparse.Namespace: the parsed command-line arguments.
    """
    parser = classifier.init_classifier_compression_arg_parser()
    parser.add_argument(
        '--qe-no-quant-layers',
        '--qenql',
        type=str,
        nargs='+',
        metavar='LAYER_NAME',
        default=[],
        help='List of layer names for which to skip quantization.')
    parser.add_argument(
        '--qe-calib-portion',
        type=float,
        default=1.0,
        help=
        'The portion of the dataset to use for calibration stats collection.')
    parser.add_argument(
        '--qe-calib-batchsize',
        type=int,
        default=256,
        # Fixed: help text was copy-pasted from --qe-calib-portion.
        help='The batch size to use for calibration stats collection.')
    parser.add_argument('--base-score', type=float, default=None)
    parser.add_argument('--quantize-inputs',
                        type=str,
                        nargs='+',
                        metavar='LAYER_NAME#INPUT_IDX',
                        default=[],
                        help='The inputs of layers to quantize')
    parser.add_argument('--resume-search-from',
                        type=str,
                        help='Search checkpoint file to resume.',
                        default=None)
    args = parser.parse_args()
    return args
def get_default_args():
    """Build and parse the command line for the grid-search experiment.

    Extends the standard classifier-compression parser with grid-resolution,
    scale-ratio bounds, bias-correction, and experiment-naming options.

    Returns:
        argparse.Namespace: the parsed command-line arguments.
    """
    parser = classifier.init_classifier_compression_arg_parser()
    parser.add_argument(
        '--grid_resolution',
        '-gr',
        type=int,
        help='Number of intervals in the grid, one coordinate.',
        default=11)
    parser.add_argument('--min_ratio',
                        '-minr',
                        type=float,
                        # Fixed typo: "ration" -> "ratio".
                        help='min ratio of the scale',
                        default=0.7)
    parser.add_argument('--max_ratio',
                        '-maxr',
                        type=float,
                        # Fixed typo: "ration" -> "ratio".
                        help='max ratio of the scale',
                        default=1.3)
    parser.add_argument('--bcorr_w',
                        '-bcw',
                        action='store_true',
                        help='Bias correction for weights',
                        default=False)
    parser.add_argument('--experiment',
                        '-exp',
                        help='Name of the experiment',
                        default='default')
    parser.add_argument('--w_index', '-w_idx', nargs='+', type=int)
    args = parser.parse_args()
    return args
Example #3
0
def main():
    """Parse the AMC command line and run the auto-compression training loop."""
    import amc_args

    # Build the base classifier-compression parser, extend it with the
    # AutoML-for-compression options, then parse the command line.
    arg_parser = classifier.init_classifier_compression_arg_parser()
    arg_parser = amc_args.add_automl_args(arg_parser)
    parsed_args = arg_parser.parse_args()

    sample_app = AutoCompressionSampleApp(parsed_args,
                                          script_dir=os.path.dirname(__file__))
    return sample_app.train_auto_compressor()
def get_default_args():
    """Build and parse the command line for the bias-correction experiment.

    Returns:
        argparse.Namespace: the parsed command-line arguments.
    """
    arg_parser = classifier.init_classifier_compression_arg_parser()
    arg_parser.add_argument('--bcorr_w',
                            '-bcw',
                            action='store_true',
                            help='Bias correction for weights',
                            default=False)
    arg_parser.add_argument('--experiment',
                            '-exp',
                            help='Name of the experiment',
                            default='default')
    arg_parser.add_argument('--w_index', '-w_idx', nargs='+', type=int)
    return arg_parser.parse_args()
def get_default_args():
    """Build and parse the command line for the LAPQ coordinate-search script.

    Extends the standard classifier-compression parser with options that
    control the scipy.optimize-based minimization of quantization parameters.

    Returns:
        argparse.Namespace: the parsed command-line arguments.
    """
    parser = classifier.init_classifier_compression_arg_parser()
    parser.add_argument('--opt-maxiter',
                        dest='maxiter',
                        default=None,
                        type=int,
                        help='Max iteration for minimization method.')
    parser.add_argument('--opt-maxfev',
                        dest='maxfev',
                        default=None,
                        type=int,
                        # Fixed: help text was copy-pasted from --opt-maxiter.
                        help='Max function evaluations for minimization method.')
    parser.add_argument(
        '--opt-method',
        dest='method',
        default='Powell',
        # Fixed typo: "scip" -> "scipy".
        help='Minimization method used by scipy.optimize.minimize.')
    parser.add_argument(
        '--opt-bh',
        dest='basinhopping',
        action='store_true',
        default=False,
        help='Use scipy.optimize.basinhopping stochastic global minimum search.'
    )
    parser.add_argument(
        '--opt-bh-niter',
        dest='niter',
        default=100,
        # Fixed: without type=int a command-line value would be parsed as str,
        # inconsistent with the int default.
        type=int,
        help='Number of iterations for the basinhopping algorithm.')
    parser.add_argument('--opt-init-mode',
                        dest='init_mode',
                        default='NONE',
                        choices=list(_INIT_MODES),
                        # Fixed typo: "initalization" -> "initialization".
                        help='The mode of quant initialization. Choices: ' +
                        '|'.join(list(_INIT_MODES)))
    parser.add_argument(
        '--opt-init-method',
        dest='init_mode_method',
        help=
        'If --opt-init-mode was specified as L1/L2/L3, this specifies the method of '
        'minimization.')
    parser.add_argument('--opt-val-size',
                        type=float,
                        default=1,
                        help='Use portion of the test size.')
    parser.add_argument(
        '--opt-eval-memoize-dataloader',
        dest='memoize_dataloader',
        action='store_true',
        default=False,
        help='Stores the input batch in memory to optimize performance.')
    parser.add_argument('--base-score', type=float, default=None)
    parser.add_argument(
        '--opt-search-clipping',
        dest='search_clipping',
        action='store_true',
        help='Search on clipping values instead of scale/zero_point.')
    args = parser.parse_args()
    return args
Example #6
0
def main():
    """Run the classifier-compression sample: parse args, train, then test."""
    # Compose the full CLI (compression options plus app-specific flags).
    full_parser = parser.add_cmdline_args(
        classifier.init_classifier_compression_arg_parser(True))
    args = full_parser.parse_args()

    app = ClassifierCompressorSampleApp(args,
                                        script_dir=os.path.dirname(__file__))
    if app.handle_subapps():
        return
    init_knowledge_distillation(app.args, app.model, app.compression_scheduler)
    app.run_training_loop()
    # Evaluate on the test set last and report its results.
    return app.test()
Example #7
0
def main(individual):
    """Evaluate one genetic-algorithm individual as a compression schedule.

    Decodes *individual* (a flat bit list, 7 bits per gene) into per-layer
    sparsity ratios, writes them to a YAML compression schedule, trains the
    compressed model, then scores the run as accuracy * sparsity read back
    from an .npz file written by the training pipeline.

    Args:
        individual: sequence of 0/1 ints whose length is a multiple of 7;
            each 7-bit group is one percentage value (LSB first).

    Returns:
        accuracy * sparsity (the fitness score), or None if a sub-app
        handled the invocation.
    """
    # Parse arguments
    #print(individual)
    args = parser.add_cmdline_args(
        classifier.init_classifier_compression_arg_parser(True)).parse_args()
    args.compress = 'FB_gene1.yaml'
    idx = 0
    num = 0  # decoded percentage for the current gene (initial 5%)
    yaml_list = []
    for i in range(int(len(individual) / 7)):  # number of genes = bits / 7
        num = 0
        for j in range(7):  # 7 bits per gene, least-significant bit first
            if (individual[idx] == 1):
                num += pow(2, j)
            idx += 1
        # Clamp decoded values: cap at 90%, and enforce floors
        # (gene 59 gets a special 31% floor — presumably the final layer;
        # TODO confirm against the YAML schedule).
        if num >= 100:
            num = 90
        elif i == 59 and num <= 30:
            num = 31
        elif num <= 5:
            num = 6
        yaml_list.append(num / 100)
    print("yaml_list")
    print(yaml_list)
    write_yaml(yaml_list)

    app = ClassifierCompressorSampleApp(args,
                                        script_dir=os.path.dirname(__file__))
    if app.handle_subapps():
        return
    init_knowledge_distillation(app.args, app.model, app.compression_scheduler)
    app.run_training_loop()
    # Finally run results on the test set
    # (top1, top5, losses come back from the training pipeline)

    # NOTE(review): hard-coded absolute path; the training run is assumed to
    # have written accuracy/sparsity into this file — verify the producer.
    loaded_array = np.load(
        '/home/oza/pre-experiment/speeding/distiller/distiller/apputils/simple_gene.npz'
    )
    accuracy = loaded_array['array_1']
    sparce = loaded_array['array_2']
    print("accuracy: " + str(accuracy))
    print("sparce: " + str(sparce))
    accuracy /= 100
    sparce /= 100
    #return app.test()
    # Fitness = accuracy * sparsity; track the best seen so far.
    score = accuracy * sparce
    print("score: " + str(score))
    # NOTE(review): module-level variable named "max" shadows the builtin.
    global max
    if (score > max):
        max = score
        print("max score: " + str(score))
        print("max individual: " + str(yaml_list))
    return accuracy * sparce
Example #8
0
def main():
    """Run the classifier-compression sample: parse args, train, then test."""
    # Compose the full CLI (compression options plus app-specific flags).
    full_parser = parser.add_cmdline_args(
        classifier.init_classifier_compression_arg_parser(True))
    args = full_parser.parse_args()

    # NOTE(review): the tracker is constructed but never consumed in this
    # function — kept as-is in case construction has side effects; confirm
    # before removing.
    performance_tracker = apputils.SparsityAccuracyTracker(
        args.num_best_scores)

    app = ClassifierCompressorSampleApp(args,
                                        script_dir=os.path.dirname(__file__))
    if app.handle_subapps():
        return
    init_knowledge_distillation(app.args, app.model, app.compression_scheduler)
    app.run_training_loop()
    # Evaluate on the test set last and report its results.
    return app.test()
Example #9
0
        df_amc_results = pd.read_csv(os.path.join(ft_dir, "amc.csv"))
        top1_sorted_df_amc_results = df_amc_results.sort_values(
            by=['top1'], ascending=False)
        top1_sorted_df_amc_results = top1_sorted_df_amc_results[0:best_nets]
        checkpoints = [
            os.path.join(ft_dir, ckpt + "_checkpoint.pth.tar")
            for ckpt in top1_sorted_df_amc_results.ckpt_name
        ]
        return checkpoints

    try:
        set_start_method('forkserver')
    except RuntimeError:
        pass
    # Parse arguments
    argparser = classifier.init_classifier_compression_arg_parser()
    add_parallel_args(argparser)
    app_args = argparser.parse_args()

    print("Starting fine-tuning")
    stats_file = FTStatsLogger(
        os.path.join(app_args.scan_dir, app_args.output_csv))
    ft_dirs = get_immediate_subdirs(app_args.scan_dir)
    for ft_dir in ft_dirs:
        checkpoints = None
        if app_args.top_performing_chkpts:
            checkpoints = get_best_checkpoints(ft_dir)
        finetune_directory(ft_dir,
                           stats_file,
                           app_args,
                           checkpoints=checkpoints)
Example #10
0
    args.device = device
    if args.resumed_checkpoint_path:
        args.load_model_path = args.resumed_checkpoint_path
    if args.load_model_path:
        msglogger.info("Loading checkpoint from %s" % args.load_model_path)
        model = apputils.load_lean_checkpoint(model, args.load_model_path,
                                              model_device=args.device)

    quantizer = distiller.quantization.PostTrainLinearQuantizer.from_args(model, args)

    dummy_input = torch.rand(*model.input_shape, device=args.device)
    model, qp_dict = lapq.ptq_coordinate_search(quantizer, dummy_input, eval_fn, test_fn=test_fn,
                                                **lapq.cmdline_args_to_dict(args))

    results = test_fn(quantizer.model)
    msglogger.info("Arch: %s \tTest: \t top1 = %.3f \t top5 = %.3f \t loss = %.3f" %
                   (args.arch, results['top-1'], results['top-5'], results['loss']))
    distiller.yaml_ordered_save('%s.quant_params_dict.yaml' % args.arch, qp_dict)

    distiller.apputils.save_checkpoint(0, args.arch, model,
                                       extras={'top1': results['top-1'], 'qp_dict': qp_dict}, name=args.name,
                                       dir=msglogger.logdir)


if __name__ == "__main__":
    # Build the classifier-compression CLI with the post-training
    # quantization / LAPQ options enabled, then run the PTQ-LAPQ flow on the
    # constructed compressor's model.
    parser = classifier.init_classifier_compression_arg_parser(include_ptq_lapq_args=True)
    args = parser.parse_args()
    args.epochs = float('inf')  # hack for args parsing so there's no error in epochs
    cc = classifier.ClassifierCompressor(args, script_dir=os.path.dirname(__file__))
    image_classifier_ptq_lapq(cc.model, cc.criterion, [cc.pylogger, cc.tflogger], cc.args)