Code example #1
0
def cr_dc40(
        try_arg, target_deps, origin_deps, pretrained_model_file,
        cluster_method, eqcls_layer_idxes,

        st_batch_size_per_gpu, st_max_epochs_per_gpu, st_lr_epoch_boundaries, st_lr_values,

        diff_factor,
        schedule_vector,

        restore_itr=0, restore_st_step=0,
        init_step=0,
        frequently_save_interval=None, frequently_save_last_epochs=None,
        slow_on_vec=False
):
    """Run the C&R base pipeline with the fixed DenseNet-40 / CIFAR-10 setup.

    All caller-supplied arguments are forwarded unchanged to
    ``cr_base_pipeline``; the DC40-specific configuration (builder, datasets,
    eqcls metadata, fixed training hyper-parameters) is filled in here.
    """
    # Fixed, DC40-specific pipeline configuration.
    dc40_fixed_config = dict(
        network_type='dc40',
        train_dataset=CIFAR10Data('train'),
        eval_dataset=CIFAR10Data('validation'),
        train_mode='train',
        eval_mode='eval',
        normal_builder_type=DC40Builder,
        model_type=TFModel,
        subsequent_strategy=DC40_SUBSEQUENT_STRATEGY,
        eqcls_follow_dict=DC40_FOLLOW_DICT,
        fc_layer_idxes=DC40_FC_LAYERS,
        st_gpu_idxes=[0],
        l2_factor=1e-4,
        eval_batch_size=500,
        image_size=32,
        # Fall back to the pipeline defaults when the caller passes None.
        frequently_save_interval=frequently_save_interval or 1000,
        frequently_save_last_epochs=frequently_save_last_epochs or 50,
        num_steps_per_ckpt_st=20000,
    )
    cr_base_pipeline(
        try_arg=try_arg,
        target_deps=target_deps,
        origin_deps=origin_deps,
        pretrained_model_file=pretrained_model_file,
        cluster_method=cluster_method,
        eqcls_layer_idxes=eqcls_layer_idxes,
        st_batch_size_per_gpu=st_batch_size_per_gpu,
        st_max_epochs_per_gpu=st_max_epochs_per_gpu,
        st_lr_epoch_boundaries=st_lr_epoch_boundaries,
        st_lr_values=st_lr_values,
        diff_factor=diff_factor,
        schedule_vector=schedule_vector,
        restore_itr=restore_itr,
        restore_st_step=restore_st_step,
        init_step=init_step,
        slow_on_vec=slow_on_vec,
        **dc40_fixed_config
    )
Code example #2
0
def compare_magnitude(prefix):
    """Magnitude-pruning baseline for DC40: prune once, then fine-tune.

    Writes 'dc40_<prefix>_prunedweights.hdf5' (reused if already present)
    and trains from it, saving the result as '<prefix>_trained.hdf5'.
    """
    pruned_file = 'dc40_{}_prunedweights.hdf5'.format(prefix)
    deps_after_prune = TARGET_DEPS
    trained_file = '{}_trained.hdf5'.format(prefix)
    eqcls_file = 'dc40_equivalent_eqcls_{}.npy'.format(prefix)

    if not os.path.exists(pruned_file):
        # Derive equivalence classes from kernel magnitudes, then prune the
        # pretrained full-size model down to the target depths.
        eqcls_by_layer = _produce_magnitude_equivalent_eqcls(
            target_deps=deps_after_prune, save_path=eqcls_file)
        bn_eqcls = calculate_bn_eqcls_dc40(eqcls_by_layer)

        origin_builder = DC40Builder(False, deps=DC40_ORIGIN_DEPS)
        origin_model = TFModel(CIFAR10Data('train'), origin_builder.build,
                               'eval', batch_size=64, image_size=32,
                               l2_factor=1e-4, deps=DC40_ORIGIN_DEPS)
        origin_model.load_weights_from_file(PRETRAINED_MODEL_FILE)
        tfm_prune_filters_and_save_dc40(origin_model, eqcls_by_layer,
                                        bn_layer_to_eqcls=bn_eqcls,
                                        save_file=pruned_file,
                                        new_deps=deps_after_prune)
        del origin_model

    # Fine-tune the pruned network from the pruned weights.
    train_builder = DC40Builder(True, deps=deps_after_prune)
    pruned_model = TFModel(CIFAR10Data('train'), train_builder.build,
                           'eval',  # NOTE(review): 'eval' mode on train data — confirm intended
                           batch_size=BATCH_SIZE, image_size=32,
                           l2_factor=DC40_L2_FACTOR, deps=deps_after_prune)
    lr = pruned_model.get_piecewise_lr(values=LR_VALUES,
                                       boundaries_epochs=LR_BOUNDARIES)
    momentum_opt = tf.train.MomentumOptimizer(lr, momentum=0.9,
                                              use_nesterov=True)
    train(pruned_model,
          train_dir='{}_train'.format(prefix),
          optimizer=momentum_opt,
          max_epochs_per_gpu=MAX_EPOCHS,
          gpu_idxes=[0],
          init_file=pruned_file,
          save_final_hdf5=trained_file,
          ckpt_dir='{}_ckpt'.format(prefix),
          ckpt_prefix=prefix,
          num_steps_every_ckpt=20000)
Code example #3
0
def _produce_magnitude_equivalent_eqcls(target_deps, save_path):
    """Build per-layer 'equivalent' eqcls for magnitude-based pruning.

    For each conv layer, the (origin - target) filters with the smallest
    summed absolute kernel magnitude are folded into the first surviving
    equivalence class; every other filter stays in its own singleton class.

    :param target_deps: per-layer filter counts to keep (indexed like
        DC40_ORIGIN_DEPS)
    :param save_path: .npy path the eqcls dict is saved to (pickled dict;
        loading it later needs allow_pickle=True)
    :return: dict mapping conv-layer index -> list of eqcls (lists of
        filter indices)
    """
    builder = DC40Builder(True, deps=DC40_ORIGIN_DEPS)
    prune_model = TFModel(CIFAR10Data('train'),
                          builder.build,
                          'train',
                          batch_size=BATCH_SIZE,
                          image_size=32,
                          l2_factor=DC40_L2_FACTOR,
                          deps=DC40_ORIGIN_DEPS)
    prune_model.load_weights_from_file(PRETRAINED_MODEL_FILE)

    equivalent_dict_eqcls = {}
    for i in DC40_ALL_CONV_LAYERS:
        kernel_value = prune_model.get_value(
            prune_model.get_kernel_tensors()[i])
        # L1 magnitude of each output filter (sum over the first three axes;
        # presumably H, W, C_in — the assert below pins the filter axis).
        summed_kernel_value = np.sum(np.abs(kernel_value), axis=(0, 1, 2))
        assert len(summed_kernel_value) == DC40_ORIGIN_DEPS[i]
        num_to_delete = DC40_ORIGIN_DEPS[i] - target_deps[i]
        # Smallest-magnitude filters first.
        index_to_delete = np.argsort(summed_kernel_value)[:num_to_delete]
        # Set gives O(1) membership instead of an O(n) array scan per filter.
        delete_set = set(index_to_delete)
        # One singleton class per surviving filter, in index order.
        cur_eqcls = [[k] for k in range(DC40_ORIGIN_DEPS[i])
                     if k not in delete_set]
        # Fold every pruned filter into the first surviving class, preserving
        # the argsort order of the original loop.
        for k in index_to_delete:
            cur_eqcls[0].append(k)
        equivalent_dict_eqcls[i] = cur_eqcls

    np.save(save_path, equivalent_dict_eqcls)
    del prune_model
    return equivalent_dict_eqcls
Code example #4
0
def eval_model(weights_path):
    """Evaluate a DC40 checkpoint on the CIFAR-10 validation split."""
    val_data = CIFAR10Data('validation')
    # Use the depths recorded in the weights file; fall back to the
    # unpruned architecture when the file carries none.
    deps = extract_deps_from_weights_file(weights_path)
    if deps is None:
        deps = DC40_ORIGIN_DEPS
    eval_builder = DC40Builder(training=False, deps=deps)
    eval_net = TFModel(val_data, eval_builder.build, 'eval',
                       batch_size=250, image_size=32)
    eval_net.load_weights_from_file(weights_path)
    evaluate_model(eval_net,
                   num_examples=val_data.num_examples_per_epoch(),
                   results_record_file='origin_dc40_eval_record.txt')
Code example #5
0
File: backend.py  Project: muzi-8/ReactCNN
                                record.save()
                    if step % CORR_SAVE_EVERY_STEP == 0:
                        for layer_idx, corr_cache in enumerate(corr_cache_list):
                            output_array = np.concatenate(corr_cache, axis=0)
                            corr_array = np.corrcoef(output_array, rowvar=False)
                            corr_array = np.exp(20*corr_array)              # mapping function
                            with open(CORR_FILE_PATTERN.format(layer_idx), 'w') as f:
                                print(format_array(corr_array), file=f)
                        print('corr file saved')

                    time.sleep(SLEEP_SECONDS)

                print('finished!')

            except Exception as e:  # pylint: disable=broad-except
                coord.request_stop(e)

            coord.request_stop()
            coord.join(threads, stop_grace_period_secs=10)

if __name__ == '__main__':
    # Removed unused local `vgg_deps` — it was assigned but never referenced.

    # Full-survey builder in inference mode; build_full_outs exposes the
    # per-layer outputs the backend records.
    fn = VFSFullSurveyBuilder(training=False).build_full_outs

    # Weights file to probe.
    target = 'origin_vfs.npy'

    dataset = CIFAR10Data('validation', './')
    # batch_size=1 — presumably so the backend collects per-example layer
    # outputs; confirm against launch_backend.
    model = TFModel(dataset, fn, 'eval', batch_size=1, image_size=32)
    model.load_weights_from_np(target)

    launch_backend(model, num_examples=10000)