Code Example #1
ex_data_name = args.ex_data_name
ex_data_size = args.ex_data_size
ex_data_sel_rand = args.ex_data_sel_rand == 'yes'
pre_idx_path = args.pre_idx

setup_visibile_gpus(str(gpu_idx))

k.tensorflow_backend.set_session(tf.Session(config=gpu_config))

if not os.path.exists(save_model_dir):
    os.makedirs(save_model_dir)

data = MNIST(data_dir,
             data_name,
             validation_size,
             model_meta=model_mnist_meta,
             input_data_format=CHANNELS_LAST,
             output_data_format=data_format,
             train_size=train_size,
             train_sel_rand=train_sel_rand)

if pre_idx_path is not None:
    pre_idx = utils.load_model_idx(pre_idx_path)
    data.apply_pre_idx(pre_idx)
if ex_data_dir is not None and ex_data_name is not None and ex_data_size > 0:
    data.append_train_data(ex_data_dir,
                           ex_data_name,
                           ex_data_size,
                           input_data_format=CHANNELS_LAST,
                           output_data_format=data_format,
                           sel_rand=ex_data_sel_rand)
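
The `args.*` attributes read at the top of this example come from an argument parser that is not shown. A minimal sketch of the flags it would need, inferred purely from the attribute names (flag names and defaults are assumptions):

import argparse

parser = argparse.ArgumentParser()
# Inferred from the attributes the snippet reads; defaults are guesses.
parser.add_argument('--ex_data_name', type=str, default=None)
parser.add_argument('--ex_data_size', type=int, default=0)
parser.add_argument('--ex_data_sel_rand', choices=['yes', 'no'], default='no')
parser.add_argument('--pre_idx', type=str, default=None)
args = parser.parse_args()

Note the yes/no string convention: these snippets compare against 'yes' rather than using store_true flags.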
Code Example #2
        models_pred = []
        gpu_i = 0
        for _model_name in target_model_names:
            with tf.device('/gpu:' + str(gpu_i % gpu_count)):
                _path = os.path.join(target_model_dir, _model_name)
                models.append(MODEL(_path, sess,
                                    input_data_format=data_format, data_format=data_format,
                                    dropout=dropout, rand_params=para_random_spike, is_batch=True))
                models_idx.append(utils.load_model_idx(_path))
                gpu_i += 1

        for _eval_lab in eval_lab:
            models_pred.append(utils.load_obj(_eval_lab, directory=''))

        data = MNIST(_dir, name, model_meta=model_meta, validation_size=0,
                     input_data_format=CHANNELS_LAST, output_data_format=data_format, batch_size=batch_size,
                     boxmin=boxmin, boxmax=boxmax)

        num_labels = model_meta.labels
        # choose the smallest integer dtype that can hold a label id
        if 0 <= num_labels <= 255:
            label_data_type = np.uint8
        else:
            label_data_type = np.uint16

        # construct detector and dropout_rate
        detector_dict, thrs, detector_gpu_idx = \
            worker.build_detector(detector_model_dir, detector_model_names, target_model_names, target_model_dir,
                                  "", MODEL, models, data, data_format, is_det_joint, models_idx, gpu_count)

        # concat reformer in front of models
        reformer_id = 0
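
The `tf.device('/gpu:' + str(gpu_i % gpu_count))` idiom above spreads the target models across the available GPUs round-robin. A stripped-down sketch of the same placement pattern, with the model construction replaced by a placeholder variable (gpu_count and the names are made up):

import tensorflow as tf

gpu_count = 2
for i, name in enumerate(['model_a', 'model_b', 'model_c']):
    # model i lands on GPU i modulo the number of GPUs: 0, 1, 0, ...
    with tf.device('/gpu:%d' % (i % gpu_count)):
        _ = tf.Variable(tf.zeros([1]), name=name)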
Code Example #3
    "model_id\tidx\tl0\tl1\tl2\tl_inf\tfloat_pred\tfloor_pred\tceil_pred\tround_pred\tfloat_tran\t"
    "floor_tran\tceil_trans\tround_trans\tfloat_det\tfloor_det\tceil_det\tround_det"
    "\tfloat_pred_topk\tfloor_pred_topk\tceil_pred_topk\tround_pred_topk\n".
    encode())

# Performance tuning: give each GPU its own pool of private host threads and
# enable the faster cuDNN batch-norm and non-fused Winograd convolution paths.
gpu_thread_count = 2
os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
os.environ['TF_GPU_THREAD_COUNT'] = str(gpu_thread_count)
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'

with tf.Session(config=gpu_config) as sess:
    data = MNIST(data_dir,
                 data_name,
                 model_meta=model_meta,
                 validation_size=5000,
                 input_data_format=CHANNELS_LAST,
                 output_data_format=data_format,
                 batch_size=batch_size)

    data_type = 'test' if is_test_data else 'train'

    # cache model names
    models_by_name = {
        _model_name: _model_name
        for _model_name in save_model_name_list
    }

    models_by_id = {}
    idx = 0
    for _model_name in save_model_name_list:
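
A note on the TF_GPU_THREAD_* block near the top of this example: TensorFlow reads these variables when it initializes its GPU devices, so they only take effect if they are exported before the first session is created, which is the order the snippet uses. A minimal standalone sketch of the same ordering:

import os

# Must be set before TensorFlow initializes its GPU devices,
# i.e. before the first tf.Session is constructed.
os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
os.environ['TF_GPU_THREAD_COUNT'] = '2'

import tensorflow as tf

with tf.Session() as sess:  # GPU devices are initialized here
    pass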
Code Example #4
if not os.path.exists(save_model_dir):
    os.makedirs(save_model_dir)

k.tensorflow_backend.set_session(tf.Session(config=gpu_config))

poolings = ["average", "max"]

shape = [model_meta.height, model_meta.width, model_meta.channel] \
    if data_format == CHANNELS_LAST else [model_meta.channel, model_meta.height, model_meta.width]
combination_I = [3, "average", 3]
combination_II = [3]
activation = "sigmoid"
reg_strength = 1e-9

data = MNIST(data_dir, data_name, validation_size, model_meta=model_meta,
             input_data_format=CHANNELS_LAST, output_data_format=data_format,
             train_size=train_size, train_sel_rand=train_sel_rand)

if set_name == 'mnist':
    epochs = 100
    AE_BIT = DAE(shape, combination_I, v_noise=0.1, activation=activation,
                 reg_strength=reg_strength, model_dir=save_model_dir,
                 data_format=data_format, input_data_format=data_format)
    AE_BIT.train(data, save_model_name + "_R", num_epochs=epochs)

    AE_I = DAE(shape, combination_I, v_noise=0.1, activation=activation,
               reg_strength=reg_strength, model_dir=save_model_dir,
               data_format=data_format, input_data_format=data_format)
    AE_I.train(data, save_model_name + "_I", num_epochs=epochs)

    AE_II = DAE(shape, combination_II, v_noise=0.1, activation=activation,
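
The combination lists above appear to describe the denoising autoencoder's layer stack in the style of MagNet's defensive autoencoders, where an integer denotes a 3x3 convolution with that many filters and a string names a pooling layer. Since the DAE internals are not shown, the following Keras sketch of how such a spec might be expanded is an assumption, not the actual implementation:

from keras.layers import AveragePooling2D, Conv2D, MaxPooling2D

def build_stack(combination, activation='sigmoid'):
    # Hypothetical interpretation: int -> Conv2D(filters, 3x3),
    # 'average' / 'max' -> the matching 2x2 pooling layer.
    layers = []
    for spec in combination:
        if isinstance(spec, int):
            layers.append(Conv2D(spec, (3, 3), padding='same',
                                 activation=activation))
        elif spec == 'average':
            layers.append(AveragePooling2D((2, 2), padding='same'))
        elif spec == 'max':
            layers.append(MaxPooling2D((2, 2), padding='same'))
    return layers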
Code Example #5
is_one_bit = args.is_1bit == 'yes'
is_det_joint = args.is_det_joint == 'yes'
is_logits = args.is_logits == 'yes'

if palette_shade == -1:
    palette_shade = None
elif palette_shade > 6 or palette_shade < 2:
    print("Error: invalid palette shade value %d. Valid values: -1 or 2-6" % palette_shade)
    exit(1)

model_list = args.model_name.split(",")
det_model_dir = args.det_model_dir
det_model_names = args.det_model_names.split(",")
det_model_reformers = args.reformer_name.split(',')

data = MNIST(args.data_dir, args.data_name, model_meta=model_meta, validation_size=5000,
             input_data_format=CHANNELS_LAST, output_data_format=data_format)

if args.output_file is not None:
    out_dir = os.path.dirname(args.output_file)
    if out_dir and not os.path.exists(out_dir):
        os.makedirs(out_dir)

fp = open(args.output_file, 'w') if args.output_file is not None else sys.stdout
fp.write("name\tl2\ttest_acc\ttest_unchg\ttest_loss\n")


with tf.Session(config=gpu_config) as sess:
    # cache model names
    models_by_name = {_model_name: _model_name for _model_name in model_list}

    models_by_id = {}
    idx = 0
    for _model_name in model_list:
Code Example #6
bagging = args.bagging == 'yes'
iter1 = args.iter1
iter2 = args.iter2
noise_mthd = args.noise_mthd
noise_iter = args.noise_iter
noise_distance = args.noise_distance
r_dis = args.r_dis
data_start = args.test_data_start
data_len = args.test_data_len
data_format = args.data_format
verbose = args.verbose
output_file = args.output_file

model_list = args.model_name.split(",")

data = MNIST(args.data_dir, args.data_name, 0, model_meta=model_meta,
             input_data_format=CHANNELS_LAST, output_data_format=data_format)

if output_file is not None:
    out_dir = os.path.dirname(output_file)
    if out_dir and not os.path.exists(out_dir):
        os.makedirs(out_dir)

fp = open(output_file, 'w') if output_file is not None else sys.stdout
fp.write("name\tl2\ttest_acc\ttest_unchg\ttest_loss\n")

with tf.Session(config=gpu_config) as sess:
    timestart = time.time()

    for model_name in model_list:
        print("=============================")
        print("valid rand_inference for model %s" % model_name)
        print("=============================")
        fp.write(model_name)
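
One caveat with the `fp = open(...) if output_file is not None else sys.stdout` pattern used in this and several other examples: the file handle is never closed, and an unconditional fp.close() would close sys.stdout when no output file was given. A hypothetical helper (not part of the original code) that handles both cases:

import contextlib
import sys

@contextlib.contextmanager
def open_or_stdout(path, mode='w'):
    # Yield a real file when a path is given, otherwise stdout;
    # only the real file is closed on exit.
    if path is None:
        yield sys.stdout
    else:
        with open(path, mode) as fh:
            yield fh

With it, the snippet's conditional open becomes simply `with open_or_stdout(output_file) as fp:`.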
Code Example #7
out_dir = os.path.dirname(output_file)
if out_dir and not os.path.exists(out_dir):
    os.makedirs(out_dir)

fp = open(output_file, 'wb')
fp.write(
    "name\ttest_acc\ttest_acc_top_k\ttest_loss\ttrain_acc\ttrain_acc_top_k\ttrain_loss\n"
    .encode())
fp.write(save_model_name.encode())

with tf.Session(config=gpu_config) as sess:
    cur_path = os.path.join(save_model_dir, save_model_name)
    data = MNIST(data_dir,
                 data_name,
                 0,
                 model_meta=model_meta,
                 input_data_format=CHANNELS_LAST,
                 output_data_format=data_format,
                 batch_size=batch_size)

    sess.run(tf.local_variables_initializer())
    # if encrypt and iteration > 1:
    #     data.encrypt_tf(sess, os.path.join(save_model_dir, save_model_name), batch_size=batch_size)
    #     encrypt = False
    model = MODEL(cur_path,
                  sess,
                  output_logits=True,
                  input_data_format=data_format,
                  data_format=data_format,
                  dropout=dropout,
                  rand_params=para_random_spike,
Code Example #8
print()

# print a horizontal separator line
print("-" * (col_len * col_width))

setup_visibile_gpus(None)

with tf.Session(config=gpu_config) as sess:
    with tf.device('/cpu:0'):
        # load the original training and testing data
        data = MNIST(args.data_dir,
                     '',
                     model_meta=model_meta,
                     normalize=False,
                     batch_size=batch_size)

        data_type = 'test' if is_test else 'train'
        data_idx = data_idx[start_idx // step:start_idx // step + count]
        argsort_adv_img_idx = np.argsort(data_idx)
        back_argsort_adv_img_idx = np.argsort(argsort_adv_img_idx)
        data_ref, _, _ = data.get_data_by_idx(data_idx[argsort_adv_img_idx],
                                              data_type=data_type)
        data_ref = data_ref[back_argsort_adv_img_idx]
        data_ref = np.repeat(data_ref, duplicate, axis=0)

for i in range(0, count):
    row = math.floor(i / col_len)
    col = (i % col_len) * (3 if show_diff else 1)
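
The double np.argsort in this example is the standard trick for undoing a sort: argsort(data_idx) is the permutation that sorts the indices (apparently what get_data_by_idx expects), and argsort of that permutation is its inverse, which puts the fetched rows back into the caller's order. A small self-contained demonstration:

import numpy as np

idx = np.array([7, 2, 9, 4])
order = np.argsort(idx)           # permutation that sorts idx: [1, 3, 0, 2]
back = np.argsort(order)          # inverse permutation
rows = idx[order]                 # fetched in sorted order: [2, 4, 7, 9]
assert (rows[back] == idx).all()  # original order restored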
Code Example #9
    if set_name == 'mnist' or set_name == 'fashion':
        model_meta = model_mnist_meta
    elif set_name == "cifar10":
        model_meta = model_cifar10_meta
    else:
        model_meta = None
        MODEL = None
        print("invalid data set name %s" % set_name)
        exit(1)

    data = MNIST(data_dir,
                 data_name,
                 0,
                 model_meta=model_meta,
                 input_data_format=CHANNELS_LAST,
                 output_data_format=data_format,
                 normalize=normalize)

    images = data.test_data[0:500]
    x_shape = images.shape

    if normalize:
        output_img = compress_float_py(images,
                                       data_format=data_format,
                                       quality=quality)
        output_img = np.clip(output_img * 255, 0, 255)
        output_img = output_img.astype(np.uint8)
    else:
        images = images.astype(np.uint8)
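
The np.clip before the uint8 cast above matters: lossy compression can push normalized pixel values slightly outside [0, 1], and casting a negative or >255 float straight to uint8 typically wraps around instead of saturating. A tiny illustration with made-up values:

import numpy as np

x = np.array([-0.01, 0.5, 1.02])                  # slight overshoot after compression
wrapped = (x * 255).astype(np.uint8)              # e.g. -2.55 typically wraps to 254
safe = np.clip(x * 255, 0, 255).astype(np.uint8)  # saturates at 0 and 255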
Code Example #10
if set_name == 'mnist' or set_name == 'fashion':
    model_meta = model_mnist_meta
elif set_name == "cifar10":
    model_meta = model_cifar10_meta
elif set_name == "cifar20":
    model_meta = model_cifar20_meta
elif set_name == "cifar100":
    model_meta = model_cifar100_meta
else:
    model_meta = None
    print("invalid data set name %s" % set_name)
    exit(1)

data_set = MNIST(data_dir,
                 data_name,
                 validation_size=0,
                 model_meta=model_meta,
                 normalize=False)
sample_size = data_set.train_count // partition_count

for i in range(partition_count):
    cur_partition_name = partition_name + str(i)
    cur_partition_dir = os.path.join(data_dir, cur_partition_name)
    if not os.path.exists(cur_partition_dir):
        os.makedirs(cur_partition_dir)

    # save json configuration
    config = {
        "name": cur_partition_name,
        "train-img": "train-img.gz",
        "train-label": "train-label.gz",