Example #1
def evaluate():
    FLAGS = get_args()
    # Create Dataflow objects for the training and testing sets
    train_data, valid_data = loader.load_cifar(cifar_path=DATA_PATH,
                                               batch_size=FLAGS.bsize,
                                               subtract_mean=True)
    # Create a validation model
    valid_model = GoogLeNet_cifar(n_channel=3,
                                  n_class=10,
                                  bn=True,
                                  sub_imagenet_mean=False)
    valid_model.create_test_model()

    # Create an Evaluator object for evaluation
    evaluator = Evaluator(valid_model)

    with tf.Session() as sess:
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())
        # Restore the model trained on CIFAR
        saver.restore(
            sess, '{}inception-cifar-epoch-{}'.format(SAVE_PATH, FLAGS.load))
        print('training set:', end='')
        evaluator.accuracy(sess, train_data)
        print('testing set:', end='')
        evaluator.accuracy(sess, valid_data)
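
Note: get_args() is not shown in these examples. A minimal argparse-based
sketch, consistent with the FLAGS attributes used in Examples #1 and #2
(all names and defaults below are assumptions), might look like:

import argparse

def get_args():
    # Hypothetical reconstruction of the missing helper, based only on
    # the FLAGS attributes the surrounding examples reference.
    parser = argparse.ArgumentParser()
    parser.add_argument('--bsize', type=int, default=128)        # batch size
    parser.add_argument('--lr', type=float, default=1e-3)        # initial learning rate
    parser.add_argument('--keep_prob', type=float, default=0.5)  # dropout keep probability
    parser.add_argument('--maxepoch', type=int, default=100)     # number of training epochs
    parser.add_argument('--load', type=int, default=99)          # checkpoint epoch id to restore
    parser.add_argument('--finetune', action='store_true')       # fine-tune from ImageNet weights
    return parser.parse_args()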
Example #2
def train():
    FLAGS = get_args()
    # Create Dataflow object for training and testing set
    train_data, valid_data = loader.load_cifar(cifar_path=DATA_PATH,
                                               batch_size=FLAGS.bsize,
                                               subtract_mean=True)

    pre_trained_path = None
    if FLAGS.finetune:
        # Load the ImageNet pre-trained weights for the
        # convolutional layers when fine-tuning
        pre_trained_path = PRETRINED_PATH

    # Create a training model
    train_model = GoogLeNet_cifar(n_channel=3,
                                  n_class=10,
                                  pre_trained_path=pre_trained_path,
                                  bn=True,
                                  wd=0,
                                  sub_imagenet_mean=False,
                                  conv_trainable=True,
                                  fc_trainable=True)
    train_model.create_train_model()
    # Create a validation model
    valid_model = GoogLeNet_cifar(n_channel=3,
                                  n_class=10,
                                  bn=True,
                                  sub_imagenet_mean=False)
    valid_model.create_test_model()

    # Create a Trainer object for training control
    trainer = Trainer(train_model, valid_model, train_data, init_lr=FLAGS.lr)

    with tf.Session() as sess:
        writer = tf.summary.FileWriter(SAVE_PATH)
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())
        writer.add_graph(sess.graph)
        for epoch_id in range(FLAGS.maxepoch):
            # train one epoch
            trainer.train_epoch(sess,
                                keep_prob=FLAGS.keep_prob,
                                summary_writer=writer)
            # test the model on validation set after each epoch
            trainer.valid_epoch(sess,
                                dataflow=valid_data,
                                summary_writer=writer)
            saver.save(
                sess, '{}inception-cifar-epoch-{}'.format(SAVE_PATH, epoch_id))
        saver.save(sess,
                   '{}inception-cifar-epoch-{}'.format(SAVE_PATH, epoch_id))
        writer.close()
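
Neither train() nor evaluate() is wired to an entry point here. A typical
dispatcher (entirely assumed; the --train/--eval flags are hypothetical)
could be:

if __name__ == '__main__':
    FLAGS = get_args()
    if FLAGS.train:   # hypothetical boolean flag selecting training
        train()
    if FLAGS.eval:    # hypothetical boolean flag selecting evaluation
        evaluate()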
Example #3
def create_encode_images():
    """
    只要用一次
    """
    # data_batch_1 里有1025张图,拿出先200张作为测试
    print("load cifar data....")
    images, _ = load_cifar("../../data/cifar/data_batch_1", 200, 825)

    model = EncoderDecoder("./test4/training_checkpoints", key_enable=True)

    for i in range(0, 200):
        # Map the image from [-1, 1] back to [0, 1] before saving
        image = np.array(images[i] * 0.5 + 0.5)
        plt.imsave("./test4/data/{}_IN.png".format(i + 1), image)

    for i in range(0, 200):
        o = model.encode(input_path="./test4/data/{}_IN.png".format(i + 1))
        plt.imsave("./test4/data/{}_EN.png".format(i + 1), o)
Example #4
def train():
    FLAGS = get_args()
    train_data, valid_data = loader.load_cifar(cifar_path=DATA_PATH,
                                               batch_size=FLAGS.bsize,
                                               subtract_mean=True)

    pre_trained_path = None
    if FLAGS.finetune:
        pre_trained_path = PRETRINED_PATH

    train_model = GoogLeNet_cifar(n_channel=3,
                                  n_class=10,
                                  pre_trained_path=pre_trained_path,
                                  bn=True,
                                  wd=0,
                                  sub_imagenet_mean=False,
                                  conv_trainable=True,
                                  fc_trainable=True)
    train_model.create_train_model()

    valid_model = GoogLeNet_cifar(n_channel=3,
                                  n_class=10,
                                  bn=True,
                                  sub_imagenet_mean=False)
    valid_model.create_test_model()

    trainer = Trainer(train_model, valid_model, train_data, init_lr=FLAGS.lr)

    with tf.Session() as sess:
        writer = tf.summary.FileWriter(SAVE_PATH)
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())
        writer.add_graph(sess.graph)
        for epoch_id in range(FLAGS.maxepoch):
            trainer.train_epoch(sess,
                                keep_prob=FLAGS.keep_prob,
                                summary_writer=writer)
            trainer.valid_epoch(sess,
                                dataflow=valid_data,
                                summary_writer=writer)
            saver.save(
                sess, '{}inception-cifar-epoch-{}'.format(SAVE_PATH, epoch_id))
        saver.save(sess,
                   '{}inception-cifar-epoch-{}'.format(SAVE_PATH, epoch_id))
        writer.close()
Example #5
def train():
    FLAGS = get_args()
    train_data, valid_data = loader.load_cifar(cifar_path=DATA_PATH,
                                               batch_size=FLAGS.bsize,
                                               substract_mean=True)

    train_model = VGG_CIFAR10(n_channel=3,
                              n_class=10,
                              pre_trained_path=None,
                              bn=True,
                              wd=5e-3,
                              trainable=True,
                              sub_vgg_mean=False)
    train_model.create_train_model()

    valid_model = VGG_CIFAR10(n_channel=3,
                              n_class=10,
                              bn=True,
                              sub_vgg_mean=False)
    valid_model.create_test_model()

    trainer = Trainer(train_model, valid_model, train_data, init_lr=FLAGS.lr)

    with tf.Session() as sess:
        writer = tf.summary.FileWriter(SAVE_PATH)
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())
        writer.add_graph(sess.graph)
        for epoch_id in range(FLAGS.maxepoch):
            trainer.train_epoch(sess,
                                keep_prob=FLAGS.keep_prob,
                                summary_writer=writer)
            trainer.valid_epoch(sess,
                                dataflow=valid_data,
                                summary_writer=writer)
            saver.save(sess,
                       '{}vgg-cifar-epoch-{}'.format(SAVE_PATH, epoch_id))
        saver.save(sess, '{}vgg-cifar-epoch-{}'.format(SAVE_PATH, epoch_id))
        # Flush and close the summary writer
        writer.close()
Example #6
def evaluate():
    FLAGS = get_args()
    train_data, valid_data = loader.load_cifar(cifar_path=DATA_PATH,
                                               batch_size=FLAGS.bsize,
                                               substract_mean=True)

    valid_model = VGG_CIFAR10(n_channel=3,
                              n_class=10,
                              bn=True,
                              sub_vgg_mean=False)
    valid_model.create_test_model()

    evaluator = Evaluator(valid_model)

    with tf.Session() as sess:
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())
        saver.restore(sess,
                      '{}vgg-cifar-epoch-{}'.format(SAVE_PATH, FLAGS.load))
        print('training set:', end='')
        evaluator.accuracy(sess, train_data)
        print('testing set:', end='')
        evaluator.accuracy(sess, valid_data)
Example #7
def train(config_loader):
    """
    train main loop
    :param config_loader:
    :return:
    """
    # # since the number of the ship images in cifar10-batch1 is 1025,
    # the first 1000 images are for training, then the next 25 images are for testing
    train_image_num = 1000
    test_image_num = 25

    # start
    print("initial training progress....")

    # load data
    if "cifar" in config_loader.data_dir:
        print("load cifar data....")
        images, test_images = loader.load_cifar(
            config_loader.data_dir, train_image_num, test_image_num)
    elif "mnist" in config_loader.data_dir:
        print("load mnist data....")
        images, test_images = loader.load_mnist(
            config_loader.data_dir, train_image_num, test_image_num)
    else:
        print("neither cifar nor mnist data found...")
        return

    print("create mask list....")
    mask_list = create_mask_list()

    # checkpoint instance
    print("initial checkpoint....")
    checkpoint_prefix = os.path.join(config_loader.checkpoints_dir, "ckpt")

    # The call functions of the Generator and Discriminator have been
    # decorated with tf.contrib.eager.defun(), which gives a performance
    # speedup (~25 seconds per epoch)
    print("initial encoder....")
    encoder = auto.Encoder()
    print("initial decoder....")
    decoder = auto.Decoder()

    # initial optimizer
    print("initial optimizer....")
    train_optimizer = tf.train.AdamOptimizer(2e-4, beta1=0.5)

    checkpoint = tf.train.Checkpoint(
        train_optimizer=train_optimizer, encoder=encoder, decoder=decoder)

    print("initial train log....")
    log_tool = train_tool.LogTool(
        config_loader.log_dir, config_loader.save_period)

    # restoring the latest checkpoint in checkpoint_dir if necessary
    if config_loader.load_latest_checkpoint is True:
        checkpoint.restore(tf.train.latest_checkpoint(
            config_loader.checkpoints_dir))
        print("load latest checkpoint....")

    
    def train_each_round(epoch):
        # initial input number
        image_num = 1
        # initial loss
        train_loss = 0
        # calculate image length
        image_len = images.shape[0]
        # each epoch, run all the images
        for i in range(image_len):
            # print("input_image {}".format(image_num))
            input_image = images[i:i + 1, :, :, :]
            # calculate input number
            image_num = image_num + 1

            with tf.GradientTape() as train_tape:
                # global train
                train_loss = global_train_iterator(input_image=input_image,
                                                   mask_list=mask_list,
                                                   train_tape=train_tape,
                                                   encoder=encoder,
                                                   decoder=decoder,
                                                   train_optimizer=train_optimizer)

        # save test result
        if epoch % config_loader.save_period == 0:
            rand = random.randint(0, test_images.shape[0] - 1)
            # get a random image
            input_image = test_images[rand:rand + 1, :, :, :]
            # show encoder output
            encoder_output = encoder(input_image, training=True)
            # crop the encoder output
            encoder_output = crop_image(encoder_output, mask_list)
            # decoder
            decoder_output = decoder(encoder_output, training=True)
            titles = ["IN", "EN", "DE"]
            image_list = [input_image,
                          tf.reshape(encoder_output, [1, 128, 128, 3]),
                          decoder_output]
            log_tool.save_image_list(image_list=image_list, title_list=titles)
            # evaluate in test data
            test_loss = evaluate_test_loss(test_images=test_images, image_num=test_image_num, encoder=encoder,
                                           decoder=decoder)
            # save loss and test loss in log file
            log_tool.save_loss(train_loss=train_loss, test_loss=test_loss)

    # start training
    foreach_training(log_tool=log_tool, checkpoint=checkpoint, checkpoint_prefix=checkpoint_prefix,
                     config_loader=config_loader, train_each_round=train_each_round)
Example #8
if __name__ == '__main__':
    args = parse_args()
    args.save_path = os.path.join(args.root_path, args.exp_name)
    args.model_path = os.path.join(args.save_path, 'model')
    prepare_save_path(args)
    get_gen_loss, get_disc_loss = objectives.losses[args.objective]
    json.dump(vars(args), open(os.path.join(args.save_path, 'args.json'), 'w'))
    if args.use_visdom:
        import viz
        viz.setup(args.server, args.port, env=args.exp_name, use_tanh=True)
    else:
        viz = MockVisdom()
    encoder = SNEncoder if args.use_sn else Encoder
    train_loader, test_loader, shape = load_cifar(args.dataset_loc,
                                                  args.batch_size,
                                                  args.test_batch_size)

    gan = Decoder(shape, args.gen_h_size, args.z_size, True, nn.ReLU(True),
                  4).cuda()
    discriminator = encoder(shape, args.disc_h_size, 1, True,
                            nn.LeakyReLU(0.1, True), 4).cuda()
    gan.apply(weights_init)
    discriminator.apply(weights_init)
    weight_clip = weight_cliping(0.05)

    generator_optimizer = optim.Adam(gan.parameters(),
                                     lr=args.lr,
                                     betas=(args.beta1, args.beta2))
    discriminator_optimizer = optim.Adam(discriminator.parameters(),
                                         lr=args.lr,
                                         betas=(args.beta1, args.beta2))
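
weight_cliping() (spelled as in the source) is not defined in the snippet,
which only constructs it. A plausible sketch, assuming it is later applied
to the discriminator as WGAN-style weight clipping, is:

def weight_cliping(clip_value):
    # Assumed helper: returns a function that clamps every parameter of a
    # module into [-clip_value, clip_value] (the WGAN weight-clipping trick)
    def _clip(module):
        for p in module.parameters():
            p.data.clamp_(-clip_value, clip_value)
    return _clip

It could then be applied after each discriminator update, for example with
discriminator.apply(weight_clip).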
Example #9
def train():
    FLAGS = get_args()
    train_data, valid_data = loader.load_cifar(cifar_path=FLAGS.data_path,
                                               batch_size=FLAGS.bsize,
                                               substract_mean=True)

    train_model = VGG_CIFAR10(n_channel=3,
                              n_class=10,
                              pre_trained_path=None,
                              bn=True,
                              wd=5e-3,
                              trainable=True,
                              sub_vgg_mean=False)
    train_model.create_train_model()

    valid_model = VGG_CIFAR10(n_channel=3,
                              n_class=10,
                              bn=True,
                              sub_vgg_mean=False)
    valid_model.create_test_model()

    trainer = Trainer(train_model, valid_model, train_data, init_lr=FLAGS.lr)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        writer = tf.compat.v1.summary.FileWriter(FLAGS.save_path)
        saver = tf.compat.v1.train.Saver()
        # Initialize variables first, then restore, so the restored
        # weights are not overwritten by the initializer
        sess.run(tf.global_variables_initializer())
        if FLAGS.saved_model != '':
            saver.restore(sess, FLAGS.saved_model)

        writer.add_graph(sess.graph)
        for epoch_id in range(FLAGS.maxepoch):
            trainer.train_epoch(sess,
                                keep_prob=FLAGS.keep_prob,
                                summary_writer=writer)
            trainer.valid_epoch(sess,
                                dataflow=valid_data,
                                summary_writer=writer)

            # connection part
            msg = {}
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            server_address = ('0.0.0.0', 5555)
            sock.connect(server_address)

            # scheduler_op and target_gpus are assumed to be defined at module level
            if scheduler_op in ('g', 's'):
                saved_model = '{}vgg-cifar-epoch-{}'.format(
                    FLAGS.save_path, epoch_id)
                saver.save(sess, saved_model)
                # generate command
                if scheduler_op == 'g':
                    sch_gpus = FLAGS.gpus.split(',') + target_gpus
                else:
                    # Python lists do not support '-'; filter out the target GPUs
                    sch_gpus = [g for g in FLAGS.gpus.split(',')
                                if g not in target_gpus]

                current_gpus = ','.join(sch_gpus)

                config_command = '--train --port ' + str(FLAGS.port) + ' --data_path ' + str(FLAGS.data_path) +\
                                 ' --saved_model ' + saved_model + ' --save_path ' + FLAGS.save_path + ' --lr ' +\
                                 str(FLAGS.lr) + ' --bsize ' + str(FLAGS.bsize) + ' --keep_prob ' +\
                                 str(FLAGS.keep_prob) + ' --maxepoch ' + str(FLAGS.maxepoch-epoch_id-1) +\
                                 ' --gpus ' + current_gpus
                # send command back to the scheduler
                msg['config'] = config_command
                msg['id'] = FLAGS.id
                msg['ep'] = epoch_id + 1
                msg['gpus'] = sch_gpus
                sock.sendall(dict_to_binary(msg))
                # leave the scheduler to restart
                exit()
            else:
                msg['id'] = FLAGS.id
                msg['ep'] = epoch_id + 1
                sock.sendall(dict_to_binary(msg))

        saver.save(sess, '{}vgg-cifar-epoch-{}'.format(FLAGS.save_path,
                                                       epoch_id))
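
dict_to_binary() is not shown. Since the scheduler messages are plain dicts
sent over a TCP socket, a minimal sketch (JSON encoding is an assumption)
would be:

import json

def dict_to_binary(msg):
    # Assumed helper: serialize the message dict to bytes for sock.sendall()
    return json.dumps(msg).encode('utf-8')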
Example #10
def train(config_loader):
    """
    train main loop
    :param config_loader:
    :return:
    """
    # # since the number of the ship images in cifar10-batch1 is 1025,
    # the first 1000 images are for training, then the next 25 images are for testing
    train_image_num = 1000
    test_image_num = 25

    # start
    print("initial training progress....")

    print("load cifar data....")
    images, test_images = load_cifar(config_loader.data_dir, train_image_num,
                                     test_image_num)

    # checkpoint instance
    print("initial checkpoint....")
    checkpoint_prefix = os.path.join(config_loader.checkpoints_dir, "ckpt")

    # The call functions of the Generator and Discriminator have been
    # decorated with tf.contrib.eager.defun(), which gives a performance
    # speedup (~25 seconds per epoch)
    print("initial encoder....")
    encoder = auto.Encoder()
    print("initial decoder....")
    decoder = auto.Decoder()
    print("initial key encoder....")
    key_encoder = auto.Encoder()

    print("create noise....")
    noise = tf.zeros([1, 32, 32, 1], dtype=tf.float32)

    key_path = "../../watermark/key.npy"
    secret_key = None
    if os.path.exists(key_path):
        print("load key....")
        secret_key = np.load(key_path)
        # convert to tensor
        secret_key = tf.convert_to_tensor(secret_key, dtype=tf.float32)
    else:
        print("error: key not found")
        sys.exit()

    # initial optimizer
    print("initial optimizer....")
    train_optimizer = tf.train.AdamOptimizer(2e-4, beta1=0.5)

    checkpoint = tf.train.Checkpoint(train_optimizer=train_optimizer,
                                     encoder=encoder,
                                     decoder=decoder,
                                     key_encoder=key_encoder)

    print("initial train log....")
    log_tool = train_tool.LogTool(config_loader.log_dir,
                                  config_loader.save_period)

    # restoring the latest checkpoint in checkpoint_dir if necessary
    if config_loader.load_latest_checkpoint is True:
        checkpoint.restore(
            tf.train.latest_checkpoint(config_loader.checkpoints_dir))
        print("load latest checkpoint....")

    def train_each_round(epoch):
        # initial input number
        # image_num = 1
        # initial loss
        train_loss = 0
        # calculate image length
        image_len = images.shape[0]
        # each epoch, run all the images
        for i in range(image_len):
            # print("input_image {}".format(image_num))
            input_image = images[i:i + 1, :, :, :]
            # calculate input number
            # image_num = image_num + 1

            with tf.GradientTape() as train_tape:
                # global train
                train_loss = global_train_iterator(
                    input_image=input_image,
                    train_tape=train_tape,
                    key_encoder=key_encoder,
                    encoder=encoder,
                    decoder=decoder,
                    train_optimizer=train_optimizer,
                    secret_key=secret_key,
                    noise=noise)

        # save test result
        if epoch % config_loader.save_period == 0:
            rand = random.randint(0, test_images.shape[0] - 1)
            # get a random image
            input_image = test_images[rand:rand + 1, :, :, :]
            # encode
            encoder_output = encoder(input_image, training=True)

            # show encoder output with secret key
            key_output = key_encoder(secret_key, training=True)

            out_positive = tf.concat([encoder_output, key_output], axis=-1)
            # decoder
            decoder_output_positive = decoder(out_positive, training=True)

            # show encoder output with wrong keys:
            # flip 256, 512, and 1024 bits of the key and inspect the outputs
            # 1) flip 256 bits: the top-left 16x16 block
            wrong_key_256 = np.array(secret_key)
            wrong_key_256[0, 0:16, 0:16, 0] = -wrong_key_256[0, 0:16, 0:16, 0]
            wrong_key_256 = tf.convert_to_tensor(wrong_key_256)
            wrong_key_output_256 = key_encoder(wrong_key_256, training=True)
            out_negative_256 = tf.concat(
                [encoder_output, wrong_key_output_256], axis=-1)
            # decoder
            decoder_output_negative_256 = decoder(out_negative_256,
                                                  training=True)

            # 2) flip 512 bits: the top 32x16 block
            wrong_key_512 = np.array(secret_key)
            wrong_key_512[0, 0:32, 0:16, 0] = -wrong_key_512[0, 0:32, 0:16, 0]
            wrong_key_512 = tf.convert_to_tensor(wrong_key_512)
            wrong_key_output_512 = key_encoder(wrong_key_512, training=True)
            out_negative_512 = tf.concat(
                [encoder_output, wrong_key_output_512], axis=-1)
            # decoder
            decoder_output_negative_512 = decoder(out_negative_512,
                                                  training=True)

            # 3) flip all 1024 bits
            wrong_key_1024 = -1 * secret_key
            wrong_key_output_1024 = key_encoder(wrong_key_1024, training=True)
            out_negative_1024 = tf.concat(
                [encoder_output, wrong_key_output_1024], axis=-1)
            # decoder
            decoder_output_negative_1024 = decoder(out_negative_1024,
                                                   training=True)

            titles = ["IN", "EN", "DE+", "DE-256", "DE-512", "DE-1024"]
            image_list = [
                input_image,
                tf.reshape(encoder_output, [1, 128, 128, 3]),
                decoder_output_positive, decoder_output_negative_256,
                decoder_output_negative_512, decoder_output_negative_1024
            ]

            # record
            log_tool.save_image_list(image_list=image_list, title_list=titles)
            # evaluate in test data
            test_loss = evaluate_test_loss(test_images=test_images,
                                           image_num=test_image_num,
                                           encoder=encoder,
                                           key_encoder=key_encoder,
                                           decoder=decoder,
                                           secret_key=secret_key)
            # save loss and test loss in log file
            log_tool.save_loss(train_loss=train_loss, test_loss=test_loss)

    # start training
    foreach_training(log_tool=log_tool,
                     checkpoint=checkpoint,
                     checkpoint_prefix=checkpoint_prefix,
                     config_loader=config_loader,
                     train_each_round=train_each_round)
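
The key file loaded above is never created in this snippet. Since the
wrong-key experiments flip a 16x16 block (256 bits), a 32x16 block
(512 bits), and the whole key (1024 bits), the key shape must be
(1, 32, 32, 1). A one-time generation sketch (the ±1 binary encoding is
inferred from the sign flips above) could be:

import numpy as np

# Generate the 1024-bit secret key once and save it where train() expects it
key = np.random.choice([-1.0, 1.0], size=(1, 32, 32, 1)).astype(np.float32)
np.save("../../watermark/key.npy", key)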