Example no. 1
import glob

import tensorflow as tf


def main():
    # `a` (parsed command-line args), `analyze` and `create_generator` come
    # from the surrounding script.
    searchPath = "Testdata/*.png"
    # Use the same patch size for all images.
    patchSize = a.scale_size
    inputs = tf.placeholder(tf.float32, [1, patchSize, patchSize, 3])
    with tf.variable_scope("generator"):
        out_channels = 3
        outputs = create_generator(inputs, out_channels)
    saver = tf.train.Saver()
    sess = tf.InteractiveSession()  # registers itself as the default session
    saver.restore(sess, "artificialKi67/compact.ckpt")
    files = glob.glob(searchPath)
    print(files)
    for myfile in files:
        print(myfile)
        analyze(myfile, outputs, inputs, patchSize)
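The example reads `a.scale_size` without defining `a`; in pix2pix-style scripts this is usually a module-level argparse namespace. A minimal sketch of how it could be produced, where the flag name and default are assumptions:

import argparse

# Hypothetical setup for the module-level `a` used above; `--scale_size`
# mirrors the attribute the example reads.
parser = argparse.ArgumentParser()
parser.add_argument("--scale_size", type=int, default=256,
                    help="edge length of the square patches fed to the generator")
a = parser.parse_args()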
Example no. 2
import numpy
from tqdm import tqdm

import helpers
import models


def training(epochs=1, batch_size=128):
    """Fit a GAN model.

    Parameters
    ----------
    epochs : int, optional
        Number of epochs (1 by default).
    batch_size : int, optional
        Number of batches per epoch and size of each batch (128 by default).

    Returns
    -------
    None

    """

    x_train = numpy.load('../data/train_data.npy')

    generator = models.create_generator()
    discriminator = models.create_discriminator()
    gan = models.create_gan(discriminator, generator)

    for e in range(1, epochs + 1):
        for _ in tqdm(range(batch_size), desc="Epoch {}".format(e)):
            noise = numpy.random.normal(0, 1, [batch_size, 100])

            generated_images = generator.predict(noise)

            image_batch = x_train[numpy.random.randint(low=0,
                                                       high=x_train.shape[0],
                                                       size=batch_size)]

            # Train the discriminator on a half-real, half-fake batch;
            # 0.9 instead of 1.0 is one-sided label smoothing.
            x = numpy.concatenate([image_batch, generated_images])
            y_dis = numpy.zeros(2 * batch_size)
            y_dis[:batch_size] = 0.9

            discriminator.trainable = True
            discriminator.train_on_batch(x, y_dis)

            # Train the generator through the stacked model with the
            # discriminator frozen.
            noise = numpy.random.normal(0, 1, [batch_size, 100])
            y_gen = numpy.ones(batch_size)

            discriminator.trainable = False
            gan.train_on_batch(noise, y_gen)

        if e == 1 or e % 20 == 0:
            helpers.plot_generated_images(e, generator)
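For completeness, a minimal way to invoke this function, assuming `../data/train_data.npy` exists and the `models` and `helpers` modules are importable:

if __name__ == '__main__':
    # Train for 100 epochs of 128 steps each (illustrative values).
    training(epochs=100, batch_size=128)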
Example no. 3
import os
import pathlib

import joblib
import numpy as np


def _subprocess(gpu_queue, args, models_dir, model_name, cv_index, cv_size,
                split_seed, tta_index):
    """Prediction work done in a child process."""
    # `_subprocess_context`, `_CACHE_NAME_FORMAT`, `_SIZE_PATTERNS`,
    # `_BATCH_SIZE`, `_BASE_SIZE` and `data` are module-level in the
    # original script.
    if 'CUDA_VISIBLE_DEVICES' not in os.environ:
        # Pin this process to one GPU.
        gpu_id = gpu_queue.get()
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
        # Silence TensorFlow logging.
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
        # Load the model.
        import models
        _subprocess_context['model'] = models.load(models_dir / model_name)
    else:
        import models

    result_path = pathlib.Path(models_dir / _CACHE_NAME_FORMAT.format(
        target=args.target, model=model_name, tta=tta_index))
    if result_path.is_file():
        print('Skipping: {}'.format(result_path))
        return

    seed = 1234 + tta_index
    np.random.seed(seed)

    assert args.target in ('val', 'test')
    if args.target == 'val':
        _, (X_target, _), _ = data.load_data(cv_index, cv_size, split_seed)
    else:
        _, _, X_target = data.load_data(cv_index, cv_size, split_seed)

    # Pick an input size for this TTA round and shrink the batch size
    # for larger images.
    pattern_index = len(_SIZE_PATTERNS) * tta_index // args.tta_size
    img_size = _SIZE_PATTERNS[pattern_index]
    batch_size = int(_BATCH_SIZE * ((_BASE_SIZE**2) /
                                    (img_size[0] * img_size[1]))**1.5)

    gen = models.create_generator(img_size, mixup=False)
    proba_target = _subprocess_context['model'].predict_generator(
        gen.flow(X_target,
                 batch_size=batch_size,
                 data_augmentation=True,
                 shuffle=False,
                 random_state=seed),
        steps=gen.steps_per_epoch(len(X_target), batch_size),
        verbose=0)

    result_path.parent.mkdir(parents=True, exist_ok=True)
    joblib.dump(proba_target, result_path)
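How this helper receives its `gpu_queue` is not shown; one plausible driver is a shared queue of GPU ids feeding a process pool, sketched here with assumed names (`_predict_all`) and an assumed GPU count of 2:

import multiprocessing as mp

def _predict_all(args, models_dir, model_name, cv_size, split_seed):
    # Hypothetical driver: each worker takes one GPU id from the queue on
    # its first task and keeps it via CUDA_VISIBLE_DEVICES.
    ctx = mp.get_context('spawn')  # spawn so CUDA is initialized per child
    manager = ctx.Manager()
    gpu_queue = manager.Queue()
    for gpu_id in range(2):  # assumed number of GPUs
        gpu_queue.put(gpu_id)
    with ctx.Pool(2) as pool:
        pool.starmap(_subprocess,
                     [(gpu_queue, args, models_dir, model_name, cv_index,
                       cv_size, split_seed, tta_index)
                      for cv_index in range(cv_size)
                      for tta_index in range(args.tta_size)])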
Example no. 4
import os

import torch
import torch.nn as nn
from torch.distributions import Bernoulli


class NoBoxAttack(Attack):  # enclosing class reconstructed; `Attack` base is inferred from the super() call
    def __init__(self, predict, loss_fn, args, logger=None):
        # `get_model_type`, `load_model`, `ARCHITECTURES` and `Net` come
        # from the project's helper modules.
        super(NoBoxAttack, self).__init__(predict, loss_fn, args.clip_min, args.clip_max)
        self.args = args
        flow_args = [args.n_blocks, args.flow_hidden_size, args.flow_hidden,
                     args.flow_model, args.flow_layer_type]
        self.G = create_generator(args, args.model, args.deterministic_G,
                                  args.dataset, flow_args)

        self.pick_prob_start = None
        self.robust_train_flag = False
        if args.train_with_robust:
            if args.ensemble_adv_trained:
                if args.dataset == "mnist":
                    adv_model_names = args.adv_models
                    model_type = get_model_type(adv_model_names[0])
                    self.robust_critic = load_model(args, adv_model_names[0],
                                                    type=model_type).to(args.dev)
                elif args.dataset == 'cifar':
                    adv_model_names = args.adv_models
                    adv_path = os.path.join(args.dir_test_models,
                                            "pretrained_classifiers",
                                            args.dataset, "ensemble_adv_trained",
                                            adv_model_names[0] + '.pt')
                    init_func, _ = ARCHITECTURES[adv_model_names[0]]
                    temp_model = init_func().to(args.dev)
                    self.robust_critic = nn.DataParallel(temp_model)
                    self.robust_critic.load_state_dict(torch.load(adv_path))
            else:
                self.robust_critic = Net(1, 28, 28).to(args.dev)
                self.robust_critic.load_state_dict(torch.load(args.robust_model_path))

            self.robust_train_flag = True
            self.pick_prob_start = args.robust_sample_prob
            self.pick_rob_prob = Bernoulli(torch.tensor([1 - args.robust_sample_prob]))
            self.anneal_rate = (1.0 - args.robust_sample_prob) / args.attack_epochs

        self.logger = logger
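How the annealed Bernoulli is consumed during the attack loop is not shown in this snippet; one plausible reading, sketched as a hypothetical helper (the method name and annealing direction are assumptions):

    def _maybe_use_robust_critic(self):
        # Hypothetical helper: sample whether this step is scored against
        # the robust critic, then anneal the probability by anneal_rate.
        use_robust = bool(self.pick_rob_prob.sample().item())
        p = min(1.0, self.pick_rob_prob.probs.item() + self.anneal_rate)
        self.pick_rob_prob = Bernoulli(torch.tensor([p]))
        return use_robust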
Example no. 5
import numpy as np


def training(epochs, batch_size):
    # learning_rate, beta_1, encoding_dims, outdir, ipython and `display`
    # (IPython.display) are assumed to be module-level in the original
    # script, as are load_data, create_generator, create_discriminator,
    # create_gan and plot_generated_images.
    X_train = load_data()
    batch_count = int(X_train.shape[0] / batch_size)

    generator = create_generator(learning_rate, beta_1, encoding_dims)
    discriminator = create_discriminator(learning_rate, beta_1)
    gan = create_gan(discriminator, generator, encoding_dims)

    valid = np.ones((batch_size, 1))
    fake = np.zeros((batch_size, 1))

    # Fixed noise so the plotted samples are comparable across epochs.
    seed = np.random.normal(0, 1, [25, encoding_dims])

    for e in range(1, epochs + 1):
        print("Epoch %d" % e)
        for _ in range(batch_count):
            noise = np.random.normal(0, 1, [batch_size, encoding_dims])
            generated_images = generator.predict(noise)

            image_batch = X_train[np.random.randint(low=0, high=X_train.shape[0],
                                                    size=batch_size)]

            # Train the discriminator on real and fake batches separately.
            discriminator.trainable = True
            d_loss_real = discriminator.train_on_batch(image_batch, valid)
            d_loss_fake = discriminator.train_on_batch(generated_images, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            noise = np.random.normal(0, 1, [batch_size, encoding_dims])

            # Train the generator through the stacked model.
            discriminator.trainable = False
            g_loss = gan.train_on_batch(noise, valid)

            print("%d [D loss: %f] [G loss: %f]" % (e, d_loss, g_loss))
        if ipython:
            display.clear_output(wait=True)
        plot_generated_images(e, generator, seed, outdir)
    generator.save('{}/gan_model'.format(outdir))
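The globals this example leans on are not shown; a hypothetical settings block (all values illustrative) that would let it run:

learning_rate = 0.0002   # typical Adam settings for DCGAN-style training
beta_1 = 0.5
encoding_dims = 100      # latent vector size
outdir = 'output'
ipython = False          # set True inside a notebook

training(epochs=400, batch_size=128)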
Example no. 6
import argparse

import horovod.keras as hvd
import joblib
import numpy as np
import sklearn.metrics


def _run():
    # `tk` (the author's toolkit), `data` and `_MODELS_DIR` are module-level
    # in the original script.
    import keras
    import models
    logger = tk.log.get(__name__)
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', help='Number of epochs.', default=300, type=int)
    parser.add_argument('--batch-size', help='Batch size.', default=16, type=int)
    parser.add_argument('--warm',
                        help='Load models/model.fold{cv_index}.h5.',
                        action='store_true',
                        default=False)
    parser.add_argument('--cv-index', help='Which CV fold to train.', type=int)
    parser.add_argument('--cv-size', help='Number of CV folds.', default=5, type=int)
    parser.add_argument('--split-seed', help='Random seed for the split.', default=123, type=int)
    args = parser.parse_args()
    assert args.cv_index in range(args.cv_size)
    model_path = _MODELS_DIR / 'model.fold{}.h5'.format(args.cv_index)

    (X_train,
     y_train), (X_val, y_val), _ = data.load_data(args.cv_index, args.cv_size,
                                                  args.split_seed)
    num_classes = len(np.unique(y_train))
    y_train = tk.ml.to_categorical(num_classes)(y_train)
    y_val = tk.ml.to_categorical(num_classes)(y_val)
    logger.info('len(X_train) = {} len(X_val) = {}'.format(
        len(X_train), len(X_val)))

    model = models.create_network(num_classes)

    # Learning rate:
    # - lr 0.5 with batch size 256 is a common baseline, so start from there
    # - reportedly it works well to scale it in proportion to the batch size
    lr = 0.5 * args.batch_size / 256 * hvd.size()
    opt = keras.optimizers.SGD(lr=lr, momentum=0.9, nesterov=True)
    opt = hvd.DistributedOptimizer(opt)
    model.compile(opt, 'categorical_crossentropy', ['acc'])

    if hvd.rank() == 0 and args.cv_index == 0:
        model.summary(print_fn=logger.info)
        logger.info('network depth: %d', tk.dl.count_network_depth(model))

    if args.warm:
        model.load_weights(str(model_path))
        logger.info('{} loaded'.format(model_path))
    else:
        assert not model_path.exists()  # guard against accidentally overwriting a model

    callbacks = []
    if args.warm and args.epochs < 300:  # shortened schedule
        callbacks.append(tk.dl.learning_rate_callback((0, 0.5)))
    else:
        callbacks.append(tk.dl.learning_rate_callback())
    callbacks.append(hvd.callbacks.BroadcastGlobalVariablesCallback(0))
    callbacks.append(hvd.callbacks.MetricAverageCallback())
    callbacks.append(
        hvd.callbacks.LearningRateWarmupCallback(warmup_epochs=5, verbose=1))
    if hvd.rank() == 0:
        callbacks.append(tk.dl.tsv_log_callback(_MODELS_DIR / 'history.tsv'))
    callbacks.append(tk.dl.freeze_bn_callback(0.95))

    gen = models.create_generator((299, 299), mixup=True)
    model.fit_generator(
        gen.flow(X_train,
                 y_train,
                 batch_size=args.batch_size,
                 data_augmentation=True,
                 shuffle=True),
        steps_per_epoch=gen.steps_per_epoch(len(X_train), args.batch_size) //
        hvd.size(),
        epochs=args.epochs,
        verbose=1 if hvd.rank() == 0 else 0,
        validation_data=gen.flow(X_val,
                                 y_val,
                                 batch_size=args.batch_size,
                                 shuffle=True),
        validation_steps=gen.steps_per_epoch(len(X_val), args.batch_size) //
        hvd.size(),  # the "* 3" is omitted
        callbacks=callbacks)

    if hvd.rank() == 0:
        model.save(str(model_path))

        proba_val = model.predict_generator(
            gen.flow(X_val, y_val, batch_size=args.batch_size),
            gen.steps_per_epoch(len(X_val), args.batch_size),
            verbose=1)
        joblib.dump(proba_val,
                    _MODELS_DIR / 'proba_val.fold{}.pkl'.format(args.cv_index))

        pred_val = proba_val.argmax(axis=-1)
        logger.info('val_acc: {:.1f}%'.format(
            sklearn.metrics.accuracy_score(y_val.argmax(axis=-1), pred_val) *
            100))
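This script presumes Horovod has already been initialized and each process pinned to its GPU before `_run()` is called. A typical TF1-era Keras setup for that, sketched here rather than taken from this repository:

import keras
import horovod.keras as hvd
import tensorflow as tf

hvd.init()  # one process per GPU, launched via horovodrun/mpirun

# Pin each process to a single GPU based on its local rank.
config = tf.ConfigProto()
config.gpu_options.visible_device_list = str(hvd.local_rank())
keras.backend.set_session(tf.Session(config=config))

_run()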