Example #1
def predict(dataset, network, models, dic, net_name):
    # test data
    val_data = dataset.data_generateor(
        'pred',
        rand_h=dic['rand_h'],
        rand_h_num=dic['rand_h_num'],
        height=dic['height'],
        pooling_stride=dic['pooling_stride'],
        batch=Config.pred_batch)
    val_data_sum = []
    pred_sum = []
    for i in range(2000 // Config.pred_batch):
        val_data_i = next(val_data)
        #print(np.shape(val_data[0]))
        pred = models.pred(network, val_data=val_data_i[0])
        val_data_sum.extend(val_data_i[1])
        pred_sum.extend(pred)

    # save the results
    save_data(
        file_dir='saved_results/' + net_name + '/',
        filename='predict',
        data=pred_sum)
    save_data(
        file_dir='saved_results/' + net_name + '/',
        filename='real_data',
        data=val_data_sum)

    # plot
    print(np.shape(pred_sum), np.shape(val_data_sum))
    plot_fig(val_data_sum, pred_sum, 'real', 'pred')
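All of these examples funnel into `plot_fig`. For the two-series call above, a minimal matplotlib-based stand-in (a hypothetical sketch; the project's actual `plot_fig` may accept more options) would be:

import matplotlib.pyplot as plt

def plot_fig(series_a, series_b, label_a, label_b):
    # Hypothetical sketch: overlay two 1-D series and label them.
    plt.figure()
    plt.plot(series_a, label=label_a)
    plt.plot(series_b, label=label_b)
    plt.legend()
    plt.show()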
Example #2
    def train(self, model, train_gen, val_gen, net_name, eps, is_graph=True):
        '''
        Train the model.
        '''
        assert self.mode == "training", "Create model in training mode."
        # Callbacks
        callbacks = [
            keras.callbacks.TensorBoard(
                log_dir=self.log_dir,
                histogram_freq=0,
                write_graph=True,
                write_images=False),
            keras.callbacks.ModelCheckpoint(
                self.checkpoint_path, verbose=0, save_weights_only=True),
        ]

        log("\nStarting at epoch {}.\n".format(self.epoch))
        log("Checkpoint Path: {}".format(self.checkpoint_path))

        model.compile(loss=Config.loss, optimizer=Config.optimizer)

        # if os.name is 'nt':
        #     workers = 0
        # else:
        #     workers = multiprocessing.cpu_count()
        history = model.fit_generator(
            train_gen,
            initial_epoch=self.epoch,
            epochs=eps,
            steps_per_epoch=self.STEPS_PER_EPOCH,
            callbacks=callbacks,
            validation_data=val_gen,
            validation_steps=self.VALIDATION_STEPS,
            #max_queue_size=100,
            #workers=0,
            #use_multiprocessing=False,
        )

        #model.save('save/train_model.h5')
        save_data(
            file_dir='saved_results/' + net_name + '/',
            filename='val_loss',
            data=history.history["val_loss"])

        save_data(
            file_dir='saved_results/' + net_name + '/',
            filename='train_loss',
            data=history.history["loss"])

        if is_graph:
            plot_fig(history.history["loss"], history.history["val_loss"],
                     'train_loss', 'val_loss')
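`Config.loss` and `Config.optimizer` come from the project's configuration; a minimal stand-in that satisfies the `compile` call (hypothetical values mirroring the usage above) is shown below. Note also that `fit_generator` is deprecated in recent Keras releases, where `model.fit` accepts generators directly.

class Config:
    # Hypothetical stand-in for the project's Config consumed by compile()
    loss = 'mse'
    optimizer = 'adam'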
Example #3
def test_model(model,
               labels,
               data_name='sk_eigenjoint_nor_528',
               valid_segment_idx=650):
    data = data_dir + '/' + data_name
    file_paths = glob(data + "/*.npy")
    losses = []
    ground_ys = []
    pred_ys = []
    for index, file_path in enumerate(file_paths):
        if index >= valid_segment_idx:
            print('Predict ' + file_path)
            valid_x = np.load(file_path)
            valid_x = np.reshape(valid_x,
                                 newshape=(1, valid_x.shape[0],
                                           valid_x.shape[1]))
            valid_y = labels[index]
            #                valid_y = valid_y[:-1]
            pred_y = model.predict_on_batch(valid_x)
            file_name = str(index - valid_segment_idx)
            plot_fig(pred_y, valid_y, file_name, save_flag=True)

            pred_y = np.argmax(pred_y, axis=2)
            pred_y = pred_y.ravel()
            ground_ys.append(valid_y)
            pred_ys.append(pred_y)
            #            pred_y = clear_pred(pred_y)

            loss = eval_jaccard(valid_y, pred_y)
            losses.append(loss)
    ground_ys = np.concatenate(ground_ys)
    pred_ys = np.concatenate(pred_ys)

    print(classification_report(ground_ys, pred_ys))

    cnf_matrix = confusion_matrix(ground_ys, pred_ys)
    np.set_printoptions(precision=2)
    cnf_matrix = cnf_matrix.astype('float') / cnf_matrix.sum(
        axis=1)[:, np.newaxis]
    plt.figure()
    plot_confusion_matrix(cnf_matrix,
                          classes=range(21),
                          normalize=True,
                          title='Normalized confusion matrix')

    plt.show()
    #    print(cnf_matrix)

    return losses, cnf_matrix
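`eval_jaccard` is defined elsewhere in the repository; a plausible per-label Jaccard index averaged over the labels present (hypothetical sketch, not necessarily the author's exact metric) is:

import numpy as np

def eval_jaccard(y_true, y_pred):
    # Hypothetical sketch: mean per-label Jaccard (IoU) over observed labels.
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    scores = []
    for lab in np.union1d(y_true, y_pred):
        inter = np.sum((y_true == lab) & (y_pred == lab))
        union = np.sum((y_true == lab) | (y_pred == lab))
        scores.append(inter / union)
    return float(np.mean(scores))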
Example #4
def train(config, model, sde, train_loader):
    ema_model = copy.deepcopy(model)
    optim = torch.optim.Adam(model.parameters(), lr=1e-3)
    loss_fn = get_sde_loss_fn(sde,
                              train=True,
                              reduce_mean=True,
                              continuous=True,
                              likelihood_weighting=False)
    ema = EMA(config.train.ema_decay)
    num = 0
    for epoch in range(config.train.epochs):
        with tqdm(train_loader) as it:
            for data, _ in it:
                num += 1
                optim.zero_grad()
                loss = loss_fn(model, data.cuda())
                loss.backward()
                optim.step()
                it.set_postfix(ordered_dict={
                    'train loss': loss.item(),
                    'epoch': epoch
                },
                               refresh=False)
                if num % config.train.update_ema_every == 0:
                    if num < 1000:
                        ema_model.load_state_dict(model.state_dict())
                    else:
                        ema.update_model_average(ema_model, model)

    # sample
    predictor = get_predictor(config.sampling.predictor)
    corrector = get_corrector(config.sampling.corrector)
    sampling_fn = get_pc_sampler(sde,
                                 config.shape,
                                 predictor=predictor,
                                 corrector=corrector,
                                 inverse_scaler=lambda x: x,
                                 snr=config.sampling.snr,
                                 eps=config.sampling.eps)

    samples, n = sampling_fn(model)
    samples = samples.detach().cpu().numpy()
    plot_fig(samples)
    # plt.show()
    return sde
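`EMA.update_model_average` is imported from elsewhere; the standard exponential-moving-average weight update it is expected to perform (a sketch under that assumption) looks like:

import torch

class EMA:
    # Hypothetical sketch of the EMA helper used above.
    def __init__(self, decay):
        self.decay = decay

    @torch.no_grad()
    def update_model_average(self, ema_model, model):
        # ema_param <- decay * ema_param + (1 - decay) * param
        for ema_p, p in zip(ema_model.parameters(), model.parameters()):
            ema_p.mul_(self.decay).add_(p, alpha=1.0 - self.decay)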
Example #5
    def histogramme(self):
        histo = histogramme_valeur(self.resultat)
        histo_lum = histogramme_luminance(self.resultat)
        ddp_ = ddp(self.resultat)
        res_ddp = ddp_cumule(self.resultat)
        # print(pt_to_pt(self.resultat, histogram_equalization, res_ddp, 0, 255, 0, 255))
        plot_fig(histo,
                 self.config.figure_param,
                 2,
                 2,
                 1,
                 title="Histogramme de Valeur",
                 clear=True)
        plot_fig(histo_lum,
                 self.config.figure_param,
                 2,
                 2,
                 2,
                 title="Histogramme de Luminance")
        plot_fig(ddp_, self.config.figure_param, 2, 2, 3, title="PDF")
        plot_fig(res_ddp, self.config.figure_param, 2, 2, 4, title="CDF")
        plt.show()
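`ddp` and `ddp_cumule` compute the probability density and its cumulative sum ("ddp" abbreviates the French densité de probabilité). For an 8-bit image they reduce to (hypothetical NumPy sketch):

import numpy as np

def ddp(image):
    # Hypothetical sketch: empirical PDF of 8-bit pixel values.
    hist = np.bincount(np.asarray(image, dtype=np.uint8).ravel(), minlength=256)
    return hist / hist.sum()

def ddp_cumule(image):
    # Hypothetical sketch: cumulative distribution (CDF) of that PDF.
    return np.cumsum(ddp(image))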
Example #6
            hypotheses, yy = get_hypotheses(num_eval_batches, num_eval_samples,
                                            sess, y_hat, m.idx2token)

            logging.info("# write results")
            model_output = "barrages_E%02dL%.2f" % (epoch, _loss)
            if not os.path.exists(hp.evaldir): os.makedirs(hp.evaldir)
            translation = os.path.join(hp.evaldir, model_output)
            with open(translation, 'w', encoding="utf-8") as fout:
                fout.write("\n".join(hypotheses))

            logging.info(hypotheses[1:10])
            logging.info("# calc bleu score and append it to translation")
            # calc_bleu(hp.eval_y2, translation)
            bleu_score.append(
                calc_belu_nltk(hp.bpe_model, hp.eval_y2, hypotheses))
            cur_epoch = list(range(len(bleu_score)))
            plot_fig(cur_epoch, bleu_score, "BLEU-2", hp.belu_fig)

            logging.info("# save models")
            ckpt_name = os.path.join(hp.logdir, model_output)
            saver.save(sess, ckpt_name, global_step=_gs)
            logging.info(
                "after training of {} epochs, {} has been saved.".format(
                    epoch, ckpt_name))

            logging.info("# fall back to train mode")
            sess.run(train_init_op)
    summary_writer.close()

logging.info("# Done")
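`calc_belu_nltk` (the misspelling of "bleu" is the repository's own) is not shown here; with plain whitespace tokenization, a corpus-level BLEU-2 via NLTK (hypothetical sketch that skips the BPE detokenization the real helper presumably does with `hp.bpe_model`) could be:

from nltk.translate.bleu_score import corpus_bleu, SmoothingFunction

def calc_bleu2(ref_path, hypotheses):
    # Hypothetical sketch: corpus BLEU-2 against one reference per hypothesis.
    with open(ref_path, encoding='utf-8') as f:
        references = [[line.split()] for line in f]
    hyps = [h.split() for h in hypotheses]
    return corpus_bleu(references, hyps, weights=(0.5, 0.5),
                       smoothing_function=SmoothingFunction().method1)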
Example #7
                                  num_bits=8,
                                  loss_type='hybrid',
                                  hybrid_coeff=0.001,
                                  model_prediction='x_start',
                                  model_output='logistic_pars').cuda()

optim = torch.optim.Adam(md.parameters(), lr=1e-3)
train_loader, test_loader, val_loader = get_mnist_loaders()

num = 0
for epoch in range(epochs):
    with tqdm(train_loader) as it:
        for x, label in it:
            num += 1
            optim.zero_grad()
            x *= 255
            x = x.long().cuda()
            loss = torch.sum(diffusion.training_losses(md, x))
            loss.backward()
            optim.step()
            it.set_postfix(ordered_dict={
                'train_loss': loss.item(),
                'epoch': epoch
            },
                           refresh=False)

shape = (36, 1, 28, 28)
samples = diffusion.p_sample_loop(md, shape, num_timesteps=None)
plot_fig(samples.detach().cpu().numpy(), show=True)
plt.show()
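`get_mnist_loaders` is project code; a torchvision equivalent returning train/test/validation loaders (hypothetical sketch — note that `ToTensor` yields floats in [0, 1], which is why the loop above multiplies by 255 before casting to long) could be:

import torch
from torchvision import datasets, transforms

def get_mnist_loaders(batch_size=64, root='./data'):
    # Hypothetical sketch: MNIST split into train/test/val DataLoaders.
    tfm = transforms.ToTensor()
    train_full = datasets.MNIST(root, train=True, download=True, transform=tfm)
    test = datasets.MNIST(root, train=False, download=True, transform=tfm)
    train, val = torch.utils.data.random_split(train_full, [50000, 10000])
    loader = lambda ds, shuffle: torch.utils.data.DataLoader(
        ds, batch_size=batch_size, shuffle=shuffle)
    return loader(train, True), loader(test, False), loader(val, False)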
Example #8
            if num % update_ema_every == 0:
                if num < 1000:
                    ema_model.load_state_dict(model.state_dict())
                else:
                    ema.update_model_average(ema_model, model)

    # sample images
    shape = (36, 1, 28, 28)
    l = 5
    cond = (torch.ones(36) * l).cuda()
    eta = 0.
    res = diffusion.sample(shape, cond=cond)
    res = res.detach().cpu().numpy()
    res1 = diffusion.ddim_sample(shape, eta, cond=cond).detach().cpu().numpy()
    plot_fig(res)
    plot_fig(res1)
    plt.show()

elif tp == 'mix_gau_ddpm':
    epochs = 20
    ema_decay = 0.9999
    update_ema_every = 10
    p = 0.5

    model = Unet(dim=16, channels=1, dim_mults=(1, 2, 4)).cuda()
    # betas = cosine_beta_schedule(1000, )
    betas = np.linspace(1e-4, 1e-2, 1000)
    diffusion = mix_gaussian_ddpm(model,
                                  loss_type='l2',
                                  betas=betas,
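The commented-out `cosine_beta_schedule` refers to the cosine noise schedule of Nichol & Dhariwal (2021); the standard formulation (a sketch, assuming that usual definition) is:

import numpy as np

def cosine_beta_schedule(timesteps, s=0.008):
    # Cosine schedule from "Improved Denoising Diffusion Probabilistic Models".
    steps = np.arange(timesteps + 1)
    alphas_cumprod = np.cos(((steps / timesteps) + s) / (1 + s) * np.pi / 2) ** 2
    alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
    betas = 1 - alphas_cumprod[1:] / alphas_cumprod[:-1]
    return np.clip(betas, 0, 0.999)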
Example #9
                               refresh=False)
        torch.save(s_phi.state_dict(), schedule_path)
else:
    s_phi.load_state_dict(torch.load(schedule_path))

if not load_res:
    for i in range(len(hat_alphan)):
        for j in range(len(hat_betan)):
            schedule_beta = schedule.noise_schedule(alpha=hat_alphan[i],
                                                    beta=hat_betan[j],
                                                    shape=(1, 1, 28, 28))
            write_to_file(schedule_beta.detach().cpu().numpy(), beta_path)
            img = schedule.sample(shape=config.shape, betas=schedule_beta)
            fig = plot_fig(
                img.detach().cpu().numpy(), father_path + '/fig/fig_' +
                str(round(hat_alphan[i].item(), 2)) + '_' +
                str(round(hat_betan[j].item(), 2)) + '_' +
                str(len(schedule_beta)) + '.jpg')
            # plt.imsave(father_path + '/fig/fig_' + str(i) + '_' + str(j) + '.jpg', fig)
            plt.close()
else:
    schedule_betas = read_file(beta_path).to('cuda:0')
    for i in range(schedule_betas.shape[0]):
        schedule_beta = schedule_betas[i]
        img = schedule.sample(shape=config.shape, betas=schedule_beta)
        fig = plot_fig(
            img.detach().cpu().numpy(),
            father_path + '/fig/fig_' + str(i) + '_' + str(i + 10) + '.jpg')
        # plt.imsave(father_path + '/fig/fig_' + str(i) + '_' + str(i+10) + '.jpg', fig)
        plt.close()
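`write_to_file` and `read_file` persist the searched schedules between runs; under the assumption that all schedules share one length, an appending torch-based pair (hypothetical sketch) could be:

import os
import torch

def write_to_file(array, path):
    # Hypothetical sketch: append one schedule as a new row of a saved tensor.
    rows = list(torch.load(path)) if os.path.exists(path) else []
    rows.append(torch.as_tensor(array))
    torch.save(torch.stack(rows), path)

def read_file(path):
    # Hypothetical sketch: load all saved schedules as a 2-D tensor.
    return torch.load(path)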
Example #10
def padim(category, batch_size, rd, net_type='eff', is_plot=False):
    loader = MVTecADLoader()
    loader.load(category=category, repeat=1, max_rot=0)

    train_set = loader.train.batch(batch_size=batch_size,
                                   drop_remainder=True).shuffle(
                                       buffer_size=loader.num_train,
                                       reshuffle_each_iteration=True)
    test_set = loader.test.batch(batch_size=1, drop_remainder=False)

    net, _shape = embedding_net(net_type=net_type)
    h, w, c = _shape  # h, w: spatial size of layer 1; c: channel sum of layers 1, 2, and 3

    out = []
    for x, _, _ in train_set:
        l1, l2, l3 = net(x)
        _out = tf.reshape(embedding_concat(embedding_concat(l1, l2), l3),
                          (batch_size, h * w, c))  # (b, h x w, c)
        out.append(_out.numpy())

    # calculate multivariate Gaussian distribution.
    out = np.concatenate(out, axis=0)
    out = np.transpose(out, axes=[0, 2, 1])  # (b, c, h * w)

    # RD: random dimension selecting
    tmp = tf.unstack(out, axis=0)
    _tmp = []
    rd_indices = tf.random.shuffle(tf.range(c))[:rd]
    for tensor in tmp:
        _tmp.append(tf.gather(tensor, rd_indices))
    out = tf.stack(_tmp, axis=0)

    mu = np.mean(out, axis=0)
    cov = np.zeros((rd, rd, h * w))
    identity = np.identity(rd)

    for idx in range(h * w):
        cov[:, :, idx] = np.cov(out[:, :, idx], rowvar=False) + 0.01 * identity

    train_outputs = [mu, cov]

    out, gt_list, gt_mask, batch_size, test_imgs = [], [], [], 1, []
    #  x - data |   y - mask    |   z - binary label
    for x, y, z in test_set:
        test_imgs.append(x.numpy())
        gt_list.append(z.numpy())
        gt_mask.append(y.numpy())

        l1, l2, l3 = net(x)
        _out = tf.reshape(embedding_concat(embedding_concat(l1, l2), l3),
                          (batch_size, h * w, c))  # (BS, h x w, c)
        out.append(_out.numpy())

    # calculate multivariate Gaussian distribution. skip random dimension selecting
    out = np.concatenate(out, axis=0)
    gt_list = np.concatenate(gt_list, axis=0)
    out = np.transpose(out, axes=[0, 2, 1])

    # RD
    tmp = tf.unstack(out, axis=0)
    _tmp = []
    for tensor in tmp:
        _tmp.append(tf.gather(tensor, rd_indices))
    out = tf.stack(_tmp, axis=0)

    b, _, _ = out.shape

    dist_list = []
    for idx in range(h * w):
        mu = train_outputs[0][:, idx]
        cov_inv = np.linalg.inv(train_outputs[1][:, :, idx])
        dist = [mahalanobis(sample[:, idx], mu, cov_inv) for sample in out]
        dist_list.append(dist)

    dist_list = np.reshape(np.transpose(np.asarray(dist_list), axes=[1, 0]),
                           (b, h, w))

    ################
    #   DATA Level #
    ################
    # upsample
    score_map = tf.squeeze(
        tf.image.resize(np.expand_dims(dist_list, -1), size=[h, w])).numpy()

    for i in range(score_map.shape[0]):
        score_map[i] = gaussian_filter(score_map[i], sigma=4)

    # Normalization
    max_score = score_map.max()
    min_score = score_map.min()
    scores = (score_map - min_score) / (max_score - min_score)
    scores = -scores

    # calculate image-level ROC AUC score
    img_scores = scores.reshape(scores.shape[0], -1).max(axis=1)

    gt_list = np.asarray(gt_list)
    img_roc_auc = metrics.roc_auc_score(gt_list, img_scores)

    if is_plot:
        fpr, tpr, _ = metrics.roc_curve(gt_list, img_scores)
        precision, recall, _ = metrics.precision_recall_curve(
            gt_list, img_scores)

        save_dir = os.path.join(os.getcwd(), 'img')
        if not os.path.isdir(save_dir):
            os.mkdir(save_dir)
        draw_auc(fpr, tpr, img_roc_auc,
                 os.path.join(save_dir, 'AUROC-{}.png'.format(category)))
        base_line = np.sum(gt_list) / len(gt_list)
        draw_precision_recall(
            precision, recall, base_line,
            os.path.join(save_dir, 'PR-{}.png'.format(category)))

    #################
    #   PATCH Level #
    #################
    # upsample
    score_map = tf.squeeze(
        tf.image.resize(np.expand_dims(dist_list, -1), size=[224,
                                                             224])).numpy()

    for i in range(score_map.shape[0]):
        score_map[i] = gaussian_filter(score_map[i], sigma=4)

    # Normalization
    max_score = score_map.max()
    min_score = score_map.min()
    scores = (score_map - min_score) / (max_score - min_score)
    # Note that Binary mask indicates 0 for good and 1 for anomaly. It is opposite from our setting.
    # scores = -scores

    # calculate per-pixel level ROCAUC
    gt_mask = np.asarray(gt_mask)
    fp_list, tp_list, _ = metrics.roc_curve(gt_mask.flatten(),
                                            scores.flatten())
    patch_auc = metrics.auc(fp_list, tp_list)

    precision, recall, threshold = metrics.precision_recall_curve(
        gt_mask.flatten(), scores.flatten(), pos_label=1)
    numerator = 2 * precision * recall
    denominator = precision + recall

    numerator[np.where(denominator == 0)] = 0
    denominator[np.where(denominator == 0)] = 1

    # get optimal threshold
    f1_list = numerator / denominator
    best_ths = threshold[np.argmax(f1_list).astype(int)]

    print('[{}] image ROCAUC: {:.04f}\t pixel ROCAUC: {:.04f}'.format(
        category, img_roc_auc, patch_auc))

    if is_plot:
        save_dir = os.path.join(os.getcwd(), 'img')
        if not os.path.isdir(save_dir):
            os.mkdir(save_dir)
        plot_fig(test_imgs, scores, gt_mask, best_ths, save_dir, category)

    return img_roc_auc, patch_auc
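A typical invocation over a single MVTec AD category might look like the following (illustrative values; 'bottle' is one of the standard category names):

# Illustrative call: EfficientNet backbone, 100 randomly kept embedding dims
img_auc, pixel_auc = padim('bottle', batch_size=32, rd=100,
                           net_type='eff', is_plot=True)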