Example 1
def test_pix2pix_fid(model, opt):
    # force deterministic, single-image evaluation settings
    opt.phase = 'val'
    opt.num_threads = 0
    opt.batch_size = 1
    opt.serial_batches = True
    opt.no_flip = True
    opt.load_size = 256
    opt.display_id = -1
    dataset = create_dataset(opt)
    model.model_eval()

    result_dir = os.path.join(opt.checkpoints_dir, opt.name, 'test_results')
    util.mkdirs(result_dir)

    # run the generator over the validation set, keeping outputs keyed by input path
    fake_B = {}
    for i, data in enumerate(dataset):
        model.set_input(data)
        with torch.no_grad():
            model.forward()
        visuals = model.get_current_visuals()
        fake_B[data['A_paths'][0]] = visuals['fake_B']
        util.save_images(visuals, model.image_paths, result_dir, direction=opt.direction,
                         aspect_ratio=opt.aspect_ratio)

    # FID: compare generated images against pre-computed statistics of the real B domain
    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
    inception_model = InceptionV3([block_idx])
    inception_model.to(model.device)
    inception_model.eval()
    npz = np.load(os.path.join(opt.dataroot, 'real_stat_B.npz'))
    fid = get_fid(list(fake_B.values()), inception_model, npz, model.device,
                  opt.batch_size)

    return fid
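
This evaluator presupposes an already-built model and option object. A minimal driver sketch, assuming pix2pix-style TestOptions and create_model helpers (both names are assumptions, not shown in the example):

from options.test_options import TestOptions  # assumed module layout
from models import create_model               # assumed helper

if __name__ == '__main__':
    opt = TestOptions().parse()   # parses --dataroot, --name, --checkpoints_dir, ...
    model = create_model(opt)
    model.setup(opt)              # load networks and create schedulers
    print('FID: %.2f' % test_pix2pix_fid(model, opt))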
Example 2
def add_user(idUsuario, usuario):
    # parse the input parameters
    idUsuario = str(idUsuario)
    coop = str(usuario["cooperativa"])
    conta = str(usuario["conta"])
    nome = usuario["nome"]
    imagem = usuario["imagem"]
    base = imagem.split(",")[1]  # strip the "data:image/...;base64," prefix

    # define the user's workspace
    user_path = util.base_dir + idUsuario + "/"
    user_images = user_path + "images/base/"
    nome_imagem = "base_image_" + str(datetime.now()).replace(" ",
                                                              "_") + ".jpeg"
    fullpath = user_images + nome_imagem

    # build the object to insert into the database
    obj = {}
    obj['idUsuario'] = idUsuario
    obj['cooperativa'] = coop
    obj['conta'] = conta
    obj['nome'] = nome
    obj['imagem_base'] = fullpath

    user = provide.get_user(idUsuario, collection_users)
    if user is None:
        # save the base authentication image in the user's workspace
        util.configure_workspace(idUsuario)
        util.save_images(fullpath, base)
        obj['matrix_base'] = generate_matrix(obj)
        provide.insereDB(obj, collection_users)
        return jsonify({"mensagem": "Usuário criado"}), 201
    else:
        return jsonify({"mensagem": "Usuário com esse ID já existe"}), 409
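
The jsonify calls and (response, status) return tuples imply a Flask app. A minimal route-wiring sketch; the endpoint path is a hypothetical choice, not taken from the example:

from flask import Flask, request

app = Flask(__name__)

@app.route('/usuarios/<idUsuario>', methods=['POST'])
def create_user_route(idUsuario):
    # expected body: {"cooperativa": ..., "conta": ..., "nome": ...,
    #                 "imagem": "data:image/jpeg;base64,..."}
    return add_user(idUsuario, request.get_json())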
Example 3
def main():
    _, palette = util.get_label_info(
        os.path.join(config.data_dir, "class_dict.csv"))

    model = nn.build_model(classes=len(palette))
    model(tf.zeros((1, config.height, config.width, 3)))  # build weights via a dummy forward pass

    # image file names without the 4-character extension (e.g. ".png")
    file_names = [
        file_name[:-4] for file_name in os.listdir(
            os.path.join(config.data_dir, config.image_dir))
    ]
    for file_name in tqdm.tqdm(file_names):
        image = util.load_image(file_name)
        label = util.load_label(file_name)
        image, label = util.random_crop(image, label)

        image = np.expand_dims(image, 0).astype('float32')

        output = model.predict(image / 255.0)
        output = np.argmax(output[0], axis=-1)  # per-pixel class index
        output = util.colour_code_segmentation(output, palette)
        output = np.uint8(output)
        util.save_images([output, label],
                         os.path.join('results', f'{file_name}.jpg'),
                         titles=['Pred', 'Label'])
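
util.colour_code_segmentation is not shown here; a plausible minimal sketch, assuming it maps each class index to its palette colour (inferred from the call sites in this example and Example 7):

import numpy as np

def colour_code_segmentation(mask, palette):
    # mask: HxW array of class indices; palette: sequence of RGB triples
    return np.asarray(palette)[mask]  # -> HxWx3 colour image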
Example 4
def test_pix2pix_mIoU(model, opt):
    # force deterministic, single-image evaluation settings
    opt.phase = 'val'
    opt.num_threads = 0
    opt.batch_size = 1
    opt.serial_batches = True
    opt.no_flip = True
    opt.load_size = 256
    opt.display_id = -1
    dataset = create_dataset(opt)
    model.model_eval()

    result_dir = os.path.join(opt.checkpoints_dir, opt.name, 'test_results')
    util.mkdirs(result_dir)

    fake_B = {}
    names = []
    for i, data in enumerate(dataset):
        model.set_input(data)

        with torch.no_grad():
            model.forward()

        visuals = model.get_current_visuals()
        fake_B[data['A_paths'][0]] = visuals['fake_B']

        # collect the base name of each processed image
        for path in model.image_paths:
            short_path = ntpath.basename(path[0])
            name = os.path.splitext(short_path)[0]
            if name not in names:
                names.append(name)
        util.save_images(visuals,
                         model.image_paths,
                         result_dir,
                         direction=opt.direction,
                         aspect_ratio=opt.aspect_ratio)

    # DRN segmentation network used to score the generated images
    drn_model = DRNSeg('drn_d_105', 19, pretrained=False).to(model.device)
    util.load_network(drn_model, opt.drn_path, verbose=False)
    drn_model.eval()

    mIoU = get_mIoU(list(fake_B.values()),
                    names,
                    drn_model,
                    model.device,
                    table_path=os.path.join(opt.dataroot, 'table.txt'),
                    data_dir=opt.dataroot,
                    batch_size=opt.batch_size,
                    num_workers=opt.num_threads)
    return mIoU
Example 5
def evaluate_cam(val_loader, model, criterion, args):
    losses = AverageMeter('Loss', ':.4e')
    ap = APMeter()

    # switch to evaluate mode
    model.eval()

    # ImageNet mean/std, reshaped for broadcasting, to de-standardise images for visualisation
    image_mean_value = torch.reshape(torch.tensor([0.485, 0.456, 0.406]), (1, 3, 1, 1))
    image_std_value = torch.reshape(torch.tensor([0.229, 0.224, 0.225]), (1, 3, 1, 1))

    with torch.no_grad():
        for i, (images, target, image_id) in enumerate(tqdm(val_loader, desc='Evaluate')):
            images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)

            # compute output
            output = model(images)
            loss = criterion(output, target)

            # measure accuracy and record loss
            ap.add(output.detach(), target)
            losses.update(loss.item(), images.size(0))

            # image de-normalizing: NCHW RGB tensors -> NHWC BGR arrays in [0, 255]
            images = images.clone().detach().cpu() * image_std_value + image_mean_value
            images = images.numpy().transpose(0, 2, 3, 1) * 255.
            images = images[:, :, :, ::-1]  # RGB -> BGR

            # extract CAM
            cam = get_cam_all_class(model, target)
            cam = cam.cpu().numpy().transpose(0, 2, 3, 1)

            # for each image in the batch, blend one CAM per class
            for j in range(cam.shape[0]):
                blend_tensor = torch.empty((cam.shape[3], 3, 321, 321))
                for k in range(cam.shape[3]):
                    cam_ = resize_cam(cam[j, :, :, k])
                    blend, heatmap = blend_cam(images[j], cam_)
                    if target[j, k]:
                        blend = mark_target(blend, text=CAT_LIST[k])
                    blend = blend[:, :, ::-1] / 255.
                    blend = blend.transpose(2, 0, 1)
                    blend_tensor[k] = torch.tensor(blend)
                save_images('result', i, j, blend_tensor, args)

    return ap.value(), losses.avg
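
resize_cam and blend_cam are external helpers; a minimal sketch of plausible OpenCV implementations, with signatures inferred from the call sites above (treat the details as assumptions):

import cv2
import numpy as np

def resize_cam(cam, size=(321, 321)):
    # upsample a single-class CAM and normalise it to [0, 1]
    cam = cv2.resize(cam, size)
    return (cam - cam.min()) / (cam.max() - cam.min() + 1e-8)

def blend_cam(image, cam):
    # overlay a JET heatmap on a BGR image with values in [0, 255]
    heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_JET)
    blend = 0.5 * image + 0.5 * heatmap
    return blend.astype(np.float32), heatmap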
Example 6
def user_auth(idUsuario, usuario):
    # parse the input parameters
    idUsuario = str(idUsuario)
    coop = str(usuario["cooperativa"])
    conta = str(usuario["conta"])
    nome = usuario["nome"]
    imagem = usuario["imagem"]
    base = imagem.split(",")[1]  # strip the "data:image/...;base64," prefix

    # define the user's workspace
    user_path = util.base_dir + idUsuario
    user_images = user_path + "/images/auth/"
    nome_imagem = "auth_image_" + str(datetime.now()).replace(" ", "_") + ".jpeg"
    fullpath = user_images + nome_imagem

    user = provide.get_user(idUsuario, collection_users)
    if user is not None:
        # save the authentication image and compare it against the stored base matrix
        util.save_images(fullpath, base)
        match = facial_auth(fullpath, user['matrix_base'])

        obj = {}
        obj['idUsuario'] = idUsuario
        obj['nome'] = nome
        obj['dth_auth'] = str(datetime.now()).replace(" ", "_")
        obj['imagem'] = fullpath
        obj['match'] = str(match[0])
        provide.insereDB(obj, collection_auth)

        if match[0]:  # first element of the result holds the boolean match flag
            return jsonify({"mensagem": "Usuário reconhecido"}), 200
        else:
            return jsonify({"mensagem": "Usuário não reconhecido"}), 403
    else:
        return jsonify({"mensagem": "Nenhum usuário encontrado com esse ID"}), 404
Example 7
def evaluate():
    model = torch.load(os.path.join('weights', 'model.pt'),
                       map_location=device)['model'].float().eval()

    half = device.type != 'cpu'  # run in FP16 only on GPU
    if half:
        model.half()
        # warm up the model with a dummy forward pass
        img = torch.zeros((1, 3, config.image_size, config.image_size),
                          device=device)
        model(img.half())

    file_names = [
        file_name[:-4] for file_name in os.listdir(
            os.path.join(config.data_dir, config.image_dir))
    ]

    for file_name in file_names:
        image = util.load_image(file_name)
        label = util.load_label(file_name)

        image, label = util.resize(image, label)

        image = torch.from_numpy(np.expand_dims(image.transpose(2, 0, 1), 0))
        image = image.to(device, non_blocking=True)
        image = image.half() if half else image.float()

        pred = model(image / 255.0)
        pred = pred.detach().cpu().numpy()[0].transpose(1, 2, 0)
        pred = util.reverse_one_hot(pred)  # per-class scores -> class indices
        pred = util.colour_code_segmentation(pred, palette)
        pred = np.uint8(pred)
        util.save_images([label, pred], ['TRUE', 'PRED'],
                         os.path.join('results', f'{file_name}.jpg'))
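
util.reverse_one_hot is likely a per-pixel argmax over the class axis (an assumption based on how its output feeds colour_code_segmentation):

import numpy as np

def reverse_one_hot(pred):
    # pred: HxWxC per-class scores -> HxW class indices
    return np.argmax(pred, axis=-1)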
Example 8
model = FETModel(opt)  # create a model given other options
model.setup(opt)  # regular setup: load and print networks; create schedulers
model.eval()

# create a website
web_dir = os.path.join(opt['testresults_dir'], opt['name'], 'test_%s' %
                       (opt['epoch']))  # define the website directory
webpage = html.HTML(
    web_dir, 'Test Experiment = %s, Epoch = %s' % (opt['name'], opt['epoch']))

cnt = 1
for source_path in source_paths:
    for refs_path in ref_paths:
        source = make_test_data(source_path, opt['load_size'],
                                opt['crop_size'])  # 1*3*128*128
        refs = torch.zeros(opt['K'], opt['input_nc'], opt['crop_size'],
                           opt['crop_size'])  # K*3*128*128
        for i, ref_path in enumerate(refs_path):
            refs[i] = make_test_data(ref_path, opt['load_size'],
                                     opt['crop_size'])
        refs.unsqueeze_(0)  # 1*K*3*128*128
        data = {'source': source, 'refs': refs}

        model.set_input(data)
        model.test()  # run inference
        visuals = model.get_current_visuals()  # get image results
        print('processing: %d/%d' % (cnt, len(source_paths) * len(ref_paths)))
        save_images(webpage, cnt, visuals, opt['K'])
        cnt += 1

webpage.save()  # save the HTML
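
make_test_data is not defined in this snippet; a minimal sketch under the usual load-resize-crop-normalise convention (the exact transforms are assumptions):

from PIL import Image
import torchvision.transforms as transforms

def make_test_data(path, load_size, crop_size):
    # load an RGB image and return a 1*3*crop_size*crop_size tensor in [-1, 1]
    tf = transforms.Compose([
        transforms.Resize(load_size),
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    return tf(Image.open(path).convert('RGB')).unsqueeze(0)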
Example 9
    def train_epoch(self,
                    sess,
                    saver,
                    train_next_element,
                    i_epoch,
                    n_batch,
                    truncated_norm,
                    z_fix=None):
        t_start = None
        global_step = 0
        for i_batch in range(n_batch):
            if i_batch == 1:
                t_start = time.time()
            batch_imgs = sess.run(train_next_element)
            batch_imgs = self.preprocess(batch_imgs)
            batch_z = truncated_norm.rvs(
                [self.args.batch_size, 1, 1, self.args.z_dim])
            feed_dict_ = {
                self.inputs: batch_imgs,
                self.z: batch_z,
                self.is_training: True
            }
            # update D network
            _, d_loss, d_lr, g_lr = sess.run(
                [self.train_d, self.d_loss, self.d_lr, self.g_lr],
                feed_dict=feed_dict_)
            self.d_loss_log.append(d_loss)

            # update G network
            g_loss = float('nan')  # keeps the %.3f log format valid on critic-only steps
            if i_batch % self.args.n_critic == 0:
                if self.args.ema_decay is not None:
                    _, g_loss, _, global_step = sess.run(
                        [self.ema_train_g, self.g_loss, self.add_step,
                         self.global_step],
                        feed_dict=feed_dict_)
                else:
                    _, g_loss, _, global_step = sess.run(
                        [self.train_g, self.g_loss, self.add_step,
                         self.global_step],
                        feed_dict=feed_dict_)
            self.g_loss_log.append(g_loss)

            last_train_str = (
                "[epoch:%d/%d, global_step:%d] -d_loss:%.3f -g_loss:%.3f -d_lr:%.e -g_lr:%.e"
                % (i_epoch + 1, int(self.args.epochs), global_step, d_loss,
                   g_loss, d_lr, g_lr))
            if i_batch > 0:
                last_train_str += (' -ETA:%ds' %
                                   util.cal_ETA(t_start, i_batch, n_batch))
            if (i_batch + 1) % 20 == 0 or i_batch == 0:
                tf.logging.info(last_train_str)

            # show fake_imgs
            if global_step % self.args.show_steps == 0:
                tf.logging.info('generating fake imgs in steps %d...' %
                                global_step)
                # EMA: stash the live generator weights, then switch to the averaged ones
                if self.args.ema_decay is not None:
                    # save temp weights for generator
                    saver.save(
                        sess,
                        os.path.join(self.args.checkpoint_dir,
                                     'temp_model.ckpt'))
                    sess.run(self.assign_vars,
                             feed_dict={
                                 self.inputs: batch_imgs,
                                 self.z: batch_z,
                                 self.is_training: False
                             })
                    tf.logging.info('After EMA...')

                if z_fix is not None:
                    show_z = z_fix
                else:
                    show_z = truncated_norm.rvs(
                        [self.args.batch_size, 1, 1, self.args.z_dim])
                fake_imgs = sess.run(self.fake_images,
                                     feed_dict={self.z: show_z})
                manifold_h = int(np.floor(np.sqrt(self.args.sample_num)))
                util.save_images(fake_imgs, [manifold_h, manifold_h],
                                 image_path=os.path.join(
                                     self.args.result_dir, 'fake_steps_' +
                                     str(global_step) + '.jpg'))
                if self.args.ema_decay is not None:
                    # restore temp weights for generator
                    saver.restore(
                        sess,
                        os.path.join(self.args.checkpoint_dir,
                                     'temp_model.ckpt'))
                    tf.logging.info('Recover weights over...')

        return global_step, self.d_loss_log, self.g_loss_log
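
util.cal_ETA is only called with i_batch > 0; a plausible sketch (the real helper is not shown):

import time

def cal_ETA(t_start, i_batch, n_batch):
    # average time per completed batch times the number of batches left
    elapsed = time.time() - t_start
    return int(elapsed / i_batch * (n_batch - i_batch))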
Example 10
    def train_epoch(self,
                    sess,
                    train_next_element,
                    i_epoch,
                    n_batch,
                    global_step,
                    truncated_norm,
                    z_fix=None):
        t_start = None
        for i_batch in range(n_batch):
            if i_batch == 1:
                t_start = time.time()
            batch_imgs = sess.run(train_next_element)
            batch_imgs = self.preprocess(batch_imgs)
            batch_z = truncated_norm.rvs(
                [self.args.batch_size, 1, 1, self.args.z_dim])
            feed_dict_ = {
                self.inputs: batch_imgs,
                self.z: batch_z,
                self.is_training: True
            }
            # update D network
            _, d_loss, d_grads_norm = sess.run(
                [self.train_d, self.d_loss, self.d_grads_norm],
                feed_dict=feed_dict_)
            self.d_loss_log.append(d_loss)

            # update G network
            g_loss = float('nan')  # keep the %.3f log format valid on critic-only steps
            g_grads_norm = float('nan')
            if i_batch % self.args.n_critic == 0:
                _, g_loss, g_grads_norm = sess.run(
                    [self.train_g, self.g_loss, self.g_grads_norm],
                    feed_dict=feed_dict_)
                self.g_loss_log.append(g_loss)

            global_step += 1

            last_train_str = (
                "[epoch:%d/%d, global_step:%d] -d_loss:%.3f -g_loss:%.3f -d_norm:%.3f -g_norm:%.3f"
                % (i_epoch + 1, int(self.args.epochs), global_step, d_loss,
                   g_loss, d_grads_norm, g_grads_norm))
            if i_batch > 0:
                last_train_str += (' -ETA:%ds' %
                                   util.cal_ETA(t_start, i_batch, n_batch))
            if (i_batch + 1) % 10 == 0 or i_batch == 0:
                tf.logging.info(last_train_str)

            # show fake_imgs
            if global_step % self.args.show_steps == 0:
                tf.logging.info('generating fake imgs in steps %d...' %
                                global_step)

                if z_fix is not None:
                    show_z = z_fix
                else:
                    show_z = batch_z
                fake_imgs = sess.run(self.fake_images,
                                     feed_dict={self.z: show_z})
                manifold_h = int(np.floor(np.sqrt(self.args.sample_num)))
                util.save_images(fake_imgs, [manifold_h, manifold_h],
                                 image_path=os.path.join(
                                     self.args.result_dir, 'fake_steps_' +
                                     str(global_step) + '.jpg'))

        return global_step, self.d_loss_log, self.g_loss_log
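
Examples 9 and 10 call util.save_images(fake_imgs, [manifold_h, manifold_h], image_path=...), the classic DCGAN-style grid saver. A minimal sketch, assuming NHWC float images in [-1, 1] (the actual utility is not shown):

import numpy as np
import imageio

def save_images(images, grid, image_path):
    # tile the first h*w images of an NHWC batch in [-1, 1] into one grid image
    h, w = grid
    images = (np.asarray(images) + 1.0) / 2.0  # back to [0, 1]
    n, ih, iw, c = images.shape
    canvas = np.zeros((h * ih, w * iw, c), dtype=np.float32)
    for idx, img in enumerate(images[:h * w]):
        row, col = divmod(idx, w)
        canvas[row * ih:(row + 1) * ih, col * iw:(col + 1) * iw] = img
    imageio.imwrite(image_path, (canvas * 255).astype(np.uint8))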