Exemple #1
0
def main():
    """Equalize an image volume via a precomputed histogram lookup and sweep
    Bradley thresholds from 0.00 to 0.99, saving one segmentation per step.
    """
    args = parse_args()

    vol = load_volume(args.input, stop=args.depth, ext='.bmp')

    # Remap voxel intensities through the cumulative histogram
    # (histogram-equalization style lookup), then clamp into uint8 range.
    with h5py.File('downsampled_histogram.h5', 'r') as f:
        hist = f['hist'][:]
    cdf = np.cumsum(hist)
    vol = np.clip(np.floor(255 * cdf[vol]), 0, 255).astype(np.uint8)

    os.makedirs(args.output, exist_ok=True)

    sums, counts = summate(integral_image(vol), np.asarray(args.window))

    # One output directory per threshold value, named after the threshold.
    for t in np.arange(0, 1, 0.01):
        seg = threshold_bradley_nd(vol,
                                   t=t,
                                   s=np.asarray(args.window),
                                   sums=sums,
                                   counts=counts)
        out_dir = os.path.join(args.output, '{0:.2f}'.format(t))
        save_imgs(seg * 255, out_dir)
Exemple #2
0
def test(cfg: Namespace) -> None:
    """Run a trained CAE over the test set and save side-by-side collages.

    For each 720p frame the 6x10 grid of 128x128 patches is reconstructed
    patch by patch, the per-image mean MSE is logged, and the ground-truth
    frame is saved next to the reconstruction under `exp_dir / "out"`.

    Args:
        cfg: Namespace with checkpoint, device, exp_name, dataset_path,
            shuffle and batch_every fields.
    """
    assert cfg.checkpoint not in [None, ""]
    assert cfg.device == "cpu" or (cfg.device == "cuda"
                                   and T.cuda.is_available())

    exp_dir = ROOT_EXP_DIR / cfg.exp_name
    os.makedirs(exp_dir / "out", exist_ok=True)
    cfg.to_file(exp_dir / "test_config.json")
    logger.info(f"[exp dir={exp_dir}]")

    model = CAE()
    model.load_state_dict(T.load(cfg.checkpoint))
    model.eval()
    if cfg.device == "cuda":
        model.cuda()
    logger.info(f"[model={cfg.checkpoint}] on {cfg.device}")

    dataloader = DataLoader(dataset=ImageFolder720p(cfg.dataset_path),
                            batch_size=1,
                            shuffle=cfg.shuffle)
    logger.info(f"[dataset={cfg.dataset_path}]")

    loss_criterion = nn.MSELoss()

    for batch_idx, data in enumerate(dataloader, start=1):
        img, patches, _ = data
        if cfg.device == "cuda":
            patches = patches.cuda()

        out = T.zeros(6, 10, 3, 128, 128)
        avg_loss = 0

        for i in range(6):
            for j in range(10):
                # BUG FIX: was patches[...].cuda() unconditionally, which
                # crashed CPU runs; patches is already on the right device.
                x = patches[:, :, i, j, :, :]
                y = model(x)
                out[i, j] = y.data

                loss = loss_criterion(y, x)
                # each of the 60 patches contributes 1/60 to the mean loss
                avg_loss += (1 / 60) * loss.item()

        logger.debug("[%5d/%5d] avg_loss: %f", batch_idx, len(dataloader),
                     avg_loss)

        # Reassemble the 6x10 patch grid into a full 768x1280 frame.
        out = np.transpose(out, (0, 3, 1, 4, 2))
        out = np.reshape(out, (768, 1280, 3))
        out = np.transpose(out, (2, 0, 1))

        y = T.cat((img[0], out), dim=2)
        save_imgs(
            imgs=y.unsqueeze(0),
            to_size=(3, 768, 2 * 1280),
            name=exp_dir / f"out/test_{batch_idx}.png",
        )
Exemple #3
0
def train(args):
    """Train AutoencoderConv on 6x10 grids of 128x128 patches from 720p
    frames; periodically save sample reconstructions and checkpoints.

    Args:
        args: parsed CLI options (load, chkpt, dataset_path, batch_size,
            shuffle, num_workers, learning_rate, resume_epoch, num_epochs,
            out_every, save_every, exp_name).
    """
    model = AutoencoderConv().cuda()
    if args.load:
        model.load_state_dict(torch.load(args.chkpt))
        print("Loaded model from", args.chkpt)
    model.train()
    print("Done setup model")

    dataset = ImageFolder720p(args.dataset_path)
    dataloader = DataLoader(
        dataset, batch_size=args.batch_size, shuffle=args.shuffle,
        num_workers=args.num_workers
    )
    print("Done setup dataloader: {len} batches of size {batch_size}".format(
        len=len(dataloader), batch_size=args.batch_size))

    mse_loss = nn.MSELoss()
    # NOTE: an unused SGD optimizer used to be constructed here as well;
    # Adam is the optimizer actually used.
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate,
                                 weight_decay=1e-5)

    for ei in range(args.resume_epoch, args.num_epochs):
        for bi, (img, patches, _) in enumerate(dataloader):

            # One optimizer step per patch; 60 patches per image.
            avg_loss = 0
            for i in range(6):
                for j in range(10):
                    x = Variable(patches[:, :, i, j, :, :]).cuda()
                    y = model(x)
                    loss = mse_loss(y, x)

                    # BUG FIX: the mean over 60 patches needs weight 1/60;
                    # the old 0.6 inflated the reported loss 36x.
                    avg_loss += (1 / 60) * loss.item()

                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()

            print('[%3d/%3d][%5d/%5d] loss: %f' % (
                ei, args.num_epochs, bi, len(dataloader), avg_loss))

            # save img
            if bi % args.out_every == 0:
                # Reassemble the 6x10 patch grid into a 768x1280 frame and
                # save it next to the ground truth for inspection.
                out = torch.zeros(6, 10, 3, 128, 128)
                for i in range(6):
                    for j in range(10):
                        x = Variable(patches[0, :, i, j, :, :].unsqueeze(0)).cuda()
                        out[i, j] = model(x).cpu().data

                out = np.transpose(out, (0, 3, 1, 4, 2))
                out = np.reshape(out, (768, 1280, 3))
                out = np.transpose(out, (2, 0, 1))

                y = torch.cat((img[0], out), dim=2).unsqueeze(0)
                save_imgs(imgs=y, to_size=(3, 768, 2 * 1280),
                          name="out/{exp_name}/out_{ei}_{bi}.png".format(
                              exp_name=args.exp_name, ei=ei, bi=bi))

            # save model
            if bi % args.save_every == args.save_every - 1:
                torch.save(model.state_dict(),
                           "checkpoints/{exp_name}/model_{ei}_{bi}.state".format(
                               exp_name=args.exp_name, ei=ei, bi=bi))

    torch.save(model.state_dict(),
               "checkpoints/{exp_name}/model_final.state".format(
                   exp_name=args.exp_name))
Exemple #4
0
def test(cfg: Namespace) -> None:
    """Evaluate a trained CAE and save (ground truth | reconstruction) pairs.

    Args:
        cfg: Namespace with chkpt, device, exp_name, dataset_path and
            shuffle fields.
    """
    logger.info("=== Testing ===")

    # initial setup
    prologue(cfg)

    model = CAE()
    model.load_state_dict(torch.load(cfg.chkpt))
    model.eval()
    if cfg.device == "cuda":
        model.cuda()

    logger.info("Loaded model")

    dataset = ImageFolder720p(cfg.dataset_path)
    dataloader = DataLoader(dataset, batch_size=1, shuffle=cfg.shuffle)

    logger.info("Loaded data")

    loss_criterion = nn.MSELoss()

    for batch_idx, data in enumerate(dataloader, start=1):
        img, patches, _ = data
        if cfg.device == 'cuda':
            patches = patches.cuda()

        out = torch.zeros(6, 10, 3, 128, 128)
        avg_loss = 0

        for i in range(6):
            for j in range(10):
                # BUG FIX: was Variable(...).cuda() unconditionally, which
                # crashed CPU runs; patches is already on the right device.
                x = Variable(patches[:, :, i, j, :, :])
                y = model(x)
                out[i, j] = y.data

                loss = loss_criterion(y, x)
                # each of the 60 patches contributes 1/60 to the mean loss
                avg_loss += (1 / 60) * loss.item()

        logger.debug('[%5d/%5d] avg_loss: %f' %
                     (batch_idx, len(dataloader), avg_loss))

        # Reassemble the 6x10 patch grid into a full 768x1280 frame.
        out = np.transpose(out, (0, 3, 1, 4, 2))
        out = np.reshape(out, (768, 1280, 3))
        out = np.transpose(out, (2, 0, 1))

        y = torch.cat((img[0], out), dim=2)
        save_imgs(
            imgs=y.unsqueeze(0),
            to_size=(3, 768, 2 * 1280),
            name=f"../experiments/{cfg.exp_name}/out/test_{batch_idx}.png")

    # final setup
    epilogue(cfg)
Exemple #5
0
 def test(self, epoch):
     """Run inference over the test set and dump every prediction into a
     per-epoch output directory via save_imgs().
     """
     self.net.eval()
     out_dir = os.path.join(self.pred, f'epoch_{epoch}')
     if not os.path.exists(out_dir):
         os.mkdir(out_dir)
     with torch.no_grad():
         # enumerate replaces the manual pd_num counter (starts at 0)
         for idx, data in enumerate(tqdm(self.testset)):
             save_imgs(out_dir, self.net(data), idx)
Exemple #6
0
def main():
    """Threshold an image volume in overlapping subvolumes with Bradley's
    method, clean each binary chunk (opening + hole filling), and save the
    merged segmentation as images.
    """
    args = parse_args()

    start = time.time()

    print("Loading image volume")
    vol = load_volume(args.input)

    if args.show:
        plt.imshow(vol[0], cmap='Greys_r')
        plt.show()

    print("Prepping threshold subvolume shape")
    # Pad shape/step with leading volume dimensions when fewer entries than
    # vol.ndim were supplied on the command line.
    if len(args.shape) < len(vol.shape):
        shape = list(vol.shape[:len(vol.shape) - len(args.shape)])
        shape.extend(args.shape)
    else:
        shape = args.shape

    if len(args.step) < len(shape):
        step = list(vol.shape[:len(vol.shape) - len(args.step)])
        step.extend(args.step)
    else:
        step = args.step

    print("Thresholding subvolumes")
    # BUG FIX: a stray 'step = args.step' here used to overwrite the padded
    # step computed above, making the padding logic dead code.

    thresh = np.zeros(vol.shape)
    for i in range(0, vol.shape[0], step[0] if step else vol.shape[0]):
        endi = i + shape[0] if i + shape[0] < vol.shape[0] else vol.shape[0]
        for j in range(0, vol.shape[1], step[1] if step else vol.shape[1]):
            endj = j + shape[1] if j + shape[1] < vol.shape[1] else vol.shape[1]
            for k in range(0, vol.shape[2], step[2] if step else vol.shape[2]):
                endk = k + shape[2] if k + shape[2] < vol.shape[
                    2] else vol.shape[2]
                subvol = np.copy(vol[i:endi, j:endj, k:endk])
                subvol = threshold_bradley_nd(subvol,
                                              s=(4, shape[1], shape[2]),
                                              t=args.threshold)
                # Invert chunks where foreground came out as 0.
                subvol = np.abs(1 - subvol) if np.max(subvol) > 0 else subvol
                subvol = binary_opening(subvol)
                subvol = binary_fill_holes(subvol)
                subvol[subvol > 0] = 1
                thresh[i:endi, j:endj, k:endk] += subvol

    if args.show:
        plt.imshow(thresh[0], cmap='Greys_r')
        plt.show()
    thresh[thresh > 0] = 255
    print("Saving segmentation")
    save_imgs(thresh, args.output)
    print("Running Time: {}s".format(time.time() - start))
Exemple #7
0
def test(args):
    """Reconstruct the first 'frame_40' image with the autoencoder and save
    a (ground truth | reconstruction) collage, then stop.

    Args:
        args: parsed CLI options (chkpt, dataset_path, shuffle, out_dir).
    """
    model = AutoencoderConv().cuda()
    model.load_state_dict(torch.load(args.chkpt))
    model.eval()
    print("Loaded model from", args.chkpt)

    dataset = ImageFolder720p(args.dataset_path)
    dataloader = DataLoader(dataset, batch_size=1, shuffle=args.shuffle)
    print(f"Done setup dataloader: {len(dataloader)}")

    mse_loss = nn.MSELoss()

    for bi, (img, patches, path) in enumerate(dataloader):
        # only evaluate the frame named "frame_40"
        if "frame_40" not in path[0]:
            continue

        out = torch.zeros(6, 10, 3, 128, 128)
        avg_loss = 0

        for i in range(6):
            for j in range(10):
                x = Variable(patches[:, :, i, j, :, :]).cuda()
                y = model(x)
                out[i, j] = y.data

                loss = mse_loss(y, x)
                # BUG FIX: each of the 60 patches contributes 1/60 to the
                # mean loss; the old weight of 0.6 inflated it 36x.
                avg_loss += (1 / 60) * loss.item()

        print('[%5d/%5d] loss: %f' % (bi, len(dataloader), avg_loss))

        # Reassemble the 6x10 patch grid into a full 768x1280 frame.
        out = np.transpose(out, (0, 3, 1, 4, 2))
        out = np.reshape(out, (768, 1280, 3))
        out = np.transpose(out, (2, 0, 1))

        y = torch.cat((img[0], out), dim=2)
        save_imgs(imgs=y.unsqueeze(0),
                  to_size=(3, 768, 2 * 1280),
                  name=f"./test/{args.out_dir}/test_{bi}.png")

        break
    def img_sample(self, iter_time, save_dir, batch_size=4):
        """Draw a random training batch (with iris crops), run the generator,
        and save [segmentation, sample, image] stacks via utils.save_imgs().
        """
        batch = self.data.train_random_batch_include_iris(
            batch_size=batch_size)
        imgs, clses, segs, irises, _ = batch  # coordinates unused here

        feed_dict = {
            self.model.mask_tfph: segs,
            self.model.cls_tfph: clses,
            self.model.rate_tfph: 0.5,  # rate: 1 - keep_prob
            self.model.iden_model.img_tfph: irises,
            self.model.iden_model.train_mode: False,
        }

        samples = self.sess.run(self.model.g_sample, feed_dict=feed_dict)
        utils.save_imgs(img_stores=[segs, samples, imgs],
                        iter_time=iter_time,
                        save_dir=save_dir,
                        is_vertical=True)
    def save(self):
        """Apply the edited torrent form: prune/add per-section attrs,
        update the author's notify/featured lists, replace the poster
        image, store text fields and the torrent file, then persist.
        """
        # Attributes that do not apply to each section (to be dropped) ...
        torm = {
            'anime': ['imdb', 'artist', 'album', 'company'],
            'books': ['artist', 'album', 'year', 'company', 'tube', 'imdb',
                      'scrs'],
            'games': ['imdb', 'artist', 'album'],
            'movies': ['artist', 'album', 'company'],
            'music': ['scrs', 'imdb', 'company', 'tube', 'lngs'],
            'pics': ['imdb', 'artist', 'album', 'year', 'company', 'tube'],
            'tv': ['artist', 'album', 'company'],
            'misc': ['scrs', 'imdb', 'artist', 'album', 'year', 'company',
                     'tube', 'lngs'],
        }
        # ... and attributes that must be copied in from the form.
        toadd = {
            'anime': ['year', 'tube', 'lngs'],
            'books': ['lngs'],
            'games': ['year', 'company', 'lngs', 'tube'],
            'movies': ['imdb', 'year', 'tube', 'lngs'],
            'music': ['album', 'artist', 'year'],
            'pics': ['lngs'],
            'tv': ['imdb', 'year', 'tube', 'lngs'],
            'misc': [],
        }
        for k in torm.get(self.cleaned_data['section'], []):
            del self.instance.attrs[k]
        for k in toadd.get(self.cleaned_data['section'], []):
            self.instance.attrs[k] = self.cleaned_data[k]

        self.instance.section = self.cleaned_data['section']
        if self.cleaned_data['section'] == 'misc':
            self.instance.subcat = 'misc'
        else:
            self.instance.subcat = self.cleaned_data[
                self.cleaned_data['section']]

        # BUG FIX: dict.has_key() was removed in Python 3; 'in' is the
        # equivalent test and also works on Python 2 (assumes attrs is
        # dict-like, which its [] / del usage above already implies).
        if self.cleaned_data['notify'] == 'yes':
            if 'notify' in self.instance.author.attrs:
                l = self.instance.author.attrs['notify']
                l.append(self.instance.id)
                self.instance.author.attrs['notify'] = l
            else:
                self.instance.author.attrs['notify'] = [self.instance.id]
        if self.cleaned_data['featured'] == 'yes':
            if 'featured' in self.instance.author.attrs:
                l = self.instance.author.attrs['featured']
                l.append(self.instance.id)
                self.instance.author.attrs['featured'] = l
            else:
                self.instance.author.attrs['featured'] = [self.instance.id]

        if self.cleaned_data['poster']:
            # merged the two identical 'poster' checks: drop the old
            # image(s), then store the new upload
            drop_pics(self.instance)
            self.instance.poster = save_imgs(self.cleaned_data['poster'],
                                             self.instance.author.id,
                                             'poster')
        self.instance.title = self.cleaned_data['title']
        self.instance.text = self.cleaned_data['text']
        _save_torrent(self.instance, self.cleaned_data['torrent'])

        if self.cleaned_data['tube']:
            self.instance.attrs['tube'] = self.cleaned_data['tube']
        _update_notification_delivery_queue(self.cleaned_data['tags'],
                                            self.instance.author)
        self.instance.save()
        self.instance.tags = self.cleaned_data['tags']
Exemple #10
0
def test(cfg):
    """Run a trained CAE over the dataset and save reconstruction collages.

    Args:
        cfg: dict with 'exp_name', 'chkpt', 'dataset_path', 'shuffle'.
    """
    os.makedirs(f"./test/{cfg['exp_name']}", exist_ok=True)

    model = CAE().cuda()
    model.load_state_dict(torch.load(cfg['chkpt']))
    model.eval()
    # BUG FIX: logger.info("msg", arg) treats arg as a %-format argument and
    # silently drops it when the message has no placeholder.
    logger.info("Loaded model from %s", cfg['chkpt'])

    dataset = ImageFolder720p(cfg['dataset_path'])
    dataloader = DataLoader(dataset, batch_size=1, shuffle=cfg['shuffle'])
    logger.info(f"Done setup dataloader: {len(dataloader)}")

    mse_loss = nn.MSELoss()

    for bi, (img, patches, path) in enumerate(dataloader):

        out = torch.zeros(6, 10, 3, 128, 128)
        avg_loss = 0

        for i in range(6):
            for j in range(10):
                x = Variable(patches[:, :, i, j, :, :]).cuda()
                y = model(x)
                out[i, j] = y.data

                loss = mse_loss(y, x)
                # each of the 60 patches contributes 1/60 to the mean loss
                avg_loss += (1 / 60) * loss.item()

        logger.debug('[%5d/%5d] avg_loss: %f' % (bi, len(dataloader),
                                                 avg_loss))

        # Reassemble the 6x10 patch grid into a full 768x1280 frame.
        out = np.transpose(out, (0, 3, 1, 4, 2))
        out = np.reshape(out, (768, 1280, 3))
        out = np.transpose(out, (2, 0, 1))

        y = torch.cat((img[0], out), dim=2)
        save_imgs(imgs=y.unsqueeze(0), to_size=(3, 768, 2 * 1280),
                  name=f"./test/{cfg['exp_name']}/test_{bi}.png")
def test(solver, model_dir, log_dir, test_dir):
    """Restore the model (when requested), generate the test images, score
    top-k accuracy, and save [image, seg, output] stacks to test_dir.
    """
    if FLAGS.load_model is not None:
        flag, iter_time = solver.load_model(logger=None, model_dir=model_dir,
                                            is_train=False)

        if flag is True:
            print(' [!] Load Success! Iter: {}'.format(iter_time))
        else:
            # BUG FIX: the message formatted FLAGS.load_gan_model, which does
            # not match the flag checked above (FLAGS.load_model).
            exit(' [!] Failed to restore model {}'.format(FLAGS.load_model))

    segs, outputs, clses, imgs = solver.generate_test_imgs()
    solver.test_top_k(segs, outputs, clses, log_dir)

    print('Saving imgs...')
    for i in range(segs.shape[0]):
        # progress heartbeat every 100 images
        if i % 100 == 0:
            print('[{}/{}] saving...'.format(i, segs.shape[0]))

        utils.save_imgs(img_stores=[imgs[i:i + 1], segs[i:i + 1],
                                    outputs[i:i + 1]],
                        save_dir=test_dir,
                        img_name=os.path.basename(solver.data.test_paths[i]),
                        is_vertical=False, margin=0)
    def save(self):
        """Copy the validated profile fields onto the instance and save it."""
        data = self.cleaned_data
        if data['image']:
            self.instance.image = save_imgs(data['image'], self.instance.id,
                                            'avatar')
        if data['name']:
            self.instance.full_name = data['name']
        if not data['text']:
            self.instance.text = ""
        for field in ('title', 'birthday', 'im', 'text'):
            if data[field]:
                setattr(self.instance, field, data[field])
        self.instance.save()
    def sample(self, iterTime, saveDir, num_imgs=4):
        """Fetch one training batch's image / predicted class / segmentation
        from the graph and save up to num_imgs sample stacks.
        """
        feed = {
            self.model.ratePh: 0.5,  # rate: 1 - keep_prob
            self.model.trainMode: True
        }

        fetches = [self.model.imgTrain, self.model.predClsTrain,
                   self.model.segImgTrain]
        img, predCls, segImg = self.sess.run(fetches, feed_dict=feed)

        # if batch_size is bigger than num_imgs, we just show num_imgs
        num_imgs = np.minimum(num_imgs, img.shape[0])

        # Save imgs
        utils.save_imgs(
            img_stores=[img[:num_imgs], predCls[:num_imgs],
                        segImg[:num_imgs]],
            iterTime=iterTime,
            saveDir=saveDir,
            is_vertical=True)
 def save(self):
     """Apply validated profile fields to the user instance, then persist."""
     data = self.cleaned_data
     if data['image']:
         self.instance.image = save_imgs(data['image'],
                                         self.instance.id, 'avatar')
     if data['name']:
         self.instance.full_name = data['name']
     if not data['text']:
         self.instance.text = ""
     for attr in ('title', 'birthday', 'im', 'text'):
         if data[attr]:
             setattr(self.instance, attr, data[attr])
     self.instance.save()
def eval(args):
    """Build the two encoders and the decoder, optionally restore a
    checkpoint, and run save_imgs() over the evaluation inputs.

    Args:
        args: parsed CLI options (sep, resize, load, out, ...).
    """
    scale = int(args.resize / 64)
    e1 = E1(args.sep, scale)
    e2 = E2(args.sep, scale)
    decoder = Decoder(scale)

    if torch.cuda.is_available():
        e1 = e1.cuda()
        e2 = e2.cuda()
        decoder = decoder.cuda()

    # BUG FIX: _iter was referenced below but never defined when no
    # checkpoint directory was given (args.load == ''); default it to 0.
    _iter = 0
    if args.load != '':
        save_file = os.path.join(args.load, 'checkpoint')
        _iter = load_model_for_eval(save_file, e1, e2, decoder)

    e1 = e1.eval()
    e2 = e2.eval()
    decoder = decoder.eval()

    if not os.path.exists(args.out) and args.out != "":
        os.mkdir(args.out)

    save_imgs(args, e1, e2, decoder, _iter)
    def save(self):
        """Store a new torrent entry (poster, text fields, torrent file,
        tags, notifications) and return its id.
        """
        from tagging.models import TaggedItem
        data = self.cleaned_data
        self.instance.poster = save_imgs(data['poster'],
                                         self.instance.author.id, 'poster')
        self.instance.title = data['title']
        self.instance.text = data['text']
        # auto-approve unless the site runs with censorship enabled
        if not CENSORSHIP:
            self.instance.approved = True
        _save_torrent(self.instance, data['torrent'])

        self.instance.save()
        self.instance.tags = data['tags']
        _update_notification_delivery_queue(data['tags'],
                                            self.instance.author)
        return self.instance.id
    def save(self):
        """Persist a new torrent entry and return the instance id."""
        from tagging.models import TaggedItem
        inst = self.instance
        inst.poster = save_imgs(self.cleaned_data['poster'],
                                inst.author.id, 'poster')
        inst.title = self.cleaned_data['title']
        inst.text = self.cleaned_data['text']
        # auto-approve unless the site runs with censorship enabled
        if not CENSORSHIP:
            inst.approved = True
        _save_torrent(inst, self.cleaned_data['torrent'])

        inst.save()
        inst.tags = self.cleaned_data['tags']
        _update_notification_delivery_queue(self.cleaned_data['tags'],
                                            inst.author)
        return inst.id
Exemple #18
0
def train(cfg: Namespace) -> None:
    """Train the CAE on 6x10 grids of 128x128 patches cut from 720p frames.

    Logs batch/epoch average losses and parameter histograms to
    TensorBoard, saves sample reconstructions and periodic checkpoints
    under the experiment directory.

    Args:
        cfg: Namespace with device, exp_name, dataset_path, batch_size,
            shuffle, num_workers, learning_rate, start_epoch, num_epochs,
            batch_every, save_every and epoch_every fields.
    """
    print(cfg.device)
    assert cfg.device == 'cpu' or (cfg.device == 'cuda'
                                   and T.cuda.is_available())

    logger.info('training: experiment %s' % (cfg.exp_name))

    # make dir-tree
    exp_dir = ROOT_DIR / 'experiments' / cfg.exp_name
    for d in ['out', 'checkpoint', 'logs']:
        os.makedirs(exp_dir / d, exist_ok=True)

    cfg.to_file(exp_dir / 'train_config.txt')

    # tb writer
    writer = SummaryWriter(exp_dir / 'logs')

    model = CAE()
    model.train()
    if cfg.device == 'cuda':
        model.cuda()
    logger.info(f'loaded model on {cfg.device}')

    dataset = ImageFolder720p(cfg.dataset_path)
    dataloader = DataLoader(dataset,
                            batch_size=cfg.batch_size,
                            shuffle=cfg.shuffle,
                            num_workers=cfg.num_workers)
    logger.info('loaded dataset')

    optimizer = optim.Adam(model.parameters(),
                           lr=cfg.learning_rate,
                           weight_decay=1e-5)
    loss_criterion = nn.MSELoss()

    avg_loss, epoch_avg = 0.0, 0.0
    ts = 0  # tensorboard step for the batch-level scalars

    # EPOCHS
    for epoch_idx in range(cfg.start_epoch, cfg.num_epochs + 1):
        # BATCHES
        for batch_idx, data in enumerate(dataloader, start=1):
            img, patches, _ = data

            if cfg.device == 'cuda':
                patches = patches.cuda()

            # one optimizer step per patch; 60 patches per image
            avg_loss_per_image = 0.0
            for i in range(6):
                for j in range(10):
                    optimizer.zero_grad()

                    x = patches[:, :, i, j, :, :]
                    y = model(x)
                    loss = loss_criterion(y, x)

                    avg_loss_per_image += (1 / 60) * loss.item()

                    loss.backward()
                    optimizer.step()

            avg_loss += avg_loss_per_image
            epoch_avg += avg_loss_per_image

            if batch_idx % cfg.batch_every == 0:
                writer.add_scalar('train/avg_loss', avg_loss / cfg.batch_every,
                                  ts)

                for name, param in model.named_parameters():
                    writer.add_histogram(name, param, ts)

                logger.debug('[%3d/%3d][%5d/%5d] avg_loss: %.8f' %
                             (epoch_idx, cfg.num_epochs, batch_idx,
                              len(dataloader), avg_loss / cfg.batch_every))

                avg_loss = 0.0
                ts += 1
            # -- end batch every

            if batch_idx % cfg.save_every == 0:
                # Reassemble the 6x10 patch grid into a 768x1280 frame and
                # save it next to the ground truth for visual inspection.
                out = T.zeros(6, 10, 3, 128, 128)
                for i in range(6):
                    for j in range(10):
                        # BUG FIX: was an unconditional .cuda(), which
                        # crashed CPU runs; patches is already on the
                        # right device.
                        x = patches[0, :, i, j, :, :].unsqueeze(0)
                        out[i, j] = model(x).cpu().data

                out = np.transpose(out, (0, 3, 1, 4, 2))
                out = np.reshape(out, (768, 1280, 3))
                out = np.transpose(out, (2, 0, 1))

                y = T.cat((img[0], out), dim=2).unsqueeze(0)
                save_imgs(imgs=y,
                          to_size=(3, 768, 2 * 1280),
                          name=exp_dir / f'out/{epoch_idx}_{batch_idx}.png')
            # -- end save every
        # -- end batches

        if epoch_idx % cfg.epoch_every == 0:
            epoch_avg /= (len(dataloader) * cfg.epoch_every)

            # BUG FIX: this used to log avg_loss / cfg.batch_every instead
            # of the epoch average that was just computed.
            writer.add_scalar('train/epoch_avg_loss', epoch_avg,
                              epoch_idx // cfg.epoch_every)

            logger.info('Epoch avg = %.8f' % epoch_avg)
            epoch_avg = 0.0

            T.save(model.state_dict(),
                   exp_dir / f'checkpoint/model_{epoch_idx}.state')
        # -- end epoch every
    # -- end epochs

    # save final model
    T.save(model.state_dict(), exp_dir / 'model_final.state')

    # cleaning
    writer.close()
Exemple #19
0
def evaluate(model,
             eval_dataset,
             num_workers=0,
             print_detail=True,
             save_img=True):
    """
    Launch evaluation.

    Args:
        model (nn.Layer): A semantic segmentation model.
        eval_dataset (paddle.io.Dataset): Used to read and process validation datasets.
        num_workers (int, optional): Num workers for data loader. Default: 0.
        print_detail (bool, optional): Whether to print detailed information about the evaluation process. Default: True.
        save_img (bool, optional): Whether to save each batch's predicted label map under './output/'. Default: True.

    Returns:
        tuple: (PA, MPA, MIoU, FWIoU) — pixel accuracy, mean pixel accuracy,
            mean IoU and frequency-weighted IoU over the validation dataset.
    """
    logger.info('Validating')
    evaluator = Eval(eval_dataset.NUM_CLASSES)
    evaluator.reset()

    model.eval()

    nranks = paddle.distributed.ParallelEnv().nranks
    local_rank = paddle.distributed.ParallelEnv().local_rank
    if nranks > 1:
        # Initialize parallel environment if not done.
        if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized(
        ):
            paddle.distributed.init_parallel_env()

    batch_sampler = paddle.io.DistributedBatchSampler(
        eval_dataset, batch_size=1, shuffle=False, drop_last=True)
    loader = paddle.io.DataLoader(
        eval_dataset,
        batch_sampler=batch_sampler,
        num_workers=num_workers,
        return_list=True,
    )

    # verbose=2 keeps multi-process output readable
    progbar_val = progbar.Progbar(
        target=len(loader), verbose=0 if nranks < 2 else 2)

    reader_cost_averager = TimeAverager()
    batch_cost_averager = TimeAverager()
    batch_start = time.time()

    with paddle.no_grad():
        for idx, (x, y, _, item) in enumerate(loader):
            reader_cost_averager.record(time.time() - batch_start)

            # Forward
            y = y.astype('int64')
            pred = model(x)  # 1, c, h, w
            # models that return multiple heads: keep only the first
            if len(pred) > 1:
                pred = pred[0]

            # Convert to numpy
            label = y.squeeze(axis=1).numpy()  #
            argpred = np.argmax(pred.numpy(), axis=1)  # 1, 1, H, W
            if save_img:
                save_imgs(argpred, item, './output/')

            # Add to evaluator
            evaluator.add_batch(label, argpred)

            batch_cost_averager.record(
                time.time() - batch_start, num_samples=len(label))
            batch_cost = batch_cost_averager.get_average()
            reader_cost = reader_cost_averager.get_average()

            # only rank 0 reports progress, every 10 batches
            if local_rank == 0 and print_detail and idx % 10 == 0:
                progbar_val.update(idx + 1, [('batch_cost', batch_cost),
                                             ('reader cost', reader_cost)])
            reader_cost_averager.reset()
            batch_cost_averager.reset()
            batch_start = time.time()

        PA = evaluator.pixel_accuracy()
        MPA = evaluator.mean_pixel_accuracy()
        MIoU = evaluator.mean_iou()
        FWIoU = evaluator.fwiou()
        PC = evaluator.mean_precision()
        logger.info(
            'PA1:{:.3f}, MPA1:{:.3f}, MIoU1:{:.3f}, FWIoU1:{:.3f}, PC:{:.3f}'.
            format(PA, MPA, MIoU, FWIoU, PC))

    return PA, MPA, MIoU, FWIoU
Exemple #20
0
def train():
    """Train a WGAN-GP (Generator + Critic) on MNIST under a
    MirroredStrategy, saving sample grids every `save_interval` epochs.
    """
    mnist = tf.keras.datasets.mnist

    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0

    train_data = x_train

    if not os.path.exists('./images'):
        os.makedirs('./images')

    # settting hyperparameter
    latent_dim = 100
    epochs = 800
    batch_size = 32
    buffer_size = 6000
    save_interval = 50
    n_critic = 5  # critic updates per generator update (WGAN)

    strategy = tf.distribute.MirroredStrategy()
    print('Number of devies: {}'.format(strategy.num_replicas_in_sync))

    with strategy.scope():

        generator = Generator()
        discriminator = Critic()

        gen_optimizer = tf.keras.optimizers.Adam(0.0001, 0.5, 0.9)
        disc_optimizer = tf.keras.optimizers.Adam(0.0001, 0.5, 0.9)

    # BUG FIX: train_data is a NumPy array, which has no .map(); wrap it in
    # a tf.data.Dataset before building the input pipeline.
    train_dataset = tf.data.Dataset.from_tensor_slices(train_data).map(
        normalize).shuffle(buffer_size).batch(batch_size)

    @tf.function
    def train_discriminator(images):
        """One critic step: WGAN loss with gradient penalty."""
        noise = tf.random.normal([batch_size, latent_dim])

        with tf.GradientTape() as disc_tape:
            generated_imgs = generator(noise, training=True)

            generated_output = discriminator(generated_imgs, training=True)
            real_output = discriminator(images, training=True)

            # random point between real and fake for the gradient penalty
            interpolated_img = random_weighted_average(
                [images, generated_imgs])

            validity_interpolated = discriminator(interpolated_img,
                                                  training=True)

            disc_loss = discriminator_loss(real_output, generated_output,
                                           validity_interpolated,
                                           interpolated_img)

        grad_disc = disc_tape.gradient(disc_loss,
                                       discriminator.trainable_variables)
        disc_optimizer.apply_gradients(
            zip(grad_disc, discriminator.trainable_variables))

        return disc_loss

    @tf.function
    def train_generator():
        """One generator step: maximize the critic's score on fakes."""
        noise = tf.random.normal([batch_size, latent_dim])

        with tf.GradientTape() as gen_tape:
            generated_imgs = generator(noise, training=True)
            generated_output = discriminator(generated_imgs, training=True)

            gen_loss = generator_loss(generated_output)

        grad_gen = gen_tape.gradient(gen_loss, generator.trainable_variables)
        gen_optimizer.apply_gradients(
            zip(grad_gen, generator.trainable_variables))

        return gen_loss

    # fixed latent vectors so saved samples are comparable across epochs
    seed = tf.random.normal([16, latent_dim])

    for epoch in range(epochs):
        start = time.time()
        disc_loss = 0
        gen_loss = 0

        for images in train_dataset:
            disc_loss += train_discriminator(images)

            # train the generator once every n_critic critic updates
            if disc_optimizer.iterations.numpy() % n_critic == 0:
                gen_loss += train_generator()

        print('Time for epoch {} is {} sec - gen_loss = {}, disc_loss = {}'.
              format(epoch + 1,
                     time.time() - start, gen_loss / batch_size,
                     disc_loss / (batch_size * n_critic)))

        if epoch % save_interval == 0:
            save_imgs(epoch, generator, seed)
    def save(self):
        """Apply the edited torrent form: adjust per-section attrs, update
        the author's notify/featured lists, swap the poster image, store
        the text fields and torrent file, then persist the instance.
        """
        # attributes that do not apply to each section (dropped) ...
        torm = {
            'anime': ['imdb', 'artist', 'album', 'company'],
            'books':
            ['artist', 'album', 'year', 'company', 'tube', 'imdb', 'scrs'],
            'games': ['imdb', 'artist', 'album'],
            'movies': ['artist', 'album', 'company'],
            'music': ['scrs', 'imdb', 'company', 'tube', 'lngs'],
            'pics': ['imdb', 'artist', 'album', 'year', 'company', 'tube'],
            'tv': ['artist', 'album', 'company'],
            'misc': [
                'scrs', 'imdb', 'artist', 'album', 'year', 'company', 'tube',
                'lngs'
            ]
        }
        # ... and attributes copied in from the form per section
        toadd = {
            'anime': ['year', 'tube', 'lngs'],
            'books': ['lngs'],
            'games': ['year', 'company', 'lngs', 'tube'],
            'movies': ['imdb', 'year', 'tube', 'lngs'],
            'music': ['album', 'artist', 'year'],
            'pics': ['lngs'],
            'tv': ['imdb', 'year', 'tube', 'lngs'],
            'misc': [],
        }
        for k in torm.get(self.cleaned_data['section'], []):
            del self.instance.attrs[k]
        for k in toadd.get(self.cleaned_data['section'], []):
            self.instance.attrs[k] = self.cleaned_data[k]

        self.instance.section = self.cleaned_data['section']
        if self.cleaned_data['section'] == 'misc':
            self.instance.subcat = 'misc'
        else:
            self.instance.subcat = self.cleaned_data[
                self.cleaned_data['section']]

        # BUG FIX: dict.has_key() was removed in Python 3; 'in' is the
        # equivalent membership test (assumes attrs is dict-like, which
        # its [] / del usage above already implies).
        if self.cleaned_data['notify'] == 'yes':
            if 'notify' in self.instance.author.attrs:
                l = self.instance.author.attrs['notify']
                l.append(self.instance.id)
                self.instance.author.attrs['notify'] = l
            else:
                self.instance.author.attrs['notify'] = [self.instance.id]
        if self.cleaned_data['featured'] == 'yes':
            if 'featured' in self.instance.author.attrs:
                l = self.instance.author.attrs['featured']
                l.append(self.instance.id)
                self.instance.author.attrs['featured'] = l
            else:
                self.instance.author.attrs['featured'] = [self.instance.id]

        if self.cleaned_data['poster']:
            # merged the two identical 'poster' checks: remove the old
            # image(s), then store the new upload
            drop_pics(self.instance)
            self.instance.poster = save_imgs(self.cleaned_data['poster'],
                                             self.instance.author.id, 'poster')
        self.instance.title = self.cleaned_data['title']
        self.instance.text = self.cleaned_data['text']
        _save_torrent(self.instance, self.cleaned_data['torrent'])

        if self.cleaned_data['tube']:
            self.instance.attrs['tube'] = self.cleaned_data['tube']
        _update_notification_delivery_queue(self.cleaned_data['tags'],
                                            self.instance.author)
        self.instance.save()
        self.instance.tags = self.cleaned_data['tags']
Exemple #22
0
def test(cfg: Namespace) -> None:
    """Evaluate a trained CAE checkpoint and save side-by-side comparisons.

    Every image from ``cfg.dataset_path`` is reconstructed by the model, the
    (optionally weighted) MSE is logged per image, and the original plus the
    reconstruction are written next to each other as one PNG under the
    experiment's ``out_test`` directory.
    """
    logger.info("=== Testing ===")

    # Pick the autoencoder variant for the requested bits-per-pixel setting.
    if cfg.alg == "32":
        from autoencoder2bpp import AutoEncoder
    elif cfg.alg == "16":
        from autoencoder025bpp import AutoEncoder
    elif cfg.alg == "8":
        from autoencoder006bpp import AutoEncoder

    # initial setup (experiment directories, config dump, ...)
    prologue(cfg)

    model = CAE()
    model.load_state_dict(torch.load(cfg.chkpt))
    model.eval()
    if cfg.device == "cuda":
        model.cuda()

    logger.info("Loaded model")

    loader = DataLoader(ImageFolder720p(cfg.dataset_path),
                        batch_size=1,
                        shuffle=cfg.shuffle)

    logger.info("Loaded data")

    # Plain MSE unless a weight map is supplied.
    criterion = nn.MSELoss() if cfg.weigh_path is None else weighted_loss_function

    for batch_idx, batch in enumerate(loader, start=1):
        img, patches, _, weights = batch
        if cfg.device == 'cuda':
            patches = patches.cuda()

        out = torch.zeros(3, 256, 256)  # placeholder; overwritten with model output
        avg_loss = 0

        x = Variable(patches[:, :, :, :]).cpu()
        y = model(x).cpu()
        out = y.data

        if cfg.weigh_path:
            # Broadcast the single-channel weight map over the 3 color channels.
            w = Variable(weights)[:, None, :, :]
            w = torch.cat((w, w, w), dim=1)
            if cfg.device == "cuda":
                w = w.cuda()
            loss = criterion(y, x, w)
        else:
            loss = criterion(y, x)
        avg_loss += loss.item()

        logger.debug('[%5d/%5d] avg_loss: %f' % (batch_idx, len(loader), avg_loss))

        # save output
        out = np.reshape(out, (3, 256, 256))

        side_by_side = torch.cat((img[0], out), dim=2).unsqueeze(0)
        save_imgs(
            imgs=side_by_side,
            to_size=(3, 256, 2 * 256),
            name=f"../experiments/{cfg.exp_name}/out_test/test_{batch_idx}.png"
        )

    # final setup
    epilogue(cfg)
    def save(self):
        """Persist an uploaded screenshot and attach it to the topic.

        Stores the image via ``save_imgs``, appends the resulting filename to
        the topic's ``scrs`` attribute list, and saves the topic.
        """
        # Fix: the original body was TAB-indented while the `def` line used
        # spaces — a TabError under Python 3.  Normalized to spaces.
        screenshots = self.topic.attrs.get('scrs', [])
        filename = save_imgs(self.cleaned_data['scr'], self.topic.author.id, 'scr')
        screenshots.append(filename)
        self.topic.attrs['scrs'] = screenshots
        self.topic.save()
Exemple #24
0
def train():
    """Train an SRGAN generator/discriminator pair on 512x512 images.

    Uses adversarial BCE losses scaled by 1e-3 plus a VGG ``block5_conv4``
    content loss scaled by 0.006 (the SRGAN-VGG54 setup).  Prints batch
    losses every 100 batches (and dumps sample images via ``save_imgs``),
    then prints per-epoch loss averages.
    """
    if not os.path.exists('./images'):
        os.makedirs('./images')

    # setting hyperparameters
    epochs = 800
    batch_size = 4
    save_interval = 2  # NOTE(review): unused in this function — confirm intent

    # Model setting
    generator = Generator(n_blocks=5)
    discriminator = Discriminator()

    # Optimizer setting
    gen_optimizer = tf.keras.optimizers.Adam(0.0005, 0.9)
    disc_optimizer = tf.keras.optimizers.Adam(0.0005, 0.9)

    image_ds = tf.data.Dataset.list_files('./data/data512x512/*', shuffle=True)
    train_dataset = image_ds.map(lambda x: preprocess_data(x), num_parallel_calls=AUTOTUNE).batch(batch_size).prefetch(AUTOTUNE).cache()

    # Loss setting: frozen VGG feature extractor for the perceptual loss
    content_layer = 'block5_conv4'  # SRGAN-VGG54
    extractor = ContentModel(content_layer)
    extractor.trainable = False

    cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=False)
    mse = tf.keras.losses.MeanSquaredError()

    @tf.function
    def train_step(lr_images, hr_images):
        # persistent=True because the tape is queried twice below: once for
        # the generator gradients and once for the discriminator gradients.
        with tf.GradientTape(persistent=True) as tape:
            sr_images = generator(lr_images)  # sr -> super resolution

            real_output = discriminator(hr_images)
            fake_output = discriminator(sr_images)

            # adversarial loss (scaled down by 1e-3)
            gen_loss = generator_loss(cross_entropy, fake_output) * 1e-3
            disc_loss = discriminator_loss(cross_entropy, real_output, fake_output) * 1e-3

            # content loss on VGG features (0.006 scaling)
            hr_feat = extractor(hr_images)
            sr_feat = extractor(sr_images)
            cont_loss = content_loss(mse, hr_feat, sr_feat) * 0.006

            # perceptual loss = content + adversarial, used to update the generator
            perc_loss = cont_loss + gen_loss

        grad_gen = tape.gradient(perc_loss, generator.trainable_variables)
        grad_disc = tape.gradient(disc_loss, discriminator.trainable_variables)

        gen_optimizer.apply_gradients(zip(grad_gen, generator.trainable_variables))
        disc_optimizer.apply_gradients(zip(grad_disc, discriminator.trainable_variables))

        return gen_loss, disc_loss, cont_loss

    total_iter = 0
    for epoch in range(1, epochs + 1):
        start = time.time()
        total_gen_loss = 0
        total_disc_loss = 0
        total_cont_loss = 0

        # NOTE(review): `i` below is only bound inside this loop — an empty
        # dataset would raise NameError at the epoch summary print; confirm
        # the dataset is always non-empty.
        for i, (lr_images, hr_images) in enumerate(train_dataset, 1):
            total_iter += 1
            gen_loss, disc_loss, cont_loss = train_step(lr_images, hr_images)

            if i % 100 == 0:
                print(f'Batch:{i}({total_iter}) -> gen_loss: {gen_loss}, disc_loss: {disc_loss}, cont_loss: {cont_loss}')
                save_imgs(epoch, generator, lr_images, hr_images)

            total_gen_loss += gen_loss
            total_disc_loss += disc_loss
            total_cont_loss += cont_loss

        # Per-epoch averages use `i`, the number of batches seen this epoch.
        print('Time for epoch {} is {} sec -> gen_loss: {}, disc_loss: {}, cont_loss: {}'.format(epoch, 
                                                                                                 time.time() - start, 
                                                                                                 total_gen_loss / i, 
                                                                                                 total_disc_loss / i,
                                                                                                 total_cont_loss / i))
Exemple #25
0
def train():
    """Train a weight-clipping WGAN on MNIST.

    The critic is updated on every batch; the generator only every
    ``n_critic`` critic steps, per the WGAN training scheme.  Sample grids
    from a fixed noise ``seed`` are saved every ``save_interval`` epochs.
    """

    data, info = tfds.load("mnist",
                           with_info=True,
                           data_dir='/data/tensorflow_datasets')
    train_data = data['train']

    if not os.path.exists('./images'):
        os.makedirs('./images')

    # setting hyperparameters
    latent_dim = 100
    epochs = 800
    batch_size = 32
    buffer_size = 6000
    save_interval = 50
    n_critic = 5  # critic updates per generator update

    generator = Generator()
    discriminator = Critic()

    gen_optimizer = tf.keras.optimizers.RMSprop(0.00005)
    disc_optimizer = tf.keras.optimizers.RMSprop(0.00005)

    train_dataset = train_data.map(normalize).shuffle(buffer_size).batch(
        batch_size)

    @tf.function
    def train_discriminator(images):
        """One critic step: Wasserstein loss + weight clipping to [-0.01, 0.01]."""
        noise = tf.random.normal([batch_size, latent_dim])

        with tf.GradientTape() as disc_tape:
            generated_imgs = generator(noise, training=True)

            generated_output = discriminator(generated_imgs, training=True)
            real_output = discriminator(images, training=True)

            disc_loss = discriminator_loss(real_output, generated_output)

        grad_disc = disc_tape.gradient(disc_loss,
                                       discriminator.trainable_variables)
        disc_optimizer.apply_gradients(
            zip(grad_disc, discriminator.trainable_variables))

        # Clip critic weights (WGAN Lipschitz constraint), but leave the
        # Batch Normalization scale/shift parameters untouched.
        for param in discriminator.trainable_variables:
            # Except gamma and beta in Batch Normalization
            if param.name.split('/')[-1].find(
                    'gamma') == -1 and param.name.split('/')[-1].find(
                        'beta') == -1:
                param.assign(tf.clip_by_value(param, -0.01, 0.01))

        return disc_loss

    @tf.function
    def train_generator():
        """One generator step against the (frozen-for-this-step) critic."""
        noise = tf.random.normal([batch_size, latent_dim])

        with tf.GradientTape() as gen_tape:
            generated_imgs = generator(noise, training=True)
            generated_output = discriminator(generated_imgs, training=True)

            gen_loss = generator_loss(generated_output)

        grad_gen = gen_tape.gradient(gen_loss, generator.trainable_variables)
        gen_optimizer.apply_gradients(
            zip(grad_gen, generator.trainable_variables))

        return gen_loss

    # Fixed noise so saved sample grids are comparable across epochs.
    seed = tf.random.normal([16, latent_dim])

    for epoch in range(epochs):
        start = time.time()
        disc_loss = 0
        gen_loss = 0

        for images in train_dataset:
            disc_loss += train_discriminator(images)

            # Train the generator once every n_critic critic updates.
            if disc_optimizer.iterations.numpy() % n_critic == 0:
                gen_loss += train_generator()

        # NOTE(review): the averages divide by batch_size (and
        # batch_size * n_critic), not by the number of batches — verify this
        # is the intended normalization.
        print('Time for epoch {} is {} sec - gen_loss = {}, disc_loss = {}'.
              format(epoch + 1,
                     time.time() - start, gen_loss / batch_size,
                     disc_loss / (batch_size * n_critic)))

        if epoch % save_interval == 0:
            save_imgs(epoch, generator, seed)
Exemple #26
0
def train(cfg: Namespace) -> None:
    """Train the CAE on single 256x256 patches with periodic logging/saving.

    Every ``cfg.batch_every`` batches the running average loss and parameter
    histograms go to TensorBoard; every ``cfg.save_every`` batches a
    side-by-side input/reconstruction image is written; every
    ``cfg.epoch_every`` epochs the epoch-average loss is logged and a model
    checkpoint is saved.  A final checkpoint is written at the end.
    """
    logger.info("=== Training ===")

    # initial setup
    writer = prologue(cfg)

    # train-related code
    model = CAE()
    model.train()
    if cfg.device == "cuda":
        model.cuda()
    logger.debug(f"Model loaded on {cfg.device}")

    dataset = ImageFolder720p(cfg.dataset_path)
    dataloader = DataLoader(dataset,
                            batch_size=cfg.batch_size,
                            shuffle=cfg.shuffle,
                            num_workers=cfg.num_workers)
    logger.debug("Data loaded")

    optimizer = optim.Adam(model.parameters(),
                           lr=cfg.learning_rate,
                           weight_decay=1e-5)
    loss_criterion = nn.MSELoss()
    # scheduler = ...

    avg_loss, epoch_avg = 0.0, 0.0
    ts = 0  # TensorBoard time-step, bumped once per logged window

    # train-loop
    for epoch_idx in range(cfg.start_epoch, cfg.num_epochs + 1):

        # scheduler.step()

        for batch_idx, data in enumerate(dataloader, start=1):
            img, y, patches, _ = data

            if cfg.device == "cuda":
                patches = patches.cuda()

            avg_loss_per_image = 0.0  # loss accumulated for the current image
            for i in range(1):
                for j in range(1):
                    optimizer.zero_grad()

                    x = Variable(patches[:, :, i, j, :, :])
                    y = model(x)
                    loss = loss_criterion(y, x)

                    # The patch grid here is 1x1, so the image loss is just the
                    # single patch loss.  Fix: removed a stale `(1 / 60)`
                    # accumulation left over from the 6x10-patch variant, which
                    # double-counted the loss (inflating it by 61/60).
                    avg_loss_per_image += (1 / 1) * loss.item()

                    loss.backward()
                    optimizer.step()

            avg_loss += avg_loss_per_image
            epoch_avg += avg_loss_per_image

            if batch_idx % cfg.batch_every == 0:
                writer.add_scalar("train/avg_loss", avg_loss / cfg.batch_every,
                                  ts)

                for name, param in model.named_parameters():
                    writer.add_histogram(name, param, ts)

                logger.debug('[%3d/%3d][%5d/%5d] avg_loss: %.8f' %
                             (epoch_idx, cfg.num_epochs, batch_idx,
                              len(dataloader), avg_loss / cfg.batch_every))
                avg_loss = 0.0
                ts += 1

            if batch_idx % cfg.save_every == 0:
                # out = torch.zeros(6, 10, 3, 128, 128)
                out = torch.zeros(1, 1, 3, 256, 256)
                for i in range(1):
                    for j in range(1):
                        x = Variable(patches[0, :, i, j, :, :].unsqueeze(0))
                        # Fix: respect the configured device instead of calling
                        # .cuda() unconditionally (crashed on CPU-only runs).
                        if cfg.device == "cuda":
                            x = x.cuda()
                        out[i, j] = model(x).cpu().data

                out = np.transpose(out, (0, 3, 1, 4, 2))
                # out = np.reshape(out, (768, 1280, 3))
                out = np.reshape(out, (256, 256, 3))
                out = np.transpose(out, (2, 0, 1))

                y = torch.cat((img[0], out), dim=2).unsqueeze(0)
                save_imgs(
                    imgs=y,
                    to_size=(3, 256, 2 * 256),
                    name=
                    f"/data2/TDL/paper_fabric/workdir/4_30_short_eassy/{cfg.exp_name}/out/out_{epoch_idx}_{batch_idx}.png"
                )

        # -- batch-loop

        if epoch_idx % cfg.epoch_every == 0:
            epoch_avg /= (len(dataloader) * cfg.epoch_every)

            # Fix: log the epoch average itself.  The previous code logged
            # `avg_loss / cfg.batch_every`, which had typically just been
            # reset to 0 in the batch_every branch above.
            writer.add_scalar("train/epoch_avg_loss",
                              epoch_avg,
                              epoch_idx // cfg.epoch_every)

            logger.info("Epoch avg = %.8f" % epoch_avg)
            epoch_avg = 0.0
            torch.save(
                model.state_dict(),
                f"/data2/TDL/paper_fabric/workdir/4_30_short_eassy/{cfg.exp_name}/chkpt/model_{epoch_idx}.pth"
            )

    # -- train-loop

    # save final model
    torch.save(
        model.state_dict(),
        f"/data2/TDL/paper_fabric/workdir/4_30_short_eassy/{cfg.exp_name}/model_final.pth"
    )

    # final setup
    epilogue(cfg, writer)
Exemple #27
0
def train(cfg: Namespace) -> None:
    """Train the CAE on a 6x10 grid of 128x128 patches per 720p image.

    Builds the experiment directory tree, logs to TensorBoard every
    ``cfg.batch_every`` batches, saves a side-by-side reconstruction every
    ``cfg.save_every`` batches, checkpoints every ``cfg.epoch_every`` epochs,
    and writes a final model at the end.
    """
    assert cfg.device == "cpu" or (cfg.device == "cuda"
                                   and T.cuda.is_available())

    root_dir = Path(__file__).resolve().parents[1]

    logger.info("training: experiment %s" % (cfg.exp_name))

    # make dir-tree
    exp_dir = root_dir / "experiments" / cfg.exp_name

    for d in ["out", "checkpoint", "logs"]:
        os.makedirs(exp_dir / d, exist_ok=True)

    cfg.to_file(exp_dir / "train_config.json")

    # TensorBoard writer
    tb_writer = SummaryWriter(exp_dir / "logs")
    logger.info("started tensorboard writer")

    model = CAE()
    model.train()
    if cfg.device == "cuda":
        model.cuda()
    logger.info(f"loaded model on {cfg.device}")

    dataloader = DataLoader(
        dataset=ImageFolder720p(cfg.dataset_path),
        batch_size=cfg.batch_size,
        shuffle=cfg.shuffle,
        num_workers=cfg.num_workers,
    )
    logger.info(f"loaded dataset from {cfg.dataset_path}")

    optimizer = optim.Adam(model.parameters(),
                           lr=cfg.learning_rate,
                           weight_decay=1e-5)
    loss_criterion = nn.MSELoss()

    avg_loss, epoch_avg = 0.0, 0.0
    ts = 0  # TensorBoard time-step, bumped once per logged window

    # EPOCHS
    for epoch_idx in range(cfg.start_epoch, cfg.num_epochs + 1):
        # BATCHES
        for batch_idx, data in enumerate(dataloader, start=1):
            img, patches, _ = data

            if cfg.device == "cuda":
                patches = patches.cuda()

            # Average loss over the 60 patches of the current image.
            avg_loss_per_image = 0.0
            for i in range(6):
                for j in range(10):
                    optimizer.zero_grad()

                    x = patches[:, :, i, j, :, :]
                    y = model(x)
                    loss = loss_criterion(y, x)

                    avg_loss_per_image += (1 / 60) * loss.item()

                    loss.backward()
                    optimizer.step()

            avg_loss += avg_loss_per_image
            epoch_avg += avg_loss_per_image

            if batch_idx % cfg.batch_every == 0:
                tb_writer.add_scalar("train/avg_loss",
                                     avg_loss / cfg.batch_every, ts)

                for name, param in model.named_parameters():
                    tb_writer.add_histogram(name, param, ts)

                logger.debug("[%3d/%3d][%5d/%5d] avg_loss: %.8f" % (
                    epoch_idx,
                    cfg.num_epochs,
                    batch_idx,
                    len(dataloader),
                    avg_loss / cfg.batch_every,
                ))

                avg_loss = 0.0
                ts += 1
            # -- end batch every

            if batch_idx % cfg.save_every == 0:
                # Reassemble the 6x10 patch grid into one 768x1280 image.
                out = T.zeros(6, 10, 3, 128, 128)
                for i in range(6):
                    for j in range(10):
                        x = patches[0, :, i, j, :, :].unsqueeze(0)
                        # Fix: respect the configured device instead of an
                        # unconditional .cuda() (crashed on cfg.device="cpu"
                        # despite the assert allowing it).
                        if cfg.device == "cuda":
                            x = x.cuda()
                        out[i, j] = model(x).cpu().data

                out = np.transpose(out, (0, 3, 1, 4, 2))
                out = np.reshape(out, (768, 1280, 3))
                out = np.transpose(out, (2, 0, 1))

                y = T.cat((img[0], out), dim=2).unsqueeze(0)
                save_imgs(
                    imgs=y,
                    to_size=(3, 768, 2 * 1280),
                    name=exp_dir / f"out/{epoch_idx}_{batch_idx}.png",
                )
            # -- end save every
        # -- end batches

        if epoch_idx % cfg.epoch_every == 0:
            epoch_avg /= len(dataloader) * cfg.epoch_every

            # Fix: log the epoch average itself.  The previous code logged
            # `avg_loss / cfg.batch_every`, which had typically just been
            # reset to 0 in the batch_every branch above.
            tb_writer.add_scalar(
                "train/epoch_avg_loss",
                epoch_avg,
                epoch_idx // cfg.epoch_every,
            )

            logger.info("Epoch avg = %.8f" % epoch_avg)
            epoch_avg = 0.0

            T.save(model.state_dict(),
                   exp_dir / f"checkpoint/model_{epoch_idx}.pth")
        # -- end epoch every
    # -- end epoch

    # save final model
    T.save(model.state_dict(), exp_dir / "model_final.pth")

    # cleaning
    tb_writer.close()
Exemple #28
0
def main():
    """Segment the iris and pupil from a 3D volume of eye images.

    Loads a BMP stack, applies histogram equalization from a precomputed CDF,
    then runs Bradley adaptive thresholding + morphology per depth-slab to
    produce a final iris-minus-pupil segmentation, saved as images.
    Per-subject thresholds/windows are read from and written back to a JSON
    parameter file.
    """
    args = parse_args()
    images = args.input
    vol = load_volume(images, ext='.bmp')

    # Histogram equalization: map intensities through the cumulative histogram.
    with h5py.File('downsampled_histogram.h5', 'r') as f:
        hist = f['hist'][:]
        hist = np.cumsum(hist)

    vol = np.floor(255 * hist[vol])
    vol[vol > 255] = 255
    vol[vol < 0] = 0
    vol = vol.astype(np.uint8)

    # 3D structuring element restricted to in-plane (axis 0 zeroed out), so
    # morphology acts slice-wise rather than across depth.
    sel = ndi.generate_binary_structure(3, 1)
    sel[0] = 0
    sel[2] = 0

    subject = os.path.basename(args.input)
    print(subject)
    params = {}

    # NOTE(review): if recover_parameters is False and the corresponding CLI
    # flags are absent, t_iris/t_pupil/window_* below are unbound -> NameError.
    # The commented-out try/except suggests KeyError was once tolerated too.
    if args.recover_parameters:
        # try:
        with open(args.parameter_file, 'r') as f:
            params = json.load(f)
            t_iris = params[subject]['t_iris']
            t_pupil = params[subject]['t_pupil']
            window_iris = tuple(params[subject]['window_iris'])
            window_pupil = tuple(params[subject]['window_pupil'])
    # except KeyError:
    #    pass

    # CLI arguments override anything recovered from the parameter file.
    if args.t_iris is not None:
        t_iris = args.t_iris

    if args.t_pupil is not None:
        t_pupil = args.t_pupil

    if args.window_iris is not None:
        window_iris = tuple(args.window_iris)

    if args.window_pupil is not None:
        window_pupil = tuple(args.window_pupil)

    depth = args.depth
    # Expected pupil radius: a quarter of the smaller in-plane dimension.
    radius = int(min(vol.shape[1], vol.shape[2]) / 4)

    if args.save_iris:
        iris_seg = np.zeros(vol.shape, dtype=np.uint8)
    if args.save_pupil:
        pupil_seg = np.zeros(vol.shape, dtype=np.uint8)
    seg = np.zeros(vol.shape, dtype=np.uint8)

    sums, counts = None, None

    # Process the volume in slabs of `depth` slices.
    for i in range(0, vol.shape[0], depth):
        subvol = np.copy(vol[i:i + depth])
        orig_shape = subvol.shape
        # Pad a short final slab by repeating its last slice.
        if subvol.shape[0] < depth:
            subvol = np.concatenate([
                subvol,
                np.repeat(subvol[-1].reshape(1, subvol.shape[1],
                                             subvol.shape[2]),
                          depth - subvol.shape[0],
                          axis=0)
            ],
                                    axis=0)

        # When both windows match, the integral-image sums/counts can be
        # shared between the iris and pupil thresholding passes.
        if all([
                window_iris[j] == window_pupil[j]
                for j in range(len(window_iris))
        ]):
            sums, counts = summate(integral_image(subvol),
                                   np.asarray(window_iris))

        # Iris Segmentation (invert: foreground = below-threshold pixels)
        iris = 1.0 - threshold_bradley_nd(
            subvol, t=t_iris, s=window_iris, sums=sums, counts=counts)
        iris = ndi.binary_fill_holes(iris, structure=sel)

        # Pupil Segmentation
        pupil = 1.0 - threshold_bradley_nd(
            subvol, t=t_pupil, s=window_pupil, sums=sums, counts=counts)
        pupil = ndi.binary_fill_holes(pupil, structure=sel)
        pupil = ndi.binary_erosion(pupil, structure=sel)
        pupil = ndi.binary_dilation(pupil, structure=sel)
        pupil = ndi.binary_dilation(pupil, structure=sel).astype(np.uint8)
        # Collapse along depth: keep pixels marked pupil in >1 slice.
        pupil_collapsed = (np.sum(pupil, axis=0) > 1).astype(np.uint8)
        pupil_collapsed = remove_small_objects(label(pupil_collapsed),
                                               min_size=200).astype(np.uint8)
        circle_mask = np.zeros(pupil_collapsed.shape, dtype=np.uint8)

        # Pick the most pupil-like connected component and build a circular
        # region of interest around its centroid.
        try:
            objs = regionprops(label(pupil_collapsed),
                               intensity_image=np.mean(subvol, axis=0).astype(
                                   np.uint8))
            for obj in objs:
                # Discard large, highly non-convex blobs and degenerate ones.
                if obj.convex_area > 1000 and obj.solidity < 0.5 or np.sum(
                        obj.inertia_tensor_eigvals) == 0:
                    pupil_collapsed[obj.coords[:, 0], obj.coords[:, 1]] = 0

            # Heuristic score favoring compact, solid, round regions.
            pupil_idx = np.argmax([
                o.area * np.abs(o.orientation) * o.solidity /
                (o.eccentricity + 1e-7) /
                (o.inertia_tensor_eigvals[0] - o.inertia_tensor_eigvals[1])
                for o in objs
            ])
            pupil_obj = objs[pupil_idx]
            circle_coords = circle(pupil_obj.centroid[0],
                                   pupil_obj.centroid[1],
                                   radius,
                                   shape=pupil_collapsed.shape)
            circle_mask[circle_coords] = 1
        except (ValueError, IndexError):
            # No candidate region found in this slab; leave the mask empty.
            pass

        # Restrict the per-slice pupil mask to the collapsed 2D mask.
        pupil = np.logical_and(
            pupil,
            np.repeat(pupil_collapsed.reshape((1, ) + pupil_collapsed.shape),
                      pupil.shape[0],
                      axis=0))

        # Final Segmentation: iris ring = iris XOR pupil, clipped to the circle.
        final = np.logical_xor(iris, pupil).astype(np.uint8)
        final = ndi.binary_dilation(final, structure=sel)
        final[:, circle_mask == 0] = 0

        # Save it (trim any padding added to the final slab)
        seg[i:i + depth] = final[:orig_shape[0]]
        if args.save_iris:
            iris_seg[i:i + depth] += iris[:orig_shape[0]]
        if args.save_pupil:
            pupil_seg[i:i + depth] += pupil[:orig_shape[0]]

    # Drop columns supported by fewer than 20 slices, then clean up edges.
    seg[:, np.sum(seg, axis=0) < 20] = 0
    seg = ndi.binary_erosion(seg, structure=sel)
    seg = ndi.binary_erosion(seg, structure=sel).astype(np.uint8)

    outdir = os.path.join(args.output, subject)
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    seg[seg.nonzero()] = 255
    save_imgs(seg, outdir, prefix=subject)

    if args.save_iris:
        outdir = os.path.join(args.output, 'iris', subject)
        if not os.path.isdir(outdir):
            os.makedirs(outdir)
        iris_seg[iris_seg.nonzero()] = 255
        save_imgs(iris_seg, outdir)

    if args.save_pupil:
        outdir = os.path.join(args.output, 'pupil', subject)
        if not os.path.isdir(outdir):
            os.makedirs(outdir)
        pupil_seg[pupil_seg.nonzero()] = 255
        save_imgs(pupil_seg, outdir)

    # Back up the parameter file before rewriting it with this subject's values.
    # NOTE(review): shutil.copy raises if the file does not exist yet — confirm
    # the file is guaranteed present when recover_parameters is False.
    shutil.copy(args.parameter_file, args.parameter_file + '.bak')

    with open(args.parameter_file, 'w') as f:
        params[subject] = {
            't_iris': t_iris,
            't_pupil': t_pupil,
            'window_iris': window_iris,
            'window_pupil': window_pupil,
        }
        json.dump(params, f)
 def save(self):
     """Store an uploaded screenshot and register it on the topic."""
     filenames = self.topic.attrs.get('scrs', [])
     filenames.append(save_imgs(self.cleaned_data['scr'], self.topic.author.id, 'scr'))
     self.topic.attrs['scrs'] = filenames
     self.topic.save()
    def eval(self,
             tb_writer=None,
             iter_time=None,
             save_dir=None,
             is_debug=False):
        """Run the validation set through the model and compute metrics.

        Accumulates mIoU/accuracy/precision/recall/per-class-accuracy over
        all validation images, optionally saving prediction images (and, in
        multi-test debug mode, the intermediate rotated/inverse-rotated
        steps).  Returns the metrics scaled to percentages.
        """
        # Multi-test mode additionally fetches the per-step (rotated and
        # inverse-rotated) images and predictions for debugging output.
        if self.multi_test:
            run_ops = [
                self.model.mIoU_metric_update,
                self.model.accuracy_metric_update,
                self.model.precision_metric_update,
                self.model.recall_metric_update,
                self.model.per_class_accuracy_metric_update, self.model.imgVal,
                self.model.predClsVal, self.model.segImgVal,
                self.model.img_name_val, self.model.user_id_val,
                self.model.imgVal_s1, self.model.imgVal_s2,
                self.model.predVal_s1, self.model.predVal_s2,
                self.model.segImgVal_s1, self.model.segImgVal_s2
            ]
        else:
            run_ops = [
                self.model.mIoU_metric_update,
                self.model.accuracy_metric_update,
                self.model.precision_metric_update,
                self.model.recall_metric_update,
                self.model.per_class_accuracy_metric_update, self.model.imgVal,
                self.model.predClsVal, self.model.segImgVal,
                self.model.img_name_val, self.model.user_id_val
            ]

        # Evaluation mode: no dropout, no training-specific behavior.
        feed = {
            self.model.ratePh: 0.,  # rate: 1 - keep_prob
            self.model.trainMode: False
        }

        # Initialize/reset the running variables
        self.sess.run(self.model.running_vars_initializer)

        per_cla_acc_mat = None
        for iterTime in range(self.data.numValImgs):
            img_s1, img_s2, pred_s1, pred_s2, segImg_s1, segImg_s2 = None, None, None, None, None, None

            if self.multi_test:
                _, _, _, _, per_cla_acc_mat, img, predCls, segImg, img_name, user_id, \
                img_s1, img_s2, pred_s1, pred_s2, segImg_s1, segImg_s2 = self.sess.run(run_ops, feed_dict=feed)
            else:
                _, _, _, _, per_cla_acc_mat, img, predCls, segImg, img_name, user_id = \
                    self.sess.run(run_ops, feed_dict=feed)

            # Progress indicator, refreshed in place every 100 images.
            if iterTime % 100 == 0:
                msg = "\r - Evaluating progress: {:.2f}%".format(
                    (iterTime / self.data.numValImgs) * 100.)

                # Print it.
                sys.stdout.write(msg)
                sys.stdout.flush()

            ############################################################################################################
            if not self.is_train:
                # Save images (standalone evaluation only, not during training)
                utils.save_imgs(img_stores=[img, predCls, segImg],
                                saveDir=save_dir,
                                img_name=img_name.astype('U26'),
                                is_vertical=False)

            if not self.is_train and is_debug and self.multi_test:
                # # Step 1: save rotated images
                predCls_s1 = np.argmax(
                    pred_s1, axis=-1)  # predict class using argmax function
                utils.save_imgs(img_stores=[img_s1, predCls_s1, segImg_s1],
                                saveDir=os.path.join(save_dir, 'debug'),
                                name_append='step1_',
                                img_name=img_name.astype('U26'),
                                is_vertical=True)

                # Step 2: save inverse-rotated images
                predCls_s2 = np.argmax(
                    pred_s2, axis=-1)  # predict class using argmax function
                utils.save_imgs(img_stores=[img_s2, predCls_s2, segImg_s2],
                                saveDir=os.path.join(save_dir, 'debug'),
                                name_append='step2_',
                                img_name=img_name.astype('U26'),
                                is_vertical=True)

                # Step 3: Save comparison image that includes img, single_pred, multi-test_pred, gt
                utils.save_imgs(img_stores=[
                    img,
                    np.expand_dims(predCls_s1[5], axis=0), predCls, segImg
                ],
                                saveDir=os.path.join(save_dir, 'debug'),
                                name_append='step3_',
                                img_name=img_name.astype('U26'),
                                is_vertical=False)

            ############################################################################################################

        # Calculate the final metric values from the accumulated updates.
        mIoU, accuracy, precision, recall, f1_score, metric_summary_op = self.sess.run(
            [
                self.model.mIoU_metric, self.model.accuracy_metric,
                self.model.precision_metric, self.model.recall_metric,
                self.model.f1_score_metric, self.model.metric_summary_op
            ])

        if self.is_train:
            # Write to tensorboard
            tb_writer.add_summary(metric_summary_op, iter_time)
            tb_writer.flush()

        # Convert all metrics to percentages.
        # NOTE(review): per_cla_acc_mat stays None if numValImgs == 0, which
        # would make the `*=` below raise — confirm the validation set is
        # never empty.
        mIoU *= 100.
        accuracy *= 100.
        precision *= 100.
        recall *= 100.
        f1_score *= 100.
        per_cla_acc_mat *= 100.

        return mIoU, accuracy, per_cla_acc_mat, precision, recall, f1_score
Exemple #31
0
def train(args):
    """Train a two-domain disentangling autoencoder with a domain critic.

    Encoders e1 (shared content) and e2 (domain-A-specific) feed a common
    decoder; a discriminator pushes e1's codes to be domain-invariant.
    Periodically logs progress, saves sample images, and checkpoints.
    """
    if not os.path.exists(args.out):
        os.makedirs(args.out)

    _iter = 0

    comp_transformA = transforms.Compose([
        transforms.CenterCrop(args.cropA),
        transforms.Resize(args.resize),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    comp_transformB = transforms.Compose([
        transforms.CenterCrop(args.cropB),
        transforms.Resize(args.resize),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    domA_train = CustomDataset(os.path.join(args.root, 'trainA.txt'), transform=comp_transformA)
    domB_train = CustomDataset(os.path.join(args.root, 'trainB.txt'), transform=comp_transformB)

    # Domain labels for the critic; B has no separate code (zeros).
    # NOTE(review): torch.full with an int fill value returns int64 on newer
    # torch versions, but BCELoss needs float targets — verify against the
    # installed torch.
    A_label = torch.full((args.bs,), 1)
    B_label = torch.full((args.bs,), 0)
    B_separate = torch.full((args.bs, args.sep * (args.resize // 64) * (args.resize // 64)), 0)

    e1 = E1(args.sep, args.resize // 64)
    e2 = E2(args.sep, args.resize // 64)
    decoder = Decoder(args.resize // 64)
    disc = Disc(args.sep, args.resize // 64)

    mse = nn.MSELoss()
    bce = nn.BCELoss()

    if torch.cuda.is_available():
        e1 = e1.cuda()
        e2 = e2.cuda()
        decoder = decoder.cuda()
        disc = disc.cuda()

        A_label = A_label.cuda()
        B_label = B_label.cuda()
        B_separate = B_separate.cuda()

        mse = mse.cuda()
        bce = bce.cuda()

    ae_params = list(e1.parameters()) + list(e2.parameters()) + list(decoder.parameters())
    ae_optimizer = optim.Adam(ae_params, lr=args.lr, betas=(0.5, 0.999))

    disc_params = disc.parameters()
    disc_optimizer = optim.Adam(disc_params, lr=args.disclr, betas=(0.5, 0.999))

    if args.load != '':
        save_file = os.path.join(args.load, 'checkpoint')
        _iter = load_model(save_file, e1, e2, decoder, ae_optimizer, disc, disc_optimizer)

    e1 = e1.train()
    e2 = e2.train()
    decoder = decoder.train()
    disc = disc.train()

    print('Started training...')
    while True:
        domA_loader = torch.utils.data.DataLoader(domA_train, batch_size=args.bs,
                                                  shuffle=True, num_workers=6)
        domB_loader = torch.utils.data.DataLoader(domB_train, batch_size=args.bs,
                                                  shuffle=True, num_workers=6)
        if _iter >= args.iters:
            break

        for domA_img, domB_img in zip(domA_loader, domB_loader):
            # Skip ragged final batches so the fixed-size labels line up.
            if domA_img.size(0) != args.bs or domB_img.size(0) != args.bs:
                break

            # Fix: initialize loss2 each iteration.  Previously it was only
            # assigned inside the `args.discweight > 0` branch, so the
            # progress print below raised NameError when the critic was
            # disabled.
            loss2 = 0.0

            domA_img = Variable(domA_img)
            domB_img = Variable(domB_img)

            if torch.cuda.is_available():
                domA_img = domA_img.cuda()
                domB_img = domB_img.cuda()

            domA_img = domA_img.view((-1, 3, args.resize, args.resize))
            domB_img = domB_img.view((-1, 3, args.resize, args.resize))

            ae_optimizer.zero_grad()

            A_common = e1(domA_img)
            A_separate = e2(domA_img)
            A_encoding = torch.cat([A_common, A_separate], dim=1)

            B_common = e1(domB_img)
            B_encoding = torch.cat([B_common, B_separate], dim=1)

            A_decoding = decoder(A_encoding)
            B_decoding = decoder(B_encoding)

            # Reconstruction loss for both domains.
            loss = mse(A_decoding, domA_img) + mse(B_decoding, domB_img)

            if args.discweight > 0:
                # Adversarial term: push both common codes toward the B label
                # so the critic cannot tell the domains apart.
                preds_A = disc(A_common)
                preds_B = disc(B_common)
                loss += args.discweight * (bce(preds_A, B_label) + bce(preds_B, B_label))

            loss.backward()
            torch.nn.utils.clip_grad_norm_(ae_params, 5)
            ae_optimizer.step()

            if args.discweight > 0:
                # Critic update on fresh (post-step) encodings.
                disc_optimizer.zero_grad()

                A_common = e1(domA_img)
                B_common = e1(domB_img)

                disc_A = disc(A_common)
                disc_B = disc(B_common)

                loss2 = bce(disc_A, A_label) + bce(disc_B, B_label)

                loss2.backward()
                torch.nn.utils.clip_grad_norm_(disc_params, 5)
                disc_optimizer.step()

            if _iter % args.progress_iter == 0:
                print('Outfile: %s | Iteration %d | loss %.6f | loss1: %.6f | loss2: %.6f' % (args.out, _iter, loss+loss2, loss, loss2))

            if _iter % args.display_iter == 0:
                # Switch to eval mode for deterministic sample generation.
                e1 = e1.eval()
                e2 = e2.eval()
                decoder = decoder.eval()

                save_imgs(args, e1, e2, decoder, _iter)

                e1 = e1.train()
                e2 = e2.train()
                decoder = decoder.train()

            if _iter % args.save_iter == 0:
                save_file = os.path.join(args.out, 'checkpoint_%d' % _iter)
                save_model(save_file, e1, e2, decoder, ae_optimizer, disc, disc_optimizer, _iter)

            _iter += 1
Exemple #32
0
def train(cfg):
    """Train the CAE on 720p images decomposed into a 6x10 grid of 128x128 patches.

    cfg is a dict-like config with keys: exp_name, load, chkpt, dataset_path,
    batch_size, shuffle, num_workers, learning_rate, resume_epoch, num_epochs,
    out_every, save_every.

    Side effects: writes sample reconstructions under out/<exp_name>/ and model
    checkpoints under checkpoints/<exp_name>/.
    """
    os.makedirs(f"out/{cfg['exp_name']}", exist_ok=True)
    os.makedirs(f"checkpoints/{cfg['exp_name']}", exist_ok=True)

    # dump config for current experiment
    with open(f"checkpoints/{cfg['exp_name']}/setup.cfg", "wt") as f:
        for k, v in cfg.items():
            f.write("%15s: %s\n" % (k, v))

    model = CAE().cuda()

    if cfg['load']:
        model.load_state_dict(torch.load(cfg['chkpt']))
        # BUG FIX: logging uses lazy %-style args, not print-style varargs;
        # the old call logger.info("...", cfg['chkpt']) triggered a
        # string-formatting error instead of logging the path.
        logger.info("Loaded model from %s", cfg['chkpt'])

    model.train()
    logger.info("Done setup model")

    dataset = ImageFolder720p(cfg['dataset_path'])
    dataloader = DataLoader(dataset,
                            batch_size=cfg['batch_size'],
                            shuffle=cfg['shuffle'],
                            num_workers=cfg['num_workers'])
    logger.info(
        f"Done setup dataloader: {len(dataloader)} batches of size {cfg['batch_size']}"
    )

    mse_loss = nn.MSELoss()
    # (an unused SGD optimizer was previously constructed here; removed)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=cfg['learning_rate'],
                                 weight_decay=1e-5)

    ra = 0  # running average of per-batch avg_loss within the epoch

    for ei in range(cfg['resume_epoch'], cfg['num_epochs']):
        for bi, (img, patches, _) in enumerate(dataloader):

            avg_loss = 0
            # one optimizer step per patch; 6*10 = 60 patches per image grid
            for i in range(6):
                for j in range(10):
                    # Variable() wrapper dropped: a no-op since torch 0.4
                    x = patches[:, :, i, j, :, :].cuda()
                    y = model(x)
                    loss = mse_loss(y, x)

                    avg_loss += (1 / 60) * loss.item()

                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()

            # incremental mean over the batches seen so far this epoch
            ra = avg_loss if bi == 0 else ra * bi / (bi + 1) + avg_loss / (bi + 1)

            logger.debug('[%3d/%3d][%5d/%5d] avg_loss: %f, ra: %f' %
                         (ei + 1, cfg['num_epochs'], bi + 1, len(dataloader),
                          avg_loss, ra))

            # periodically save a side-by-side (input | reconstruction) image
            if bi % cfg['out_every'] == 0:
                out = torch.zeros(6, 10, 3, 128, 128)
                with torch.no_grad():  # inference only; don't track gradients
                    for i in range(6):
                        for j in range(10):
                            x = patches[0, :, i, j, :, :].unsqueeze(0).cuda()
                            out[i, j] = model(x).cpu().data

                # BUG FIX: stitch the 6x10 patch grid back into one 3x768x1280
                # image with torch ops. np.transpose/np.reshape on a Tensor
                # silently convert it to a numpy array, which then breaks the
                # torch.cat below.
                out = out.permute(0, 3, 1, 4, 2).reshape(768, 1280, 3)
                out = out.permute(2, 0, 1)

                y = torch.cat((img[0], out), dim=2).unsqueeze(0)
                save_imgs(imgs=y,
                          to_size=(3, 768, 2 * 1280),
                          name=f"out/{cfg['exp_name']}/out_{ei}_{bi}.png")

            # save intermediate model checkpoint
            if bi % cfg['save_every'] == cfg['save_every'] - 1:
                torch.save(
                    model.state_dict(),
                    f"checkpoints/{cfg['exp_name']}/model_{ei}_{bi}.state")

    # save final model
    torch.save(model.state_dict(),
               f"checkpoints/{cfg['exp_name']}/model_final.state")
Exemple #33
0
def train():
    """Train a Wasserstein GAN with a critic on MNIST.

    The critic is updated on every batch; the generator is updated once per
    n_critic critic steps. Sample grids from a fixed noise vector are written
    to ./images at a fixed epoch interval via save_imgs.
    """
    (real_images, _), (_, _) = tf.keras.datasets.mnist.load_data()

    if not os.path.exists('./images'):
        os.makedirs('./images')

    # settting hyperparameter
    z_dim = 100
    n_epochs = 800
    bs = 32
    shuffle_buffer = 6000
    snapshot_every = 50
    critic_steps = 5

    gen = Generator()
    critic = Critic()

    g_opt = tf.keras.optimizers.Adam(0.0001, 0.5, 0.9)
    d_opt = tf.keras.optimizers.Adam(0.0001, 0.5, 0.9)

    # Rescale -1 to 1
    real_images = real_images / 127.5 - 1.
    real_images = np.expand_dims(real_images, axis=3).astype('float32')

    dataset = (tf.data.Dataset.from_tensor_slices(real_images)
               .shuffle(shuffle_buffer)
               .batch(bs))

    @tf.function
    def critic_step(batch):
        """One critic update on a batch of real images; returns the critic loss."""
        z = tf.random.normal([bs, z_dim])

        with tf.GradientTape() as tape:
            fakes = gen(z, training=True)

            fake_scores = critic(fakes, training=True)
            real_scores = critic(batch, training=True)

            # interpolate between real and fake samples for the gradient penalty
            mixed = random_weighted_average([batch, fakes])
            mixed_scores = critic(mixed, training=True)

            d_loss = discriminator_loss(real_scores, fake_scores,
                                        mixed_scores, mixed)

        grads = tape.gradient(d_loss, critic.trainable_variables)
        d_opt.apply_gradients(zip(grads, critic.trainable_variables))
        return d_loss

    @tf.function
    def generator_step():
        """One generator update against the current critic; returns the generator loss."""
        z = tf.random.normal([bs, z_dim])

        with tf.GradientTape() as tape:
            fakes = gen(z, training=True)
            fake_scores = critic(fakes, training=True)
            g_loss = generator_loss(fake_scores)

        grads = tape.gradient(g_loss, gen.trainable_variables)
        g_opt.apply_gradients(zip(grads, gen.trainable_variables))
        return g_loss

    # fixed noise so saved sample grids are comparable across epochs
    fixed_noise = tf.random.normal([16, z_dim])

    for epoch in range(n_epochs):
        tic = time.time()
        d_total = 0
        g_total = 0

        for batch in dataset:
            d_total += critic_step(batch)
            # one generator update per critic_steps critic updates
            if d_opt.iterations.numpy() % critic_steps == 0:
                g_total += generator_step()

        print('Time for epoch {} is {} sec - gen_loss = {}, disc_loss = {}'.
              format(epoch + 1,
                     time.time() - tic, g_total / bs,
                     d_total / (bs * critic_steps)))

        if epoch % snapshot_every == 0:
            save_imgs(epoch, gen, fixed_noise)
Exemple #34
0
def train():
    """Train a vanilla GAN on MNIST with simultaneous G/D updates per batch.

    Sample grids from a fixed noise vector are written to ./images at a fixed
    epoch interval via save_imgs.
    """
    (mnist_images, _), (_, _) = tf.keras.datasets.mnist.load_data()
    print(type(mnist_images))

    if not os.path.exists('./images'):
        os.makedirs('./images')

    # settting hyperparameter
    z_dim = 100
    n_epochs = 800
    bs = 200
    shuffle_buffer = 6000
    snapshot_every = 50

    gen = Generator()
    disc = Discriminator()

    g_opt = tf.keras.optimizers.Adam(0.0002, 0.5)
    d_opt = tf.keras.optimizers.Adam(0.0002, 0.5)

    # Rescale -1 to 1
    mnist_images = mnist_images / 127.5 - 1.
    mnist_images = np.expand_dims(mnist_images, axis=3).astype('float32')

    dataset = (tf.data.Dataset.from_tensor_slices(mnist_images)
               .shuffle(shuffle_buffer)
               .batch(bs))

    bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)

    @tf.function
    def step(batch):
        """One combined G/D update; returns (generator loss, discriminator loss)."""
        z = tf.random.normal([bs, z_dim])

        # a single persistent tape records both forward passes so each
        # network's gradients can be pulled from it separately
        with tf.GradientTape(persistent=True) as tape:
            fakes = gen(z)

            real_scores = disc(batch)
            fake_scores = disc(fakes)

            g_loss = generator_loss(bce, fake_scores)
            d_loss = discriminator_loss(bce, real_scores, fake_scores)

        g_grads = tape.gradient(g_loss, gen.trainable_variables)
        d_grads = tape.gradient(d_loss, disc.trainable_variables)

        g_opt.apply_gradients(zip(g_grads, gen.trainable_variables))
        d_opt.apply_gradients(zip(d_grads, disc.trainable_variables))

        return g_loss, d_loss

    # fixed noise so saved sample grids are comparable across epochs
    fixed_noise = tf.random.normal([16, z_dim])

    for epoch in range(n_epochs):
        tic = time.time()
        g_total = 0
        d_total = 0

        for batch in dataset:
            g_step_loss, d_step_loss = step(batch)
            g_total += g_step_loss
            d_total += d_step_loss

        print('Time for epoch {} is {} sec - gen_loss = {}, disc_loss = {}'.
              format(epoch + 1,
                     time.time() - tic, g_total / bs,
                     d_total / bs))
        if epoch % snapshot_every == 0:
            save_imgs(epoch, gen, fixed_noise)