Example #1
File: drs.py Project: yqGANs/gold
def drs(netG, netD, num_samples=10, perc=10, nz=100, ny=10, batch_size=100, eps=1e-6):
	M, w = get_score_stats(netG, netD)
	ones = np.ones(batch_size).astype('int64')

	images = [[] for _ in range(ny)]
	for cls in range(ny):
		while len(images[cls]) < num_samples:
			z = make_z(batch_size, nz).cuda()
			y = make_y(batch_size, ny, cls).cuda()
			with torch.no_grad():
				x = netG(z, y)
				r = np.exp(gold(netD, x, y, 1, w))

			p = np.minimum(ones, r/M)
			f = np.log(p + eps) - np.log(1 - p + eps)  # inverse sigmoid
			f = (f - np.percentile(f, perc))
			p = 1 / (1 + np.exp(-f))  # sigmoid
			accept = np.random.binomial(ones, p)

			for i in range(batch_size):
				if accept[i] and len(images[cls]) < num_samples:
					images[cls].append(x[i].detach().cpu())

	images = torch.stack([x for l in images for x in l])
	return images
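
This is discriminator rejection sampling (DRS): the ratio r/M is clipped to [0, 1], shifted in logit space by its perc-th percentile, and mapped back through a sigmoid to give per-sample acceptance probabilities. A minimal usage sketch, assuming numpy/torch are imported and make_z, make_y, gold, and get_score_stats are defined elsewhere in drs.py:

# netG, netD: a trained conditional generator/discriminator pair (assumed)
samples = drs(netG, netD, num_samples=100, perc=10)
print(samples.shape)  # ny * num_samples accepted images, stacked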
Example #2
def generate_samples_cond(config, n_samples, model_name, y_class):
    n_samples = int(n_samples)

    # Initializing generator from configuration
    G = utils.initialize(config, model_name)

    # Update batch size setting used for G
    G_batch_size = max(config['G_batch_size'], config['batch_size'])
    z_ = utils.prepare_z(G_batch_size,
                         G.dim_z,
                         device='cuda',
                         fp16=config['G_fp16'],
                         z_var=config['z_var'])

    # Preparing fixed y tensor
    y_ = utils.make_y(G_batch_size, y_class)

    # Sample function
    sample = functools.partial(utils.sample_cond, G=G, z_=z_, y_=y_)

    # Sample a number of images and return them for saving to an NPZ
    print('Sampling %d images from class %d...' % (n_samples, y_class))

    x, y = [], []
    for i in trange(int(np.ceil(n_samples / float(G_batch_size)))):
        with torch.no_grad():
            images, labels = sample()
        x += [np.uint8(255 * (images.cpu().numpy() + 1) / 2.)]
        y += [labels.cpu().numpy()]
    x = np.concatenate(x, 0)[:n_samples]
    y = np.concatenate(y, 0)[:n_samples]

    return x, y
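
A hypothetical call matching the print statement's stated intent of saving the class-conditional samples to an NPZ (the file name is an assumption):

x, y = generate_samples_cond(config, n_samples=5000, model_name='my_model', y_class=3)
np.savez('samples_class3.npz', x=x, y=y)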
Example #3
File: drs.py Project: yqGANs/gold
def sample_scores(netG, netD, nz=100, ny=10, wd=1, wc=1, sample_size=50000, batch_size=100):
	scores = []
	for i in range(sample_size // batch_size):
		z = make_z(batch_size, nz).cuda()
		y = make_y(batch_size, ny).cuda()
		with torch.no_grad():
			x = netG(z, y)
			s = gold(netD, x, y, wd, wc)
		scores.append(s)
	scores = np.concatenate(scores, axis=0)
	return scores
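
sample_scores pairs naturally with get_score_stats from Example #1; one plausible way to derive the DRS bound M (an assumption, since get_score_stats is not shown) is the maximum exponentiated GOLD score:

scores = sample_scores(netG, netD, wd=1, wc=1)
M = np.exp(scores).max()  # upper-bounds the ratio r = exp(gold(...)) used in drs()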
Example #4
def train_classifier(netG, args, device, testset=None):
    print('\nTraining a classifier')
    loader = data_utils.DataLoader(range(args.netC_per_epoch),
                                   batch_size=args.netC_batch_size,
                                   shuffle=True,
                                   num_workers=8)
    netC = network.LeNet(args.image_size, args.nc, args.ny).to(device)
    optimizerC = optim.Adam(netC.parameters(),
                            lr=args.netC_lr,
                            betas=(args.netC_beta1, args.netC_beta2))
    criterionCE = nn.CrossEntropyLoss().to(device)

    for epoch in range(1, args.netC_epochs + 1):
        adjust_learning_rate(optimizerC, epoch, args.netC_lr,
                             args.netC_lr_period)
        info = {'num': 0, 'loss_C': 0, 'acc': 0}

        # train network
        netC.train()
        for i, x in enumerate(loader):
            # forward
            fake_z = make_z(len(x), args.nz).to(device)  # B x nz
            fake_y = make_y(len(x), args.ny).to(device)  # B
            with torch.no_grad():
                fake_x = netG(fake_z, fake_y)  # B x nc x H x W
            out_fake = netC(fake_x)  # B x ny
            loss_C = criterionCE(out_fake, fake_y)
            acc = accuracy(out_fake, fake_y)

            # backward
            optimizerC.zero_grad()
            loss_C.backward()
            optimizerC.step()

            # update loss info
            info['num'] += 1
            info['loss_C'] += loss_C.item()
            info['acc'] += acc

        # evaluate performance
        info = normalize_info(info)
        message = "Epoch: {}  C: {:.4f}  acc (train): {:.4f}".format(
            epoch, info['loss_C'], info['acc'])

        if testset and epoch % args.netC_eval_period == 0:
            test_acc = eval_classifier(netC, args, device, testset)
            message += "  acc (test): {:.4f}".format(test_acc)

        print(message)
    print('')

    return netC
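
A sketch of the intended call pattern (the args attributes are those referenced above; saving the classifier afterwards is an assumption):

netC = train_classifier(netG, args, device, testset=testset)
torch.save(netC.state_dict(), 'netC.pth')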
Example #5
    def __init__(self,
                 config,
                 model_name,
                 thr=None,
                 multi_gans=None,
                 gan_weights=None):
        # Updating settings
        G_batch_size = config['G_batch_size']
        n_classes = config['n_classes']

        # Loading GAN weights
        if multi_gans is None:
            self.G = utils.initialize(config, model_name)
        else:
            # Assuming that weight files follow the naming convention:
            # model_name_k, where k is in [0, multi_gans-1]
            self.G = [
                utils.initialize(config, model_name + "_%d" % k)
                for k in range(multi_gans)
            ]
        self.multi_gans = multi_gans
        self.gan_weights = gan_weights

        # Preparing sampling functions
        self.z_, self.y_ = utils.prepare_z_y(G_batch_size,
                                             config['dim_z'],
                                             n_classes,
                                             device='cuda',
                                             fp16=config['G_fp16'],
                                             z_var=config['z_var'],
                                             thr=thr)

        # Preparing fixed y tensors
        self.y_fixed = {
            y: utils.make_y(G_batch_size, y)
            for y in range(n_classes)
        }
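
A hypothetical instantiation (the enclosing class name is not shown in the snippet; "Sampler" is assumed), ensembling three generators with fixed mixture weights:

sampler = Sampler(config, 'model', multi_gans=3, gan_weights=[0.5, 0.3, 0.2])
z, y0 = sampler.z_, sampler.y_fixed[0]  # latent sampler plus a fixed class-0 label batch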
Example #6
def train_acgan_full(trainset, model, args, device, use_gold=False):
	# preprocess dataset
	if len(trainset) < args.per_epoch:
		n_iter = args.per_epoch // len(trainset)
		trainset = data_utils.ConcatDataset([trainset] * n_iter)
	loader = data_utils.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=8)

	# preprocess model
	netG = model['net_G']
	netD = model['net_D']
	optimizerG = model['optim_G']
	optimizerD = model['optim_D']

	# initialize criterion
	criterionGAN = network.GANLoss(reduction='none').to(device)
	criterionCE = nn.CrossEntropyLoss(reduction='none').to(device)

	# initialize loss info
	info = {'num': 0, 'loss_G': 0, 'loss_G_cls': 0, 'loss_D_real': 0, 'loss_D_fake': 0, 'loss_C_real': 0, 'loss_C_fake': 0}

	# train one epoch
	for i, (real_x, real_y) in enumerate(loader):
		# forward
		real_x = real_x.to(device)  # B x nc x H x W
		real_y = real_y.to(device)  # B
		fake_z = make_z(len(real_x), args.nz).to(device)  # B x nz
		fake_y = make_y(len(real_x), args.ny).to(device)  # B

		#########################
		# (1) Update D network
		#########################

		optimizerD.zero_grad()

		# real loss
		out_D, out_C = netD(real_x)  # B x 1, B x ny
		loss_D_real = torch.mean(criterionGAN(out_D, True))
		loss_C_real = torch.mean(criterionCE(out_C, real_y))

		# fake loss
		fake_x = netG(fake_z, fake_y)  # B x nc x H x W
		out_D, out_C = netD(fake_x.detach())  # B x 1, B x ny
		with torch.no_grad():
			gold = gold_score(netD, fake_x, fake_y)

		if use_gold:
			weight = gold
		else:
			weight = torch.ones(len(gold)).to(device)

		loss_D_fake = torch.mean(criterionGAN(out_D, False) * weight)
		loss_C_fake = torch.mean(criterionCE(out_C, fake_y) * weight) * args.lambda_C_fake

		loss_D = loss_D_real + loss_D_fake + loss_C_real + loss_C_fake
		loss_D.backward()
		optimizerD.step()

		#########################
		# (2) Update G network
		#########################

		optimizerG.zero_grad()

		# GAN & classification loss
		fake_x = netG(fake_z, fake_y)  # B x nc x H x W
		out_D, out_C = netD(fake_x)  # B x 1, B x ny
		loss_G = torch.mean(criterionGAN(out_D, True))
		loss_G_cls = torch.mean(criterionCE(out_C, fake_y))

		# backward loss
		loss_G_total = loss_G + loss_G_cls
		loss_G_total.backward()
		optimizerG.step()

		# update loss info
		info['num'] += 1

		info['loss_G'] += loss_G.item()
		info['loss_G_cls'] += loss_G_cls.item()

		info['loss_D_real'] += loss_D_real.item()
		info['loss_D_fake'] += loss_D_fake.item()

		info['loss_C_real'] += loss_C_real.item()
		info['loss_C_fake'] += loss_C_fake.item()

	info = normalize_info(info)
	return info
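
A typical outer loop, enabling GOLD reweighting of the fake-sample losses after a warm-up period (n_epochs and warmup_epochs are assumptions):

for epoch in range(1, n_epochs + 1):
    info = train_acgan_full(trainset, model, args, device, use_gold=(epoch > warmup_epochs))
    print(epoch, info['loss_G'], info['loss_D_real'], info['loss_D_fake'])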
Example #7
import pandas as pd
from keras.preprocessing.text import Tokenizer
from keras.regularizers import l2
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, CSVLogger

# MAX_NB_WORDS, MAX_SEQUENCE_LENGTH, TRAIN_DATASET, TASK, W2V_FILE,
# VALIDATION_SPLIT and EMBEDDING_DIM are assumed module-level constants
# (the snippet is truncated at the top).


tokenizer = Tokenizer(num_words=MAX_NB_WORDS)

train_csv_file = pd.read_csv(TRAIN_DATASET, delimiter="\t")
train_csv_file = train_csv_file.dropna()

# make the X
input_text = train_csv_file['text']
X = make_X(input_text, tokenizer, MAX_SEQUENCE_LENGTH)

# make the y
y = train_csv_file[TASK] 
y = make_y(y)

x_train, y_train, x_val, y_val = make_train_val_split(X, y, VALIDATION_SPLIT)

embeddings_index = load_word_vectors(W2V_FILE)

word_index = tokenizer.word_index
num_output_units = y_train.shape[1]

nb_words = min(MAX_NB_WORDS, len(word_index))
embedding_matrix = make_embedding_matrix(word_index, embeddings_index, nb_words, EMBEDDING_DIM)

model = create_model_cnn()

model.compile(loss='categorical_crossentropy',
              optimizer='adadelta')
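
The snippet is cut off at the compile call; a plausible continuation using the callbacks imported above (epoch count, batch size, and patience values are assumptions):

model.fit(x_train, y_train,
          validation_data=(x_val, y_val),
          epochs=50,
          batch_size=64,
          callbacks=[EarlyStopping(patience=5), ReduceLROnPlateau(patience=2)])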
Example #8
def train_acgan_semi(trainset, pool, model, args, device=None, use_gold=False):
	# preprocess dataset (labels of pool = -1)
	if args.dataset != 'lsun':
		pool = deepcopy(pool)

	if args.dataset == 'synthetic':
		ones = torch.ones(len(pool.dataset)).long()
		pool.dataset.tensors = (pool.dataset.tensors[0], -ones)
	else:
		ones = torch.ones(len(pool.dataset)).long()
		pool.dataset.train_labels = -ones

	if args.dataset == 'cifar10':
		trainset = deepcopy(trainset)
		trainset.dataset.train_labels = torch.tensor(trainset.dataset.train_labels).long()

	dataset = data_utils.ConcatDataset([trainset, pool])
	if len(dataset) < args.per_epoch:
		n_iter = args.per_epoch // len(dataset)
		dataset = data_utils.ConcatDataset([dataset] * n_iter)
	loader = data_utils.DataLoader(dataset, batch_size=args.batch_size, shuffle=True, num_workers=8)

	# preprocess model
	netG = model['net_G']
	netD = model['net_D']
	optimizerG = model['optim_G']
	optimizerD = model['optim_D']

	# initialize criterion
	criterionGAN = network.GANLoss(reduction='none').to(device)
	criterionCE = nn.CrossEntropyLoss(reduction='none').to(device)

	# initialize loss info
	info = {'num': 0, 'loss_G': 0, 'loss_G_cls': 0, 'loss_D_real': 0, 'loss_D_fake': 0, 'loss_C_real': 0, 'loss_C_fake': 0}

	# train one epoch
	for i, (real_x, real_y) in enumerate(loader):
		idx_l = [i for i in range(len(real_x)) if real_y[i] != -1]

		# forward
		real_x = real_x.to(device)  # B x nc x H x W
		real_y = real_y.to(device)  # B
		fake_z = make_z(len(real_x), args.nz).to(device)  # B x nz
		fake_y = make_y(len(real_x), args.ny).to(device)  # B

		#########################
		# (1) Update D network
		#########################

		optimizerD.zero_grad()

		# real loss
		out_D, out_C = netD(real_x)  # B x 1, B x ny
		loss_D_real = torch.mean(criterionGAN(out_D, True))
		if len(idx_l) > 0:
			loss_C_real = torch.mean(criterionCE(out_C[idx_l], real_y[idx_l]))
		else:
			loss_C_real = 0

		# fake loss
		fake_x = netG(fake_z, fake_y)  # B x nc x H x W
		out_D, out_C = netD(fake_x.detach())  # B x 1, B x ny
		with torch.no_grad():
			gold = gold_score(netD, fake_x, fake_y)

		if use_gold:
			weight = gold
		else:
			weight = torch.ones(len(gold)).to(device)

		loss_D_fake = torch.mean(criterionGAN(out_D, False) * weight)
		if len(idx_l) > 0:
			loss_C_fake = torch.mean(criterionCE(out_C[idx_l], fake_y[idx_l]) * weight[idx_l]) * args.lambda_C_fake
		else:
			loss_C_fake = 0

		loss_D = loss_D_real + loss_D_fake + loss_C_real + loss_C_fake
		loss_D.backward()
		optimizerD.step()

		#########################
		# (2) Update G network
		#########################

		optimizerG.zero_grad()

		# GAN & classification loss
		fake_x = netG(fake_z, fake_y)  # B x nc x H x W
		out_D, out_C = netD(fake_x)  # B x 1, B x ny
		loss_G = torch.mean(criterionGAN(out_D, True))
		loss_G_cls = torch.mean(criterionCE(out_C, fake_y))

		# backward loss
		loss_G_total = loss_G + loss_G_cls
		loss_G_total.backward()
		optimizerG.step()

		# update loss info
		info['num'] += 1

		info['loss_G'] += loss_G.item()
		info['loss_G_cls'] += loss_G_cls.item()

		info['loss_D_real'] += loss_D_real.item()
		info['loss_D_fake'] += loss_D_fake.item()

		if len(idx_l) > 0:
			info['loss_C_real'] += loss_C_real.item()
			info['loss_C_fake'] += loss_C_fake.item()

	info = normalize_info(info)
	return info
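
Note that train_acgan_semi differs from train_acgan_full (Example #6) only in its handling of unlabeled data: pool labels are overwritten with -1, idx_l selects the labeled examples in each batch, and the classification losses loss_C_real / loss_C_fake are computed and logged only over that labeled subset.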