Example #1
import argparse

import numpy as np

import cifar10_data
import plotting

parser = argparse.ArgumentParser()
parser.add_argument('--save_dir', type=str, default='./log')
parser.add_argument('--data_dir', type=str, default='/home/tim/data')
parser.add_argument('--plot_title', type=str, default=None)
args = parser.parse_args()
print(args)

data_dir = args.data_dir

trainx, trainy = cifar10_data.load(data_dir)

# collect the indices of the first 10 examples of each of the 10 classes
ids = [[] for _ in range(10)]
for i in range(trainx.shape[0]):
    if len(ids[trainy[i]]) < 10:
        ids[trainy[i]].append(i)
    if np.all([len(_ids) >= 10 for _ids in ids]):
        break

# arrange a 10x10 grid: column i holds class i, row j its j-th example
images = np.zeros((10 * 10, 32, 32, 3), dtype='uint8')
for i in range(len(ids)):
    for j in range(len(ids[i])):
        # CHW -> HWC for plotting
        images[10 * j + i] = trainx[ids[i][j]].transpose([1, 2, 0])
print(ids)

img_tile = plotting.img_tile(images, aspect_ratio=1.0, border_color=1.0, stretch=True)
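Every example on this page calls cifar10_data.load, a small loader shipped with these GAN codebases rather than a library function. A minimal sketch of a compatible loader, assuming the published "CIFAR-10 python version" batch files sit under data_dir in the standard cifar-10-batches-py layout (the function name and the (N, 3, 32, 32) uint8 return layout match the calls above; the internals are an assumption):

import os
import pickle

import numpy as np

def load(data_dir, subset='train'):
    # reads the pickled CIFAR-10 batches and returns (images, labels)
    def unpickle(path):
        with open(path, 'rb') as f:
            d = pickle.load(f, encoding='latin1')
        return d['data'].reshape(-1, 3, 32, 32), np.asarray(d['labels'], dtype=np.int32)
    base = os.path.join(data_dir, 'cifar-10-batches-py')
    if subset == 'train':
        parts = [unpickle(os.path.join(base, 'data_batch_%d' % i)) for i in range(1, 6)]
        return np.concatenate([p[0] for p in parts]), np.concatenate([p[1] for p in parts])
    return unpickle(os.path.join(base, 'test_batch'))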
Example #2
# the ArgumentParser construction and the --seed / --seed_data arguments
# used below are presumably defined above this excerpt
parser.add_argument('--count', type=int, default=400)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--unlabeled_weight', type=float, default=1.)
parser.add_argument('--learning_rate', type=float, default=0.0003)
parser.add_argument('--data_dir', type=str, default='/home/bigdata/Desktop/CT-GANs')  # set this to your own data path
args = parser.parse_args()
print(args)

# fixed random seeds
rng_data = np.random.RandomState(args.seed_data)
rng = np.random.RandomState(args.seed)
theano_rng = MRG_RandomStreams(rng.randint(2 ** 15))
lasagne.random.set_rng(np.random.RandomState(rng.randint(2 ** 15)))

# load CIFAR-10
trainx, trainy = cifar10_data.load(args.data_dir, subset='train') 
testx, testy = cifar10_data.load(args.data_dir, subset='test')



#######
# pad: 2 reflected pixels per side (32x32 -> 36x36), so that random
# 32x32 crops can be taken during training
#######

trainx = np.pad(trainx, ((0, 0), (0, 0), (2, 2), (2, 2)), 'reflect')

trainx_unl_org = trainx.copy()
trainx_unl2_org = trainx.copy()
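The two unlabeled copies are typically consumed through independently sampled random 32x32 crops of the padded images. A sketch of that cropping step, assuming NCHW arrays (the helper name is hypothetical):

import numpy as np

def random_crop(batch, rng, crop=32):
    # batch: (N, C, 36, 36) padded images; returns random crop-sized windows
    out = np.empty(batch.shape[:2] + (crop, crop), dtype=batch.dtype)
    max_off = batch.shape[2] - crop
    for i in range(batch.shape[0]):
        ox = rng.randint(max_off + 1)
        oy = rng.randint(max_off + 1)
        out[i] = batch[i, :, ox:ox + crop, oy:oy + crop]
    return out

# e.g.: trainx_unl = random_crop(trainx_unl_org, rng)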
Example #3
            # (the enclosing `def rescale(mat):` for the SVHN branch is cut
            # off above this excerpt; it maps uint8 pixels to [-1, 1] and
            # reorders (H, W, C, N) -> (N, C, H, W) for Theano)
            return np.transpose(
                np.cast[theano.config.floatX]((-127.5 + mat) / 127.5),
                (3, 2, 0, 1))

        import svhn_data
        eval_x, eval_y = svhn_data.load('./svhn/', 'test')
        eval_y = np.int32(eval_y)
        eval_x = rescale(eval_x)

    else:

        def rescale(mat):
            return np.cast[theano.config.floatX](mat)

        import cifar10_data
        eval_x, eval_y = cifar10_data.load('./cifar10/', 'test')
        eval_y = np.int32(eval_y)
        eval_x = rescale(eval_x)

elif args.dataset == 'mnist':
    gen_final_non = ln.sigmoid
    num_classes = 10
    dim_input = (28, 28)
    in_channels = 1
    colorImg = False
    generation_scale = False
'''
models
'''
# symbols
sym_y_g = T.ivector()  # int32 vector of class labels for the generator
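The rescale above maps pixels into [-1, 1]; when visualizing generated samples, the inverse mapping is needed. A small sketch (hypothetical helper, not part of the original code):

import numpy as np

def to_uint8(mat):
    # invert (-127.5 + x) / 127.5: map [-1, 1] back to displayable [0, 255]
    return np.clip(mat * 127.5 + 127.5, 0., 255.).astype('uint8')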
Example #4
from settings import settings_binary
from validation_utils import hamming_dist, mean_average_precison
from sklearn.metrics import average_precision_score
from scipy.spatial.distance import cdist

# settings
args = settings_binary()

# fixed random seeds
rng_data = np.random.RandomState(args.seed_data)
rng = np.random.RandomState(args.seed)
theano_rng = MRG_RandomStreams(rng.randint(2**15))
lasagne.random.set_rng(np.random.RandomState(rng.randint(2**15)))

# load CIFAR-10
trainx, trainy = cifar10_data.load(args.data_dir, subset='train')
nr_batches_train = int(trainx.shape[0] / args.batch_size)

testx, testy = cifar10_data.load(args.data_dir, subset='test')
# evaluate on a fixed random subset of 100 test images
inds = rng_data.permutation(testx.shape[0])
testx = testx[inds][0:100]
testy = testy[inds][0:100]

# specify generative model
noise_dim = (args.batch_size, 100)
noise = theano_rng.uniform(size=noise_dim)
gen_layers = [ll.InputLayer(shape=noise_dim, input_var=noise)]
gen_layers.append(
    nn.batch_norm(ll.DenseLayer(gen_layers[-1],
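#     the append is cut off mid-call; a plausible continuation, in the
#     style of Improved-GAN CIFAR generators (all values are assumptions):
#     num_units=4 * 4 * 512,
#     nonlinearity=lasagne.nonlinearities.rectify)))
# gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (args.batch_size, 512, 4, 4)))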
Example #5
class Cifar10Dataset:  # hypothetical enclosing class; only __init__ was excerpted
    def __init__(self, data_dir, deterministic=False):
        self.train = Dataset(load(data_dir, "train")[0], deterministic)
        self.test = Dataset(load(data_dir, "test")[0], deterministic)
Example #6
colorImg = True
generation_scale = True
z_generated = num_classes
# evaluation
vis_epoch = 10
eval_epoch = 1
batch_size_eval = 200


'''
data
'''
def rescale(mat):
    return np.cast[theano.config.floatX](mat)

train_x, train_y = cifar10_data.load('/home/chongxuan/mfs/data/cifar10/','train')
eval_x, eval_y = cifar10_data.load('/home/chongxuan/mfs/data/cifar10/','test')

train_y = np.int32(train_y)
eval_y = np.int32(eval_y)
train_x = rescale(train_x)
eval_x = rescale(eval_x)
x_unlabelled = train_x.copy()

rng_data = np.random.RandomState(ssl_data_seed)
inds = rng_data.permutation(train_x.shape[0])
train_x = train_x[inds]
train_y = train_y[inds]
# split off a class-balanced labelled subset, one loop pass per class
x_labelled = []
y_labelled = []
for j in range(num_classes):
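    # plausible loop body (an assumption, not the original code;
    # n_per_class is a hypothetical name for the per-class labelled budget)
    x_labelled.append(train_x[train_y == j][:n_per_class])
    y_labelled.append(train_y[train_y == j][:n_per_class])
x_labelled = np.concatenate(x_labelled, axis=0)
y_labelled = np.concatenate(y_labelled, axis=0)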
Example #7
def __init__(self, data_dir, deterministic=False):
    self.train = Dataset(load(data_dir, "train")[0], deterministic)
    self.test = Dataset(load(data_dir, "test")[0], deterministic)
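Examples #5 and #7 wrap the raw arrays in a Dataset class that is not shown. A minimal sketch consistent with the constructor calls above (the class body is an assumption; deterministic presumably fixes the iteration order):

import numpy as np

class Dataset:
    def __init__(self, images, deterministic=False):
        self.images = images
        self.deterministic = deterministic

    def minibatches(self, batch_size, rng=np.random):
        # yield minibatches, in a fixed order when deterministic=True
        idx = np.arange(len(self.images))
        if not self.deterministic:
            rng.shuffle(idx)
        for i in range(0, len(idx) - batch_size + 1, batch_size):
            yield self.images[idx[i:i + batch_size]]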
Example #8
def main():
    # create output directories if they do not exist yet
    for d in (opt.save_img, opt.logs, opt.data_dir):
        if not os.path.isdir(d):
            os.mkdir(d)
    # record loss values, written to loss.txt at the end of training
    f = open('loss.txt', 'w')
    loss_res = []

    # Data
    trainx, trainy = cifar10_data.load(opt.data_dir, subset='train')
    testx, testy = cifar10_data.load(opt.data_dir, subset='test')

    # Model
    G = _G(num_classes=opt.num_classes)
    D = _D(num_classes=opt.num_classes)
    if use_cuda:
        D = torch.nn.DataParallel(D).cuda()
        G = torch.nn.DataParallel(G).cuda()
        cudnn.benchmark = True
    D.apply(weights_init)
    G.apply(weights_init)
    print('    G params: %.2fM, D params: %.2fM' %
          (sum(p.numel() for p in G.parameters()) / 1000000.0,
           sum(p.numel() for p in D.parameters()) / 1000000.0))
    optimizerD = optim.Adam(D.parameters(), lr=opt.lr, betas=(0.5, 0.999))
    optimizerG = optim.Adam(G.parameters(), lr=opt.lr, betas=(0.5, 0.999))
    T = Train(G, D, optimizerG, optimizerD, opt.num_classes)
    # build a class-balanced training subset (count images per class), then shuffle
    train = {}
    for i in range(10):
        train[i] = trainx[trainy == i][:opt.count]
    y_data = np.concatenate(
        [trainy[trainy == i][:opt.count] for i in range(10)], axis=0)
    x_data = np.concatenate([train[i] for i in range(10)], axis=0)
    ids = np.arange(x_data.shape[0])
    np.random.shuffle(ids)
    trainx = x_data[ids]
    trainy = y_data[ids]

    # datagen (the augmentation pipeline) is presumably defined above this excerpt
    datagen.fit(trainx)

    nr_batches_train = int(trainx.shape[0] / opt.train_batch_size)
    nr_batches_test = int(testx.shape[0] / opt.test_batch_size)
    # Train
    best_acc = 0.0
    weight_gen_loss = 0.0
    for epoch in range(opt.epochs):
        D_loss, G_loss, Train_acc = 0.0, 0.0, 0.0
        index = 0
        if epoch == opt.G_epochs:
            weight_gen_loss = 1.0
        # train G
        if epoch < opt.G_epochs:
            for x_batch, y_batch in gen_minibatches(
                    trainx, trainy, batch_size=opt.train_batch_size,
                    shuffle=True):
                gen_y = torch.from_numpy(
                    np.int32(
                        np.random.choice(opt.num_classes,
                                         (y_batch.shape[0], )))).long()
                x_batch = torch.from_numpy(x_batch)
                y_batch = torch.from_numpy(y_batch).long()
                d_loss, train_acc = T.train_batch_disc(x_batch, y_batch, gen_y,
                                                       weight_gen_loss)
                D_loss += d_loss
                Train_acc += train_acc
                for j in range(2):
                    gen_y_ = y_batch
                    G_loss += T.train_batch_gen(x_batch, gen_y_,
                                                weight_gen_loss)
        else:
            # train Classifier
            for x_batch, y_batch in datagen.flow(
                    trainx, trainy, batch_size=opt.train_batch_size):
                index += 1
                gen_y = torch.from_numpy(
                    np.int32(
                        np.random.choice(opt.num_classes,
                                         (y_batch.shape[0], )))).long()
                x_batch = torch.from_numpy(x_batch)
                y_batch = torch.from_numpy(y_batch).long()
                d_loss, train_acc = T.train_batch_disc(x_batch, y_batch, gen_y,
                                                       weight_gen_loss)
                D_loss += d_loss
                Train_acc += train_acc
                if index == nr_batches_train:
                    break
        D_loss /= nr_batches_train
        G_loss /= (nr_batches_train * 2)
        Train_acc /= nr_batches_train
        # test
        test_acc = 0.0
        if epoch > opt.G_epochs and epoch % 100 == 0:
            adjust_learning_rate(optimizerD, 0.1)
        for x_batch, y_batch in gen_minibatches(testx,
                                                testy,
                                                batch_size=opt.test_batch_size,
                                                shuffle=False):
            x_batch = torch.from_numpy(x_batch)
            y_batch = torch.from_numpy(y_batch).long()
            test_acc += T.test(x_batch, y_batch)
        test_acc /= nr_batches_test
        if test_acc > best_acc:
            best_acc = test_acc
        # save generated images while the generator is still being trained
        if epoch <= opt.G_epochs:
            T.save_png(opt.save_img, epoch)
        if (epoch + 1) % opt.fre_print == 0:
            msg = ("Iteration %d, D_loss = %.4f, G_loss = %.4f, "
                   "train acc = %.4f, test acc = %.4f, best acc = %.4f, lr = %.8f"
                   % (epoch, D_loss, G_loss, Train_acc, test_acc, best_acc,
                      optimizerD.param_groups[0]['lr']))
            print(msg)
            loss_res.append(msg + '\n')
        # log scalars for TensorBoard
        writer.add_scalar('train/D_loss', D_loss, epoch)
        writer.add_scalar('train/G_loss', G_loss, epoch)
        writer.add_scalar('train/acc', Train_acc, epoch)
        writer.add_scalar('test/acc', test_acc, epoch)
    f.writelines(loss_res)
    f.close()
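The training loop relies on two helpers defined outside this excerpt, gen_minibatches and adjust_learning_rate. Minimal sketches consistent with how they are called above (the bodies are assumptions):

import numpy as np

def gen_minibatches(x, y, batch_size, shuffle=False):
    # yield aligned (x, y) minibatches, optionally in shuffled order
    idx = np.arange(x.shape[0])
    if shuffle:
        np.random.shuffle(idx)
    for i in range(0, x.shape[0] - batch_size + 1, batch_size):
        sel = idx[i:i + batch_size]
        yield x[sel], y[sel]

def adjust_learning_rate(optimizer, factor):
    # scale the learning rate of every parameter group by `factor`
    for group in optimizer.param_groups:
        group['lr'] *= factor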
Example #9
    # (an `if image_dir == 'mnist':` branch opens above this excerpt)
    testx = np.reshape(testx, (-1, 28, 28, channel))
    # pad 28x28 -> 32x32 with the background value -1 (pixels lie in [-1, 1])
    npad = ((0, 0), (2, 2), (2, 2), (0, 0))
    trainx = np.pad(trainx,
                    pad_width=npad,
                    mode='constant',
                    constant_values=-1)
    testx = np.pad(testx, pad_width=npad, mode='constant', constant_values=-1)
elif image_dir == 'svhn':
    channel = 3
    trainx, trainy = svhn_data.load('./data/svhn', 'train')
    testx, testy = svhn_data.load('./data/svhn', 'test')
    trainx = rescale(trainx)
    testx = rescale(testx)
else:
    channel = 3
    trainx, trainy = cifar10_data.load("./data/cifar10", subset='train')
    testx, testy = cifar10_data.load("./data/cifar10", subset='test')
    # NCHW -> NHWC, the layout TensorFlow convolutions expect by default
    trainx = np.transpose(trainx, [0, 2, 3, 1])
    testx = np.transpose(testx, [0, 2, 3, 1])
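rescale is defined above this excerpt. Given that the MNIST branch pads with the constant -1, the data evidently lives in [-1, 1], so a consistent sketch (an assumption, mirroring the SVHN rescale in Example #3) is:

import numpy as np

def rescale(mat):
    # map uint8 pixels [0, 255] into [-1, 1] as float32
    return ((-127.5 + mat) / 127.5).astype(np.float32)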

print(trainx.shape)
print(np.max(trainx), np.min(trainx))


def generator_conv(z, reuse=False):
    with tf.variable_scope('generator') as scope:
        if reuse:
            scope.reuse_variables()
        train = ly.fully_connected(z,
                                   4 * 4 * 512,
                                   activation_fn=tf.nn.relu,
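#                                  the call is truncated at the end of the
#                                  excerpt; a plausible DCGAN-style
#                                  continuation (all values are assumptions):
#                                  normalizer_fn=ly.batch_norm)
#        train = tf.reshape(train, (-1, 4, 4, 512))
#        # ... ly.conv2d_transpose upsampling blocks would follow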