def _build_network(self, dim_a, params):
        """Build the TF1 graph: an autoencoder whose latent code feeds a
        fully-connected policy head, plus their two training ops.

        Args:
            dim_a: output dimensionality of the policy head.
            params: dict providing 'fc_layers_neurons',
                'loss_function_type' and 'learning_rate'.

        Side effects: sets self.loss_ae, self.ae_output, self.y,
        self.train_policy, self.train_ae, self.sess and self.saver.
        """
        # Initialize graph
        with tf.variable_scope('base'):
            # Build autoencoder
            # Grayscale input batch: (N, image_size, image_size, 1).
            ae_inputs = tf.placeholder(
                tf.float32, (None, self.image_size, self.image_size, 1),
                name='input')
            self.loss_ae, latent_space, self.ae_output = autoencoder(ae_inputs)

            # Build fully connected layers
            self.y, loss_policy = fully_connected_layers(
                tf.contrib.layers.flatten(latent_space), dim_a,
                params['fc_layers_neurons'], params['loss_function_type'])

        # Policy training updates only variables under the 'base' scope;
        # the AE op below has no var_list, so it updates all trainables —
        # NOTE(review): presumably intentional, verify against training code.
        variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'base')
        self.train_policy = tf.train.GradientDescentOptimizer(
            learning_rate=params['learning_rate']).minimize(loss_policy,
                                                            var_list=variables)

        self.train_ae = tf.train.AdamOptimizer(
            learning_rate=params['learning_rate']).minimize(self.loss_ae)

        # Initialize tensorflow
        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)
        self.saver = tf.train.Saver()
Esempio n. 2
0
def build_autoencoder(config):
    """Create the autoencoder with its loss and optimizer.

    Args:
        config: dict with at least an 'lr' (learning-rate) entry.

    Returns:
        (model, loss_function, optimizer) — the autoencoder, an MSE
        criterion, and an Adam optimizer over the model parameters
        with a small L2 penalty.
    """
    net = autoencoder()
    criterion = nn.MSELoss()
    adam = torch.optim.Adam(
        net.parameters(), lr=config['lr'], weight_decay=1e-5)
    return net, criterion, adam
Esempio n. 3
0
    def run(self, train=True, show_performance=True):
        """Train and/or demo the convolutional autoencoder (TF1).

        Args:
            train: when True, run self.epoch_num epochs over self.database.
            show_performance: when True, plot 5 reconstructions next to
                their 5 input images via matplotlib.
        """
        batch_per_ep = self.database.shape[
            0] // self.batch_size  # calculate the number of batches per epoch
        graph = tf.Graph()
        sess = tf.Session(graph=graph)

        with graph.as_default():
            loss, train_op, ae_inputs, ae_output = autoencoder(
                self.lr)  # create the network
            init = tf.global_variables_initializer()
            saver = tf.train.Saver()
            sess.run(init)

            # Optionally warm-start from a previously saved checkpoint.
            if self.use_pre_trained_weights:
                saver.restore(sess, self.graph_loc)

            if train:
                for ep in range(self.epoch_num):  # epochs loop
                    for batch_n in range(batch_per_ep):  # batches loop
                        batch_img = self._next_batch(
                            self.database, self.batch_size)  # read a batch
                        # Reshape flat samples to (N, H, W, 1) grayscale.
                        batch_img = batch_img.reshape(
                            (-1, self.image_size, self.image_size, 1))
                        _, c, outputs = sess.run(
                            [train_op, loss, ae_output],
                            feed_dict={ae_inputs: batch_img})
                        print('Epoch: {} - cost= {:.5f}'.format((ep + 1), c))
                        print('Batch progress:',
                              '%.3f' % (batch_n / batch_per_ep * 100), '%')

                        # NOTE(review): checkpoint is written every batch,
                        # not every epoch — confirm this is intended.
                        if self.save_graph:
                            saver.save(sess, self.graph_loc + 'new')

            if show_performance:
                # test the trained network
                batch_img = self._next_batch(self.database,
                                             self.batch_size)  # read a batch
                batch_img = batch_img.reshape(
                    (-1, self.image_size, self.image_size, 1))
                recon_img = sess.run([ae_output],
                                     feed_dict={ae_inputs: batch_img})[0]

                # plot the reconstructed images and their ground truths (inputs)
                plt.figure(1)
                plt.title('Reconstructed Images')
                for i in range(5):
                    plt.subplot(1, 5, i + 1)
                    plt.imshow(recon_img[i, ..., 0], cmap='gray')
                plt.figure(2)
                plt.title('Input Images')
                for i in range(5):
                    plt.subplot(1, 5, i + 1)
                    plt.imshow(batch_img[i, ..., 0], cmap='gray')
                plt.show()
Esempio n. 4
0
def train_autoencoder(Autoencoder, Superclass, Old_superclass):
    """Retrain the per-superclass autoencoders whose membership changed.

    A superclass is refreshed when its key is new or its class list
    differs from `Old_superclass`.

    Args:
        Autoencoder: dict of superclass key -> model; mutated in place.
        Superclass: dict of superclass key -> current class list.
        Old_superclass: previous snapshot of `Superclass`.

    Returns:
        The (mutated) `Autoencoder` dict.
    """
    # ================== used to train the new encoder ==================
    print('\n=========== refesh the autoencoders ===========')
    # `key` instead of `dict` (the original shadowed the builtin), and a
    # real boolean instead of the 'true'/'false' string flags.
    for key in Superclass:
        refresh = (key not in Old_superclass
                   or Superclass[key] != Old_superclass[key])
        if refresh:
            print('\nrefeshing the autoencoder:' + key)
            Autoencoder[key] = autoencoder(args)
            if cf.use_cuda:
                Autoencoder[key].cuda()
                cudnn.benchmark = True
            for epoch in range(args.num_epochs_train):
                Autoencoder[key].train()
                required_train_loader = get_dataLoder(args,
                                                      classes=Superclass[key],
                                                      mode='Train',
                                                      encoded=True,
                                                      one_hot=False)
                param = list(Autoencoder[key].parameters())
                optimizer, lr = get_optim(param,
                                          args,
                                          mode='preTrain',
                                          epoch=epoch)
                for batch_idx, (inputs,
                                targets) in enumerate(required_train_loader):
                    # Cap the number of iterations per epoch.
                    if batch_idx >= args.num_test:
                        break
                    if cf.use_cuda:
                        inputs = inputs.cuda()  # GPU settings
                    optimizer.zero_grad()
                    inputs = Variable(inputs)
                    reconstructions, _ = Autoencoder[key](inputs)
                    loss = cross_entropy(reconstructions, inputs)
                    loss.backward()  # Backward Propagation
                    optimizer.step()  # Optimizer update
                    sys.stdout.write('\r')
                    sys.stdout.write(
                        'Refreshing autoencoder:' + key +
                        ' with Epoch [%3d/%3d] Iter [%3d]\t\t Loss: %.4f' %
                        (epoch + 1, args.num_epochs_train, batch_idx + 1,
                         loss.item()))
                    sys.stdout.flush()
            # NOTE(review): `loss` is unbound here if every loader was
            # empty — confirm the loaders always yield at least one batch.
            print('\nautoencoder model:' + str(key) +
                  ' is constrcuted with final loss:' + str(loss.item()))
    return Autoencoder
Esempio n. 5
0
def train_autoencoder(data):
    """Train the Keras autoencoder to reconstruct `data['x_train']`.

    Trains for a fixed budget of 100k steps split into 1k-step epochs
    on an infinite dataset mapping x_train -> x_train.

    Args:
        data: dict with 'n_train' (sample count) and 'x_train' (inputs).

    Returns:
        (model, encoder, decoder) as produced by `m.autoencoder`.
    """
    # (removed unused local `image_shape`)
    training_steps = 100000
    steps_per_epoch = 1000
    epochs = training_steps // steps_per_epoch

    model, encoder, decoder = m.autoencoder(image_size, latent_dims=6)

    model.summary()

    model.compile(optimizer='adam', loss='mse')

    # Inputs double as targets: classic reconstruction objective.
    dataset = make_infinite_dataset(data["n_train"],
                                    (data["x_train"], data["x_train"]))

    model.fit(dataset, epochs=epochs, steps_per_epoch=steps_per_epoch)

    return model, encoder, decoder
Esempio n. 6
0
z_dimension = 64  # latent-space size passed to models.autoencoder below


def plot_embedding(data, label, title):
    """Scatter-plot a 2-D embedding, one point per row of `data`.

    `label` is currently unused; min/max are computed for the optional
    normalization kept commented out below.
    """
    x_min, x_max = np.min(data, 0), np.max(data, 0)
    # data = (data - x_min) / (x_max - x_min)
    plt.figure(figsize=(4, 4))

    # One scatter call per point (matches the original per-point styling).
    for point in data:
        plt.scatter(point[0], point[1])
    plt.title(title)
    plt.show()


# Create the model object and load pretrained weights.
AE = models.autoencoder(dimension=z_dimension).to(device)
AE.load_state_dict(torch.load('./AE.pth'))

# Features and labels are plain CSV matrices on disk.
feature = np.loadtxt("./data/feature.csv", delimiter=",")
label = np.loadtxt("./data/label.csv", delimiter=",")

# Keep only the first 1000 samples for the t-SNE visualization.
data = feature[0:1000, :]
label = label[0:1000]

# Encode: each 256-dim row is viewed as a 1x16x16 image before the AE.
reduction_data, _ = AE(
    torch.from_numpy(data).clone().detach().type(torch.FloatTensor).view(
        1000, 1, 16, 16).to(device))

reduction_data = reduction_data.cpu().detach().numpy()

tsne_original = TSNE(n_components=2, init='pca', random_state=0)
Esempio n. 7
0
def train_test_autoencoder(newclasses, Autoencoder):
    """Train an autoencoder for a new class, then assign it a superclass.

    The new class's AE is trained, every existing AE (including the new
    one) is scored on the new class's validation data, the temporary AE
    is removed, and the most loss-similar existing class above the
    relative threshold `args.rel_th` is chosen as the superclass.

    Args:
        newclasses: identifier of the new class (stringified as the key).
        Autoencoder: dict of class key -> AE model; mutated in place.

    Returns:
        (Autoencoder, test_result) where test_result is the chosen class
        key, or '' when no match clears the threshold.
    """
    # ================== used to train the new encoder ==================
    Autoencoder[str(newclasses)] = autoencoder(args)
    if cf.use_cuda:
        Autoencoder[str(newclasses)].cuda()
        cudnn.benchmark = True
    for epoch in range(args.num_epochs_train):
        Autoencoder[str(newclasses)].train()
        required_train_loader = get_dataLoder(args,
                                              classes=[newclasses],
                                              mode='Train',
                                              encoded=True,
                                              one_hot=True)
        param = list(Autoencoder[str(newclasses)].parameters())
        optimizer, lr = get_optim(param, args, mode='preTrain', epoch=epoch)
        print('\n==> Epoch #%d, LR=%.4f' % (epoch + 1, lr))
        for batch_idx, (inputs, targets) in enumerate(required_train_loader):
            # Cap iterations per epoch.
            if batch_idx >= args.num_test:
                break
            if cf.use_cuda:
                inputs = inputs.cuda()  # GPU settings
            optimizer.zero_grad()
            inputs = Variable(inputs)
            reconstructions, _ = Autoencoder[str(newclasses)](inputs)
            loss = cross_entropy(reconstructions, inputs)
            loss.backward()  # Backward Propagation
            optimizer.step()  # Optimizer update
            sys.stdout.write('\r')
            sys.stdout.write(
                'Train autoencoder:' + str(newclasses) +
                ' with Epoch [%3d/%3d] Iter [%3d]\t\t Loss: %.4f' %
                (epoch + 1, args.num_epochs_train, batch_idx + 1, loss.item()))
            sys.stdout.flush()
    # =============== used to classify it and nut it in a proper superclass ==============
    # Always truthy here (an entry was just inserted above); kept for safety.
    if Autoencoder:
        Loss = {}
        Rel = {}
        print('\ntesting the new data in previous autoencoders')
        # `key` instead of `dict` — the original shadowed the builtin.
        for key in Autoencoder:
            Loss[key] = 0
            required_valid_loader = get_dataLoder(args,
                                                  classes=[int(key)],
                                                  mode='Valid',
                                                  encoded=True,
                                                  one_hot=True)
            for batch_idx, (inputs,
                            targets) in enumerate(required_valid_loader):
                if batch_idx >= args.num_test:
                    break
                if cf.use_cuda:
                    inputs = inputs.cuda()  # GPU settings
                inputs = Variable(inputs)
                reconstructions, _ = Autoencoder[key](inputs)
                loss = cross_entropy(reconstructions, inputs)
                # Accumulate validation loss as a plain numpy scalar.
                Loss[key] += loss.data.cpu().numpy(
                ) if cf.use_cuda else loss.data.numpy()
        print('\nAutoencoder:' + str(newclasses) +
              ' is been delated and wait for update for every ten classes')
        Autoencoder.pop(
            str(newclasses), '\nthe class:' + str(newclasses) +
            ' is not been delated as the dict not exist')
        highest = 0
        test_result = ''
        for key in Loss:
            # Relative similarity of each class's loss to the new class's.
            Rel[key] = 1 - abs(
                (Loss[key] - Loss[str(newclasses)]) / Loss[str(newclasses)])
            if Rel[key] >= highest and Rel[
                    key] >= args.rel_th and key != str(newclasses):
                highest = Rel[key]
                test_result = key
                print('\nnewclass:' + str(newclasses) +
                      ' is add to superclass with class:' + key)
        print('\nClass rel:', Rel, ' and Loss:', Loss)
        return Autoencoder, test_result
    else:
        # Fixed: the original returned the undefined name `_` here,
        # which raised NameError on this branch.
        return Autoencoder, ''
Esempio n. 8
0
# BEGAN-style GAN setup: the discriminator is an autoencoder.
batches_per_epoch = 150
batch_size = 16
gamma = .5  #between 0 and 1

#image parameters
img_size = 32  #Size of square image
channels = 3  #1 for grayscale

#Model parameters
z = 100  #Generator input
h = 128  #Autoencoder hidden representation
adam = Adam(lr=0.00005)  #lr: between 0.0001 and 0.00005
#In the paper, Adam's learning rate decays if M stalls. This is not implemented.

#Build models
generator = models.decoder(z, img_size, channels)
discriminator = models.autoencoder(h, img_size, channels)
gan = models.gan(generator, discriminator)

# All three models share the same L1 loss and Adam optimizer.
generator.compile(loss=models.l1Loss, optimizer=adam)
discriminator.compile(loss=models.l1Loss, optimizer=adam)
gan.compile(loss=models.l1Loss, optimizer=adam)

#Load data
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
dataGenerator = image.ImageDataGenerator(
    preprocessing_function=utils.dataRescale)
batchIterator = dataGenerator.flow(X_train, batch_size=batch_size)

# NOTE(review): `epochs` is not defined anywhere in this snippet —
# presumably set elsewhere in the file; verify before running.
trainer = train.GANTrainer(generator, discriminator, gan, batchIterator)
trainer.train(epochs, batches_per_epoch, batch_size, gamma)
Esempio n. 9
0
            data[i][k * M2 + j] = data1[k][j][i]
            data[i + N_each][k * M2 + j] = data2[k][j][i]
            data[i + int(N_each * 2)][k * M2 + j] = data3[k][j][i]
            data[i + int(N_each * 3)][k * M2 + j] = data4[k][j][i]

print(data.shape)

# Standardize every feature column to zero mean / unit variance.
for j in range(0, M):
    data[:, j] = stats.zscore(data[:, j])

# Either resume from a saved checkpoint or start a fresh model.
if restart == 1:
    model = torch.load(PATH)
    model.eval()
    print("Using the prvious model ...")
else:
    model = my_model.autoencoder(inputDim, outputDim)
    print("Using a new model ...")

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Assuming that we are on a CUDA machine, this should print a CUDA device:
print(device)
model.to(device)

criterion = nn.SmoothL1Loss()
optimizer = torch.optim.Adam(model.parameters(), lr=learningRate, amsgrad=True)

# Autoencoder: the data serves as both input and target.
inputs = Variable(torch.from_numpy(data).float())
labels = Variable(torch.from_numpy(data).float())

# NOTE(review): only `inputs` is moved to the device here — `labels`
# stays on CPU; confirm the training loop moves it too.
inputs = inputs.to(device)
print('Loading data')


dim = 2048
nb_classes = 30
epochs = 20
train_class_size = 2000
test_class_size = 2000
batch_size = 100

trainset_ = torch.load('../datasets/LSUN/trainset.pth')
testset_  = torch.load('../datasets/LSUN/testset.pth')
trainset = data_utils.TensorDataset(trainset_[0], trainset_[1])
testset = data_utils.TensorDataset(testset_[0], testset_[1])

rec_model = autoencoder(32)
state = torch.load('./models/AE_LSUN_32_code_size_'+str(nb_classes)+'_classes.pth')
rec_model.load_state_dict(state)

print('Reconstructing data')
trainset = reconstruct_dataset_with_AE(trainset, rec_model.cuda(), bs = 1000, real_data_ratio=0)  
testset = reconstruct_dataset_with_AE(testset, rec_model.cuda(), bs = 1000, real_data_ratio=0)

train_loader = data_utils.DataLoader(trainset, batch_size=batch_size, shuffle = True)
test_loader = data_utils.DataLoader(testset, batch_size=batch_size, shuffle = False)
#rec_model = autoencoder(32)
#state = torch.load('./models/AE_32_code_size_500_classes_2000_samples.pth')
#rec_model.load_state(state)
pretrained_model_dict = torch.load('../pretrained_models/batch_classifier_LSUN_30_classes.pth')
pretrained_model = new_models.Classifier_2048_features(30)
pretrained_model.load_state_dict(pretrained_model_dict)
def compute_quant(outdir, student_h_dim, use_bn):
    """Quantitatively evaluate a saved (student) autoencoder checkpoint.

    Loads the model from `outdir` and, using the module-level
    `train_loader`, `test_loader`, `mnist_model` and `batch_size`,
    measures:
      * classifier accuracy of `mnist_model` on train reconstructions,
      * reconstruction MSE on the test set,
      * "swap" accuracy — how often swapping the last two latent dims
        between samples transfers the classifier-predicted label.

    The original code used Python 2 print statements; they have been
    converted to print() calls so the function parses under Python 3.

    Args:
        outdir: directory containing 'teacher.pth'/'student.pth'.
        student_h_dim: latent dimensionality of the model to load.
        use_bn: load the batch-norm variant when True.

    Returns:
        (reconstruction_loss, swap_accuracy) as floats.
    """
    input_size = 784  # The image size = 28 x 28 = 784
    hidden_size = student_h_dim  # The number of nodes at the hidden layer
    num_classes = 10  # The number of output classes. In this case, from 0 to 9

    ### loading different pytorch version
    import torch._utils
    try:
        torch._utils._rebuild_tensor_v2
    except AttributeError:
        # Older torch builds lack _rebuild_tensor_v2; install a shim so
        # checkpoints saved by newer versions still deserialize.
        def _rebuild_tensor_v2(storage, storage_offset, size, stride,
                               requires_grad, backward_hooks):
            tensor = torch._utils._rebuild_tensor(storage, storage_offset,
                                                  size, stride)
            tensor.requires_grad = requires_grad
            tensor._backward_hooks = backward_hooks
            return tensor

        torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
    """# Create student model"""

    # h_dim == 2 means we are evaluating the teacher checkpoint itself.
    if student_h_dim == 2:
        fname = os.path.join(outdir, 'teacher.pth')
        student_model = autoencoder(h_dim=student_h_dim).cuda()

    else:
        fname = os.path.join(outdir, 'student.pth')
        if use_bn:
            student_model = autoencoder(h_dim=student_h_dim).cuda()
        else:
            student_model = autoencoder_nobatchnorm(h_dim=student_h_dim).cuda()

    print('loading student model', fname)

    checkpoint = torch.load(fname)
    student_model.load_state_dict(checkpoint, strict=False)

    #=========================
    # generate last_z
    student_model.eval()

    # student_model.train()
    # test_loader = train_loader

    accs_train = 0.0
    count = 0.0
    all_z = None
    all_y = None
    for data in train_loader:
        img, y = data
        img = img.view(img.size(0), -1)
        img = Variable(img).cuda()

        y = Variable(y).cuda()

        last_z = student_model.encoder(img)

        out = student_model.decoder(last_z)

        # Classifier accuracy on the reconstruction, not the input.
        scores = mnist_model(out)
        y_pred = scores.max(1)[1]

        acc = ((y_pred == y).sum().float() / float(batch_size)).item()

        accs_train += acc
        count += 1.0

        if all_z is None:
            all_z = last_z.data
            all_y = y.data
        else:
            all_z = torch.cat((all_z, last_z))
            all_y = torch.cat((all_y, y))
            #=========================

    #znp =  all_z.data.cpu()[:,-2:].numpy()
    ix = all_z.size(1) - 2  # index of the last two latent dims

    print('ix is ', ix)

    print('acc train is', accs_train / count)

    znp = all_z.data.cpu()[:, ix:ix + 2].numpy()
    ynp = all_y.data.cpu().numpy()

    ###########################################################################

    # run test
    all_test_z = None
    all_test_y = None

    all_flip_acc = 0.0
    count = 0.0
    count2 = 0.0
    all_ae_loss = 0.0

    # NOTE(review): train() mode during evaluation — batch-norm uses
    # batch statistics here; confirm this is intentional.
    student_model.train()

    for iter_, data in enumerate(test_loader):
        img, y = data
        img = img.view(img.size(0), -1)
        img = Variable(img).cuda()

        y = Variable(y).cuda()

        last_z = student_model.encoder(img)

        out = student_model.decoder(last_z)

        # print out.max(), out.min(), img.max(), img.min()

        ae_loss = ((out - img)**2).mean().item()
        all_ae_loss += ae_loss
        if all_test_z is None:
            all_test_z = last_z.data
            all_test_y = y.data
        else:
            all_test_z = torch.cat((all_test_z, last_z))
            all_test_y = torch.cat((all_test_y, y))
            #=========================

        # run on generated image
        scores = mnist_model(out)
        y_pred = scores.max(1)[1]
        y = y_pred

        # swap labels: pair each sample with a random other sample
        flip_idx = torch.randperm(batch_size).cuda()

        flipped_y = y.clone()[flip_idx]

        # Only pairs whose labels actually differ count toward swap acc.
        are_different = (flipped_y != y).nonzero()

        flipped_z = last_z.clone()

        # Swap the last two latent dims between paired samples.
        flipped_z[:, ix:ix + 2] = flipped_z[flip_idx, ix:ix + 2]

        flipped_out = student_model.decoder(flipped_z)

        flipped_scores = mnist_model(flipped_out)
        flipped_y_pred = flipped_scores.max(1)[1]

        flip_acc = ((
            flipped_y_pred[are_different] == flipped_y[are_different]))

        all_flip_acc += float(flip_acc.sum().item())
        count += float(flip_acc.size(0))
        count2 += 1

        # Dead branch (iter_ >= 0 always) — kept for manual debugging.
        if iter_ < 0:

            out_pic = out.clone().detach().view(-1, 28).cpu()
            flipped_out_pic = flipped_out.clone().detach().view(-1, 28).cpu()
            flipped_target_pic = out[flip_idx, :].clone().detach().view(
                -1, 28).cpu()
            outfname = os.path.join(outdir, 'eval_out_%i.png' % iter_)
            show2(out_pic * 255, outfname)
            outfname = os.path.join(outdir, 'eval_out_flipped_%i.png' % iter_)
            show2(flipped_out_pic * 255, outfname)
            outfname = os.path.join(outdir,
                                    'eval_flip_target_out_%i.png' % iter_)
            show2(flipped_target_pic * 255, outfname)
            print('saved out', outfname)
            print(y[0:20])
            print(flipped_y_pred[0:20])
            print(y[flip_idx][0:20])
            print(flip_idx[0:20])
            print((flipped_y_pred == y[flip_idx]).sum(), batch_size,
                  flip_acc.sum())

    print('FLIP TEST', all_flip_acc / count)
    # print 'REC TEST', all_ae_loss/count2

    rec = all_ae_loss / count2
    flip = all_flip_acc / count

    result_txt = 'QUANTITATIVE RESULTS >> %s latent code dimension: %i reconstruction:  %f swaps ok: %f' % (
        outdir, student_h_dim, rec, flip)

    print(result_txt)

    # with-block replaces the original open/write/close (no leak on error).
    with open(os.path.join(outdir, 'log_quant.txt'), 'w') as f:
        f.write(result_txt + '\n')

    znp_test = all_test_z.data.cpu()[:, ix:ix + 2].numpy()
    ynp_test = all_test_y.data.cpu().numpy()

    # test_score = regr.score(znp_test, ynp_test)
    # print 'test score', test_score

    return float(rec), float(flip)
Esempio n. 12
0
                            download=True)

# MNIST test split (no download flag: assumed already on disk).
test_dataset = dsets.MNIST(root='./data',
                           train=False,
                           transform=transforms.ToTensor())
""" --------- Train the Autoencoder ------------ """
input_size = 784  # The image size = 28 x 28 = 784
hidden_size = 2  # The number of nodes at the hidden layer
num_classes = 10  # The number of output classes. In this case, from 0 to 9
num_epochs = params.num_epochs  # The number of times entire dataset is trained
batch_size = 128  # The size of input data took for one iteration
learning_rate = params.lr  # The speed of convergence

# Log roughly 10 times over the whole run (at least once).
show_every = max(1, int(num_epochs / 10))
# initialize model
model = autoencoder(h_dim=hidden_size).cuda()

# Data loaders
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)

# initialize optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(),
                             lr=learning_rate,
                             weight_decay=params.wd)
# LSUN autoencoder training setup driven by the `opts` dict.
nb_classes = opts['nb_classes']
trainset, testset = {}, {}

trainset_ = torch.load('../datasets/LSUN/trainset.pth')
testset_ = torch.load('../datasets/LSUN/testset.pth')
trainset = data_utils.TensorDataset(trainset_[0], trainset_[1])
testset = data_utils.TensorDataset(testset_[0], testset_[1])

train_loader = data_utils.DataLoader(trainset,
                                     batch_size=opts['batch_size'],
                                     shuffle=True)
test_loader = data_utils.DataLoader(testset,
                                    batch_size=opts['batch_size'],
                                    shuffle=False)

# NOTE(review): `code_size` is not defined in this snippet — verify it
# is set earlier in the file.
autoencoder_model = autoencoder(code_size).cuda()
#autoencoder_model.apply(weights_init)
#classifier_model = torch.load(root+'batch_training/results/LSUN/models/LSUN_classifier_original.pth')

# Frozen pretrained classifier used to score reconstructions.
classifier_model_dict = torch.load(
    '../pretrained_models/batch_classifier_LSUN_30_classes.pth')
classifier_model = new_models.Classifier_2048_features(30)
classifier_model.load_state_dict(classifier_model_dict)
classifier_model = classifier_model.cuda()
criterion_AE = nn.MSELoss().cuda()
criterion_classif = nn.MSELoss().cuda()
#optimizer = torch.optim.SGD(autoencoder_model.parameters(), lr=opts['learning_rate'], momentum=0.99)
optimizer_main = torch.optim.Adam(autoencoder_model.parameters(),
                                  lr=opts['learning_rate'],
                                  betas=(0.9, 0.999),
                                  weight_decay=1e-5)
Esempio n. 14
0
    #	Corrupt(noise_stdev=0.1),
    Convolution2D([3, 3, 64, 128], activation=tf.nn.relu, scope='conv2_1'),
    Convolution2D([3, 3, 128, 128], activation=tf.nn.relu, scope='conv2_2'),
    Convolution2D([3, 3, 128, 128], activation=tf.nn.relu, scope='conv2_3'),
    #	MaxPooling(kernel_shape=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', scope='pool2'),
    #	Unfold(scope='unfold'),
    #	Corrupt(),
    #	FullyConnected(512, activation=tf.nn.relu, scope='fc1')
]

blocks = []

# Build the denoising autoencoder from the conv layer stack above.
model = models.autoencoder(layers,
                           blocks,
                           lr=1e-3,
                           lr_decay=0.1,
                           mbs=50,
                           pred_mbs=500,
                           seed=456)
model.start_session()
print("Training . . . ")
losses, params = model.train(X_train, X_test, epochs=30, early_stopping=5)
print("Denoising . . . ")
# Replace the datasets with their denoised versions.
X_train = model.denoise(X_train)
X_test = model.denoise(X_test)
model.stop_session()

print("Saving results . . . ")
np.save("results/vgg7_ae_losses.npy", losses)
np.savez("results/vgg7_ae_params.npz", **params)
np.save("results/vgg7_ae_X_train.npy", X_train)
Esempio n. 15
0
# Log roughly 10 times over the whole run (at least once).
show_every = max(1, int(num_epochs / 10))

# Data loaders
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)

# initialize teacher model
# Batch-norm-free variant only when the student is wide and the flag is off.
if params.student_h_dim > 3 and (not params.use_student_batchnorm):
    teacher_model = autoencoder_nobatchnorm(h_dim=hidden_size).cuda()
else:
    teacher_model = autoencoder(
        h_dim=hidden_size).cuda()  # first teacher always uses batchnorm

# initialize optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(teacher_model.parameters(),
                             lr=learning_rate,
                             weight_decay=params.wd)

# load teacher model
checkpoint = torch.load(params.teacher)
teacher_model.load_state_dict(checkpoint)
"""# Create student model"""
student_h_dim = params.student_h_dim

if params.use_student_batchnorm:
    student_model = autoencoder(h_dim=student_h_dim).cuda()