from dcgan import DCGAN, normalize_sample_to_signal
from model import build_multi_input_main_residual_network
from data import dataSet
import numpy as np
import matplotlib.pyplot as plt

# Get the condition labels used to condition the generator.
data = dataSet()
condition_labels = data.get_condition_number_data()

# Load the trained DCGAN; EPOCH = None means no specific epoch is requested.
dcgan = DCGAN()
EPOCH = None
dcgan.load_model(epoch=EPOCH)
print(condition_labels.shape)

# Generate one synthetic signal per condition label and map it back to signal scale.
noise = np.random.normal(0, 1, (condition_labels.shape[0], dcgan.latent_dim))
gen_imgs = dcgan.generator.predict([noise, condition_labels])
gen_imgs = normalize_sample_to_signal(gen_imgs)

# Load the residual network used for the srf regression target.
model = build_multi_input_main_residual_network(32, 500, 8, 1, loop_depth=20)
train_name = 'Resnet_block_REDUCE_AE_%s' % (20)
MODEL_CHECK_PT = "%s.kerascheckpts" % (train_name)
model.load_weights(MODEL_CHECK_PT)
# print(model.evaluate(x, y))

# Evaluate on measured test data and predict on the generated signals.
signal, srf = data.get_test_show_data()
srf_pred = model.predict(gen_imgs)
print(model.metrics_names, model.evaluate(signal, srf))

fig = plt.figure()
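# Illustrative plotting sketch (assumption): the script above ends with an empty
# figure, so how the predictions are visualized is not shown in the original.
# One plausible way to finish it is to overlay the measured srf with the srf
# predicted from measured and from generated signals. This is a sketch, not the
# original plotting code.
srf_pred_real = model.predict(signal)
plt.plot(srf, label="measured srf")
plt.plot(srf_pred_real, label="predicted from measured signal")
plt.plot(srf_pred, label="predicted from generated signal")
plt.legend()
plt.show()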
import numpy as np
from data import dataSet
from model import KDE
import pandas as pd
import sys
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('dataSetName', nargs=1,
                        help='MNIST OR CIFAR, case insensitive')
    args = parser.parse_args()
    dataSetName = "".join(args.dataSetName)

    data = dataSet(dataSetName)
    data.showSamples(numRow=10, numCol=10, imgPath=dataSetName)

    grid = [0.05, 0.08, 0.1, 0.2, 0.5, 1, 1.5, 2]
    likelihoodList = []
    for sigma in grid:
        print("\n\nEVALUATING GAUSSIAN KDE ON {} DATA AT SIGMA {} \n".format(
            dataSetName.upper(), sigma))
        model = KDE(data.train[0], sigma)
        likelihood = model(data.val[0])
        likelihoodList.append(likelihood)

    saveDict = {"sigma": grid, "likelihood": likelihoodList}
    df = pd.DataFrame.from_dict(saveDict)
    df.to_csv("result/{}.csv".format(dataSetName))
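# Minimal sketch (assumption) of what model.KDE is expected to compute: a Gaussian
# kernel density estimate fit on flattened training images, returning the mean
# log-likelihood of a validation batch. The real class lives in model.py; names,
# batching, and the flattening convention here are assumptions for illustration.
import numpy as np
from scipy.special import logsumexp

class GaussianKDESketch:
    def __init__(self, train, sigma):
        self.train = train.reshape(len(train), -1).astype(np.float64)  # (N, D)
        self.sigma = sigma

    def __call__(self, val, batch=256):
        val = val.reshape(len(val), -1).astype(np.float64)  # (M, D)
        n, d = self.train.shape
        # Normalizer of the isotropic Gaussian mixture: log N + (D/2) log(2*pi*sigma^2)
        log_const = np.log(n) + 0.5 * d * np.log(2 * np.pi * self.sigma ** 2)
        log_ps = []
        for start in range(0, len(val), batch):  # batch to keep memory bounded
            chunk = val[start:start + batch]
            # Squared distances between each validation point and every training point
            dists = ((chunk[:, None, :] - self.train[None, :, :]) ** 2).sum(-1)
            log_ps.append(logsumexp(-dists / (2 * self.sigma ** 2), axis=1) - log_const)
        return float(np.concatenate(log_ps).mean())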
def save_imgs(self, epoch):
    r, c = 2, 5
    if epoch is None:
        epoch = ""

    # Load the dataset
    from data import dataSet
    # x, y = tool_wear_dataset.get_recoginition_data_in_class_num(class_num=50)
    data = dataSet()
    x, y = data.get_reinforced_data()
    x = x[600:, :, 0:6]
    y = data.get_reinforced_condition_data()[600:, :]
    print(x.shape, y.shape)

    # Sample r*c random real signals and generate fakes conditioned on the same labels.
    noise = np.random.normal(0, 1, (r * c, self.latent_dim))
    another_idx = np.random.randint(0, y.shape[0], r * c)
    sampled_labels = y[another_idx]
    X_train = x[another_idx]
    print(sampled_labels.shape, sampled_labels)

    gen_imgs = self.generator.predict([noise, sampled_labels])
    gen_imgs = normalize_sample_to_signal(gen_imgs)

    import os

    # Plot the original (real) signals, one figure per channel.
    for channel in range(6):
        fig, axs = plt.subplots(r, c)
        directory_path = os.path.join("images", "%s" % (channel + 1))
        if not os.path.exists(directory_path):
            os.mkdir(directory_path)
        cnt = 0
        for i in range(r):
            for j in range(c):
                # Force in Y
                axs[i, j].plot(X_train[cnt, :, channel])
                # axs[i, j].plot(gen_imgs[cnt, :, channel], label="%d" % (cnt))
                axs[i, j].set_title("%d in No. %d" % (sampled_labels[cnt, 0], cnt))
                # axs[i, j].set_title("%d " % (sampled_labels[cnt][0]))
                # axs[i, j].legend()
                # axs[i, j].axis('off')
                cnt += 1
        fig.savefig("images/%d/channel_%d_epoch_%s_ORI.svg"
                    % (channel + 1, channel + 1, epoch))
        plt.close("all")

    # Plot the generated (fake) signals, one figure per channel.
    for channel in range(6):
        fig, axs = plt.subplots(r, c)
        directory_path = os.path.join("images", "%s" % (channel + 1))
        if not os.path.exists(directory_path):
            os.mkdir(directory_path)
        cnt = 0
        for i in range(r):
            for j in range(c):
                # Force in Y
                # axs[i, j].plot(X_train[cnt, :, channel])
                axs[i, j].plot(gen_imgs[cnt, :, channel], label="%d" % (cnt))
                axs[i, j].set_title("%d in No. %d" % (sampled_labels[cnt, 0], cnt))
                # axs[i, j].set_title("%d " % (sampled_labels[cnt][0]))
                # axs[i, j].legend()
                # axs[i, j].axis('off')
                cnt += 1
        fig.savefig("images/%d/channel_%d_epoch_%s_GEN.svg"
                    % (channel + 1, channel + 1, epoch))
        plt.close("all")
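# Usage sketch (assumption): save_imgs is called from train() every 2000 epochs,
# but it can also be run on a reloaded checkpoint to eyeball generator quality.
# The epoch argument only tags the output filenames under images/<channel>/;
# the checkpoint number below is hypothetical.
from dcgan import DCGAN

dcgan = DCGAN()
dcgan.load_model(epoch=40000)  # hypothetical checkpoint epoch
dcgan.save_imgs(epoch=40000)   # writes channel_*_epoch_40000_{ORI,GEN}.svg per channel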
import torch
import torch.utils.data

import data
from model import AlexNet  # NOTE: assumed import path for AlexNet; adjust to the actual module

# Hyperparameters and run configuration
test_size = 500
valid_size = 500
batch_size = 50
lr = 0.0005
epochs = 20
save_dir = "/home/zzhang/test/experiment"
early_stop = 0.005
snapshot = 0
device = 0
IfTest = False
IfCuda = True

if __name__ == '__main__':
    # load data
    train1, test, valid = data.generatSet(test_size, valid_size)
    trainset = data.dataSet(train1)
    validset = data.dataSet(valid)
    testset = data.dataSet(test)

    # model
    mymodel = AlexNet()
    # if IfTest:
    #     print('\nLoading model from {}...'.format(snapshot))
    #     mymodel.load_state_dict(torch.load(snapshot))

    if IfCuda:
        torch.cuda.set_device(device)
        mymodel = mymodel.cuda()

    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                                              shuffle=True)
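    # --- Illustrative continuation (assumption) ------------------------------
    # The original script stops right after building trainloader. Below is a
    # minimal sketch of the training loop that the configuration above (lr,
    # epochs, early_stop, save_dir) suggests; the optimizer, loss, checkpoint
    # naming, and early-stop policy are assumptions, not the original code.
    import os
    import torch.nn as nn
    import torch.optim as optim

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(mymodel.parameters(), lr=lr)

    for epoch in range(epochs):
        running_loss = 0.0
        for inputs, targets in trainloader:
            if IfCuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            loss = criterion(mymodel(inputs), targets)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        avg_loss = running_loss / len(trainloader)
        print("epoch %d, train loss %.4f" % (epoch, avg_loss))
        torch.save(mymodel.state_dict(), os.path.join(save_dir, "epoch_%d.pt" % epoch))
        if avg_loss < early_stop:  # stop once the loss falls below the threshold
            break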
def train(self, epochs, batch_size=128, save_interval=50):
    # Load the dataset
    from data import dataSet
    # x, y = tool_wear_dataset.get_recoginition_data_in_class_num(class_num=50)
    data = dataSet()
    x, sf_raw_data = data.get_reinforced_data()
    # Only simulate force
    x = x[600:, :, 0:6]
    sf_raw_data = sf_raw_data[600:]
    y = data.get_reinforced_condition_data()[600:, :]
    # x = x[:, ::10, :]

    (X_train, y_train) = x, y
    X_train = normalize_signal_to_sample(X_train)
    # # Rescale -1 to 1
    # X_train = X_train / 127.5 - 1.
    # X_train = np.expand_dims(X_train, axis=3)

    # Adversarial ground truths
    valid = np.ones((batch_size, 1))
    fake = np.zeros((batch_size, 1))

    for epoch in range(epochs):
        # Adversarial ground truths
        # replace them with random label -> # 7
        if not (epoch % 3 == 0 and epoch % 100 != 0):
            # Soft adjustment
            valid = np.ones((batch_size, 1))
            fake = np.zeros((batch_size, 1))
        else:
            valid = generate_random_arr_between(0, 0.3, (batch_size, 1))
            fake = generate_random_arr_between(0.7, 1.2, (batch_size, 1))

        # ---------------------
        #  Train Discriminator
        # ---------------------

        # Select a random half of images
        # fake_labels = generate_random_arr_between(30,210,batch_size)
        idx = np.random.randint(0, X_train.shape[0], batch_size)
        imgs, labels = X_train[idx], y_train[idx]
        # labels = data.get_reinforced_condition_data()
        another_idx = np.random.randint(0, X_train.shape[0], batch_size)
        fake_labels = y_train[another_idx]
        sf_labels = sf_raw_data[another_idx]

        # Sample noise and generate a batch of new images
        noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
        gen_imgs = self.generator.predict([noise, fake_labels])

        # Train the discriminator (real classified as ones and generated as zeros)
        # (imgs.shape, gen_imgs.shape)
        # make the labels noisy for the discriminator
        if epoch % 5 == 0 and epoch % 100 != 0:
            # flip the right to wrong
            d_loss_real = self.discriminator.train_on_batch(
                [imgs, labels], [fake, sf_labels])
            d_loss_fake = self.discriminator.train_on_batch(
                [gen_imgs, fake_labels], [valid, sf_labels])
        else:
            d_loss_real = self.discriminator.train_on_batch(
                [imgs, labels], [valid, sf_labels])
            d_loss_fake = self.discriminator.train_on_batch(
                [gen_imgs, fake_labels], [fake, sf_labels])
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
        # print(self.discriminator.metrics_names)

        # ---------------------
        #  Train Generator
        # ---------------------

        # Condition on labels (tool wear 0-300)
        # sampled_labels = np.random.randint(0, 50, batch_size).reshape(-1, 1)
        # sampled_labels = generate_random_arr_between(30, 210, batch_size)
        another_idx = np.random.randint(0, X_train.shape[0], batch_size)
        sampled_labels = y_train[another_idx]

        # Train the generator (wants discriminator to mistake images as real)
        for i in range(4):
            g_loss = self.combined.train_on_batch([noise, sampled_labels], valid)

        # Plot the progress
        # print(self.discriminator.metrics_names)
        # print(d_loss_real,"\n",d_loss_fake,"\n",d_loss)
        if epoch % 100 == 0:
            print(
                "%d [D all loss %f, recognize loss: %f, sf_logcosh %f , acc.: %.2f%%(True %.2f%%, Fake %.2f%%)] [G loss: %f]"
                % (epoch + self.PRE_EPOCH, d_loss[0], d_loss[1], d_loss[2],
                   100 * d_loss[3], 100 * d_loss_real[3], 100 * d_loss_fake[3],
                   g_loss))

        # If at save interval => save generated image samples
        if epoch % save_interval == 0:
            self.save_model(epoch=epoch + self.PRE_EPOCH)
        if epoch % 2000 == 0:
            self.save_imgs(epoch + self.PRE_EPOCH)
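# Minimal sketches (assumptions) of the helpers the training loop relies on.
# The real implementations live in dcgan.py; the scaling constant below is a
# placeholder, not the original value.
import numpy as np

def generate_random_arr_between(low, high, shape):
    # Uniform noise in [low, high), used for the soft / noisy adversarial labels.
    return np.random.uniform(low, high, shape)

# Hypothetical scale used by the normalization pair below.
SIGNAL_SCALE = 1.0

def normalize_signal_to_sample(signal):
    # Map raw sensor signals into the range the generator/discriminator expect.
    return signal / SIGNAL_SCALE

def normalize_sample_to_signal(sample):
    # Inverse mapping: bring generated samples back to physical signal units.
    return sample * SIGNAL_SCALE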