def __init__(self, num_items=3):
    """Set up the agent: load the image dataset and all pretrained models.

    num_items: number of objects in the scene (default 3).
    """
    self.num_items = num_items
    # Unpack the image groups: single items, item pairs, and hand/item shots.
    (self.purple, self.blue, self.orange,
     self.pu_bl, self.pu_or, self.bl_pu, self.bl_or, self.or_pu, self.or_bl,
     self.pu_hand, self.bl_hand, self.or_hand) = getImages()
    # Mixture-density environment model: maps a latent vector plus 4 extra
    # input dims (presumably an action encoding — TODO confirm) back to a
    # latent vector of the same size.
    self.env_model = MDN(num_components=NUM_COMPONENTS,
                         in_dim=LATENT_DIM + 4,
                         out_dim=LATENT_DIM,
                         model_path="models/env_model_0002.h5")
    # Pretrained VAE encoder/decoder, DQN controller and reward model.
    self.encoder = load_model("models/encoder_2001.h5")
    self.dqn_model = load_model('models/controller_0002.h5')
    self.decoder = load_model("models/decoder_2001.h5")
    self.r_model = load_model("models/r_model_0002.h5")
    # Current latent state estimate; populated later.
    self.s_bar = None
"""Interactively sample images, encode them with a pretrained encoder.

This chunk appears truncated: the `while True` loop's tail (whatever is done
with `encoded`) continues past the visible text.
"""
import random

import numpy as np  # FIX: np.asarray is used below but numpy was never imported
from sklearn.manifold import TSNE
from keras.models import load_model

from load_process_images import getImages

latent_dim = 4
IMAGE_WIDTH = 64
IMAGE_HEIGHT = 64
CHANNELS = 3

# Pretrained autoencoder halves.
decoder = load_model('models/decoder_1001.h5')
encoder = load_model('models/encoder_1001.h5')

# Materialize the image iterable once (idiomatic list() over a manual append loop).
imgs_list = list(getImages())

imgs = getImages(True)
im_size = 64

# Repeatedly pick a random image and encode it into the latent space.
while True:
    c = random.choice(range(0, imgs.shape[0]))
    img = imgs[c, :, :]
    print(c)
    #img = imgs[117]
    img = img.reshape((1, 64, 64, 3))
    encoded = np.asarray(encoder.predict(img))
    #encoded_logvar = encoded[1, :, :] #store log(var) vector for later
    #encoded = encoded[0, :, :] #get just means
def autoencode_images():
    """Encode the whole image set with the pretrained encoder.

    Returns the encoder's predictions as a numpy array.
    """
    imgs = getImages(return_single=True)
    enc = load_model('models/encoder_1001.h5')
    return np.asarray(enc.predict(imgs))
# NOTE(review): this chunk begins MID-STATEMENT — the opening of the first
# Conv2DTranspose call (its assignment target and leading arguments) was cut
# off above; its dangling tail is preserved below exactly as found.
activation='relu')(h_decoded)
# Decoder deconvolution stack: upsample back toward a 64x64 image.
deconv = Conv2DTranspose(64, (5, 5), strides=(2, 2), activation='relu')(deconv)
deconv = Conv2DTranspose(32, (6, 6), strides=(2, 2), activation='relu')(deconv)
# Final layer: 3 output channels; sigmoid keeps pixel values in [0, 1].
decoded_mean = Conv2DTranspose(3, (6, 6), strides=(2, 2), activation='sigmoid')(deconv)

# Plain autoencoder: encoder maps images to z, decoder reconstructs from z.
encoder = Model(img_input, z, name='encoder')
decoder = Model(decoder_input, decoded_mean, name='decoder')
decoder.summary()

reconstructed = decoder(z)
ae = Model(img_input, reconstructed, name='vae')

# NOTE(review): `opt` is created but never used — compile() hard-codes 'adam'.
opt = RMSprop(lr=0.00025)
ae.compile(optimizer='adam', loss=vae_loss)
ae.summary()

# Autoencoder training: the targets are the inputs themselves.
x_train = y_train = x_test = y_test = getImages(return_single=True)

try:
    history = ae.fit(x_train, x_train,
                     shuffle=True,
                     epochs=epochs,
                     batch_size=batch_size,
                     validation_data=(x_test, x_test))
finally:
    # `sss = 0` looks like a leftover dummy statement.
    # NOTE(review): the original indentation was lost in this copy — the three
    # save() calls may instead sit AFTER the try/finally; placing them inside
    # the finally (save even on interrupt) is an assumption to confirm.
    sss = 0
    encoder.save('models/encoder_1001.h5')
    decoder.save('models/decoder_1001.h5')
    ae.save('models/ae_1001.h5')

n = 15
# NOTE(review): this chunk depends on names defined above the visible text
# (h_decoded, img_input, decoder_input, z, z_mean, z_log_var, vae_loss,
# epochs, batch_size).
# Reshape the dense decoder output into a 1x1x1024 feature map, then upsample
# through transposed convolutions toward a 64x64 image.
h_decoded = Reshape((1, 1, 1024))(h_decoded)
deconv = Conv2DTranspose(128, (5, 5), strides=(2, 2), activation='relu')(h_decoded)
deconv = Conv2DTranspose(64, (5, 5), strides=(2, 2), activation='relu')(deconv)
deconv = Conv2DTranspose(32, (6, 6), strides=(2, 2), activation='relu')(deconv)
# Final layer: 3 output channels; sigmoid keeps pixel values in [0, 1].
decoded_mean = Conv2DTranspose(3, (6, 6), strides=(2, 2), activation='sigmoid')(deconv)

# VAE: the encoder exposes the distribution parameters (mean, log-variance);
# the decoder reconstructs an image from a sampled latent z.
encoder = Model(img_input, [z_mean, z_log_var], name='encoder')
decoder = Model(decoder_input, decoded_mean, name='decoder')
decoder.summary()

reconstructed = decoder(z)
vae = Model(img_input, reconstructed, name='vae')

# NOTE(review): `opt` is created but never used — compile() hard-codes 'adam'.
opt = RMSprop(lr=0.00025)
vae.compile(optimizer='adam', loss=vae_loss)
vae.summary()

# Autoencoder training: the targets are the inputs themselves.
x_train = y_train = x_test = y_test = getImages(return_single=True, use_all=False, val=False)

try:
    history = vae.fit(x_train, x_train,
                      shuffle=True,
                      epochs=epochs,
                      batch_size=batch_size,
                      validation_data=(x_test, x_test))
finally:
    # `sss = 0` looks like a leftover dummy statement.
    # NOTE(review): the original indentation was lost in this copy — the three
    # save() calls may instead sit AFTER the try/finally; placing them inside
    # the finally (save even on interrupt) is an assumption to confirm.
    sss = 0
    encoder.save('models/encoder_2001.h5')
    decoder.save('models/decoder_2001.h5')
    vae.save('models/vae_2001.h5')

n = 15
def __init__(self, num_items=3, use_all=True, val=False):
    """Initialise the environment wrapper and load the image dataset.

    num_items: number of objects in the scene (default 3).
    use_all:   forwarded to getImages() dataset selection.
    val:       forwarded to getImages() split selection.
    """
    self.state = None
    self.num_items = num_items
    # Unpack the image groups: single items, item pairs, and hand/item shots.
    (self.purple, self.blue, self.orange,
     self.pu_bl, self.pu_or, self.bl_pu, self.bl_or, self.or_pu, self.or_bl,
     self.pu_hand, self.bl_hand, self.or_hand) = getImages(
         return_single=False, use_all=use_all, val=val)