Example #1
0
    def __init__(self, batch_size, version):
        """Assemble the CycleGAN-CNN model and build its graph/session.

        Args:
            batch_size: number of samples per training batch.
            version: model version tag passed to the base class.
        """
        super(CycleGAN_CNN, self).__init__(batch_size, version)

        # X-domain encoder/decoder converters plus a 62-class discriminator.
        self.encoder_X = ConverterA_CNN(name="encoder_cnn")
        self.decoder_X = ConverterB_CNN(name="decoder_cnn")
        self.discriminator = SeqDiscriminator_CNN(class_num=62, mode=1, fixed_length=False, name='discriminator')

        # Two data streams created with the same seed; Y is re-shuffled with a
        # different seed so X/Y batches are drawn unpaired.
        self.data_X = datamanager(time_major=False, seed=19940610)
        self.data_Y = datamanager(time_major=False, seed=19940610)
        self.data_Y.shuffle_train(seed=19931028)

        self.build_network()

        # Session and output directories are set up after the graph is built.
        self.build_sess()
        self.build_dirs()
Example #2
0
    def sample(self, epoch):
        """Plot generated trajectories next to their inputs and targets.

        Draws a 4x3 grid — input channels, generated (filtered) x-y
        trajectory, ground-truth x-y trajectory — and saves it to
        fig_dir/sample_epoch_<epoch>.png.
        """
        sampler = datamanager(time_major=False, seed=0)
        sampler.shuffle_train(0)
        batch = sampler(self.batch_size,
                        var_list=["AccGyo", "lens", "labels", "XYZ"])

        # The generator input carries a trailing channel axis.
        outputs = self.sess.run(self.G, feed_dict={
            self.source_X: batch["AccGyo"][:, :, :, None],
            self.length_X: batch["lens"],
        })
        # Smooth each generated sequence with a width-5 filter.
        outputs = np.array([filtering(seq, window=5) for seq in outputs])

        inputs = batch["AccGyo"]
        targets = batch["XYZ"]
        lengths = batch['lens']

        for row in range(4):
            pick = row * 3
            n = lengths[pick]
            # Column 1: raw input channels.
            plt.subplot(4, 3, 3 * row + 1)
            plt.plot(inputs[pick, :n, :])
            # Column 2: generated x-y trajectory, titled with the class id.
            plt.subplot(4, 3, 3 * row + 2)
            plt.plot(outputs[pick, :n, 0, 0], outputs[pick, :n, 1, 0])
            plt.xticks([])
            plt.title(str(np.argmax(batch["labels"][pick])))
            # Column 3: ground-truth x-y trajectory.
            plt.subplot(4, 3, 3 * row + 3)
            plt.plot(targets[pick, :n, 0], targets[pick, :n, 1])
            plt.xticks([])

        plt.savefig(
            os.path.join(self.fig_dir, "sample_epoch_{}.png".format(epoch)))
        plt.clf()
Example #3
0
    def sample(self, epoch):
        """Dump encoder/decoder outputs for one training batch and plot
        ground-truth vs. predicted 2-D trajectories for 12 distinct classes.

        Args:
            epoch: epoch index, used only to name the output files.
        """
        # Py3 fix: print statement -> print() function.
        print("sample after epoch {}".format(epoch))
        data = datamanager(time_major=False, seed=19940610)
        data_X = data(self.batch_size, phase='train',
                      var_list=["AccGyo", "lens", "labels", "XYZ"])
        feed_dict = {
            self.source_X: data_X['AccGyo'][:, :, :, None],
            self.len_X: data_X['lens']
        }

        data_X['enX_outputs'], data_X['deX_outputs'] = self.sess.run(
            [self.encoder_outputs, self.decoder_outputs], feed_dict=feed_dict)
        np.savez(os.path.join(self.fig_dir,
                              "sample_epoch_{}.npz".format(epoch)), **data_X)

        ori, lens, pred = (data_X['XYZ'], data_X['lens'],
                           data_X["enX_outputs"][:, :, :, 0])

        # Index of the first sample of each distinct label.
        # Py3 fix: dict.has_key() was removed; use set membership instead.
        indexes, seen = [], set()
        for i, label in enumerate(np.argmax(data_X['labels'], axis=1)):
            if label not in seen:
                indexes.append(i)
                seen.add(label)

        # 4x3 grid of (ground truth in green, prediction in red) pairs.
        for i in range(4):
            for j in range(3):
                idx = i * 3 + j
                pic_idx = indexes[idx]
                plt.subplot(4, 6, idx * 2 + 1)
                plt.plot(ori[pic_idx, :lens[pic_idx], 0],
                         ori[pic_idx, :lens[pic_idx], 1], color='g')
                plt.xticks([])
                plt.subplot(4, 6, idx * 2 + 2)
                plt.plot(pred[pic_idx, :lens[pic_idx], 0],
                         pred[pic_idx, :lens[pic_idx], 1], color='r')
                plt.xticks([])
        plt.savefig(os.path.join(self.fig_dir,
                                 "sample_epoch_{}.png".format(epoch)))
        plt.clf()
Example #4
0
    def __init__(self):
        """Generate reduc plot html"""

        # Get all tags with raw data that are not cut
        dm = datamanager.datamanager()
        dm.gettags()
        # Sort tags with latest one first
        self.tags = dm.tags[::-1]
        # Output directory for the generated html pages.
        self.htmldir = 'browser'
Example #5
0
    def sample(self, epoch):
        """Run encoder/decoder on one training batch and save the batch plus
        the two outputs to fig_dir/sample_epoch_<epoch>.npz.

        Args:
            epoch: epoch index, used only to name the output file.
        """
        # Py3 fix: print statement -> print() function.
        print("sample after epoch {}".format(epoch))
        data = datamanager(seed=19931028)
        data_X = data(self.batch_size, phase='train',
                      var_list=["AccGyo", "lens", "labels", "XYZ"])
        feed_dict = {
            self.source_X: data_X['AccGyo'],
            self.len_X: data_X['lens']
        }

        data_X['enX_outputs'], data_X['deX_outputs'] = self.sess.run(
            [self.en_outputs_1, self.de_outputs_1], feed_dict=feed_dict)
        np.savez(os.path.join(self.fig_dir,
                              "sample_epoch_{}.npz".format(epoch)), **data_X)
Example #6
0
    def __init__(self, batch_size, version):
        """Assemble the DTN components and build the training graph.

        Args:
            batch_size: number of samples per training batch.
            version: model version tag passed to the base class.
        """
        super(DTN, self).__init__(batch_size, version)

        # One 62-class sequence classifier per domain.
        self.classifier_X = SeqDiscriminator_CNN(class_num=62,
                                                 mode=0,
                                                 fixed_length=False,
                                                 name="cls_x_cnn")
        self.classifier_Y = SeqDiscriminator_CNN(class_num=62,
                                                 mode=0,
                                                 fixed_length=False,
                                                 name="cls_y_cnn")

        # Per-domain autoencoders: X uses 6 channels in/out, Y uses 3.
        self.autoencoder_X = AutoEncoder_CNN(input_depth=6,
                                             output_depth=6,
                                             fixed_length=False,
                                             name="ae_X")
        self.autoencoder_Y = AutoEncoder_CNN(input_depth=3,
                                             output_depth=3,
                                             fixed_length=False,
                                             name="ae_Y")

        # Cross-domain encoder and a class-agnostic discriminator
        # (class_num=None).
        self.encoder = ConverterA_CNN(name="encoder_XtoY")
        self.discriminator = SeqDiscriminator_CNN(class_num=None,
                                                  mode=1,
                                                  fixed_length=False,
                                                  name="discriminator")

        # Two data streams with different seeds so batches are unpaired.
        self.data_X = datamanager(time_major=False)
        self.data_Y = datamanager(time_major=False, seed=1)

        self.build_placeholder()
        self.build_classifier()
        # self.build_dtn()
        # self.build_ae()
        self.build_gan()
        self.build_optimizer()
        self.build_summary()

        self.build_sess()
        self.build_dirs()
Example #7
0
    def __init__(self, batch_size, version='saegan', gpu='0'):
        """Assemble the SAEGAN: image/sequence VAEs with a latent
        discriminator and a latent classifier.

        Args:
            batch_size: number of samples per training batch.
            version: model version tag passed to the base class.
            gpu: GPU id string passed to the base class.
        """
        super(SAEGAN, self).__init__(batch_size, version, gpu)

        # CT image / CT sequence datasets, 80/20 split, channel axis added.
        self.data_img = datamanager('CT_img',
                                    train_ratio=0.8,
                                    expand_dim=3,
                                    seed=0)
        self.data_seq = datamanager('CT_seq',
                                    train_ratio=0.8,
                                    expand_dim=3,
                                    seed=1)

        # One test batch per modality, fetched once here for sampling.
        self.sample_data_Img = self.data_img(self.batch_size,
                                             phase='test',
                                             var_list=['data'])
        self.sample_data_Seq = self.data_seq(self.batch_size,
                                             phase='test',
                                             var_list=['data'])

        # Shared latent size for both variational autoencoders.
        self.len_latent = 64
        # self.autoencoder_img = AutoEncoder_Image(self.len_latent, name='AEI')
        # self.autoencoder_seq = AutoEncoder_Seq(self.len_latent, name='AES')
        self.autoencoder_img = Variational_AutoEncoder_Image(self.len_latent,
                                                             name='VAEI')
        self.autoencoder_seq = Variational_AutoEncoder_Seq(self.len_latent,
                                                           name='VAES')
        self.latent_discriminator = Latent_Discriminator(name='LD')

        self.class_num = 20
        self.latent_classifier = Latent_Classifier(class_num=self.class_num,
                                                   name='LC')

        self.build_placeholder()
        self.build_network()
        self.build_optimizer()

        # Directories are created one level up relative to the default.
        self.build_dirs(appendix="../")
        self.build_sess()
Example #8
0
 def test(self):
     """Accumulate the transform loss over 107 batches from self.data_X and
     print the per-sample average."""
     # NOTE(review): the original created an unused local
     # `data = datamanager(time_major=False)` here; it was removed because
     # only self.data_X is ever consumed.
     loss = 0
     for _ in range(107):
         X = self.data_X(self.batch_size, var_list=["lens", "AccGyo", "XYZ"])
         feed_dict = {
             self.source_X: X["AccGyo"][:, :, :, None],
             self.len_X: X["lens"],
             self.target_X: X["AccGyo"][:, :, :, None],
             self.mid_X: X["XYZ"][:, :, :, None]
         }
         loss += self.sess.run(self.transform_loss, feed_dict=feed_dict)
     # Per-sample average: 107 batches of batch_size samples each.
     loss /= float(107 * self.batch_size)
     # Py3 fix: print statement -> print() function.
     print(loss)
Example #9
0
    def __init__(self, batch_size, gan_type='gan', version="cyclegan"):
        """Assemble the seq<->img CycleGAN and build its graph/session.

        Args:
            batch_size: number of samples per training batch.
            gan_type: GAN loss variant tag (set before the base-class init —
                presumably read there; confirm).
            version: model version tag passed to the base class.
        """
        self.gan_type = gan_type
        super(CycleGAN, self).__init__(batch_size, version)

        # Sequence/image datasets (different seeds, so batches are unpaired)
        # with one test batch each fetched for sampling.
        self.data_Seq = datamanager('CT', train_ratio=0.8, expand_dim=3, seed=0)
        self.data_Img = datamanager('CT_img', train_ratio=0.8, expand_dim=3, seed=1)
        self.sample_data_Seq = self.data_Seq(self.batch_size, phase='test', var_list=['data'])
        self.sample_data_Img = self.data_Img(self.batch_size, phase='test', var_list=['data'])

        # Presumably the number of discriminator updates per generator
        # update — confirm against the training loop.
        self.critic_iter = 3

        self.generator_SeqtoImg = CNN_Generator_SeqtoImg(output_dim=1, name="cnn_generator_SeqtoImg")
        self.generator_ImgtoSeq = CNN_Generator_ImgtoSeq(output_dim=1, name="cnn_generator_ImgtoSeq")
        self.discriminator_Seq = CNN_Discriminator_Seq(name="cnn_discriminator_Seq")
        self.discriminator_Img = CNN_Discriminator_Img(name="cnn_discriminator_Img")

        self.build_placeholder()
        self.build_network()
        self.build_optimizer()
        self.build_summary()

        self.build_sess()
        self.build_dirs()
Example #10
0
 def get_latent(self):
     """Run the autoencoder's latent layer over 107 training batches of 64
     samples and print each batch's latent shape."""
     data = datamanager(time_major=self.time_major)
     for _ in range(107):
         X = data(64, phase='train', var_list=["lens", "AccGyo", "XYZ"])
         # NOTE(review): the original re-assigned X["XYZ"] and X["AccGyo"]
         # to themselves when not self.time_major; those no-ops were removed.
         feed_dict = {
             self.source: X[self.input_pointer],
             self.length: X["lens"],
             self.target: X[self.output_pointer],
         }
         latent = self.sess.run(self.autoencoder.latent,
                                feed_dict=feed_dict)
         # Py3 fix: print statement -> print() function.
         print(latent.shape)
Example #11
0
    def __init__(self, class_num, data_dim, batch_size, version):
        """Build a CNN sequence classifier.

        Args:
            class_num: number of output classes.
            data_dim: input feature dimensionality (stored for later use).
            batch_size: number of samples per training batch.
            version: model version tag passed to the base class.
        """
        super(Classifier, self).__init__(batch_size, version)
        self.data = datamanager(time_major=False)

        self.class_num = class_num
        self.data_dim = data_dim

        # CNN classifier
        self.classifier = SeqDiscriminator_CNN(class_num=class_num,
                                               mode=0,
                                               fixed_length=False,
                                               name="CNN_classifier")

        self.build_network()

        self.build_sess()
        self.build_dirs()
Example #12
0
    def gen_latent(self):
        """Embed the full train/test sets with self.embedding and save the
        embeddings and labels to <version>_emb.npz.

        The datamanager cycles past the end of each split, so both the
        embeddings and labels are truncated to the split size before saving.
        """
        data = datamanager(time_major=False)
        train_x, train_y = [], []
        for _ in range(108):
            X = data(self.batch_size,
                     phase='train',
                     var_list=[self.input_pointer, "lens", "labels"])
            feed_dict = {
                self.source: X[self.input_pointer][:, :, :, None],
                self.length: X["lens"]
            }
            train_x.append(self.sess.run(self.embedding, feed_dict=feed_dict))
            train_y.append(np.argmax(X["labels"], axis=1))
        # Bug fix: truncate embeddings as well as labels — the original
        # only sliced train_y, so the saved arrays had mismatched lengths
        # (the test branch below truncates both).
        train_x = np.concatenate(train_x, axis=0)[:data.train_num]
        train_y = np.concatenate(train_y)[:data.train_num]

        test_x, test_y = [], []
        for _ in range(27):
            X = data(self.batch_size,
                     phase='test',
                     var_list=[self.input_pointer, "lens", "labels"])
            feed_dict = {
                self.source: X[self.input_pointer][:, :, :, None],
                self.length: X["lens"]
            }
            test_x.append(self.sess.run(self.embedding, feed_dict=feed_dict))
            test_y.append(np.argmax(X["labels"], axis=1))
        test_x = np.concatenate(test_x, axis=0)[:data.test_num, :]
        test_y = np.concatenate(test_y)[:data.test_num]

        # Py3 fix: print statements -> print() function.
        print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
        to_save = {
            "train_embedding": train_x,
            "train_label": train_y,
            "test_embedding": test_x,
            "test_label": test_y,
            "classifier_name": self.version,
            "data_seed": 19940610
        }
        print(to_save["classifier_name"])
        np.savez(
            "/home/scw4750/songbinxu/autoencoder/data/" + self.version +
            "_emb.npz", **to_save)
Example #13
0
    def __init__(self, batch_size, version):
        """Assemble the DAE-GAN: data stream, CNN autoencoder, and graph.

        Args:
            batch_size: number of samples per training batch.
            version: model version tag passed to the base class.
        """
        super(DAE_GAN, self).__init__(batch_size, version)

        self.data = datamanager(time_major=False)
        # 6-channel in/out CNN autoencoder; the third positional argument is
        # presumably the bottleneck size (100) — confirm against
        # AutoEncoder_CNN's signature.
        self.autoencoder = AutoEncoder_CNN(6,
                                           6,
                                           100,
                                           fixed_length=False,
                                           name='autoencoder')
        # self.encoder = SeqDiscriminator_CNN(class_num=None, mode=1, fixed_length=False, name='encoder')
        # self.decoder = dynamic_decoder([64,64], 6, 'lstm', name='decoder')

        self.time_major = False

        self.build_placeholder()
        self.build_network()
        self.build_optimizer()

        # NOTE(review): unlike the sibling models, build_dirs() is not
        # called here — confirm this is intentional.
        self.build_sess()
Example #14
0
def Seq2Seq_CNN(mode):
    """Train or evaluate the CNN seq2seq converter across 5 CV folds.

    Args:
        mode: 'train' fits each fold for 100 epochs; 'test' restores the
            saved model and runs evaluation.
    """
    from autoencoder import ConverterA_CNN, AE
    from encoders import encoder_bi
    from decoders import dynamic_decoder
    from datamanager import datamanager

    for fold in range(5):
        # Fresh converter and 5-fold data split for every fold.
        net = ConverterA_CNN(name="encoderdecoder_CNN")
        dataset = datamanager(time_major=False,
                              expand_dim=True,
                              train_ratio=None,
                              fold_k=5,
                              seed=233)
        runner = AE(net, dataset, 64, "Seq2Seq_CNN/fold_{}".format(fold))

        if mode == 'train':
            runner.train(epoches=100)
        elif mode == 'test':
            runner.load_model()
            runner.test()

        # Clear the graph so the next fold starts from scratch.
        tf.reset_default_graph()
Example #15
0
    def __init__(self, batch_size, noise_dim=100, version="CGAN"):
        """Assemble the conditional GAN and build its graph/session.

        Args:
            batch_size: number of samples per training batch.
            noise_dim: dimensionality of the generator's noise input.
            version: model version tag passed to the base class.
        """
        super(CGAN, self).__init__(batch_size, version)

        self.noise_dim = noise_dim

        # 'CT' dataset, 80/20 train/test split, with a channel axis added;
        # one test batch is fetched here for later sampling.
        self.data = datamanager('CT', train_ratio=0.8, expand_dim=3, seed=0)
        self.data_test = self.data(self.batch_size,
                                   'test',
                                   var_list=['data', 'labels'])
        self.class_num = self.data.class_num

        self.Generator = Generator(output_dim=1, name='G')
        self.Discriminator = Discriminator(name='D')

        self.build_placeholder()
        self.build_gan()
        self.build_optimizer()
        self.build_summary()

        self.build_sess()
        self.build_dirs()
    def __init__(self, batch_size, version):
        """Assemble the infoGAN and build its graph/session.

        Args:
            batch_size: number of samples per training batch.
            version: model version tag; falls back to 'infoGAN' when falsy.
        """
        super(infoGAN, self).__init__(batch_size, version or 'infoGAN')

        self.noise_dim = 100
        # Presumably toggles using the real labels to supervise the
        # categorical code — confirm against the training code.
        self.SUPERVISED = True

        # Whole dataset used for training (train_ratio=1.0), channel axis
        # added, no normalization.
        self.data = datamanager("data/CharacterTrajectories/CharacterTrajectories.npz", 
                   train_ratio=1.0, fold_k=None, norm=None, expand_dim=3, seed=0)
        self.class_num = self.data.class_num

        # code
        self.len_code = 2

        self.Generator = Generator(output_dim=1, name='G')
        self.Discriminator = Discriminator(name='D')
        # Classifier head sized for class logits plus the latent codes.
        self.Classifier = Classifier(class_num=self.class_num + self.len_code, name='C')

        self.build_placeholder()
        self.build_gan()
        self.build_optimizer()
        self.build_summary()
        self.build_sess()
        self.build_dirs()
Example #17
0
def Seq2Seq_RNN(mode):
    """Train or evaluate a bidirectional-GRU seq2seq autoencoder over 5 folds.

    Args:
        mode: 'train' fits each fold for 100 epochs; 'test' restores the
            checkpoint and collects per-fold test metrics, printed at the end.
    """
    from autoencoder import AutoEncoder_RNN, AE
    from encoders import encoder_bi
    from decoders import dynamic_decoder
    from datamanager import datamanager

    results = []
    for fold_id in range(5):
        # Two-layer GRU config shared by encoder and decoder.
        encoder_cfg = {"hidden_units": [64, 64], "cell_type": "gru"}
        decoder_cfg = {"hidden_units": [64, 64], "cell_type": "gru"}
        autoencoder = AutoEncoder_RNN(encoder_bi, encoder_cfg,
                                      dynamic_decoder, decoder_cfg,
                                      input_depth=6,
                                      output_depth=3,
                                      embedding_dim=50,
                                      name="encoderdecoder_gru")
        data = datamanager(time_major=True,
                           expand_dim=False,
                           train_ratio=None,
                           fold_k=5,
                           seed=233)
        ae = AE(autoencoder, data, 64, "Seq2Seq_GRU/fold_{}".format(fold_id))

        if mode == 'train':
            ae.train(epoches=100)
        elif mode == 'test':
            ae.saver.restore(ae.sess,
                             os.path.join(ae.model_dir, "model.ckpt-10699"))
            a, b = ae.test()
            results.append([a, b])

        # Clear the graph so the next fold starts from scratch.
        tf.reset_default_graph()
    # Py3 fix: print statement -> print() function.
    print(results)
Example #18
0
 def __init__(self):
     """Create the data manager and load the cached company table."""
     self.mg = datamanager()
     # NOTE(review): path is relative to the working directory — presumably
     # run from a sibling directory of ../Database; confirm.
     self.companies = pd.read_pickle('../Database/naver_companies.pickle')
Example #19
0
    def tsne(self):
        """Project image/sequence embeddings to 2-D with t-SNE and save a
        scatter plot (colored by domain, annotated per class) to
        fig_dir/tsne/tsne.png.
        """
        data_img = datamanager('CT_img', train_ratio=0.8, expand_dim=3, seed=0)
        data_seq = datamanager('CT_seq', train_ratio=0.8, expand_dim=3, seed=1)

        img_embs, seq_embs, img_labels, seq_labels = [], [], [], []

        # Embed every training batch from both domains.
        for i in range(data_img.train_num // self.batch_size + 1):
            img = data_img(self.batch_size,
                           phase='train',
                           var_list=['data', 'labels'])
            seq = data_seq(self.batch_size,
                           phase='train',
                           var_list=['data', 'labels'])
            feed_dict = {
                self.source_img: img['data'],
                self.source_seq: seq['data']
            }
            emb_img, emb_seq = self.sess.run(
                [self.emb_img_test, self.emb_seq_test], feed_dict=feed_dict)
            img_embs.append(emb_img)
            seq_embs.append(emb_seq)

            img_labels.append(np.argmax(img['labels'], 1))
            seq_labels.append(np.argmax(seq['labels'], 1))

        # Image embeddings first, then sequence embeddings; domain_labels
        # marks each row 0 (image) or 1 (sequence) in the same order.
        embs = np.concatenate(img_embs + seq_embs, axis=0)
        img_labels = np.concatenate(img_labels)
        seq_labels = np.concatenate(seq_labels)
        domain_labels = np.array([0] * (len(img_embs) * self.batch_size) +
                                 [1] * (len(seq_embs) * self.batch_size))
        # Py3 fix: print statement -> print() function.
        print(embs.shape, domain_labels.shape)

        from sklearn.manifold import TSNE
        model = TSNE(n_components=2, random_state=0)
        embs = model.fit_transform(embs)
        # np.save(os.path.join(self.fig_dir, "tsne", 'emb.npy'), embs)

        # embs = np.load(os.path.join(self.fig_dir, "tsne", 'emb.npy'))

        plt.scatter(embs[:, 0], embs[:, 1], c=domain_labels)
        plt.colorbar()

        # Class-index -> character used for annotations.
        keys = [
            'a', 'b', 'c', 'd', 'e', 'g', 'h', 'l', 'm', 'n', 'o', 'p', 'q',
            'r', 's', 'u', 'v', 'w', 'y', 'z'
        ]
        # Keep one representative embedding index per class.
        # NOTE(review): assumes every class appears within the first 200
        # samples of each domain — confirm.
        img_labels_dict = {}
        for i in range(200):
            img_labels_dict[img_labels[i]] = i
        seq_labels_dict = {}
        for i in range(200):
            seq_labels_dict[seq_labels[i]] = i + len(img_labels)
        # Py3 fix: dict.iteritems() was removed; use items().
        for i, v in img_labels_dict.items():
            plt.text(embs[v][0] - 5,
                     embs[v][1] + 5,
                     s=keys[i],
                     fontsize=10,
                     color='b')
        for i, v in seq_labels_dict.items():
            plt.text(embs[v][0] + 5,
                     embs[v][1] - 5,
                     s=keys[i],
                     fontsize=10,
                     color='r')

        plt.savefig(os.path.join(self.fig_dir, "tsne", "tsne.png"))
        plt.clf()
Example #20
0
import numpy as np
import bmxdata
from datamanager import datamanager
from reduce_plot import genplots
import os
import cPickle as cP
import astropy.units as u
from astropy.coordinates import EarthLocation, AltAz,SkyCoord
from astropy.time import Time
import time

# Geographic site of the telescope (lat/lon in degrees, height 0 m),
# used for astropy coordinate transforms.
telescope_loc = EarthLocation(lat=40.87792*u.deg, lon=-72.85852*u.deg, height=0*u.m)

# Initialize empty data manager so we can use its functions
dm = datamanager()
dm.gettags()  # populates dm.tags with the available data tags

class reduce(object):

    def __init__(self, tag):
        """ Init with a tag string E.g.
        r = reduce('170928_1000')
        """

        # Store tag string and get filename
        self.tag = tag
        self.rawfname = dm.getrawfname(tag)
        self.redfname = dm.getreducedfname(tag) 

        print('loading data...')
        t = time.time()