Example #1
def main(batch_size, file_dir):
    # Prepare the dataset.
    x = get_data(file_dir)
    
    gan = GAN(discriminator=discriminator, generator=generator, latent_dim=latent_dim)
    gan.compile(
        d_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
        g_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
        loss_fn=keras.losses.BinaryCrossentropy(from_logits=True)
    )
    # Training on the entire dataset for about 20 epochs gives nice results.
    generator.summary()
    discriminator.summary()
    history = gan.fit(x, batch_size=batch_size, epochs=20)
    g_loss, d_loss = history.history['g_loss'], history.history['d_loss']
    plt.plot(g_loss)
    plt.plot(d_loss)
    plt.xticks(np.arange(0, 20, step=1))  # Set label locations.
    plt.xlabel('epochs')
    plt.ylabel('loss')
    plt.title('Protein Structure Generation With DCGAN')
    # pred = np.stack(history.history['pred'], axis=0)
    # labels = np.stack(history.history['label'], axis=0)
    # accuracies = get_accuracies(pred, labels)
    # plt.plot(accuracies)
    plt.legend(['Generator loss', 'Discriminator loss'], loc='upper right')
    plt.show()
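
The GAN class constructed above is not shown on this page. As a reference for how such a gan.fit call can work, here is a minimal keras.Model subclass in the style of the standard Keras DCGAN tutorial; the override names (compile, train_step) and the d_optimizer/g_optimizer/loss_fn arguments match the call site above, while the layer and loss details are assumptions:

import tensorflow as tf
from tensorflow import keras

class GAN(keras.Model):
    def __init__(self, discriminator, generator, latent_dim):
        super().__init__()
        self.discriminator = discriminator
        self.generator = generator
        self.latent_dim = latent_dim

    def compile(self, d_optimizer, g_optimizer, loss_fn):
        super().compile()
        self.d_optimizer = d_optimizer
        self.g_optimizer = g_optimizer
        self.loss_fn = loss_fn

    def train_step(self, real_images):
        batch_size = tf.shape(real_images)[0]
        noise = tf.random.normal(shape=(batch_size, self.latent_dim))

        # Discriminator step: generated images are labeled 1, real images 0
        fake_images = self.generator(noise)
        combined = tf.concat([fake_images, real_images], axis=0)
        labels = tf.concat(
            [tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], axis=0)
        with tf.GradientTape() as tape:
            d_loss = self.loss_fn(labels, self.discriminator(combined))
        grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
        self.d_optimizer.apply_gradients(
            zip(grads, self.discriminator.trainable_weights))

        # Generator step: try to make the discriminator label fakes as real (0)
        noise = tf.random.normal(shape=(batch_size, self.latent_dim))
        misleading_labels = tf.zeros((batch_size, 1))
        with tf.GradientTape() as tape:
            preds = self.discriminator(self.generator(noise))
            g_loss = self.loss_fn(misleading_labels, preds)
        grads = tape.gradient(g_loss, self.generator.trainable_weights)
        self.g_optimizer.apply_gradients(
            zip(grads, self.generator.trainable_weights))

        # These keys are what history.history['d_loss'] / ['g_loss'] read above
        return {"d_loss": d_loss, "g_loss": g_loss}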
Example #2
    def fit(self):

        for g in ParameterGrid(self.parameters):
            self.iter += 1
            print(
                '\nTraining:',
                str(self.iter) + '/' +
                str(len(ParameterGrid(self.parameters))), '- Parameters:', g)

            # Model
            model = GAN(self.name, self.train_dataset, self.test_dataset,
                        self.shape, **g)

            score = model.train()

            print(
                '\tScore: emd: %f\t fid: %f\t inception: %f\t knn: %f\t mmd: %f\t mode: %f'
                % (score.emd, score.fid, score.inception, score.knn, score.mmd,
                   score.mode))

            self.results.append({'score': score, 'params': g})

            # Append to the results file ('a', so earlier grid points are kept)
            with open('resources/results.txt', 'a') as f:
                f.write(
                    '\nemd: %f\t fid: %f\t inception: %f\t knn: %f\t mmd: %f\t mode: %f'
                    % (score.emd, score.fid, score.inception, score.knn,
                       score.mmd, score.mode))
Example #3
    def __init__(self, num_historical_days, batch_size=128):
        self.batch_size = batch_size
        self.data = []
        data_dir = 'C:\\Users\\SPL\\PycharmProjects\\StockMarketGAN-master\\stock_data'
        files = [os.path.join(data_dir, f) for f in os.listdir(data_dir)]
        for file in files:
            print(file)
            #Read in file -- note that parse_dates will be needed later
            df = pd.read_csv(file, index_col='timestamp', parse_dates=True)
            df = df[['open','high','low','close','volume']]
            # #Create new index with missing days
            # idx = pd.date_range(df.index[-1], df.index[0])
            # #Reindex and fill the missing day with the value from the day before
            # df = df.reindex(idx, method='bfill').sort_index(ascending=False)
            #Normalize using a rolling window of size num_historical_days
            df = ((df -
            df.rolling(num_historical_days).mean().shift(-num_historical_days))
            /(df.rolling(num_historical_days).max().shift(-num_historical_days)
            -df.rolling(num_historical_days).min().shift(-num_historical_days)))
            #Drop the trailing rows that we don't have data for
            df = df.dropna()
            #Hold out the last year of trading for testing
            #Padding to keep labels from bleeding
            df = df[400:]
            #This may not create good samples if num_historical_days is a
            #multiple of 7
            for i in range(num_historical_days, len(df), num_historical_days):
                self.data.append(df.values[i-num_historical_days:i])

        self.gan = GAN(num_features=5, num_historical_days=num_historical_days,
                        generator_input_size=200)
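
The normalization in Example #3 (and again in Example #27 below) is a rolling min-max scaling: because of the shift(-num_historical_days), each row is centered on the mean of the following window of rows and divided by that window's range. A small self-contained demonstration of the same transform on synthetic data:

import numpy as np
import pandas as pd

n = 10  # window size, playing the role of num_historical_days
df = pd.DataFrame({'close': 100 + np.random.rand(100) * 50})

# rolling(n) looks backward; shift(-n) turns it into a forward-looking window,
# so each row is scaled by the statistics of the next n rows.
normalized = ((df - df.rolling(n).mean().shift(-n))
              / (df.rolling(n).max().shift(-n)
                 - df.rolling(n).min().shift(-n)))

# The last n rows have no forward window, hence the dropna() in the example
print(normalized.dropna().describe())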
Example #4
def test_gaussian(mean, std, num_data, make_gif=False, use_gpu=True):
    num_iters = 30
    noise_size = 1
    sample_size = 1
    batch_size = 512
    # Sample some data (already shaped (num_data, sample_size))
    data = torch.randn(num_data, sample_size) * std + mean
    data_loader = DataLoader(data,
                             batch_size=batch_size,
                             shuffle=True,
                             pin_memory=use_gpu,
                             num_workers=4,
                             drop_last=True)
    # data_iter = iter(data_loader)
    # Construct a GAN
    gen = MLPGenerator(noise_size, sample_size)
    dis = MLPDiscriminator(sample_size)
    gan = GAN(gen, dis, data_loader, use_gpu=use_gpu)

    plt.ion()
    for ii in range(num_iters):
        gan.train(20, batch_size)
        # Sample and visualize

        # True Distribution
        x = data.numpy().reshape(num_data)
        # x = np.random.randn(20000) * std + mean
        y, bin_edges = np.histogram(x, bins=200, density=True)
        bin_centers = 0.5 * (bin_edges[1:] + bin_edges[:-1])
        plt.plot(bin_centers, y, '-')

        # Generator Approximation
        x = gan.sample_gen(20000).data.cpu().numpy()
        y, bin_edges = np.histogram(x, bins=200, density=True)
        bin_centers = 0.5 * (bin_edges[1:] + bin_edges[:-1])
        plt.plot(bin_centers, y, '-')

        # Discriminator Probability
        axes = plt.gca()
        x_lim = axes.get_xlim()
        x = torch.linspace(x_lim[0], x_lim[1], 200).resize_(200, sample_size)
        if use_gpu:
            x = x.cuda()
        y = dis.forward(Variable(x))
        plt.plot(x.cpu().numpy(), y.data.cpu().numpy(), '-')

        if make_gif:
            plt.savefig('./figs/pic' + str(ii).zfill(3) + '.png')

        plt.pause(0.01)
        plt.cla()

    if make_gif:
        subprocess.call([
            'convert', '-loop', '0', '-delay', '50', './figs/pic*.png',
            './figs/output.gif'
        ])
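
MLPGenerator and MLPDiscriminator are defined elsewhere in that repository. For orientation, a minimal pair of 1-D modules with matching constructor signatures could look like this (layer widths and activations are assumptions, not the repository's actual architecture):

import torch.nn as nn

class MLPGenerator(nn.Module):
    def __init__(self, noise_size, sample_size, hidden=64):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(noise_size, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, sample_size))

    def forward(self, z):
        return self.net(z)

class MLPDiscriminator(nn.Module):
    def __init__(self, sample_size, hidden=64):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(sample_size, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, 1), nn.Sigmoid())  # probability the input is real

    def forward(self, x):
        return self.net(x)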
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('image_dir', type=str)
    parser.add_argument('--batch_size', '-bs', type=int, default=64)
    parser.add_argument('--nb_epoch', '-e', type=int, default=1000)
    parser.add_argument('--noise_dim', '-nd', type=int, default=100)
    parser.add_argument('--height', '-ht', type=int, default=128)
    parser.add_argument('--width', '-wd', type=int, default=128)
    parser.add_argument('--save_steps', '-ss', type=int, default=1)
    parser.add_argument('--visualize_steps', '-vs', type=int, default=1)
    parser.add_argument('--logdir', '-ld', type=str, default="../logs")
    parser.add_argument('--noise_mode', '-nm', type=str, default="uniform")
    parser.add_argument('--upsampling', '-up', type=str, default="deconv")
    parser.add_argument('--metrics', '-m', type=str, default="JSD")
    parser.add_argument('--lr_d', type=float, default=1e-4)
    parser.add_argument('--lr_g', type=float, default=1e-4)
    parser.add_argument('--norm_d', type=str, default=None)
    parser.add_argument('--norm_g', type=str, default=None)
    parser.add_argument('--model', type=str, default='residual')

    args = parser.parse_args()

    # output config to csv
    args_to_csv(os.path.join(args.logdir, 'config.csv'), args)

    input_shape = (args.height, args.width, 3)

    image_sampler = ImageSampler(args.image_dir,
                                 target_size=(args.width, args.height))
    noise_sampler = NoiseSampler(args.noise_mode)

    if args.model == 'residual':
        generator = ResidualGenerator(args.noise_dim,
                                      target_size=(args.width, args.height),
                                      upsampling=args.upsampling,
                                      normalization=args.norm_g)
        discriminator = ResidualDiscriminator(input_shape,
                                              normalization=args.norm_d)
    elif args.model == 'plane':
        generator = Generator(args.noise_dim,
                              upsampling=args.upsampling,
                              normalization=args.norm_g)
        discriminator = Discriminator(input_shape,
                                      normalization=args.norm_d)
    else:
        raise ValueError(f"unknown model: {args.model}")

    gan = GAN(generator,
              discriminator,
              metrics=args.metrics,
              lr_d=args.lr_d,
              lr_g=args.lr_g)

    gan.fit(image_sampler.flow_from_directory(args.batch_size),
            noise_sampler,
            nb_epoch=args.nb_epoch,
            logdir=args.logdir,
            save_steps=args.save_steps,
            visualize_steps=args.visualize_steps)
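
ImageSampler and NoiseSampler come from that project and are not shown here. As an illustration, a NoiseSampler covering the --noise_mode flag above could be as small as the following sketch (the __call__ signature is an assumption):

import numpy as np

class NoiseSampler:
    def __init__(self, mode='uniform'):
        if mode not in ('uniform', 'normal'):
            raise ValueError('unknown noise mode: {}'.format(mode))
        self.mode = mode

    def __call__(self, batch_size, noise_dim):
        # Latent vectors in [-1, 1] for 'uniform', standard normal otherwise
        if self.mode == 'uniform':
            return np.random.uniform(-1., 1., (batch_size, noise_dim)).astype('float32')
        return np.random.normal(0., 1., (batch_size, noise_dim)).astype('float32')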
Example #6
def worker_policy(args, manager, config):
    init_logging_handler(args.log_dir, '_policy')
    agent = GAN(None, args, manager, config, 0, pre=True)

    best = float('inf')
    for e in range(args.epoch):
        agent.imitating(e)
        best = agent.imit_test(e, best)
Example #7
    def __init__(self, dataset, n_blocks, final_shape):
        GAN.__init__(self, dataset, self.LATENT_DIM)

        self.n_blocks = n_blocks
        self.final_shape = final_shape
        # Works only for 3 dimensional images (HxWxC)
        self.image_size = final_shape[0:2]  # Allows for rectangular images
        self.channels = final_shape[2]
Example #8
    def __init__(self, dataset, n_blocks, start_shape, latent_dim):
        GAN.__init__(self, dataset, latent_dim)

        self.start_shape = start_shape
        self.discriminator_models = Discriminator.define_discriminator(n_blocks, start_shape)
        self.generator_models = Generator.define_generator(latent_dim, n_blocks, start_shape)

        self.gan_models = ProGAN._define_composite(self.discriminator_models, self.generator_models)

        self.constant_latent_vector = ProGAN._generate_latent_points(self.latent_dim, 1)
Example #9
def main():
    args = get_args()

    envs, args = build_envs(args)

    transfer_GAN = GAN(args.num_stack)

    if args.cuda:
        transfer_GAN.cuda()
    print(transfer_GAN)

    test_GAN(transfer_GAN, envs, args)
Example #10
def main():
    args = get_args()

    replay_buffer = ReplayMemory(args.replay_size)
    envs, args = build_envs(args)

    transfer_GAN = GAN(args.num_stack, args)

    if args.cuda:
        transfer_GAN.cuda()
    #print(transfer_GAN)

    train_GAN(transfer_GAN, envs, replay_buffer, args)
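
ReplayMemory here is the project's own class. A minimal ring-buffer replay memory with the same constructor shape would be (a sketch; the real class and its method names may differ):

import random
from collections import deque

class ReplayMemory:
    def __init__(self, capacity):
        # deque with maxlen silently evicts the oldest transitions
        self.buffer = deque(maxlen=capacity)

    def push(self, transition):
        self.buffer.append(transition)

    def sample(self, batch_size):
        return random.sample(self.buffer, batch_size)

    def __len__(self):
        return len(self.buffer)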
Example #11
def sample_GAN(args, num_samples=10):
    with open(os.path.join(args.save_dir_GAN, 'config.pkl')) as f:
        saved_args = cPickle.load(f)
    with open(os.path.join(args.save_dir_GAN, 'simple_vocab.pkl')) as f:
        chars, vocab = cPickle.load(f)
    gan = GAN(saved_args, is_training=False)
    with tf.Session() as sess:
        tf.initialize_all_variables().run()
        saver = tf.train.Saver(tf.all_variables())
        ckpt = tf.train.get_checkpoint_state(args.save_dir_GAN)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            return gan.generate_samples(sess, saved_args, chars, vocab, args.n)
Example #13
    def build_fn(self):
        gan = GAN()

        if self.mode == self.TRAIN_MODE:
            criterion = self.build_criterion()
            d_optimizer, g_optimizer = self.build_optimizers(
                gan.discriminator, gan.generator)

            return gan.train_fn(criterion, d_optimizer, g_optimizer)
        elif self.mode == self.EVALUATE_MODE:
            return gan.evaluate_fn()
        elif self.mode == self.PREDICT_MODE:
            return gan.predict_fn()
        else:
            raise ValueError(f"unknown mode: {self.mode}")
Example #14
    def __init__(self, height=55, width=35, channels=1, epochs=100, batch=16,
                 checkpoint=50, sim_path='', real_path='', data_limit=0.001,
                 generator_steps=2, discriminator_steps=1):
        self.W = width
        self.H = height
        self.C = channels
        self.EPOCHS = epochs
        self.BATCH = batch
        self.CHECKPOINT = checkpoint
        self.DATA_LIMIT=data_limit
        self.GEN_STEPS = generator_steps
        self.DISC_STEPS = discriminator_steps

        self.X_real = self.load_h5py(real_path)
        self.X_sim = self.load_h5py(sim_path)

        self.refiner = Generator(height=self.H, width=self.W, channels=self.C)
        self.discriminator = Discriminator(height=self.H, width=self.W, channels=self.C)
        self.discriminator.trainable = False

        self.synthetic_image = Input(shape=(self.H, self.W, self.C))
        self.real_or_fake = Input(shape=(self.H, self.W, self.C))


        self.refined_image = self.refiner.Generator(self.synthetic_image)
        self.discriminator_output = self.discriminator.Discriminator(self.real_or_fake)
        self.combined = self.discriminator.Discriminator(self.refined_image)

        model_inputs  = [self.synthetic_image]
        model_outputs = [self.refined_image, self.combined]
        self.gan = GAN(model_inputs=model_inputs,model_outputs=model_outputs)
Example #15
    def __init__(self,
                 width=28,
                 height=28,
                 channel=1,
                 latent_size=100,
                 epochs=50000,
                 batch=32,
                 checkpoint=50,
                 model_type='DCGAN',
                 data_path=''):

        self.W = width
        self.H = height
        self.C = channel
        self.latent_size = latent_size
        self.epochs = epochs
        self.batch_size = batch
        self.checkpoint = checkpoint
        self.model_type = model_type

        self.generator = Generator(width=self.W,
                                   height=self.H,
                                   channel=self.C,
                                   latent_size=self.latent_size,
                                   model_type=self.model_type)
        self.discriminator = Discriminator(width=self.W,
                                           height=self.H,
                                           channel=self.C,
                                           model_type=self.model_type)

        self.gan = GAN(generator=self.generator.Generator,
                       discriminator=self.discriminator.Discriminator)
        self.load_npy(data_path)
Example #16
    def __init__(self,
                 width=28,
                 height=28,
                 channels=1,
                 latent_size=100,
                 epochs=50000,
                 batch=32,
                 checkpoint=50,
                 model_type=-1,
                 outdir='data'):
        """
        outdir - Save the generated images at given checkpoints to this dir.
        """
        self.W = width
        self.H = height
        self.C = channels
        self.EPOCHS = epochs
        self.BATCH = batch
        self.CHECKPOINT = checkpoint
        self.model_type = model_type

        self.LATENT_SPACE_SIZE = latent_size
        os.makedirs(outdir, exist_ok=True)
        self.outdir = outdir
        self.generator = Generator(height=self.H,
                                   width=self.W,
                                   channels=self.C,
                                   latent_size=self.LATENT_SPACE_SIZE)
        self.discriminator = Discriminator(height=self.H,
                                           width=self.W,
                                           channels=self.C)
        self.gan = GAN(generator=self.generator.Generator,
                       discriminator=self.discriminator.Discriminator)

        self.load_MNIST()
Example #17
    def __init__(self,
                 width=28,
                 height=28,
                 channels=1,
                 latent_size=100,
                 epochs=50000,
                 batch=32,
                 checkpoint=50,
                 model_type=-1):
        self.W = width
        self.H = height
        self.C = channels
        self.EPOCHS = epochs
        self.BATCH = batch
        self.CHECKPOINT = checkpoint
        self.model_type = model_type

        self.LATENT_SPACE_SIZE = latent_size

        self.generator = Generator(height=self.H,
                                   width=self.W,
                                   channels=self.C,
                                   latent_size=self.LATENT_SPACE_SIZE)
        self.discriminator = Discriminator(height=self.H,
                                           width=self.W,
                                           channels=self.C)
        self.gan = GAN(generator=self.generator.Generator,
                       discriminator=self.discriminator.Discriminator)

        self.load_MNIST()
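
Examples #16 and #17 both end by calling self.load_MNIST(), which is not shown on this page. A typical implementation scales the digits into [-1, 1] to match a tanh generator output; a sketch under that assumption (the X_train attribute name is illustrative):

import numpy as np
from tensorflow.keras.datasets import mnist

def load_MNIST(self):
    (x_train, _), (_, _) = mnist.load_data()
    # Scale pixels from [0, 255] to [-1, 1] for a tanh generator output
    x = (x_train.astype('float32') - 127.5) / 127.5
    # Add a channel axis: (60000, 28, 28) -> (60000, 28, 28, 1)
    self.X_train = np.expand_dims(x, axis=-1)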
Example #18
def load_model(argl):
    if argl.model == 'gan':
        return GAN(epochs=argl.epochs,
                   batch_size=argl.batch_size,
                   nz=argl.noise)
    else:
        return None
Example #19
def test_gan(
        n_iters=1000,
        learning_rate=1e-4,
        n_mc_samples=1,
        scale_init=0.01,
        dim_z=2,
    ):

    datasets = load_data('../20150717-/mnist.pkl.gz')

    train_set, validate_set = datasets
    train_x, train_y = train_set
    validate_x, validate_y = validate_set
    xs = np.r_[train_x, validate_x]
    optimize_params = {
        'learning_rate' : learning_rate,
        'n_iters'       : n_iters,
        'minibatch_size': 100,
        'calc_history'     : 'all',
        'calc_hist'     : 'all',
        'n_mod_history'    : 100,
        'n_mod_hist'    : 100,
        'patience'      : 5000,
        'patience_increase': 2,
        'improvement_threshold': 0.995,
    }

    all_params = {
        'hyper_params': {
            'rng_seed'          : 1234,
            'dim_z'             : dim_z,
            'n_hidden'          : [500, 500],
            'n_mc_sampling'     : n_mc_samples,
            'scale_init'        : scale_init,
            'nonlinear_q'       : 'relu',
            'nonlinear_p'       : 'relu',
            'type_px'           : 'bernoulli',
            'optimizer'         : 'adam',
            'learning_process'  : 'early_stopping'
        }
    }
    all_params.update({'optimize_params': optimize_params})

    model = GAN(**all_params)
    model.fit(xs)

    return datasets, model
Example #20
def main():
    args = get_args()

    # Training Generator/Discriminator
    if args.model == 'GAN':
        model = GAN()
    # elif args.model == 'LSGAN':
    #     model = LSGAN()
    # elif args.model == 'WGAN':
    #     model = WGAN()
    # elif args.model == 'WGAN_GP':
    #     model = WGAN_GP()
    # elif args.model == 'DRAGAN':
    #     model = DRAGAN()
    # elif args.model == 'EBGAN':
    #     model = EBGAN()
    # elif args.model == 'BEGAN':
    #     model = BEGAN()
    # elif args.model == 'SNGAN':
    #     model = SNGAN()
    elif args.model == 'AnoGAN':
        model = AnoGAN()
    else:
        raise ValueError(f"unknown model: {args.model}")
    model.train()

    # Anomaly Detection
    if args.model == 'AnoGAN':
        model.anomaly_detect()
Example #21
def build_gan():
    noise_dim = 64
    gen = dcgan_gen(noise_dim=noise_dim)
    disc = dcgan_disc()
    gan = GAN(gen, disc)
    batch_gen = MNISTBatchGenerator()
    noise_gen = NoiseGenerator([(noise_dim,)])

    return (gan, batch_gen, noise_gen)
Example #22
    def __init__(self,
                 features=DESIRED_FEATURES,
                 generator_path=None,
                 discriminator_path=None,
                 fbnet_path=None,
                 multip_factor=20,
                 log_history=False,
                 log_id=None,
                 score_threshold=0.75):
        """
        Parameters
        ----------
        generator_path: str (optional)
            Path to the weights of a pretrained generator.
        discriminator_path: str (optional)
            Path to the weights of a pretrained discriminator.
        fbnet_path: str (optional)
            Path to the saved model.
        features: list (optional)
            Features for which to optimize sequences
        multip_factor: int (optional)
            Factor indicating how many times best sequences are added to the discriminator at each step.
        log_id: str (optional)
            If log_id is provided, history logs during training will be written into "./Experiments/Experiment_{log_id}"
            else, the history is logged into self.history
        """

        self.GAN = GAN(generator_weights_path=generator_path, discriminator_weights_path=discriminator_path)
        self.FBNet = Feedback(fbnet_path)
        self.tokenizer = self.FBNet.tokenizer
        self.label_order = np.array(['B', 'C', 'E', 'G', 'H', 'I', 'S',
                                     'T'])  # order of labels as output by Multilabel binarizer - don't change!
        self.desired_features = features
        self.multip_factor = multip_factor
        self.score_threshold = score_threshold
        self.data = None
        self.OneHot = OneHot_Seq(letter_type=TASK_TYPE)
        self.id = log_id
        self.log_history = log_history
        if self.id:
            # If experiment is getting logged, initialize files to log it
            self.log_initialize()
        if log_history:
            self.history = {'D_loss': [], 'G_loss': [], 'average_score': [], 'best_score': [], 'percent_fake': []}
Example #23
def main():
    #input
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--mnist-dir",
        default='/tmp/mnist-data',
        help="Directory where mnist downloaded dataset will be stored")
    parser.add_argument("--output-dir",
                        default='output',
                        help="Directory where models will be saved")
    parser.add_argument(
        "--train-digits",
        help=
        "Comma separated list of digits to train generators for (e.g. '1,2,3')"
    )
    parser.add_argument(
        "--train-mnist",
        action='store_true',
        help=
        "If specified, train the mnist classifier based on generated digits from saved models"
    )
    global args

    args = parser.parse_args()

    mnist_data = tf.contrib.learn.datasets.mnist.read_data_sets(args.mnist_dir,
                                                                one_hot=True)

    if args.train_digits:
        gan = GAN()
        for digit in map(int, args.train_digits.split(',')):
            path = "%s/digit-%d/model" % (args.output_dir, digit)
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            gan.train_digit(mnist_data, digit, path)
    elif args.train_mnist:
        gan = GAN()
        print("Loading generator models...")
        sessions = [
            gan.restore_session("%s/digit-%d" % (args.output_dir, digit))
            for digit in range(10)
        ]
        print("Done")
        samples = [[], []]

        mnist = MNIST()
        for step in range(20000):
            if len(samples[0]) < 50:
                samples = gen_samples(gan, sessions)
            xs = samples[0][:50]
            ys = samples[1][:50]
            samples[0] = samples[0][50:]
            samples[1] = samples[1][50:]
            mnist.train_batch(xs, ys, step)
        test_accuracy = mnist.eval_batch(mnist_data.test.images,
                                         mnist_data.test.labels)
        print("Test accuracy %g" % test_accuracy)
Example #24
def worker_estimator(args, manager, config, make_env):
    init_logging_handler(args.log_dir, '_estimator')
    agent = GAN(make_env, args, manager, config, args.process, pre_irl=True)
    agent.load(args.save_dir + '/best')

    best0, best1 = float('inf'), float('inf')
    for e in range(args.epoch):
        agent.train_disc(e, args.batchsz_traj)
        best0 = agent.test_disc(e, args.batchsz, best0)
Example #25
def main():

    for i in range(0, 1000):
        a = np.random.randint(10, 41)
        b = np.random.randint(a, 70)
        c = np.random.randint(10, b)

        args = {
            'attack_type': "portsweep",
            'max_epochs': 7000,
            'batch_size': 255,
            'sample_size': 500,
            'optimizer_learning_rate': 0.001,
            'generator_layers': [a, b, c]
        }
        for _ in range(10):
            gan = GAN(**args)
            gan.train()
            print("GAN finished with layers:")
            print(str([a, b, c]))
Example #26
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--mnist-dir", default='/tmp/mnist-data', help="Directory where mnist downloaded dataset will be stored")
    parser.add_argument("--output-dir", default="output", help="Directory where models will be saved")
    parser.add_argument("--train-digits", help="Comma separated list of digits to train generators")
    parser.add_argument("--train-mnist", action='store_true', help='If specified, train the mnist classifier based on generated digits from saved models')
    global args

    args = parser.parse_args()

    mnist_data = tf.contrib.learn.datasets.mnist.read_data_sets(args.mnist_dir, one_hot=True)

    if args.train_digits:
        gan = GAN()
        for digit in map(int, args.train_digits.split(',')):
            path = f'{args.output_dir}/digit-{digit}/model'
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            gan.train_digit(mnist_data,digit,path)
        
    elif args.train_mnist:
        gan = GAN()
        print('LOADING GENERATOR MODELS')
        sessions = [gan.restore_session(f"{args.output_dir}/digit-{digit}") for digit in range(10)]
        print('DONE')
        samples = [[],[]]

        mnist = MNIST()

        for step in range(200000):

            if len(samples[0])<50:
                samples = gen_samples(gan, sessions)

            xs = samples[0][:50]
            ys = samples[1][:50]

            samples[0] = samples[0][50:]
            samples[1] = samples[1][50:]

            mnist.train_batch(xs, ys, step)
        test_accuracy = mnist.eval_batch(mnist_data.test.images, mnist_data.test.labels)
        print(f'TEST ACCURACY {test_accuracy}')
Example #27
    def __init__(self, num_historical_days, days=10, pct_change=0):
        self.data = []
        self.labels = []
        self.test_data = []
        self.test_labels = []
        assert os.path.exists('./models/checkpoint')
        gan = GAN(num_features=5, num_historical_days=num_historical_days,
                  generator_input_size=200, is_train=False)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            with open('./models/checkpoint', 'rb') as f:
                model_name = next(f).split('"')[1]
            saver.restore(sess, "./models/{}".format(model_name))
            files = [os.path.join('./stock_data', f) for f in os.listdir('./stock_data')]
            for file in files:
                print(file)
                #Read in file -- note that parse_dates will be needed later
                df = pd.read_csv(file, index_col='timestamp', parse_dates=True)
                df = df[['open','high','low','close','volume']]
                # #Create new index with missing days
                # idx = pd.date_range(df.index[-1], df.index[0])
                # #Reindex and fill the missing day with the value from the day before
                # df = df.reindex(idx, method='bfill').sort_index(ascending=False)
                #Normalize using a rolling window of size num_historical_days
                labels = df.close.pct_change(days).map(lambda x: int(x > pct_change/100.0))
                df = ((df -
                df.rolling(num_historical_days).mean().shift(-num_historical_days))
                /(df.rolling(num_historical_days).max().shift(-num_historical_days)
                -df.rolling(num_historical_days).min().shift(-num_historical_days)))
                df['labels'] = labels
                #Drop the trailing rows that we don't have data for
                df = df.dropna()
                #Hold out the last year of trading for testing
                test_df = df[:365]
                #Padding to keep labels from bleeding
                df = df[400:]
                #This may not create good samples if num_historical_days is a
                #multiple of 7
                data = df[['open', 'high', 'low', 'close', 'volume']].values
                labels = df['labels'].values
                for i in range(num_historical_days, len(df), num_historical_days):
                    features = sess.run(gan.features, feed_dict={gan.X:[data[i-num_historical_days:i]]})
                    self.data.append(features[0])
                    print(features[0])
                    self.labels.append(labels[i-1])
                data = test_df[['open', 'high', 'low', 'close', 'volume']].values
                labels = test_df['labels'].values
                for i in range(num_historical_days, len(test_df), 1):
                    features = sess.run(gan.features, feed_dict={gan.X:[data[i-num_historical_days:i]]})
                    self.test_data.append(features[0])
                    self.test_labels.append(labels[i-1])
Example #28
def test_gan():

    #parameters
    file_name = "animals.txt"
    g_hidden_size = 10
    d_hidden_size = 10
    n_epochs = 1000
    g_epochs = 20
    d_epochs = 10
    g_initial_lr = 1
    d_initial_lr = 1
    g_multiplier = 0.9
    d_multiplier = 0.9
    g_batch_size = 100
    d_batch_size = 100

    # data
    char_list = dataloader.get_char_list(file_name)
    X_actual = dataloader.load_data(file_name)
    seq_len = X_actual.shape[1]

    # construct GAN
    gan = GAN(g_hidden_size, d_hidden_size, char_list)

    # train GAN
    gan.train(X_actual,
              seq_len,
              n_epochs,
              g_epochs,
              d_epochs,
              g_initial_lr,
              d_initial_lr,
              g_multiplier,
              d_multiplier,
              g_batch_size,
              d_batch_size,
              print_progress=True,
              num_displayed=3)
Example #29
def train(cfg):
    train_loader, valid_loader = get_data_loaders(cfg["data"])
    validation_data = get_valid_samples(cfg, valid_loader)
    g = GAN(cfg)

    it = 0
    errG, errD = None, None  # initialized so the first wandb log cannot hit an unbound name
    for i in range(cfg["fit"]["num_epoch"]):
        for images, attr_a, attr_b, mask in tqdm(train_loader):
            g.set_mode("train")

            it += 1

            attr_a = attr_a.type(torch.float)
            attr_b = attr_b.type(torch.float)

            images = images.cuda(
                cfg["GPU"]["name"]) if cfg["GPU"]["enable"] else images
            attr_a = attr_a.cuda(
                cfg["GPU"]["name"]) if cfg["GPU"]["enable"] else attr_a
            attr_b = attr_b.cuda(
                cfg["GPU"]["name"]) if cfg["GPU"]["enable"] else attr_b
            mask = mask.cuda(
                cfg["GPU"]["name"]) if cfg["GPU"]["enable"] else mask

            if it % 5 != 0:
                errD = g.stepD(images, attr_a, attr_b, mask)
            else:
                errG = g.stepG(images, attr_a, attr_b, mask)

            if cfg["wandb"]["enable"] and it % cfg["wandb"]["logs_iter"] == 0:
                wandb.log(get_log_train(errG, errD))

            if it % cfg["fit"]["save_interval"] == 0:
                g.save_models(it)

            if it % cfg["fit"]["valid_interval"] == 0:
                g.set_mode(mode='eval')
                validate_data(cfg, g, validation_data, i, it)
Example #30
def main(argv):
    # Load configs from file

    config = json.load(open(FLAGS.config))
    # set_backend()

    # Set name
    #name = '{}_{}_'.format(config['INPUT_NAME'], config['TARGET_NAME'])
    #for l in config['LABELS']:
    #    name += str(l)
    #config['NAME'] += '_' + name

    if FLAGS.use_wandb:
        import wandb
        resume_wandb = True if FLAGS.wandb_resume_id is not None else False
        wandb.init(config=config,
                   resume=resume_wandb,
                   id=FLAGS.wandb_resume_id,
                   project='EchoGen',
                   name=FLAGS.wandb_run_name)

    # Initialize GAN
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)

    model = GAN(config, FLAGS.use_wandb, device, FLAGS.dataset_path)

    # load trained models if they exist
    if FLAGS.ckpt_load is not None:
        model.load(f'{FLAGS.ckpt_load}/generator_last_checkpoint.bin',
                   model='generator')
        model.load(f'{FLAGS.ckpt_load}/discriminator_last_checkpoint.bin',
                   model='discriminator')

    if FLAGS.test:
        model.test()
    else:
        model.train()
Example #31
def wgan_albdiv(train_data, test_data):
    config.ALBEDO_DIVIDE = True
    gan = GAN(
        train_data,
        test_data,
        num_epochs=100000,
        kernel_predict=True,
        batch_size=64,
        g_layers=3,
        g_loss="vgg",
        loss_weights=[0.1, 1.0],
        g_lr=1e-3,
        g_kernel_size=[3, 3],
        g_beta1=0.5,
        g_beta2=0.9,
        c_lr=1e-4,
        c_itr=10,
        g_bn=True,
        model_dir="../experiments/models/wgan-gp_albdiv",
        log_dir="../experiments/logs/wgan-gp_albdiv",
    )
    gan.trainWGAN_GP()
    return
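
trainWGAN_GP refers to the WGAN with gradient penalty objective: the critic loss adds a term lambda * (||grad D(x_hat)||_2 - 1)^2 evaluated at random interpolates x_hat between real and generated samples (Gulrajani et al., 2017). A self-contained TensorFlow 2 sketch of that penalty term; the function and argument names are illustrative, not this repository's API:

import tensorflow as tf

def gradient_penalty(critic, real, fake):
    # Random interpolates between real and generated batches (NHWC tensors)
    eps = tf.random.uniform([tf.shape(real)[0], 1, 1, 1], 0.0, 1.0)
    x_hat = eps * real + (1.0 - eps) * fake
    with tf.GradientTape() as tape:
        tape.watch(x_hat)  # x_hat is not a Variable, so watch it explicitly
        d_hat = critic(x_hat, training=True)
    grads = tape.gradient(d_hat, x_hat)
    # Penalize the deviation of the per-sample gradient norm from 1
    norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]) + 1e-12)
    return tf.reduce_mean((norm - 1.0) ** 2)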
Example #32
    def gan_predict(self):
        tf.reset_default_graph()
        gan = GAN(num_features=5,
                  num_historical_days=self.num_historical_days,
                  generator_input_size=200,
                  is_train=False)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            saver.restore(sess, self.gan_model)
            clf = joblib.load(self.xgb_model)
            for sym, date, data in self.data:
                features = sess.run(gan.features, feed_dict={gan.X: [data]})
                features = xgb.DMatrix(features)
                print('{} {} {}'.format(
                    str(date).split(' ')[0], sym,
                    clf.predict(features)[0][1] > 0.5))
Example #33
from gan import GAN

if __name__ == '__main__':

    GAN_USERNAME = ''
    GAN_PASSWORD = ''
    LIST_ID = ''  # get this calling gan.api.attributes.listing(gan.username, gan.encrypted_password)
    API_KEY = ''  # get this from the website. Go to Contacts->API & Forms

    gan = GAN(GAN_USERNAME, GAN_PASSWORD)

    # Try to login
    if gan.login():

        # List all supported methods
        gan.list_supported_methods()

        print('\n\nList all newsletters')
        newsletters = gan.api.newsletter.listing(gan.username, gan.encrypted_password)
        for newsletter in newsletters:
            print(newsletter)

        print('\n\nList all attributes')
        attributes = gan.api.attributes.listing(gan.username, gan.encrypted_password)
        for attribute in attributes:
            print(attribute)

        print('\n\nAdd new attributes')
        gan.api.attributes.create(gan.username, gan.encrypted_password, 'test_city')
        gan.api.attributes.create(gan.username, gan.encrypted_password, 'shoe_size')