Example #1
train_dataset = tf.data.Dataset.from_tensor_slices(
    (traindata[:, 0], traindata[:, 2])
).shuffle(BUFFER_SIZE).batch(GLOBAL_BATCH_SIZE)
test_dataset = tf.data.Dataset.from_tensor_slices(
    (testdata[:, 0], testdata[:, 2])
).batch(strategy.num_replicas_in_sync)

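# Distribute the input pipelines across the replicas of the tf.distribute strategy.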
train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset)
test_dist_dataset = strategy.experimental_distribute_dataset(test_dataset)

# Create a checkpoint directory to store the checkpoints.
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")

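# Model, optimizer and checkpoint are created inside strategy.scope() so that
# their variables are mirrored across replicas.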
with strategy.scope():
    if args.parallel:
        rim = build_rim_parallel(params)
    else:
        rim = build_rim_split(params)
    grad_fn = recon_grad

    def get_opt(lr):
        return tf.keras.optimizers.Adam(learning_rate=lr)

    optimizer = tf.keras.optimizers.Adam(learning_rate=args.lr)
    checkpoint = tf.train.Checkpoint(model=rim)
    #


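# Per-replica training step: start from a Gaussian random guess and regress the
# RIM reconstruction onto the true field with a mean-squared-error loss.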
def train_step(inputs):
    x_true, y = inputs
    x_init = tf.random.normal(x_true.shape)
    with tf.GradientTape() as tape:
        x_pred = rim(x_init, y, grad_fn)
        res = (x_true - x_pred)
        loss = tf.reduce_mean(tf.square(res))
    gradients = tape.gradient(loss, rim.trainable_variables)
    return loss, gradients
Example #2
def main():
    """
    Model function for the CosmicRIM.
    """

    rim = build_rim_parallel(params)
    grad_fn = recon_dm_grad
    #

#
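    # Training batches are generated on the fly: map pm_data over an index
    # range and prefetch (-1 = AUTOTUNE) so generation overlaps with training.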
    train_dataset = tf.data.Dataset.range(args.batch_in_epoch)
    train_dataset = train_dataset.map(pm_data)
    train_dataset = train_dataset.prefetch(-1)
    test_dataset = tf.data.Dataset.range(1).map(pm_data_test).prefetch(-1)
#
    #traindata, testdata = get_data()
    #idx = np.random.randint(0, traindata.shape[0], 1)
    #xx, yy = traindata[idx, 0].astype(np.float32), traindata[idx, 1].astype(np.float32), 
    #x_init = np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)
    #x_pred = rim(x_init, yy, grad_fn)

    

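    # One training step: run the RIM on (x_init, y), compare to the truth with
    # an MSE loss, and return the loss and its gradients w.r.t. the RIM weights.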
    #
    # @tf.function
    def rim_train(x_true, x_init, y):
        with tf.GradientTape() as tape:
            x_pred = rim(x_init, y, grad_fn)
            res  = (x_true - x_pred)
            loss = tf.reduce_mean(tf.square(res))
        gradients = tape.gradient(loss, rim.trainable_variables)
        return loss, gradients


    ##Train and save
    piter, testiter = 10, 20
    losses = []
    lrs = [0.001, 0.0005, 0.0001]
    lepochs = [2, 20, 20]
    trainiter = 0 
    x_test, y_test = None, None

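    # Staged schedule: train with three successively smaller learning rates,
    # creating a fresh Adam optimizer for each stage.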
    for il in range(3):
        print('Learning rate = %0.3e'%lrs[il])
        opt = tf.keras.optimizers.Adam(learning_rate=lrs[il])

        i = 0 
        for ie in range(lepochs[il]):

            start = time.time()
            for ii, ix in enumerate(train_dataset):
                xx, yy = ix
                x_init = np.random.normal(size=np.prod(xx.shape)).reshape(xx.shape).astype(np.float32)
                loss, gradients = rim_train(x_true=tf.constant(xx),
                                            x_init=tf.constant(x_init),
                                            y=tf.constant(yy))
                losses.append(loss.numpy())    
                opt.apply_gradients(zip(gradients, rim.trainable_variables))
                i += 1
                trainiter += 1
                if ii > args.batch_in_epoch: break

            print("Time taken for %d iterations : "%i, time.time() - start)
            print("Loss at iteration %d : "%i, losses[-1])

            if i%testiter == 0: 
                plt.plot(losses)
                plt.savefig(ofolder + 'losses.png')
                plt.close()

                if x_test is None:
                    for x_test, y_test in test_dataset: 
                        print("shape of test set : ", x_test.shape)
                        pred_adam = adam(tf.constant(x_init), tf.constant(y_test), grad_fn)
                        pred_adam = [pred_adam[0].numpy(), pm(pred_adam)[0].numpy()]
                        pred_adam10 = adam10(tf.constant(x_init), tf.constant(y_test), grad_fn)
                        pred_adam10 = [pred_adam10[0].numpy(), pm(pred_adam10)[0].numpy()]
                        minic, minfin = fid_recon.reconstruct(tf.constant(y_test), RRs=RRs, niter=adamiters10, lr=0.1)
                        compares =  [pred_adam, pred_adam10, [minic[0], minfin[0]]]
                        x_test, y_test = x_test.numpy(), y_test.numpy()
                        print('Test set generated')
                        break

                
                x_init = np.random.normal(size=np.prod(x_test.shape)).reshape(x_test.shape).astype(np.float32)
                #x_init = (y_test - (y_test.max() - y_test.min())/2.)/y_test.std() + np.random.normal(size=x_test.size).reshape(x_test.shape).astype(np.float32)
                pred = rim(tf.constant(x_init), tf.constant(y_test), grad_fn)[-1]
                check_im(x_test[0], x_init[0], pred.numpy()[0], fname=ofolder + 'rim-im-%04d.png'%trainiter)
                check_2pt(x_test, y_test, rim, grad_fn, compares, fname=ofolder + 'rim-2pt-%04d.png'%trainiter)

                rim.save_weights(ofolder + '/%d'%trainiter)
Example #3
except Exception as e:
    print(e)

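# Same on-the-fly pipelines as Example #2, now via the datamodel module; the
# test set gets one element per replica so it can be distributed evenly.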
train_dataset = tf.data.Dataset.range(args.batch_in_epoch)
train_dataset = train_dataset.map(datamodel.pm_data)
train_dataset = train_dataset.prefetch(-1)
test_dataset = tf.data.Dataset.range(strategy.num_replicas_in_sync).map(
    datamodel.pm_data_test).prefetch(-1)

train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset)
test_dist_dataset = strategy.experimental_distribute_dataset(test_dataset)

###############################################

with strategy.scope():
    rim = build_rim_parallel(params)
    grad_fn = datamodel.recon_grad
    optimizer = tf.keras.optimizers.Adam(learning_rate=args.lr)
    checkpoint = tf.train.Checkpoint(model=rim)
    #


def train_step(inputs):
    x_true, y = inputs
    x_init = tf.random.normal(x_true.shape)
    with tf.GradientTape() as tape:
        x_pred = rim(x_init, y, grad_fn)
        res = (x_true - x_pred)
        loss = tf.reduce_mean(
            tf.square(res))  ##This is not advised, come back to this
    gradients = tape.gradient(loss, rim.trainable_variables)
    return loss, gradients
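
Both distributed examples stop before the per-replica step is wired into the
strategy. The following is a minimal sketch, not part of the original source,
of the standard tf.distribute driver that would consume train_dist_dataset;
the train_step_and_update helper is our assumption and relies on train_step
returning (loss, gradients) as above.

# Sketch (assumption): drive the per-replica step with the strategy, folding
# the optimizer update into the step as in the standard custom-loop pattern.
def train_step_and_update(inputs):
    loss, gradients = train_step(inputs)
    optimizer.apply_gradients(zip(gradients, rim.trainable_variables))
    return loss


@tf.function
def distributed_train_step(inputs):
    per_replica_losses = strategy.run(train_step_and_update, args=(inputs,))
    # Summing the per-replica losses only gives a true global-batch average if
    # the loss inside train_step is scaled by the global batch size (e.g. via
    # tf.nn.compute_average_loss); this is what the "not advised" comment
    # above alludes to.
    return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses,
                           axis=None)


for inputs in train_dist_dataset:
    loss = distributed_train_step(inputs)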
Example #4
def main():
    """
    Model function for the CosmicRIM.
    """

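    # Encode the run configuration in a filename suffix used for output figures.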
    if args.posdata:
        suff = 'pos'
    else:
        suff = 'mass'
    if args.nbody:
        suff = suff + '-T%02d' % nsteps
    else:
        suff = suff + '-LPT2'
    if not args.prior:
        suff = suff + '-noprior'
    if len(RRs) != 2:
        suff = suff + "-RR%d" % (len(RRs))
    print(suff)

    rim = build_rim_parallel(params)
    #grad_fn = recon_dm_grad
    #
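    # Set up the bias parameters (b1, b2) and the error mesh that the
    # reconstruction gradient and loss take as parameters.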
    b1, b2, errormesh = setupbias()
    bias = tf.constant([b1, b2], dtype=tf.float32)
    grad_fn = recon_grad
    grad_params = [bias, errormesh]

    idx = np.random.randint(0, testdata.shape[0], 1)
    idx = idx * 0 + 1  # always select index 1
    xx = testdata[idx, 0].astype(np.float32)
    yy = testdata[idx, 1].astype(np.float32)
    x_init = np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)
    fid_recon = Recon_Bias(nc,
                           bs,
                           bias,
                           errormesh,
                           a0=0.1,
                           af=1.0,
                           nsteps=args.nsteps,
                           nbody=args.nbody,
                           lpt_order=2,
                           anneal=True,
                           prior=args.prior)
    #minic, minfin = fid_recon.reconstruct(tf.constant(yy), RRs=[1.0, 0.0], niter=args.rim_iter*10, lr=0.1)

    print("Loss at truth : ",
          recon_model(tf.constant(xx), tf.constant(yy), *[bias, errormesh]))
    print(
        "Loss at init : ",
        recon_model(tf.constant(x_init), tf.constant(yy), *[bias, errormesh]))

    pred_adam = adam(tf.constant(x_init), tf.constant(yy), grad_fn,
                     [bias, errormesh])
    print(
        "Loss at adam : ",
        recon_model(tf.constant(pred_adam), tf.constant(yy),
                    *[bias, errormesh]))
    pred_adam = [pred_adam[0].numpy(), biasfield(pred_adam, bias)[0].numpy()]

    pred_adam10 = adam10(tf.constant(x_init), tf.constant(yy), grad_fn,
                         [bias, errormesh])
    print(
        "Loss at adam 10x : ",
        recon_model(tf.constant(pred_adam10), tf.constant(yy),
                    *[bias, errormesh]))
    pred_adam10 = [
        pred_adam10[0].numpy(),
        biasfield(pred_adam10, bias)[0].numpy()
    ]
    minic, minfin = fid_recon.reconstruct(tf.constant(yy),
                                          RRs=RRs,
                                          niter=args.rim_iter * 10,
                                          lr=0.1)
    compares = [pred_adam, pred_adam10, [minic[0], minfin[0]]]

    check_im(xx[0],
             x_init[0],
             minic[0],
             fname='./figs/L%04d-N%03d-%s-im.png' % (bs, nc, suff))
    check_2pt(xx,
              yy,
              rim,
              grad_fn,
              grad_params,
              compares,
              fname='./figs/L%04d-N%03d-%s-2pt.png' % (bs, nc, suff))
    print('Test set generated')

    sys.exit()
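    # NOTE: sys.exit() above makes everything below unreachable; this example
    # only generates the comparison figures.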

    x_init = np.random.normal(size=xx.size).reshape(xx.shape).astype(
        np.float32)
    x_pred = rim(x_init, yy, grad_fn, grad_params)

    #
    # @tf.function
    def rim_train(x_true, x_init, y):

        with tf.GradientTape() as tape:
            x_pred = rim(x_init, y, grad_fn, grad_params)
            res = (x_true - x_pred)
            loss = tf.reduce_mean(tf.square(res))
        gradients = tape.gradient(loss, rim.trainable_variables)
        return loss, gradients

    ##Train and save
    piter, testiter = 10, 20
    losses = []
    lrs = [0.001, 0.0005, 0.0001]
    liters = [201, 1001, 1001]
    trainiter = 0
    start = time.time()
    x_test, y_test = None, None

    for il in range(3):
        print('Learning rate = %0.3e' % lrs[il])
        opt = tf.keras.optimizers.Adam(learning_rate=lrs[il])

        for i in range(liters[il]):
            idx = np.random.randint(0, traindata.shape[0], args.batch_size)
            xx = traindata[idx, 0].astype(np.float32)
            yy = traindata[idx, 1].astype(np.float32)
            x_init = np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)
            #x_init = (yy - (yy.max() - yy.min())/2.)/yy.std() + np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)

            loss, gradients = rim_train(x_true=tf.constant(xx),
                                        x_init=tf.constant(x_init),
                                        y=tf.constant(yy))

            losses.append(loss.numpy())
            opt.apply_gradients(zip(gradients, rim.trainable_variables))

            if i % piter == 0:
                print("Time taken for %d iterations : " % piter,
                      time.time() - start)
                print("Loss at iteration %d : " % i, losses[-1])
                start = time.time()
            if i % testiter == 0:
                plt.plot(losses)
                plt.savefig(ofolder + 'losses.png')
                plt.close()

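                # Build the comparison baselines (Adam, Adam 10x, annealed
                # fid_recon) once on the first test pass and reuse them.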
                if x_test is None:
                    idx = np.random.randint(0, testdata.shape[0], 1)
                    x_test = testdata[idx, 0].astype(np.float32)
                    y_test = testdata[idx, 1].astype(np.float32)
                    print(
                        "Loss at truth : ",
                        recon_model(tf.constant(x_test), tf.constant(y_test),
                                    *[bias, errormesh]))
                    print(
                        "Loss at init : ",
                        recon_model(tf.constant(x_init), tf.constant(y_test),
                                    *[bias, errormesh]))

                    pred_adam = adam(tf.constant(x_init), tf.constant(y_test),
                                     grad_fn, [bias, errormesh])
                    print(
                        "Loss at adam : ",
                        recon_model(tf.constant(pred_adam),
                                    tf.constant(y_test), *[bias, errormesh]))
                    pred_adam = [
                        pred_adam[0].numpy(),
                        biasfield(pred_adam, bias)[0].numpy()
                    ]

                    pred_adam10 = adam10(tf.constant(x_init),
                                         tf.constant(y_test), grad_fn,
                                         [bias, errormesh])
                    print(
                        "Loss at adam 10x : ",
                        recon_model(tf.constant(pred_adam10),
                                    tf.constant(y_test), *[bias, errormesh]))
                    pred_adam10 = [
                        pred_adam10[0].numpy(),
                        biasfield(pred_adam10, bias)[0].numpy()
                    ]
                    minic, minfin = fid_recon.reconstruct(
                        tf.constant(y_test), RRs=[1.0, 0.0],
                        niter=args.rim_iter * 10, lr=0.1)
                    compares = [pred_adam, pred_adam10, [minic[0], minfin[0]]]
                    check_2pt(x_test,
                              y_test,
                              rim,
                              grad_fn,
                              grad_params,
                              compares,
                              fname='halosrecon.png')
                    print('Test set generated')

                x_init = np.random.normal(size=x_test.size).reshape(
                    x_test.shape).astype(np.float32)
                #x_init = (y_test - (y_test.max() - y_test.min())/2.)/y_test.std() + np.random.normal(size=x_test.size).reshape(x_test.shape).astype(np.float32)
                pred = rim(tf.constant(x_init), tf.constant(y_test), grad_fn,
                           grad_params)[-1]
                check_im(x_test[0],
                         x_init[0],
                         pred.numpy()[0],
                         fname=ofolder + 'rim-im-%04d.png' % trainiter)
                check_2pt(x_test,
                          y_test,
                          rim,
                          grad_fn,
                          grad_params,
                          compares,
                          fname=ofolder + 'rim-2pt-%04d.png' % trainiter)
                rim.save_weights(ofolder + '/%d' % trainiter)

            trainiter += 1
Example #5
def main():
    """
    Model function for the CosmicRIM.
    """

    rim = build_rim_parallel(params)
    grad_fn = recon_grad
    #

#
#    train_dataset = tf.data.Dataset.range(args.batch_in_epoch)
#    train_dataset = train_dataset.map(pm_data)
#    # dset = dset.apply(tf.data.experimental.unbatch())
#    train_dataset = train_dataset.prefetch(-1)
#    test_dataset = tf.data.Dataset.range(1).map(pm_data_test).prefetch(-1)
#
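    # Load the data and run one sanity-check forward pass (this also builds
    # the model's weights on first call).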
    traindata, testdata = get_data()
    idx = np.random.randint(0, traindata.shape[0], 1)
    xx = traindata[idx, 0].astype(np.float32)
    yy = traindata[idx, -1].astype(np.float32)
    x_init = np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)
    x_pred = rim(x_init, yy, grad_fn)

    

    #
    # @tf.function
    def rim_train(x_true, x_init, y):

        with tf.GradientTape() as tape:
            x_pred = rim(x_init, y, grad_fn)
            res  = (x_true - x_pred)
            loss = tf.reduce_mean(tf.square(res))
        gradients = tape.gradient(loss, rim.trainable_variables)
        return loss, gradients


    ##Train and save
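    # piter: iterations between progress prints; testiter: iterations between
    # test diagnostics and weight checkpoints.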
    piter, testiter = 10, 50
    losses = []
    lrs = [0.001, 0.0005, 0.0001]
    liters = [201, 1001, 1001]
    trainiter = 0 
    start = time.time()
    x_test, y_test = None, None

    for il in range(3):
        print('Learning rate = %0.3e'%lrs[il])
        opt = tf.keras.optimizers.Adam(learning_rate=lrs[il])

        for i in range(liters[il]):
            idx = np.random.randint(0, traindata.shape[0], args.batch_size)
            xx = traindata[idx, 0].astype(np.float32)
            yy = traindata[idx, -1].astype(np.float32)
            x_init = np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)
            #x_init = (yy - (yy.max() - yy.min())/2.)/yy.std() + np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)
            

            loss, gradients = rim_train(x_true=tf.constant(xx),
                                        x_init=tf.constant(x_init),
                                        y=tf.constant(yy))

            losses.append(loss.numpy())    
            opt.apply_gradients(zip(gradients, rim.trainable_variables))

            if i%piter == 0: 
                print("Time taken for %d iterations : "%piter, time.time() - start)
                print("Loss at iteration %d : "%i, losses[-1])
                start = time.time()
            if i%testiter == 0: 
                plt.plot(losses)
                plt.savefig(ofolder + 'losses.png')
                plt.close()

                ##check 2pt and compare to Adam
                #idx = np.random.randint(0, testdata.shape[0], 1)
                #xx, yy = testdata[idx, 0].astype(np.float32), testdata[idx, 1].astype(np.float32), 
                if x_test is None:
                    idx = np.random.randint(0, testdata.shape[0], 1)
                    x_test = testdata[idx, 0].astype(np.float32)
                    y_test = testdata[idx, -1].astype(np.float32)
                    pred_adam = adam(tf.constant(x_init), tf.constant(y_test), grad_fn)
                    pred_adam = [pred_adam[0].numpy(), pm(pred_adam)[0].numpy()]
                    pred_adam10 = adam10(tf.constant(x_init), tf.constant(y_test), grad_fn)
                    pred_adam10 = [pred_adam10[0].numpy(), pm(pred_adam10)[0].numpy()]
                    minic, minfin = fid_recon.reconstruct(tf.constant(y_test), RRs=[1.0, 0.0], niter=args.rim_iter*10, lr=0.1)
                    compares =  [pred_adam, pred_adam10, [minic[0], minfin[0]]]
                    print('Test set generated')

                x_init = np.random.normal(size=x_test.size).reshape(x_test.shape).astype(np.float32)
                #x_init = (y_test - (y_test.max() - y_test.min())/2.)/y_test.std() + np.random.normal(size=x_test.size).reshape(x_test.shape).astype(np.float32)
                pred = rim(tf.constant(x_init), tf.constant(y_test), grad_fn)[-1]
                check_im(x_test[0], x_init[0], pred.numpy()[0], fname=ofolder + 'rim-im-%04d.png'%trainiter)
                check_im(y_test[0], x_init[0], gal_sample(pm(pred)).numpy()[0], fname=ofolder + 'rim-fin-%04d.png'%trainiter)
                check_2pt(x_test, y_test, rim, grad_fn, compares, fname=ofolder + 'rim-2pt-%04d.png'%trainiter)

                rim.save_weights(ofolder + '/%d'%trainiter)

            trainiter += 1
Example #6
def main():
    """
    Model function for the CosmicRIM.
    """

    if args.parallel:
        rim = build_rim_parallel(params)
    else:
        rim = build_rim_split(params)
    grad_fn = recon_dm_grad
    #
    #
    traindata, testdata = get_data()
    idx = np.random.randint(0, traindata.shape[0], 1)
    xx = traindata[idx, 0].astype(np.float32)
    yy = traindata[idx, 1].astype(np.float32)
    x_init = np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)
    x_pred = rim(x_init, yy, grad_fn)

    trainiter = args.trainiter
    rim.load_weights(ofolder + '%d' % trainiter)
    print('Loaded')
    idx = np.random.randint(0, testdata.shape[0], 1)
    x_test = testdata[idx, 0].astype(np.float32)
    y_test = testdata[idx, 1].astype(np.float32)
    x_init = np.random.normal(size=x_test.size).reshape(x_test.shape).astype(np.float32)
    pred = rim(tf.constant(x_init), tf.constant(y_test), grad_fn)

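    # Diagnostics per RIM iterate: cross-correlation r(k) = P_x / (P_t * P_p)**0.5
    # (left panel) and transfer function (P_p / P_t)**0.5 (right panel) vs truth.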
    fig, ax = plt.subplots(1, 2, figsize=(9, 4))
    k, pkt = tools.power(x_test[0], boxsize=bs)
    lss = ["-"] * 7 + ["--"] * 7
    print(lss)
    for i in range(pred.shape[0]):
        print(i, pred[i].shape, x_test.shape)
        k, pk = tools.power(pred[i, 0].numpy(), boxsize=bs)
        k, px = tools.power(pred[i, 0].numpy(), f2=x_test[0], boxsize=bs)
        rcc = px / (pkt * pk)**0.5
        print(rcc)
        ax[0].plot(k,
                   rcc,
                   'C%d' % (i % 7),
                   alpha=0.7,
                   ls=lss[(i % 7)],
                   label=i)
        ax[1].plot(k, (pk / pkt)**0.5,
                   'C%d' % (i % 7),
                   alpha=0.7,
                   ls=lss[(i % 7)])
    for axis in ax:
        axis.semilogx()
        axis.legend()
        axis.grid(which='both')
    ax[0].set_ylim(-0.1, 1.2)
    ax[1].set_ylim(-0.2, 2.5)
    plt.savefig('./figs/2pt-iters.png')
    plt.close()

    fig, ax = plt.subplots(2, 5, figsize=(14, 8))
    for i in range(10):
        ax.flatten()[i].imshow(pred[i + 1, 0].numpy().sum(axis=0))
    plt.savefig('./figs/im-iters.png')
    plt.close()

    lss = ['-', '--', ':', '-.']

    pred_adam = adam(tf.constant(x_init), tf.constant(y_test), grad_fn)
    pred_adam = [pred_adam[0].numpy(), pm(pred_adam)[0].numpy()]
    pred_adam10 = adam10(tf.constant(x_init), tf.constant(y_test), grad_fn)
    pred_adam10 = [pred_adam10[0].numpy(), pm(pred_adam10)[0].numpy()]
    minic, minfin = fid_recon.reconstruct(tf.constant(y_test),
                                          RRs=[1.0, 0.0],
                                          niter=args.rim_iter * 10,
                                          lr=0.1)
    compares = [pred_adam, pred_adam10, [minic[0], minfin[0]]]
    print('Test set generated')

    check_im(x_test[0], x_init[0], pred.numpy()[-1, 0], fname='rim-im')
    check_2pt(x_test, y_test, rim, grad_fn, compares, fname='rim-2pt')

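    # Re-run the RIM starting from its own final prediction, from the data,
    # and from the truth, saving figures for each case.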
    x_init = pred.numpy()[-1].copy()
    pred = rim(tf.constant(x_init), tf.constant(y_test), grad_fn)[-1]
    check_im(x_test[0], x_init[0], pred.numpy()[0], fname='rim-im-pred')
    check_2pt(x_test, y_test, rim, grad_fn, compares, fname='rim-2pt-pred')

    x_init = y_test
    pred = rim(tf.constant(x_init), tf.constant(y_test), grad_fn)[-1]
    check_im(x_test[0], x_init[0], pred.numpy()[0], fname='rim-im-data')
    check_2pt(x_test, y_test, rim, grad_fn, compares, fname='rim-2pt-data')

    x_init = x_test
    pred = rim(tf.constant(x_init), tf.constant(y_test), grad_fn)[-1]
    check_im(x_test[0], x_init[0], pred.numpy()[0], fname='rim-im-truth')
    check_2pt(x_test, y_test, rim, grad_fn, compares, fname='rim-2pt-truth')