Example #1
import pandas as pd

import lang   # project-local module: character-level language utilities
import nets   # project-local module: model definitions

def test_single_task_training(file_path):
    # load the dataset: column 1 holds the target values, the last column the input strings
    df = pd.read_csv(file_path)
    y_tr = df.values[:, 1]
    x_tr = df.values[:, -1]
    # build the character vocabulary and encode the strings as index tensors
    lang_obj = lang.Lang(x_tr)
    vocab_size = len(lang_obj.idx2ch)
    x_tensor = lang.preprocessing(x_tr, lang_obj)

    # define models: forward and backward character encoders, a bidirectional
    # attention layer, and a fully connected head (the spec list presumably
    # alternates layer widths, activations, and dropout rates)
    enc_f = nets.Encoder(vocab_size=vocab_size, reverse=False)
    enc_b = nets.Encoder(vocab_size=vocab_size, reverse=True)
    attention = nets.BidirectionalAttention(128)
    fcuk = nets.FullyConnectedUnits([128, 'tanh', 0.25, 32, 'tanh', 0.10, 1])

    # define the flow function: run both encoders, fuse them with attention,
    # and map the attended representation to a single prediction
    def flow(xs, models):
        enc_f, enc_b, attention, fcuk = models
        eo_f, h_f = enc_f(xs)    # forward encoder: per-step outputs and final state
        eo_b, h_b = enc_b(xs)    # backward encoder: per-step outputs and final state
        attention_weights = attention(eo_f, eo_b, h_f, h_b)
        ys = fcuk(attention_weights)
        return ys

    box = nets.Box(flow=flow,
                   models=[enc_f, enc_b, attention, fcuk],
                   n_epochs=1,
                   batch_sz=32)

    box.train(x_tensor, y_tr)
    box.save_weights('box')
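The nets module is project-specific, so as a self-contained illustration, a minimal stand-in for the reverse=False / reverse=True encoder pair could be written in plain tf.keras as follows (the class and argument names here are illustrative, not the actual nets.Encoder API):

import tensorflow as tf

class SimpleEncoder(tf.keras.layers.Layer):
    # Embeds integer-encoded characters and runs a GRU, optionally over the
    # reversed sequence, as a stand-in for the forward/backward encoders above.
    def __init__(self, vocab_size, units=128, reverse=False):
        super().__init__()
        self.reverse = reverse
        self.embedding = tf.keras.layers.Embedding(vocab_size, units)
        self.gru = tf.keras.layers.GRU(units, return_sequences=True, return_state=True)

    def call(self, xs):
        if self.reverse:
            xs = tf.reverse(xs, axis=[1])          # read the sequence right to left
        outputs, state = self.gru(self.embedding(xs))
        return outputs, state                      # per-step outputs and final hidden state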
Example #2
    def __init__(self, args, sess=None):
        self.session(sess)
        self.netE = nets.Encoder()
        self.netG = nets.Generator()
        self.train = tf.placeholder(tf.bool)
        self.build_network(args.nsf, args.npx, args.batch_size)

        if sess is None and not args.check:
            self.initialize()

        variables_to_restore = tf.trainable_variables() + tf.moving_average_variables()
        super(SurfaceToStructure, self).__init__(variables_to_restore)
Example #3
    def __init__(self, args, sess=None):
        self.session(sess)
        self.G_losses = []
        self.D_losses = []
        self.xgs = []
        self.netE = nets.Encoder()
        self.netG = nets.Generator()
        self.netD = nets.Discriminator()
        self.train_G = tf.placeholder(tf.bool)
        self.train_D = tf.placeholder(tf.bool)
        opt = tf.train.AdamOptimizer(args.learning_rate, 0.5)   # second positional argument is beta1 = 0.5, a common GAN setting
        G_tower_grads = []
        D_tower_grads = []
        n_disc = len(args.nsf_disc)

        for i in range(args.n_gpus):
            gpu_name = '/gpu:{0}'.format(i + 3)   # towers are placed starting at /gpu:3
            with tf.device(gpu_name):
                print(gpu_name)
                batch_size_per_gpu = args.batch_size // args.n_gpus
                self.build_network(args, batch_size_per_gpu, i)

                G_grads = opt.compute_gradients(self.G_losses[-1], var_list=self.E_vars + self.G_vars)
                G_tower_grads.append(G_grads)

                # one gradient set per discriminator; use k so the GPU index i is not shadowed
                D_grads = [opt.compute_gradients(self.D_losses[-1][k], var_list=self.D_vars[k]) for k in range(n_disc)]
                D_tower_grads.append(D_grads)

        self.optG = opt.apply_gradients(average_gradients(G_tower_grads))
        self.G_loss = tf.reduce_mean(self.G_losses)
        self.xg = tf.concat(self.xgs, 0)

        self.optD = []
        self.D_loss = []
        for i in range(n_disc):
            grads = []
            losses = []
            for j in range(args.n_gpus):
                grads.append(D_tower_grads[j][i])
                losses.append(self.D_losses[j][i])
            self.optD.append(opt.apply_gradients(average_gradients(grads)))
            self.D_loss.append(tf.reduce_mean(losses))

        if sess is None and not args.check:
            self.initialize()

        ma_vars = tf.moving_average_variables()
        BN_vars = [var for var in ma_vars if var.name.startswith('E') or var.name.startswith('G')]
        variables_to_save = self.E_vars + self.G_vars + BN_vars
        super(SurfaceToStructure, self).__init__(variables_to_save)
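The multi-GPU constructor above calls an average_gradients helper that is not shown in the snippet. A conventional implementation, in the style of the classic multi-GPU TensorFlow examples, averages each variable's gradient over the per-GPU towers; the sketch below assumes dense gradients and is not the repository's actual code:

def average_gradients(tower_grads):
    # tower_grads: one list of (gradient, variable) pairs per GPU,
    # as returned by opt.compute_gradients on each tower.
    averaged = []
    for grads_and_vars in zip(*tower_grads):
        grads = tf.stack([g for g, _ in grads_and_vars], axis=0)
        mean_grad = tf.reduce_mean(grads, axis=0)
        shared_var = grads_and_vars[0][1]   # variables are shared across towers
        averaged.append((mean_grad, shared_var))
    return averaged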
Example #4
y_te = scaler.transform(y_te)
with open('scaler.p', 'wb') as f_handle:
    pickle.dump(scaler, f_handle)

# save the dataset for later use
np.save('y_tr', y_tr)
np.save('x_tr', x_tr)
np.save('y_te', y_te)
np.save('x_te', x_te)

# build the character vocabulary over the train and test strings,
# then encode the training strings as index sequences
lang_obj = lang.Lang(list(x_tr) + list(x_te))
vocab_size = len(lang_obj.idx2ch) + 1   # +1 presumably reserves an index for padding
x_tr = lang.preprocessing(x_tr, lang_obj)

# define models
enc_f = nets.Encoder(vocab_size=vocab_size, batch_sz=BATCH_SZ, reverse=False)
enc_b = nets.Encoder(vocab_size=vocab_size, batch_sz=BATCH_SZ, reverse=True)
attention = nets.BidirectionalWideAttention(128)
fcuk = nets.FullyConnectedUnits(
    [512, 'tanh', 0.30, 512, 'tanh', 0.30, 512, 'tanh', 0.25])
fcuk_props = nets.FullyConnectedUnits([9])
decoder = nets.AttentionDecoder(vocab_size=vocab_size)

# convert to tensor
x_tr = tf.convert_to_tensor(x_tr)
y_tr = tf.convert_to_tensor(y_tr)

# make them into a dataset object
ds = tf.data.Dataset.from_tensor_slices((x_tr, y_tr)).shuffle(y_tr.shape[0])
ds = ds.apply(tf.contrib.data.batch_and_drop_remainder(BATCH_SZ))
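tf.contrib.data.batch_and_drop_remainder has since been deprecated (and tf.contrib is gone in TensorFlow 2); on TensorFlow 1.10 and later the same pipeline can be written with the built-in drop_remainder argument:

ds = tf.data.Dataset.from_tensor_slices((x_tr, y_tr)).shuffle(y_tr.shape[0])
ds = ds.batch(BATCH_SZ, drop_remainder=True)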
Example #5
np.save('fp_tr', fp_tr)
np.save('fp_te', fp_te)
'''

y_tr = np.load('y_tr.npy')
x_tr = np.load('x_tr.npy')
fp_tr = np.load('fp_tr.npy')
n_samples = y_tr.shape[0]

with open('lang_obj.p', 'rb') as f_handle:
    lang_obj = pickle.load(f_handle)
vocab_size = len(lang_obj.idx2ch) + 1   # +1 presumably reserves an index for padding

# define models
enc_f = nets.Encoder(vocab_size=vocab_size, batch_sz=BATCH_SZ, reverse=False,
                     enc_units=512)
enc_b = nets.Encoder(vocab_size=vocab_size, batch_sz=BATCH_SZ, reverse=True,
                     enc_units=512)
attention = nets.BidirectionalWideAttention(512)
fcuk = nets.FullyConnectedUnits([512, 'leaky_relu', 0.25, 512, 'leaky_relu'])
d_mean = nets.FullyConnectedUnits([512])
d_log_var = nets.FullyConnectedUnits([512])
fcuk_props = nets.FullyConnectedUnits([9])
fcuk_fp = nets.FullyConnectedUnits([167, 'sigmoid'])
decoder = nets.OneHotDecoder(vocab_size=vocab_size, dec_units=512)
bypass_v_f = nets.FullyConnectedUnits([1])
simple_decoder = nets.SimpleDecoder(vocab_size=vocab_size, dec_units=1024,
                                    batch_sz=BATCH_SZ)

# convert to tensor
x_tr = tf.convert_to_tensor(x_tr)
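The d_mean and d_log_var heads suggest a variational latent code. The rest of the script is not shown here, but heads like these usually feed the standard reparameterization step; the following is a generic sketch under that assumption, not code from the repository:

def sample_latent(mean, log_var):
    # z = mean + sigma * eps with eps ~ N(0, I); keeps the sampling step differentiable
    eps = tf.random_normal(tf.shape(mean))
    return mean + tf.exp(0.5 * log_var) * eps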