Example #1
File: rbm.py  Project: ysmiraak/lgm
    def gen(self, sess, k= 4, v= None, ret_v= True, ret_h= False):
        """yields samples from the gibbs chain, `k` steps per draw;
        optionally reseeds the visible units with `v` first."""
        if v is not None: sess.run(tf.assign(self.v, v, validate_shape= False))
        if ret_v and ret_h:
            ret = self.gibbs
        elif ret_v:
            ret = self.gibbs[0]
        elif ret_h:
            ret = self.gibbs[1]
        else:
            # PEP 479: raising StopIteration inside a generator surfaces as a
            # RuntimeError, so signal the bad arguments explicitly instead.
            raise ValueError("not ret_v and not ret_h")
        while True: yield sess.run(ret, feed_dict= {self.k_: k})


if False:
    from utils import mnist
    batchit = mnist(batch_size= 100, ds= 'train', with_labels= False, binary= True)

    rbm = Rbm(28*28, 512, samples= 100)
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())

    # rm -r log
    # tf.summary.FileWriter("log/rbm", sess.graph).close()
    # tf.reset_default_graph()
    # sess.close()

    wtr = tf.summary.FileWriter("log/rbm")
    rbm.pcd(sess, wtr, batchit, k= 4, lr= 0.01, steps= 60000, step_plot= 10000)
    rbm.pcd(sess, wtr, batchit, k= 4, lr= 0.001, steps= 12000, step_plot= 3000)
    wtr.close()
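
    # a minimal sketch (an assumption, not part of the original snippet) of
    # consuming the infinite generator above once pcd has run: islice takes
    # finitely many draws from the gibbs chain
    from itertools import islice
    samples = list(islice(rbm.gen(sess, k= 4), 5))  # 5 sample batches, k= 4 gibbs steps apart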
Example #2
import numpy as np

from keras.layers import Input, Dense, Dropout
from keras.models import Model

from ddrop.layers import DropConnectDense, DropConnect
from utils import mnist

batch_size = 128
nb_epoch = 12

# note: the following constants are not used by the dense model below
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)

(X_train, Y_train), (X_test, Y_test), nb_classes = mnist()

X_train = X_train.reshape(X_train.shape[0], -1)
X_test = X_test.reshape(X_test.shape[0], -1)
inputs = Input(shape=(np.prod(X_train.shape[1:]),))
x = Dense(128, activation='relu')(inputs)
# x = Dense(64, activation='relu')(x)
x = DropConnect(Dense(64, activation='relu'), prob=0.5)(x)
# x = Dropout(0.6)(x)
predictions = Dense(nb_classes, activation='softmax')(x)

model = Model(inputs=inputs, outputs=predictions)
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
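
# a minimal training/evaluation sketch (an assumption, not part of the
# original snippet), using the batch_size and nb_epoch constants from above:
model.fit(X_train, Y_train,
          batch_size=batch_size,
          epochs=nb_epoch,
          verbose=1,
          validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test accuracy:', score[1])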
Example #3
    print(report)


import torch
import torch.optim as optim
from torch.optim import lr_scheduler

if __name__ == '__main__':
    ### create models, losses ...
    NUM_CLASSES = 10
    DIM = 512
    N_EPOCH = 16

    out_file = 'test' + str(DIM) + '.npy'
    out_file_norm = 'test' + str(DIM) + '_norm_' + '.npy'
    out_file_y = 'test_y_' + '.npy'
    ##
    m = Net_arc(n_out=DIM, batchnorm=True)
    ## load
    train_loader, test_loader = mnist(batch_size=100, shuffle=False)
    print('train len ={}'.format(len(train_loader.sampler)))
    print('test len ={}'.format(len(test_loader.sampler)))

    metric_fc = Arcface_loss(num_classes=NUM_CLASSES,
                             feat_dim=DIM,
                             device='cpu')
    optimizer_arcface = optim.Adam(metric_fc.parameters(), lr=0.01)

    models = {'arcFace': m}
    train_log = {k: [] for k in models}
    test_log = {k: [] for k in models}

    scheduler_nn = lr_scheduler.StepLR(models['arcFace'].optim, 8, gamma=0.5)
    scheduler_arcface = lr_scheduler.StepLR(optimizer_arcface, 8, gamma=0.5)
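
    # a sketch of the epoch loop these pieces are usually wired into (an
    # assumption about this project: Arcface_loss is treated as a module
    # returning margin-adjusted logits from (features, labels), and Net_arc
    # as owning its optimizer via `.optim`, as models['arcFace'].optim suggests):
    criterion = torch.nn.CrossEntropyLoss()
    for epoch in range(N_EPOCH):
        for x, y in train_loader:
            logits = metric_fc(m(x), y)
            loss = criterion(logits, y)
            m.optim.zero_grad()
            optimizer_arcface.zero_grad()
            loss.backward()
            m.optim.step()
            optimizer_arcface.step()
        scheduler_nn.step()
        scheduler_arcface.step()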
Example #4
                loss_recons = self.loss_recons = tf.reduce_mean(
                    tf.reduce_sum(tf.square(x - g), axis=1))
            with tf.name_scope('loss_relent'):
                # note: the standard closed-form KL term carries a 0.5 factor
                # (the commented-out version below); the active variant drops
                # it, effectively doubling the relative-entropy weight.
                # loss_relent = self.loss_relent = tf.reduce_mean(
                #     0.5 * tf.reduce_sum((- 1.0 - lv + tf.exp(lv) + tf.square(mu)), axis= 1))
                loss_relent = self.loss_relent = tf.reduce_mean(
                    tf.reduce_sum((-1.0 - lv + tf.exp(lv) + tf.square(mu)),
                                  axis=1))
            with tf.name_scope('loss'):
                loss = self.loss = loss_relent + loss_recons
            up = self.up = tf.train.AdamOptimizer().minimize(loss)
            self.step = 0


if False:
    dat = next(mnist(60000, with_labels=False, binary=False))

    vae = Vae(dat, dim_rec=(128, 128), dim_z=128, dim_gen=(128, 128))
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())

    # rm -r log
    # tf.summary.FileWriter('log/vae', sess.graph).close()
    # tf.reset_default_graph()
    # sess.close()

    loss = tf.summary.merge((tf.summary.scalar(name='loss', tensor=vae.loss),
                             tf.summary.scalar(name='loss_recons',
                                               tensor=vae.loss_recons),
                             tf.summary.scalar(name='loss_relent',
                                               tensor=vae.loss_relent)))
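
    # a sketch of the training loop these summaries are presumably for (an
    # assumption: the graph draws its own minibatches from `dat`, as the
    # constructor signature suggests, so no feed_dict is needed); this
    # mirrors the FileWriter usage from Example #1:
    wtr = tf.summary.FileWriter('log/vae')
    for step in range(10000):
        sess.run(vae.up)
        if not step % 100:
            wtr.add_summary(sess.run(loss), step)
    wtr.close()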