Example #1
import numpy as np
import tensorflow as tf

# Model, Data, DATASET_SHAPE and the project's eval module are assumed to
# come from the surrounding codebase; the later examples reuse these imports.
def step_val(model: Model, data: Data, step):
    t_d = data.next_test()
    # Flatten each test image into a single feature vector.
    shape = np.prod(DATASET_SHAPE[data.set_name][1:])
    d = tf.reshape(t_d[0], [-1, shape])
    pred = model([d], training=False)

    acc = eval.acc(t_d[1], pred)
    err = 1 - acc
    tf.summary.scalar('val/acc', acc, step=step)
    tf.summary.scalar('val/err', err, step=step)

    return acc
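A minimal sketch of how step_val might be driven, assuming a hypothetical run_validation helper, writer path, and batch count (none of these come from the original code). The tf.summary.scalar calls above only record anything while a default file writer is active:

import tensorflow as tf

def run_validation(model, data, num_batches, step):
    # Hypothetical driver: average step_val accuracy over several test
    # batches under an active summary writer.
    writer = tf.summary.create_file_writer('logs/val')
    accs = []
    with writer.as_default():
        for _ in range(num_batches):
            accs.append(step_val(model, data, step))
    return float(tf.reduce_mean(accs))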
Example #2
def step_train(model: VQModel, data: Data, opt: tf.keras.optimizers.Optimizer,
               step):
    epoch = step // (DATASET_EXAMPLE_COUNT['train'][data.set_name] /
                     data.batch_size)
    s_d, u_d = data.next_train()

    # Two independently augmented views of the supervised (s1, s2) and
    # unsupervised (u1, u2) batches.
    s1 = img_processing(s_d[0])
    s2 = img_processing(s_d[0])
    u1 = img_processing(u_d[0])
    u2 = img_processing(u_d[0])
    if data.set_name == 'mnist':
        # MNIST is fed as flat vectors; reshape the views computed above
        # rather than calling img_processing a second time.
        shape = np.prod(DATASET_SHAPE[data.set_name][1:])
        s1 = tf.reshape(s1, [-1, shape])
        s2 = tf.reshape(s2, [-1, shape])
        u1 = tf.reshape(u1, [-1, shape])
        u2 = tf.reshape(u2, [-1, shape])
    batch_feed = [s1, s2, u1, u2, s_d[1], u_d[1]]
    batch_feed = [tf.identity(i) for i in batch_feed]
    # Summaries are written every 50 steps; -1 tells the model to skip them.
    summary_step = -1 if step % 50 > 0 else step
    with tf.GradientTape() as tape:
        x, pred, _pred, pre_vq, context_ind = model(batch_feed,
                                                    training=True,
                                                    step=summary_step)
        img = tf.concat([s1, u1], axis=0)
        loss = model.obj(img,
                         s_d[1],
                         x,
                         pred,
                         _pred,
                         pre_vq,
                         context_ind,
                         epoch,
                         step=summary_step)
    # Compute and apply the update outside the tape context so the gradient
    # computation itself is not recorded on the tape.
    gradient = tape.gradient(loss, sources=model.trainable_variables)
    opt.apply_gradients(zip(gradient, model.trainable_variables))
    if summary_step >= 0:
        # Accuracy on the supervised part of the batch only.
        acc = eval.acc(s_d[1], pred[:data.s_size, :])
        err = 1 - acc

        # The labels sit at indices 4 and 5 of batch_feed here (indices 2
        # and 3 are the unsupervised image views), unlike Example #4.
        ll = tf.concat([batch_feed[4], batch_feed[5]], axis=0)
        # Rank-1 labels are class indices; one-hot them so that ll @ ll^T
        # yields a binary same-class adjacency matrix.
        if tf.shape(ll).shape[0] < 2:
            ll = tf.one_hot(ll, DATASET_CLASS_COUNT[data.set_name])

        # Adjacency reshaped to [1, batch, batch, 1] for tf.summary.image.
        sim = tf.expand_dims(
            tf.expand_dims(tf.matmul(ll, ll, transpose_b=True), 0), -1)
        # tf.summary.image('img', s_d[0], step=step, max_outputs=1)
        # tf.summary.image('gt/adj', sim, step=step, max_outputs=1)
        tf.summary.scalar('train/acc', acc, step=step)
        tf.summary.scalar('train/err', err, step=step)

    return loss
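A sketch of a driver loop for step_train, under assumptions: total_steps and the log path are placeholders, not part of the original code. Since summary_step is -1 on all but every 50th step, model and model.obj are expected to skip their own summary writes in between:

import tensorflow as tf

writer = tf.summary.create_file_writer('logs/train')
with writer.as_default():
    for step in range(total_steps):  # total_steps: assumed constant
        loss = step_train(model, data, opt, step)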
Example #3
def step_train(model: GumbelModel, data: Data,
               opt1: tf.keras.optimizers.Optimizer,
               opt2: tf.keras.optimizers.Optimizer, step):
    epoch = step // (DATASET_EXAMPLE_COUNT['train'][data.set_name] /
                     data.batch_size)
    s_d, u_d = data.next_train()

    # Two augmented views per batch, as in Example #2.
    s1 = img_processing(s_d[0])
    s2 = img_processing(s_d[0])
    u1 = img_processing(u_d[0])
    u2 = img_processing(u_d[0])
    if data.set_name == 'mnist':
        # Flatten the already-augmented views instead of re-augmenting.
        shape = np.prod(DATASET_SHAPE[data.set_name][1:])
        s1 = tf.reshape(s1, [-1, shape])
        s2 = tf.reshape(s2, [-1, shape])
        u1 = tf.reshape(u1, [-1, shape])
        u2 = tf.reshape(u2, [-1, shape])
    batch_feed = [s1, s2, u1, u2, s_d[1], u_d[1]]
    batch_feed = [tf.identity(i) for i in batch_feed]
    summary_step = -1 if step % 50 > 0 else step
    with tf.GradientTape() as tape1:  # , tf.GradientTape() as tape2:
        x, pred, _pred = model(batch_feed, training=True, step=summary_step)
        img = tf.concat([s1, u1], axis=0)
        loss1, loss2 = model.obj(img,
                                 s_d[1],
                                 x,
                                 pred,
                                 _pred,
                                 epoch,
                                 step=summary_step)
    # Joint update: both losses are differentiated on one tape, outside the
    # tape context. The commented lines show the split-optimizer alternative.
    gradient1 = tape1.gradient(loss1 + loss2,
                               sources=model.trainable_variables)
    # gradient2 = tape2.gradient(loss2, sources=model.trainable_variables)
    opt1.apply_gradients(zip(gradient1, model.trainable_variables))
    # opt2.apply_gradients(zip(gradient2, model.trainable_variables))
    if summary_step >= 0:
        # Accuracy on the supervised part of the batch only.
        acc = eval.acc(s_d[1], pred[:data.s_size, :])
        err = 1 - acc

        # tf.summary.image('img', s_d[0], step=step, max_outputs=1)
        # tf.summary.image('gt/adj', sim, step=step, max_outputs=1)
        tf.summary.scalar('train/acc', acc, step=step)
        tf.summary.scalar('train/err', err, step=step)

    return loss1 + loss2
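If the commented-out two-optimizer variant were restored, a single persistent tape would let loss1 and loss2 be differentiated separately. A sketch of that variant, not the original code:

with tf.GradientTape(persistent=True) as tape:
    x, pred, _pred = model(batch_feed, training=True, step=summary_step)
    img = tf.concat([s1, u1], axis=0)
    loss1, loss2 = model.obj(img, s_d[1], x, pred, _pred, epoch,
                             step=summary_step)
# A persistent tape can be queried more than once.
gradient1 = tape.gradient(loss1, model.trainable_variables)
gradient2 = tape.gradient(loss2, model.trainable_variables)
opt1.apply_gradients(zip(gradient1, model.trainable_variables))
opt2.apply_gradients(zip(gradient2, model.trainable_variables))
del tape  # release the persistent tape's resources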
Example #4
def step_train(model: Model, data: Data, opt: tf.keras.optimizers.Optimizer,
               step):
    s_d, u_d = data.next_train()
    shape = np.prod(DATASET_SHAPE[data.set_name][1:])
    batch_feed = [
        tf.reshape(s_d[0], [-1, shape]),
        tf.reshape(u_d[0], [-1, shape]), s_d[1], u_d[1]
    ]
    batch_feed = [tf.identity(i) for i in batch_feed]
    summary_step = -1 if step % 50 > 0 else step
    with tf.GradientTape() as tape:
        x, bbn, context_ind, feat, pred, adj = model(batch_feed,
                                                     training=True,
                                                     step=summary_step)
        img = tf.concat([batch_feed[0], batch_feed[1]], axis=0)
        loss = model.obj(img,
                         s_d[1],
                         x,
                         bbn,
                         context_ind,
                         feat,
                         adj,
                         pred,
                         step=summary_step)
    # As in the earlier examples, differentiate and apply the update outside
    # the tape context.
    gradient = tape.gradient(loss, sources=model.trainable_variables)
    opt.apply_gradients(zip(gradient, model.trainable_variables))
    if summary_step >= 0:
        acc = eval.acc(s_d[1], pred)
        err = 1 - acc

        # Here indices 2 and 3 of batch_feed are the labels; build the
        # ground-truth same-class adjacency as in Example #2.
        ll = tf.concat([batch_feed[2], batch_feed[3]], axis=0)
        if tf.shape(ll).shape[0] < 2:
            ll = tf.one_hot(ll, DATASET_CLASS_COUNT[data.set_name])

        # [1, batch, batch, 1] so the adjacency can be logged as an image.
        sim = tf.expand_dims(
            tf.expand_dims(tf.matmul(ll, ll, transpose_b=True), 0), -1)
        tf.summary.image('gt/adj', sim, step=step, max_outputs=1)
        tf.summary.scalar('train/acc', acc, step=step)
        tf.summary.scalar('train/err', err, step=step)

    return loss
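The ll @ ll^T construction behind the 'gt/adj' image summary can be sanity-checked in isolation; a toy example with made-up labels:

import tensorflow as tf

labels = tf.constant([0, 1, 0])
ll = tf.one_hot(labels, depth=3)
adj = tf.matmul(ll, ll, transpose_b=True)
# adj is 1 exactly where two examples share a class:
# [[1., 0., 1.],
#  [0., 1., 0.],
#  [1., 0., 1.]]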