# --- training setup ---------------------------------------------------------
# `opt`, `loss`, the placeholders (img_x, img_y, b_ph), `recon`, `save_dir`,
# `max_iter`, `get_train_pair` and `Avg` are defined earlier in the script.
train_op = opt.minimize(loss)
# UPDATE_OPS (batch-norm moving averages etc.) are fetched explicitly in the
# sess.run() below, so wrapping train_op in tf.control_dependencies is not
# needed here.
update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)


sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Resume from the checkpoint (overwrites the fresh initialization above).
saver = tf.train.Saver()
saver.restore(sess, save_dir + 'a.ckpt')

avg = Avg(['loss'])
for i in range(1, 1+max_iter):
    x, y = get_train_pair(8)
    # ~2% Bernoulli mask over each 512x512 map for the batch of 8.
    rnd_bern = np.random.randint(100, size=[8, 512, 512]) < 2
    fd = {img_x: x, img_y: y, b_ph: rnd_bern}
    # One step: optimizer + update ops together; fetch the scalar loss.
    _, _, l = sess.run([train_op, update_op, loss], fd)
    avg.add(l, 0)

    if i % 30 == 0:
        avg.show(i)
    if i % 100 == 0:
        # Periodically dump reconstructions of the current batch to disk.
        # (Fetching img_x/img_y only echoed the fed values, so only `recon`
        # is fetched here.)
        fd = {img_x: x, img_y: y}
        rc = sess.run(recon, fd)
        for k in range(rc.shape[0]):
            np.save('sample_imgs/a_' + str(k) + '.npy', rc[k])
# --- Example 2 (示例#2) ---
# 0
    # First classification head: group the optimizer step with the
    # UPDATE_OPS collection so both run in one session call.
    train_op = opt.minimize(loss)
    update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    train_step = tf.group(train_op, update_op)
    acc = tf_acc(y_ph, y)


    # Second classification head on the same feature extractor, trained
    # with its own Adam optimizer (distinct variable name 'opt2').
    logit2 = model.get_another_logit(feature)
    y2 = tf.nn.softmax(logit2)
    loss2 = cross_entropy(y_ph, y2)
    train_step2 = tf.train.AdamOptimizer(lr_ph, name='opt2').minimize(loss2)
    acc2 = tf_acc(y_ph, y2)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    avg = Avg(desc=['train acc', 'train loss', 'lr', 'test acc'])

#    if args.dataset=='imgnet':
#        train_data = MiniImageNet('train')
#        val_data = MiniImageNet('train_val')
#    if args.dataset=='cifar':
#        train_data = Cifar('cifar100', 'train')
#        val_data = Cifar('cifar100', 'test')
#    if args.dataset=='mnist':
#        train_data = MNIST('notMNIST', 'train')
#        val_data = MNIST('notMNIST', 'test')
#
#
    # Optionally resume from a checkpoint (after the global initialization).
    model.saver_init()
    if args.from_ckpt:
        model.saver_load(sess)
    # First head: group the optimizer step with the UPDATE_OPS collection
    # (batch-norm moving averages etc.) so both run on every training step.
    train_step = tf.group(train_op, update_op)
    acc = tf_acc(y_ph, y)


    # Second classification head on the shared feature extractor, trained
    # with its own Adam optimizer (named 'opt2' so its slot variables do not
    # clash with the first optimizer's).
    logit2 = model.get_another_logit(feature)
    y2 = tf.nn.softmax(logit2)
    loss2 = cross_entropy(y_ph, y2)
    opt2 = tf.train.AdamOptimizer(lr_ph, name='opt2')
    train_op2 = opt2.minimize(loss2)
    # BUG FIX: this previously grouped `train_op` (the FIRST head's step), so
    # running train_step2 never applied opt2's updates and train_op2 was dead
    # code; group train_op2 instead.
    train_step2 = tf.group(train_op2, update_op)
    acc2 = tf_acc(y_ph, y2)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    avg = Avg(desc=['train acc', 'train loss', 'lr', 'test acc'])

    # Select train/validation data by dataset name. The names are mutually
    # exclusive, so the plain `if`s behave like an if/elif chain.
    if args.dataset=='imgnet':
        train_data = MiniImageNet('train')
        val_data = MiniImageNet('train_val')
    if args.dataset=='cifar':
        train_data = Cifar('cifar100', 'train')
        val_data = Cifar('cifar100', 'test')
    if args.dataset=='mnist':
        train_data = MNIST('notMNIST', 'train')
        val_data = MNIST('notMNIST', 'test')


    # Optionally resume from a checkpoint (after the global initialization).
    model.saver_init()
    if args.from_ckpt:
        model.saver_load(sess)
        # Accumulate this model's loss/accuracy tensors for the ensemble
        # evaluation below. NOTE(review): the enclosing loop header (over the
        # models, presumably) is outside this chunk — confirm in the full file.
        losses.append(cross_entropy(qy_ph, y))
        acces.append(tf_acc(qy_ph, y))

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # One episode generator per requested test dataset.
    ep_gen = []
    for dset in args.test_dataset:
        ep = Episode_Generator(nway, kshot, dset, phase='test_test')
        ep_gen.append(ep)

    # Restore every model's weights into the shared session.
    for model in models:
        model.saver_init()
        model.saver_load(sess)

    avg = Avg(desc=['train acc'])

    accss = []
    # FIX: the outer loop previously also used `i`, which the inner loop
    # immediately shadowed; renamed to `j` (consistent with the sibling
    # evaluation loop elsewhere in this file).
    for j in range(5):
        f_l = .0
        a_l = .0
        for i in range(1 + args.initial_step, 1 + args.max_iter):
            # Pick a random dataset, then sample one N-way K-shot episode.
            rnd_dset = np.random.randint(len(ep_gen))
            sx, sy, qx, qy = ep_gen[rnd_dset].next_batch(qnum)
            fd = {sx_ph: sx, qx_ph: qx, qy_ph: to1hot(qy, nway)}

            loss_lists = sess.run(losses, fd)
            acc_lists = sess.run(acces, fd)

            # Per-model running sums over the episodes of this round.
            f_l += np.array(loss_lists)
            a_l += np.array(acc_lists)
# --- Example 5 (示例#5) ---
# 0
                             batch_size=BATCH_SIZE)

    # Graph attention network. NOTE(review): 133/14 look like dataset-specific
    # input/output dims (atom features / targets) — confirm against the loader.
    net = GAT(133, 14).to(dev)
    print("TOTAL PARMS",
          sum(p.numel() for p in net.parameters() if p.requires_grad))

    # create optimizer
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)

    dur = []

    lossf = F.mse_loss
    # Unreduced MSE kept for a per-sample loss; unused in this visible fragment.
    second_lossf = torch.nn.MSELoss(reduction='none')
    for epoch in range(50):
        net.train()
        train_avg = Avg()
        train2_avg = Avg()
        # if epoch < 10:
        for g, v in tqdm(train_loader):
            optimizer.zero_grad()

            # Timing starts only after the first few (warm-up) epochs.
            if epoch >= 3:
                t0 = time.time()
            v = v.to(dev)
            af = g.ndata['atom_features'].to(dev)
            ge = g.edata['edge_features'].to(dev)
            v_pred, p = net(g, af, ge)

            # F.mse_loss already returns the mean by default, so the extra
            # .mean() is a no-op on the scalar.
            loss = lossf(v, v_pred).mean()

            loss.backward()
            # NOTE(review): the fragment ends here with no optimizer.step()
            # in view — confirm the step/logging code follows in the full file.
# Negating the masked mean turns the minimizer into a maximizer of
# mean(bloss * mask); the 1e4 factor rescales the objective.
loss = -tf.reduce_mean(bloss * mask) * 10000

#        + tf.reduce_mean( tf.abs(img_y - recon) )
#loss = tf.reduce_mean(tf.square(tf.subtract(img_y, recon)))# + tf.abs(recon))
opt = tf.train.AdamOptimizer(1e-4)
train_op = opt.minimize(loss)
update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

#model.pretrain_load(sess)
# Restore from the checkpoint (overwrites the fresh initialization above).
saver = tf.train.Saver()
saver.restore(sess, save_dir + 'a.ckpt')

avg = Avg(['loss'])
# The training statements in this loop are commented out, so the loop only
# performs the periodic evaluation below.
for i in range(1, 1+max_iter):
#    x, y = get_train_pair(8)
#    rnd_bern = np.random.randint(100, size=[8,512,512])
#    rnd_bern = rnd_bern < 2
#    fd = {img_x: x, img_y: y, b_ph: rnd_bern}
#    _, _, l = sess.run([train_op, update_op, loss], fd)
#    p = sess.run(recon, fd)
#    print (p.shape)
#    avg.add(l, 0)
#    if i % 30 == 0:
#        avg.show(i)
    if i % 10 == 0:
        # Fetch reconstructions on test data every 10 iterations.
        # NOTE(review): rc/rx/ry are unused in this visible fragment —
        # presumably saved or inspected further down; confirm.
        x, y = get_test_data(8)
        fd = {img_x: x, img_y: y}
        rc, rx, ry = sess.run([recon, img_x, img_y], fd)
    # Ensemble prediction: sum `logits` over axis 0 — presumably stacked
    # per model; confirm against where `logits` is built.
    pred = tf.reduce_sum(logits, axis=0)
    acc = tf_acc(qy_ph, pred)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # One episode generator per requested test dataset.
    ep_gen = []
    for dset in args.test_dataset:
        ep = Episode_Generator(nway, kshot, dset, phase='test_test')
        ep_gen.append(ep)

    # Restore every model's weights into the shared session.
    for model in models:
        model.saver_init()
        model.saver_load(sess)

    avg = Avg(desc=['train acc'])

    # Five evaluation rounds, each averaging accuracy over the episodes.
    # NOTE(review): f_l / a_l are initialized but never used here — leftover
    # from the per-model loss/acc variant that is commented out below.
    for j in range(5):
        f_l = .0
        a_l = .0
        for i in range(1 + args.initial_step, 1 + args.max_iter):
            # Pick a random dataset, then sample one N-way K-shot episode.
            rnd_dset = np.random.randint(len(ep_gen))
            sx, sy, qx, qy = ep_gen[rnd_dset].next_batch(qnum)
            fd = {sx_ph: sx, qx_ph: qx, qy_ph: to1hot(qy, nway)}

            #        loss_lists = sess.run(losses, fd)
            #       acc_lists = sess.run(acces, fd)
            a = sess.run(acc, fd)
            #            print (a)
            avg.add(a, 0)
        avg.show(j)
# --- Example 8 (示例#8) ---
# 0
    out = pan.get_activation(x_1, x_2, x_3)

    # Loss mask: positions where the target exceeds 10, OR-ed with the
    # externally fed mask b_ph, cast to float for arithmetic.
    mask = tf.greater(y_trues, 10)
    mask = tf.logical_or(mask, b_ph)
    mask = tf.cast(mask, tf.float32)

    # Masked squared error plus an |out| penalty on every position.
    loss = tf.reduce_mean((out - y_trues)**2 * mask + abs(out))
    train_step = tf.train.AdamOptimizer(args.lr).minimize(loss)


    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
#    saver.restore(sess, save_dir)

    avg = Avg(desc='loss')
    for i in range(1,1+args.max_iter):
        x, y = get_train_pair(args.batch_size)
        # Random top-left crop offsets in [0, 100); the patch count depends
        # on the resulting cropped size.
        rnd_init = np.random.randint(100, size=2)
        pn = get_patch_num(args.imsize, args.large_k, args.stride,
                rnd_init[0], rnd_init[1],
                args.batch_size)
        # ~1.5% Bernoulli mask over the patches (3 out of 200).
        rnd_bern = np.random.randint(200, size=pn)
        rnd_bern = (rnd_bern < 3)
        # Apply the same random crop to inputs and targets.
        x = x[:,rnd_init[0]:,rnd_init[1]:]
        y = y[:,rnd_init[0]:,rnd_init[1]:]
        fd = {x_ph: x, y_ph: y, b_ph: rnd_bern}

        l, _ = sess.run([loss, train_step], fd)
        avg.add(l, 0)
# --- Example 9 (示例#9) ---
# 0
    #isTr = tf.constant(True)
    # Python bool (not a graph constant) — baked into the graph at build time.
    isTr = True
    model = Baseline_CNN(args.model_name)

    # Model output over support (sx_ph) and query (qx_ph) sets.
    y = get_proto_model_output(model, sx_ph, qx_ph, nway, isTr, trainable=True)
    loss = cross_entropy(qy_ph, y)
    opt = tf.train.AdamOptimizer(lr_ph)
    train_op = opt.minimize(loss)
    # Group the optimizer step with UPDATE_OPS (e.g. batch-norm statistics)
    # so both run on every training step.
    update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    train_step = tf.group(train_op, update_op)
    acc = tf_acc(qy_ph, y)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    avg = Avg(desc=['train acc', 'train loss', 'lr'])

    ep_gen = Episode_Generator(nway, kshot, args.dataset, phase='train')
    model.saver_init()
    if args.from_ckpt:
        model.saver_load(sess)

    # Steps per epoch scaled so each epoch sees roughly 500*64 query
    # samples regardless of nway/qnum — TODO confirm the intent.
    max_step = int(500 * 64 / nway / qnum)
    for j in range(args.max_epoch):
        # Step learning-rate schedule: drop 10x after 100 epochs.
        lr = 1e-3 if j < 100 else 1e-4
        for i in range(max_step):
            sx, sy, qx, qy = ep_gen.next_batch(qnum)
            fd = {sx_ph: sx, qx_ph: qx, qy_ph: to1hot(qy, nway), lr_ph: lr}
            p1, p2, _ = sess.run([acc, loss, train_step], fd)

            avg.add(p1, 0)