Example #1
def train():
    datagenerator = DataGenerator(number_classes, number_samples_xclass)
    print('Start training')
    for step in range(50000):
        # Sample a meta-training batch and move it onto the target device.
        _imgs, _labels = datagenerator.sample_batch('train', batch_size)
        _imgs_tensor = torch.tensor(_imgs, dtype=torch.float32, device=device)
        _labels_tensor = torch.tensor(_labels, dtype=torch.float32, device=device)

        output = model(_imgs_tensor, _labels_tensor)
        optimizer.zero_grad()
        loss = compute_loss(output, _labels_tensor)
        loss.backward()
        optimizer.step()

        if step % 50 == 0:
            # Periodic evaluation on a held-out batch; no gradients needed.
            _imgs, _labels = datagenerator.sample_batch('test', 100)
            _imgs_tensor = torch.tensor(_imgs, dtype=torch.float32, device=device)
            _labels_tensor = torch.tensor(_labels, dtype=torch.float32, device=device)
            with torch.no_grad():
                output_t = model(_imgs_tensor, _labels_tensor)
                # Compare predictions and labels on the final (query) slot.
                pred_lbls = output_t[:, -1, :, :].argmax(2).cpu().numpy()
                _labels_tn = _labels[:, -1, :, :].argmax(2)
                accuracy = (_labels_tn == pred_lbls).mean()
                print('accuracy ->\t{}'.format(accuracy))
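The helper `compute_loss` is not part of this listing. A minimal sketch of what it might look like, assuming the convention used throughout these examples (the loss is taken on the final, query slot of each class sequence; the function name and body are hypothetical, not the original code):

import torch.nn.functional as F

def compute_loss(output, labels):
    # Hypothetical sketch: cross-entropy on the final (query) slot only,
    # matching the accuracy computation above.
    # output, labels: (batch, K+1, N, N); labels are one-hot.
    logits = output[:, -1, :, :].reshape(-1, output.shape[-1])
    targets = labels[:, -1, :, :].argmax(-1).reshape(-1)
    return F.cross_entropy(logits, targets)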
Example #2
def test():
    ims = tf.placeholder(tf.float32,
                         shape=(None, FLAGS.num_samples + 1, FLAGS.num_classes,
                                784))
    labels = tf.placeholder(tf.float32,
                            shape=(None, FLAGS.num_samples + 1,
                                   FLAGS.num_classes, FLAGS.num_classes))

    data_generator = DataGenerator(FLAGS.num_classes, FLAGS.num_samples + 1)
    o = MANN(FLAGS.num_classes, FLAGS.num_samples + 1)
    out = o(ims, labels)  # build the model so its variables exist

    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(tf.global_variables_initializer())
        # Restore the checkpoint after the initializers run, so the
        # restored values are not overwritten.
        o.load_weights('chpt_1700.ckpt')
        print('Recovered weights from chpt_1700.ckpt')

        i, l = data_generator.sample_batch('train', 100)
        feed = {ims: i.astype(np.float32), labels: l.astype(np.float32)}
        pred = sess.run(out, feed)
        # Accuracy on the final (query) sample slot of each class sequence.
        pred = pred[:, -1, :, :].argmax(2)
        l = l[:, -1, :, :].argmax(2)
        print("Test Accuracy", (1.0 * (pred == l)).mean())
Example #3
def train():
    ims = tf.placeholder(tf.float32,
                         shape=(None, FLAGS.num_samples + 1, FLAGS.num_classes,
                                784))
    labels = tf.placeholder(tf.float32,
                            shape=(None, FLAGS.num_samples + 1,
                                   FLAGS.num_classes, FLAGS.num_classes))

    data_generator = DataGenerator(FLAGS.num_classes, FLAGS.num_samples + 1)

    o = MANN(FLAGS.num_classes, FLAGS.num_samples + 1)
    out = o(ims, labels)

    loss = loss_function(out, labels)
    optim = tf.train.AdamOptimizer(0.001)
    optimizer_step = optim.minimize(loss)
    checkpoint_path = 'chpt_{}.ckpt'

    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(tf.global_variables_initializer())

        for step in range(50000):
            i, l = data_generator.sample_batch('train', FLAGS.meta_batch_size)
            feed = {ims: i.astype(np.float32), labels: l.astype(np.float32)}
            _, ls = sess.run([optimizer_step, loss], feed)

            if step % 100 == 0:
                print("*" * 5 + "Iter " + str(step) + "*" * 5)
                i, l = data_generator.sample_batch('test', 100)
                feed = {
                    ims: i.astype(np.float32),
                    labels: l.astype(np.float32)
                }
                pred, tls = sess.run([out, loss], feed)
                print("Train Loss:", ls, "Test Loss:", tls)
                pred = pred.reshape(-1, FLAGS.num_samples + 1,
                                    FLAGS.num_classes, FLAGS.num_classes)
                pred = pred[:, -1, :, :].argmax(2)
                l = l[:, -1, :, :].argmax(2)
                print("Test Accuracy", (1.0 * (pred == l)).mean())
                o.save_weights(checkpoint_path.format(step))
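`loss_function` is defined outside this listing. A plausible sketch under the same convention as the accuracy computation (cross-entropy on the final query slot only; hypothetical, not the original code):

def loss_function(preds, labels):
    # Hypothetical sketch: penalize predictions only on the last (query) slot.
    return tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(
            labels=tf.stop_gradient(labels[:, -1:, :, :]),
            logits=preds[:, -1:, :, :]))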
Example #4
o = MANN(FLAGS.num_classes, FLAGS.num_samples + 1)
out = o(ims, labels)

loss = loss_function(out, labels)
print("...tf.trainable_variables():", tf.trainable_variables())

optim = tf.train.AdamOptimizer(0.001)
#optim = tf.compat.v1.train.AdamOptimizer(0.001)
optimizer_step = optim.minimize(loss)
print("...Starts training...")
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())

    for step in range(50000):
        i, l = data_generator.sample_batch(batch_type='train',
                                           batch_size=FLAGS.meta_batch_size)
        #print("i.shape:",i.shape)
        feed = {ims: i.astype(np.float32), labels: l.astype(np.float32)}
        #print("feed[ims].shape:", feed[ims].shape)
        _, ls = sess.run([optimizer_step, loss], feed)

        if step % 100 == 0:
            print("*" * 5 + "Iter " + str(step) + "*" * 5)
            i, l = data_generator.sample_batch(batch_type='test',
                                               batch_size=100)
            feed = {ims: i.astype(np.float32), labels: l.astype(np.float32)}
            pred, tls = sess.run([out, loss], feed)
            print("Train Loss:", ls, "Test Loss:", tls)
            pred = pred.reshape(-1, FLAGS.num_samples + 1, FLAGS.num_classes,
                                FLAGS.num_classes)
            pred = pred[:, -1, :, :].argmax(2)
            l = l[:, -1, :, :].argmax(2)
            print("Test Accuracy", (1.0 * (pred == l)).mean())
Example #5
	init_op = tf.global_variables_initializer()
	sess.run(init_op)

	# call DataGenerator with k_shot+n_query samples per class
	data_generator = DataGenerator(n_way, k_shot+n_query, n_meta_test_way, k_meta_test_shot+n_meta_test_query, config={'data_folder': args.data_path})
	for ep in range(n_epochs):
		for epi in range(n_episodes):
			#############################
			#### YOUR CODE GOES HERE ####

			# sample a batch of training data and partition into
			# support and query sets
			images_batch, labels_batch = data_generator.sample_batch('meta_train', 1)

			# the first k_shot samples per class form the support set,
			# the remaining n_query samples form the query set
			support = images_batch[0, :, :k_shot, :].reshape((n_way, k_shot, im_width, im_height, channels))
			query = images_batch[0, :, k_shot:, :].reshape((n_way, n_query, im_width, im_height, channels))

			labels_s = labels_batch[0, :, :k_shot, :].reshape((n_way, k_shot, n_way))
			labels = labels_batch[0, :, k_shot:, :].reshape((n_way, n_query, n_way))

			# flatten the support set and sort it by class label
			support_vec = support.reshape((n_way * k_shot, im_width * im_height * channels))
			labels_s_vec = labels_s.reshape((n_way * k_shot, n_way))

			l_s_argmax = labels_s_vec.argmax(1)
			sort_ind = np.argsort(l_s_argmax)
			support_vec_sort = support_vec[sort_ind]
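The sorted support set is a natural input for the prototype step this snippet stops before. A hypothetical continuation of the usual prototypical-networks computation, assuming embeddings `emb_support` (shape (n_way * k_shot, d), ordered by class after the sort) and `emb_query` (shape (m, d)), neither of which appears in the original:

# Per-class prototypes: mean over the k_shot embeddings of each class.
prototypes = emb_support.reshape(n_way, k_shot, -1).mean(axis=1)  # (n_way, d)
# Classify each query embedding by squared distance to the nearest prototype.
dists = ((emb_query[:, None, :] - prototypes[None, :, :]) ** 2).sum(-1)
query_pred = dists.argmin(axis=1)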
Example #6
data_generator = DataGenerator(FLAGS.num_classes, FLAGS.num_samples + 1)

o = MANN(FLAGS.num_classes, FLAGS.num_samples + 1)
out = o(ims, labels)

loss = loss_function(out, labels)
optim = tf.train.AdamOptimizer(0.001)
optimizer_step = optim.minimize(loss)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())

    for step in range(50000):
        i, l = data_generator.sample_batch('train', FLAGS.meta_batch_size)
        feed = {ims: i.astype(np.float32), labels: l.astype(np.float32)}
        _, ls = sess.run([optimizer_step, loss], feed)

        if step % 100 == 0:
            print("*" * 5 + "Iter " + str(step) + "*" * 5)
            i, l = data_generator.sample_batch('test', 100)
            feed = {ims: i.astype(np.float32), labels: l.astype(np.float32)}
            pred, tls = sess.run([out, loss], feed)
            print("Train Loss:", ls, "Test Loss:", tls)
            pred = pred.reshape(-1, FLAGS.num_samples + 1, FLAGS.num_classes,
                                FLAGS.num_classes)
            pred = pred[:, -1, :, :].argmax(2)
            l = l[:, -1, :, :].argmax(2)
            print("Test Accuracy", (1.0 * (pred == l)).mean())
Example #7
    data_generator = DataGenerator(n_way, k_shot + n_query, n_meta_test_way,
                                   k_meta_test_shot + n_meta_test_query,
                                   config={'data_folder': args.data_path})

    experiments = []
    exp_filename = 'result_proto_meta_val_acc' + '_n_way' + str(
        n_way) + '_k_shot' + str(k_shot) + '_n_query' + str(n_query) + '.csv'
    for ep in range(n_epochs):
        for epi in range(n_episodes):
            #############################
            #### YOUR CODE GOES HERE ####

            # sample a batch of training data and partition into
            # support and query sets

            # support, query, labels = None, None, None
            image_batches, label_batches = data_generator.sample_batch(
                "meta_train", 1, shuffle=False)
            inputa = image_batches[:, :, :k_shot, :]
            inputb = image_batches[:, :, k_shot:, :]
            # labela = label_batches[:, :, :k_shot, :]
            labelb = label_batches[:, :, k_shot:, :]

            support = np.reshape(
                inputa, [n_way, k_shot, im_width, im_height, channels])
            query = np.reshape(inputb,
                               [n_way, n_query, im_width, im_height, channels])
            labels = np.reshape(labelb, [n_way, n_query, n_way])

            #############################
            _, ls, ac = sess.run([train_op, ce_loss, acc],
                                 feed_dict={
                                     x: support,
                                     q: query,
                                     labels_ph: labels
                                 })
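The placeholders `x`, `q`, `labels_ph` and the ops `train_op`, `ce_loss`, `acc` come from graph-construction code outside this listing. A hedged sketch of placeholder shapes consistent with the reshapes above (hypothetical):

x = tf.placeholder(tf.float32, [n_way, k_shot, im_width, im_height, channels])
q = tf.placeholder(tf.float32, [n_way, n_query, im_width, im_height, channels])
labels_ph = tf.placeholder(tf.float32, [n_way, n_query, n_way])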
Example #8
def discriminator_accuracy(real_preds, fake_preds):
    # The listing begins mid-function; this head and the `pos` tensor are a
    # hypothetical reconstruction. Counts reals scored > 0.5 and fakes < 0.5.
    pos = torch.ones(batch_size).to(device)
    neg = torch.zeros(batch_size).to(device)
    real = torch.where(real_preds > 0.5, pos, neg)
    fake = torch.where(fake_preds < 0.5, pos, neg)
    return (torch.sum(real) + torch.sum(fake)) / (2.0 * batch_size)


gen_loss = []
dis_loss = []
print('Started adversarial training for {} steps'.format(combined))
for adv_step in range(combined):
    if adv_step % 20000 == 0:
        torch.save(netG.state_dict(), ('./checkpoint/' + str(sampling) +
                                       '_{}.pt').format(adv_step))
        torch.save(netD.state_dict(), ('./checkpoint/' + str(sampling) +
                                       '_d_{}.pt').format(adv_step))
    latent_sample, real_sample = data_generator.sample_batch(
        'discrim', batch_size, FLAGS.discriminator_RF)
    #latent_sample, real_sample, real_2d_sample = data_generator.sample_both_depth_batch('discrim', batch_size, FLAGS.discriminator_RF)
    sing_real = np.concatenate(
        (real_sample[:, 0:frames_given, :],
         np.repeat(np.expand_dims(real_sample[:, frames_given - 1, :], axis=1),
                   rf - frames_given, 1)),
        axis=1)
    sing_real = torch.from_numpy(sing_real).float().to(device)
    latent_sample = torch.from_numpy(latent_sample).float().to(device)
    real_sample = torch.from_numpy(real_sample).float().to(device)
    #real_2d_sample = torch.from_numpy(real_2d_sample).float().to(device)
    to_print = False
    for i in range(1):
        netG.zero_grad()
        label = torch.full((batch_size, ), real_label, device=device)
        fake = netG(sing_real, to_print)
Example #9
	tf_config = tf.ConfigProto()
	tf_config.gpu_options.allow_growth = True
	sess = tf.InteractiveSession(config=tf_config)
	init_op = tf.global_variables_initializer()
	sess.run(init_op)

	# call DataGenerator with k_shot+n_query samples per class
	data_generator = DataGenerator(n_way, k_shot+n_query, n_meta_test_way, k_meta_test_shot+n_meta_test_query, config={'data_folder': args.data_path})
	for ep in range(n_epochs):
		for epi in range(n_episodes):

			# sample a batch of training data and partition into
			# support and query sets
			meta_batch = data_generator.sample_batch('meta_train', 1, shuffle=False)
			support = np.reshape(meta_batch[0][0, :, :k_shot, :], (n_way, k_shot, im_height, im_width, channels))
			query = np.reshape(meta_batch[0][0, :, k_shot:, :], (n_way, n_query, im_height, im_width, channels))
			labels = meta_batch[1][0, :, k_shot:, :]

			_, ls, ac = sess.run([train_op, ce_loss, acc], feed_dict={x: support, q: query, labels_ph: labels})


			if (epi+1) % 50 == 0:
				# sample a batch of validation data and partition into
				# support and query sets
				meta_batch = data_generator.sample_batch('meta_val', 1, shuffle=False)
				support = np.reshape(meta_batch[0][0, :, :k_shot, :], (n_way, k_shot, im_height, im_width, channels))
				query = np.reshape(meta_batch[0][0, :, k_shot:, :], (n_way, n_query, im_height, im_width, channels))
				labels = meta_batch[1][0, :, k_shot:, :]
Example #10
	tf_config = tf.ConfigProto()
	tf_config.gpu_options.allow_growth = True
	sess = tf.InteractiveSession(config=tf_config)
	init_op = tf.global_variables_initializer()
	sess.run(init_op)

	# call DataGenerator with k_shot+n_query samples per class
	data_generator = DataGenerator(n_way, k_shot+n_query, n_meta_test_way, k_meta_test_shot+n_meta_test_query, config={'data_folder': args.data_path})
	for ep in range(n_epochs):
		for epi in range(n_episodes):
			#############################
			#### YOUR CODE GOES HERE ####

			# sample a batch of training data and partition into
			# support and query sets

			inputs, labels = data_generator.sample_batch('meta_train', batch_size=1, swap=False, shuffle=False)
			support, query, labels = split_sampled_dataset(inputs, labels)
			# print(support.shape)
			# print(query.shape)
			# print(labels.shape)

			# support, query, labels = None, None, None
			#############################
			_, ls, ac = sess.run([train_op, ce_loss, acc], feed_dict={x: support, q: query, labels_ph: labels})
			if (epi+1) % 50 == 0:
				#############################
				#### YOUR CODE GOES HERE ####

				# sample a batch of validation data and partition into
				# support and query sets
				inputs, labels = data_generator.sample_batch('meta_val', batch_size=1, swap=False, shuffle=False)
				support, query, labels = split_sampled_dataset(inputs, labels)
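The helper `split_sampled_dataset` used above is not shown in this listing. A minimal sketch consistent with the partition code in Example #9 (the first k_shot samples per class form the support set, the rest form the query set; hypothetical, not the original helper):

def split_sampled_dataset(inputs, labels):
    # inputs: (1, n_way, k_shot + n_query, features)
    # labels: (1, n_way, k_shot + n_query, n_way)
    support = np.reshape(inputs[0, :, :k_shot, :],
                         (n_way, k_shot, im_height, im_width, channels))
    query = np.reshape(inputs[0, :, k_shot:, :],
                       (n_way, n_query, im_height, im_width, channels))
    query_labels = labels[0, :, k_shot:, :]
    return support, query, query_labels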
Example #11
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())
    y_pred_memory = tf.concat([controller_output] +
                              next_state['read_vector_list'],
                              axis=2)
    state = next_state
    y_pred_memory = Dense(N, activation='softmax')(y_pred_memory)
    y_pred = y_pred_memory[:, -1, :N]
    adam = tf.keras.optimizers.Adam(learning_rate=0.001)

    model = Model(inputs=inp, outputs=y_pred)
    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    history = model.fit_generator(data_generator.sample_batch('train', B),
                                  epochs=150,
                                  steps_per_epoch=50,
                                  validation_data=data_generator.sample_batch(
                                      'val', B),
                                  validation_steps=B)
    print(
        model.evaluate_generator(data_generator.sample_batch('test', B),
                                 steps=50))

    fig, axs = plt.subplots(nrows=2, ncols=1, figsize=(6, 4))
    axs[0].plot(history.history['accuracy'])
    axs[0].plot(history.history['val_accuracy'])
    axs[0].set_title('Model accuracy')
    axs[0].set_ylabel('Accuracy')
    axs[0].set_xlabel('Epoch')
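For `fit_generator` and `evaluate_generator` to accept it directly, `sample_batch` must be a Python generator in this variant of DataGenerator, yielding (images, labels) pairs indefinitely. If yours returns a single batch instead, a thin wrapper (hypothetical) bridges the gap:

def batch_stream(generator, split, batch_size):
    # Adapts a one-shot sample_batch(split, batch_size) into the endless
    # generator that Keras' fit_generator expects.
    while True:
        yield generator.sample_batch(split, batch_size)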