Example #1
def test():
    ims = tf.placeholder(tf.float32,
                         shape=(None, FLAGS.num_samples + 1, FLAGS.num_classes,
                                784))
    labels = tf.placeholder(tf.float32,
                            shape=(None, FLAGS.num_samples + 1,
                                   FLAGS.num_classes, FLAGS.num_classes))

    data_generator = DataGenerator(FLAGS.num_classes, FLAGS.num_samples + 1)
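    # Each batch holds FLAGS.num_classes classes with FLAGS.num_samples + 1 examples per
    # class; the final example of each class is the query whose label the MANN must predict.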
    o = MANN(FLAGS.num_classes, FLAGS.num_samples + 1)
    o.load_weights('chpt_1700.ckpt')
    print('Restored weights from checkpoint')
    out = o(ims, labels)

    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(tf.global_variables_initializer())

        # sample from the held-out split so the printed accuracy is a true test score
        i, l = data_generator.sample_batch('test', 100)
        feed = {ims: i.astype(np.float32), labels: l.astype(np.float32)}
        pred = sess.run([out], feed)
        pred = pred[0]
        #pred = pred.reshape(
        #        -1, FLAGS.num_samples + 1,
        #        FLAGS.num_classes, FLAGS.num_classes)
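        # score only the final (query) slot of each class: argmax over the N-way prediction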
        pred = pred[:, -1, :, :].argmax(2)
        l = l[:, -1, :, :].argmax(2)
        print("Test Accuracy", (1.0 * (pred == l)).mean())
Example #2
    def train(model, fd_classifier, dataset_path, img_dim, split, epochs,
              batch_size):
        try:
            gen = DataGenerator(dataset_path)
            data, labels = gen.get_images(img_dim, fd_classifier)

            # split them manually
            split = int(len(data) * split / 100)
            train_x, test_x = data[:split], data[split:]
            train_y, test_y = labels[:split], labels[split:]

            lb = LabelBinarizer()
            train_y = lb.fit_transform(train_y)
            test_y = lb.transform(test_y)
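            # LabelBinarizer one-hot encodes the labels; it is fit on the training split only
            # and then reused to transform the test labels consistently.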

            # TODO: PNG problem
            # train model
            train_x = np.array(train_x)
            train_y = np.array(train_y)
            test_x = np.array(test_x)
            test_y = np.array(test_y)
            return lb, model.fit(train_x, train_y, test_x, test_y, epochs,
                                 batch_size)

        except Exception as e:
            print(e)
Example #3
def train():
    datagenerator = DataGenerator(number_classes, number_samples_xclass)
    print('Start training')
    for step in range(50000):
        _imgs, _labels = datagenerator.sample_batch('train', batch_size)

        _imgs_tensor = torch.tensor(_imgs, dtype=torch.float32, device=device)
        _labels_tensor = torch.tensor(_labels, dtype=torch.float32, device=device)

        output = model(_imgs_tensor, _labels_tensor)
        optimizer.zero_grad()
        loss = compute_loss(output, _labels_tensor)

        loss.backward()
        optimizer.step()

        if step % 50 == 0:
            _imgs, _labels = datagenerator.sample_batch('test', 100)
            _imgs_tensor = torch.tensor(_imgs, dtype=torch.float32, device=device)
            _labels_tensor = torch.tensor(_labels, dtype=torch.float32, device=device)
            with torch.no_grad():
                output_t = model(_imgs_tensor, _labels_tensor)
                # compare predictions and labels on the final (query) example of each class
                pred_lbls = np.asarray(output_t[:, -1, :, :].argmax(2).to('cpu'))
                _labels_tn = _labels[:, -1, :, :].argmax(2)
                accuracy = (_labels_tn == pred_lbls).mean()
                print('accuracy ->\t{}'.format(accuracy))
Example #4
def train_with_data_generator(dataset_root_dir=GAN_DATA_ROOT_DIR,
                              weights_file=None):
    net_name = 'o_net'
    batch_size = BATCH_SIZE
    epochs = ONET_EPOCHS
    learning_rate = ONET_LEARNING_RATE

    dataset_dir = os.path.join(dataset_root_dir, net_name)
    pos_dataset_path = os.path.join(dataset_dir, 'pos_shuffle.h5')
    neg_dataset_path = os.path.join(dataset_dir, 'neg_shuffle.h5')
    part_dataset_path = os.path.join(dataset_dir, 'part_shuffle.h5')
    landmarks_dataset_path = os.path.join(dataset_dir, 'landmarks_shuffle.h5')

    data_generator = DataGenerator(pos_dataset_path,
                                   neg_dataset_path,
                                   part_dataset_path,
                                   landmarks_dataset_path,
                                   batch_size,
                                   im_size=NET_SIZE['o_net'])
    data_gen = data_generator.generate()
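    # generate() yields the training batches drawn from the pos/neg/part/landmark HDF5 datasets opened above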
    steps_per_epoch = data_generator.steps_per_epoch()

    callbacks, model_file = create_callbacks_model_file(net_name, epochs)

    _o_net = train_o_net_with_data_generator(data_gen,
                                             steps_per_epoch,
                                             initial_epoch=0,
                                             epochs=epochs,
                                             lr=learning_rate,
                                             callbacks=callbacks,
                                             weights_file=weights_file)
    _o_net.save_weights(model_file)
Example #5
def train_with_data_generator(dataset_root_dir=GAN_DATA_ROOT_DIR,
                              model_file=model_file,
                              weights_file=None):

    batch_size = 64 * 7
    epochs = 30
    learning_rate = 0.001
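    # Note: net_name ('Pnet', 'Rnet', or 'Onet') and the default model_file come from
    # module-level globals; they are not defined inside this function.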

    pos_dataset_path = os.path.join(dataset_root_dir,
                                    'pos_shuffle_%s.h5' % (net_name))
    neg_dataset_path = os.path.join(dataset_root_dir,
                                    'neg_shuffle_%s.h5' % (net_name))
    part_dataset_path = os.path.join(dataset_root_dir,
                                     'part_shuffle_%s.h5' % (net_name))
    landmarks_dataset_path = os.path.join(
        dataset_root_dir, 'landmarks_shuffle_%s.h5' % (net_name))

    data_generator = DataGenerator(pos_dataset_path,
                                   neg_dataset_path,
                                   part_dataset_path,
                                   landmarks_dataset_path,
                                   batch_size,
                                   im_size=12)
    data_gen = data_generator.generate()
    steps_per_epoch = data_generator.steps_per_epoch()

    if net_name == 'Pnet':
        _net = Pnet()
    elif net_name == 'Rnet':
        _net = Rnet()
    else:
        _net = Onet()

    _net_model = _net.model(training=True)
    _net_model.summary()
    if weights_file is not None:
        _net_model.load_weights(weights_file)

    #sgd = SGD(lr=0.005, momentum=0.8)
    #_p_net_model.compile(optimizer=sgd, loss=_p_net.loss_func, metrics=[_p_net.accuracy, _p_net.recall])
    _net_model.compile(Adam(lr=learning_rate),
                       loss=_net.loss_func,
                       metrics=[_net.accuracy, _net.recall])

    _net_model.fit_generator(data_gen,
                             steps_per_epoch=steps_per_epoch,
                             initial_epoch=0,
                             epochs=epochs)

    _net_model.save_weights(model_file)
Example #6
def main():
    filepath = os.path.dirname(os.path.abspath(__file__))

    dataset_root_dir = filepath + '/data/traindata/'

    pos_dataset_path = os.path.join(dataset_root_dir, 'pos_shuffle.h5')
    neg_dataset_path = os.path.join(dataset_root_dir, 'neg_shuffle.h5')
    part_dataset_path = os.path.join(dataset_root_dir, 'part_shuffle.h5')
    landmarks_dataset_path = os.path.join(dataset_root_dir, 'landmarks_shuffle.h5')

    data_generator = DataGenerator(pos_dataset_path, neg_dataset_path, part_dataset_path, landmarks_dataset_path, 64*7, im_size=12)
    data_gen = data_generator.generate()

    for item in data_gen:
        print(item[1][:,:1])
        exit(0)
Example #7
def main():
	if FLAGS.meta_train == False:
		orig_meta_batch_size = FLAGS.meta_batch_size
		# always use meta batch size of 1 when testing.
		FLAGS.meta_batch_size = 1

    # call data_generator and get data with FLAGS.k_shot*2 samples per class
	data_generator = DataGenerator(FLAGS.n_way, FLAGS.k_shot*2, FLAGS.n_way, FLAGS.k_shot*2, config={'data_folder': FLAGS.data_path})
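	# (k_shot*2 is presumably FLAGS.k_shot support examples plus FLAGS.k_shot query examples per class)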

    # set up MAML model
	dim_output = data_generator.dim_output
	dim_input = data_generator.dim_input
	meta_test_num_inner_updates = FLAGS.meta_test_num_inner_updates
	model = MAML(dim_input, dim_output,
		meta_test_num_inner_updates=meta_test_num_inner_updates,
		learn_inner_lr=FLAGS.learn_inner_update_lr)
	model.construct_model(prefix='maml')
	model.summ_op = tf.summary.merge_all()

	saver = loader = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES), max_to_keep=10)

	tf_config = tf.ConfigProto()
	tf_config.gpu_options.allow_growth=True
	sess = tf.InteractiveSession(config=tf_config)

	if FLAGS.meta_train == False:
		# change to original meta batch size when loading model.
		FLAGS.meta_batch_size = orig_meta_batch_size

	if FLAGS.meta_train_k_shot == -1:
		FLAGS.meta_train_k_shot = FLAGS.k_shot
	if FLAGS.meta_train_inner_update_lr == -1:
		FLAGS.meta_train_inner_update_lr = FLAGS.inner_update_lr

	exp_string = 'cls_'+str(FLAGS.n_way)+'.mbs_'+str(FLAGS.meta_batch_size) + '.k_shot_' + str(FLAGS.meta_train_k_shot) + '.inner_numstep' + str(FLAGS.num_inner_updates) + '.inner_updatelr' + str(FLAGS.meta_train_inner_update_lr)
	if FLAGS.learn_inner_update_lr:
		exp_string += ".learn_inner_lr"

	resume_itr = 0
	model_file = None

	tf.global_variables_initializer().run()

	if FLAGS.resume or not FLAGS.meta_train:
		model_file = tf.train.latest_checkpoint(FLAGS.logdir + '/' + exp_string)
		if FLAGS.meta_test_iter > 0:
			model_file = model_file[:model_file.index('model')] + 'model' + str(FLAGS.meta_test_iter)
		if model_file:
			ind1 = model_file.index('model')
			resume_itr = int(model_file[ind1+5:])
			print("Restoring model weights from " + model_file)
			saver.restore(sess, model_file)

	if FLAGS.meta_train:
		meta_train(model, saver, sess, exp_string, data_generator, resume_itr)
	else:
		FLAGS.meta_batch_size = 1
		meta_test(model, saver, sess, exp_string, data_generator, meta_test_num_inner_updates)
Example #8
def training_val(train_information, val_information, num_au_labels, val_ratio,
                 num_epochs, batch_size, img_cols, img_rows):
    # load data folders
    params = {
        'batch_size': batch_size,
        'img_cols': img_cols,
        'img_rows': img_rows,
        'num_au_labels': num_au_labels,
        'shuffle': True
    }
    training_generator = DataGenerator(train_information, **params)
    validation_generator = DataGenerator(val_information, **params)
    patience = 20

    model = DRML((img_cols, img_rows, 3), num_au_labels)
    model.summary()
    adam = Adam(lr=0.001)
    model.compile(optimizer=adam,
                  loss='binary_crossentropy',
                  metrics=[
                      f1_threshold(0.8, 10),
                      multi_label_accuracy_threshold(0.8, 10),
                      f1_threshold(0.5, 10),
                      multi_label_accuracy_threshold(0.5, 10)
                  ])

    model.fit_generator(
        generator=training_generator,
        epochs=num_epochs,
        verbose=1,
        validation_data=validation_generator,
        callbacks=[
            EarlyStopping(patience=patience),
            ModelCheckpoint(
                "train_model/DRML_{epoch:03d}-{val_multi_label_accuracy:.5f}.hdf5",
                save_best_only=True)
        ])

    # evaluation validation data
    evaluation_val_data(val_information, batch_size, model, img_cols, img_rows,
                        0.8)
    evaluation_val_data(val_information, batch_size, model, img_cols, img_rows,
                        0.5)
Example #9
def train():
    ims = tf.placeholder(tf.float32,
                         shape=(None, FLAGS.num_samples + 1, FLAGS.num_classes,
                                784))
    labels = tf.placeholder(tf.float32,
                            shape=(None, FLAGS.num_samples + 1,
                                   FLAGS.num_classes, FLAGS.num_classes))

    data_generator = DataGenerator(FLAGS.num_classes, FLAGS.num_samples + 1)

    o = MANN(FLAGS.num_classes, FLAGS.num_samples + 1)
    out = o(ims, labels)

    loss = loss_function(out, labels)
    optim = tf.train.AdamOptimizer(0.001)
    optimizer_step = optim.minimize(loss)
    checkpoint_path = 'chpt_{}.ckpt'

    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(tf.global_variables_initializer())

        for step in range(50000):
            i, l = data_generator.sample_batch('train', FLAGS.meta_batch_size)
            feed = {ims: i.astype(np.float32), labels: l.astype(np.float32)}
            _, ls = sess.run([optimizer_step, loss], feed)

            if step % 100 == 0:
                print("*" * 5 + "Iter " + str(step) + "*" * 5)
                i, l = data_generator.sample_batch('test', 100)
                feed = {
                    ims: i.astype(np.float32),
                    labels: l.astype(np.float32)
                }
                pred, tls = sess.run([out, loss], feed)
                print("Train Loss:", ls, "Test Loss:", tls)
                pred = pred.reshape(-1, FLAGS.num_samples + 1,
                                    FLAGS.num_classes, FLAGS.num_classes)
                pred = pred[:, -1, :, :].argmax(2)
                l = l[:, -1, :, :].argmax(2)
                print("Test Accuracy", (1.0 * (pred == l)).mean())
                o.save_weights(checkpoint_path.format(step))
Example #10
File: run.py Project: dmsedra/faces
def main(epochs=100):
    params = {
        "faces_dir": "data/orl_faces",
        "cifar_file": "data/cifar_10/data_batch_1"
    }
    dg = DataGenerator(**params)
    gen = dg.batch()

    X = tf.placeholder(tf.float32, [None, 32, 32])
    Y = tf.placeholder(tf.float32, [None, 2])
    optimizer, cost, acc, probs = model.small_model(X, Y)

    steps = int(np.ceil(dg.X.shape[0] / dg.batch_size) * epochs)
    init = tf.global_variables_initializer()

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init)
        s = time.time()
        for i in range(steps):
            X_batch, Y_batch = next(gen)

            # update params
            p, c, a = sess.run([optimizer, cost, acc],
                               feed_dict={
                                   X: X_batch,
                                   Y: Y_batch
                               })
            print("Step %d Cost %f Accuracy %f" % (i, c, a))

            # evaluate performance on the held-out set
            if i % 10 == 0:
                X_eval, Y_eval = dg.eval()
                eval_a = sess.run([acc], feed_dict={X: X_eval, Y: Y_eval})
                print("Eval Accuracy %f" % eval_a[0])
        print("Time %f" % (time.time() - s))

        saver.save(sess, "models/small/model.ckpt")
Example #11
        x = self.conv3(x)
        x = nn.functional.leaky_relu(x)
        x = self.conv4(x)
        x = nn.functional.leaky_relu(x)
        x = self.conv5(x)
        x = nn.functional.leaky_relu(x)
        x = self.conv6(x)
        x = nn.functional.leaky_relu(x)
        x = self.conv7(x)
        x = nn.functional.leaky_relu(x)
        x = self.conv8(x)
        x = self.act_fn(x)
        return x


data_generator = DataGenerator(latent_dim)
criterion = nn.BCELoss()
real_label = 1
fake_label = 0
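# BCELoss targets for the GAN: the discriminator is trained toward 1 on real samples
# and toward 0 on generated (fake) samples.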

netG = Generator().to(device)
print(netG)
netD = Discriminator().to(device)
print(netD)
net2D = Discriminator2D().to(device)
print(net2D)

optimizerD = optim.Adam(netD.parameters(), lr=0.0002, betas=(0.5, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=0.0002, betas=(0.5, 0.999))
optimizer2D = optim.Adam(net2D.parameters(), lr=0.0002, betas=(0.5, 0.999))
Example #12
    """
        #############################
        #### YOUR CODE GOES HERE ####
        pass
        #############################
        return out


ims = tf.placeholder(tf.float32,
                     shape=(None, FLAGS.num_samples + 1, FLAGS.num_classes,
                            784))
labels = tf.placeholder(tf.float32,
                        shape=(None, FLAGS.num_samples + 1, FLAGS.num_classes,
                               FLAGS.num_classes))

data_generator = DataGenerator(FLAGS.num_classes, FLAGS.num_samples + 1,
                               {'data_folder': FLAGS.data_root})

o = MANN(FLAGS.num_classes, FLAGS.num_samples + 1)
out = o(ims, labels)

loss = loss_function(out, labels)
optim = tf.train.AdamOptimizer(0.001)
optimizer_step = optim.minimize(loss)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())

    for step in range(50000):
        i, l = data_generator.sample_batch('train', FLAGS.meta_batch_size)
        feed = {ims: i.astype(np.float32), labels: l.astype(np.float32)}
Example #13
        y, h = self.dense4(y)
        y, h = self.dense5(y)
        y = self.dense6(y)
        y = torch.reshape(y, (batch_size, rf, pose_dim))
        if to_print:
            print("Y IS: " + str(y))
        x = x + y
        return x


gen = Generator().to(device)
gen.load_state_dict(torch.load(chkpt_path))
gen.eval()
from load_data import DataGenerator

data_generator = DataGenerator(latent_dim)
latent_sample, real_sample = data_generator.sample_depth_batch(
    'discriminator', batch_size, rf)
#latent_sample = torch.from_numpy(latent_sample).float().to(device)
real_sample = torch.from_numpy(real_sample[:, 10, :]).float().to(device)
fake = gen(real_sample, False).detach().cpu().numpy()
real_sample = real_sample.unsqueeze(1).detach().cpu().numpy()
for i in range(batch_size):
    print("MAKING VIDEO " + str(i))
    render_3d_animation(np.expand_dims(real_sample[i], axis=0),
                        ('boneconvdepthreal_{}.mp4').format(i),
                        depth=True,
                        limit=1)
    render_3d_animation(np.expand_dims(fake[i], axis=0),
                        ('boneconvdepthsampled_{}.mp4').format(i),
                        depth=True,
Example #14
	num_classes, num_support = x_shape[0], x_shape[1]
	num_queries = q_shape[1]
	labels_ph = tf.placeholder(tf.float32, [None, None, None])
	model = ProtoNet([num_filters]*num_conv_layers, latent_dim)
	x_latent = model(tf.reshape(x, [-1, im_height, im_width, channels]))
	q_latent = model(tf.reshape(q, [-1, im_height, im_width, channels]))
	ce_loss, acc = ProtoLoss(x_latent, q_latent, labels_ph, num_classes, num_support, num_queries)
	train_op = tf.train.AdamOptimizer().minimize(ce_loss)
	tf_config = tf.ConfigProto()
	tf_config.gpu_options.allow_growth=True
	sess = tf.InteractiveSession(config=tf_config)
	init_op = tf.global_variables_initializer()
	sess.run(init_op)

	# call DataGenerator with k_shot+n_query samples per class
	data_generator = DataGenerator(n_way, k_shot+n_query, n_meta_test_way, k_meta_test_shot+n_meta_test_query, config={'data_folder': args.data_path})
	for ep in range(n_epochs):
		for epi in range(n_episodes):
			#############################
			#### YOUR CODE GOES HERE ####

			# sample a batch of training data and partition into
			# support and query sets

			inputs, labels = data_generator.sample_batch('meta_train', batch_size=1, swap=False, shuffle=False)
			support, query, labels = split_sampled_dataset(inputs, labels)
			# print(support.shape)
			# print(query.shape)
			# print(labels.shape)

			# support, query, labels = None, None, None
Example #15
	num_classes, num_support = x_shape[0], x_shape[1]
	num_queries = q_shape[1]
	labels_ph = tf.placeholder(tf.float32, [None, None, None])
	model = ProtoNet([num_filters]*num_conv_layers, latent_dim)
	x_latent = model(tf.reshape(x, [-1, im_height, im_width, channels]))
	q_latent = model(tf.reshape(q, [-1, im_height, im_width, channels]))
	ce_loss, acc = ProtoLoss(x_latent, q_latent, labels_ph, num_classes, num_support, num_queries)
	train_op = tf.train.AdamOptimizer().minimize(ce_loss)
	tf_config = tf.ConfigProto()
	tf_config.gpu_options.allow_growth=True
	sess = tf.InteractiveSession(config=tf_config)
	init_op = tf.global_variables_initializer()
	sess.run(init_op)

    # call DataGenerator with k_shot+n_query samples per class
	data_generator = DataGenerator(n_way, k_shot+n_query, n_meta_test_way, k_meta_test_shot+n_meta_test_query, config={'data_folder': args.data_path})
	for ep in range(n_epochs):
		for epi in range(n_episodes):
			#############################
			#### YOUR CODE GOES HERE ####

			# sample a batch of training data and partition into
		    # support and query sets
			
			# support, query, labels = None, None, None

			images_batch, labels_batch = data_generator.sample_batch('meta_train', 1)

			support = images_batch[0, :, :k_shot, :].reshape((n_way, k_shot, im_width, im_height, channels))
			query = images_batch[0, :, k_shot:, :].reshape((n_way, n_query, im_width, im_height, channels))
Example #16
               decay=0.0,
               amsgrad=True)
    batch_size = 20
    epochs = 1000

    leave_dataset_index = 3
    map_index = 2

    #Load Data
    person_input, expected_output, group_input, scene_input, test_input, test_output = load_data(
        leave_dataset_index=leave_dataset_index, map_index=map_index)  #[0:4]
    print(len([scene_input, group_input, person_input][1]))
    #Set data generator
    train_generator = DataGenerator(
        data=[scene_input, group_input, person_input],
        labels=expected_output,
        batch_size=batch_size,
        shuffle=False)
    test_generator = DataGenerator(data=test_input,
                                   labels=test_output,
                                   batch_size=batch_size,
                                   shuffle=False)
    print('####')
    print(scene_input.shape)

    #Compile model
    model.compile(loss='mse', optimizer=opt)

    #Setting TensorBoard
    tbCallback = TensorBoard(log_dir='graph/',
                             histogram_freq=0,
Example #17

if FLAGS.cnn:
    ims = tf.placeholder(tf.float32,
                         shape=(None, FLAGS.num_samples + 1, FLAGS.num_classes,
                                28, 28))
else:
    ims = tf.placeholder(tf.float32,
                         shape=(None, FLAGS.num_samples + 1, FLAGS.num_classes,
                                784))
labels = tf.placeholder(tf.float32,
                        shape=(None, FLAGS.num_samples + 1, FLAGS.num_classes,
                               FLAGS.num_classes))

data_generator = DataGenerator(FLAGS.num_classes,
                               FLAGS.num_samples + 1,
                               config={"flatten": not FLAGS.cnn})

if FLAGS.cnn:
    o = ConvMANN(FLAGS.num_classes, FLAGS.num_samples + 1, FLAGS.dropout)
else:
    o = MANN(FLAGS.num_classes,
             FLAGS.num_samples + 1,
             n_layers=FLAGS.n_layers,
             layer_units=FLAGS.layer_units)
out = o(ims, labels)

loss = loss_function(out, labels)
optim = tf.train.AdamOptimizer(FLAGS.lr)
optimizer_step = optim.minimize(loss)
Example #18
model_save_freq = conf.getint('model_save_freq')
logs_dir = conf['logs_dir']
img_dir, model_dir = init_dirs(logs_dir)

print("Loading data...")
random_transforms = [transforms.RandomRotation(degrees=5)]

transform = transforms.Compose([
    transforms.RandomHorizontalFlip(p=0.5),
    # transforms.RandomApply(random_transforms,
    # p=0.5),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

train_data = DataGenerator(database, transform=transform, n_samples=N)

train_loader = DataLoader(train_data,
                          shuffle=True,
                          batch_size=batch_size,
                          num_workers=4)

netG = Generator(128, 32, 3).to(device)
netD = Discriminator(3, 48).to(device)

criterion = nn.BCELoss()
criterionH = nn.MSELoss()

optimizerD = optim.Adam(netD.parameters(), lr=lr_d, betas=(beta1, beta2))
optimizerG = optim.Adam(netG.parameters(), lr=lr_g, betas=(beta1, beta2))
lr_schedulerG = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
Example #19
    model = ProtoNet([num_filters] * num_conv_layers, latent_dim)
    x_latent = model(tf.reshape(x, [-1, im_height, im_width, channels]))
    q_latent = model(tf.reshape(q, [-1, im_height, im_width, channels]))
    ce_loss, acc = ProtoLoss(x_latent, q_latent, labels_ph, num_classes,
                             num_support, num_queries)
    train_op = tf.train.AdamOptimizer().minimize(ce_loss)
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    sess = tf.InteractiveSession(config=tf_config)
    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    # call DataGenerator with k_shot+n_query samples per class
    data_generator = DataGenerator(n_way,
                                   k_shot + n_query,
                                   n_meta_test_way,
                                   k_meta_test_shot + n_meta_test_query,
                                   config={'data_folder': args.data_path})
    for ep in range(n_epochs):
        for epi in range(n_episodes):
            #############################
            #### YOUR CODE GOES HERE ####

            # sample a batch of training data and partition into
            # support and query sets

            support, query, labels = None, None, None
            #############################
            _, ls, ac = sess.run([train_op, ce_loss, acc],
                                 feed_dict={
                                     x: support,
Example #20
    l_onehot = torch.from_numpy(l_onehot).to(device)
    l_lbl = torch.from_numpy(l_lbl)[:, -1:].view(-1).to(device)
    # last_n_step_labels = l_onehot[:, -1:]
    # last_n_step_labels = last_n_step_labels.squeeze(1).reshape(-1, args.num_classes)  # (B * N, N)
    # l_lbl2 = torch.tensor(last_n_step_labels.argmax(axis=1)).to(device)
    return i, l_onehot, l_lbl


#
#
if __name__ == '__main__':
    #
    # Setup optimization.
    device = getDevice(torch.cuda.is_available(), 0)
    args = getInputArgs()
    data_generator = DataGenerator(args.num_classes, args.num_samples + 1)
    mann = MANN(args.num_classes, args.num_samples + 1).to(device)
    optim = torch.optim.Adam(mann.parameters(), 0.001)
    ce = torch.nn.CrossEntropyLoss()
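    # CrossEntropyLoss expects integer class indices, which is why sampleBatch returns
    # l_lbl (index labels for the loss) alongside the one-hot l_onehot fed to the model.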
    #
    # Train.
    pbar = tqdm(range(50000))
    for step in pbar:
        i, l_onehot, l_lbl = sampleBatch(data_generator, 'train',
                                         args.meta_batch_size, device, args)

        out = mann(i, l_onehot)
        loss = loss_function(ce, out, l_lbl)
        loss.backward()
        optim.step()
        optim.zero_grad()
Example #21
    model = ProtoNet([num_filters] * num_conv_layers, latent_dim)
    x_latent = model(tf.reshape(x, [-1, im_height, im_width, channels]))
    q_latent = model(tf.reshape(q, [-1, im_height, im_width, channels]))
    ce_loss, acc = ProtoLoss(x_latent, q_latent, labels_ph, num_classes,
                             num_support, num_queries)
    train_op = tf.train.AdamOptimizer().minimize(ce_loss)
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    sess = tf.InteractiveSession(config=tf_config)
    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    # call DataGenerator with k_shot+n_query samples per class
    data_generator = DataGenerator(n_way,
                                   k_shot + n_query,
                                   n_meta_test_way,
                                   k_meta_test_shot + n_meta_test_query,
                                   config={'data_folder': args.data_path})

    experiments = []
    exp_filename = 'result_proto_meta_val_acc' + '_n_way' + str(
        n_way) + '_k_shot' + str(k_shot) + '_n_query' + str(n_query) + '.csv'
    for ep in range(n_epochs):
        for epi in range(n_episodes):
            #############################
            #### YOUR CODE GOES HERE ####

            # sample a batch of training data and partition into
            # support and query sets

            # support, query, labels = None, None, None
Example #22
        input_label_scaled = tf.reshape(input_labels,
                                        (-1, K * N, N))[:, :-1, :]
        input_label_scaled = tf.concat(
            (input_label_scaled, tf.zeros_like(input_label_scaled[:, -1:])), 1)
        input_label_scaled = tf.dtypes.cast(input_label_scaled, tf.float32)
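        # the label at the final time step was dropped and replaced with zeros above,
        # so the network has to predict it rather than copy it from the input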

        x = tf.concat((input_img_scaled, input_label_scaled), -1)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        out = self.layer_reshape(x)
        #############################
        return out


data_generator = DataGenerator(FLAGS.num_classes, FLAGS.num_samples + 1)
o = MANN(FLAGS.num_classes, FLAGS.num_samples + 1)
optim = tf.keras.optimizers.Adam(0.001)


def train_step(images, labels):
    with tf.GradientTape() as tape:
        pred = o(images, labels)
        loss = loss_function(pred, labels)
    gradients = tape.gradient(loss, o.trainable_variables)
    optim.apply_gradients(zip(gradients, o.trainable_variables))
    return loss


for step in range(50000):
    image, label = data_generator.sample_batch('train', FLAGS.meta_batch_size)
Example #23
        Returns:
            [B, K+1, N, N] predictions
        """
        #############################
        #### YOUR CODE GOES HERE ####
        pass
        #############################
        return  # temporary; uncomment the line below once the function is implemented
        # return out

ims = tf.placeholder(tf.float32, shape=(
    None, FLAGS.num_samples + 1, FLAGS.num_classes, 784))
labels = tf.placeholder(tf.float32, shape=(
    None, FLAGS.num_samples + 1, FLAGS.num_classes, FLAGS.num_classes))

data_generator = DataGenerator(
    FLAGS.num_classes, FLAGS.num_samples + 1)

#FOR TESTING FLAGS ONLY
print(FLAGS.num_samples, FLAGS.num_classes, FLAGS.meta_batch_size)

'''
o = MANN(FLAGS.num_classes, FLAGS.num_samples + 1)
out = o(ims, labels)

loss = loss_function(out, labels)
optim = tf.train.AdamOptimizer(0.001)
optimizer_step = optim.minimize(loss)

'''

'''
Example #24
	labels_ph = tf.placeholder(tf.float32, [None, None, None])
	model = ProtoNet([num_filters]*num_conv_layers, latent_dim)
	x_latent = model(tf.reshape(x, [-1, im_height, im_width, channels]))
	q_latent = model(tf.reshape(q, [-1, im_height, im_width, channels]))
	ce_loss, acc = ProtoLoss(x_latent, q_latent, labels_ph, num_classes, num_support, num_queries)
	train_op = tf.train.AdamOptimizer().minimize(ce_loss)
	tf_config = tf.ConfigProto()
	tf_config.gpu_options.allow_growth=True
	sess = tf.InteractiveSession(config=tf_config)
	init_op = tf.global_variables_initializer()
	sess.run(init_op)



    # call DataGenerator with k_shot+n_query samples per class
	data_generator = DataGenerator(n_way, k_shot+n_query, n_meta_test_way, k_meta_test_shot+n_meta_test_query, config={'data_folder': args.data_path})
	for ep in range(n_epochs):
		for epi in range(n_episodes):

			# sample a batch of training data and partition into
		    # support and query sets
			meta_batch = data_generator.sample_batch('meta_train', 1, shuffle=False)
			support = np.reshape(meta_batch[0][0, :, :k_shot, :], (n_way, k_shot, im_height, im_width, channels))
			query = np.reshape(meta_batch[0][0, :, k_shot:, :], (n_way, n_query, im_height, im_width, channels))
			labels = meta_batch[1][0, :, k_shot:, :]

			_, ls, ac = sess.run([train_op, ce_loss, acc], feed_dict={x: support, q: query, labels_ph: labels})


			if (epi+1) % 50 == 0:
Example #25
        x = self.layer2(x)
        # Return original shape [B,K+1,N,N]
        out = tf.reshape(x, [-1, K, N, N])
        #############################
        return out


#Placeholders for images and labels
ims = tf.placeholder(tf.float32,
                     shape=(None, FLAGS.num_samples + 1, FLAGS.num_classes,
                            784))
labels = tf.placeholder(tf.float32,
                        shape=(None, FLAGS.num_samples + 1, FLAGS.num_classes,
                               FLAGS.num_classes))

data_generator = DataGenerator(FLAGS.num_classes, FLAGS.num_samples + 1)

o = MANN(FLAGS.num_classes, FLAGS.num_samples + 1)
out = o(ims, labels)

loss = loss_function(out, labels)
print("...tf.trainable_variables():", tf.trainable_variables())

optim = tf.train.AdamOptimizer(0.001)
#optim = tf.compat.v1.train.AdamOptimizer(0.001)
optimizer_step = optim.minimize(loss)
print("...Starts training...")
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())
Example #26
import matplotlib.pyplot as plt
from load_data import DataGenerator
from MANN import MANNCell

N = 10  # num_classes
K = 2  # num_samples_per_class
B = 15
total = N * K * B
manncell = MANNCell(rnn_size=128,
                    memory_size=20,
                    memory_vector_dim=4,
                    head_num=2,
                    samples_per_batch=N * K)
state = manncell.zero_state(B, tf.float32)

data_generator = DataGenerator(num_classes=N, num_samples_per_class=K)

inp = Input(shape=(K * N, 784 + N))
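# each of the K*N time steps is a flattened 28x28 image (784 values) concatenated with an N-dim one-hot label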

controller_output, next_state = manncell(inp, state)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())
    y_pred_memory = tf.concat([controller_output] +
                              next_state['read_vector_list'],
                              axis=2)
    state = next_state
    y_pred_memory = Dense(N, activation='softmax')(y_pred_memory)
    y_pred = y_pred_memory[:, -1, :N]
    adam = tf.keras.optimizers.Adam(learning_rate=0.001)