Example #1
def tsne(args):
    X_train, X_test, X_test_original, Y_test = load_mnist_data()

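    # 100 uniform-noise images shaped like MNIST digits, used as the anomaly set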
    random_image = np.random.uniform(0, 1, (100, 28, 28, 1))
    print("random noise image")
    plt.figure(4, figsize=(2, 2))
    plt.title('random noise image')
    plt.imshow(random_image[0].reshape(28, 28), cmap=plt.cm.gray)

    # intermediate output of the discriminator
    f = model.feature_extractor(args)
    feature_map_of_random = f.predict(random_image, verbose=1)
    feature_map_of_mnist = f.predict(X_test_original[Y_test != 1][:300],
                                     verbose=1)
    feature_map_of_mnist_1 = f.predict(X_test[:100], verbose=1)

    # t-SNE for visualization
    output = np.concatenate(
        (feature_map_of_random, feature_map_of_mnist, feature_map_of_mnist_1))
    output = output.reshape(output.shape[0], -1)
    anomaly_flag = np.array([1] * 100 + [0] * 300)

    X_embedded = TSNE(n_components=2).fit_transform(output)
    plt.figure(5)
    plt.title("t-SNE embedding on the feature representation")
    plt.scatter(X_embedded[:100, 0],
                X_embedded[:100, 1],
                label='random noise(anomaly)')
    plt.scatter(X_embedded[100:400, 0],
                X_embedded[100:400, 1],
                label='mnist(anomaly)')
    plt.scatter(X_embedded[400:, 0],
                X_embedded[400:, 1],
                label='mnist(normal)')
    plt.legend()
    plt.show()
Example #2
def run(data_dir, batchsize=50, n_epochs=50):

    tf.reset_default_graph()
    train_dir = data_dir + 'train/'
    test_dir = data_dir + 'test/'
    train_imgs, train_labels = load_images(train_dir)
    test_imgs, test_labels = load_images(test_dir,
                                         resize=train_imgs.shape[2],
                                         seq_len=train_imgs.shape[1])
    n_samples = train_imgs.shape[0]

    #train_dataset = tf.data.Dataset.from_tensor_slices((train_imgs,train_labels))
    test_dataset = tf.data.Dataset.from_tensor_slices((test_imgs, test_labels))
    train_img = tf.data.Dataset.from_tensor_slices(train_imgs)
    train_labels = tf.data.Dataset.from_tensor_slices(train_labels)

    train_dataset = tf.data.Dataset.zip((train_img, train_labels))
    train_dataset = train_dataset.shuffle(
        buffer_size=100,
        reshuffle_each_iteration=True).batch(batchsize).repeat()
    test_dataset = test_dataset.shuffle(
        buffer_size=100, reshuffle_each_iteration=True).batch(batchsize)

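    # one reinitializable iterator shared by the train and test datasets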
    iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
                                               train_dataset.output_shapes)
    x, y = iterator.get_next()
    train_iterator = iterator.make_initializer(train_dataset)
    test_iterator = iterator.make_initializer(test_dataset)
    # apply random augmentations
    ft_extr = feature_extractor()
    logits = ft_extr.create_3dconv_model(x)

    #loss = weighted_ce(next_element[1],model,.1)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
    prediction = tf.nn.softmax(logits)
    #cnf_matrix = tf.math.confusion_matrix(predictions=tf.to_float(tf.argmax(prediction,1)),labels=tf.to_float(tf.argmax(y, 1)),num_classes=2)
    equality = tf.equal(tf.to_float(tf.argmax(prediction, 1)),
                        tf.to_float(tf.argmax(y, 1)))
    accuracy = tf.reduce_mean(tf.to_float(equality))

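    # run UPDATE_OPS (e.g. batch-norm moving averages) before each optimizer step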
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        optimizer = tf.train.AdamOptimizer(learning_rate=.001).minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(train_iterator)
        saver = tf.train.Saver()

        for epoch in range(n_epochs):
            ep_loss = []
            ep_cnf = []
            for _ in range(int(n_samples / batchsize)):
                _, b_loss = sess.run([optimizer, loss],
                                     feed_dict={'is_training:0': True})
                ep_loss.append(b_loss)
                #ep_cnf.append(cnf_mat)

            print(np.mean(ep_loss))
            #print(np.mean(ep_cnf, axis=0))  # ep_cnf stays empty while the confusion matrix is disabled
            if epoch % 10 == 0:
                save_path = saver.save(
                    sess, data_dir + str(epoch) + "_checkpoint.ckpt")

        print('predicting..')
        save_path = saver.save(sess, data_dir + "final_checkpoint.ckpt")

        sess.run(test_iterator)
        result_set = []
        try:
            while True:
                pred = sess.run(prediction,
                                feed_dict={'is_training:0': False})
                result_set.append(pred)
        except tf.errors.OutOfRangeError:
            pass
Example #3
import numpy as np
import tensorflow as tf
import chess
import model
import input

FLAGS = tf.app.flags.FLAGS

label_strings, _ = input.load_labels()

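# build the graph on the CPU; every placeholder holds a single example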
with tf.device('/cpu:0'):
    board = tf.placeholder(tf.float32, shape=[1, 8, 8, 6])
    turn = tf.placeholder(tf.float32, shape=[1])
    #player = tf.placeholder(tf.float32, shape=[1])
    label = tf.placeholder(tf.int64, shape=[1])
    example = [board, turn]
    features, _ = model.feature_extractor([board])
    logits, _ = model.model(example, features)

    onehot_labels = tf.one_hot(label, model.NUM_LABELS, dtype=tf.float32)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                labels=onehot_labels))

    saver = tf.train.Saver()

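    # allocate GPU memory on demand instead of reserving it all up front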
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    sess = tf.Session(config=config)
    checkpoint = tf.train.get_checkpoint_state(FLAGS.logdir)
    if checkpoint and checkpoint.model_checkpoint_path:
Example #4
    makedir('valid_result')
    valid_result = f'valid_result/{mag}_train-{train_slide}_valid-{valid_slide}_DArate-{DArate}.csv'
    makedir('model_params')
    model_params = f'model_params/{mag}_train-{train_slide}_DArate-{DArate}.pth'
    f = open(log, 'w')
    f_writer = csv.writer(f, lineterminator='\n')
    csv_header = ["epoch", "class_loss", "domain_loss", "train_acc"]
    f_writer.writerow(csv_header)
    f.close()

    torch.backends.cudnn.benchmark = True  # cuDNN benchmark mode

    # load model
    from model import feature_extractor, class_predictor, domain_predictor, DAMIL
    # declare each block
    feature_extractor = feature_extractor()
    class_predictor = class_predictor()
    domain_predictor = domain_predictor(domain_num)
    # DAMIL
    model = DAMIL(feature_extractor, class_predictor, domain_predictor)
    model = model.to(device)

    # use cross entropy loss function
    loss_fn = nn.CrossEntropyLoss()
    # use SGDmomentum for optimizer
    optimizer = optim.SGD(model.parameters(), lr=0.0001, momentum=0.9, weight_decay=0.0)


    # start training
    for epoch in range(EPOCHS):
        # generate bags
Example #5
EP = 15

if __name__ == '__main__':

    argument = sys.argv[1:]
    source_domain = argument[:-1]
    target_domain = argument[-1]

    os.makedirs('./model/' + target_domain, exist_ok=True)

    N = len(source_domain)
    # dataloader
    source_dataloader_list = []
    source_clf = {}
    source_loss = {}
    extractor = feature_extractor().to(device)
    #extractor.load_state_dict(torch.load('./model/2'+target_domain+'/extractor_5.pth'))
    extractor_optim = optim.Adam(extractor.parameters(), lr=3e-4)
    min_ = float('inf')

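    # build one training DataLoader per source domain; min_ tracks the shortest loader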
    for source in source_domain:
        print(source)
        dataset = DATASET(source, source + '_train.csv')
        dataset = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)
        if len(dataset) < min_:
            min_ = len(dataset)

        source_dataloader_list.append(dataset)

        # c1 : for target
        # c2 : for source
Example #6
import numpy as np
from numpy import linalg as LA
import random
import matplotlib.pyplot as plt
from matplotlib import style
import pandas as pd
import seaborn as sns
import math
import cntk as C
from model import feature_extractor
import json

#######################
target_dist = 30
target_var = 5
#######################
f1 = feature_extractor((2, 360), (1, 32), 0.7)
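# load the recorded human radar data (observations and target positions) from JSON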
with open('training_human_data.json') as json_data:
    data = json.load(json_data)
#print(np.array(data[list(data.keys())[0]]['radardata_list'][0]['observation']).shape)
#print(data[list(data.keys())[0]]['radardata_list'][0]['position'])
#print(len(data[list(data.keys())[0]]['radardata_list']))

#print(data.keys())
data_new = {}
targets = {}
for key in data.keys():
    data_new[key] = []
    targets[key] = []
    observations = np.array(data[key]['radardata_list'])
    n = len(observations)
    for i in range(n):
Example #7
def main(src, tar):

	G = feature_extractor().to(device)

	cls_c1 = predictor().to(device)
	cls_c2 = predictor().to(device)

	cls_c1.apply(weights_init_uniform)
	cls_c2.apply(weights_init_uniform)

	###		 dataloader  	 ###
	if src == 'mnist':
		src_train_set = dset.MNIST('./dataset/mnist', train=True, download=True, transform=gray2rgb_transform)	
		
	elif src == 'mnistm':
		src_train_set = DATASET('./dataset/mnistm/train', './dataset/mnistm/train.csv', transforms=rgb_transform)

	elif src == 'svhn':
		src_train_set = dset.SVHN(root='./dataset/svhn/', download=True, transform=rgb_transform)

	elif src == 'usps':
		src_train_set = DATASET('./dataset/usps/train', './dataset/usps/train.csv', transforms=gray2rgb_transform)


	if tar == 'svhn':
		tar_train_set = dset.SVHN(root='./dataset/svhn/', download=True, transform=rgb_transform)

	elif tar == 'mnist':
		tar_train_set = dset.MNIST('./dataset/mnist', train=True, download=True, transform=gray2rgb_transform)

	elif tar == 'mnistm':
		tar_train_set = DATASET('./dataset/mnistm/train', './dataset/mnistm/train.csv', transform=rgb_transform)

	elif tar == 'usps':
		tar_train_set = DATASET('./dataset/usps/train', './dataset/usps/train.csv', transform=rgb_transform)
		
		

	src_train_loader = torch.utils.data.DataLoader(
		dataset = src_train_set,
		batch_size = BATCH_SIZE,
		shuffle = True,
		)

	tar_train_loader = torch.utils.data.DataLoader(
		dataset = tar_train_set,
		batch_size = BATCH_SIZE,
		shuffle = True,
		)

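	# separate Adam optimizers for the shared feature extractor G and the two classifiers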
	optimizer_encoder = optim.Adam(G.parameters() , lr=3e-4, weight_decay=0.0005)
	optimizer_clf_1 = optim.Adam(cls_c1.parameters(), lr=3e-4, weight_decay=0.0005)
	optimizer_clf_2 = optim.Adam(cls_c2.parameters(), lr=3e-4, weight_decay=0.0005)

	# train
	ac_list, loss_list = train(G, cls_c1, cls_c2, optimizer_encoder, optimizer_clf_1, optimizer_clf_2, EP, src_train_loader, tar_train_loader, src, tar)
	ac_list = np.array(ac_list).flatten()
	
	# plot tsne
	loss_list = np.array(loss_list).flatten()
	epoch = [i for i in range(EP)]
	my_function.tsne_plot(G, src_train_loader, tar_train_loader, src, tar, BATCH_SIZE, 'mcd', mode=False)

	### plot learning curve  ###
	plt.figure()
	plt.plot(epoch, ac_list)
	plt.xlabel('EPOCH')
	plt.ylabel('Accuracy')
	plt.title('domain_adapt : ' + src + ' to ' + tar)
	plt.savefig('./learning_curve/domain_adapt_' + src + '_to_' + tar + '_accuracy.jpg')

	plt.figure()
	plt.plot(epoch, loss_list)
	plt.xlabel('EPOCH')
	plt.ylabel('Loss')
	plt.title('domain_adapt : ' + src + ' to ' + tar)
	plt.savefig('./learning_curve/domain_adapt_' + src + '_to_' + tar + '_loss.jpg')
Example #8
filenames = tf.placeholder(tf.string, shape=[None])
dataset = input.inputs(filenames)

iterator = tf.contrib.data.Iterator.from_structure(dataset.output_types,
                                                   dataset.output_shapes)

training_init_op = iterator.make_initializer(dataset)

examples, labels, results = iterator.get_next()

labels = tf.cast(labels, tf.int32)
#labels = tf.one_hot(labels, model.NUM_LABELS, dtype=tf.float32)

# Model
features = model.feature_extractor(examples)
layers = []
nn_logits = model.policy_model(examples, features, layers)

saver = tf.train.Saver(save_relative_paths=True)

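# memory head driven by an intermediate layer of the policy model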
memory = memory_head.MemoryHead(32768 * 8, model.NUM_LABELS)
memory_input = layers[1]  # alternatives (commented out): tf.one_hot(labels, model.NUM_LABELS, dtype=tf.float32), layers[2]
logits, mask, teacher_loss = memory.policy_model(examples, memory_input,
                                                 labels)

config = tf.ConfigProto()
config.gpu_options.allow_growth = True

fullsaver = tf.train.Saver(save_relative_paths=True)
Example #9
def train_with_tf(train_imgs,
                  train_labels,
                  test_imgs,
                  test_labels,
                  n_epochs=10,
                  batchsize=50):

    # additional_train_imgs = np.repeat(train_imgs[:122,:],4,axis=0)
    # additional_train_labels = np.repeat(train_labels[:122,:],4,axis=0)

    # train_imgs = np.concatenate((train_imgs,additional_train_imgs),axis=0)
    # train_labels = np.concatenate((train_labels,additional_train_labels),axis=0)

    # Fourier transform
    # fft_train = fourier_transform(train_imgs)

    # Random cnn features

    # train_imgs,test_imgs = apply_pretrained_model(train_imgs,test_imgs)

    train_dataset = tf.data.Dataset.from_tensor_slices(
        (train_imgs, train_labels))
    valid_dataset = tf.data.Dataset.from_tensor_slices(
        (train_imgs, train_labels))
    test_dataset = tf.data.Dataset.from_tensor_slices((test_imgs, test_labels))

    n_samples = train_imgs.shape[0]

    train_dataset = train_dataset.shuffle(
        buffer_size=100, reshuffle_each_iteration=True).batch(
            batchsize, drop_remainder=True).repeat()  #.prefetch(50)
    valid_dataset = valid_dataset.batch(batchsize)
    test_dataset = test_dataset.batch(batchsize)

    iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
                                               train_dataset.output_shapes)
    next_element = iterator.get_next()

    train_iterator = iterator.make_initializer(train_dataset)
    valid_iterator = iterator.make_initializer(valid_dataset)
    test_iterator = iterator.make_initializer(test_dataset)

    ft_extr = feature_extractor()

    # abandoned as well:
    #model = ft_extr.create_lstm_model(next_element[0])
    #model,pre_fc = ft_extr.small_model(next_element[0])
    model, pre_fc = ft_extr.create_3dconv_model(next_element[0])

    # abandoned approaches
    #loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model,labels=next_element[1]))
    #loss = sparse_cost_sensitive_loss(model,next_element[1],[[1.,1.],[1.,1.]]) #TODO label to onehot
    #loss = weighted_ce(next_element[1],model,.1)
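    # weighted cross entropy: pos_weight = 6000 strongly up-weights the positive class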
    loss = tf.nn.weighted_cross_entropy_with_logits(
        tf.cast(next_element[1], tf.float32), model, tf.constant(6000.))

    prediction = tf.nn.softmax(model)

    cnf_matrix = tf.math.confusion_matrix(
        predictions=tf.to_float(tf.argmax(prediction, 1)),
        labels=tf.to_float(tf.argmax(next_element[1], 1)),
        num_classes=2)
    equality = tf.equal(tf.to_float(tf.argmax(prediction, 1)),
                        tf.to_float(tf.argmax(next_element[1], 1)))
    accuracy = tf.reduce_mean(tf.to_float(equality))

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        optimizer = tf.train.AdamOptimizer(learning_rate=.001).minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(train_iterator)
        saver = tf.train.Saver()

        for epoch in range(n_epochs):
            ep_loss = []
            ep_cnf = []
            for _ in range(int(n_samples / batchsize + .5)):
                _, b_loss, cnf_mat = sess.run(
                    [optimizer, loss, cnf_matrix],
                    feed_dict={'is_training:0': True})
                ep_loss.append(b_loss)
                ep_cnf.append(cnf_mat)

            print(np.mean(ep_loss))
            print(np.mean(ep_cnf, axis=0))
            if epoch % 10 == 0:
                save_path = saver.save(
                    sess, data_dir + str(epoch) + "_checkpoint.ckpt")

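        # second pass over the (unshuffled) training data to collect pre-FC features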
        encoded_train = []
        sess.run(valid_iterator)
        try:
            while True:
                enc_pred = sess.run(pre_fc, feed_dict={'is_training:0': False})
                encoded_train.append(enc_pred)
        except tf.errors.OutOfRangeError:
            pass

        encoded_train = np.vstack(np.array(encoded_train))

        print('predicting..')
        save_path = saver.save(sess, data_dir + "final_checkpoint.ckpt")
        sess.run(test_iterator)
        result_set = []
        encoded_test = []
        try:
            while True:
                pred, pre_feats = sess.run([prediction, pre_fc],
                                           feed_dict={'is_training:0': False})
                result_set.append(pred)
                encoded_test.append(pre_feats)
        except tf.errors.OutOfRangeError:
            pass
        # range
        # sess.run(optimizer,cost)
    # training is false and true sometimes

    # save_path = saver.save(sess, "path.ckpt")
    # saver.restore(sess, "path.ckpt")
    encoded_test = np.vstack(np.array(encoded_test))
    result_set = np.vstack(np.array(result_set))

    return encoded_train, encoded_test, result_set
Example #10
def main(src, tar):

    G = feature_extractor().to(device)

    cls_c1 = predictor().to(device)
    cls_c2 = predictor().to(device)

    cls_c1.apply(weights_init_uniform)
    cls_c2.apply(weights_init_uniform)

    ###		 dataloader  	 ###
    if src == 'sketch':
        src_train_set = DATASET('sketch', 'sketch_train.csv')

    elif src == 'infograph':
        src_train_set = DATASET('infograpth', 'infograph_train.csv')

    elif src == 'real':
        src_train_set = DATASET('real', 'real_train.csv')

    elif src == 'quickdraw':
        src_train_set = DATASET('quickdraw', 'quickdraw_train.csv')

    if tar == 'sketch':
        tar_train_set = DATASET('sketch', 'sketch_train.csv')

    elif tar == 'infograph':
        tar_train_set = DATASET('infograpth', 'infograph_train.csv')

    elif tar == 'real':
        tar_train_set = DATASET('real', 'real_train.csv')

    elif tar == 'quickdraw':
        tar_train_set = DATASET('quickdraw', 'quickdraw_train.csv')

    src_train_loader = torch.utils.data.DataLoader(
        dataset=src_train_set,
        batch_size=BATCH_SIZE,
        shuffle=True,
    )

    tar_train_loader = torch.utils.data.DataLoader(
        dataset=tar_train_set,
        batch_size=BATCH_SIZE,
        shuffle=True,
    )

    optimizer_encoder = optim.Adam(G.parameters(),
                                   lr=2e-4,
                                   weight_decay=0.0005)
    optimizer_clf_1 = optim.Adam(cls_c1.parameters(),
                                 lr=2e-4,
                                 weight_decay=0.0005)
    optimizer_clf_2 = optim.Adam(cls_c2.parameters(),
                                 lr=2e-4,
                                 weight_decay=0.0005)

    # train
    ac_list, loss_list = train(G, cls_c1, cls_c2, optimizer_encoder,
                               optimizer_clf_1, optimizer_clf_2, EP,
                               src_train_loader, tar_train_loader, src, tar)
    ac_list = np.array(ac_list).flatten()

    # plot tsne
    loss_list = np.array(loss_list).flatten()
    epoch = [i for i in range(EP)]  # x-axis for the learning-curve plots below
    #my_function.tsne_plot(G, src_train_loader, tar_train_loader, src, tar, BATCH_SIZE, 'mcd', mode=False)

    ### plot learning curve  ###
    plt.figure()
    plt.plot(epoch, ac_list)
    plt.xlabel('EPOCH')
    plt.ylabel('Accuracy')
    plt.title('domain_adapt : ' + src + ' to ' + tar)
    plt.savefig('./learning_curve/domain_adapt_' + src + '_to_' + tar +
                '_accuracy.jpg')

    plt.figure()
    plt.plot(epoch, loss_list)
    plt.xlabel('EPOCH')
    plt.ylabel('Loss')
    plt.title('domain_adapt : ' + src + ' to ' + tar)
    plt.savefig('./learning_curve/domain_adapt_' + src + '_to_' + tar +
                '_loss.jpg')