Example 1
0
def main(_):
    """Restore a trained CAN generator from its latest checkpoint and
    write 10 batches of generated images to TensorBoard.

    Args:
        _: unused argv placeholder (tf.app.run passes it in).
    """
    batch_size = 64
    noise_dim = 100
    tensorboard_dir = 'tensorboard'
    run_config = tf.ConfigProto()
    # Grow GPU memory on demand instead of grabbing it all up front.
    run_config.gpu_options.allow_growth = True
    # tf.reset_default_graph()
    with tf.Session(config=run_config) as sess:
        writer = tf.summary.FileWriter(tensorboard_dir, sess.graph)
        can = CAN(sess)
        # Building Generator Model from CAN
        tf_random_noise = tf.placeholder(shape=[None, noise_dim],
                                         dtype=tf.float32)
        g_model = can.generator(tf_random_noise)

        generator_summary = tf.summary.image("generator_summary", g_model)
        checkpoint_dir = 'checkpoint/wikiart/'
        latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
        # Fail with a clear message instead of saver.restore(None) blowing
        # up with a confusing error when no checkpoint exists yet.
        if latest_checkpoint is None:
            raise ValueError(
                'No checkpoint found in {!r}'.format(checkpoint_dir))
        saver = tf.train.Saver()
        saver.restore(sess, latest_checkpoint)
        for i in range(10):
            random_noise = np.random.normal(
                0, 1, [batch_size, noise_dim]).astype(np.float32)
            _, summary = sess.run([g_model, generator_summary],
                                  feed_dict={tf_random_noise: random_noise})
            # Pass a distinct global_step so TensorBoard keeps all 10
            # image summaries instead of collapsing them onto one step.
            writer.add_summary(summary, global_step=i)
        # Flush pending events so the final summaries are not lost.
        writer.close()
def main():
    """Train (or load) a beta-VAE on EEG data and a CAN on pupil data,
    then produce VAE reconstructions of the held-out EEG test set.

    NOTE(review): this function appears truncated in this chunk — the
    prediction results computed at the end are assigned but the tail of
    the function is not visible.
    """
    # Set parameters
    vae_epoch = 2        # epochs for the VAE pre-training stage
    can_epoch = 1000     # epochs for the CAN stage
    batch_size = 64
    latent_dim = 10      # size of the shared latent space
    beta_eeg = 5.0       # beta weight for the VAE's KL term (beta-VAE)
    train = True         # False -> load previously saved encoders/decoders

    # Read data sets
    data_root = "/home/zainkhan/bci-representation-learning"
    eeg_train, eeg_test, pupil_train, pupil_test, sub_cond = utils.read_single_trial_datasets(
        data_root)

    if train:
        # Train VAE
        vae = VAE(beta=beta_eeg, latent_dim=latent_dim)
        vae.compile(optimizer=keras.optimizers.Adam())
        vae.fit(eeg_train, epochs=vae_epoch, batch_size=batch_size)

        # Save VAE
        #vae.encoder.save("vae_encoder")
        #vae.decoder.save("vae_decoder")

        # Train CAN
        # The CAN is handed the trained VAE plus both data sets —
        # presumably it aligns pupil data with the EEG latent space;
        # verify against the CAN class definition.
        can = CAN(
            vae=vae,
            can_data=pupil_train,
            vae_data=eeg_train,
            latent_dim=latent_dim,
            epochs=can_epoch,
            batch_size=batch_size,
        )
        # run_eagerly=True disables graph compilation of train_step —
        # slower, but required if the custom train_step is not traceable.
        can.compile(optimizer=keras.optimizers.Adam(), run_eagerly=True)
        can.fit(pupil_train,
                epochs=can_epoch,
                batch_size=batch_size,
                shuffle=False)

        # Save CAN
        can.encoder.save("can_encoder")
        can.decoder.save("can_decoder")
    else:
        # Load all encoders/decoders
        vae = VAE(beta=beta_eeg, latent_dim=latent_dim)
        vae.encoder = keras.models.load_model("vae_encoder")
        vae.decoder = keras.models.load_model("vae_decoder")

        can = CAN(vae=vae, vae_data=eeg_train, latent_dim=latent_dim)
        can.encoder = keras.models.load_model("can_encoder")
        can.decoder = keras.models.load_model("can_decoder")

    # VAE predictions
    encoded_data = vae.encoder.predict(eeg_test)
    decoded_data = vae.decoder.predict(encoded_data)
    fn = utils.get_filename("predictions/", "test-eeg")
Example 3
0
def main(_):
    """Build and train a CAN model inside a GPU-friendly TF session.

    Args:
        _: unused argv placeholder (tf.app.run passes it in).
    """
    # Allocate GPU memory incrementally rather than reserving it all.
    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True

    with tf.Session(config=session_config) as sess:
        model = CAN(sess)
        model.build_model()
        model.train()
Example 4
0
def main():
    """Entry point: configure logging, start the CAN listener thread,
    then run the ReCircle main controller in the foreground."""
    log_handlers = [
        logging.FileHandler("/home/pi/RaspiReCircle/logs/debug.log"),
        logging.StreamHandler(),
    ]
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s [%(levelname)s] %(message)s",
                        handlers=log_handlers)
    logging.info("")
    logging.info("Executing ReCircle machine control script...")

    # CAN traffic is handled on a background thread.
    can_bus = CAN()
    can_thread = threading.Thread(target=CAN_makethread, args=(can_bus,))
    can_thread.start()

    # The machine controller blocks on the main thread.
    recircle = ReCircle(can_bus)
    recircle.maincontroller()
Example 5
0
def main():
    """Launch the CAN and ReCircle worker threads, then hand the main
    thread to the Tk GUI event loop."""
    log_handlers = [
        logging.FileHandler("logs/debug.log"),
        logging.StreamHandler(),
    ]
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s [%(levelname)s] %(message)s",
                        handlers=log_handlers)
    logging.info("")
    logging.info("Executing ReCircle machine control script...")

    # CAN traffic runs on its own background thread.
    can_bus = CAN()
    threading.Thread(target=CAN_makethread, args=(can_bus,)).start()

    # The machine controller gets a second background thread.
    recircle = ReCircle(can_bus)
    threading.Thread(target=ReCircle_makethread, args=(recircle,)).start()

    # The GUI owns the main thread; poll for updates after one second.
    root = tk.Tk()
    app = GUI(root, can_bus, recircle)
    root.protocol("WM_DELETE_WINDOW", app.ask_quit)
    root.after(1000, app.update)
    root.mainloop()
Example 6
0
# NOTE(review): scraped fragment — the `if` matching this `else` (and the
# definitions of get_balanced_data, verbose, model_type, num_folds,
# fig_folder and model_folder) are outside the visible chunk, and the
# fold loop is cut off at the end.
else:
    data, labels, protected = get_balanced_data("adult.data")

# Testing basic model
if verbose:
    print("Creating model.")
input_size = data.shape[1]  # number of feature columns
kf = KFold(n_splits=num_folds)

# NOTE(review): declared but never appended to in the visible code.
test_results = np.array([])

# Cross-validated training: a fresh model is built for every fold.
for i, (train_idx, test_idx) in enumerate(kf.split(data)):
    if model_type == 0:
        model = BasicModel(input_size)
    elif model_type == 1:
        model = CAN(input_size)

    if verbose:
        model.display_models()

    # Create folders for fold
    fold_fig_folder = fig_folder + 'fold{}/'.format(i)
    fold_model_folder = model_folder + 'fold{}/'.format(i)
    Path(fold_fig_folder).mkdir(parents=True, exist_ok=True)
    Path(fold_model_folder).mkdir(parents=True, exist_ok=True)

    # Get training data
    train_data = data[train_idx]
    train_prtd = protected[train_idx]  # protected-attribute values
    train_label = labels[train_idx]
Example 7
0
def save_img(img, img_path, is_image):
    """Render a batch of tensors as an image grid and save it to disk.

    When is_image is true the batch is de-normalised first (per-channel
    mean added back, rescaled to [0, 1], first two channels swapped —
    presumably a BGR/RGB correction; confirm against the data loader).
    """
    if is_image:
        # Undo per-channel mean subtraction, then rescale to [0, 1].
        channel_means = (72.39, 82.91, 73.16)
        for channel, mean in enumerate(channel_means):
            img[:, channel, :, :] += mean
        img /= 255
        img = img[:, [1, 0, 2], :, :]
    grid = torchvision.utils.make_grid(img)
    grid_np = grid.cpu().numpy()
    # npimg[npimg > 0.1] = 1
    # npimg[npimg <= 0.1] = 0
    plt.imshow(np.transpose(grid_np, (1, 2, 0)))
    plt.savefig(img_path)


# Inference: restore trained CAN weights, then dump each batch's input
# images and the network's class-1 softmax probability maps to ./images/.
net = CAN()
net.load_state_dict(torch.load('trained.wts'))
net.to(device)
net.eval()  # inference mode: freeze dropout / batch-norm statistics

with torch.no_grad():
    # enumerate fixes the original bug where `i` was initialised to 0 but
    # never incremented, so every batch overwrote 'images/0.png' and
    # 'images/out0.png'.
    for i, data in enumerate(trainloader):
        imgs, labels = data
        imgs, labels = imgs.to(device), labels.to(device)
        save_img(imgs, 'images/' + str(i) + '.png', True)

        out = net(imgs)
        # out = out.data.max(1)[1][0]
        # Keep channel 1 (presumably the foreground class — TODO confirm
        # class ordering) as a per-pixel probability map.
        out = torch.nn.functional.softmax(out, 1)[:, 1, :, :]
        save_img(out, 'images/out' + str(i) + '.png', False)
Example 8
0
def run_experiment(model_type,
                   data_type,
                   epochs=100,
                   num_folds=5,
                   batch_size=128,
                   testing_inv=10,
                   embed_size=0):
    '''
    Runs a k-fold cross-validation experiment for one model/data
    configuration and records per-epoch test statistics to disk.

    :param model_type: type of model. 0 basic, 1 CAN, 2 CAN with
        embedding, 3 split model.
    :param data_type: type of data. 0 for original, 1 for balanced data.
    :param epochs: training epochs per fold.
    :param num_folds: number of cross-validation folds.
    :param batch_size: mini-batch size for training and testing.
    :param testing_inv: epoch interval at which test stats are recorded.
    :param embed_size: embedding size (only used when model_type == 2).
    :return: None; results are written as CSV files and figures under
        model_testing/.
    '''
    # Load features, labels and protected attributes, optionally balanced.
    def get_data(file_path, data_type):
        data_loader = DataLoader(file_path)
        if data_type == 0:
            data, labels, protected = data_loader.get_numeric_data()
        else:
            data, labels, protected = data_loader.get_numeric_data(True)

        return data, labels, protected

    # Save a bar chart of per-fold values to `path`.
    def fold_bars(values, title, path):
        # Fold Test Classifier Accuracy
        plt.figure(figsize=(10, 15))
        plt.title(title)
        plt.bar([str(x) for x in range(len(values))], values)
        plt.savefig(path)
        plt.close()

    # Append one CSV row to `file_name`.
    def write_to_file(file_name, information):
        with open(file_name, 'a') as csv_file:
            writer = csv.writer(csv_file)
            writer.writerow(information)

    # Persist the train/test index split of a fold for reproducibility.
    def write_idxs_to_txt(file_name, train, test):
        train_str = [str(num) + '\n' for num in train]
        test_str = [str(num) + '\n' for num in test]
        with open(file_name, 'w') as file:
            file.writelines(train_str)
            file.write('--------\n')
            file.writelines(test_str)

    # Evaluate the model on the fold's test split and append the stats,
    # confusion matrix and bookkeeping columns to overview.csv.
    def record_test_stats(model, data, protected, labels, test_idx, batch_size,
                          epoch, fold_fig_folder, start, start_string, i,
                          fold_model_folder, data_type, fold_time_str,
                          embed_size):
        # Testing data
        test_data = data[test_idx]
        test_attr = protected[test_idx]
        test_label = labels[test_idx]

        # Model test statistics
        test_stats = model.test(test_data, test_attr, test_label, batch_size)

        # Confusion Matrix from model
        confusion_mat = model.confusion_matrix(test_data, test_attr,
                                               test_label, batch_size)
        # print('record cm ({})'.format(len(confusion_mat)), confusion_mat)
        # Recording Information
        model.create_figs(epoch, fold_fig_folder)

        # Variables to write to CSV
        curr_time = datetime.now()
        curr_time_string = curr_time.strftime("%d/%m/%Y %H:%M:%S")
        diff_time = curr_time - start
        # print('test_stats:', test_stats)
        record_vars = [
            start_string, curr_time_string, diff_time, i, epoch, *test_stats,
            *confusion_mat, batch_size, fold_fig_folder, fold_model_folder,
            data_type, embed_size
        ]

        write_to_file(fold_time_str + 'overview.csv', record_vars)

    # Sets information for saving data
    fig_folder = 'model_testing/'
    if model_type == 0:
        fig_folder += 'basic/'
    elif model_type == 1:
        fig_folder += 'can/'
    elif model_type == 2:
        fig_folder += 'can_embed/embed_size{}/'.format(embed_size)
    elif model_type == 3:
        fig_folder += 'split/'

    # Record datetime
    start = datetime.now()
    start_string = start.strftime("%d/%m/%Y %H:%M:%S")
    fold_time_str = fig_folder + 'epochs{}_'.format(epochs) + start.strftime(
        "%d-%m-%Y_%H-%M-%S") + '/'

    csv_file_str = fold_time_str
    model_folder = fold_time_str + 'models/'
    fig_folder = fold_time_str + 'graphs/'

    data, labels, protected = get_data('adult.data', data_type)

    # Training information
    input_size = data.shape[1]
    # Fixed random_state keeps the fold assignment reproducible.
    kf = KFold(n_splits=num_folds, shuffle=True, random_state=7215)

    # NOTE(review): test_results is never appended to anywhere in this
    # function, so it stays empty — see the `incr` note below.
    test_results = np.array([])
    for i, (train_idx, test_idx) in enumerate(kf.split(data)):
        print("Fold {}".format(i))
        # Free the previous fold's graph/session state.
        tf.keras.backend.clear_session()
        if model_type == 0:
            model = BasicModel(input_size)
        elif model_type == 1:
            model = CAN(input_size)
        elif model_type == 2:
            model = CANEmbedded(input_size, embed_size)
        elif model_type == 3:
            model = SplitModel(input_size)

        model.display_models()

        # Create folders for fold
        fold_fig_folder = fig_folder + 'fold{}/'.format(i)
        fold_model_folder = model_folder + 'fold{}/'.format(i)
        Path(fold_fig_folder).mkdir(parents=True, exist_ok=True)
        Path(fold_model_folder).mkdir(parents=True, exist_ok=True)
        # Get training data

        write_idxs_to_txt(fold_time_str + 'idxs_fold{}.txt'.format(i),
                          train_idx, test_idx)
        train_data = data[train_idx]
        train_prtd = protected[train_idx]
        train_label = labels[train_idx]
        for epoch in range(epochs):
            if epoch % testing_inv == 0 or epoch == epochs - 1:
                print("\tEpoch: {}".format(epoch))

            # Train model
            model.train(train_data, train_prtd, train_label, batch_size)

            # Testing at defined interval
            if epoch % testing_inv == 0 or epoch == epochs - 1:
                record_test_stats(model, data, protected, labels, test_idx,
                                  batch_size, epoch, fold_fig_folder, start,
                                  start_string, i, fold_model_folder,
                                  data_type, fold_time_str, embed_size)
            if epoch % 100 == 0 or epoch == epochs - 1:
                model.model_save(fold_model_folder, epoch)
        result_names, fig_files = model.result_graph_info()
        # NOTE(review): since test_results is always empty, incr is always
        # 0 and the bar-chart loop below never executes.
        incr = len(test_results) // epochs
        # NOTE(review): this loop variable shadows the fold index `i`.
        for i in range(incr):
            graph_vals = test_results[i::incr]
            fold_bars(graph_vals, result_names[i], fig_folder + fig_files[i])
        # NOTE(review): orphaned tail of another save_img variant — its
        # `def` line and `if is_image:` header were lost when the
        # examples were concatenated.
        img /= 255
        img = img[:,[1,0,2],:,:]
    else:
        # Non-image tensors (e.g. masks): add a channel dim and scale up.
        img = img.unsqueeze(1)
        img *= 255
    img = torchvision.utils.make_grid(img)
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.savefig(img_path)


# Script body: load pretrained CAN weights and re-initialise selected
# layers of net.features as identity kernels.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

torch.manual_seed(102)

net = CAN()



# strict=False tolerates missing/unexpected keys in the checkpoint.
net.load_state_dict(torch.load('../trained.wts'), strict=False)
net.to(device)
net.train()

params = net.state_dict()
# Indices of the dilated conv layers inside net.features — TODO confirm
# against the CAN class definition.
dilated_conv_layers = [36, 39, 42, 45, 48, 51, 54]
for layer_idx in dilated_conv_layers:
    w = params['features.'+str(layer_idx)+'.weight']
    b = params['features.'+str(layer_idx)+'.bias']
    # Zero the kernel, then set the centre tap of each channel-i-to-i
    # filter to 1, so each conv starts as (approximately) an identity map.
    # NOTE(review): the bias `b` is fetched but never zeroed — verify
    # whether that is intentional.
    w.fill_(0)
    for i in range(w.shape[0]):
        w[i,i,1,1] = 1
Example 10
0
    # NOTE(review): fragment — the enclosing save_img `def` line is
    # missing. This variant undoes ImageNet-style normalisation
    # (multiply by std, add mean) before saving.
    if is_image:
        img[:,0,:,:] *= 0.229
        img[:,1,:,:] *= 0.224
        img[:,2,:,:] *= 0.225

        img[:,0,:,:] += 0.485
        img[:,1,:,:] += 0.456
        img[:,2,:,:] += 0.406

        # Swap the first two channels — presumably a BGR/RGB correction;
        # confirm against the data loader.
        img = img[:,[1,0,2],:,:]
    img = torchvision.utils.make_grid(img)
    npimg = img.cpu().numpy()

    plt.imsave(img_path, np.transpose(npimg, (1, 2, 0)), format="png", cmap="hot")

# Evaluation script: restore model weights (`checkpoint` is defined
# outside the visible chunk) and run on the validation loader.
net = CAN()

net.load_state_dict(checkpoint["model_state"])
net.to(device)
net.eval()  # inference mode: freeze dropout / batch-norm statistics

i = 0
save_data = False
# runningScore(2): two-class segmentation metric accumulator — TODO
# confirm against its definition.
running_metrics_val = runningScore(2)

# NOTE(review): loop body is truncated here by the scraping.
with torch.no_grad():
    for data in valloader:
        imgs, labels = data
        imgs, labels = imgs.to(device), labels.to(device)
        
        out = net(imgs)
Example 11
0
# Training setup: logging/run directory, model, optimizer, scheduler and
# checkpoint restore for a CAN lane-segmentation experiment.
resume_training = False
checkpoint_dir = '/home/tejus/lane-seg-experiments/Segmentation/CAN_logger/frontend_only/runs/2018-10-07_18-47-46/best_val_model.pkl'
# Timestamped run directory for TensorBoard summaries and logs.
run_id = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
logdir = os.path.join('runs/' , str(run_id))
writer = SummaryWriter(log_dir=logdir)
print('RUNDIR: {}'.format(logdir))
logger = get_logger(logdir)
logger.info('Let the party begin | Dilated convolutions')

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

torch.manual_seed(102)

# Network definition 

net = CAN()
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=0.0001)
# optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr = 0.0001, momentum=0.9) # 0.00001
# loss_fn = nn.BCEWithLogitsLoss()
# Reduce LR when the monitored validation loss plateaus.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience = 2, verbose = True, min_lr = 0.000001)
# Class weights [1, 57] — presumably to counter heavy background/lane
# class imbalance; confirm against the dataset statistics.
loss_fn = nn.CrossEntropyLoss(weight=torch.cuda.FloatTensor([1, 57]))
net.to(device)

# NOTE(review): both branches below are identical — presumably the resume
# branch was also meant to restore optimizer/scheduler state (or the
# script is truncated here); verify intent before relying on it.
if not resume_training:
    checkpoint = torch.load(checkpoint_dir)
    net.load_state_dict(checkpoint["model_state"],strict = False)

else:
    checkpoint = torch.load(checkpoint_dir)

    net.load_state_dict(checkpoint["model_state"],strict = False)
Example 12
0
# Training setup: logging/run directory, model, optimizer, scheduler and
# (optionally) VGG16-pretrained weights for a CAN segmentation run.
checkpoint_dir = '/mnt/data/tejus/Segmentation/CAN_logger/frontend_only/runs/2018-10-06_14-51-26/best_val_model_tested.pkl'
# Timestamped run directory for TensorBoard summaries and logs.
run_id = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
logdir = os.path.join('runs/', str(run_id))
writer = SummaryWriter(log_dir=logdir)
print('RUNDIR: {}'.format(logdir))
logger = get_logger(logdir)
logger.info(
    'Let the party begin | Dilated convolutions + Local Feature Extraction')
logger.info('Model loaded: {}'.format(checkpoint_dir))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

torch.manual_seed(102)

# Network definition

net = CAN()
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    net.parameters()),
                             lr=0.00001)
# optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr = 0.0001, momentum=0.9) # 0.00001
# loss_fn = nn.BCEWithLogitsLoss()
# Reduce LR when the monitored validation loss plateaus.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                       'min',
                                                       patience=2,
                                                       verbose=True,
                                                       min_lr=0.000001)
loss_fn = nn.CrossEntropyLoss()
net.to(device)
# Bootstrap from ImageNet-pretrained VGG16 weights when starting fresh.
# NOTE(review): the dict comprehension below is truncated by the scraping.
if start_from_scratch:
    pretrained_state_dict = models.vgg16(pretrained=True).state_dict()
    pretrained_state_dict = {
Example 13
0
        # NOTE(review): orphaned tail of another save_img variant — its
        # `def` line, `if is_image:` header and the first mean-add line
        # were lost when the examples were concatenated.
        img[:, 1, :, :] += 82.91
        img[:, 2, :, :] += 73.16
        img /= 255
        img = img[:, [1, 0, 2], :, :]
    else:
        # Non-image tensors (e.g. masks): add a channel dim and scale up.
        img = img.unsqueeze(1)
        img *= 255
    img = torchvision.utils.make_grid(img)
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.savefig(img_path)


device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Load pretrained CAN weights, dropping the final layers (features.62/64)
# so they are re-initialised; strict=False tolerates the missing keys.
net = CAN()
pretrained_state_dict = torch.load('models/CAN_pretrained.wts')
pretrained_state_dict = {
    k: v
    for k, v in pretrained_state_dict.items()
    if 'features.62' not in k and 'features.64' not in k
}
net.load_state_dict(pretrained_state_dict, strict=False)
net.to(device)

print(device)
# NOTE(review): Tensor.to() is not in-place — the two calls below return
# new tensors that are immediately discarded, so they have no effect;
# net.to(device) above already moved all parameters.
net.segment[0].weight.to(device)
net.segment[0].bias.to(device)
print(device)
print(net.features[0].weight.device)
print(net.segment[0].weight.device)