Example #1
def hp_gan_run(model_name="TAGAN"):
    run_id = datetime.now().strftime("%Y%m%d-%H%M%S")
    logdir = os.path.join("../Logs", "hpsweep-" + model_name + run_id)
    labels = config.FEATURES

    dataloader = Dataloader("5000d",
                            labels,
                            normalized=True,
                            continuous_labels=False)
    out_subject = 31
    dataset = dataloader("gan", 64, leave_out=out_subject)

    for heads in config.HP_HEADS.domain.values:
        for posenc in config.HP_POSENC.domain.values:
            hparams = {config.HP_HEADS: heads, config.HP_POSENC: posenc}

            run_name = "%d-heads.p_enc-%s" % (heads, str(posenc))
            run_logdir = os.path.join(logdir, run_name)
            print('--- Starting trial: %s' % run_name)
            print({h.name: hparams[h] for h in hparams})

            trainer = GAN_Trainer(mode=model_name,
                                  batch_size=64,
                                  hparams=hparams,
                                  logdir=run_logdir,
                                  num_classes=2,
                                  n_signals=len(labels),
                                  leave_out=out_subject,
                                  class_conditional=True,
                                  subject_conditional=True,
                                  save_image_every_n_steps=200,
                                  n_critic=5,
                                  train_steps=200000)

            trainer.train(dataset=dataset)
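
The nested loops iterate config.HP_HEADS.domain.values and config.HP_POSENC.domain.values, which matches the TensorBoard HParams API. A minimal sketch of what the assumed config definitions could look like; the names and value ranges are illustrative, not taken from the source:

from tensorboard.plugins.hparams import api as hp

# Hypothetical sweep domains; the real config module is not shown.
HP_HEADS = hp.HParam("heads", hp.Discrete([2, 4, 8]))
HP_POSENC = hp.HParam("positional_encoding", hp.Discrete([True, False]))

# .domain.values is the list the loops above walk over:
print(HP_HEADS.domain.values)  # [2, 4, 8]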
Example #2
def train_loso_gans(model_name):
    run_id = datetime.now().strftime("%Y%m%d-%H%M%S")
    logdir = os.path.join("../Logs", "loso-" + model_name + run_id)
    hparams = config.OPT_PARAMS["gan"]
    labels = config.FEATURES

    dataloader = Dataloader("5000d",
                            labels,
                            normalized=True,
                            continuous_labels=False)

    for out_subject in config.OUT_SUBJECT.domain.values:
        run_name = "subject-%d-out" % out_subject
        run_logdir = os.path.join(logdir, run_name)
        tf.print("Training GAN sans subject %d." % out_subject)
        dataset = dataloader("gan",
                             hparams[config.HP_GAN_BATCHSIZE],
                             leave_out=out_subject)
        trainer = GAN_Trainer(mode=model_name,
                              batch_size=hparams[config.HP_GAN_BATCHSIZE],
                              hparams=hparams,
                              logdir=run_logdir,
                              num_classes=2,
                              n_signals=len(labels),
                              leave_out=out_subject,
                              class_conditional=True,
                              subject_conditional=True,
                              save_image_every_n_steps=1500,
                              n_critic=5,
                              train_steps=200000)

        trainer.train(dataset=dataset)

        del dataset
        del trainer
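
This function trains one GAN per held-out subject (leave-one-subject-out cross-validation), deleting the dataset and trainer at the end of each fold to free memory before the next one. For reference, a generic LOSO split looks like the sketch below; the arrays are stand-ins, not the project's Dataloader internals.

import numpy as np

def loso_split(subjects, leave_out):
    """Boolean masks that hold out every sample of one subject."""
    subjects = np.asarray(subjects)
    test_mask = subjects == leave_out
    return ~test_mask, test_mask

train_mask, test_mask = loso_split([0, 0, 1, 2, 31, 31], leave_out=31)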
Example #3
def run_gan(model_name):
    run_id = datetime.now().strftime("%Y%m%d-%H%M%S")
    logdir = os.path.join("../Logs", "loso-" + model_name + run_id)
    leave_out = 31
    hparams = config.OPT_PARAMS["gan"]
    features = config.FEATURES
    dataloader = Dataloader("5000d",
                            features,
                            continuous_labels=False,
                            normalized=True)
    dataset = dataloader("gan",
                         hparams[config.HP_GAN_BATCHSIZE],
                         leave_out=leave_out)
    trainer = GAN_Trainer(mode=model_name,
                          batch_size=hparams[config.HP_GAN_BATCHSIZE],
                          hparams=hparams,
                          logdir=logdir,
                          num_classes=2,
                          n_signals=len(features),
                          leave_out=leave_out,
                          class_conditional=True,
                          subject_conditional=True,
                          save_image_every_n_steps=200,
                          n_critic=5,
                          train_steps=200000)

    trainer.train(dataset=dataset)
Example #4
    def __init__(self,
                 path,
                 batch_size,
                 features,
                 subject_conditioned,
                 categorical_sampling,
                 argmaxed_label=False):
        self.batch_chunks = 16
        self.batch_size = batch_size // self.batch_chunks

        self.fake_datagenerator = DatasetGenerator(
            batch_size=self.batch_chunks,
            path=path,
            subject_conditioned=subject_conditioned,
            categorical_sampling=categorical_sampling,
            no_subject_output=True,
            argmaxed_label=argmaxed_label)

        self.real_dataloader = Dataloader("5000d",
                                          features,
                                          normalized=True,
                                          continuous_labels=False)
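
The constructor pairs a generator-backed fake source with the real Dataloader, presumably so real and synthetic batches can be mixed downstream. A hypothetical sketch of such mixing with tf.data (TF >= 2.7), not the class's actual implementation:

import tensorflow as tf

real = tf.data.Dataset.range(10)        # stand-in for real batches
fake = tf.data.Dataset.range(100, 110)  # stand-in for generated batches
# Draw from both sources with equal probability:
mixed = tf.data.Dataset.sample_from_datasets([real, fake],
                                             weights=[0.5, 0.5])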
Example #5
                        default=0.4,
                        metavar='D',
                        help='dropout rate (default: 0.4)')
    parser.add_argument('--tensorboard',
                        type=str,
                        default='default_tb',
                        metavar='TB',
                        help='Name for tensorboard model')
    args = parser.parse_args()

    device = t.device('cuda' if t.cuda.is_available() else 'cpu')

    writer = SummaryWriter(args.tensorboard)

    t.set_num_threads(args.num_threads)
    loader = Dataloader('./data/')

    model = Model(args.num_layers,
                  args.num_heads,
                  args.dropout,
                  max_len=loader.max_len,
                  embeddings_path='./data/embeddings.npy')
    model.to(device)

    optimizer = Optimizer(model.learnable_parameters(),
                          lr=0.0002,
                          amsgrad=True)

    print('Model initialized')

    for i in range(args.num_iterations):
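
The excerpt is truncated at the training loop. A generic iteration for this kind of setup might look like the sketch below; next_batch, the loss call, and the logging tag are hypothetical, and it assumes the custom Optimizer mirrors torch.optim's zero_grad/step interface.

for i in range(args.num_iterations):
    # Hypothetical loader and model APIs, for illustration only:
    source, target = loader.next_batch(args.batch_size, 'train', device)
    loss = model.loss(source, target)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    writer.add_scalar('loss/train', loss.item(), i)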
Example #6
                        help='num layers in decoder (default: 8)')
    parser.add_argument('--num-heads',
                        type=int,
                        default=14,
                        metavar='NH',
                        help='num heads in each decoder layer (default: 14)')
    parser.add_argument('--state-dict',
                        type=str,
                        default='',
                        metavar='SD',
                        help='Path to saved state dict')
    args = parser.parse_args()

    device = t.device('cuda' if t.cuda.is_available() else 'cpu')

    loader = Dataloader('./data/')

    model = Model(args.num_layers,
                  args.num_heads,
                  0.,
                  max_len=loader.max_len,
                  embeddings_path='./data/embeddings.npy')
    model.load_state_dict(t.load(args.state_dict))
    model.to(device)

    model.eval()
    with t.no_grad():
        seed = [1] + loader.sp.EncodeAsIds(args.seed)

        generations = '\n'.join([
            loader.sp.DecodeIds(seed + model.generate(seed, device))
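
The loader.sp calls follow the SentencePiece Python API (EncodeAsIds/DecodeIds); the leading [1] presumably prepends the BOS id. A standalone round-trip for reference; the model path is an assumption:

import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load('./data/sp.model')           # assumed model file
ids = sp.EncodeAsIds('hello world')  # text -> token ids
text = sp.DecodeIds(ids)             # token ids -> text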
Example #7
import matplotlib.pyplot as plt
import numpy as np
import visdom

from skimage.data import astronaut
from skimage.color import rgb2gray
from skimage.filters import sobel
from skimage.segmentation import felzenszwalb, slic, quickshift, watershed
from skimage.segmentation import mark_boundaries
from data.dataloader import Dataloader
# from lib.text_future import get_text_future
from lib.segmentor import Segmentor
vis = visdom.Visdom(env='TBES_Visual_Results_NEW')

bcd500_loader = Dataloader(data_dir='./dataset/BSR')
train_image = bcd500_loader.get_image(mode='train')

raw_image = train_image[2]

if vis.win_exists('raw'):
    vis.close(win='raw')
    assert not vis.win_exists('raw'), 'Closed window still exists'
vis.image(raw_image.transpose(2, 0, 1), win='raw')

segmentor = Segmentor(raw_image)

if vis.win_exists('superpixel'):
    vis.close(win='superpixel')
    assert not vis.win_exists('superpixel'), 'Closed window still exists'
vis.image(mark_boundaries(segmentor.image_data,
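
The excerpt cuts off inside the final vis.image call. For reference, the same mark_boundaries pattern on a stock image, without the project's Segmentor:

from skimage.data import astronaut
from skimage.segmentation import slic, mark_boundaries

img = astronaut()
segments = slic(img, n_segments=250, compactness=10)  # superpixel labels
overlay = mark_boundaries(img, segments)              # boundaries drawn onto the image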
Example #8
def run_loso_cv(model_name, mixed=False, start_from=0):
    run_id = datetime.now().strftime("%Y%m%d-%H%M%S")
    logdir = os.path.join("../Logs", "loso-" + "tagan" + model_name + run_id)
    features = config.FEATURES
    dataloader = Dataloader("5000d",
                            features,
                            normalized=True,
                            continuous_labels=False)
    generator_base_path = "../Logs"
    absolute_ID = 0

    for out_subject in config.OUT_SUBJECT.domain.values:
        subject_label = "subject-%d-out" % out_subject
        test_set = dataloader(mode="test",
                              batch_size=128,
                              leave_out=out_subject,
                              one_hot=True)

        for data_source in config.TRAIN_DATA.domain.values:
            hparams = {
                config.OUT_SUBJECT: out_subject,
                config.TRAIN_DATA: data_source
            }

            train_label = data_source.split('_')

            if train_label[0] == "real":
                steps_per_epoch = None
                augmented = len(train_label) > 1
            else:
                steps_per_epoch = 463
                wgan_path = "loso-wgan-class" if train_label[
                    1] == "cls" else "loso-wgan-class-subject"
                wgan_path = os.path.join(generator_base_path, wgan_path,
                                         subject_label)
                subj_cond = True if train_label[1] == "subjcls" else False
                categorical_sampling = True if (train_label[2]
                                                == "categ") else False
                argmaxed_label = False if train_label[2] == "intpreg" else True

                if mixed:
                    mixed_dataset = MixedDataset(
                        path=wgan_path,
                        batch_size=128,
                        features=features,
                        subject_conditioned=subj_cond,
                        categorical_sampling=categorical_sampling,
                        argmaxed_label=argmaxed_label)
                else:
                    fake_dataset = DatasetGenerator(
                        batch_size=128,
                        path=wgan_path,
                        subject_conditioned=subj_cond,
                        categorical_sampling=categorical_sampling,
                        no_subject_output=True,
                        argmaxed_label=argmaxed_label)

            for rerun in range(config.NUM_RERUNS):
                run_name = "%s-%s" % (subject_label, data_source)
                run_logdir = os.path.join(logdir, subject_label, data_source,
                                          ".%d" % rerun)
                print(
                    "RUN_ID: %d  --  Subject: %d, Trained on %s data (mixed: %s), Restart #%d"
                    % (absolute_ID, out_subject, data_source,
                       mixed and data_source != "real", rerun))

                if absolute_ID < start_from:
                    absolute_ID += 1
                    continue

                if train_label[0] == "real":
                    print("Augmented: %s" % augmented)
                    train_set = dataloader(mode="train",
                                           batch_size=128,
                                           leave_out=out_subject,
                                           one_hot=True,
                                           augmented=augmented)
                    eval_set = dataloader(mode="eval",
                                          batch_size=128,
                                          leave_out=out_subject,
                                          one_hot=True)
                else:
                    if mixed:
                        train_set, eval_set = mixed_dataset(
                            out_subject=out_subject)
                    else:
                        train_set = fake_dataset()
                        eval_set = dataloader(mode="eval",
                                              batch_size=128,
                                              leave_out=out_subject,
                                              one_hot=True,
                                              force_eval_change=True)
                    print(
                        "path: %s\nCategorical sampling: %s\nSubject conditioned: %s\nArgmaxed Label: %s"
                        % (wgan_path, categorical_sampling, subj_cond,
                           argmaxed_label))

                if model_name == "BaseNET":
                    model = BaseNET2(hparams)
                elif model_name == "LSTM":
                    model = ConvLSTM(hparams)

                model.compile(
                    optimizer=tf.keras.optimizers.Adam(learning_rate=0.0008,
                                                       beta_1=0.9,
                                                       beta_2=0.99),
                    loss=[tf.keras.losses.KLD],
                    metrics=["accuracy", MCC(), WF1()])

                callbacks = CallbacksProducer(hparams, run_logdir,
                                              run_name).get_callbacks()
                test_writer = tf.summary.create_file_writer(logdir=run_logdir)

                model.fit(train_set,
                          epochs=100,
                          steps_per_epoch=steps_per_epoch,
                          validation_data=eval_set,
                          callbacks=callbacks)

                tf.print("\n######## Test Result: ########\n")
                test_logs = model.evaluate(test_set)
                test_logs = {
                    out: test_logs[i]
                    for i, out in enumerate(model.metrics_names)
                }
                with test_writer.as_default():
                    for name, value in test_logs.items():
                        tf.summary.scalar("test_" + name, value, step=0)
                tf.print("\n")

                del train_set
                absolute_ID += 1
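
The dict comprehension that names the test metrics has a standard shorter equivalent, since model.evaluate returns values aligned with model.metrics_names:

test_logs = dict(zip(model.metrics_names, model.evaluate(test_set)))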
Example #9
def run(model_name, hparams, logdir, run_name=None, dense_shape=None):
    try:
        if hparams[config.HP_LOSS_TYPE] == "MSE":
            continuous_labels = True
            loss = tf.keras.losses.MeanSquaredError()
            metrics = [SimpleRegressionAccuracy]
            labels = ["arousal"]
        elif hparams[config.HP_LOSS_TYPE] == "BCE":
            continuous_labels = True
            loss = CastingBinaryCrossentropy()
            metrics = [CastingBinaryAccuracy()]
            labels = ["arousal"]
        elif hparams[config.HP_LOSS_TYPE] == "DUAL_BCE":
            continuous_labels = True
            loss = [CastingBinaryCrossentropy(), CastingBinaryCrossentropy()]
            metrics = [CastingBinaryAccuracy(), CastingBinaryAccuracy()]
            labels = ["arousal", "valence"]
        elif hparams[config.HP_LOSS_TYPE] == "KLD":
            continuous_labels = False
            loss = tf.keras.losses.KLDivergence()
            metrics = ["accuracy", MCC()]
            labels = ["arousal"]
    except KeyError:  # no HP_LOSS_TYPE in hparams: fall back to BCE on arousal
        continuous_labels = True
        loss = CastingBinaryCrossentropy()
        metrics = [CastingBinaryAccuracy()]
        labels = ["arousal"]

    features = config.FEATURES
    dataloader = Dataloader("5000d",
                            features,
                            labels,
                            continuous_labels=continuous_labels,
                            normalized=True)
    train_dataset = dataloader("train",
                               128,
                               leave_out=hparams[config.OUT_SUBJECT],
                               one_hot=True)
    eval_dataset = dataloader("eval",
                              128,
                              leave_out=hparams[config.OUT_SUBJECT],
                              one_hot=True)

    if model_name == "BaseNET":
        model = BaseNET2(hparams)  # ResNET(num_classes=1)
    if model_name == "SimpleLSTM":
        model = SimpleLSTM(hparams)
    if model_name == "ConvLSTM":
        model = ConvLSTM(hparams)
    if model_name == "ChannelCNN":
        model = ChannelCNN(hparams, 5)
    if model_name == "DeepCNN":
        model = DeepCNN(hparams)
    if model_name == "LateFuseCNN":
        model = LateFuseCNN(hparams, 5)
    if model_name == "AttentionNET":
        model = AttentionNET(hparams)
    if model_name == "AttentionNET2":
        model = AttentionNET2(hparams)
    if model_name == "AttentionNETDual":
        model = AttentionNETDual(hparams)

    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001,
                                                     beta_1=0.9),
                  loss=loss,
                  metrics=metrics)

    callbacks = CallbacksProducer(hparams, logdir, run_name).get_callbacks()

    model.fit(train_dataset,
              epochs=50,
              validation_data=eval_dataset,
              callbacks=callbacks)

    del model
    del dataloader
    del train_dataset
    del eval_dataset
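
run() deletes its objects at the end, presumably because it is called repeatedly across a sweep. A common addition in that situation (an assumption, not in the source) is to also reset Keras' global graph state between runs:

import gc
import tensorflow as tf

tf.keras.backend.clear_session()  # drop accumulated layer/graph state
gc.collect()                      # reclaim the deleted model and datasets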