Example #1
def test(choice, X_test, y_test):

    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        #Teacher Model
        if choice['teacher'] == 'lnin':
            model = LNIN(img_size, img_class, sess, choice)
        elif choice['teacher'] == 'lnin_ae':
            model = DNIN(img_size, img_class, sess, choice)
            ae = AE(img_size, img_class, sess, choice)
        elif choice['teacher'] == 'lnin_ssae':
            model = DNIN(img_size, img_class, sess, choice)
            ae = AE(img_size, img_class, sess, choice)
            ssae = SSAE(img_size, img_class, sess, choice)
        elif choice['teacher'] == 'dnin':
            model = DNIN(img_size, img_class, sess, choice)
        elif choice['teacher'] == 'tnin':
            model = TNIN(img_size, img_class, sess, choice)
        elif choice['teacher'] == 'mnin':
            model = MNIN(img_size, img_class, sess, choice)
        else:
            model = SN(img_size, img_class, sess, choice)
        #Student Model
        if choice['student'] == 'lsn':
            model = LSN(img_size, img_class, sess, choice)

        elif choice['student'] == 'dsn':
            model = DSN(img_size, img_class, sess, choice)

        elif choice['student'] == 'msn':
            model = MSN(img_size, img_class, sess, choice)
        elif choice['student'] == 'ssn':
            model = SSN(img_size, img_class, sess, choice)

        if not model.save:
            #sess.run(tf.initialize_all_variables())
            print "No Model to test"
            return

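        # The test set is consumed as 100 batches of 100 samples (i.e. a
        # 10,000-example test set, e.g. CIFAR-10, to judge from the 32x32x3
        # batches elsewhere in this project); `a` accumulates per-batch
        # accuracy, hence the division by 100 below.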
        a = 0
        for i in range(100):
            if choice['student'] == 'none':
                if choice['teacher'] == 'lnin_ssae':
                    hidden = model.get_hidden(X_test[i * 100:(i + 1) * 100])
                    hidden = ae.get_embedding(hidden)
                    a += ssae.test(hidden, y_test[i * 100:(i + 1) * 100])

                else:
                    a += model.test(X_test[i * 100:(i + 1) * 100],
                                    y_test[i * 100:(i + 1) * 100])
            else:
                a += model.test(X_test[i * 100:(i + 1) * 100],
                                y_test[i * 100:(i + 1) * 100])

        print " Test Average Accuracy: ", 1. * a / 100

        return a
Example #2
def main(model_name='AE',
         embedding_size=128,
         n_epochs=1000,
         batch_size=32,
         roi_size=128):
    if model_name == 'VAE':
        model = VAE(embedding_size=embedding_size)
        criterion = VAELoss()
    elif model_name == 'AE':
        model = AE(embedding_size=embedding_size)
        criterion = nn.MSELoss()

    details = 'L{}'.format(embedding_size)
    log_dir = os.path.join(
        log_dir_root, '{}_{}_{}'.format(model_name, details,
                                        time.strftime('%Y%m%d_%H%M%S')))
    #criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

    if is_cuda:
        print('This is CUDA!!!!')
        torch.backends.cudnn.benchmark = True  # speeds up cuDNN for fixed-size inputs
        model = model.cuda()
        criterion = criterion.cuda()

    generator = ROIFlowBatch(mask_file,
                             feat_file,
                             is_cuda=is_cuda,
                             batch_size=batch_size,
                             roi_size=roi_size,
                             is_shuffle=True)
    t = TrainerAutoEncoder(model, optimizer, criterion, generator, n_epochs,
                           log_dir)
    t.fit()
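
VAELoss is defined elsewhere in this project. As a rough sketch (an assumption, not the project's actual implementation), such a criterion typically combines a reconstruction term with a KL divergence, given a model that returns (reconstruction, mean, log_var):

import torch
import torch.nn as nn
import torch.nn.functional as F

class VAELoss(nn.Module):
    """Hypothetical VAE criterion: reconstruction MSE plus KL divergence."""
    def forward(self, recon_x, x, mean, log_var):
        rec = F.mse_loss(recon_x, x, reduction='sum')
        kld = -0.5 * torch.sum(1 + log_var - mean.pow(2) - log_var.exp())
        return (rec + kld) / x.size(0)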
Example #3
    def init_fit(self, X1_train, X2_train, y_train, X1_val, X2_val, y_val, args):
        self.train_loader = get_dataloader(X1_train, X2_train, y_train, args.batch_size)
        self.test_loader = get_dataloader(X1_val, X2_val, y_val, args.batch_size)

        self.predictor = AE(
            encoder_layer_sizes=[X1_train.shape[1]],
            latent_size=args.latent_size,
            decoder_layer_sizes=[X2_train.shape[1]],
            activation=args.activation,
            batch_norm=args.batch_norm,
            dropout=args.dropout,
            mlp_type=self.mlp_type,
            conditional=args.conditional,
            num_labels=10 if args.conditional else 0).to(self.device)
        self.optimizer = torch.optim.Adam(self.predictor.parameters(), lr=args.learning_rate)
        self.scheduler = torch.optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=0.8)
Example #4
    def make_ae(self) -> AE:
        encoder = self.make_encoder(self.get_encoder_input_shape()[1:])
        decoder = self.make_decoder(self.get_latent_code_shape(encoder)[1:])

        model = AE(encoder=encoder,
                   decoder=decoder,
                   learning_rate=WarmupSchedule(1000, self.learning_rate))
        return model
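
WarmupSchedule here comes from the surrounding project. A minimal sketch with the same (warmup_steps, learning_rate) call signature, assuming a simple linear warmup (a guess at the behavior, not the original code):

import tensorflow as tf

class WarmupSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    # Hypothetical: ramp linearly from 0 to learning_rate over warmup_steps.
    def __init__(self, warmup_steps, learning_rate):
        self.warmup_steps = warmup_steps
        self.learning_rate = learning_rate

    def __call__(self, step):
        step = tf.cast(step, tf.float32)
        warmup = tf.cast(self.warmup_steps, tf.float32)
        return self.learning_rate * tf.minimum(1.0, step / warmup)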
Example #5
    def build_model(self):
        # Build the model from the config file and move it to the GPU
        self.model = cc(AE(self.config)).to(self.get_default_device())
        # Switch the model to evaluation mode
        # print(self.model)
        self.model.eval()
        # Load the trained VC model checkpoint via load_state_dict
        # print(f'Load model from /home/ubuntu/test/VC')
        # print(f'Load model from /home/ubuntu/test/VC')
        self.model.load_state_dict(
            torch.load('/home/ubuntu/test/VC/model_140000_2.ckpt',
                       map_location=torch.device("cpu")))

        return
Example #6
    def build_model(self):
        # Build the model from self.config and move it to the GPU as self.model
        self.model = cc(AE(self.config))
        print(self.model)

        # Build the optimizer in self.opt from the settings in self.config
        optimizer = self.config['optimizer']
        self.opt = torch.optim.Adam(self.model.parameters(),
                                    lr=optimizer['lr'],
                                    betas=(optimizer['beta1'],
                                           optimizer['beta2']),
                                    amsgrad=optimizer['amsgrad'],
                                    weight_decay=optimizer['weight_decay'])
        print(self.opt)
        return
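
cc is a project utility used throughout these voice-conversion snippets. A plausible minimal sketch, assuming it simply places a module on the available device:

import torch

def cc(net):
    # Hypothetical helper: move a module to the GPU when one is available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    return net.to(device)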
Example #7
def test(data, folder, e):
    label_col = list(data.columns)
    result = data
    model = AE()
    model.load_state_dict(
        torch.load(f'{folder}/ep{e}data_aug.pth', map_location='cpu'))
    model.eval()
    dataset = AEDataset(data)
    dataloader = DataLoader(dataset=dataset, batch_size=128, shuffle=False)
    for inputs in tqdm(dataloader):
        outputs = model(inputs.float(), 'cpu')
        for i in range(len(outputs)):
            tmp = outputs[i].detach().numpy()
            tmp = pd.DataFrame([tmp], columns=label_col)
            result = pd.concat([result, tmp], ignore_index=True)
    result.to_csv(f'{folder}/data_augment.csv',
                  mode='a',
                  header=True,
                  index=False)
    return result
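
A hypothetical invocation, assuming data is a purely numeric pandas DataFrame and a checkpoint such as ep800data_aug.pth already exists in the folder:

import pandas as pd

data = pd.read_csv('train.csv')  # assumed numeric feature table
augmented = test(data, folder='data_aug', e=800)
print(augmented.shape)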
Example #8
    TC.on_train_end('_')


if __name__ == "__main__":
    data_dir = 'data/coco2017'
    log_dir = 'logs'
    input_image_size = (256, 256, 3)
    batch_size = 10
    latent_dim = 32
    optimizer = tf.keras.optimizers.Adam(1e-3)

    # Initialize and compile models
    incept_model = InceptionV3(include_top=False,
                               pooling='avg',
                               input_shape=input_image_size)
    ae_model = AE(latent_dim, input_image_size)
    cae_model = CAE(latent_dim, input_image_size)
    vae_model = VAE(latent_dim, input_image_size)
    cvae_model = CVAE(latent_dim, input_image_size)
    memcae_model = MemCAE(latent_dim, True, input_image_size, batch_size, 500,
                          optimizer)

    for classes in [['cat']]:
        # Load and augment training data
        ds_train = dataloader(classes, data_dir, input_image_size, batch_size,
                              'train2019')
        ds_val = dataloader(classes, data_dir, input_image_size, batch_size,
                            'val2019')
        class_label = classes[0] if len(classes) == 1 else "similar"

        # Train each model for comparison
Example #9
            image_matrix.append(skt.unsqueeze(0).repeat(1, 3, 1, 1).clone())
            image_matrix.append(gimg_ae.cpu())

            image_matrix.append(
                skt.unsqueeze(0).repeat(1, 3, 1, 1).clone().fill_(1))
            image_matrix.append(torch.nn.functional.interpolate(g_images, 512))

    image_matrix = torch.cat(image_matrix)
    vutils.save_image(0.5 * (image_matrix + 1), im_name, nrow=BATCH_SIZE + 1)


if __name__ == "__main__":
    device = 'cuda'

    from models import AE, RefineGenerator_art, RefineGenerator_face
    net_ae = AE()
    net_ae.style_encoder.reset_cls()
    net_ig = RefineGenerator_face()

    ckpt = torch.load('./models/16.pth')

    net_ae.load_state_dict(ckpt['ae'])
    net_ig.load_state_dict(ckpt['ig'])

    net_ae.to(device)
    net_ig.to(device)

    net_ae.eval()
    #net_ig.eval()

    data_root_colorful = './data/rgb/'
Example #10
def main(args):

    model_alias = 'DeepBiome_%s+%s_%s+fea1_%s+fea2_%s+bs_%s+%s' % (
        args.model,
        args.dataset_name, args.data_type,
        args.fea1,args.fea2,
        args.batch_size,
        args.extra)

    tl.configure("runs/ds.{}".format(model_alias))
    tl.log_value(model_alias, 0)

    """ no stat file needed for now
    stat_alias = 'obj_DataStat+%s_%s' % (args.dataset_name, args.dataset_subset)
    stat_path = os.path.join(
        output_dir, '%s.pkl' % (stat_alias)
    )
    with open(stat_path,'rb') as sf:
        data_stats = pickle.load(sf)
    """

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_num

    data_path = os.path.join(DATA_ROOT, "ibd_{}.pkl".format(args.data_type))


    logger.info("Initializing train dataset")

    # load data
    print('==> loading data'); print()
    (X1_train, X2_train,  y_train), (X1_val, X2_val, y_val) = load_data(data_path)
    train_loader = get_dataloader(X1_train, X2_train, y_train, args.batch_size)
    test_loader = get_dataloader(X1_val, X2_val, y_val, args.batch_size)

    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    ts = time.time()

    def loss_fn(model, recon_x, x, mean, log_var):
        if model == "AE":
            # plain autoencoder: RMSE reconstruction loss
            mseloss = torch.nn.MSELoss()
            return torch.sqrt(mseloss(recon_x, x))
        elif model == "VAE":
            # VAE: reconstruction BCE plus KL divergence, averaged per sample
            BCE = torch.nn.functional.binary_cross_entropy(
                recon_x.view(-1, 28*28), x.view(-1, 28*28), reduction='sum')
            KLD = -0.5 * torch.sum(1 + log_var - mean.pow(2) - log_var.exp())
            return (BCE + KLD) / x.size(0)

    if args.model == "AE":
        predictor = AE(
            encoder_layer_sizes=[X1_train.shape[1]],
            latent_size=args.latent_size,
            decoder_layer_sizes=[X2_train.shape[1]],
            activation=args.activation,
            batch_norm=args.batch_norm,
            dropout=args.dropout,
            conditional=args.conditional,
            num_labels=10 if args.conditional else 0).to(device)
    else:
        predictor = VAE(
            encoder_layer_sizes=args.encoder_layer_sizes,
            latent_size=args.latent_size,
            decoder_layer_sizes=args.decoder_layer_sizes,
            activation=args.activation,
            batch_norm=args.batch_norm,
            dropout=args.dropout,
            conditional=args.conditional,
            num_labels=10 if args.conditional else 0).to(device)

    optimizer = torch.optim.Adam(predictor.parameters(), lr=args.learning_rate)

    logs = defaultdict(list)

    for epoch in range(args.epochs):

        tracker_epoch = defaultdict(lambda: defaultdict(dict))

        for iteration, (x1, x2, y) in enumerate(train_loader):

            x1, x2, y = x1.to(device), x2.to(device), y.to(device)

            if args.conditional:
                x2_hat, z, mean, log_var = predictor(x1, y)
            else:
                x2_hat, z, mean, log_var = predictor(x1)

            for i, yi in enumerate(y):
                id = len(tracker_epoch)
                tracker_epoch[id]['x'] = z[i, 0].item()
                tracker_epoch[id]['y'] = z[i, 1].item()
                tracker_epoch[id]['label'] = yi.item()

            loss = loss_fn(args.model, x2_hat, x2, mean, log_var)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            logs['loss'].append(loss.item())

            if iteration % args.print_every == 0 or iteration == len(train_loader)-1:
                print("Epoch {:02d}/{:02d} Batch {:04d}/{:d}, Loss {:9.4f}".format(
                    epoch, args.epochs, iteration, len(train_loader)-1, loss.item()))
                if args.model =="VAE":
                    if args.conditional:
                        c = torch.arange(0, 10).long().unsqueeze(1)
                        x = predictor.inference(n=c.size(0), c=c)
                    else:
                        x = predictor.inference(n=10)
                """
                plt.figure()
                plt.figure(figsize=(5, 10))
                for p in range(10):
                    plt.subplot(5, 2, p+1)
                    if args.conditional:
                        plt.text(
                            0, 0, "c={:d}".format(c[p].item()), color='black',
                            backgroundcolor='white', fontsize=8)
                    plt.imshow(x[p].view(28, 28).cpu().data.numpy())
                    plt.axis('off')

                if not os.path.exists(os.path.join(args.fig_root, str(ts))):
                    if not(os.path.exists(os.path.join(args.fig_root))):
                        os.mkdir(os.path.join(args.fig_root))
                    os.mkdir(os.path.join(args.fig_root, str(ts)))

                plt.savefig(
                    os.path.join(args.fig_root, str(ts),
                                 "E{:d}I{:d}.png".format(epoch, iteration)),
                    dpi=300)
                plt.clf()
                plt.close('all')
                """
        # Batch test on the full validation split
        x1 = torch.FloatTensor(X1_val).to(device)
        x2 = torch.FloatTensor(X2_val).to(device)
        y = torch.FloatTensor(y_val).to(device)
        if args.conditional:
            x2_hat, z, mean, log_var = predictor(x1, y)
        else:
            x2_hat, z, mean, log_var = predictor(x1)
        val_loss = loss_fn(args.model, x2_hat, x2, mean, log_var)
        print("val_loss: {:9.4f}", val_loss.item())
        """
Example #11
    def __init__(self):
        super(GNNq, self).__init__()
        self.gnnql = AE(rnafeat.shape[1], 256, args.hidden)
        self.gnnqd = AE(gdi.shape[0], 256, args.hidden)
Example #12
                          ('base_lr', args.base_lr), ('n_input', args.n_input),
                          ('n_output', args.n_output), ('archi', args.archi)])

    return config


config = parse_args()

### call data ###
mnist = Mnist()
n_samples = mnist.num_examples

### call models ###
if config['model_name'] == 'AE':
    print('Run AE')
    model = AE(config['n_input'], config['n_output'], config['archi'])
elif config['model_name'] == 'VAE':
    print('Run VAE')
    model = VAE(config['n_input'], config['n_output'], config['archi'])

### make folder ###
mother_folder = config['model_name']
try:
    os.mkdir(mother_folder)
except OSError:
    pass

### outputs ###
latent, reconstr, loss = model.Forward()

lr = config['base_lr']
Example #13
import os
import sys
import torch
import torch.nn as nn
import tqdm
# Use abspath: on Linux, __file__ alone may not yield an absolute path
_BASEDIR = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.join(_BASEDIR, 'src')
sys.path.append(src_dir)
from flow import ROIFlowBatch
from models import AE

mask_file = '/Volumes/behavgenom_archive$/Avelino/screening/CeNDR/MaskedVideos/CeNDR_Set1_160517/BRC20067_worms10_food1-10_Set10_Pos5_Ch6_16052017_165021.hdf5'
feat_file = '/Volumes/behavgenom_archive$/Avelino/screening/CeNDR/Results/CeNDR_Set1_160517/BRC20067_worms10_food1-10_Set10_Pos5_Ch6_16052017_165021_featuresN.hdf5'


if __name__ == '__main__':
    ae = AE(64)
    
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(ae.parameters(), lr=1e-2)
    #%%
    #optimizer = torch.optim.SGD(vae.parameters(), lr=1e-1, momentum=0.9)
    
    n_epochs = 100
    
    #%%
    gen = ROIFlowBatch(mask_file, feat_file, batch_size=32, roi_size=128)
    ae.train()
    for epoch in range(n_epochs):
        pbar = tqdm.tqdm(gen)
        for X in pbar:
            decoded = ae.forward(X)
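
The loop body is truncated here; a minimal sketch of the update step that would normally follow (an assumption, reusing the criterion and optimizer defined above):

            loss = criterion(decoded, X)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()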
Example #14
config['io_type'] = config['i_type'] + '2' + config['o_type']

mnist = Mnist()

config['n_output'] = mnist.out_shape

n_samples = mnist.num_examples
iter_per_epoch = int(n_samples / config['batch_size'])

train_x, train_y = mnist.train_images, mnist.train_labels
test_x, test_y = mnist.test_images, mnist.test_labels

if config['model_name'] == 'AE':
    print('Run AE')
    model = AE(config['n_output'], config['archi'])
elif config['model_name'] == 'VAE':
    print('Run VAE')
    model = VAE(config['n_output'], config['archi'])

mother_folder = os.path.join(config['path_dir'], config['model_name'])
mother_folder = os.path.join(mother_folder, config['io_type'])
try:
    os.mkdir(mother_folder)
except OSError:
    pass

folder_name = os.path.join(mother_folder,
                           config['model_name'] + '_' + config['datasets'])
try:
    os.mkdir(folder_name)
Example #15
    def build_model(self):
        # create model, discriminator, optimizers
        self.model = cc(AE(self.config))
        print(self.model)
        self.model.eval()
        return
Example #16
    from utils import load_params

    net_ig = RefineGenerator().cuda()
    net_ig = nn.DataParallel(net_ig)

    ckpt = './train_results/trial_refine_ae_as_gan_1024_2/models/4.pth'
    if ckpt is not None:
        ckpt = torch.load(ckpt)
        #net_ig.load_state_dict(ckpt['ig'])
        #net_id.load_state_dict(ckpt['id'])
        net_ig_ema = ckpt['ig_ema']
        load_params(net_ig, net_ig_ema)
    net_ig = net_ig.module
    #net_ig.eval()

    net_ae = AE()
    net_ae.load_state_dicts(
        './train_results/trial_vae_512_1/models/176000.pth')
    net_ae.cuda()
    net_ae.eval()

    inception = load_patched_inception_v3().cuda()
    inception.eval()
    '''
    real_features = extract_feature_from_generator_fn( 
        real_image_loader(dataloader, n_batches=1000), inception )
    real_mean = np.mean(real_features, 0)
    real_cov = np.cov(real_features, rowvar=False)
    '''
    #pickle.dump({'feats': real_features, 'mean': real_mean, 'cov': real_cov}, open('celeba_fid_feats.npy','wb') )
Example #17
def data_aug(data, lr=0.001, epoch=800, batch_size=128):
    folder = 'data_aug'
    save_path = f'{folder}/data_augment.csv'
    clean_file(save_path)
    store_e = [700, 750, 800]
    if not os.path.exists(folder):
        os.makedirs(folder)
    else:
        for i in store_e:
            result = test(data, folder, i)
        return result

    train_loss_curve = []
    valid_loss_curve = []
    # load model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = AE()
    model = model.to(device)
    model.train()

    dataset = AEDataset(data)
    train_size = int(0.85 * len(dataset))
    valid_size = len(dataset) - train_size
    train_dataset, valid_dataset = random_split(dataset,
                                                [train_size, valid_size])
    train_dataloader = DataLoader(dataset=train_dataset,
                                  batch_size=batch_size,
                                  shuffle=True)
    valid_dataloader = DataLoader(dataset=valid_dataset,
                                  batch_size=batch_size,
                                  shuffle=True)

    # loss function and optimizer
    # can change loss function and optimizer you want
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    best = 100
    # start training
    for e in range(epoch):
        train_loss = 0.0
        valid_loss = 0.0

        print(f'\nEpoch: {e+1}/{epoch}')
        print('-' * len(f'Epoch: {e+1}/{epoch}'))
        # tqdm to display a progress bar
        for inputs in tqdm(train_dataloader):
            # data from data_loader
            inputs = inputs.float().to(device)
            outputs = model(inputs, device)
            loss = criterion(outputs, inputs)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.item()

        for inputs in tqdm(valid_dataloader):
            # data from data_loader
            inputs = inputs.float().to(device)
            outputs = model(inputs, device)
            # MSE loss
            loss = criterion(outputs, inputs)
            # loss calculate
            valid_loss += loss.item()
        # save the best model weights as .pth file
        loss_epoch = train_loss / len(train_dataset)
        valid_loss_epoch = valid_loss / len(valid_dataset)
        # if valid_loss_epoch < best :
        #     best = valid_loss_epoch
        #     torch.save(model.state_dict(), 'data_aug.pth')
        if e in store_e:
            torch.save(model.state_dict(), f'{folder}/ep{e}data_aug.pth')
            print(f"training in epoch  {e},start augment data!!")
            result = test(data, folder, e)
        print(f'Training loss: {loss_epoch:.4f}')
        print(f'Valid loss: {valid_loss_epoch:.4f}')
        # save loss  every epoch
        train_loss_curve.append(loss_epoch)
        valid_loss_curve.append(valid_loss_epoch)
    # generate training curve
    # visualize(train_loss_curve,valid_loss_curve, 'Data Augmentation')
    return result
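
A hypothetical call, assuming data is the same numeric DataFrame consumed by AEDataset and test above:

import pandas as pd

data = pd.read_csv('train.csv')  # assumed numeric feature table
augmented = data_aug(data, lr=0.001, epoch=800, batch_size=128)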
Example #18
def train(choice, X_train, y_train, X_test, y_test):

    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True

    student_model = None
    teacher_model = None
    with tf.Session(config=config) as sess:
        #Teacher Model
        global ssae, ae
        if choice['teacher'] == 'lnin':
            teacher_model = LNIN(img_size, img_class, sess, choice)
        elif choice['teacher'] == 'lnin_ae':
            teacher_model = DNIN(img_size, img_class, sess, choice)
            ae = AE(img_size, img_class, sess, choice)
        elif choice['teacher'] == 'lnin_ssae':
            teacher_model = DNIN(img_size, img_class, sess, choice)
            ae = AE(img_size, img_class, sess, choice)
            ssae = SSAE(img_size, img_class, sess, choice)
        elif choice['teacher'] == 'dnin':
            teacher_model = DNIN(img_size, img_class, sess, choice)
        elif choice['teacher'] == 'mnin':
            teacher_model = MNIN(img_size, img_class, sess, choice)
        elif choice['teacher'] == 'tnin':
            teacher_model = TNIN(img_size, img_class, sess, choice)
        else:
            teacher_model = SN(img_size, img_class, sess, choice)
        #Student Model
        if choice['student'] == 'lsn':
            student_model = LSN(img_size, img_class, sess, choice)
        elif choice['student'] == 'dsn':
            student_model = DSN(img_size, img_class, sess, choice)
        elif choice['student'] == 'msn':
            student_model = MSN(img_size, img_class, sess, choice)

        if choice['student'] != 'none':
            if not student_model.save:
                sess.run(tf.initialize_all_variables())
                print "Initialize"
        else:
            if choice['teacher'] == 'lnin_ssae':
                if not ssae.save:
                    sess.run(tf.initialize_all_variables())
                    print "Initialize"
            elif choice['teacher'] == 'lnin_ae':
                if not ae.save:
                    sess.run(tf.initialize_all_variables())
                    print "Initialize"
            else:
                if not teacher_model.save:
                    sess.run(tf.initialize_all_variables())
                    print "Initialize"

        max_epochs = choice['epochs']
        l = 0
        a = 0
        test_accuracy = []

        step = 0
        epoch = 0
        loss = []  # np.load("loss_"+choice['teacher']+'_'+choice['student']+"_run"+str(choice['run'])+".npy").tolist()
        accuracy = []  # np.load("accuracy_"+choice['teacher']+'_'+choice['student']+"_run"+str(choice['run'])+".npy").tolist()

        while epoch <= choice['epochs']:
            if step * batch_size > X_train.shape[0] or step == 0:
                s = np.arange(X_train.shape[0])
                np.random.shuffle(s)

                X_train = X_train[s]
                y_train = y_train[s]
                step = 0

                print "Epoch:%d, loss: %f, accuracy: %f" % (epoch, l, a)
                save_results(choice, teacher_model, student_model, loss,
                             accuracy)

                epoch += 1

            X_batch = X_train[step *
                              batch_size:min(X_train.shape[0], (step + 1) *
                                             batch_size), :]
            y_batch = y_train[step *
                              batch_size:min(y_train.shape[0], (step + 1) *
                                             batch_size), :]
            if choice['student'] != 'none':
                s = np.random.choice(X_batch.shape[0], X_batch.shape[0])
                X_batch2 = X_batch[s]
                eps = np.random.rand(X_batch.shape[0])[:, None, None, None]
                eps = np.tile(eps, [1, 32, 32, 3])
                X_batch3 = eps * X_batch + (1 - eps) * X_batch2
                # Note: X_batch3[X_batch3.shape[0]:] is an empty slice, so this
                # concatenation leaves X_batch unchanged as written.
                X_batch = np.concatenate(
                    (X_batch[:X_batch.shape[0]], X_batch3[X_batch3.shape[0]:]))

            if choice['student'] != 'none':
                if choice['teacher'] == 'lnin_ssae':
                    #X_batch, y_batch = teacher_model.correct(X_batch, y_batch)
                    hidden = teacher_model.get_hidden(X_batch)
                    hidden = ae.get_embedding(hidden)
                    labels = ssae.get_embedding(hidden)
                elif choice['teacher'] == 'dnin':
                    labels = teacher_model.predict(X_batch)
                    # labels = y_batch
                elif choice['teacher'] == 'tnin':
                    labels = teacher_model.get_embedding(X_batch)
                    labels[labels >= 0.5] = 1
                    labels[labels < 0.5] = 0
                elif choice['teacher'] == 'mnin':
                    labels = teacher_model.get_sub_prediction(X_batch)
                elif choice['teacher'] == 'lnin':
                    labels = teacher_model.get_logits(X_batch)

                l, a = student_model.train(X_batch, y_batch, labels)
            else:
                if choice['teacher'] == 'lnin_ae':
                    #X_batch, y_batch = teacher_model.correct(X_batch, y_batch)
                    hidden = teacher_model.get_hidden(X_batch)
                    l, a = ae.train(hidden)
                elif choice['teacher'] == 'lnin_ssae':
                    #X_batch, y_batch = teacher_model.correct(X_batch, y_batch)
                    hidden = teacher_model.get_hidden(X_batch)
                    hidden = ae.get_embedding(hidden)
                    l, a = ssae.train(hidden, y_batch)
                else:
                    l, a = teacher_model.train(X_batch, y_batch)

            loss.append(l)
            accuracy.append(a)

            if epoch == choice['epochs']:
                a = 0
                for i in range(100):
                    if choice['student'] != 'none':
                        a += student_model.test(X_test[i * 100:(i + 1) * 100],
                                                y_test[i * 100:(i + 1) * 100])
                    else:
                        a += teacher_model.test(X_test[i * 100:(i + 1) * 100],
                                                y_test[i * 100:(i + 1) * 100])
                a = 1. * a / 100
                print "Test Accuracy ", a
                test_accuracy.append(a)
                if step == 100:
                    np.save("test_accuracy_"+choice['teacher']+'_'+choice['student']\
                        +"_run"+str(choice['run'])+".npy",np.asarray(test_accuracy))
                    print "Student Done"
                    break
            step += 1
Example #19
def train():
    from benchmark import calc_fid, extract_feature_from_generator_fn, load_patched_inception_v3, real_image_loader, image_generator, image_generator_perm
    import lpips

    from config import IM_SIZE_GAN, BATCH_SIZE_GAN, NFC, NBR_CLS, DATALOADER_WORKERS, EPOCH_GAN, ITERATION_AE, GAN_CKECKPOINT
    from config import SAVE_IMAGE_INTERVAL, SAVE_MODEL_INTERVAL, LOG_INTERVAL, SAVE_FOLDER, TRIAL_NAME, DATA_NAME, MULTI_GPU
    from config import FID_INTERVAL, FID_BATCH_NBR, PRETRAINED_AE_PATH
    from config import data_root_colorful, data_root_sketch_1, data_root_sketch_2, data_root_sketch_3

    real_features = None
    inception = load_patched_inception_v3().cuda()
    inception.eval()

    percept = lpips.PerceptualLoss(model='net-lin', net='vgg', use_gpu=True)

    saved_image_folder = saved_model_folder = None
    log_file_path = None
    if saved_image_folder is None:
        saved_image_folder, saved_model_folder = make_folders(
            SAVE_FOLDER, 'GAN_' + TRIAL_NAME)
        log_file_path = saved_image_folder + '/../gan_log.txt'
        log_file = open(log_file_path, 'w')
        log_file.close()

    dataset = PairedMultiDataset(data_root_colorful,
                                 data_root_sketch_1,
                                 data_root_sketch_2,
                                 data_root_sketch_3,
                                 im_size=IM_SIZE_GAN,
                                 rand_crop=True)
    print('the dataset contains %d images.' % len(dataset))
    dataloader = iter(
        DataLoader(dataset,
                   BATCH_SIZE_GAN,
                   sampler=InfiniteSamplerWrapper(dataset),
                   num_workers=DATALOADER_WORKERS,
                   pin_memory=True))

    from datasets import ImageFolder
    from datasets import trans_maker_augment as trans_maker

    dataset_rgb = ImageFolder(data_root_colorful, trans_maker(512))
    dataset_skt = ImageFolder(data_root_sketch_3, trans_maker(512))

    net_ae = AE(nfc=NFC, nbr_cls=NBR_CLS)

    if PRETRAINED_AE_PATH is None:
        PRETRAINED_AE_PATH = 'train_results/' + 'AE_' + TRIAL_NAME + '/models/%d.pth' % ITERATION_AE
    else:
        from config import PRETRAINED_AE_ITER
        PRETRAINED_AE_PATH = PRETRAINED_AE_PATH + '/models/%d.pth' % PRETRAINED_AE_ITER

    net_ae.load_state_dicts(PRETRAINED_AE_PATH)
    net_ae.cuda()
    net_ae.eval()

    RefineGenerator = None
    if DATA_NAME == 'celeba':
        from models import RefineGenerator_face as RefineGenerator
    elif DATA_NAME == 'art' or DATA_NAME == 'shoe':
        from models import RefineGenerator_art as RefineGenerator
    net_ig = RefineGenerator(nfc=NFC, im_size=IM_SIZE_GAN).cuda()
    net_id = Discriminator(nc=3).cuda(
    )  # we use the patch_gan, so the im_size for D should be 512 even if training image size is 1024

    if MULTI_GPU:
        net_ae = nn.DataParallel(net_ae)
        net_ig = nn.DataParallel(net_ig)
        net_id = nn.DataParallel(net_id)

    net_ig_ema = copy_G_params(net_ig)

    opt_ig = optim.Adam(net_ig.parameters(), lr=2e-4, betas=(0.5, 0.999))
    opt_id = optim.Adam(net_id.parameters(), lr=2e-4, betas=(0.5, 0.999))

    if GAN_CKECKPOINT is not None:
        ckpt = torch.load(GAN_CKECKPOINT)
        net_ig.load_state_dict(ckpt['ig'])
        net_id.load_state_dict(ckpt['id'])
        net_ig_ema = ckpt['ig_ema']
        opt_ig.load_state_dict(ckpt['opt_ig'])
        opt_id.load_state_dict(ckpt['opt_id'])

    ## running average meters for logging
    losses_g_img = AverageMeter()
    losses_d_img = AverageMeter()
    losses_mse = AverageMeter()
    losses_rec_s = AverageMeter()

    losses_rec_ae = AverageMeter()

    fixed_skt = fixed_rgb = fixed_perm = None

    fid = [[0, 0]]

    for epoch in range(EPOCH_GAN):
        for iteration in tqdm(range(10000)):
            rgb_img, skt_img_1, skt_img_2, skt_img_3 = next(dataloader)

            rgb_img = rgb_img.cuda()

            rd = random.randint(0, 3)
            if rd == 0:
                skt_img = skt_img_1.cuda()
            elif rd == 1:
                skt_img = skt_img_2.cuda()
            else:
                skt_img = skt_img_3.cuda()

            if iteration == 0:
                fixed_skt = skt_img_3[:8].clone().cuda()
                fixed_rgb = rgb_img[:8].clone()
                fixed_perm = true_randperm(fixed_rgb.shape[0], 'cuda')

            ### 1. train D
            gimg_ae, style_feats = net_ae(skt_img, rgb_img)
            g_image = net_ig(gimg_ae, style_feats)

            pred_r = net_id(rgb_img)
            pred_f = net_id(g_image.detach())

            loss_d = d_hinge_loss(pred_r, pred_f)

            net_id.zero_grad()
            loss_d.backward()
            opt_id.step()

            loss_rec_ae = F.mse_loss(gimg_ae, rgb_img) + F.l1_loss(
                gimg_ae, rgb_img)
            losses_rec_ae.update(loss_rec_ae.item(), BATCH_SIZE_GAN)

            ### 2. train G
            pred_g = net_id(g_image)
            loss_g = g_hinge_loss(pred_g)

            if DATA_NAME == 'shoe':
                loss_mse = 10 * (F.l1_loss(g_image, rgb_img) +
                                 F.mse_loss(g_image, rgb_img))
            else:
                loss_mse = 10 * percept(
                    F.adaptive_avg_pool2d(g_image, output_size=256),
                    F.adaptive_avg_pool2d(rgb_img, output_size=256)).sum()
            losses_mse.update(loss_mse.item() / BATCH_SIZE_GAN, BATCH_SIZE_GAN)

            loss_all = loss_g + loss_mse

            if DATA_NAME == 'shoe':
                ### the grey image reconstruction
                perm = true_randperm(BATCH_SIZE_GAN)
                img_ae_perm, style_feats_perm = net_ae(skt_img, rgb_img[perm])

                gimg_grey = net_ig(img_ae_perm, style_feats_perm)
                gimg_grey = gimg_grey.mean(dim=1, keepdim=True)
                real_grey = rgb_img.mean(dim=1, keepdim=True)
                loss_rec_grey = F.mse_loss(gimg_grey, real_grey)
                loss_all += 10 * loss_rec_grey

            net_ig.zero_grad()
            loss_all.backward()
            opt_ig.step()

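            # Exponential moving average of the generator weights (decay
            # 0.999); the EMA copy net_ig_ema is swapped in below for
            # image snapshots and checkpoints.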
            for p, avg_p in zip(net_ig.parameters(), net_ig_ema):
                avg_p.mul_(0.999).add_(p.data, alpha=0.001)

            ### 3. logging
            losses_g_img.update(pred_g.mean().item(), BATCH_SIZE_GAN)
            losses_d_img.update(pred_r.mean().item(), BATCH_SIZE_GAN)

            if iteration % SAVE_IMAGE_INTERVAL == 0:  #show the current images
                with torch.no_grad():

                    backup_para_g = copy_G_params(net_ig)
                    load_params(net_ig, net_ig_ema)

                    gimg_ae, style_feats = net_ae(fixed_skt, fixed_rgb)
                    gmatch = net_ig(gimg_ae, style_feats)

                    gimg_ae_perm, style_feats = net_ae(fixed_skt,
                                                       fixed_rgb[fixed_perm])
                    gmismatch = net_ig(gimg_ae_perm, style_feats)

                    gimg = torch.cat([
                        F.interpolate(fixed_rgb, IM_SIZE_GAN),
                        F.interpolate(fixed_skt.repeat(1, 3, 1, 1),
                                      IM_SIZE_GAN), gmatch,
                        F.interpolate(gimg_ae, IM_SIZE_GAN), gmismatch,
                        F.interpolate(gimg_ae_perm, IM_SIZE_GAN)
                    ])

                    vutils.save_image(
                        gimg,
                        f'{saved_image_folder}/img_iter_{epoch}_{iteration}.jpg',
                        normalize=True,
                        range=(-1, 1))
                    del gimg

                    make_matrix(
                        dataset_rgb, dataset_skt, net_ae, net_ig, 5,
                        f'{saved_image_folder}/img_iter_{epoch}_{iteration}_matrix.jpg'
                    )

                    load_params(net_ig, backup_para_g)

            if iteration % LOG_INTERVAL == 0:
                log_msg = 'Iter: [{0}/{1}] G: {losses_g_img.avg:.4f}  D: {losses_d_img.avg:.4f}  MSE: {losses_mse.avg:.4f}  Rec: {losses_rec_s.avg:.5f}  FID: {fid:.4f}'.format(
                    epoch,
                    iteration,
                    losses_g_img=losses_g_img,
                    losses_d_img=losses_d_img,
                    losses_mse=losses_mse,
                    losses_rec_s=losses_rec_s,
                    fid=fid[-1][0])

                print(log_msg)
                print('%.5f' % (losses_rec_ae.avg))

                if log_file_path is not None:
                    log_file = open(log_file_path, 'a')
                    log_file.write(log_msg + '\n')
                    log_file.close()

                losses_g_img.reset()
                losses_d_img.reset()
                losses_mse.reset()
                losses_rec_s.reset()
                losses_rec_ae.reset()

            if iteration % SAVE_MODEL_INTERVAL == 0 or iteration + 1 == 10000:
                print('Saving history model')
                torch.save(
                    {
                        'ig': net_ig.state_dict(),
                        'id': net_id.state_dict(),
                        'ae': net_ae.state_dict(),
                        'ig_ema': net_ig_ema,
                        'opt_ig': opt_ig.state_dict(),
                        'opt_id': opt_id.state_dict(),
                    }, '%s/%d.pth' % (saved_model_folder, epoch))

            if iteration % FID_INTERVAL == 0 and iteration > 1:
                print("calculating FID ...")
                fid_batch_images = FID_BATCH_NBR
                if real_features is None:
                    if os.path.exists('%s_fid_feats.npy' % (DATA_NAME)):
                        real_features = pickle.load(
                            open('%s_fid_feats.npy' % (DATA_NAME), 'rb'))
                    else:
                        real_features = extract_feature_from_generator_fn(
                            real_image_loader(dataloader,
                                              n_batches=fid_batch_images),
                            inception)
                        real_mean = np.mean(real_features, 0)
                        real_cov = np.cov(real_features, rowvar=False)
                        pickle.dump(
                            {
                                'feats': real_features,
                                'mean': real_mean,
                                'cov': real_cov
                            }, open('%s_fid_feats.npy' % (DATA_NAME), 'wb'))
                        real_features = pickle.load(
                            open('%s_fid_feats.npy' % (DATA_NAME), 'rb'))

                sample_features = extract_feature_from_generator_fn(
                    image_generator(dataset,
                                    net_ae,
                                    net_ig,
                                    n_batches=fid_batch_images),
                    inception,
                    total=fid_batch_images)
                cur_fid = calc_fid(sample_features,
                                   real_mean=real_features['mean'],
                                   real_cov=real_features['cov'])
                sample_features_perm = extract_feature_from_generator_fn(
                    image_generator_perm(dataset,
                                         net_ae,
                                         net_ig,
                                         n_batches=fid_batch_images),
                    inception,
                    total=fid_batch_images)
                cur_fid_perm = calc_fid(sample_features_perm,
                                        real_mean=real_features['mean'],
                                        real_cov=real_features['cov'])

                fid.append([cur_fid, cur_fid_perm])
                print('fid:', fid)
                if log_file_path is not None:
                    log_file = open(log_file_path, 'a')
                    log_msg = 'fid: %.5f, %.5f' % (fid[-1][0], fid[-1][1])
                    log_file.write(log_msg + '\n')
                    log_file.close()
Example #20
File: main.py, Project: viliusmat/coma-1
        tmp = pickle.load(f, encoding='latin1')

edge_index_list = [utils.to_edge_index(adj).to(device) for adj in tmp['adj']]
down_transform_list = [
    utils.to_sparse(down_transform).to(device)
    for down_transform in tmp['down_transform']
]
up_transform_list = [
    utils.to_sparse(up_transform).to(device)
    for up_transform in tmp['up_transform']
]

model = AE(args.in_channels,
           args.out_channels,
           args.latent_channels,
           edge_index_list,
           down_transform_list,
           up_transform_list,
           K=args.K).to(device)
print(model)

if args.optimizer == 'Adam':
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
elif args.optimizer == 'SGD':
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                weight_decay=args.weight_decay,
                                momentum=0.9)
else:
Example #21
class BiomeAE():
    def __init__(self, args):
        if args.model in ["BiomeAEL0"]:
            self.mlp_type = "L0"
        else:
            self.mlp_type = None

        self.model_alias = args.model_alias
        self.model = args.model
        self.snap_loc = os.path.join(args.vis_dir, "snap.pt")

        #tl.configure("runs/ds.{}".format(model_alias))
        #tl.log_value(model_alias, 0)

        """ no stat file needed for now
        stat_alias = 'obj_DataStat+%s_%s' % (args.dataset_name, args.dataset_subset)
        stat_path = os.path.join(
            output_dir, '%s.pkl' % (stat_alias)
        )
        with open(stat_path,'rb') as sf:
            data_stats = pickle.load(sf)
        """

        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_num
        self.predictor = None

        torch.manual_seed(args.seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed(args.seed)

        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


    def get_transformation(self):
        return None

    def loss_fn(self, recon_x, x, mean, log_var):
        if self.model in ["BiomeAE","BiomeAESnip"]:
            mseloss = torch.nn.MSELoss()
            return torch.sqrt(mseloss(recon_x, x))
        if self.model in ["BiomeAEL0"]:
            mseloss = torch.nn.MSELoss()
            return torch.sqrt(mseloss(recon_x, x))+self.predictor.regularization()
        elif self.model =="BiomeVAE":
            BCE = torch.nn.functional.binary_cross_entropy(
                recon_x.view(-1, 28*28), x.view(-1, 28*28), reduction='sum')
            KLD = -0.5 * torch.sum(1 + log_var - mean.pow(2) - log_var.exp())
            return (BCE + KLD) / x.size(0)

    def param_l0(self):
        return self.predictor.param_l0()

    def init_fit(self, X1_train, X2_train, y_train, X1_val, X2_val, y_val, args):
        self.train_loader = get_dataloader(X1_train, X2_train, y_train, args.batch_size)
        self.test_loader = get_dataloader(X1_val, X2_val, y_val, args.batch_size)

        self.predictor = AE(
            encoder_layer_sizes=[X1_train.shape[1]],
            latent_size=args.latent_size,
            decoder_layer_sizes=[X2_train.shape[1]],
            activation=args.activation,
            batch_norm=args.batch_norm,
            dropout=args.dropout,
            mlp_type=self.mlp_type,
            conditional=args.conditional,
            num_labels=10 if args.conditional else 0).to(self.device)
        self.optimizer = torch.optim.Adam(self.predictor.parameters(), lr=args.learning_rate)
        self.scheduler = torch.optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=0.8)

    def train(self, args):
        if args.contr:
            print("Loading from ", self.snap_loc)
            loaded_model_para = torch.load(self.snap_loc)
            self.predictor.load_state_dict(loaded_model_para)

        t = 0
        logs = defaultdict(list)
        iterations_per_epoch = len(self.train_loader.dataset) / args.batch_size
        num_iterations = int(iterations_per_epoch * args.epochs)
        for epoch in range(args.epochs):

            tracker_epoch = defaultdict(lambda: defaultdict(dict))

            for iteration, (x1, x2, y) in enumerate(self.train_loader):
                t += 1

                x1, x2, y = x1.to(self.device), x2.to(self.device), y.to(self.device)

                if args.conditional:
                    x2_hat, z, mean, log_var = self.predictor(x1, y)
                else:
                    x2_hat, z, mean, log_var = self.predictor(x1)

                for i, yi in enumerate(y):
                    id = len(tracker_epoch)
                    tracker_epoch[id]['x'] = z[i, 0].item()
                    tracker_epoch[id]['y'] = z[i, 1].item()
                    tracker_epoch[id]['label'] = yi.item()

                loss = self.loss_fn(x2_hat, x2, mean, log_var)

                self.optimizer.zero_grad()
                loss.backward()
                if (t + 1) % int(num_iterations / 10) == 0:
                    self.scheduler.step()
                self.optimizer.step()

                #enforce non-negative
                if args.nonneg_weight:
                    for layer in self.predictor.modules():
                        if isinstance(layer, nn.Linear):
                            layer.weight.data.clamp_(0.0)


                logs['loss'].append(loss.item())

                if iteration % args.print_every == 0 or iteration == len(self.train_loader)-1:
                    print("Epoch {:02d}/{:02d} Batch {:04d}/{:d}, Loss {:9.4f}".format(
                        epoch, args.epochs, iteration, len(self.train_loader)-1, loss.item()))
                    if args.model =="VAE":
                        if args.conditional:
                            c = torch.arange(0, 10).long().unsqueeze(1)
                            x = self.predictor.inference(n=c.size(0), c=c)
                        else:
                            x = self.predictor.inference(n=10)

        if not args.contr:
            print("Saving to ", self.snap_loc)
            torch.save(self.predictor.state_dict(), self.snap_loc)

    def fit(self,X1_train, X2_train, y_train, X1_val, X2_val, y_val, args,):
        self.init_fit(X1_train, X2_train, y_train, X1_val, X2_val, y_val, args)
        self.train(args)

    def get_graph(self):
        """
        return nodes and weights
        :return:
        """
        nodes = []
        weights = []
        for l, layer in enumerate(self.predictor.modules()):
            if isinstance(layer, nn.Linear):
                lin_layer = layer
                nodes.append(["%s"%(x) for x in list(range(lin_layer.in_features))])
                weights.append(lin_layer.weight.detach().cpu().numpy().T)
        nodes.append(["%s"%(x) for x in list(range(lin_layer.out_features))]) #last linear layer

        return (nodes, weights)

    def predict(self,X1_val, X2_val, y_val, args):
        #Batch test
        x1, x2, y = torch.FloatTensor(X1_val).to(self.device), torch.FloatTensor(X2_val).to(self.device), torch.FloatTensor(y_val).to(self.device)
        if args.conditional:
            x2_hat, z, mean, log_var = self.predictor(x1, y)
        else:
            x2_hat, z, mean, log_var = self.predictor(x1)
        val_loss = self.loss_fn(x2_hat, x2, mean, log_var)
        print("val_loss: {:9.4f}".format(val_loss.item()))
        return x2_hat.detach().cpu().numpy()

    def transform(self,X1_val, X2_val, y_val, args):
        x1, x2, y = torch.FloatTensor(X1_val).to(self.device), torch.FloatTensor(X2_val).to(
            self.device), torch.FloatTensor(y_val).to(self.device)
        if args.conditional:
            x2_hat, z, mean, log_var = self.predictor(x1, y)
        else:
            x2_hat, z, mean, log_var = self.predictor(x1)

        return z.detach().cpu().numpy()

    def get_influence_matrix(self):
        return self.predictor.get_influence_matrix()
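
A hypothetical end-to-end use of this class, assuming load_data, get_dataloader, and an argparse args shaped as in Example #10:

(X1_train, X2_train, y_train), (X1_val, X2_val, y_val) = load_data(data_path)
net = BiomeAE(args)
net.fit(X1_train, X2_train, y_train, X1_val, X2_val, y_val, args)
X2_pred = net.predict(X1_val, X2_val, y_val, args)  # reconstructed X2
Z = net.transform(X1_val, X2_val, y_val, args)      # latent embeddings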
Example #22
from worm_models import SkeletonsMaps, EigenWorms

if __name__ == '__main__':
    import pandas as pd
    import tables
    from tierpsy.helper.params import read_microns_per_pixel
    from tierpsy.analysis.ske_create.helperIterROI import getROIfromInd

    #load model
    model_dir_root = '/data/ajaver/onedrive/classify_strains/logs/worm_autoencoder'
    dnames = glob.glob(os.path.join(model_dir_root, 'AE_L64*'))
    d = dnames[0]
    embedding_size = int(d.split('AE_L')[-1].partition('_')[0])
    model_path = os.path.join(d, 'checkpoint.pth.tar')
    print(embedding_size)
    model = AE(embedding_size)
    
    
    checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    
    #%%
    mask_file = '/data/ajaver/onedrive/aggregation/N2_1_Ch1_29062017_182108_comp3.hdf5'
    feat_file = mask_file.replace('.hdf5', '_featuresN.hdf5')
    
    w_ind = 264
    ini_f = 1947
    
    microns_per_pixel = read_microns_per_pixel(feat_file)
    
Example #24
SAVE_IMAGES_IN_RESULTS = True

### LOAD DATA
data = datasets.ImageFolder(root=DATA_DIRECTORY,
                            transform=transforms.Compose([
                                transforms.RandomHorizontalFlip(),
                                transforms.ToTensor()
                            ]))

data_loader = torch.utils.data.DataLoader(dataset=data,
                                          batch_size=BATCH_SIZE,
                                          shuffle=True)

### INIT MODEL
device = torch.device("cpu")  #change if on GPU, also need to use .cuda()
model = AE().to(device)

### MSE LOSS AND ADAM OPTIMIZER
criterion = nn.MSELoss(reduction='mean')  # modern equivalent of size_average=True, reduce=True
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)

### TRAIN
for epoch in range(EPOCHS):
    for idx, (sample, _) in enumerate(data_loader):
        if sample.shape[0] < BATCH_SIZE: break
        reconstruction, z = model(sample)
        loss = criterion(reconstruction, sample)
        optimizer.zero_grad()
        loss.sum().backward()
        optimizer.step()
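
A short follow-on sketch for collecting latent codes once training finishes, assuming (as the loop above implies) that the model's forward returns (reconstruction, z):

import torch

zs = []
with torch.no_grad():
    for sample, _ in data_loader:
        _, z = model(sample)
        zs.append(z)
zs = torch.cat(zs)  # latent codes for the whole dataset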
Example #25
parser.add_argument('--encoded', type=str, default=None,
                    help='Encoded file')

args = parser.parse_args()

np.random.seed(int(time.time()))
torch.manual_seed(int(time.time()))


# Load data
adj, inv_adj, features, labels, n_feat, n_class = load_data(n_conf=args.n_conf)

# Model and optimizer
if args.model == 'AE' or args.model == 'PCA':
    model = AE(n_feat=n_feat,
               n_hid=args.hidden,
               n_lat=args.latent,
               dropout=args.dropout)
elif args.model == 'GCAE':
    model = GCAE(n_feat=n_feat,
                n_hid=args.hidden,
                n_lat=args.latent,
                dropout=args.dropout)
else:
    raise ValueError("Unknown model; choose 'AE', 'PCA', or 'GCAE'")

optimizer = optim.Adam(model.parameters(), lr=args.lr)
criterion = nn.MSELoss()

if args.checkpoint is not None:
    checkpoint = torch.load(args.checkpoint)
    model.load_state_dict(checkpoint['model_state_dict'])
Example #26
    def __init__(self,
                 domain,
                 train_data_file,
                 validation_data_file,
                 test_data_file,
                 minibatch_size,
                 rng,
                 device,
                 behav_policy_file_wDemo,
                 behav_policy_file,
                 context_input=False,
                 context_dim=0,
                 drop_smaller_than_minibatch=True,
                 folder_name='/Name',
                 autoencoder_saving_period=20,
                 resume=False,
                 sided_Q='negative',
                 autoencoder_num_epochs=50,
                 autoencoder_lr=0.001,
                 autoencoder='AIS',
                 hidden_size=16,
                 ais_gen_model=1,
                 ais_pred_model=1,
                 embedding_dim=4,
                 state_dim=42,
                 num_actions=25,
                 corr_coeff_param=10,
                 dst_hypers={},
                 cde_hypers={},
                 odernn_hypers={},
                 **kwargs):
        '''
        We assume discrete actions and scalar rewards!
        '''

        self.rng = rng
        self.device = device
        self.train_data_file = train_data_file
        self.validation_data_file = validation_data_file
        self.test_data_file = test_data_file
        self.minibatch_size = minibatch_size
        self.drop_smaller_than_minibatch = drop_smaller_than_minibatch
        self.autoencoder_num_epochs = autoencoder_num_epochs
        self.autoencoder = autoencoder
        self.autoencoder_lr = autoencoder_lr
        self.saving_period = autoencoder_saving_period
        self.resume = resume
        self.sided_Q = sided_Q
        self.num_actions = num_actions
        self.state_dim = state_dim
        self.corr_coeff_param = corr_coeff_param

        self.context_input = context_input  # Check to see if we'll one-hot encode the categorical contextual input
        self.context_dim = context_dim  # Check to see if we'll remove the context from the input and only use it for decoding
        self.hidden_size = hidden_size

        if self.context_input:
            self.input_dim = self.state_dim + self.context_dim + self.num_actions
        else:
            self.input_dim = self.state_dim + self.num_actions

        self.autoencoder_lower = self.autoencoder.lower()
        self.data_folder = folder_name + f'/{self.autoencoder_lower}_data'
        self.checkpoint_file = folder_name + f'/{self.autoencoder_lower}_checkpoints/checkpoint.pt'
        if not os.path.exists(folder_name +
                              f'/{self.autoencoder_lower}_checkpoints'):
            os.mkdir(folder_name + f'/{self.autoencoder_lower}_checkpoints')
        if not os.path.exists(folder_name + f'/{self.autoencoder_lower}_data'):
            os.mkdir(folder_name + f'/{self.autoencoder_lower}_data')
        self.store_path = folder_name
        self.gen_file = folder_name + f'/{self.autoencoder_lower}_data/{self.autoencoder_lower}_gen.pt'
        self.pred_file = folder_name + f'/{self.autoencoder_lower}_data/{self.autoencoder_lower}_pred.pt'

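        # Build the encoder/decoder pair for whichever representation
        # learner was requested: AIS, AE, DST, DDM, RNN, CDE, or ODERNN.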
        if self.autoencoder == 'AIS':
            self.container = AIS.ModelContainer(device, ais_gen_model,
                                                ais_pred_model)
            self.gen = self.container.make_encoder(
                self.hidden_size,
                self.state_dim,
                self.num_actions,
                context_input=self.context_input,
                context_dim=self.context_dim)
            self.pred = self.container.make_decoder(self.hidden_size,
                                                    self.state_dim,
                                                    self.num_actions)

        elif self.autoencoder == 'AE':
            self.container = AE.ModelContainer(device)
            self.gen = self.container.make_encoder(
                self.hidden_size,
                self.state_dim,
                self.num_actions,
                context_input=self.context_input,
                context_dim=self.context_dim)
            self.pred = self.container.make_decoder(self.hidden_size,
                                                    self.state_dim,
                                                    self.num_actions)

        elif self.autoencoder == 'DST':
            self.dst_hypers = dst_hypers
            self.container = DST.ModelContainer(device)
            self.gen = self.container.make_encoder(
                self.input_dim,
                self.hidden_size,
                gru_n_layers=self.dst_hypers['gru_n_layers'],
                augment_chs=self.dst_hypers['augment_chs'])
            self.pred = self.container.make_decoder(
                self.hidden_size, self.state_dim,
                self.dst_hypers['decoder_hidden_units'])

        elif self.autoencoder == 'DDM':
            self.container = DDM.ModelContainer(device)

            self.gen = self.container.make_encoder(
                self.state_dim,
                self.hidden_size,
                context_input=self.context_input,
                context_dim=self.context_dim)
            self.pred = self.container.make_decoder(self.state_dim,
                                                    self.hidden_size)
            self.dyn = self.container.make_dyn(self.num_actions,
                                               self.hidden_size)
            self.all_params = chain(self.gen.parameters(),
                                    self.pred.parameters(),
                                    self.dyn.parameters())

            self.inv_loss_coef = 10
            self.dec_loss_coef = 0.1
            self.max_grad_norm = 50

            self.dyn_file = folder_name + '/ddm_data/ddm_dyn.pt'

        elif self.autoencoder == 'RNN':
            self.container = RNN.ModelContainer(device)

            self.gen = self.container.make_encoder(
                self.hidden_size,
                self.state_dim,
                self.num_actions,
                context_input=self.context_input,
                context_dim=self.context_dim)
            self.pred = self.container.make_decoder(self.hidden_size,
                                                    self.state_dim,
                                                    self.num_actions)

        elif self.autoencoder == 'CDE':
            self.cde_hypers = cde_hypers

            self.container = CDE.ModelContainer(device)
            self.gen = self.container.make_encoder(
                self.input_dim + 1,
                self.hidden_size,
                hidden_hidden_channels=self.
                cde_hypers['encoder_hidden_hidden_channels'],
                num_hidden_layers=self.cde_hypers['encoder_num_hidden_layers'])
            self.pred = self.container.make_decoder(
                self.hidden_size, self.state_dim,
                self.cde_hypers['decoder_num_layers'],
                self.cde_hypers['decoder_num_units'])

        elif self.autoencoder == 'ODERNN':
            self.odernn_hypers = odernn_hypers
            self.container = ODERNN.ModelContainer(device)

            self.gen = self.container.make_encoder(self.input_dim,
                                                   self.hidden_size,
                                                   self.odernn_hypers)
            self.pred = self.container.make_decoder(
                self.hidden_size, self.state_dim,
                self.odernn_hypers['decoder_n_layers'],
                self.odernn_hypers['decoder_n_units'])
        else:
            raise NotImplementedError

        self.buffer_save_file = self.data_folder + '/ReplayBuffer'
        self.next_obs_pred_errors_file = self.data_folder + '/test_next_obs_pred_errors.pt'
        self.test_representations_file = self.data_folder + '/test_representations.pt'
        self.test_correlations_file = self.data_folder + '/test_correlations.pt'
        self.policy_eval_save_file = self.data_folder + '/dBCQ_policy_eval'
        self.policy_save_file = self.data_folder + '/dBCQ_policy'
        self.behav_policy_file_wDemo = behav_policy_file_wDemo
        self.behav_policy_file = behav_policy_file

        # Read in the data csv files
        assert (domain == 'sepsis')
        self.train_demog, self.train_states, self.train_interventions, self.train_lengths, self.train_times, self.acuities, self.rewards = torch.load(
            self.train_data_file)
        train_idx = torch.arange(self.train_demog.shape[0])
        self.train_dataset = TensorDataset(self.train_demog, self.train_states,
                                           self.train_interventions,
                                           self.train_lengths,
                                           self.train_times, self.acuities,
                                           self.rewards, train_idx)

        self.train_loader = DataLoader(self.train_dataset,
                                       batch_size=self.minibatch_size,
                                       shuffle=True)

        self.val_demog, self.val_states, self.val_interventions, self.val_lengths, self.val_times, self.val_acuities, self.val_rewards = torch.load(
            self.validation_data_file)
        val_idx = torch.arange(self.val_demog.shape[0])
        self.val_dataset = TensorDataset(self.val_demog, self.val_states,
                                         self.val_interventions,
                                         self.val_lengths, self.val_times,
                                         self.val_acuities, self.val_rewards,
                                         val_idx)

        self.val_loader = DataLoader(self.val_dataset,
                                     batch_size=self.minibatch_size,
                                     shuffle=False)

        self.test_demog, self.test_states, self.test_interventions, self.test_lengths, self.test_times, self.test_acuities, self.test_rewards = torch.load(
            self.test_data_file)
        test_idx = torch.arange(self.test_demog.shape[0])
        self.test_dataset = TensorDataset(self.test_demog, self.test_states,
                                          self.test_interventions,
                                          self.test_lengths, self.test_times,
                                          self.test_acuities,
                                          self.test_rewards, test_idx)

        self.test_loader = DataLoader(self.test_dataset,
                                      batch_size=self.minibatch_size,
                                      shuffle=False)

        # encode CDE data first to save time
        if self.autoencoder == 'CDE':
            self.train_coefs = load_cde_data('train', self.train_dataset,
                                             self.cde_hypers['coefs_folder'],
                                             self.context_input, device)
            self.val_coefs = load_cde_data('val', self.val_dataset,
                                           self.cde_hypers['coefs_folder'],
                                           self.context_input, device)
            self.test_coefs = load_cde_data('test', self.test_dataset,
                                            self.cde_hypers['coefs_folder'],
                                            self.context_input, device)