Example #1
def load_mlp_class_model(saved_model):
    '''Load model weights from path saved_model.'''
    config = ClassConfig
    model_params = {
        'input_shape': config.x_shape,
        'classes': config.classes,
        'batch_size': config.batch_size,
        'hidden_units': config.hidden_units,
        'mode': config.mode,
        'dropout': False
    }
    model = MLP(model_params)
    model.load_state_dict(torch.load(saved_model))

    return model.eval()
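
A minimal usage sketch for the loader above; the checkpoint path and tensor shape below are illustrative assumptions, not from the source.

# Hypothetical usage: path and input shape are assumptions.
model = load_mlp_class_model('checkpoints/class_model.pt')
with torch.no_grad():
    logits = model(torch.randn(8, 16))  # batch of 8 dummy inputs with 16 features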
Example #2
File: models.py Project: alvinzz/LRMBMRL
    def __init__(self,
                 name,
                 latent_dim,
                 action_dim,
                 in_layer=None,
                 out_activation=None,
                 hidden_dims=[64, 64, 64],
                 hidden_activation=tf.nn.tanh,
                 weight_init=tf.contrib.layers.xavier_initializer,
                 bias_init=tf.zeros_initializer):
        with tf.variable_scope(name):
            if in_layer is None:
                self.zs = tf.placeholder(tf.float32,
                                         shape=[None, latent_dim],
                                         name='zs')
            else:
                self.zs = in_layer
            self.actions = tf.placeholder(tf.float32,
                                          shape=[None, action_dim],
                                          name='actions')
            self.za_concat = tf.concat([self.zs, self.actions], axis=1)

            self.model_network = MLP('model',
                                     latent_dim + action_dim,
                                     latent_dim,
                                     out_activation=out_activation,
                                     hidden_dims=hidden_dims,
                                     hidden_activation=hidden_activation,
                                     weight_init=weight_init,
                                     bias_init=bias_init,
                                     in_layer=self.za_concat)
            self.pred_z = self.model_network.layers['out']
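
A hedged TF1 usage sketch for the dynamics model above. The wrapping class name LatentDynamicsModel and the zero-filled batches are assumptions for illustration.

import numpy as np
import tensorflow as tf

dyn = LatentDynamicsModel('dynamics', latent_dim=8, action_dim=2)  # hypothetical class name
z_batch = np.zeros((4, 8), dtype=np.float32)  # illustrative latent batch
a_batch = np.zeros((4, 2), dtype=np.float32)  # illustrative action batch
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    next_z = sess.run(dyn.pred_z, feed_dict={dyn.zs: z_batch, dyn.actions: a_batch})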
Example #3
File: decoders.py Project: alvinzz/LRMBMRL
    def __init__(self,
                 name,
                 latent_dim,
                 ob_dim,
                 in_layer=None,
                 out_activation=None,
                 hidden_dims=[64, 64, 64],
                 hidden_activation=tf.nn.tanh,
                 weight_init=tf.contrib.layers.xavier_initializer,
                 bias_init=tf.zeros_initializer):
        with tf.variable_scope(name):
            if in_layer is None:
                self.zs = tf.placeholder(tf.float32,
                                         shape=[None, latent_dim],
                                         name='zs')
            else:
                self.zs = in_layer

            self.decoder_network = MLP('decoder',
                                       latent_dim,
                                       ob_dim,
                                       out_activation=out_activation,
                                       hidden_dims=hidden_dims,
                                       hidden_activation=hidden_activation,
                                       weight_init=weight_init,
                                       bias_init=bias_init,
                                       in_layer=self.zs)
            self.decoded = self.decoder_network.layers['out']
Example #4
def model(data, labels):
    network = MLP(n_pixels, 10, args.units, args.nonlinear)
    if cuda:
        network.cuda()
    optimizer = th.optim.Adam(network.parameters(), args.lr)
    prediction_list = []
    # alternate prediction and training: record a prediction, then take one
    # optimization step, skipping the update after the final prediction
    for i in range(args.T):
        category = network(data)
        prediction_list.append(category)

        if i != args.T - 1:
            loss = F.cross_entropy(category, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    return prediction_list
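
For orientation, a hedged call pattern for model() above: it returns args.T prediction tensors, training between consecutive ones. The input tensors are assumed to be prepared elsewhere.

predictions = model(images, labels)            # images, labels: batched tensors, assumed
final_classes = predictions[-1].argmax(dim=1)  # last (most-trained) prediction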
Example #5
File: encoders.py Project: alvinzz/LRMBMRL
    def __init__(
        self,
        name,
        ob_dim,
        latent_dim,
        in_layer=None,
        out_activation=None,
        hidden_dims=[64, 64, 64],
        hidden_activation=tf.nn.tanh,
        weight_init=tf.contrib.layers.xavier_initializer,
        bias_init=tf.zeros_initializer,
        reuse_scope=False,
    ):
        with tf.variable_scope(name, reuse=reuse_scope):
            if in_layer is None:
                self.obs = tf.placeholder(tf.float32,
                                          shape=[None, ob_dim],
                                          name='obs')
            else:
                self.obs = in_layer

            self.mean_network = MLP('means',
                                    ob_dim,
                                    latent_dim,
                                    out_activation=out_activation,
                                    hidden_dims=hidden_dims,
                                    hidden_activation=hidden_activation,
                                    weight_init=weight_init,
                                    bias_init=bias_init,
                                    in_layer=self.obs)
            self.means = self.mean_network.layers['out']

            self.log_var_network = MLP('log_vars',
                                       ob_dim,
                                       latent_dim,
                                       out_activation=out_activation,
                                       hidden_dims=hidden_dims,
                                       hidden_activation=hidden_activation,
                                       weight_init=weight_init,
                                       bias_init=bias_init,
                                       in_layer=self.obs)
            self.log_vars = self.log_var_network.layers['out']

            self.distribution = DiagGaussian(self.means, self.log_vars)
            self.zs = self.distribution.sample()
Example #6
 def define_agent(self, width, height, num_actions):
     return DQNAgent(config=Config(num_actions=num_actions,
                                   encoder=OneHotEncoder(width, height),
                                   optimizer=AdamOptimizer(0.01),
                                   network=MLP(),
                                   policy=EpsilonGreedyPolicy(1, 0.01, 500),
                                   discount=0.95,
                                   capacity=100,
                                   batch_size=16))
Example #7
 def define_agent(self, width, height, num_actions):
     return NStepDQNAgent(
         config=Config(num_actions=num_actions,
                       encoder=OneHotEncoder(width, height),
                       optimizer=AdamOptimizer(0.01),
                       network=MLP(),
                       policy=EpsilonGreedyPolicy(1, 0.01, 1000),
                       discount=0.95,
                       n_step=8))
Example #8
class MLP_Regression():
    def __init__(self, label, parameters):
        super().__init__()
        self.writer = SummaryWriter(comment=f"_{label}_training")
        self.label = label
        self.lr = parameters['lr']
        self.hidden_units = parameters['hidden_units']
        self.mode = parameters['mode']
        self.batch_size = parameters['batch_size']
        self.num_batches = parameters['num_batches']
        self.x_shape = parameters['x_shape']
        self.y_shape = parameters['y_shape']
        self.save_model_path = f'{parameters["save_dir"]}/{label}_model.pt'
        self.best_loss = np.inf
        self.init_net(parameters)

    def init_net(self, parameters):
        if not os.path.exists(parameters["save_dir"]):
            os.makedirs(parameters["save_dir"])

        model_params = {
            'input_shape': self.x_shape,
            'classes': self.y_shape,
            'batch_size': self.batch_size,
            'hidden_units': self.hidden_units,
            'mode': self.mode
        }
        self.net = MLP(model_params).to(DEVICE)
        self.optimiser = torch.optim.Adam(self.net.parameters(), lr=self.lr)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimiser,
                                                         step_size=5000,
                                                         gamma=0.5)
        print("MLP Parameters: ")
        print(
            f'batch size: {self.batch_size}, input shape: {model_params["input_shape"]}, hidden units: {model_params["hidden_units"]}, output shape: {model_params["classes"]}, lr: {self.lr}'
        )

    def train_step(self, train_data):
        self.net.train()
        for _, (x, y) in enumerate(train_data):
            x, y = x.to(DEVICE), y.to(DEVICE)
            self.net.zero_grad()
            self.loss_info = torch.nn.functional.mse_loss(self.net(x),
                                                          y,
                                                          reduction='sum')
            self.loss_info.backward()
            self.optimiser.step()

        self.epoch_loss = self.loss_info.item()

    def evaluate(self, x_test):
        self.net.eval()
        with torch.no_grad():
            y_test = self.net(x_test.to(DEVICE)).detach().cpu().numpy()
            return y_test

    def log_progress(self, step):
        write_loss(self.writer, self.loss_info, step)
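
A sketch of how MLP_Regression might be driven, assuming a parameters dict with the keys read in __init__ and a standard DataLoader of (x, y) batches; every literal below is illustrative.

params = {  # all values are illustrative assumptions
    'lr': 1e-3, 'hidden_units': 128, 'mode': 'regression',
    'batch_size': 64, 'num_batches': 100,
    'x_shape': 10, 'y_shape': 1, 'save_dir': './checkpoints',
}
regressor = MLP_Regression('demo', params)
for step in range(50):                  # illustrative epoch count
    regressor.train_step(train_loader)  # train_loader: a torch DataLoader, assumed
    regressor.log_progress(step)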
Example #9
    def init_net(self, parameters):
        if not os.path.exists(parameters["save_dir"]):
            os.makedirs(parameters["save_dir"])

        model_params = {
            'input_shape': self.x_shape,
            'classes': self.y_shape,
            'batch_size': self.batch_size,
            'hidden_units': self.hidden_units,
            'mode': self.mode
        }
        self.net = MLP(model_params).to(DEVICE)
        self.optimiser = torch.optim.Adam(self.net.parameters(), lr=self.lr)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimiser,
                                                         step_size=5000,
                                                         gamma=0.5)
        print("MLP Parameters: ")
        print(
            f'batch size: {self.batch_size}, input shape: {model_params["input_shape"]}, hidden units: {model_params["hidden_units"]}, output shape: {model_params["classes"]}, lr: {self.lr}'
        )
Example #10
    def __init__(self, opt):
        super(VanillaAE, self).__init__()
        self.opt = opt
        self.device = torch.device("cuda:0" if not opt.no_cuda else "cpu")
        nc = int(opt.nc)
        imageSize = int(opt.imageSize)
        nz = int(opt.nz)
        nblk = int(opt.nblk)

        # generator
        self.netG = MLP(input_dim=nc * imageSize * imageSize,
                        output_dim=nc * imageSize * imageSize,
                        dim=nz,
                        n_blk=nblk,
                        norm='none',
                        activ='relu').to(self.device)
        weights_init(self.netG)
        if opt.netG != '':
            self.netG.load_state_dict(
                torch.load(opt.netG, map_location=self.device))
        print_and_write_log(opt.train_log_file, 'netG:')
        print_and_write_log(opt.train_log_file, str(self.netG))

        # losses
        self.criterion = nn.MSELoss()
        # define focal frequency loss
        self.criterion_freq = FFL(loss_weight=opt.ffl_w,
                                  alpha=opt.alpha,
                                  patch_factor=opt.patch_factor,
                                  ave_spectrum=opt.ave_spectrum,
                                  log_matrix=opt.log_matrix,
                                  batch_matrix=opt.batch_matrix).to(
                                      self.device)

        # misc
        self.to(self.device)

        # optimizer
        self.optimizerG = optim.Adam(self.netG.parameters(),
                                     lr=opt.lr,
                                     betas=(opt.beta1, opt.beta2))
Example #11
class Greedy_Bandit(Bandit):
    def __init__(self, label, *args):
        super().__init__(label, *args)
        self.writer = SummaryWriter(comment=f"_{label}_training"),

    def init_net(self, parameters):
        model_params = {
            'input_shape': self.x.shape[1] + 2,
            'classes': 1 if len(self.y.shape) == 1 else self.y.shape[1],
            'batch_size': self.batch_size,
            'hidden_units': parameters['hidden_units'],
            'mode': parameters['mode']
        }
        self.net = MLP(model_params).to(DEVICE)
        self.optimiser = torch.optim.Adam(self.net.parameters(), lr=self.lr)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimiser,
                                                         step_size=5000,
                                                         gamma=0.5)
        print(f'Bandit {self.label} Parameters: ')
        print(
            f'buffer_size: {self.buffer_size}, batch size: {self.batch_size}, number of samples: {self.n_samples}, epsilon: {self.epsilon}'
        )
        print("MLP Parameters: ")
        print(
            f'input shape: {model_params["input_shape"]}, hidden units: {model_params["hidden_units"]}, output shape: {model_params["classes"]}, lr: {self.lr}'
        )

    def loss_step(self, x, y, batch_id):
        self.net.train()
        self.net.zero_grad()
        net_loss = torch.nn.functional.mse_loss(self.net(x).squeeze(),
                                                y,
                                                reduction='sum')
        net_loss.backward()
        self.optimiser.step()
        return net_loss

    def log_progress(self, step):
        write_loss(self.writer[0], self.loss_info, step)
        self.writer[0].add_scalar('logs/cumulative_regret',
                                  self.cumulative_regrets[-1], step)
Example #12
 def init_net(self, parameters):
     model_params = {
         'input_shape': self.x.shape[1] + 2,
         'classes': 1 if len(self.y.shape) == 1 else self.y.shape[1],
         'batch_size': self.batch_size,
         'hidden_units': parameters['hidden_units'],
         'mode': parameters['mode']
     }
     self.net = MLP(model_params).to(DEVICE)
     self.optimiser = torch.optim.Adam(self.net.parameters(), lr=self.lr)
     self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimiser,
                                                      step_size=5000,
                                                      gamma=0.5)
     print(f'Bandit {self.label} Parameters: ')
     print(
         f'buffer_size: {self.buffer_size}, batch size: {self.batch_size}, number of samples: {self.n_samples}, epsilon: {self.epsilon}'
     )
     print("MLP Parameters: ")
     print(
         f'input shape: {model_params["input_shape"]}, hidden units: {model_params["hidden_units"]}, output shape: {model_params["classes"]}, lr: {self.lr}'
     )
Example #13
    def do_stuff(opt):
        print(f'\nTraining {opt} for {args.num_epochs} epochs...')
        net = CNN() if args.dataset == 'cifar' else MLP()
        _, kwargs = misc.split_optim_dict(misc.optim_dict[opt])
        optimizer = misc.task_to_optimizer(opt)(params=net.parameters(),
                                                **kwargs)
        optimizer = misc.wrap_optimizer(opt, optimizer)

        return fit(net,
                   data,
                   optimizer,
                   num_epochs=args.num_epochs,
                   lr_schedule=True)
Example #14
    def init_net(self, parameters):
        if not os.path.exists(parameters["save_dir"]):
            os.makedirs(parameters["save_dir"])

        model_params = {
            'input_shape': self.x_shape,
            'classes': self.classes,
            'batch_size': self.batch_size,
            'hidden_units': self.hidden_units,
            'mode': self.mode,
            'dropout': self.dropout,
        }
        if self.dropout:
            self.net = MLP_Dropout(model_params).to(DEVICE)
            print('MLP Dropout Parameters: ')
            print(f'batch size: {self.batch_size}, input shape: {model_params["input_shape"]}, hidden units: {model_params["hidden_units"]}, output shape: {model_params["classes"]}, lr: {self.lr}')
        else:
            self.net = MLP(model_params).to(DEVICE)
            print('MLP Parameters: ')
            print(f'batch size: {self.batch_size}, input shape: {model_params["input_shape"]}, hidden units: {model_params["hidden_units"]}, output shape: {model_params["classes"]}, lr: {self.lr}')
        self.optimiser = torch.optim.SGD(self.net.parameters(), lr=self.lr)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimiser, step_size=100, gamma=0.5)
Example #15
    def do_stuff(opt):
        print(f'\nTraining {opt} for {args.num_epochs} epochs...')
        net = CNN() if args.dataset == 'cifar' else MLP()
        _, kwargs = misc.split_optim_dict(misc.optim_dict[opt])
        optimizer = misc.task_to_optimizer(opt)(params=net.parameters(),
                                                **kwargs)

        if 'lookahead' in opt.lower():
            optimizer = optimizers.Lookahead(optimizer, k=5, alpha=0.5)

        return fit(net,
                   data,
                   optimizer,
                   num_epochs=args.num_epochs,
                   lr_schedule=True)
Example #16
    def __init__(
            self,
            input_dim,
            dim,
            style_dim,
            n_downsample,
            n_res,
            activ,
            pad_type,
            mlp_dim):
        super(AdaINGen, self).__init__()

        # style encoder
        self.enc_style = StyleEncoder(4, input_dim, dim, style_dim, norm='none', activ=activ, pad_type=pad_type)

        # content encoder
        self.enc_content: ContentEncoder = ContentEncoder(n_downsample, n_res, input_dim, dim, 'in', activ, pad_type=pad_type)
        self.dec: Decoder = Decoder(n_downsample, n_res, self.enc_content.output_dim, input_dim, res_norm='adain', activ=activ, pad_type=pad_type)

        # MLP to generate AdaIN parameters
        self.mlp = MLP(style_dim, self.get_num_adain_params(self.dec), mlp_dim, 3, norm='none', activ=activ)
Example #17
tf.reset_default_graph()

x_in = tf.placeholder(dtype=tf.float64, shape=[None, noFets])
y_in = tf.placeholder(dtype=tf.int64, shape=[None])
lmbdaIn = tf.placeholder(dtype=tf.float64, shape=[])

if regType == 1:
    reg = tf.contrib.layers.l1_regularizer(lmbdaIn)
else:
    reg = tf.contrib.layers.l2_regularizer(2 * lmbdaIn)

with tf.variable_scope("Model"):
    logits = MLP(x_in, units, tf.nn.relu, outDim, reg=reg)

if outDim != 1:
    loss = tf.losses.sparse_softmax_cross_entropy(labels=y_in, logits=logits)
else:
    loss = tf.losses.sigmoid_cross_entropy(labels=y_in, logits=logits)

# count kernel/bias parameters so the regularization loss can be normalized per parameter
totalPar = 0
for iTens in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Model'):
    if 'kernel:' in iTens.name or 'bias:' in iTens.name:
        totalPar += np.prod(iTens.shape.as_list())

lossReg = tf.losses.get_regularization_loss() / totalPar
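
One plausible way to finish this graph, hedged since the training step is not shown in the snippet: add the per-parameter regularization term to the data loss and minimize the sum. The optimizer choice and learning rate are assumptions.

totalLoss = loss + lossReg
train_op = tf.train.AdamOptimizer(1e-3).minimize(totalLoss)  # optimizer and rate are illustrative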
Example #18
os.makedirs(opt.resf, exist_ok=True)
if opt.show_input:
    resfwi = os.path.join(os.path.split(opt.resf)[0], os.path.split(opt.resf)[1] + '_with_input')
    os.makedirs(resfwi, exist_ok=True)
    opt.resfwi = resfwi

print_and_write_log(test_log_file, opt)

device = torch.device("cuda:0" if not opt.no_cuda else "cpu")
nc = int(opt.nc)
imageSize = int(opt.imageSize)
nz = int(opt.nz)
nblk = int(opt.nblk)
model_netG = MLP(input_dim=nc * imageSize * imageSize,
                 output_dim=nc * imageSize * imageSize,
                 dim=nz,
                 n_blk=nblk,
                 norm='none',
                 activ='relu').to(device)
model_netG.load_state_dict(torch.load(opt.netG, map_location=device))
print_and_write_log(test_log_file, 'netG:')
print_and_write_log(test_log_file, str(model_netG))

if opt.eval:
    model_netG.eval()

for i, data in enumerate(tqdm(dataloader), 0):
    img, img_path = data
    img_name = os.path.splitext(os.path.basename(img_path[0]))[0] + '.png'
    if i >= opt.num_test:
        break
    real = img.to(device)
Example #19
from networks import CNN, SoftMax, MLP, DAELayer, DAESoftMax  # DAELayer/DAESoftMax added; assumed to live in networks
from modules import buildModel, reconstructionAttack, buildDAELayer, buildDAESoftmaxModel

print("Building Models")
print("SoftMax")
buildModel(SoftMax(), 0.1, True, True)
    
print("MLP")
buildModel(MLP(), 0.1, True, True)

print("DAE")
buildDAELayer(DAELayer(10304, 1000), lRate=1e-4, epochs=5000, plot=True)
buildDAELayer(DAELayer(1000, 300), lRate=1e-4, epochs=5000, plot=True)
buildDAESoftmaxModel(DAESoftMax(), lRate=1e-2, epochs=1000, plot=True)

print("CNN")
buildModel(CNN(), 0.001, True, True)
Example #20
class MLP_Classification():
    def __init__(self, label, parameters):
        super().__init__()
        self.writer = SummaryWriter(comment=f"_{label}_training")
        self.label = label
        self.lr = parameters['lr']
        self.hidden_units = parameters['hidden_units']
        self.mode = parameters['mode']
        self.batch_size = parameters['batch_size']
        self.num_batches = parameters['num_batches']
        self.x_shape = parameters['x_shape']
        self.classes = parameters['classes']
        self.save_model_path = f'{parameters["save_dir"]}/{label}_model.pt'
        self.best_acc = 0.
        self.dropout = parameters['dropout']
        self.init_net(parameters)
    
    def init_net(self, parameters):
        if not os.path.exists(parameters["save_dir"]):
            os.makedirs(parameters["save_dir"])

        model_params = {
            'input_shape': self.x_shape,
            'classes': self.classes,
            'batch_size': self.batch_size,
            'hidden_units': self.hidden_units,
            'mode': self.mode,
            'dropout': self.dropout,
        }
        if self.dropout:
            self.net = MLP_Dropout(model_params).to(DEVICE)
            print('MLP Dropout Parameters: ')
            print(f'batch size: {self.batch_size}, input shape: {model_params["input_shape"]}, hidden units: {model_params["hidden_units"]}, output shape: {model_params["classes"]}, lr: {self.lr}')
        else:
            self.net = MLP(model_params).to(DEVICE)
            print('MLP Parameters: ')
            print(f'batch size: {self.batch_size}, input shape: {model_params["input_shape"]}, hidden units: {model_params["hidden_units"]}, output shape: {model_params["classes"]}, lr: {self.lr}')
        self.optimiser = torch.optim.SGD(self.net.parameters(), lr=self.lr)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimiser, step_size=100, gamma=0.5)

    def train_step(self, train_data):
        self.net.train()
        for _, (x, y) in enumerate(tqdm(train_data)):
            x, y = x.to(DEVICE), y.to(DEVICE)
            self.net.zero_grad()
            self.loss_info = torch.nn.functional.cross_entropy(self.net(x), y, reduction='sum')
            self.loss_info.backward()
            self.optimiser.step()

    def predict(self, X):
        probs = torch.nn.Softmax(dim=1)(self.net(X))
        preds = torch.argmax(probs, dim=1)
        return preds, probs

    def evaluate(self, test_loader):
        self.net.eval()
        print('Evaluating on validation data')
        correct = 0
        total = 0

        with torch.no_grad():
            for data in tqdm(test_loader):
                X, y = data
                X, y = X.to(DEVICE), y.to(DEVICE)
                preds, _ = self.predict(X)
                total += self.batch_size
                correct += (preds == y).sum().item()
        self.acc = correct / total
        print(f'{self.label} validation accuracy: {self.acc}')    

    def log_progress(self, step):
        write_loss(self.writer, self.loss_info, step)
        write_acc(self.writer, self.acc, step)
Example #21
def main(args):
    torch.manual_seed(args.seed)
    if not os.path.exists(args.res_dir):
        os.mkdir(args.res_dir)
    if not os.path.exists(
            os.path.join(args.res_dir, args.type + str(args.noise))):
        os.mkdir(os.path.join(args.res_dir, args.type + str(args.noise)))
    if not os.path.exists(
            os.path.join(args.res_dir, args.type + str(args.noise),
                         str(args.pace))):
        os.mkdir(
            os.path.join(args.res_dir, args.type + str(args.noise),
                         str(args.pace)))

    if not os.path.exists(args.model_dir):
        os.mkdir(args.model_dir)

    res_dir = os.path.join(args.res_dir, args.type + str(args.noise),
                           str(args.pace))

    data1 = dd.io.load(os.path.join(args.vec_dir, 'NYU_correlation_matrix.h5'))
    data2 = dd.io.load(os.path.join(args.vec_dir, 'UM_correlation_matrix.h5'))
    data3 = dd.io.load(os.path.join(args.vec_dir, 'USM_correlation_matrix.h5'))
    data4 = dd.io.load(os.path.join(args.vec_dir,
                                    'UCLA_correlation_matrix.h5'))

    x1 = torch.from_numpy(data1['data']).float()
    y1 = torch.from_numpy(data1['label']).long()
    x2 = torch.from_numpy(data2['data']).float()
    y2 = torch.from_numpy(data2['label']).long()
    x3 = torch.from_numpy(data3['data']).float()
    y3 = torch.from_numpy(data3['label']).long()
    x4 = torch.from_numpy(data4['data']).float()
    y4 = torch.from_numpy(data4['label']).long()

    if args.overlap:
        idNYU = dd.io.load('./idx/NYU_sub_overlap.h5')
        idUM = dd.io.load('./idx/UM_sub_overlap.h5')
        idUSM = dd.io.load('./idx/USM_sub_overlap.h5')
        idUCLA = dd.io.load('./idx/UCLA_sub_overlap.h5')
    else:
        idNYU = dd.io.load('./idx/NYU_sub.h5')
        idUM = dd.io.load('./idx/UM_sub.h5')
        idUSM = dd.io.load('./idx/USM_sub.h5')
        idUCLA = dd.io.load('./idx/UCLA_sub.h5')

    if args.split == 0:
        tr1 = idNYU['1'] + idNYU['2'] + idNYU['3'] + idNYU['4']
        tr2 = idUM['1'] + idUM['2'] + idUM['3'] + idUM['4']
        tr3 = idUSM['1'] + idUSM['2'] + idUSM['3'] + idUSM['4']
        tr4 = idUCLA['1'] + idUCLA['2'] + idUCLA['3'] + idUCLA['4']
        te1 = idNYU['0']
        te2 = idUM['0']
        te3 = idUSM['0']
        te4 = idUCLA['0']
    elif args.split == 1:
        tr1 = idNYU['0'] + idNYU['2'] + idNYU['3'] + idNYU['4']
        tr2 = idUM['0'] + idUM['2'] + idUM['3'] + idUM['4']
        tr3 = idUSM['0'] + idUSM['2'] + idUSM['3'] + idUSM['4']
        tr4 = idUCLA['0'] + idUCLA['2'] + idUCLA['3'] + idUCLA['4']
        te1 = idNYU['1']
        te2 = idUM['1']
        te3 = idUSM['1']
        te4 = idUCLA['1']
    elif args.split == 2:
        tr1 = idNYU['0'] + idNYU['1'] + idNYU['3'] + idNYU['4']
        tr2 = idUM['0'] + idUM['1'] + idUM['3'] + idUM['4']
        tr3 = idUSM['0'] + idUSM['1'] + idUSM['3'] + idUSM['4']
        tr4 = idUCLA['0'] + idUCLA['1'] + idUCLA['3'] + idUCLA['4']
        te1 = idNYU['2']
        te2 = idUM['2']
        te3 = idUSM['2']
        te4 = idUCLA['2']
    elif args.split == 3:
        tr1 = idNYU['0'] + idNYU['1'] + idNYU['2'] + idNYU['4']
        tr2 = idUM['0'] + idUM['1'] + idUM['2'] + idUM['4']
        tr3 = idUSM['0'] + idUSM['1'] + idUSM['2'] + idUSM['4']
        tr4 = idUCLA['0'] + idUCLA['1'] + idUCLA['2'] + idUCLA['4']
        te1 = idNYU['3']
        te2 = idUM['3']
        te3 = idUSM['3']
        te4 = idUCLA['3']
    elif args.split == 4:
        tr1 = idNYU['0'] + idNYU['1'] + idNYU['2'] + idNYU['3']
        tr2 = idUM['0'] + idUM['1'] + idUM['2'] + idUM['3']
        tr3 = idUSM['0'] + idUSM['1'] + idUSM['2'] + idUSM['3']
        tr4 = idUCLA['0'] + idUCLA['1'] + idUCLA['2'] + idUCLA['3']
        te1 = idNYU['4']
        te2 = idUM['4']
        te3 = idUSM['4']
        te4 = idUCLA['4']

    x1_train = x1[tr1]
    y1_train = y1[tr1]
    x2_train = x2[tr2]
    y2_train = y2[tr2]
    x3_train = x3[tr3]
    y3_train = y3[tr3]
    x4_train = x4[tr4]
    y4_train = y4[tr4]

    x1_test = x1[te1]
    y1_test = y1[te1]
    x2_test = x2[te2]
    y2_test = y2[te2]
    x3_test = x3[te3]
    y3_test = y3[te3]
    x4_test = x4[te4]
    y4_test = y4[te4]

    if args.sepnorm:
        mean = x1_train.mean(0, keepdim=True)
        dev = x1_train.std(0, keepdim=True)
        x1_train = (x1_train - mean) / dev
        x1_test = (x1_test - mean) / dev

        mean = x2_train.mean(0, keepdim=True)
        dev = x2_train.std(0, keepdim=True)
        x2_train = (x2_train - mean) / dev
        x2_test = (x2_test - mean) / dev

        mean = x3_train.mean(0, keepdim=True)
        dev = x3_train.std(0, keepdim=True)
        x3_train = (x3_train - mean) / dev
        x3_test = (x3_test - mean) / dev

        mean = x4_train.mean(0, keepdim=True)
        dev = x4_train.std(0, keepdim=True)
        x4_train = (x4_train - mean) / dev
        x4_test = (x4_test - mean) / dev
    else:
        mean = torch.cat((x1_train, x2_train, x3_train, x4_train),
                         0).mean(0, keepdim=True)
        dev = torch.cat((x1_train, x2_train, x3_train, x4_train),
                        0).std(0, keepdim=True)
        x1_train = (x1_train - mean) / dev
        x1_test = (x1_test - mean) / dev
        x2_train = (x2_train - mean) / dev
        x2_test = (x2_test - mean) / dev
        x3_train = (x3_train - mean) / dev
        x3_test = (x3_test - mean) / dev
        x4_train = (x4_train - mean) / dev
        x4_test = (x4_test - mean) / dev

    train1 = TensorDataset(x1_train, y1_train)
    train_loader1 = DataLoader(train1,
                               batch_size=len(train1) // args.nsteps,
                               shuffle=True)
    train2 = TensorDataset(x2_train, y2_train)
    train_loader2 = DataLoader(train2,
                               batch_size=len(train2) // args.nsteps,
                               shuffle=True)
    train3 = TensorDataset(x3_train, y3_train)
    train_loader3 = DataLoader(train3,
                               batch_size=len(train3) // args.nsteps,
                               shuffle=True)
    train4 = TensorDataset(x4_train, y4_train)
    train_loader4 = DataLoader(train4,
                               batch_size=len(train4) // args.nsteps,
                               shuffle=True)
    train_all = ConcatDataset([train1, train2, train3, train4])
    train_loader = DataLoader(train_all, batch_size=500, shuffle=False)

    test1 = TensorDataset(x1_test, y1_test)
    test2 = TensorDataset(x2_test, y2_test)
    test3 = TensorDataset(x3_test, y3_test)
    test4 = TensorDataset(x4_test, y4_test)
    test_loader1 = DataLoader(test1,
                              batch_size=args.test_batch_size1,
                              shuffle=False)
    test_loader2 = DataLoader(test2,
                              batch_size=args.test_batch_size2,
                              shuffle=False)
    test_loader3 = DataLoader(test3,
                              batch_size=args.test_batch_size3,
                              shuffle=False)
    test_loader4 = DataLoader(test4,
                              batch_size=args.test_batch_size4,
                              shuffle=False)
    tbs = [
        args.test_batch_size1, args.test_batch_size2, args.test_batch_size3,
        args.test_batch_size4
    ]

    model1 = MLP(6105, args.dim, 2).to(device)
    model2 = MLP(6105, args.dim, 2).to(device)
    model3 = MLP(6105, args.dim, 2).to(device)
    model4 = MLP(6105, args.dim, 2).to(device)
    optimizer1 = optim.Adam(model1.parameters(),
                            lr=args.lr1,
                            weight_decay=5e-2)
    optimizer2 = optim.Adam(model2.parameters(),
                            lr=args.lr2,
                            weight_decay=5e-2)
    optimizer3 = optim.Adam(model3.parameters(),
                            lr=args.lr3,
                            weight_decay=5e-2)
    optimizer4 = optim.Adam(model4.parameters(),
                            lr=args.lr4,
                            weight_decay=5e-2)

    models = [model1, model2, model3, model4]
    train_loaders = [
        train_loader1, train_loader2, train_loader3, train_loader4
    ]
    optimizers = [optimizer1, optimizer2, optimizer3, optimizer4]

    model = MLP(6105, args.dim, 2).to(device)
    print(model)
    nnloss = nn.NLLLoss()

    def train(epoch):
        pace = args.pace
        for i in range(4):
            models[i].train()
            # both branches were identical: halve the learning rate every 20 epochs
            if epoch % 20 == 0:
                for param_group1 in optimizers[i].param_groups:
                    param_group1['lr'] = 0.5 * param_group1['lr']

        # define per-site aggregation weights (uniform; a size-proportional
        # alternative based on the test batch sizes is left commented)
        w = dict()
        denominator = np.sum(np.array(tbs))
        for i in range(4):
            w[i] = 0.25  # alternatively: tbs[i] / denominator

        loss_all = dict()
        num_data = dict()
        for i in range(4):
            loss_all[i] = 0
            num_data[i] = 0
        count = 0
        for t in range(args.nsteps):
            for i in range(4):
                optimizers[i].zero_grad()
                a, b = next(iter(train_loaders[i]))  # draws a fresh shuffled batch each call
                num_data[i] += b.size(0)
                a = a.to(device)
                b = b.to(device)
                output = models[i](a)
                loss = nnloss(output, b)
                loss.backward()
                loss_all[i] += loss.item() * b.size(0)
                optimizers[i].step()
            count += 1
            if count % pace == 0 or t == args.nsteps - 1:
                with torch.no_grad():
                    for key in model.state_dict().keys():
                        if models[0].state_dict()[key].dtype == torch.int64:
                            model.state_dict()[key].data.copy_(
                                models[0].state_dict()[key])
                        else:
                            temp = torch.zeros_like(model.state_dict()[key])
                            # add noise
                            for s in range(4):
                                if args.type == 'G':
                                    nn = tdist.Normal(
                                        torch.tensor([0.0]),
                                        args.noise *
                                        torch.std(models[s].state_dict()
                                                  [key].detach().cpu()))
                                else:
                                    nn = tdist.Laplace(
                                        torch.tensor([0.0]),
                                        args.noise *
                                        torch.std(models[s].state_dict()
                                                  [key].detach().cpu()))
                                noise = nn.sample(models[s].state_dict()
                                                  [key].size()).squeeze()
                                noise = noise.to(device)
                                temp += w[s] * (models[s].state_dict()[key] +
                                                noise)
                            # update global model
                            model.state_dict()[key].data.copy_(temp)
                            # update local models
                            for s in range(4):
                                models[s].state_dict()[key].data.copy_(
                                    model.state_dict()[key])

        return loss_all[0] / num_data[0], loss_all[1] / num_data[1], \
               loss_all[2] / num_data[2], loss_all[3] / num_data[3]

    def test(federated_model, dataloader, train=False):
        federated_model.eval()
        test_loss = 0
        correct = 0
        outputs = []
        preds = []
        targets = []
        for data, target in dataloader:
            targets.append(target[0].detach().numpy())
            data = data.to(device)
            target = target.to(device)
            output = federated_model(data)
            outputs.append(output.detach().cpu().numpy())
            test_loss += nnloss(output, target).item() * target.size(0)
            pred = output.data.max(1)[1]
            preds.append(pred.detach().cpu().numpy())
            correct += pred.eq(target.view(-1)).sum().item()

        test_loss /= len(dataloader.dataset)
        correct /= len(dataloader.dataset)
        if train:
            print('Train set local: Average loss: {:.4f}, Average acc: {:.4f}'.
                  format(test_loss, correct))
        else:
            print('Test set local: Average loss: {:.4f}, Average acc: {:.4f}'.
                  format(test_loss, correct))
        return test_loss, correct, targets, outputs, preds

    best_acc = 0
    best_epoch = 0
    train_loss = dict()
    for i in range(4):
        train_loss[i] = list()
    for epoch in range(args.epochs):
        start_time = time.time()
        print(f"Epoch Number {epoch + 1}")
        l1, l2, l3, l4 = train(epoch)
        print(
            ' L1 loss: {:.4f}, L2 loss: {:.4f}, L3 loss: {:.4f}, L4 loss: {:.4f}'
            .format(l1, l2, l3, l4))
        train_loss[0].append(l1)
        train_loss[1].append(l2)
        train_loss[2].append(l3)
        train_loss[3].append(l4)
        test(model, train_loader, train=True)

        print('===NYU===')
        _, acc1, targets1, outputs1, preds1 = test(model,
                                                   test_loader1,
                                                   train=False)
        print('===UM===')
        _, acc2, targets2, outputs2, preds2 = test(model,
                                                   test_loader2,
                                                   train=False)
        print('===USM===')
        _, acc3, targets3, outputs3, preds3 = test(model,
                                                   test_loader3,
                                                   train=False)
        print('===UCLA===')
        _, acc4, targets4, outputs4, preds4 = test(model,
                                                   test_loader4,
                                                   train=False)
        if (acc1 + acc2 + acc3 + acc4) / 4 > best_acc:
            best_acc = (acc1 + acc2 + acc3 + acc4) / 4
            best_epoch = epoch
        total_time = time.time() - start_time
        print('Communication time over the network', round(total_time, 2),
              's\n')
    model_wts = copy.deepcopy(model.state_dict())
    torch.save(model_wts, os.path.join(args.model_dir,
                                       str(args.split) + '.pth'))
    dd.io.save(os.path.join(res_dir, 'NYU_' + str(args.split) + '.h5'), {
        'outputs': outputs1,
        'preds': preds1,
        'targets': targets1
    })
    dd.io.save(os.path.join(res_dir, 'UM_' + str(args.split) + '.h5'), {
        'outputs': outputs2,
        'preds': preds2,
        'targets': targets2
    })
    dd.io.save(os.path.join(res_dir, 'USM_' + str(args.split) + '.h5'), {
        'outputs': outputs3,
        'preds': preds3,
        'targets': targets3
    })
    dd.io.save(os.path.join(res_dir, 'UCLA_' + str(args.split) + '.h5'), {
        'outputs': outputs4,
        'preds': preds4,
        'targets': targets4
    })
    dd.io.save(os.path.join(res_dir, 'train_loss.h5'), {'loss': train_loss})
    print('Best Acc:', best_acc)
    print('split:', args.split, '   noise:', args.noise, '   pace:', args.pace)
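
The aggregation block inside train() above is dense; as a readability aid, here is a self-contained sketch of the same noisy federated-averaging idea (Gaussian case only, uniform weights). The function name and its exact interface are assumptions for illustration, not part of the source.

import torch
import torch.distributions as tdist

def federated_average(global_model, local_models, noise_scale=0.0):
    """Average local state dicts into the global model, adding Gaussian noise
    scaled by each tensor's std, then broadcast the result back to the locals."""
    with torch.no_grad():
        w = 1.0 / len(local_models)
        for key, g_param in global_model.state_dict().items():
            if g_param.dtype == torch.int64:  # integer buffers: copy one local's value
                g_param.data.copy_(local_models[0].state_dict()[key])
                continue
            acc = torch.zeros_like(g_param)
            for m in local_models:
                t = m.state_dict()[key]
                noise = torch.zeros_like(t)
                std = noise_scale * torch.std(t.detach().cpu())
                if std > 0:
                    noise = tdist.Normal(0.0, std).sample(t.size()).to(t.device)
                acc += w * (t + noise)
            g_param.data.copy_(acc)
            for m in local_models:  # synchronize local copies with the new global weights
                m.state_dict()[key].data.copy_(acc)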
Example #22
import torch
import sys
import argparse
from networks import CNN, SoftMax, MLP, DAELayer, DAESoftMax  # DAELayer/DAESoftMax added; assumed to live in networks
from modules import buildModel, reconstructionAttack, buildDAELayer, buildDAESoftmaxModel

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on', device)

print("Building Models")
print("SoftMax")
buildModel(SoftMax(), 0.1, True, True)

print("MLP")
buildModel(MLP(), 0.1, True, True)

print("DAE")
buildDAELayer(DAELayer(10304, 1000), lRate=1e-4, epochs=5000, plot=True)
buildDAELayer(DAELayer(1000, 300), lRate=1e-4, epochs=5000, plot=True)
buildDAESoftmaxModel(DAESoftMax(), lRate=1e-2, epochs=1000, plot=True)

print("CNN")
buildModel(CNN(), 0.001, True, True)

print("\nModel(s) is reconstructed with alpha =", 5000, "beta =", 100,
      "gamma =", 0.01, "delta =", 0.1)
print("Attacking Models")
# SoftMax Model from paper
print("Softmax")
reconstructionAttack(SoftMax())
Example #23
parser.add_argument('--penalty', type=float, default=0)
parser.add_argument('--T', type=int, default=4)
parser.add_argument('--units', action=partition('-', int), default=(256, ))
args = parser.parse_args()
print(args)

if args.gpu < 0:
    cuda = False
else:
    cuda = True
    th.cuda.set_device(args.gpu)

loader_dict, size = create_mnist_loaders(args.mnist_path, args.batch_size)

n_pixels = size[0] * size[1]
feature_extractor = MLP(n_pixels, args.n_features, args.units, args.nonlinear)
model = Network0(feature_extractor, args.n_features, size, args.T,
                 args.penalty)
if cuda:
    model.cuda()
optimizer = th.optim.Adam(model.parameters(), args.lr)

visdom = Visdom(env=__file__)
tl_vis = TraceVisualizer(visdom, {'title': 'training loss'})
ta_vis = TraceVisualizer(visdom, {'title': 'training accuracy'})
va_vis = TraceVisualizer(visdom, {'title': 'validation accuracy'})

opts = {'width': args.mask_vis_width, 'height': args.mask_vis_height}
mask_vis_tuple = tuple(ImageVisualizer(visdom, opts) for _ in range(args.T))

Example #24
File: policies.py Project: alvinzz/IRL
    def __init__(
            self,
            name,
            ob_dim,
            action_dim,
            var_network=False,  # NN if true, else trainable params indep of obs
            out_activation=None,
            hidden_dims=[64, 64],
            hidden_activation=tf.nn.tanh,
            weight_init=tf.contrib.layers.xavier_initializer,
            bias_init=tf.zeros_initializer,
            optimizer=ClipPPO):
        with tf.variable_scope(name):
            self.obs = tf.placeholder(tf.float32,
                                      shape=[None, ob_dim],
                                      name='obs')

            # policy net
            self.mean_network = MLP('means',
                                    ob_dim,
                                    action_dim,
                                    out_activation=out_activation,
                                    hidden_dims=hidden_dims,
                                    hidden_activation=hidden_activation,
                                    weight_init=weight_init,
                                    bias_init=bias_init,
                                    in_layer=self.obs)
            self.means = self.mean_network.layers['out']

            if var_network:
                self.log_var_network = MLP('log_vars',
                                           ob_dim,
                                           action_dim,
                                           out_activation=out_activation,
                                           hidden_dims=hidden_dims,
                                           hidden_activation=hidden_activation,
                                           weight_init=weight_init,
                                           bias_init=bias_init,
                                           in_layer=self.obs)
                self.log_vars = self.log_var_network.layers['out']
            else:
                self.log_var_network = MLP('log_vars',
                                           ob_dim,
                                           action_dim,
                                           out_activation=out_activation,
                                           hidden_dims=[],
                                           hidden_activation=hidden_activation,
                                           weight_init=weight_init,
                                           bias_init=bias_init,
                                           in_layer=self.obs)
                self.log_vars = self.log_var_network.layers['out']

            self.distribution = DiagGaussian(self.means, self.log_vars)
            self.sampled_actions = self.distribution.sample()

            self.actions = tf.placeholder(tf.float32,
                                          shape=[None, action_dim],
                                          name='actions')
            self.action_log_probs = self.distribution.log_prob(self.actions)
            self.entropies = self.distribution.entropy()

            # value net
            self.value_network = MLP('values',
                                     ob_dim,
                                     1,
                                     out_activation=out_activation,
                                     hidden_dims=hidden_dims,
                                     hidden_activation=hidden_activation,
                                     weight_init=weight_init,
                                     bias_init=bias_init,
                                     in_layer=self.obs)
            self.values = self.value_network.layers['out']

            # training, PPO for now
            self.optimizer = optimizer(ob_dim, action_dim, self)
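
A hedged TF1 usage sketch for the policy above; the wrapping class name GaussianPolicy and the zero-filled observation batch are assumptions for illustration.

import numpy as np
import tensorflow as tf

policy = GaussianPolicy('pi', ob_dim=4, action_dim=2)  # hypothetical class name
obs_batch = np.zeros((5, 4), dtype=np.float32)         # illustrative observations
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    actions = sess.run(policy.sampled_actions, feed_dict={policy.obs: obs_batch})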
Example #25
beta = int(args.beta[0])
gamma = float(args.gamma[0])
delta = float(args.delta[0])

print("\nModel(s) is reconstructed with alpha =", alpha, "beta =", beta,
      "gamma =", gamma, "delta =", delta)
print("Models choosen", model)

if model == 'all':
    # SoftMax Model from paper
    print("Softmax")
    reconstructionAttack(SoftMax(), alpha, beta, gamma, delta, True, False)

    # MLP Model from paper
    print("MLP")
    reconstructionAttack(MLP(), alpha, beta, gamma, delta, True, False)

    # DAE Model from paper
    print("DAE")
    reconstructionAttack(DAESoftMax(), alpha, beta, gamma, delta, True, False)

    # CNN for comparison
    print("CNN")
    reconstructionAttack(CNN(), alpha, beta, gamma, delta, True, False)
else:
    if model == 'Softmax':
        # SoftMax Model from paper
        print("Softmax")
        reconstructionAttack(SoftMax(), alpha, beta, gamma, delta, True, False)

    if model == 'MLP':
Example #26
class VanillaAE(nn.Module):
    def __init__(self, opt):
        super(VanillaAE, self).__init__()
        self.opt = opt
        self.device = torch.device("cuda:0" if not opt.no_cuda else "cpu")
        nc = int(opt.nc)
        imageSize = int(opt.imageSize)
        nz = int(opt.nz)
        nblk = int(opt.nblk)

        # generator
        self.netG = MLP(input_dim=nc * imageSize * imageSize,
                        output_dim=nc * imageSize * imageSize,
                        dim=nz,
                        n_blk=nblk,
                        norm='none',
                        activ='relu').to(self.device)
        weights_init(self.netG)
        if opt.netG != '':
            self.netG.load_state_dict(
                torch.load(opt.netG, map_location=self.device))
        print_and_write_log(opt.train_log_file, 'netG:')
        print_and_write_log(opt.train_log_file, str(self.netG))

        # losses
        self.criterion = nn.MSELoss()
        # define focal frequency loss
        self.criterion_freq = FFL(loss_weight=opt.ffl_w,
                                  alpha=opt.alpha,
                                  patch_factor=opt.patch_factor,
                                  ave_spectrum=opt.ave_spectrum,
                                  log_matrix=opt.log_matrix,
                                  batch_matrix=opt.batch_matrix).to(
                                      self.device)

        # misc
        self.to(self.device)

        # optimizer
        self.optimizerG = optim.Adam(self.netG.parameters(),
                                     lr=opt.lr,
                                     betas=(opt.beta1, opt.beta2))

    def forward(self):
        pass

    def gen_update(self, data, epoch, matrix=None):
        self.netG.zero_grad()
        real = data.to(self.device)
        if matrix is not None:
            matrix = matrix.to(self.device)
        recon = self.netG(real)

        # apply pixel-level loss
        errG_pix = self.criterion(recon, real) * self.opt.mse_w

        # apply focal frequency loss
        if epoch >= self.opt.freq_start_epoch:
            errG_freq = self.criterion_freq(recon, real, matrix)
        else:
            errG_freq = torch.tensor(0.0).to(self.device)

        errG = errG_pix + errG_freq
        errG.backward()
        self.optimizerG.step()

        return errG_pix, errG_freq

    def sample(self, x):
        x = x.to(self.device)
        self.netG.eval()
        with torch.no_grad():
            recon = self.netG(x)
        self.netG.train()

        return recon

    def save_checkpoints(self, ckpt_dir, epoch):
        torch.save(self.netG.state_dict(),
                   '%s/netG_epoch_%03d.pth' % (ckpt_dir, epoch))
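
A sketch of a training loop that would drive VanillaAE, assuming a dataloader of (image, label) batches and an opt namespace matching the fields read in __init__; the epoch count and checkpoint directory are illustrative.

model = VanillaAE(opt)                      # opt: parsed options namespace, assumed available
for epoch in range(100):                    # illustrative epoch count
    for images, _ in dataloader:            # dataloader of (image, label) batches, assumed
        err_pix, err_freq = model.gen_update(images, epoch)
    model.save_checkpoints('./checkpoints', epoch)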
Example #27
def get_network(model, channel, num_classes, im_size=(32, 32)):
    torch.random.manual_seed(int(time.time() * 1000) % 100000)
    net_width, net_depth, net_act, net_norm, net_pooling = get_default_convnet_setting(
    )

    if model == 'MLP':
        net = MLP(channel=channel, num_classes=num_classes)
    elif model == 'ConvNet':
        net = ConvNet(channel=channel,
                      num_classes=num_classes,
                      net_width=net_width,
                      net_depth=net_depth,
                      net_act=net_act,
                      net_norm=net_norm,
                      net_pooling=net_pooling,
                      im_size=im_size)
    elif model == 'LeNet':
        net = LeNet(channel=channel, num_classes=num_classes)
    elif model == 'AlexNet':
        net = AlexNet(channel=channel, num_classes=num_classes)
    elif model == 'VGG11':
        net = VGG11(channel=channel, num_classes=num_classes)
    elif model == 'VGG11BN':
        net = VGG11BN(channel=channel, num_classes=num_classes)
    elif model == 'ResNet18':
        net = ResNet18(channel=channel, num_classes=num_classes)
    elif model == 'ResNet18BN_AP':
        net = ResNet18BN_AP(channel=channel, num_classes=num_classes)

    elif model == 'ConvNetD1':
        net = ConvNet(channel=channel,
                      num_classes=num_classes,
                      net_width=net_width,
                      net_depth=1,
                      net_act=net_act,
                      net_norm=net_norm,
                      net_pooling=net_pooling)
    elif model == 'ConvNetD2':
        net = ConvNet(channel=channel,
                      num_classes=num_classes,
                      net_width=net_width,
                      net_depth=2,
                      net_act=net_act,
                      net_norm=net_norm,
                      net_pooling=net_pooling)
    elif model == 'ConvNetD3':
        net = ConvNet(channel=channel,
                      num_classes=num_classes,
                      net_width=net_width,
                      net_depth=3,
                      net_act=net_act,
                      net_norm=net_norm,
                      net_pooling=net_pooling)
    elif model == 'ConvNetD4':
        net = ConvNet(channel=channel,
                      num_classes=num_classes,
                      net_width=net_width,
                      net_depth=4,
                      net_act=net_act,
                      net_norm=net_norm,
                      net_pooling=net_pooling)

    elif model == 'ConvNetW32':
        net = ConvNet(channel=channel,
                      num_classes=num_classes,
                      net_width=32,
                      net_depth=net_depth,
                      net_act=net_act,
                      net_norm=net_norm,
                      net_pooling=net_pooling)
    elif model == 'ConvNetW64':
        net = ConvNet(channel=channel,
                      num_classes=num_classes,
                      net_width=64,
                      net_depth=net_depth,
                      net_act=net_act,
                      net_norm=net_norm,
                      net_pooling=net_pooling)
    elif model == 'ConvNetW128':
        net = ConvNet(channel=channel,
                      num_classes=num_classes,
                      net_width=128,
                      net_depth=net_depth,
                      net_act=net_act,
                      net_norm=net_norm,
                      net_pooling=net_pooling)
    elif model == 'ConvNetW256':
        net = ConvNet(channel=channel,
                      num_classes=num_classes,
                      net_width=256,
                      net_depth=net_depth,
                      net_act=net_act,
                      net_norm=net_norm,
                      net_pooling=net_pooling)

    elif model == 'ConvNetAS':
        net = ConvNet(channel=channel,
                      num_classes=num_classes,
                      net_width=net_width,
                      net_depth=net_depth,
                      net_act='sigmoid',
                      net_norm=net_norm,
                      net_pooling=net_pooling)
    elif model == 'ConvNetAR':
        net = ConvNet(channel=channel,
                      num_classes=num_classes,
                      net_width=net_width,
                      net_depth=net_depth,
                      net_act='relu',
                      net_norm=net_norm,
                      net_pooling=net_pooling)
    elif model == 'ConvNetAL':
        net = ConvNet(channel=channel,
                      num_classes=num_classes,
                      net_width=net_width,
                      net_depth=net_depth,
                      net_act='leakyrelu',
                      net_norm=net_norm,
                      net_pooling=net_pooling)

    elif model == 'ConvNetNN':
        net = ConvNet(channel=channel,
                      num_classes=num_classes,
                      net_width=net_width,
                      net_depth=net_depth,
                      net_act=net_act,
                      net_norm='none',
                      net_pooling=net_pooling)
    elif model == 'ConvNetBN':
        net = ConvNet(channel=channel,
                      num_classes=num_classes,
                      net_width=net_width,
                      net_depth=net_depth,
                      net_act=net_act,
                      net_norm='batchnorm',
                      net_pooling=net_pooling)
    elif model == 'ConvNetLN':
        net = ConvNet(channel=channel,
                      num_classes=num_classes,
                      net_width=net_width,
                      net_depth=net_depth,
                      net_act=net_act,
                      net_norm='layernorm',
                      net_pooling=net_pooling)
    elif model == 'ConvNetIN':
        net = ConvNet(channel=channel,
                      num_classes=num_classes,
                      net_width=net_width,
                      net_depth=net_depth,
                      net_act=net_act,
                      net_norm='instancenorm',
                      net_pooling=net_pooling)
    elif model == 'ConvNetGN':
        net = ConvNet(channel=channel,
                      num_classes=num_classes,
                      net_width=net_width,
                      net_depth=net_depth,
                      net_act=net_act,
                      net_norm='groupnorm',
                      net_pooling=net_pooling)

    elif model == 'ConvNetNP':
        net = ConvNet(channel=channel,
                      num_classes=num_classes,
                      net_width=net_width,
                      net_depth=net_depth,
                      net_act=net_act,
                      net_norm=net_norm,
                      net_pooling='none')
    elif model == 'ConvNetMP':
        net = ConvNet(channel=channel,
                      num_classes=num_classes,
                      net_width=net_width,
                      net_depth=net_depth,
                      net_act=net_act,
                      net_norm=net_norm,
                      net_pooling='maxpooling')
    elif model == 'ConvNetAP':
        net = ConvNet(channel=channel,
                      num_classes=num_classes,
                      net_width=net_width,
                      net_depth=net_depth,
                      net_act=net_act,
                      net_norm=net_norm,
                      net_pooling='avgpooling')

    else:
        net = None
        exit('DC error: unknown model')

    gpu_num = torch.cuda.device_count()
    if gpu_num > 0:
        device = 'cuda'
        if gpu_num > 1:
            net = nn.DataParallel(net)
    else:
        device = 'cpu'
    net = net.to(device)

    return net
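
As a style note, the long elif chain above could be collapsed into a lookup table. A minimal sketch of that alternative follows; the mapping covers only the single-argument constructors, and names like MODEL_BUILDERS are assumptions (the ConvNet variants would need functools.partial with their extra keyword arguments).

MODEL_BUILDERS = {
    'MLP': MLP,
    'LeNet': LeNet,
    'AlexNet': AlexNet,
    'VGG11': VGG11,
    'ResNet18': ResNet18,
}

def build_network(model, channel, num_classes):
    try:
        return MODEL_BUILDERS[model](channel=channel, num_classes=num_classes)
    except KeyError:
        raise ValueError(f'DC error: unknown model {model}')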
Example #28
def main(args):
    torch.manual_seed(args.seed)
    if not os.path.exists(args.res_dir):
        os.mkdir(args.res_dir)
    if not os.path.exists(args.model_dir):
        os.mkdir(args.model_dir)

    data1 = dd.io.load(os.path.join(args.vec_dir, 'NYU_correlation_matrix.h5'))
    data2 = dd.io.load(os.path.join(args.vec_dir, 'UM_correlation_matrix.h5'))
    data3 = dd.io.load(os.path.join(args.vec_dir, 'USM_correlation_matrix.h5'))
    data4 = dd.io.load(os.path.join(args.vec_dir,
                                    'UCLA_correlation_matrix.h5'))

    x1 = torch.from_numpy(data1['data']).float()
    y1 = torch.from_numpy(data1['label']).long()
    x2 = torch.from_numpy(data2['data']).float()
    y2 = torch.from_numpy(data2['label']).long()
    x3 = torch.from_numpy(data3['data']).float()
    y3 = torch.from_numpy(data3['label']).long()
    x4 = torch.from_numpy(data4['data']).float()
    y4 = torch.from_numpy(data4['label']).long()

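    # Per-site subject indices, pre-split into five folds keyed '0'..'4'.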
    if args.overlap:
        idNYU = dd.io.load('./idx/NYU_sub_overlap.h5')
        idUM = dd.io.load('./idx/UM_sub_overlap.h5')
        idUSM = dd.io.load('./idx/USM_sub_overlap.h5')
        idUCLA = dd.io.load('./idx/UCLA_sub_overlap.h5')
    else:
        idNYU = dd.io.load('./idx/NYU_sub.h5')
        idUM = dd.io.load('./idx/UM_sub.h5')
        idUSM = dd.io.load('./idx/USM_sub.h5')
        idUCLA = dd.io.load('./idx/UCLA_sub.h5')

    # Hold out fold args.split for testing; train on the other four folds.
    te_fold = str(args.split)
    tr_folds = [f for f in ('0', '1', '2', '3', '4') if f != te_fold]
    site_ids = [idNYU, idUM, idUSM, idUCLA]
    tr1, tr2, tr3, tr4 = (sum((ids[f] for f in tr_folds), [])
                          for ids in site_ids)
    te1, te2, te3, te4 = (ids[te_fold] for ids in site_ids)

    x1_train = x1[tr1]
    y1_train = y1[tr1]
    x2_train = x2[tr2]
    y2_train = y2[tr2]
    x3_train = x3[tr3]
    y3_train = y3[tr3]
    x4_train = x4[tr4]
    y4_train = y4[tr4]

    x1_test = x1[te1]
    y1_test = y1[te1]
    x2_test = x2[te2]
    y2_test = y2[te2]
    x3_test = x3[te3]
    y3_test = y3[te3]
    x4_test = x4[te4]
    y4_test = y4[te4]

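    # sepnorm: z-score each site with its own training statistics;
    # otherwise z-score all sites with pooled training statistics.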
    if args.sepnorm:
        mean = x1_train.mean(0, keepdim=True)
        dev = x1_train.std(0, keepdim=True)
        x1_train = (x1_train - mean) / dev
        x1_test = (x1_test - mean) / dev

        mean = x2_train.mean(0, keepdim=True)
        dev = x2_train.std(0, keepdim=True)
        x2_train = (x2_train - mean) / dev
        x2_test = (x2_test - mean) / dev

        mean = x3_train.mean(0, keepdim=True)
        dev = x3_train.std(0, keepdim=True)
        x3_train = (x3_train - mean) / dev
        x3_test = (x3_test - mean) / dev

        mean = x4_train.mean(0, keepdim=True)
        dev = x4_train.std(0, keepdim=True)
        x4_train = (x4_train - mean) / dev
        x4_test = (x4_test - mean) / dev
    else:
        mean = torch.cat((x1_train, x2_train, x3_train, x4_train),
                         0).mean(0, keepdim=True)
        dev = torch.cat((x1_train, x2_train, x3_train, x4_train),
                        0).std(0, keepdim=True)
        x1_train = (x1_train - mean) / dev
        x1_test = (x1_test - mean) / dev
        x2_train = (x2_train - mean) / dev
        x2_test = (x2_test - mean) / dev
        x3_train = (x3_train - mean) / dev
        x3_test = (x3_test - mean) / dev
        x4_train = (x4_train - mean) / dev
        x4_test = (x4_test - mean) / dev

    # Pooled training set over all four sites (train_set, to avoid
    # clashing with the train() helper defined below).
    train_set = TensorDataset(
        torch.cat((x1_train, x2_train, x3_train, x4_train), 0),
        torch.cat((y1_train, y2_train, y3_train, y4_train), 0))
    train_loader = DataLoader(train_set, batch_size=args.batch_size,
                              shuffle=True)

    test1 = TensorDataset(x1_test, y1_test)
    test_loader1 = DataLoader(test1,
                              batch_size=args.test_batch_size1,
                              shuffle=False)
    test2 = TensorDataset(x2_test, y2_test)
    test_loader2 = DataLoader(test2,
                              batch_size=args.test_batch_size2,
                              shuffle=False)
    test3 = TensorDataset(x3_test, y3_test)
    test_loader3 = DataLoader(test3,
                              batch_size=args.test_batch_size3,
                              shuffle=False)
    test4 = TensorDataset(x4_test, y4_test)
    test_loader4 = DataLoader(test4,
                              batch_size=args.test_batch_size4,
                              shuffle=False)

    model = MLP(6105, args.dim, 2).to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=5e-2)
    print(model)
    nnloss = nn.NLLLoss()

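    # One epoch over the pooled loader; returns the average training loss.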
    def train(data_loader, epoch):
        model.train()

        # Halve the learning rate every 20 epochs.
        if epoch % 20 == 0:
            for param_group in optimizer.param_groups:
                param_group['lr'] = 0.5 * param_group['lr']

        loss_all1 = 0

        for data, target in data_loader:
            optimizer.zero_grad()
            data = data.to(device)
            target = target.to(device)
            output1 = model(data)
            loss1 = nnloss(output1, target)
            loss1.backward()
            loss_all1 += loss1.item() * target.size(0)
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
            optimizer.step()
        return loss_all1 / (len(data_loader.dataset))

    def test(data_loader, train=False):
        model.eval()
        test_loss = 0
        correct = 0
        outputs = []
        preds = []
        targets = []
        for data, target in data_loader:
            data = data.to(device)
            targets.append(target.detach().numpy())  # keep the whole batch of labels
            target = target.to(device)
            output = model(data)
            outputs.append(output.detach().cpu().numpy())
            test_loss += nnloss(output, target).item() * target.size(0)
            pred = output.data.max(1)[1]
            preds.append(pred.detach().cpu().numpy())
            correct += pred.eq(target.view(-1)).sum().item()

        test_loss /= len(data_loader.dataset)
        correct /= len(data_loader.dataset)
        if train:
            print(
                'Train set: Average loss: {:.4f}, Average acc: {:.4f}'.format(
                    test_loss, correct))
        else:
            print('Test set: Average loss: {:.4f}, Average acc: {:.4f}'.format(
                test_loss, correct))
        return test_loss, correct, targets, outputs, preds

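    # Track the best average accuracy over the four per-site test sets.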
    best_acc = 0
    best_epoch = 0
    for epoch in range(args.epochs):
        start_time = time.time()
        print(f"Epoch Number {epoch + 1}")
        l1 = train(train_loader, epoch)
        test(train_loader, train=True)
        print('Average training loss: {:.4f}'.format(l1))
        print('===NYU===')
        _, acc1, targets1, outputs1, preds1 = test(test_loader1, train=False)
        print('===UM===')
        _, acc2, targets2, outputs2, preds2 = test(test_loader2, train=False)
        print('===USM===')
        _, acc3, targets3, outputs3, preds3 = test(test_loader3, train=False)
        print('===UCLA===')
        _, acc4, targets4, outputs4, preds4 = test(test_loader4, train=False)
        if (acc1 + acc2 + acc3 + acc4) / 4 > best_acc:
            best_acc = (acc1 + acc2 + acc3 + acc4) / 4
            best_epoch = epoch
        total_time = time.time() - start_time
        print('Epoch time:', round(total_time, 2), 's\n')
    model_wts = copy.deepcopy(model.state_dict())
    torch.save(model_wts, os.path.join(args.model_dir,
                                       str(args.split) + '.pth'))
    dd.io.save(os.path.join(args.res_dir, 'NYU_' + str(args.split) + '.h5'), {
        'outputs': outputs1,
        'preds': preds1,
        'targets': targets1
    })
    dd.io.save(os.path.join(args.res_dir, 'UM_' + str(args.split) + '.h5'), {
        'outputs': outputs2,
        'preds': preds2,
        'targets': targets2
    })
    dd.io.save(os.path.join(args.res_dir, 'USM_' + str(args.split) + '.h5'), {
        'outputs': outputs3,
        'preds': preds3,
        'targets': targets3
    })
    dd.io.save(os.path.join(args.res_dir, 'UCLA_' + str(args.split) + '.h5'), {
        'outputs': outputs4,
        'preds': preds4,
        'targets': targets4
    })
    print('Best Acc:', best_acc, 'Best Epoch:', best_epoch)
    print('split:', args.split)
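MLP and the module-level device used above are defined elsewhere in the file. Because the loss is nn.NLLLoss applied directly to the model output, and Example #30 below recovers probabilities with torch.exp, the network has to end in a log-softmax. A minimal sketch consistent with calls like MLP(6105, args.dim, 2); the single hidden layer and ReLU are assumptions:

import torch.nn as nn

class MLP(nn.Module):
    # Sketch under stated assumptions; the real class may differ.
    # Ends in LogSoftmax so its output pairs directly with nn.NLLLoss.
    def __init__(self, in_dim, hidden_dim, n_classes):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(in_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, n_classes),
            nn.LogSoftmax(dim=1))

    def forward(self, x):
        return self.net(x)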
Example #29
def main(args):
    seed = 999
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.backends.cudnn.deterministic = True

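    # Load the five cross-validation checkpoints for the chosen method.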
    models = []
    if args.method == 'fed':
        for i in range(5):
            model = MLP(6105, 16, 2).to(device)
            model.load_state_dict(
                torch.load(os.path.join('./model/fed_overlap',
                                        str(i) + '.pth')))
            models.append(model)
    elif args.method == 'single':
        for i in range(5):
            model = MLP(6105, 8, 2).to(device)
            model.load_state_dict(
                torch.load(
                    os.path.join('./model/single_overlap', args.site,
                                 str(i) + '.pth')))
            models.append(model)
    elif args.method == 'mix':
        for i in range(5):
            model = MLP(6105, 16, 2).to(device)
            model.load_state_dict(
                torch.load(os.path.join('./model/mix_overlap',
                                        str(i) + '.pth')))
            models.append(model)

    data1 = dd.io.load(os.path.join(args.vec_dir, 'NYU_correlation_matrix.h5'))
    data2 = dd.io.load(os.path.join(args.vec_dir, 'UM_correlation_matrix.h5'))
    data3 = dd.io.load(os.path.join(args.vec_dir, 'USM_correlation_matrix.h5'))
    data4 = dd.io.load(os.path.join(args.vec_dir,
                                    'UCLA_correlation_matrix.h5'))

    x1 = torch.from_numpy(data1['data']).float()
    y1 = torch.from_numpy(data1['label']).long()
    x2 = torch.from_numpy(data2['data']).float()
    y2 = torch.from_numpy(data2['label']).long()
    x3 = torch.from_numpy(data3['data']).float()
    y3 = torch.from_numpy(data3['label']).long()
    x4 = torch.from_numpy(data4['data']).float()
    y4 = torch.from_numpy(data4['label']).long()

    idNYU = dd.io.load('./idx/NYU_sub_overlap.h5')
    idUM = dd.io.load('./idx/UM_sub_overlap.h5')
    idUSM = dd.io.load('./idx/USM_sub_overlap.h5')
    idUCLA = dd.io.load('./idx/UCLA_sub_overlap.h5')

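    # For each fold i: rebuild that fold's test split, normalize it with the
    # matching training statistics, and attribute the fold-i model.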
    grad = dict()
    for i in range(5):
        te_fold = str(i)
        tr_folds = [f for f in ('0', '1', '2', '3', '4') if f != te_fold]
        site_ids = [idNYU, idUM, idUSM, idUCLA]
        tr1, tr2, tr3, tr4 = (sum((ids[f] for f in tr_folds), [])
                              for ids in site_ids)
        te1, te2, te3, te4 = (ids[te_fold] for ids in site_ids)

        x1_train = x1[tr1]
        x2_train = x2[tr2]
        x3_train = x3[tr3]
        x4_train = x4[tr4]

        x1_test = x1[te1]
        y1_test = y1[te1]
        x2_test = x2[te2]
        y2_test = y2[te2]
        x3_test = x3[te3]
        y3_test = y3[te3]
        x4_test = x4[te4]
        y4_test = y4[te4]

        mean = x1_train.mean(0, keepdim=True)
        dev = x1_train.std(0, keepdim=True)
        x1_test = (x1_test - mean) / dev

        mean = x2_train.mean(0, keepdim=True)
        dev = x2_train.std(0, keepdim=True)
        x2_test = (x2_test - mean) / dev

        mean = x3_train.mean(0, keepdim=True)
        dev = x3_train.std(0, keepdim=True)
        x3_test = (x3_test - mean) / dev

        mean = x4_train.mean(0, keepdim=True)
        dev = x4_train.std(0, keepdim=True)
        x4_test = (x4_test - mean) / dev

        if args.ASD:
            x1_test = x1_test[y1_test == 1]
            y1_test = y1_test[y1_test == 1]
            x2_test = x2_test[y2_test == 1]
            y2_test = y2_test[y2_test == 1]
            x3_test = x3_test[y3_test == 1]
            y3_test = y3_test[y3_test == 1]
            x4_test = x4_test[y4_test == 1]
            y4_test = y4_test[y4_test == 1]
        elif args.HC:
            x1_test = x1_test[y1_test == 0]
            y1_test = y1_test[y1_test == 0]
            x2_test = x2_test[y2_test == 0]
            y2_test = y2_test[y2_test == 0]
            x3_test = x3_test[y3_test == 0]
            y3_test = y3_test[y3_test == 0]
            x4_test = x4_test[y4_test == 0]
            y4_test = y4_test[y4_test == 0]
        # Without --ASD/--HC, keep the full two-class test sets.

        test1 = TensorDataset(x1_test, y1_test)
        test2 = TensorDataset(x2_test, y2_test)
        test3 = TensorDataset(x3_test, y3_test)
        test4 = TensorDataset(x4_test, y4_test)
        test_loader1 = DataLoader(test1, batch_size=1, shuffle=False)
        test_loader2 = DataLoader(test2, batch_size=1, shuffle=False)
        test_loader3 = DataLoader(test3, batch_size=1, shuffle=False)
        test_loader4 = DataLoader(test4, batch_size=1, shuffle=False)

        test_loader = {'NYU': test_loader1, 'UM': test_loader2,
                       'USM': test_loader3, 'UCLA': test_loader4}[args.site]

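        # Per test sample: forward through the hooked model, backprop the
        # target-class output, and store the input-space gradient.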
        grad[i] = list()
        gdbp = GuidedBackPropogation(models[i])
        models[i].eval()
        for data, target in test_loader:
            data = data.to(device)
            data = data.requires_grad_()
            out_b = gdbp(data)
            out_b[:, target.item()].backward()
            grad_b = gdbp.get(data)
            grad[i].append(grad_b)

    if args.ASD:
        dd.io.save(
            os.path.join('./interpretation',
                         args.method + '_' + args.site + '.h5'),
            {'grad': grad})
    elif args.HC:
        dd.io.save(
            os.path.join('./interpretation_hc',
                         args.method + '_' + args.site + '.h5'),
            {'grad': grad})
    else:
        dd.io.save(
            os.path.join('./interpretation_2class',
                         args.method + '_' + args.site + '.h5'),
            {'grad': grad})
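GuidedBackPropogation (spelled as in the source) is not defined in these snippets. From its use above, gdbp(data) forwards through the model, backward() is then called on one class output, and gdbp.get(data) reads the resulting input gradient. A minimal guided-backprop sketch hooks every ReLU so only positive gradients flow back; the hook mechanics here are assumptions:

import torch.nn as nn

class GuidedBackPropogation:
    # Sketch under stated assumptions; the original class may differ.
    def __init__(self, model):
        self.model = model
        for module in model.modules():
            if isinstance(module, nn.ReLU):
                module.register_full_backward_hook(self._guided_relu_hook)

    @staticmethod
    def _guided_relu_hook(module, grad_input, grad_output):
        # Guided backprop: block negative gradients at every ReLU.
        return tuple(g.clamp(min=0) if g is not None else g
                     for g in grad_input)

    def __call__(self, data):
        return self.model(data)

    def get(self, data):
        # Input-space gradient populated by the caller's backward() call.
        return data.grad.detach().cpu().numpy()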
Example #30
def main(args):
    seed = 999
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.backends.cudnn.deterministic = True

    sites = ['NYU', 'UCLA', 'UM', 'USM']
    models_cross = []
    for site in sites:
        if site != args.site:
            model = MLP(6105, 8, 2).to(device)
            model.load_state_dict(
                torch.load(os.path.join('./model/cross_overlap',
                                        site + '.pth')))
            models_cross.append(model)

    model_single = MLP(6105, 8, 2).to(device)
    model_single.load_state_dict(
        torch.load(
            os.path.join('./model/single_overlap', args.site,
                         str(args.split) + '.pth')))

    data1 = dd.io.load(os.path.join(args.vec_dir, 'NYU_correlation_matrix.h5'))
    data2 = dd.io.load(os.path.join(args.vec_dir, 'UM_correlation_matrix.h5'))
    data3 = dd.io.load(os.path.join(args.vec_dir, 'USM_correlation_matrix.h5'))
    data4 = dd.io.load(os.path.join(args.vec_dir,
                                    'UCLA_correlation_matrix.h5'))

    x1 = torch.from_numpy(data1['data']).float()
    y1 = torch.from_numpy(data1['label']).long()
    x2 = torch.from_numpy(data2['data']).float()
    y2 = torch.from_numpy(data2['label']).long()
    x3 = torch.from_numpy(data3['data']).float()
    y3 = torch.from_numpy(data3['label']).long()
    x4 = torch.from_numpy(data4['data']).float()
    y4 = torch.from_numpy(data4['label']).long()

    idNYU = dd.io.load('./idx/NYU_sub_overlap.h5')
    idUM = dd.io.load('./idx/UM_sub_overlap.h5')
    idUSM = dd.io.load('./idx/USM_sub_overlap.h5')
    idUCLA = dd.io.load('./idx/UCLA_sub_overlap.h5')

    # Hold out fold args.split for testing; train on the other four folds.
    te_fold = str(args.split)
    tr_folds = [f for f in ('0', '1', '2', '3', '4') if f != te_fold]
    site_ids = [idNYU, idUM, idUSM, idUCLA]
    tr1, tr2, tr3, tr4 = (sum((ids[f] for f in tr_folds), [])
                          for ids in site_ids)
    te1, te2, te3, te4 = (ids[te_fold] for ids in site_ids)

    # Only the training features are needed here (for mean/std); the
    # training labels go unused.
    x1_train = x1[tr1]
    x2_train = x2[tr2]
    x3_train = x3[tr3]
    x4_train = x4[tr4]

    x1_test = x1[te1]
    y1_test = y1[te1]
    x2_test = x2[te2]
    y2_test = y2[te2]
    x3_test = x3[te3]
    y3_test = y3[te3]
    x4_test = x4[te4]
    y4_test = y4[te4]

    mean = x1_train.mean(0, keepdim=True)
    dev = x1_train.std(0, keepdim=True)
    x1_test = (x1_test - mean) / dev

    mean = x2_train.mean(0, keepdim=True)
    dev = x2_train.std(0, keepdim=True)
    x2_test = (x2_test - mean) / dev

    mean = x3_train.mean(0, keepdim=True)
    dev = x3_train.std(0, keepdim=True)
    x3_test = (x3_test - mean) / dev

    mean = x4_train.mean(0, keepdim=True)
    dev = x4_train.std(0, keepdim=True)
    x4_test = (x4_test - mean) / dev

    test1 = TensorDataset(x1_test, y1_test)
    test2 = TensorDataset(x2_test, y2_test)
    test3 = TensorDataset(x3_test, y3_test)
    test4 = TensorDataset(x4_test, y4_test)

    test = {'NYU': test1, 'UM': test2, 'USM': test3, 'UCLA': test4}[args.site]

    te_data = test.tensors[0].to(device)
    te_outputs = []
    targets = test.tensors[1].numpy()

    # Cross-site models: collect class probabilities (the models output
    # log-probabilities, so exp() recovers probabilities).
    for model in models_cross:
        model.eval()
        with torch.no_grad():
            te_output = model(te_data)
        te_outputs.append(torch.exp(te_output))

    # Single-site model.
    model_single.eval()
    with torch.no_grad():
        te_output = model_single(te_data)
    te_outputs.append(torch.exp(te_output))

    # Soft voting: average the probabilities over all ensemble members.
    output_mean = torch.stack(te_outputs, dim=0).mean(dim=0)
    preds = output_mean.argmax(dim=1).cpu().numpy()
    os.makedirs(args.res_dir, exist_ok=True)
    dd.io.save(os.path.join(args.res_dir,
                            args.site + '_' + str(args.split) + '.h5'),
               {'preds': preds, 'targets': targets})
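As a sanity check on the soft-voting step above, a tiny worked example with made-up numbers, contrasting averaging in probability space with averaging the raw log-probabilities:

import torch

# Two hypothetical ensemble members, one sample, two classes.
log_p = torch.log(torch.tensor([[0.9, 0.1],
                                [0.2, 0.8]]))
soft_vote = torch.exp(log_p).mean(0)  # tensor([0.55, 0.45]) -> class 0
geo_vote = log_p.mean(0).exp()        # ~[0.42, 0.28]: the (unnormalized)
                                      # geometric mean; same winner here,
                                      # but the two rules can disagree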