Example #1
def get_models(args):
    sigmoid_flag = 1
    if args.gan == 'lsgan':
        sigmoid_flag = 0

    if args.model == 'scribbler':
        netG = scribbler.Scribbler(5, 3, 32)
    elif args.model == 'texturegan':
        netG = texturegan.TextureGAN(5, 3, 32)
    elif args.model == 'pix2pix':
        netG = define_G(5, 3, 32)
    elif args.model == 'scribbler_dilate_128':
        netG = scribbler_dilate_128.ScribblerDilate128(5, 3, 32)
    else:
        print(args.model + ' not supported; using the Scribbler model')
        netG = scribbler.Scribbler(5, 3, 32)

    if args.color_space == 'lab':
        netD = discriminator.Discriminator(1, 32, sigmoid_flag)
        netD_local = discriminator.LocalDiscriminator(2, 32, sigmoid_flag)
    elif args.color_space == 'rgb':
        netD = discriminator.Discriminator(3, 32, sigmoid_flag)
        netD_local = None  # no local discriminator in RGB mode, so the return below stays valid

    if args.load == -1:
        netG.apply(weights_init)
    else:
        load_network(netG, 'G', args.load_epoch, args.load, args)

    if args.load_D == -1:
        netD.apply(weights_init)
    else:
        load_network(netD, 'D', args.load_epoch, args.load_D, args)
        if netD_local is not None:
            load_network(netD_local, 'D_local', args.load_epoch, args.load_D, args)
    return netG, netD, netD_local
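For reference, weights_init is called here but not defined in the snippet; the DCGAN-style initializer shown in Example #7 below is the usual shape of such a helper. A self-contained sketch (the repo's actual helper may differ):

import torch.nn as nn

def weights_init(m):
    # N(0, 0.02) for conv weights; N(1, 0.02) with zero bias for BatchNorm
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight, 1.0, 0.02)
        nn.init.zeros_(m.bias)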
Example #2
def create_discriminator_criterion(args):
    d = discriminator.Discriminator(outputs_size=1000, K=8).cuda()
    d = torch.nn.DataParallel(d)
    update_parameters = {'params': d.parameters(), "lr": args.d_lr}
    discriminators_criterion = discriminatorLoss(d).cuda()
    if len(args.gpus) > 1:
        discriminators_criterion = torch.nn.DataParallel(discriminators_criterion, device_ids=args.gpus)
    return discriminators_criterion, update_parameters
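Note that update_parameters is shaped like a torch.optim parameter group, so the caller can pass it straight into an optimizer. A hypothetical consumption of the returned pair (the caller is not shown in the snippet):

discriminators_criterion, update_parameters = create_discriminator_criterion(args)
# the "lr" key inside the group overrides Adam's default learning rate
d_optimizer = torch.optim.Adam([update_parameters])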
Example #3
 def __init__(self, n_level):
     super(LPAGAN, self).__init__()
     self.n_level = n_level
     self.Generator = []
     self.Discriminator = []
     for i in range(n_level):
         g = generator.Generator()
         g.cuda()
         d = discriminator.Discriminator()
         d.cuda()
         self.Generator.append(g)
         self.Discriminator.append(d)
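Because the generators and discriminators are kept in plain Python lists, nn.Module bookkeeping (parameters(), .to(), state_dict()) will not see them. If LPAGAN subclasses nn.Module, as the super() call suggests, an nn.ModuleList keeps the same structure while registering the sub-modules; a sketch:

import torch.nn as nn

class LPAGAN(nn.Module):
    def __init__(self, n_level):
        super().__init__()
        self.n_level = n_level
        # ModuleList registers each sub-module, so .cuda() and parameters() reach them
        self.Generator = nn.ModuleList(generator.Generator() for _ in range(n_level))
        self.Discriminator = nn.ModuleList(discriminator.Discriminator() for _ in range(n_level))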
Example #4
def get_model(name, n_outputs):
    if name == "lenet":
        model = embedding.EmbeddingNet(n_outputs).cuda()
        opt = torch.optim.Adam(model.parameters(), lr=1e-4, betas=(0.5, 0.9))

        return model, opt  # already moved to the GPU by the constructor call above

    if name == "disc":
        model = discriminator.Discriminator(input_dims=500,
                                            hidden_dims=500,
                                            output_dims=2)
        opt = torch.optim.Adam(model.parameters(), lr=1e-4, betas=(0.5, 0.9))

        return model.cuda(), opt
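A usage sketch for both branches (the argument values are assumptions; note that n_outputs is ignored by the "disc" branch, whose dimensions are hard-coded):

embed_net, embed_opt = get_model("lenet", n_outputs=128)
disc_net, disc_opt = get_model("disc", n_outputs=None)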
Example #5
 def __init__(self, dataset, conf, strategy):
     super().__init__(dataset, conf)
     self.client_nums = self.conf["federate"]["client_nums"]
     self.client_budget = self.conf["federate"]["client_budget"]
     self.train_indices, self.test_indices = indices_train_test_split(
         list(range(len(self.dataset))))
     self.client_dict = self.get_client_dict(self.train_indices)
     self.client_selection_status = [False for _ in range(self.client_nums)]
     self.discriminator = discriminator.Discriminator(z_dim=4)
     self.discriminator.to(self.device)
     self.dsc_optim = getattr(
         optimUtils, conf["discriminator"]["optimizer"]["name"])(
             self.discriminator.parameters(),
             conf["discriminator"]["optimizer"])
     for client_idx, client_unlabeled_indices in self.client_dict.items():
         # The param 'dataset' is useless in clients' tasks
         # We need to manually input the unlabeled indices to the clients
         self.clients[client_idx] = TefalClient(dataset, conf)
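The getattr call resolves an optimizer factory by name from optimUtils and then calls it with the parameters and the optimizer sub-config. The snippet does not show that module; a minimal hypothetical sketch of the contract it implies:

# hypothetical optimUtils module; the real one is not shown in the snippet
import torch

def sgd(params, opt_conf):
    return torch.optim.SGD(params, lr=opt_conf.get("lr", 0.01),
                           momentum=opt_conf.get("momentum", 0.9))

def adam(params, opt_conf):
    return torch.optim.Adam(params, lr=opt_conf.get("lr", 1e-3))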
Example #6
def main():
    args = parser.parse_args()

    print_args(args)

    if args.seed is not None:
        print('seed number given => {:d}'.format(args.seed))
        random.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)

    args.dataset = os.path.join('./datasets', args.dataset)

    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    if args.experiment_id is not None:
        model_id = args.experiment_id
    else:
        model_id = time.strftime('%Y-%m-%d-%H%M%S',
                                 time.localtime(time.time()))
    args.save_path = os.path.join('checkpoints', model_id)
    os.makedirs(args.save_path, exist_ok=True)
    print('experiment id => {} \ncheckpoint path => {}'.format(
        model_id, args.save_path))

    writer = get_summarywriter(model_id) if args.tensorboard else None

    inf_train_loader = create_dataloader(args, phase='train', inf=True)
    val_loader = create_dataloader(args, phase='test')

    model = deepdeblur.DeepDeblur_scale3()
    model.to(device)
    netD = discriminator.Discriminator()
    netD.to(device)

    train(inf_train_loader, val_loader, model, netD, device, writer, args)

    if writer is not None:
        writer.close()
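The seeding above covers Python, NumPy, and CPU PyTorch. For reproducible GPU runs the following settings are commonly added as well; they are not part of the original script:

torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.deterministic = True  # pick deterministic conv algorithms
torch.backends.cudnn.benchmark = False     # autotuning is a source of nondeterminism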
Example #7

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# custom weights initialization called on netG and netD
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
        

generator_ = generator.Generator(args["nz"], args["ngf"], args["nc"], args["ngpu"]).to(device)
discriminator_ = discriminator.Discriminator(args["nc"], args["ndf"]).to(device)

generator_.apply(weights_init)
discriminator_.apply(weights_init)

criterion = args['loss_criterion']

params_gen = list(generator_.parameters())
params_dis = list(discriminator_.parameters())

optimizer_gen = torch.optim.Adam(params_gen, lr=args['learning_rate_gen'], betas=(args['beta'], 0.999))
optimizer_dis = torch.optim.Adam(params_dis, lr=args['learning_rate_dis'], betas=(args['beta'], 0.999))


d_stats_manager, g_stats_manager = nt.StatsManager(), nt.StatsManager()
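With the pieces above in place, one update of the classic DCGAN loop looks roughly as follows. This is a sketch, not code from the original script; it assumes criterion is nn.BCELoss and real_batch is a batch from a dataloader:

b = real_batch.size(0)
noise = torch.randn(b, args["nz"], 1, 1, device=device)

# discriminator step: push real toward 1, fake toward 0
optimizer_dis.zero_grad()
loss_real = criterion(discriminator_(real_batch.to(device)).view(-1),
                      torch.ones(b, device=device))
fake = generator_(noise)
loss_fake = criterion(discriminator_(fake.detach()).view(-1),
                      torch.zeros(b, device=device))
(loss_real + loss_fake).backward()
optimizer_dis.step()

# generator step: make the discriminator score fakes as real
optimizer_gen.zero_grad()
loss_gen = criterion(discriminator_(fake).view(-1), torch.ones(b, device=device))
loss_gen.backward()
optimizer_gen.step()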
Example #8
# Set dataset and dataloader
dataset, dataloader = None, None
if cfg['dataset'] == 'celeba':
    dataset, dataloader = celeba(cfg['batch_size'], cfg['num_workers'])
elif cfg['dataset'] == 'paintings':
    dataset, dataloader = paintings(cfg['batch_size'], cfg['num_workers'])
elif cfg['dataset'] == 'mnist':
    dataset, dataloader = mnist(cfg['batch_size'], cfg['num_workers'])
else:
    raise ValueError("Dataset specified in config/config.json is not implemented.")

netG = generator.Generator(cfg['ngpu'], cfg['nz'], cfg['ngf'], cfg['nc']).to(device)
netG.apply(weights_init)

netD = discriminator.Discriminator(cfg['ngpu'], cfg['nc'], cfg['ndf']).to(device)
netD.apply(weights_init)


# define loss
# Binary Cross Entropy Loss
criterion = nn.BCELoss()

# make an optimizer
# Adam optimizers for generator and discriminator
optimizerG = optim.Adam(netG.parameters(), cfg['lr'], betas=(0.5, 0.999))
optimizerD = optim.Adam(netD.parameters(), cfg['lr'], betas=(0.5, 0.999))
Example #9
    durations_g = torch.FloatTensor(g_loss_durations)  # assumed list, by analogy with d/c below
    durations_d = torch.FloatTensor(d_loss_durations)
    durations_c = torch.FloatTensor(c_loss_durations)
    plt.title('Training...')
    plt.xlabel('Episode')
    plt.ylabel('Duration')
    plt.plot(durations_g.numpy(), 'r', label="g_loss")
    plt.plot(durations_d.numpy(), 'b', label="d_loss")
    plt.plot(durations_c.numpy(), 'g', label="c_loss")
    plt.pause(0.001)  # pause a bit so that plots are updated


use_cuda = torch.cuda.is_available()
# Initialize the models
classifier = classifier.Classifier()
critic = discriminator.Discriminator(input_dims=params.d_input_dims,
                                     hidden_dims=params.d_hidden_dims,
                                     output_dims=params.d_output_dims)
generator = generator.Generator()

criterion = nn.CrossEntropyLoss()

optimizer_c = optim.Adam(classifier.parameters(),
                         lr=params.learning_rate,
                         betas=(params.beta1, params.beta2))
optimizer_d = optim.Adam(critic.parameters(),
                         lr=params.learning_rate,
                         betas=(params.beta1, params.beta2))
optimizer_g = optim.Adam(generator.parameters(),
                         lr=params.learning_rate,
                         betas=(params.beta1, params.beta2))
data_itr_src = get_data_iter("MNIST", train=True)
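use_cuda is computed above but never applied to the three models in this fragment; a typical (assumed) follow-up moves them to the GPU before training:

if use_cuda:
    classifier = classifier.cuda()
    critic = critic.cuda()
    generator = generator.cuda()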
Example #10
train_data, val_data, test_data = data_utils.load_dataset(opt)
train_generator = data_utils.data_generator(train_data, train=True, opt=opt)
# val_generator = data_utils.data_generator(val_data, train=False, opt=opt)
# test_dl_generator = data_utils.data_generator(test_data, train=False, dynamic_length=True, opt=opt)

### ### ### ### ### ### ### ###

### ! Setup Models ! ###
netG = generator.Generator(1, 64, (3, 3, 3), 2, device).to(device)

if device.type == 'cuda' and opt.ngpu > 1:
    netG = nn.DataParallel(netG, list(range(opt.ngpu)))

netG.apply(weight_init.weight_init)

netD = discriminator.Discriminator().to(device)
if device.type == 'cuda' and opt.ngpu > 1:
    netD = nn.DataParallel(netD, list(range(opt.ngpu)))
netD.apply(weight_init.weight_init)

### ### ### ### ### ### ### ###

### ! Setup Loss and Optimizer ! ###
# Two MSE losses: an adversarial term (x = D(G(V)), target y = 1)
# and a reconstruction term (x = G(V), target y = V)
criterion = nn.MSELoss()
lossG = nn.MSELoss()
optimizerG = optim.Adam(netG.parameters(), lr=0.0001, betas=(opt.beta1, 0.999))
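The snippet ends before the discriminator's optimizer is defined. A symmetric, purely hypothetical continuation, plus one sketched generator update combining the two MSE terms (V stands for the input volume named in the comments above):

optimizerD = optim.Adam(netD.parameters(), lr=0.0001, betas=(opt.beta1, 0.999))

# one sketched generator step
fake = netG(V)
adv = criterion(netD(fake).view(-1), torch.ones(fake.size(0), device=device))  # D(G(V)) -> 1
rec = lossG(fake, V)                                                           # G(V) -> V
optimizerG.zero_grad()
(adv + rec).backward()
optimizerG.step()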