# --- Setup: pre-trained encoder + feature-matching GAN (generator/discriminator) ---

# NOTE(review): D0 is constructed but never used in the visible code — confirm.
D0 = model.E_Net_2()
# Load the saved encoder weights (a state dict) from disk.
encoder_model = torch.load("./Dmodel")

# 24*24

# Restore the pre-trained encoder weights into a fresh E_Net_2 on the GPU.
enet = model.E_Net_2().cuda()
enet.load_state_dict(encoder_model)

# Generator input width: 240 (presumably the encoder feature size — TODO
# confirm) plus the latent/hidden dimension.
add_in_feature = 240+ hidden_d # Add one dimension data for the input_feature data.
gnet = model.G_Net_FM_3(ngpu,add_in_feature,main_gpu=main_gpu).cuda()
# g_model = torch.load("./fm21/G_95000.model")
# gnet.load_state_dict(g_model)
# Discriminator input channels; why 2 is not visible here — TODO confirm.
d_in_demension = 2
dnet = model.D_Net_conv(ngpu,d_in_demension,main_gpu=main_gpu).cuda()
# nosie_d = 10

# Separate Adam optimizers for generator and discriminator, same LR.
g_net_optimizer = optim.Adam(gnet.parameters(),lr = 1e-4)
d_net_optimizer = optim.Adam(dnet.parameters(), lr = 1e-4)

check_points = 500        # presumably a save/report interval — confirm in loop body
num_epoches = 100000
criterion = nn.BCELoss()  # binary cross-entropy for the real/fake GAN loss

# NOTE: range(1, num_epoches) runs num_epoches - 1 iterations, starting at 1.
for epoch in (range(1,num_epoches)):
    # print("give me a clue")
    # data, label = mnist.train.next_batch(batch_size)
    # Draw a shuffled batch spanning all ten digit classes.
    data, label = mm.batch_next(batch_size, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], shuffle=True)
    # Reset gradients on both nets (the loop body continues beyond this chunk).
    dnet.zero_grad()
    gnet.zero_grad()
# ---------------------------------------------------------------------------
# Example #2 — extraction marker: the code below comes from a separate script.
# ---------------------------------------------------------------------------
mm = data_convert.owndata()  # project-local dataset wrapper

num = '0'  # run tag appended to the output directory name
# NOTE(review): datetime.now() renders with spaces and colons, so this
# directory name is not filesystem-friendly on all platforms; other
# sections of this file attempt to replace spaces — confirm intent.
out_dir = 'out_fc_{}_{}/'.format(datetime.now(), num)
if not os.path.exists(out_dir):
    os.makedirs(out_dir)
    # Keep a copy of the launching script next to the results for reproducibility.
    shutil.copyfile(sys.argv[0], out_dir + '/training_script.py')
# else:
#     print("you have already creat one.")
#     exit(1)
sys.stdout = mutil.Logger(out_dir)  # tee stdout into a log file under out_dir

in_channel = 1  # discriminator input channels (grayscale)
G = model.G_Net_conv(ngpu).cuda()
D = model.D_Net_conv(ngpu, in_channel).cuda()
"""Weight Initialization"""

def weights_init(m):
    """DCGAN-style initializer: draw Conv* weights from N(0, 0.02).

    Intended for use via ``Module.apply``; modules whose class name does
    not contain 'Conv' are left untouched.
    """
    if 'Conv' in m.__class__.__name__:
        m.weight.data.normal_(0.0, 0.02)


# Apply the DCGAN weight init to both networks before training.
G.apply(weights_init)
# G.load_state_dict(torch.load('./out_conv_part/G_20000.model'))
D.apply(weights_init)

out_dir = './cifar100_result/basic_{}_{}/'.format(datetime.now(), num)
# BUG FIX: str.replace returns a new string; the original discarded the
# result, leaving the spaces from datetime.now() in the directory name.
out_dir = out_dir.replace(" ", "_")

if not os.path.exists(out_dir):
    os.makedirs(out_dir)
    # Keep a copy of the launching script with the results for reproducibility.
    shutil.copyfile(sys.argv[0], out_dir + '/training_script.py')

sys.stdout = mutil.Logger(out_dir)  # tee stdout into a log file under out_dir
in_channel = 4  # discriminator input channels — why 4 vs. G's 3 outputs is not visible; confirm
d_num = 3       # NOTE(review): re-assigned to the same value again below

# Conditional generator: input is the latent vector concatenated with a label
# embedding (Z_dim + label_dim channels), producing 3-channel (RGB) images.
G = model.G_Net_conv_32(ngpu,
                        main_gpu=gpu,
                        in_channel=Z_dim + label_dim,
                        out_channel=3).cuda()
D = model.D_Net_conv(ngpu, in_channel, main_gpu=gpu).cuda()
"""Weight Initialization"""
# def weights_init(m):
#     classname = m.__class__.__name__
#     if classname.find('Conv') != -1:
#         m.weight.data.normal_(0.0, 0.02)
""" ===================== TRAINING ======================== """

d_num = 3
# avd_num = 1/d_num
G_solver = optim.Adam(G.parameters(), lr=1e-4)
D_solver = optim.Adam(D.parameters(), lr=2e-5)  # D deliberately learns slower than G here

# Constant target labels for the BCE GAN loss (batch of ones / zeros).
# NOTE(review): torch.autograd.Variable is deprecated since PyTorch 0.4;
# plain tensors behave identically.
ones_label = Variable(torch.ones(mb_size)).cuda()
zeros_label = Variable(torch.zeros(mb_size)).cuda()
# ---------------------------------------------------------------------------
# Example #4 — extraction marker: the code below comes from a separate script.
# ---------------------------------------------------------------------------
    # BUG FIX: under Python 3, i / 6 is a float and floats are invalid as
    # indices; floor division gives the intended integer column and is
    # identical for non-negative ints under Python 2 as well.
    # (Block one-hot labels: rows i..i+5 get a 1 in column i // 6 —
    # enclosing loop is outside this chunk; confirm against it.)
    c_label[i:(i + 6), i // 6] = 1.

sys.stdout = mutil.Logger(out_dir)  # tee stdout into a log file under out_dir
# else:
#     print("you have already creat one.")
#     exit(1)

#
#
# def xavier_init(size):
#     in_dim = size[0]
#     xavier_stddev = 1. / np.sqrt(in_dim / 2.)
#     return Variable(torch.randn(*size) * xavier_stddev, requires_grad=True)
# NOTE(review): in_channel is set to 2, yet D and E below are constructed
# with 1 input channel — confirm which value is intended.
in_channel = 2
G = model.G_Net_conv(ngpu).cuda()
D = model.D_Net_conv(ngpu, 1).cuda()   # discriminator (1-channel input)
E = model.Ev_Net_conv(ngpu, 1).cuda()  # Ev_Net — role not visible in this chunk
"""Weight Initialization"""


def weights_init(m):
    """Initialize Conv* layer weights from N(0, 0.02), per the DCGAN paper.

    Pass to ``Module.apply``; anything that is not a conv layer is skipped.
    """
    name = type(m).__name__
    if name.find('Conv') >= 0:
        m.weight.data.normal_(0.0, 0.02)


# Apply the conv weight init to G and D.
# NOTE(review): E is left at its default initialization — confirm intentional.
G.apply(weights_init)
D.apply(weights_init)
# ---------------------------------------------------------------------------
# Example #5 — extraction marker: the code below comes from a separate script.
# ---------------------------------------------------------------------------
cnt = 0  # running counter (used later in the training loop — not visible here)

num = '0'  # run tag appended to the output directory name
out_dir = './cifar100_result/basic_{}_{}/'.format(datetime.now(), num)
# BUG FIX: str.replace returns a new string; the original discarded the
# result, leaving the spaces from datetime.now() in the directory name.
out_dir = out_dir.replace(" ", "_")

if not os.path.exists(out_dir):
    os.makedirs(out_dir)
    # Keep a copy of the launching script with the results for reproducibility.
    shutil.copyfile(sys.argv[0], out_dir + '/training_script.py')

sys.stdout = mutil.Logger(out_dir)  # tee stdout into a log file under out_dir
in_channel = 2  # input channels for each discriminator
d_num = 3       # size of the discriminator ensemble

G = model.G_Net_conv(ngpu).cuda()
# One generator against an ensemble of d_num discriminators.
D_list = [model.D_Net_conv(ngpu, in_channel).cuda() for _ in range(d_num)]
"""Weight Initialization"""


def weights_init(m):
    """Draw the weights of every Conv* module from N(0, 0.02) (DCGAN init)."""
    if 'Conv' in type(m).__name__:
        m.weight.data.normal_(0.0, 0.02)


""" ===================== TRAINING ======================== """

d_num = 3
# avd_num = 1/d_num
G_solver = optim.Adam(G.parameters(), lr=1e-4)