Example #1
def train():
    ###################
#     loadedimage = extract_data()
#     saveimg("data00.png",loadedimage,0)
#     save_images(loadedimage, [8, 8],'data0.png')
#     loadedimage = extract_data()
#     save_images(loadedimage, [8, 8],'data1.png')
#     exit()
    ###################

    images = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, IMAGE_CHANNEL])
    z = tf.placeholder(tf.float32, [BATCH_SIZE, Z_DIM], name='z')

    G = generator(z)
    D_logits  = discriminator(images)
    samples = sampler(z)
    D_logits_F = discriminator(G)

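    # WGAN objectives: the generator maximizes the critic's score on fakes,
    # while the critic maximizes D(real) - D(fake); both are written here as
    # losses to be minimized.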
    gen_cost = -tf.reduce_mean(D_logits_F)
    disc_cost = tf.reduce_mean(D_logits_F) - tf.reduce_mean(D_logits)
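    # WGAN-GP gradient penalty (Gulrajani et al., 2017): sample points uniformly
    # along straight lines between real and generated images, and penalize the
    # critic wherever its gradient norm at those points deviates from 1.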
    alpha = tf.random_uniform(shape=[BATCH_SIZE,1], minval=0.0,maxval=1.0)
    differences = G - images
    differences = tf.reshape(differences,[BATCH_SIZE,-1])
    imagereshape = tf.reshape(images,[BATCH_SIZE,-1])
    interpolates = imagereshape + (alpha*differences)
    interpolates = tf.reshape(interpolates,images.shape)
    gradients = tf.gradients(discriminator(interpolates), [interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))  # per-example gradient norm over all non-batch axes
    gradient_penalty = tf.reduce_mean((slopes-1.)**2)
    
    LAMBDA = 10 # Gradient penalty lambda hyperparameter
    disc_cost += LAMBDA*gradient_penalty

    #t_vars = tf.trainable_variables()
    #d_vars = [var for var in t_vars if 'd_' in var.name]
    #g_vars = [var for var in t_vars if 'g_' in var.name]
    g_vars = ConvNet.getParam([gfc0,gdc0,gdc1,gdc2,gdc3])
    d_vars = ConvNet.getParam([dcv0,dcv1,dcv2,dcv3,dfc0])

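    # Adam with beta1=0.5 and beta2=0.9, the settings used in the WGAN-GP paper.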
    d_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(disc_cost, var_list=d_vars)        
    g_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(gen_cost, var_list=g_vars)

    sess = tf.Session()

    sample_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
    
    init = tf.global_variables_initializer()
    sess.run(init)

    for idx in xrange(0, 10000):
        print(idx)
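        # n_critic = 5: update the critic five times per generator step.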
        for _ in xrange(5):
            batch_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
            loadedimage = extract_data()
            sess.run(d_optim, feed_dict = {z:batch_z, images:loadedimage})

        batch_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
        sess.run(g_optim, feed_dict = {z: batch_z})

        if idx % 10 == 0:
            sample = sess.run(samples, feed_dict = {z: sample_z})
            samples_path = CURRENT_DIR + '\\out\\'
            save_images(sample, [8, 8], samples_path + 'sample_%d.png' % (idx))


    sess.close()
Example #2
def train():

    images = tf.placeholder(tf.float32, [BATCH_SIZE, MNISTData.IMAGE_H, MNISTData.IMAGE_W, MNISTData.IMAGE_CHANNEL])

    z = tf.placeholder(tf.float32, [BATCH_SIZE, Z_DIM])

    testz = tf.placeholder(tf.float32, [testBATCH_SIZE, Z_DIM])

    G = generator(z)
    D_logits  = discriminator(images)
    samples = generator(testz)
    D_logits_F = discriminator(G)

    gen_cost = -tf.reduce_mean(D_logits_F)
    disc_cost = tf.reduce_mean(D_logits_F) - tf.reduce_mean(D_logits)
    alpha = tf.random_uniform(shape=[BATCH_SIZE,1], minval=0.0,maxval=1.0)
    differences = G - images
    differences = tf.reshape(differences,[BATCH_SIZE,-1])
    imagereshape = tf.reshape(images,[BATCH_SIZE,-1])
    interpolates = imagereshape + (alpha*differences)
    interpolates = tf.reshape(interpolates,images.shape)
    gradients = tf.gradients(discriminator(interpolates), [interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))
    gradient_penalty = tf.reduce_mean((slopes-1.)**2)
    
    LAMBDA = 10 # Gradient penalty lambda hyperparameter
    disc_cost += LAMBDA*gradient_penalty

    g_vars = ConvNet.getParam(glist)
    d_vars = ConvNet.getParam(dlist)

    d_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(disc_cost, var_list=d_vars)        
    g_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(gen_cost, var_list=g_vars)

    sample_z = np.random.uniform(-1, 1, size = (testBATCH_SIZE, Z_DIM))


    sess = tf.Session()
    
    init = tf.global_variables_initializer()  
    sess.run(init)

    start_time = time.time()
    idx = 0
    while True:
        idx = idx + 1
        elapsed_time = time.time() - start_time
        start_time = time.time()
        print(str(idx)+","+str(elapsed_time))
        
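        # Two critic updates per generator step (a lighter schedule than the usual n_critic = 5).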
        for _ in xrange(2):
            batch_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
            loadedimage,_ = MNISTData.extract_data(BATCH_SIZE)

            sess.run(d_optim, feed_dict = {z:batch_z, images:loadedimage})

        batch_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
        sess.run(g_optim, feed_dict = {z: batch_z})

        if idx % 50 == 0:
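            # Render a fixed latent batch and write the image grid from a
            # background thread so disk I/O does not stall training.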

            sample = sess.run(samples, feed_dict = {testz: sample_z})

            def imgSave(idx,sample):
                ConvNet.saveImagesMono(sample, saveSize, 'out\\sample_%d.png' % (idx))
                
                
            t = threading.Thread(target=imgSave,args=(idx,sample))
            t.start()
            
        if idx % 500 == 0:
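            # Snapshot generator and critic weights: parameters are copied out
            # of the session on the main thread, then written to disk on a worker.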
            
            def save(idx, gSaver, dSaver):
                print("start save")
                saveToFile = ConvNet.openEmptyFileW("gan0g"+str(idx)+".txt")
                for item in gSaver:
                    item(saveToFile)
                saveToFile.flush();saveToFile.close()
 
                saveToFile = ConvNet.openEmptyFileW("gan0d"+str(idx)+".txt")
                for item in dSaver:
                    item(saveToFile)
                saveToFile.flush();saveToFile.close()
                print("end save")
            
            gSaver = []
            dSaver = []
            for item in glist:
                gSaver.append(item.getSaver(sess))
            for item in dlist:
                dSaver.append(item.getSaver(sess))

            t = threading.Thread(target=save,args=(idx,gSaver, dSaver))
            t.start()
    sess.close()
Example #3
def train():
    ###################
#     loadedimage = extract_data()
#     ConvNet.saveImages(loadedimage, [8, 8], "test0.png")
#     loadedimage = extract_data()
#     ConvNet.saveImages(loadedimage, [8, 8], "test1.png")
#     exit()

    
    
#     for idx in xrange(0, 1000000000):
#         loadedimage = extract_data()
#         global file_index
#         global content_index
#         print(str(file_index)+","+str(content_index))         
#     exit()

    ###################
    
    images = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])
    z = tf.placeholder(tf.float32, [BATCH_SIZE, Z_DIM])
    
    G = generator(z)
    D_logits  = tf.reduce_mean(discriminator(images))
    D_logits_F = tf.reduce_mean(discriminator(G))
    
    gen_cost = -D_logits_F
    disc_cost = D_logits_F - D_logits

    alpha = tf.random_uniform(shape=[BATCH_SIZE,1], minval=0.0,maxval=1.0)
    differences = G - images
    differences = tf.reshape(differences,[BATCH_SIZE,-1])
    imagereshape = tf.reshape(images,[BATCH_SIZE,-1])
    interpolates = imagereshape + (alpha*differences)
    interpolates = tf.reshape(interpolates,images.shape)
    gradients = tf.gradients(discriminator(interpolates), [interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))
    gradient_penalty = tf.reduce_mean((slopes-1.0)**2)
    
    LAMBDA = 10 # Gradient penalty lambda hyperparameter
    disc_cost += LAMBDA*gradient_penalty

    g_vars = ConvNet.getParam(glist)
    d_vars = ConvNet.getParam(dlist)

    d_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(disc_cost, var_list=d_vars)        
    g_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(gen_cost, var_list=g_vars)

    sess = tf.Session()

    sample_z1 = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
    sample_z2 = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
    #sample_z3 = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
    #sample_z4 = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
    #for i in xrange(0,BATCH_SIZE):
    #    sample_z[i] = np.random.uniform(-(i/BATCH_SIZE), (i/BATCH_SIZE), size = (Z_DIM))
    
    init = tf.global_variables_initializer()  
    sess.run(init)

    start_time = time.time()
    idx = 0
    while True:
        idx = idx + 1
        elapsed_time = time.time() - start_time
        start_time = time.time()

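        # Accumulate the critic's mean scores on real (dt) and fake (df) batches for the log line below.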
        dt = 0
        df = 0
        for _ in xrange(2):
            batch_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
            loadedimage = CBHR.extract_data()
            _,d1,d2 = sess.run([d_optim,D_logits,D_logits_F], feed_dict = {z:batch_z, images:loadedimage})
            dt = dt + d1
            df = df + d2

        print(str(idx)+","+str(CBHR.file_index)+","+str(CBHR.content_index)+","+str(elapsed_time)+","+str(dt)+","+str(df))

        batch_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
        sess.run(g_optim, feed_dict = {z: batch_z})

        if idx % 10 == 0:

            sample = np.zeros([BATCH_SIZE*2, IMAGE_H, IMAGE_W, IMAGE_CHANNEL], dtype=np.float32)
            sample[BATCH_SIZE*0:BATCH_SIZE*1] = sess.run(G, feed_dict = {z: sample_z1})
            sample[BATCH_SIZE*1:BATCH_SIZE*2] = sess.run(G, feed_dict = {z: sample_z2})
            #sample[BATCH_SIZE*2:BATCH_SIZE*3] = sess.run(G, feed_dict = {z: sample_z3})
            #sample[BATCH_SIZE*3:BATCH_SIZE*4] = sess.run(G, feed_dict = {z: sample_z4})

            def imgSave(idx,sample):
                ConvNet.saveImages(sample, [4,8], 'out11\\sample_%d.png' % (idx))
                
                
            t = threading.Thread(target=imgSave,args=(idx,sample))
            t.start()
        
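        # Graceful shutdown: every 10 iterations check for a stop.txt marker file;
        # if present, save a final checkpoint below and break out of the loop.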
        exist = False
        
        if idx%10 == 0:
            exist = os.path.exists("stop.txt")
            
        if idx % 2000 == 0 or exist:
            
            def save(idx, gSaver, dSaver):
                print("start save")
                saveToFile = ConvNet.openEmptyFileW("gan11g"+str(idx)+".txt")
                for item in gSaver:
                    item(saveToFile)
                saveToFile.flush();saveToFile.close()
 
                saveToFile = ConvNet.openEmptyFileW("gan11d"+str(idx)+".txt")
                for item in dSaver:
                    item(saveToFile)
                saveToFile.flush();saveToFile.close()
                print("end save")
            
            gSaver = []
            dSaver = []
            for item in glist:
                gSaver.append(item.getSaver(sess))
            for item in dlist:
                dSaver.append(item.getSaver(sess))

            t = threading.Thread(target=save,args=(idx,gSaver, dSaver))
            t.start()
            
            
        if exist:
            break

    sess.close()
Example #4
def train():

    imagesHR = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])
    imagesLR = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])
    
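    # Super-resolution variant: the generator is conditioned on a low-res image
    # (same tensor shape, so presumably pre-upsampled) instead of a noise vector,
    # and the critic scores real versus generated high-res images.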
    G = generator(imagesLR)
    D_logits  = discriminator(imagesHR)
    D_logits_F = discriminator(G)

    gen_cost = -tf.reduce_mean(D_logits_F)
    disc_cost = tf.reduce_mean(D_logits_F) - tf.reduce_mean(D_logits)
    alpha = tf.random_uniform(shape=[BATCH_SIZE,1], minval=0.0,maxval=1.0)
    differences = G - imagesHR
    differences = tf.reshape(differences,[BATCH_SIZE,-1])
    imagereshape = tf.reshape(imagesHR,[BATCH_SIZE,-1])
    interpolates = imagereshape + (alpha*differences)
    interpolates = tf.reshape(interpolates,imagesHR.shape)
    gradients = tf.gradients(discriminator(interpolates), [interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))
    gradient_penalty = tf.reduce_mean((slopes-1.)**2)
    
    LAMBDA = 10 # Gradient penalty lambda hyperparameter
    disc_cost += LAMBDA*gradient_penalty

    g_vars = ConvNet.getParam(glist)
    d_vars = ConvNet.getParam(dlist)

    d_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(disc_cost, var_list=d_vars)        
    g_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(gen_cost, var_list=g_vars)

    sess = tf.Session()

    init = tf.global_variables_initializer()  
    sess.run(init)
    
    sampleLR = CBLR.extract_data()
    _ = CBHR.extract_data()
    ConvNet.saveImages(sampleLR, saveSize, 'out12sr\\demo.png')
    start_time = time.time()
    idx = 0
    while True:
        idx = idx + 1
        elapsed_time = time.time() - start_time
        start_time = time.time()
        print(str(idx)+","+str(CBHR.file_index)+","+str(CBHR.content_index)+","+str(elapsed_time))
        
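        # CBLR and CBHR are read in lockstep, presumably yielding matching
        # low-/high-res batches of the same underlying images.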
        for _ in xrange(2):
            loadedimageLR = CBLR.extract_data()
            loadedimageHR = CBHR.extract_data()
            sess.run(d_optim, feed_dict = {imagesLR:loadedimageLR,imagesHR:loadedimageHR})

        loadedimageLR = CBLR.extract_data()
        loadedimageHR = CBHR.extract_data()
        sess.run(g_optim, feed_dict = {imagesLR:loadedimageLR})

        if idx % 200 == 0:

            sample = sess.run(G, feed_dict = {imagesLR:sampleLR})

            def imgSave(idx,sample):
                ConvNet.saveImages(sample, saveSize, 'out12sr\\sample_%d.png' % (idx))
                
                
            t = threading.Thread(target=imgSave,args=(idx,sample))
            t.start()

        exist = False
        
        if idx%10 == 0:
            exist = os.path.exists("stop.txt")
            
        if idx % 2000 == 0 or exist:
            
            def save(idx, gSaver, dSaver):
                print("start save")
                saveToFile = ConvNet.openEmptyFileW("gan12srg"+str(idx)+".txt")
                for item in gSaver:
                    item(saveToFile)
                saveToFile.flush();saveToFile.close()
 
                saveToFile = ConvNet.openEmptyFileW("gan12srd"+str(idx)+".txt")
                for item in dSaver:
                    item(saveToFile)
                saveToFile.flush();saveToFile.close()
                print("end save")
            
            gSaver = []
            dSaver = []
            for item in glist:
                gSaver.append(item.getSaver(sess))
            for item in dlist:
                dSaver.append(item.getSaver(sess))

            t = threading.Thread(target=save,args=(idx,gSaver, dSaver))
            t.start()
            
            
        if exist:
            break

    sess.close()
Example #5
def train():
    ###################
#     loadedimage = extract_data()
#     ConvNet.saveImages(loadedimage, [8, 8], "test0.png")
#     loadedimage = extract_data()
#     ConvNet.saveImages(loadedimage, [8, 8], "test1.png")
#     exit()

    
    
#     for idx in xrange(0, 1000000000):
#         loadedimage = extract_data()
#         global file_index
#         global content_index
#         print(str(file_index)+","+str(content_index))         
#     exit()

    ###################
    
    images = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])
    z = tf.placeholder(tf.float32, [BATCH_SIZE, Z_DIM])
    
    #testz = tf.placeholder(tf.float32, [testBATCH_SIZE, Z_DIM])
    
    G = generator(z)
    D_logits  = discriminator(images)
    D_logits_F = discriminator(G)

    gen_cost = -tf.reduce_mean(D_logits_F)
    disc_cost = tf.reduce_mean(D_logits_F) - tf.reduce_mean(D_logits)
    alpha = tf.random_uniform(shape=[BATCH_SIZE,1], minval=0.0,maxval=1.0)
    differences = G - images
    differences = tf.reshape(differences,[BATCH_SIZE,-1])
    imagereshape = tf.reshape(images,[BATCH_SIZE,-1])
    interpolates = imagereshape + (alpha*differences)
    interpolates = tf.reshape(interpolates,images.shape)
    gradients = tf.gradients(discriminator(interpolates), [interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))
    gradient_penalty = tf.reduce_mean((slopes-1.)**2)
    
    LAMBDA = 10 # Gradient penalty lambda hyperparameter
    disc_cost += LAMBDA*gradient_penalty

    g_vars = ConvNet.getParam(glist)
    d_vars = ConvNet.getParam(dlist)

    global_step = tf.Variable(0, trainable=False)
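    # NOTE: global_step is never passed to minimize(), so it is never incremented
    # and the decayed learning rate below stays disabled.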
    #LR = tf.train.exponential_decay(learning_rate=0.01,global_step= global_step, decay_steps=1000,decay_rate= 0.95,staircase=True)
        #starter_learning_rate, global_step, 100000, 0.96, staircase=True)
        
    d_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(disc_cost, var_list=d_vars)        
    g_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(gen_cost, var_list=g_vars)


    sess = tf.Session()
    
    sample_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
    
    init = tf.global_variables_initializer()  
    sess.run(init)

    start_time = time.time()
    for idx in xrange(start, end):

        for _ in xrange(2):
            batch_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
            loadedimage = extract_data()
            _,genCost,discCost,globalStep = sess.run([d_optim,gen_cost,disc_cost,global_step], feed_dict = {z:batch_z, images:loadedimage})

        global file_index
        global content_index
        elapsed_time = time.time() - start_time
        start_time = time.time()
        print(str(idx)+","+str(file_index)+","+str(content_index)+","+str(elapsed_time)+","+str(genCost)+","+str(discCost)+","+str(globalStep))

        batch_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
        sess.run(g_optim, feed_dict = {z: batch_z})

        if idx % 100 == 0:

            sample = sess.run(G, feed_dict = {z: sample_z})

            def imgSave(idx,sample):
                ConvNet.saveImages(sample, saveSize, 'out\\sample_%d.png' % (idx))
                
                
            t = threading.Thread(target=imgSave,args=(idx,sample))
            t.start()
            
        if idx % 1000 == 0:
            
            def save(idx, gSaver, dSaver):
                print("start save")
                saveToFile = ConvNet.openEmptyFileW("gan10g"+str(idx)+".txt")
                for item in gSaver:
                    item(saveToFile)
                saveToFile.flush();saveToFile.close()
 
                saveToFile = ConvNet.openEmptyFileW("gan10d"+str(idx)+".txt")
                for item in dSaver:
                    item(saveToFile)
                saveToFile.flush();saveToFile.close()
                print("end save")
            
            gSaver = []
            dSaver = []
            for item in glist:
                gSaver.append(item.getSaver(sess))
            for item in dlist:
                dSaver.append(item.getSaver(sess))

            t = threading.Thread(target=save,args=(idx,gSaver, dSaver))
            t.start()
    sess.close()
Example #6
def train():
    ###################
#     loadedimage = extract_data()
#     ConvNet.saveImagesMono(loadedimage, saveSize, "test0.png")
#     loadedimage = extract_data()
#     ConvNet.saveImagesMono(loadedimage, saveSize, "test1.png")
#     exit()

    
    
#     for idx in xrange(0, 1000000000):
#         loadedimage = extract_data()
#         global file_index
#         global content_index
#         print(str(file_index)+","+str(content_index))         
#     exit()

    ###################
    
    images = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])
    y_fill = tf.placeholder(tf.float32, shape=(BATCH_SIZE, IMAGE_H, IMAGE_W, V_DIM))
    
    z = tf.placeholder(tf.float32, [BATCH_SIZE, Z_DIM])
    y_label = tf.placeholder(tf.float32, shape=(BATCH_SIZE, V_DIM))
    
    testz = tf.placeholder(tf.float32, [testBATCH_SIZE, Z_DIM])
    test_label = tf.placeholder(tf.float32, shape=(testBATCH_SIZE, V_DIM))
    
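    # Conditional WGAN-GP: the label enters the generator as a vector (y_label)
    # and the critic as label planes tiled to the image's spatial size (y_fill).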
    G = generator(z,y_label)
    D_logits  = discriminator(images,y_fill)
    samples = generator(testz,test_label)
    D_logits_F = discriminator(G,y_fill)

    gen_cost = -tf.reduce_mean(D_logits_F)
    disc_cost = tf.reduce_mean(D_logits_F) - tf.reduce_mean(D_logits)
    alpha = tf.random_uniform(shape=[BATCH_SIZE,1], minval=0.0,maxval=1.0)
    differences = G - images
    differences = tf.reshape(differences,[BATCH_SIZE,-1])
    imagereshape = tf.reshape(images,[BATCH_SIZE,-1])
    interpolates = imagereshape + (alpha*differences)
    interpolates = tf.reshape(interpolates,images.shape)
    gradients = tf.gradients(discriminator(interpolates,y_fill), [interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))
    gradient_penalty = tf.reduce_mean((slopes-1.)**2)
    
    LAMBDA = 10 # Gradient penalty lambda hyperparameter
    disc_cost += LAMBDA*gradient_penalty

    g_vars = ConvNet.getParam(glist)
    d_vars = ConvNet.getParam(dlist)

    d_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(disc_cost, var_list=d_vars)        
    g_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(gen_cost, var_list=g_vars)

    sample_z = np.random.uniform(-1, 1, size = (testBATCH_SIZE, Z_DIM))
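    # Fixed evaluation labels: four blocks of the ten classes, encoded as +1 for
    # the target class and -1 everywhere else.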
    testLabel = np.ndarray([testBATCH_SIZE,V_DIM], np.float32)
    for j in xrange(0,4):
        for i in xrange(0,10):
            testLabel[j*10+i] = -1
            testLabel[j*10+i,i] = 1

    sess = tf.Session()
    
    init = tf.global_variables_initializer()
    sess.run(init)

    start_time = time.time()
    for idx in xrange(0, 50001):
        global content_index
        elapsed_time = time.time() - start_time
        start_time = time.time()
        print(str(idx)+","+str(content_index)+","+str(elapsed_time))
        
        for _ in xrange(2):
            batch_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
            loadedimage,yLabel = extract_data()
            
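            # Broadcast each label vector across the spatial grid so the critic
            # receives it as V_DIM extra feature maps alongside the image.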
            yFill = yLabel.reshape([BATCH_SIZE, 1, 1, V_DIM]) * np.ones([BATCH_SIZE, IMAGE_H, IMAGE_W, V_DIM])

            sess.run(d_optim, feed_dict = {z:batch_z,y_label:yLabel,y_fill: yFill, images:loadedimage})

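        # The generator update reuses yLabel/yFill from the last critic batch.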
        batch_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
        sess.run(g_optim, feed_dict = {z: batch_z,y_label:yLabel,y_fill: yFill})

        if idx % 50 == 0:

            sample = sess.run(samples, feed_dict = {testz: sample_z,test_label:testLabel})

            def imgSave(idx,sample):
                ConvNet.saveImagesMono(sample, saveSize, 'out\\sample_%d.png' % (idx))
                
                
            t = threading.Thread(target=imgSave,args=(idx,sample))
            t.start()
            
        if idx % 500 == 0:
            
            def save(idx, gSaver, dSaver):
                print("start save")
                saveToFile = ConvNet.openEmptyFileW("gan0g"+str(idx)+".txt")
                for item in gSaver:
                    item(saveToFile)
                saveToFile.flush();saveToFile.close()
 
                saveToFile = ConvNet.openEmptyFileW("gan0d"+str(idx)+".txt")
                for item in dSaver:
                    item(saveToFile)
                saveToFile.flush();saveToFile.close()
                print("end save")
            
            gSaver = []
            dSaver = []
            for item in glist:
                gSaver.append(item.getSaver(sess))
            for item in dlist:
                dSaver.append(item.getSaver(sess))

            t = threading.Thread(target=save,args=(idx,gSaver, dSaver))
            t.start()
    sess.close()
Example #7
def train():
    ###################
#     loadedimage = extract_data()
#     ConvNet.saveImages(loadedimage, [8, 8], "test0.png")
#     loadedimage = extract_data()
#     ConvNet.saveImages(loadedimage, [8, 8], "test1.png")
#     exit()
    ###################

    images = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])
    z = tf.placeholder(tf.float32, [BATCH_SIZE, Z_DIM], name='z')

    G = generator(z)
    D_logits  = discriminator(images)
    samples = sampler(z)
    D_logits_F = discriminator(G)

    gen_cost = -tf.reduce_mean(D_logits_F)
    disc_cost = tf.reduce_mean(D_logits_F) - tf.reduce_mean(D_logits)
    alpha = tf.random_uniform(shape=[BATCH_SIZE,1], minval=0.0,maxval=1.0)
    differences = G - images
    differences = tf.reshape(differences,[BATCH_SIZE,-1])
    imagereshape = tf.reshape(images,[BATCH_SIZE,-1])
    interpolates = imagereshape + (alpha*differences)
    interpolates = tf.reshape(interpolates,images.shape)
    gradients = tf.gradients(discriminator(interpolates), [interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))
    gradient_penalty = tf.reduce_mean((slopes-1.)**2)
    
    LAMBDA = 10 # Gradient penalty lambda hyperparameter
    disc_cost += LAMBDA*gradient_penalty

    g_vars = ConvNet.getParam([gfc0,gdc0,gdc1,gdc2,gdc3])
    d_vars = ConvNet.getParam([dcv0,dcv1,dcv2,dcv3,dfc0])

    d_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(disc_cost, var_list=d_vars)        
    g_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(gen_cost, var_list=g_vars)

    sess = tf.Session()

    sample_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
    
    init = tf.global_variables_initializer()
    sess.run(init)

    for idx in xrange(0, 10000):
        print(idx)
        for _ in xrange(5):
            batch_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
            loadedimage = extract_data()
            sess.run(d_optim, feed_dict = {z:batch_z, images:loadedimage})

        batch_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
        sess.run(g_optim, feed_dict = {z: batch_z})

        if idx % 50 == 0:
            sample = sess.run(samples, feed_dict = {z: sample_z})
            ConvNet.saveImages(sample, [8, 8], 'out\\sample_%d.png' % (idx))
            
        if idx % 100 == 0:
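            # Manual checkpoint: pull each layer's biases and weights out of the
            # session, then serialize everything on a worker thread.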
            
            def save(
                bgfc0, wgfc0,
                bgdc0, wgdc0,
                bgdc1, wgdc1,
                bgdc2, wgdc2,
                bgdc3, wgdc3,
                bdcv0, wdcv0,
                bdcv1, wdcv1,
                bdcv2, wdcv2,
                bdcv3, wdcv3,
                bdfc0, wdfc0
                    ):
                print("start save")
                saveToFile = ConvNet.openEmptyFileW('gan9g.txt')
                gfc0.save_ToFile(bgfc0, wgfc0, saveToFile)
                gdc0.save_ToFile(bgdc0, wgdc0, saveToFile)
                gdc1.save_ToFile(bgdc1, wgdc1, saveToFile)
                gdc2.save_ToFile(bgdc2, wgdc2, saveToFile)
                gdc3.save_ToFile(bgdc3, wgdc3, saveToFile)
                saveToFile.flush();saveToFile.close()

                saveToFile = ConvNet.openEmptyFileW('gan9d.txt')
                dcv0.save_ToFile(bdcv0, wdcv0, saveToFile)
                dcv1.save_ToFile(bdcv1, wdcv1, saveToFile)
                dcv2.save_ToFile(bdcv2, wdcv2, saveToFile)
                dcv3.save_ToFile(bdcv3, wdcv3, saveToFile)
                dfc0.save_ToFile(bdfc0, wdfc0, saveToFile)
                saveToFile.flush();saveToFile.close()
                print("end save")
                
            bgfc0, wgfc0 = gfc0.save_getParam(sess)
            bgdc0, wgdc0 = gdc0.save_getParam(sess)
            bgdc1, wgdc1 = gdc1.save_getParam(sess)
            bgdc2, wgdc2 = gdc2.save_getParam(sess)
            bgdc3, wgdc3 = gdc3.save_getParam(sess)
            bdcv0, wdcv0 = dcv0.save_getParam(sess)
            bdcv1, wdcv1 = dcv1.save_getParam(sess)
            bdcv2, wdcv2 = dcv2.save_getParam(sess)
            bdcv3, wdcv3 = dcv3.save_getParam(sess)
            bdfc0, wdfc0 = dfc0.save_getParam(sess)

            t = threading.Thread(target=save,args=(
                bgfc0, wgfc0,
                bgdc0, wgdc0,
                bgdc1, wgdc1,
                bgdc2, wgdc2,
                bgdc3, wgdc3,
                bdcv0, wdcv0,
                bdcv1, wdcv1,
                bdcv2, wdcv2,
                bdcv3, wdcv3,
                bdfc0, wdfc0
                    ))
            t.start()
    sess.close()