Example 1
    def __init__(self, flg, _dir):
        # Create the output directory for the paired images.
        if not os.path.exists(_dir):
            os.makedirs(_dir)

        # "<_dir>.txt" lists one pair per line, formatted as "<name1>_<name2>".
        with open(_dir + ".txt", 'r') as list_file:
            for line in list_file:
                line = line.strip()
                if not line:
                    continue

                _name = line.split("_")

                in_file1 = getFullPath(_name[0])
                in_file2 = getFullPath(_name[1])

                out_file1 = os.path.join(_dir, line)

                # Stack the two images into a [2, H, W, C] batch and save
                # them side by side as a single 1x2 montage.
                img = np.ndarray([2, IMAGE_H, IMAGE_W, IMAGE_CHANNEL], np.float32)
                img[0] = read_image(in_file1)
                img[1] = read_image(in_file2)

                ConvNet.saveImages(img, [1, 2], out_file1)
Example 2
def render_naive(idx, _name, t_obj, img0=img_noise, iter_n=200, step=0.0001):
    t_score = tf.reduce_mean(t_obj)             # the optimization objective
    t_grad = tf.gradients(t_score, imagesT)[0]  # behold the power of automatic differentiation!

    img = img0.copy()
    for i in range(iter_n):
        g, score = sess.run([t_grad, t_score], {imagesT: img})
        # Normalize the gradient so the same step size works
        # for different layers and networks.
        g /= g.std() + 1e-8
        img += g * step
        print(score)

    ConvNet.saveImages(img, [1, 1], _name + 'demo' + str(idx) + '.png')
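
For orientation, a hedged usage sketch follows; render_naive closes over the module-level sess, imagesT, and img_noise, and the layer name below is hypothetical, not taken from the source:

# Hypothetical call: gradient-ascend on the mean activation of one channel
# of a convolutional layer pulled from the default graph.
# t_layer = tf.get_default_graph().get_tensor_by_name('conv2/Relu:0')
# render_naive(0, 'conv2_', t_layer[:, :, :, 5])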
Example 3
def train():

    images = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])

    net = pixelShuffler(images,scale=1)
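    # With scale=1 the pixel-shuffle is presumably an identity rearrangement,
    # so this run mainly sanity-checks the image input/output round trip.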
    
    
    # Run on CPU only (GPU devices are masked out).
    with tf.Session(config=tf.ConfigProto(device_count={'GPU': 0})) as sess:
        init = tf.global_variables_initializer()
        sess.run(init)

        loadedimage = extract_data()
        sample = sess.run(net, feed_dict = {images:loadedimage})
        
        ConvNet.saveImages(loadedimage, [2,8], 'sample_in.png')

        ConvNet.saveImages(sample, [2,8], 'sample_out.png')
Example 4
def train():

    images = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])

    D = discriminator(images)

    sess = tf.Session()

    init = tf.global_variables_initializer()  
    sess.run(init)
    
    for i in xrange(0, 400):
        print(i)
        # Pick two random CelebA image indices (the dataset has 202,599 images).
        i1 = int(random.uniform(1, 202599))
        i2 = int(random.uniform(1, 202599))

        # Buffer for one ordered pair; only img[0] and img[1] are filled below,
        # which assumes BATCH_SIZE == 1.
        img = np.ndarray([BATCH_SIZE*2, IMAGE_H, IMAGE_W, IMAGE_CHANNEL], np.float32)

        img1, file1 = getImg(i1)
        img2, file2 = getImg(i2)
        
        # Score each image with the discriminator.
        lbl1 = sess.run(D, feed_dict={images: img1})
        lbl2 = sess.run(D, feed_dict={images: img2})
        
        # Keep only pairs whose scores fall inside (-1, 1) and differ by less
        # than 0.5; save the pair with the higher-scored image first.
        if (lbl1 < 1 and
            lbl1 > -1 and
            lbl2 < 1 and
            lbl2 > -1 and
            abs(lbl1 - lbl2) < 0.5):

            if lbl1 > lbl2:
                img[0] = img1
                img[1] = img2
                ConvNet.saveImages(img, [1,2], "E:\\MNIST\\CelebA\\Img\\img_celeba.7z\\trainData\\"+file1+"_"+file2)
            else:
                img[0] = img2
                img[1] = img1
                ConvNet.saveImages(img, [1,2], "E:\\MNIST\\CelebA\\Img\\img_celeba.7z\\trainData\\"+file2+"_"+file1)


    sess.close()
Example 5
def train():

    images = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])

    D = discriminator(images)

    with tf.Session(config=tf.ConfigProto(device_count = {'GPU': 0})) as sess:
    
        init = tf.global_variables_initializer()  
        sess.run(init)
        
        for idx in xrange(0, 500):
            print(idx)
            # Pick a random CelebA image index (the dataset has 202,599 images).
            i1 = int(random.uniform(1, 202599))
    
            img1,file = getImg(i1)
    
            lbl = sess.run(D, feed_dict = {images: img1})
    
            # Encode the score as an integer filename prefix so the saved files
            # sort by score (higher scores get smaller prefixes; lbl is a
            # 1-element array here).
            val = int((100 - lbl) * 200)
    
            ConvNet.saveImages(img1, [1,1], "E:\\MNIST\\CelebA\\Img\\img_celeba.7z\\trainData\\"+str(val)+"-"+str(lbl)+"-"+file)
Example 6
def imgSave(idx, sample):
    ConvNet.saveImages(sample, saveSize, 'out\\sample_%d.png' % (idx))
Example 7
def imgSave(idx, sample):
    ConvNet.saveImages(sample, [4,8], 'out11\\sample_%d.png' % (idx))
Example 8
def imgSave(idx, sample):
    ConvNet.saveImages(sample, [8,32], 'out12\\sample_%d.png' % (idx))
Example 9
def train():

    imagesHR = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])
    imagesLR = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])
    
    G = generator(imagesLR)
    D_logits  = discriminator(imagesHR)
    D_logits_F = discriminator(G)

    gen_cost = -tf.reduce_mean(D_logits_F)
    disc_cost = tf.reduce_mean(D_logits_F) - tf.reduce_mean(D_logits)
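    # WGAN-GP gradient penalty: score random interpolations between real and
    # generated samples and penalize the critic gradient's L2 norm for
    # deviating from 1 (Gulrajani et al., 2017).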
    alpha = tf.random_uniform(shape=[BATCH_SIZE,1], minval=0.0,maxval=1.0)
    differences = G - imagesHR
    differences = tf.reshape(differences,[BATCH_SIZE,-1])
    imagereshape = tf.reshape(imagesHR,[BATCH_SIZE,-1])
    interpolates = imagereshape + (alpha*differences)
    interpolates = tf.reshape(interpolates,imagesHR.shape)
    gradients = tf.gradients(discriminator(interpolates), [interpolates])[0]
    # Per-sample L2 norm of the critic gradient: sum over all non-batch axes
    # (interpolates was reshaped back to 4-D above).
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1, 2, 3]))
    gradient_penalty = tf.reduce_mean((slopes - 1.)**2)
    
    LAMBDA = 10 # Gradient penalty lambda hyperparameter
    disc_cost += LAMBDA*gradient_penalty

    g_vars = ConvNet.getParam(glist)
    d_vars = ConvNet.getParam(dlist)

    d_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(disc_cost, var_list=d_vars)        
    g_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(gen_cost, var_list=g_vars)

    sess = tf.Session()

    init = tf.global_variables_initializer()  
    sess.run(init)
    
    # A fixed low-res batch, reused for every periodic sample so the saved
    # montages are comparable across iterations.
    sampleLR = CBLR.extract_data()
    _ = CBHR.extract_data()    # advance the HR stream too (result unused)
    ConvNet.saveImages(sampleLR, saveSize, 'out12sr\\demo.png')
    start_time = time.time()
    idx = 0
    while True:
        idx = idx + 1
        elapsed_time = time.time() - start_time
        start_time = time.time()
        print(str(idx)+","+str(CBHR.file_index)+","+str(CBHR.content_index)+","+str(elapsed_time))
        
        for _ in xrange(2):
            loadedimageLR = CBLR.extract_data()
            loadedimageHR = CBHR.extract_data()
            sess.run(d_optim, feed_dict = {imagesLR:loadedimageLR,imagesHR:loadedimageHR})

        loadedimageLR = CBLR.extract_data()
        loadedimageHR = CBHR.extract_data()
        sess.run(g_optim, feed_dict = {imagesLR:loadedimageLR})

        if idx % 200 == 0:

            sample = sess.run(G, feed_dict = {imagesLR:sampleLR})

            def imgSave(idx, sample):
                ConvNet.saveImages(sample, saveSize, 'out12sr\\sample_%d.png' % (idx))

            # Write the montage on a background thread so file I/O does not
            # stall the training loop.
            t = threading.Thread(target=imgSave, args=(idx, sample))
            t.start()

        exist = False
        
        if idx%10 == 0:
            exist = os.path.exists("stop.txt")
            
        if idx % 2000 == 0 or exist:
            
            def save(idx, gSaver, dSaver):
                print("start save")
                saveToFile = ConvNet.openEmptyFileW("gan12srg" + str(idx) + ".txt")
                for item in gSaver:
                    item(saveToFile)
                saveToFile.flush()
                saveToFile.close()

                saveToFile = ConvNet.openEmptyFileW("gan12srd" + str(idx) + ".txt")
                for item in dSaver:
                    item(saveToFile)
                saveToFile.flush()
                saveToFile.close()
                print("end save")
            
            gSaver = []
            dSaver = []
            for item in glist:
                gSaver.append(item.getSaver(sess))
            for item in dlist:
                dSaver.append(item.getSaver(sess))

            t = threading.Thread(target=save,args=(idx,gSaver, dSaver))
            t.start()
            
            
        if exist:
            break

    sess.close()
Example 10
def train():
    ###################
    # ConvNet.saveImages(images, [4,4], "test.jpg")
    # exit()
    ###################
    
    imagesT = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])
    imagesF = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])
    imagesM = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])
    imagesPL = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])
    imagesMI = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])
    
    imagesG = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])
    imagesB = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])

    DT = discriminator(imagesT)
    DF = discriminator(imagesF)
    DM = discriminator(imagesM)
    DPL = discriminator(imagesPL)
    DMI = discriminator(imagesMI)
    DG = discriminator(imagesG)
    DB = discriminator(imagesB)

    loss = (tf.reduce_mean(tf.nn.relu(  DF -  1))   + 
            tf.reduce_mean(tf.nn.relu(   1 - DT))   +
            tf.reduce_mean(tf.nn.relu(  DM -  1))   +
            tf.reduce_mean(tf.nn.relu(- DM -  1)) +
            tf.reduce_mean(tf.nn.relu(-DPL))     +
            tf.reduce_mean(tf.nn.relu( DMI))   +
            tf.reduce_mean(tf.nn.relu(DB  - DG     ))
             )
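    # Each hinge term above is zero once its score clears the margin, so the
    # loss pushes DT >= 1, DF <= 1, DM into [-1, 1], DPL >= 0, DMI <= 0,
    # and DG >= DB.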
    
    # Optimizer
    #optimizer = tf.train.AdadeltaOptimizer(learning_rate=1).minimize(loss)  # tf.train.AdadeltaOptimizer.__init__(learning_rate=0.001, rho=0.95, epsilon=1e-08, use_locking=False, name='Adadelta') -- that optimizer was for the classification case; this is regression, so it is unused
    optimizer = tf.train.AdamOptimizer(learning_rate=0.0001, beta1=0.5, beta2=0.9).minimize(loss)
    #optimizer = tf.train.GradientDescentOptimizer(0.000001).minimize(loss)

    sess = tf.Session()

    init = tf.global_variables_initializer()  
    sess.run(init)
    
    # Count consecutive zero-loss iterations; training stops once the loss
    # has been exactly zero for 200 consecutive steps.
    zeros = 0
    
    def save(idx, dSaver):
        print("start save")

        saveToFile = ConvNet.openEmptyFileW("faceTrain"+str(idx)+".txt")
        for item in dSaver:
            item(saveToFile)
        saveToFile.flush()
        saveToFile.close()
        print("end save")
    
    start_time = time.time()
    idx = 0
    while True:
        idx = idx + 1

        ImagesT = CBT.extract_data()
        ImagesF = CBF.extract_data()
        ImagesM = CBM.extract_data()
        ImagesPL = CBPL.extract_data()
        ImagesMI = CBMI.extract_data()
        
        #if random.uniform(-1,1)>0 :
        #    ImagesG,ImagesB = CB_GB.extract_data()
        #else:
        ImagesG,ImagesB = CB_BG.extract_data()
        
        # Debug hook (disabled): dump the first G and B images and exit.
        if False:
            img = np.ndarray([1, IMAGE_H, IMAGE_W, IMAGE_CHANNEL], np.float32)
            img[0] = ImagesG[0]
            ConvNet.saveImages(img, [1,1], "G.png")
            img[0] = ImagesB[0]
            ConvNet.saveImages(img, [1,1], "B.png")
            exit()
            
        _,Loss = sess.run([optimizer,loss], feed_dict = {imagesT: ImagesT,
                                                         imagesF: ImagesF,
                                                         imagesM: ImagesM,
                                                         imagesPL: ImagesPL,
                                                         imagesMI: ImagesMI,
                                                         imagesG: ImagesG,
                                                         imagesB: ImagesB,

                                                         })

        elapsed_time = int((time.time() - start_time)*1000)
        start_time = time.time()
        print(str(idx)+","+str(Loss)+","+str(elapsed_time))
        
        if Loss == 0:
            zeros = zeros + 1
        else:
            zeros = 0

        # "stop.txt" acts as an external stop signal: force a save and exit.
        if idx % 10 == 0:
            exist = os.path.exists("stop.txt")
            if exist:
                zeros = 200
                    
        if zeros >= 200 or idx%1000==0:
            
            dSaver = []
            for item in dlist:
                dSaver.append(item.getSaver(sess))

            t = threading.Thread(target=save,args=(idx, dSaver))
            t.start()

        if zeros >= 200:
            break
        
        if idx >=200000:
            break

    sess.close()
Example 11
def train():
    ###################
    # loadedimage = extract_data()
    # ConvNet.saveImages(loadedimage, [8, 8], "test0.png")
    # loadedimage = extract_data()
    # ConvNet.saveImages(loadedimage, [8, 8], "test1.png")
    # exit()
    ###################

    images = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])
    z = tf.placeholder(tf.float32, [BATCH_SIZE, Z_DIM], name='z')

    G = generator(z)
    D_logits  = discriminator(images)
    samples = sampler(z)
    D_logits_F = discriminator(G)

    gen_cost = -tf.reduce_mean(D_logits_F)
    disc_cost = tf.reduce_mean(D_logits_F) - tf.reduce_mean(D_logits)
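    # WGAN-GP gradient penalty: score random interpolations between real and
    # generated samples and penalize the critic gradient's L2 norm for
    # deviating from 1 (Gulrajani et al., 2017).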
    alpha = tf.random_uniform(shape=[BATCH_SIZE,1], minval=0.0,maxval=1.0)
    differences = G - images
    differences = tf.reshape(differences,[BATCH_SIZE,-1])
    imagereshape = tf.reshape(images,[BATCH_SIZE,-1])
    interpolates = imagereshape + (alpha*differences)
    interpolates = tf.reshape(interpolates,images.shape)
    gradients = tf.gradients(discriminator(interpolates), [interpolates])[0]
    # Per-sample L2 norm of the critic gradient: sum over all non-batch axes
    # (interpolates was reshaped back to 4-D above).
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1, 2, 3]))
    gradient_penalty = tf.reduce_mean((slopes - 1.)**2)
    
    LAMBDA = 10 # Gradient penalty lambda hyperparameter
    disc_cost += LAMBDA*gradient_penalty

    g_vars = ConvNet.getParam([gfc0,gdc0,gdc1,gdc2,gdc3])
    d_vars = ConvNet.getParam([dcv0,dcv1,dcv2,dcv3,dfc0])

    d_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(disc_cost, var_list=d_vars)        
    g_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(gen_cost, var_list=g_vars)

    sess = tf.Session()

    sample_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
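    # sample_z is reused for every idx % 50 snapshot below, so the saved
    # montages are comparable across iterations.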
    
    init = tf.global_variables_initializer()
    sess.run(init)

    for idx in xrange(0, 10000):
        print(idx)
        for _ in xrange(5):
            batch_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
            loadedimage = extract_data()
            sess.run(d_optim, feed_dict = {z:batch_z, images:loadedimage})

        batch_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
        sess.run(g_optim, feed_dict = {z: batch_z})

        if idx % 50 == 0:
            sample = sess.run(samples, feed_dict = {z: sample_z})
            ConvNet.saveImages(sample, [8, 8], 'out\\sample_%d.png' % (idx))
            
        if idx % 100 == 0:
            
            def save(
                bgfc0, wgfc0,
                bgdc0, wgdc0,
                bgdc1, wgdc1,
                bgdc2, wgdc2,
                bgdc3, wgdc3,
                bdcv0, wdcv0,
                bdcv1, wdcv1,
                bdcv2, wdcv2,
                bdcv3, wdcv3,
                bdfc0, wdfc0
                    ):
                print("start save")
                saveToFile = ConvNet.openEmptyFileW('gan9g.txt')
                gfc0.save_ToFile(bgfc0, wgfc0, saveToFile)
                gdc0.save_ToFile(bgdc0, wgdc0, saveToFile)
                gdc1.save_ToFile(bgdc1, wgdc1, saveToFile)
                gdc2.save_ToFile(bgdc2, wgdc2, saveToFile)
                gdc3.save_ToFile(bgdc3, wgdc3, saveToFile)
                saveToFile.flush()
                saveToFile.close()

                saveToFile = ConvNet.openEmptyFileW('gan9d.txt')
                dcv0.save_ToFile(bdcv0, wdcv0, saveToFile)
                dcv1.save_ToFile(bdcv1, wdcv1, saveToFile)
                dcv2.save_ToFile(bdcv2, wdcv2, saveToFile)
                dcv3.save_ToFile(bdcv3, wdcv3, saveToFile)
                dfc0.save_ToFile(bdfc0, wdfc0, saveToFile)
                saveToFile.flush()
                saveToFile.close()
                print("end save")
                
            bgfc0, wgfc0 = gfc0.save_getParam(sess)
            bgdc0, wgdc0 = gdc0.save_getParam(sess)
            bgdc1, wgdc1 = gdc1.save_getParam(sess)
            bgdc2, wgdc2 = gdc2.save_getParam(sess)
            bgdc3, wgdc3 = gdc3.save_getParam(sess)
            bdcv0, wdcv0 = dcv0.save_getParam(sess)
            bdcv1, wdcv1 = dcv1.save_getParam(sess)
            bdcv2, wdcv2 = dcv2.save_getParam(sess)
            bdcv3, wdcv3 = dcv3.save_getParam(sess)
            bdfc0, wdfc0 = dfc0.save_getParam(sess)

            t = threading.Thread(target=save, args=(
                bgfc0, wgfc0,
                bgdc0, wgdc0,
                bgdc1, wgdc1,
                bgdc2, wgdc2,
                bgdc3, wgdc3,
                bdcv0, wdcv0,
                bdcv1, wdcv1,
                bdcv2, wdcv2,
                bdcv3, wdcv3,
                bdfc0, wdfc0
                    ))
            t.start()

    sess.close()