Example #1
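# Evaluates each saved checkpoint on the test split and returns the index of the most accurate one;
# architecture size, learning rate and batch size are parsed from the checkpoint file name.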
def model_accuracy(models, image_paths, target_paths, results_dir):
    accList = []
    for model in models:
        print('Path to model {}'.format(model))
        base_model = os.path.basename(model)
        batch_size = get_batch(base_model)
        train_loader, valid_loader, test_loader = ConvNet.train_valid_test_split(
            image_paths, target_paths, batch_size)
        print('Model: {}'.format(base_model))
        match = re.search(r'arch(\d+)', base_model)
        net_size = int(match.group(1))
        lr = re.search(r'lr(\d+\.\d+)', base_model)
        lr = float(lr.group(1))
        batch = get_batch(base_model)
        net = ConvNet.Net(net_size)
        net = nn.DataParallel(net)
        net.load_state_dict(
            torch.load(model, map_location=lambda storage, loc: storage))
        #net.cuda()
        accuracy = ConvNet.model_eval(test_loader, net, batch, lr, net_size,
                                      results_dir)
        accList.append(accuracy)
    maxInd = accList.index(max(accList))
    print('Highest accuracy: {} for model: {}'.format(accList[maxInd],
                                                      models[maxInd]))
    return maxInd
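# A minimal usage sketch (hypothetical paths; assumes checkpoints were saved with names such as
# "net_arch16_lr0.001_batch32.pt" so the regexes above can recover the hyperparameters):
#   models = sorted(glob.glob(os.path.join(results_dir, '*.pt')))
#   best = model_accuracy(models, image_paths, target_paths, results_dir)
#   print('Best checkpoint:', models[best])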
Example #2
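# Pairs up images: for every name pair listed in <_dir>.txt, the two source images are read
# and saved side by side into the directory <_dir>.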
    def __init__(self, flg, _dir):
        # create the output directory if it does not exist yet
        if not os.path.isdir(_dir):
            os.makedirs(_dir)

        with open(_dir + ".txt", 'r') as list_file:
            for line in list_file:
                line = line.strip()
                if not line:
                    continue

                # each line names a pair of images joined by '_'
                _name = line.split("_")

                in_file1 = getFullPath(_name[0])
                in_file2 = getFullPath(_name[1])

                out_file1 = _dir + "\\" + line

                img = np.ndarray([2, IMAGE_H, IMAGE_W, IMAGE_CHANNEL], np.float32)
                img[0] = read_image(in_file1)
                img[1] = read_image(in_file2)

                ConvNet.saveImages(img, [1, 2], out_file1)
Example #3
            def save(
                bgfc0, wgfc0,
                bgdc0, wgdc0,
                bgdc1, wgdc1,
                bgdc2, wgdc2,
                bgdc3, wgdc3,
                bdcv0, wdcv0,
                bdcv1, wdcv1,
                bdcv2, wdcv2,
                bdcv3, wdcv3,
                bdfc0, wdfc0
                    ):
                print("start save")
                saveToFile = ConvNet.openEmptyFileW('gan9g.txt')
                gfc0.save_ToFile(bgfc0, wgfc0, saveToFile)
                gdc0.save_ToFile(bgdc0, wgdc0, saveToFile)
                gdc1.save_ToFile(bgdc1, wgdc1, saveToFile)
                gdc2.save_ToFile(bgdc2, wgdc2, saveToFile)
                gdc3.save_ToFile(bgdc3, wgdc3, saveToFile)
                saveToFile.flush();saveToFile.close()

                saveToFile = ConvNet.openEmptyFileW('gan9d.txt')
                dcv0.save_ToFile(bdcv0, wdcv0, saveToFile)
                dcv1.save_ToFile(bdcv1, wdcv1, saveToFile)
                dcv2.save_ToFile(bdcv2, wdcv2, saveToFile)
                dcv3.save_ToFile(bdcv3, wdcv3, saveToFile)
                dfc0.save_ToFile(bdfc0, wdfc0, saveToFile)
                saveToFile.flush();saveToFile.close()
                print("end save")
Example #4
def saveimg(filename,wholeData,idx,scale = 255,bias = 128):
    data = ConvNet.newImage(wholeData.shape[1],wholeData.shape[2])
    #clearImg(data,0)
    for i in xrange(0,wholeData.shape[1]):
        for j in xrange(0,wholeData.shape[2]):
            r = wholeData[idx,j,i,0] * scale + bias
            g = wholeData[idx,j,i,1] * scale + bias
            b = wholeData[idx,j,i,2] * scale + bias
            ConvNet.setpixel(data,i,j,r,g,b)
    #lbl = wholeData[idx]
    ConvNet.saveImg(data, filename)
Example #5
            def save(idx, gSaver, dSaver):
                print("start save")
                saveToFile = ConvNet.openEmptyFileW("gan0g"+str(idx)+".txt")
                for item in gSaver:
                    item(saveToFile)
                saveToFile.flush();saveToFile.close()
 
                saveToFile = ConvNet.openEmptyFileW("gan0d"+str(idx)+".txt")
                for item in dSaver:
                    item(saveToFile)
                saveToFile.flush();saveToFile.close()
                print("end save")
Example #6
            def save(idx, gSaver, dSaver):
                print("start save")
                saveToFile = ConvNet.openBinaryFileW("gan11g"+str(idx)+".bin")
                for item in gSaver:
                    item(saveToFile)
                saveToFile.flush();saveToFile.close()
 
                saveToFile = ConvNet.openBinaryFileW("gan11d"+str(idx)+".bin")
                for item in dSaver:
                    item(saveToFile)
                saveToFile.flush();saveToFile.close()
                print("end save")
Example #7
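# Naive gradient-ascent visualization: repeatedly nudges the input image along the (normalized)
# gradient of the objective t_obj and saves the result as a PNG.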
def render_naive(idx,_name,t_obj, img0=img_noise, iter_n=200, step=0.0001):
    t_score = tf.reduce_mean(t_obj) # defining the optimization objective
    t_grad = tf.gradients(t_score, imagesT)[0] # behold the power of automatic differentiation!
    
    img = img0.copy()
    for i in range(iter_n):
        g, score = sess.run([t_grad, t_score], {imagesT:img})
        # normalizing the gradient, so the same step size should work 
        g /= g.std()+1e-8         # for different layers and networks
        img += g*step
        print(score)
    
    ConvNet.saveImages(img, [1,1], _name+'demo'+str(idx)+'.png')
Example #8
def extract_data(BATCH_SIZE,onehot = False):
    global content_index
    content_index = content_index + BATCH_SIZE
    if content_index>=70000:#202599
        content_index = 0

    lbl = _label[content_index:content_index+BATCH_SIZE]
    if onehot:
        lbl = ConvNet.onehot(10,lbl)

    return _data[content_index:content_index+BATCH_SIZE],lbl



    ###################
#     loadedimage = extract_data()
#     ConvNet.saveImagesMono(loadedimage, saveSize, "test0.png")
#     loadedimage = extract_data()
#     ConvNet.saveImagesMono(loadedimage, saveSize, "test1.png")
#     exit()

    
    
#     for idx in xrange(0, 1000000000):
#         loadedimage = extract_data()
#         global file_index
#         global content_index
#         print(str(file_index)+","+str(content_index))         
#     exit()

    ###################
    
Example #9
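# MNIST training loop: each outer iteration first estimates test accuracy on 100 single samples,
# then runs 10 training mini-batches; afterwards the learned layers are serialized to MNIST.txt.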
def train():
    #with tf.Session(config=tf.ConfigProto(device_count = {'GPU': 0})) as sess:
    with tf.Session() as sess:
        # initialize parameters
        sess.run(tf.global_variables_initializer())

        for j in xrange(0, 100):
            # measure the network's current test accuracy
            accurate = 0
            for _i in xrange(0,100):
                testData,testLabel = MNISTData.extract_testdata()
                lbl = sess.run(_test, feed_dict={testOne:testData})
                if np.argmax(lbl, 1) == testLabel:
                    accurate = accurate + 0.01
            
            # run training
            totalLoss = 0.0
            for _i in xrange(0,10):
                trainData,trainLabel = MNISTData.extract_traindata(BATCH_SIZE)
                _,_loss = sess.run([optimizer,loss], feed_dict={labels_node: trainLabel, inputlayer: trainData})
                totalLoss = totalLoss + _loss
            
            print(j,accurate,totalLoss)
            
        # save the trained network
        Saver = []
        for item in plist:
            Saver.append(item.getSaver(sess))
            
        saveToFile = ConvNet.openEmptyFileW("MNIST.txt")
        for item in Saver:
            item(saveToFile)
        saveToFile.flush();saveToFile.close()
Example #10
def train():
    with tf.Session(config=tf.ConfigProto(device_count = {'GPU': 0})) as sess:
        # initialize parameters
        sess.run(tf.global_variables_initializer())
    
        for j in range(0, 20):
            # print the network's current outputs
            a1 = sess.run(_net, feed_dict={inputlayer:verifydata1})
            a2 = sess.run(_net, feed_dict={inputlayer:verifydata2})
            a3 = sess.run(_net, feed_dict={inputlayer:verifydata3})
            print(j, a1[0], a2[0], a3[0])  # the outputs get closer and closer to the input values
            
            # run training
            for i in range(0, 100):
                sess.run(optimizer, feed_dict={finaldata: indata, inputlayer: indata})
        
        # save the trained network
        Saver = []
        for item in plist:
            Saver.append(item.getSaver(sess))
            
        saveToFile = ConvNet.openEmptyFileW("test.txt")
        for item in Saver:
            item(saveToFile)
        saveToFile.flush();saveToFile.close()
Example #11
def main():
    startTime = time.time()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = ConvNet.Net().to(device)
    criterion = nn.MSELoss()
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    epochs = 90

    for epoch in range(1, epochs + 1):
        ConvNet.train(model, device, train_data, criterion, optimizer, epoch)
        ConvNet.test(model, device, criterion, test_data)

    print(50 * "-")
    print('Finished Training')
    endTime = time.time()
    elapsedTime = int(endTime - startTime)
    showTime(elapsedTime)
Example #12
def train():

    images = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])

    net = pixelShuffler(images,scale=1)
    
    
    with tf.Session(config=tf.ConfigProto(device_count = {'GPU': 0})) as sess:
        init = tf.global_variables_initializer()  
        sess.run(init)

        loadedimage = extract_data()
        sample = sess.run(net, feed_dict = {images:loadedimage})
        
        ConvNet.saveImages(loadedimage, [2,8], 'sample_in.png')

        ConvNet.saveImages(sample, [2,8], 'sample_out.png')
Example #13
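# CIFAR autoencoder training: a few fixed images are saved as references, the network is trained on
# mini-batches, and after every outer iteration the reconstructions and the layer weights (cifar<j>.txt)
# are dumped to disk.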
def train():
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        saveimg("base0.bmp",CifarData,0)
        saveimg("base1.bmp",CifarData,1)
        saveimg("base2.bmp",CifarData,2)
        saveimg("base3.bmp",CifarData,3)
        saveimg("base4.bmp",CifarData,59110)
        saveimg("base5.bmp",CifarData,59111)
        saveimg("base6.bmp",CifarData,59112)
        saveimg("base7.bmp",CifarData,59113)

        verifydata[0] = CifarData[0]
        verifydata[1] = CifarData[1]
        verifydata[2] = CifarData[2]
        verifydata[3] = CifarData[3]
        verifydata[4] = CifarData[59110]
        verifydata[5] = CifarData[59111]
        verifydata[6] = CifarData[59112]
        verifydata[7] = CifarData[59113]
  
        for j in xrange(0, 1000):

            for k in xrange(0,10):#train times
                print(str(k)+' ',end='')
                sys.stdout.flush()
                for i in xrange(0, 800):#train range 
                    for dj in xrange(0, BATCH_SIZE):
                        inputData[dj] = CifarData[dj+i*BATCH_SIZE]
                    sess.run(optimizer, feed_dict={finaldata: inputData, inputLayer: inputData})

            print()
            print(j)
            
            _out = sess.run(regeneratedImg, feed_dict={inputLayer:verifydata})
            saveimg(str(j)+"0.bmp",_out,0)
            saveimg(str(j)+"1.bmp",_out,1)
            saveimg(str(j)+"2.bmp",_out,2)
            saveimg(str(j)+"3.bmp",_out,3)
            saveimg(str(j)+"4.bmp",_out,4)
            saveimg(str(j)+"5.bmp",_out,5)
            saveimg(str(j)+"6.bmp",_out,6)
            saveimg(str(j)+"7.bmp",_out,7)
            
            print("saving")
            testfile = ConvNet.openEmptyFileW("cifar"+str(j)+".txt")
            conv1save(sess,testfile)
            conv2save(sess,testfile)
            conv3save(sess,testfile)
            fc1saver(sess,testfile)
            fc2saver(sess,testfile)
            uconv1save(sess,testfile)
            uconv2save(sess,testfile)
            uconv3save(sess,testfile)
            if testfile:
                testfile.flush()
                testfile.close()
            print("saved")
Example #14
def display_rgb(dev, data, timestamp):
#    pass
    global keep_running
    im = frame_convert2.video_cv(data)
#    im = im_or.copy()
#    pdb.set_trace()
    if len(im) != 0:
        cls,det = CNN.detect(net,im)
#    print(det[0,0:4])
        print(det.shape)
Example #15
def extract_traindata(BATCH_SIZE,onehot = False):
    global train_index
    train_index = train_index + BATCH_SIZE
    if train_index>=60000:#202599
        train_index = 0
    
    lbl = train_label[train_index:train_index+BATCH_SIZE]
    if onehot:
        lbl = ConvNet.onehot(10,lbl)
    return train_data[train_index:train_index+BATCH_SIZE],lbl
Example #16
    def __init__(self, conf, *args):
        super(ProtoNetModule, self).__init__(*args)
        self.conf = conf
        self.net = {}
        if conf['feature']['net_name'] == 'WideResNet':
            self.net['feature'] = WideResNet.create_model(conf)
        else:
            self.net['feature'] = ConvNet.create_model(conf['feature'])

        self.net['head'] = ProtoHead.create_model()
        self.init_optimizer()
Example #17
def extract_testdata(onehot = False):
    global test_index
    test_index = test_index + 1
    if test_index>=10000:#202599
        test_index = 0

    lbl = test_label[test_index:test_index+1]
    if onehot:
        lbl = ConvNet.onehot(10,lbl)

    return test_data[test_index:test_index+1],lbl
Example #18
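# Uses the discriminator score to build a ranked training set: random CelebA image pairs whose scores
# are close are saved side by side, with the higher-scored image first.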
def train():

    images = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])

    D = discriminator(images)

    sess = tf.Session()

    init = tf.global_variables_initializer()  
    sess.run(init)
    
    for i in xrange(0,400):
        print(i)
        i1 = int(random.uniform(1,202599))
        i2 = int(random.uniform(1,202599))
        
        img = np.ndarray([BATCH_SIZE*2, IMAGE_H, IMAGE_W, IMAGE_CHANNEL], np.float32)
        
        img1,file1 = getImg(i1)
        img2,file2 = getImg(i2)
        
        lbl1 = sess.run(D, feed_dict = {images: img1})
        lbl2 = sess.run(D, feed_dict = {images: img2})
        
        if (lbl1 < 1 and
            lbl1 >-1 and 
            lbl2 < 1 and 
            lbl2 >-1 and
            abs(lbl1-lbl2)<0.5): 
        
            if(lbl1>lbl2):
                img[0] = img1
                img[1] = img2
                ConvNet.saveImages(img, [1,2], "E:\\MNIST\\CelebA\\Img\\img_celeba.7z\\trainData\\"+file1+"_"+file2)
            else:
                img[0] = img2
                img[1] = img1
                ConvNet.saveImages(img, [1,2], "E:\\MNIST\\CelebA\\Img\\img_celeba.7z\\trainData\\"+file2+"_"+file1)


    sess.close()
Example #19
def train():

    images = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])

    D = discriminator(images)

    with tf.Session(config=tf.ConfigProto(device_count = {'GPU': 0})) as sess:
    
        init = tf.global_variables_initializer()  
        sess.run(init)
        
        for idx in xrange(0,500):
            print(idx)
            i1 = int(random.uniform(1,202599))
    
            img1,file = getImg(i1)
    
            lbl = sess.run(D, feed_dict = {images: img1})
    
            val = int((100 - lbl) * 200)
    
            ConvNet.saveImages(img1, [1,1], "E:\\MNIST\\CelebA\\Img\\img_celeba.7z\\trainData\\"+str(val)+"-"+str(lbl)+"-"+file)
Example #20
def main(unused_argv):
    # Load training and eval data
    predict_data = ConvNet.reformat(inputdata)

    # Create the Estimator
    DL_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn,
                                           model_dir=ConvNet.networkLocation)

    # Print out predictions
    predict_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": predict_data}, num_epochs=1, shuffle=False)
    predict_results = DL_classifier.predict(input_fn=predict_input_fn)
    for i, p in enumerate(predict_results):
        print(round(p["classes1"][0] * 0.5 + p["classes2"][0] * 0.5))
Example #21
def createVGGNetNetwork():
    layers = [ConvLayer((1,1,32,32),(5,5),32, zero_padding = 2),
            ConvLayer((32,1,32,32),(5,5),1,zero_padding=2),
            MaxPollingLayer((32,1,32,32),filter_shape=(2,2)),
            ConvLayer((32,1,16,16),(5,5),2,zero_padding = 2),
            ConvLayer((64,1,16,16),(5,5),1,zero_padding=2), # I HOPE THIS LINE IS RIGHT
            MaxPollingLayer((64,1,16,16),filter_shape=(2,2)),
            ConvLayer((64,1,8,8),(5,5),2,zero_padding=2),
            ConvLayer((128,1,8,8),(5,5),1,zero_padding=2),
            MaxPollingLayer((128,1,8,8),filter_shape = (2,2)),
            FCLayer(2048,128,'sigmoid'),
            FCLayer(128,128,'sigmoid'),
            FCLayer(128,43,'sigmoid')]
    cnn = ConvNet(layers)
    return cnn
Example #22
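# Classifier training with momentum SGD and an exponentially decaying learning rate; loss and
# training/validation error rates are printed every EVAL_FREQUENCY steps and the layer weights
# are saved to conv.txt.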
def train():
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_node, logits=fc2))
    batch = tf.Variable(0, dtype=tf.float32)
    learning_rate = tf.train.exponential_decay(
        0.01,                # Base learning rate.
        batch * BATCH_SIZE,  # Current index into the dataset.
        train_size,          # Decay step.
        0.95,                # Decay rate.
        staircase=True)
    # Use simple momentum for the optimization.
    optimizer = tf.train.MomentumOptimizer(learning_rate,0.9).minimize(loss,global_step=batch)
    # Create a local session to run the training.
    start_time = time.time()
    with tf.Session() as sess:
        # Run all the initializers to prepare the trainable parameters.
        tf.global_variables_initializer().run()
        print('Initialized!')
        # Loop through training steps.
        for step in xrange(0,100):
            # Compute the offset of the current minibatch in the data.
            # Note that we could use better randomization across epochs.
            offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
            batch_data = train_data[offset:(offset + BATCH_SIZE), ...]
            batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
            # This dictionary maps the batch data (as a numpy array) to the
            # node in the graph it should be fed to.
            feed_dict = {inputLayer: batch_data,labels_node: batch_labels}
            # Run the optimizer to update weights.
            sess.run(optimizer, feed_dict=feed_dict)
            # print some extra information once reach the evaluation frequency
            if step % EVAL_FREQUENCY == 0:
                # fetch some extra nodes' data
                l, predictions = sess.run([loss, train_prediction],
                                              feed_dict=feed_dict)
                elapsed_time = time.time() - start_time
                start_time = time.time()
                print(elapsed_time,l,  error_rate(predictions, batch_labels), error_rate(eval_in_batches(validation_data, sess), validation_labels))
    
        print("save start")
        testfile = ConvNet.openEmptyFileW('conv.txt')
        conv1save(sess,testfile)
        conv2save(sess,testfile)
        fc1save(sess,testfile)
        fc2save(sess,testfile)
        if testfile:
            testfile.flush()
            testfile.close()
        print("save done")
Example #23
 def __init__(self, imageList, mode, groundTruthFile=None):
     self.mode = mode
     self.cnn = cn.ConvNet()
     self.testRecord = None
     self.idx = -1
     self.imageList = [x.strip() for x in open(imageList)]
     self.groundTruth = cu.loadBoxIndexFile(groundTruthFile)
     #self.imageList = self.rankImages()
     #self.imageList = self.imageList[0:10]
     allImgs = set([x.strip() for x in open(config.get('allImagesList'))])
     self.negativeSamples = list(
         allImgs.difference(set(self.groundTruth.keys())))
     self.negativeEpisode = False
     if self.mode == 'train':
         self.negativeProbability = config.getf('negativeEpisodeProb')
         random.shuffle(self.imageList)
     self.loadNextEpisode()
Example #24
def train():
    with tf.Session() as sess:
        # initialize parameters
        sess.run(tf.global_variables_initializer())
    
        for j in range(0, 20):
            # print the network's current outputs
            a1 = sess.run(fc2, feed_dict={inputlayer:verifydata1})
            a2 = sess.run(fc2, feed_dict={inputlayer:verifydata2})
            a3 = sess.run(fc2, feed_dict={inputlayer:verifydata3})
            print(j, a1[0], a2[0], a3[0])  # the outputs get closer and closer to the input values
            
            # run training
            for i in range(0, 100):
                sess.run(optimizer, feed_dict={finaldata: indata, inputlayer: indata})
        
        # save the trained network
        testfile = ConvNet.openEmptyFileW('test.txt')
        fc1saver(sess,testfile)
        fc2saver(sess,testfile)
        if testfile:
            testfile.flush()
            testfile.close()
Example #25
    def __init__(self, conf, *args):
        super(MetaRelationModule, self).__init__(*args)
        self.conf = conf
        self.net = {}
        if self.conf['feature']['net_name'] == 'ConvNet':
            self.net['feature'] = ConvNet.create_model(conf['feature'])
        elif self.conf['feature']['net_name'] == 'ResNet':
            self.net['feature'] = ResNet.create_model(conf['feature'])
        elif self.conf['feature']['net_name'] == 'WideResNet':
            self.net['feature'] = WideResNet.create_model(conf)
        elif self.conf['feature']['net_name'] == 'ResNet2':
            self.net['feature'] = ResNet2.create_model(conf)
        else:
            raise NotImplementedError

        self.net['relation'] = RelationNet(conf['relation'])
        if conf['relation']['use_meta_relation'] is True:
            self.net['meta_relation'] = MetaRelationNet(conf['meta_relation'])
        else:
            self.net['meta_relation'] = RelationHead(conf['meta_relation'])

        self.init_optimizer()
Example #26
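# BER simulation setup for the iterative BP-CNN receiver: builds the BP decoding network and the CNN
# denoisers (all stages share the first CNN graph), then opens a TensorFlow session; the evaluation
# loop itself is not shown in this snippet.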
def simulation(linear_code, top_config, net_config, simutimes_range, target_err_bits_num, batch_size):
    SNRset = generate_snr_set(top_config)
    bp_iter_num = top_config.BP_iter_nums_simu
    noise_io = DataIO.NoiseIO(top_config, False, None, rng_seed=0)
    denoising_net_num = top_config.cnn_net_num
    model_id = top_config.model_id

    G_matrix = linear_code.G_matrix
    H_matrix = linear_code.H_matrix
    K, N = np.shape(G_matrix)

    ## build BP decoding network
    if np.size(bp_iter_num) != denoising_net_num + 1:
        print('>>> Error: the length of bp_iter_num is not correct! (Iterative_BP_CNN.py)')
        print(bp_iter_num, np.size(bp_iter_num))
        print(denoising_net_num)
        exit(0)
    bp_decoder = BP_Decoder.BP_NetDecoder(H_matrix, batch_size)

    conv_net = {}
    denoise_net_in = {}
    denoise_net_out = {}
    # build network for each CNN denoiser,
    for net_id in range(denoising_net_num):
        if net_id > 0:
            conv_net[net_id] = conv_net[0]
            denoise_net_in[net_id] = denoise_net_in[0]
            denoise_net_out[net_id] = denoise_net_out[0]
        else:
            conv_net[net_id] = ConvNet.ConvNet(net_config, None, net_id)
            denoise_net_in[net_id], denoise_net_out[net_id], _ = conv_net[net_id].build_network()
    # init graph
    init = tf.global_variables_initializer()
    sess = tf.Session()
    print('Open a tf session!')
    sess.run(init)
Example #27
def createYannLeCunNetwork():
    return ConvNet(sign_recognition=True)
    
Example #28
    #clearImg(data,0)
    for i in xrange(0,wholeData.shape[1]):
        for j in xrange(0,wholeData.shape[2]):
            r = wholeData[idx,j,i,0] * scale + bias
            g = wholeData[idx,j,i,1] * scale + bias
            b = wholeData[idx,j,i,2] * scale + bias
            ConvNet.setpixel(data,i,j,r,g,b)
    #lbl = wholeData[idx]
    ConvNet.saveImg(data, filename)

CifarData = extract_data("E:\\MNIST\\cifar-10-batches-bin\\HWC.bin",60000)

inputData = np.ndarray([BATCH_SIZE, IMAGE_SIZEH, IMAGE_SIZEW, NUM_CHANNELS], np.float32)
verifydata = np.ndarray([BATCH_SIZE, IMAGE_SIZEH, IMAGE_SIZEW, NUM_CHANNELS], np.float32)

loadFromFile = ConvNet.openEmptyFileR('cifar.txt')
cv1  = ConvNet.Conv(inDepth = NUM_CHANNELS,outDepth = 32,filterSize = 5, loadFromFile=loadFromFile)#16*16
cv2  = ConvNet.Conv(inDepth = 32,outDepth = 32,filterSize = 5, loadFromFile=loadFromFile)#8*8
cv3  = ConvNet.Conv(inDepth = 32,outDepth = 64,filterSize = 5, loadFromFile=loadFromFile)#4*4
fc2 = ConvNet.FC(inDepth = 4*4*64,outDepth = 2048,loadFromFile = loadFromFile)
dc1 = ConvNet.DeConv(inDepth = 128,outDepth = 64,filterSize = 5,loadFromFile = loadFromFile)
dc2 = ConvNet.DeConv(inDepth = 64,outDepth = 64,filterSize = 5,loadFromFile = loadFromFile)
dc3 = ConvNet.DeConv(inDepth = 64,outDepth = 3,filterSize = 5,loadFromFile = loadFromFile)

if loadFromFile:loadFromFile.close()   


inputLayer = tf.placeholder(tf.float32,shape=(BATCH_SIZE, IMAGE_SIZEH, IMAGE_SIZEW, NUM_CHANNELS))
finaldata = tf.placeholder(tf.float32, shape=(BATCH_SIZE, IMAGE_SIZEH, IMAGE_SIZEW, NUM_CHANNELS))

net = cv1.getLayer(inputLayer, convStride = 1, poolSize = 2, isRelu = True, fixed = False)
Example #29
    self.__ys = [0] * len(X_pos) + [1] * len(X_neg)

  def __getitem__(self, index):
    return (encode(self.__xs[index], RNA.fold(self.__xs[index])[0], RNA.fold(self.__xs[index])[1]), self.__ys[index])

  def __len__(self):
    return len(self.__xs)


for _species in SPECIES:
  WriteFile = open("./results/cv/%s_cv.rst" % _species, "w")
  rst = []
  for fold in range(K_FOLD):
    loss_list = []
    accuracy_list = []
    model = ConvNet().to(device)
    model = model.double()
    weights = [4.0, 1.0]
    class_weights = torch.DoubleTensor(weights).to(device)
    criterion = nn.CrossEntropyLoss(weight=class_weights)
    # criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adagrad(model.parameters(), lr=LEARNING_RATE)
    train_dataset = DriveData("./dataset/cv/%s/train/%s_pos_train_f%d.fa" % (_species, _species, fold + 1),
                              "./dataset/cv/%s/train/%s_neg_train_f%d.fa" % (_species, _species, fold + 1))
    test_dataset = DriveData("./dataset/cv/%s/val/%s_pos_val_f%d.fa" % (_species, _species, fold + 1),
                             "./dataset/cv/%s/val/%s_neg_val_f%d.fa" % (_species, _species, fold + 1))
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, num_workers=8,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, num_workers=8, shuffle=False)
    total_step = len(train_loader)
    for epoch in range(N_EPOCH):
Example #30
            g = wholeData[idx,j,i,1] * scale + bias
            b = wholeData[idx,j,i,2] * scale + bias
            ConvNet.setpixel(data,i,j,r,g,b)
    #lbl = wholeData[idx]
    ConvNet.saveImg(data, filename)
print("loading")
CifarData = extract_data("E:\\MNIST\\cifar-10-batches-bin\\HWC.bin",60000)
print("loaded")

inputLayer = tf.placeholder(tf.float32,shape=(BATCH_SIZE, IMAGE_SIZEH, IMAGE_SIZEW, NUM_CHANNELS))
finaldata = tf.placeholder(tf.float32, shape=(BATCH_SIZE, IMAGE_SIZEH, IMAGE_SIZEW, NUM_CHANNELS))

inputData = np.ndarray([BATCH_SIZE, IMAGE_SIZEH, IMAGE_SIZEW, NUM_CHANNELS], np.float32)
verifydata = np.ndarray([BATCH_SIZE, IMAGE_SIZEH, IMAGE_SIZEW, NUM_CHANNELS], np.float32)

testfile = ConvNet.openEmptyFileR('cifar.txt')
conv1,conv1save = ConvNet.ConvLayer(inputLayer,filterSize = 5,outDepth = 32,convStride = 1,poolSize = 2,loadFromFile=testfile)
conv2,conv2save = ConvNet.ConvLayer(conv1,filterSize = 5,outDepth = 32,convStride = 1,poolSize = 2,loadFromFile=testfile)
conv3,conv3save = ConvNet.ConvLayer(conv2,filterSize = 5,outDepth = 64,convStride = 1,poolSize = 2,loadFromFile=testfile)
reshape = ConvNet.Conv2FC_Reshape(conv3)

fc1,fc1saver = ConvNet.FCLayer(reshape,2048,loadFromFile=testfile)

deshape = ConvNet.FC2Conv_Reshape(fc1,4,4,128)
uconv1,uconv1save = ConvNet.DeConvLayer(deshape,filterSize=5,output_shape=[BATCH_SIZE,8,8,64],convStride = 2, loadFromFile = testfile)
uconv2,uconv2save = ConvNet.DeConvLayer(uconv1,filterSize=5,output_shape=[BATCH_SIZE,16,16,64],convStride = 2, loadFromFile = testfile)
uconv3,uconv3save = ConvNet.DeConvLayer(uconv2,filterSize=5,output_shape=[BATCH_SIZE,32,32,3],convStride = 2, loadFromFile = testfile, isRelu = False)
regeneratedImg = uconv3

if testfile:testfile.close()   
Example #31
def generate_noise_samples(linear_code,
                           top_config,
                           net_config,
                           train_config,
                           bp_iter_num,
                           net_id_data_for,
                           generate_data_for,
                           noise_io,
                           model_id,
                           BP_layers=20):
    """
    :param linear_code: LDPC code object
    :param top_config: 
    :param net_config: 
    :param train_config: 
    :param bp_iter_num: 
    :param net_id_data_for: 
    :param generate_data_for: 
    :param noise_io: 
    :param model_id: 
    :return: 
    """
    # net_id_data_for: the id of the CNN network this function generates data for. Start from zero.
    # model_id is to designate the specific model folder

    # generator matrix and parity-check matrix
    G_matrix = linear_code.G_matrix
    H_matrix = linear_code.H_matrix

    SNRset_for_generate_training_data = train_config.SNR_set_gen_data
    if generate_data_for == 'Training':
        batch_size_each_SNR = int(train_config.training_minibatch_size //
                                  np.size(train_config.SNR_set_gen_data))
        total_batches = int(train_config.training_sample_num //
                            train_config.training_minibatch_size)
    elif generate_data_for == 'Test':
        batch_size_each_SNR = int(train_config.test_minibatch_size //
                                  np.size(train_config.SNR_set_gen_data))
        total_batches = int(train_config.test_sample_num //
                            train_config.test_minibatch_size)
    else:
        print('Invalid objective of data generation!')
        exit(0)

    # build BP decoding network
    if np.size(bp_iter_num) != net_id_data_for + 1:
        print('Error: the length of bp_iter_num is not correct!')
        exit(0)
    bp_decoder = BP_Decoder.BP_NetDecoder(H_matrix, batch_size_each_SNR,
                                          top_config, BP_layers)

    conv_net = {}
    denoise_net_in = {}
    denoise_net_out = {}
    for net_id in range(net_id_data_for):
        # conv_net[net_id] = ConvNet.ConvNet(net_config, None, net_id)
        conv_net[net_id] = ConvNet.ConvNet(net_config, train_config, net_id)
        denoise_net_in[net_id], denoise_net_out[net_id] = conv_net[
            net_id].build_network()

    # init graph
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    # restore cnn networks before the target CNN
    for net_id in range(net_id_data_for):
        conv_net[net_id].restore_network_with_model_id(
            sess, net_config.total_layers, model_id[0:(net_id + 1)])

    start = datetime.datetime.now()

    if generate_data_for == 'Training':
        train_feature_path = train_config.training_feature_folder + format("BP%s/" % bp_decoder.BP_layers) \
                             + train_config.training_feature_file
        fout_est_noise = open(train_feature_path, 'wb')

        train_label_path = train_config.training_label_folder + format("BP%s/" % bp_decoder.BP_layers) \
                             + train_config.training_label_file
        fout_real_noise = open(train_label_path, 'wb')
        # fout_est_noise = open(train_config.training_feature_file, 'wb')
        # fout_real_noise = open(train_config.training_label_file, 'wb')
    elif generate_data_for == 'Test':
        test_feature_path = train_config.test_feature_folder + format("BP%s/" % bp_decoder.BP_layers) \
                             + train_config.test_feature_file
        fout_est_noise = open(test_feature_path, 'wb')

        test_label_path = train_config.test_label_folder + format("BP%s/" % bp_decoder.BP_layers) \
                          + train_config.test_label_file
        fout_real_noise = open(test_label_path, 'wb')
        # fout_est_noise = open(train_config.test_feature_file, 'wb')
        # fout_real_noise = open(train_config.test_label_file, 'wb')
    else:
        print('Invalid objective of data generation!')
        exit(0)

    # generating data: the CNN dataset is produced here; the feature is the BP-decoder noise estimate noise_before_cnn, the label is the actual channel noise channel_noise
    for ik in range(0, total_batches):  # number of batches
        for SNR in SNRset_for_generate_training_data:
            x_bits, _, _, channel_noise, y_receive, LLR, _ = lbc.encode_and_transmission(
                G_matrix, SNR, batch_size_each_SNR, noise_io)
            # x_bits, 1 - u_coded_bits, s_mod, ch_noise, y_receive, LLR, ch_noise_sigma
            for iter in range(0, net_id_data_for + 1):
                u_BP_decoded = bp_decoder.decode(LLR.astype(np.float32),
                                                 bp_iter_num[iter])

                if iter != net_id_data_for:
                    if top_config.update_llr_with_epdf:
                        prob = conv_net[iter].get_res_noise_pdf(model_id).get(
                            np.float32(SNR))
                        LLR = denoising_and_calc_LLR_epdf(
                            prob, y_receive, u_BP_decoded,
                            denoise_net_in[iter], denoise_net_out[iter], sess)
                    else:
                        res_noise_power = conv_net[iter].get_res_noise_power(
                            model_id).get(np.float32(SNR))
                        LLR = denoising_and_calc_LLR_awgn(
                            res_noise_power, y_receive, u_BP_decoded,
                            denoise_net_in[iter], denoise_net_out[iter], sess)

            # reconstruct noise
            noise_before_cnn = y_receive - (u_BP_decoded * (-2) + 1)
            noise_before_cnn = noise_before_cnn.astype(np.float32)
            noise_before_cnn.tofile(fout_est_noise)  # write features to file
            channel_noise.tofile(fout_real_noise)  # write labels to file

    fout_real_noise.close()
    fout_est_noise.close()

    sess.close()
    end = datetime.datetime.now()

    print("Time: %ds" % (end - start).seconds)
    print("end")
Example #32
                                        args.qt,
                                        transform=transfroms_)
    test_dataset = SegData_catheter_pt(test_file,
                                       proj_pix,
                                       args.qt,
                                       transform=transfroms_)
    trainloader = DataLoader(train_dataset,
                             batch_size=train_batch_num,
                             shuffle=True,
                             num_workers=0)
    testloader = DataLoader(test_dataset,
                            batch_size=train_batch_num,
                            shuffle=False,
                            num_workers=0)
    if args.net == '6layer':
        net = ConvNet.layer6Net(1, 20, 6)
    elif args.net == '8layer':
        net = ConvNet.layer8Net(1, 20, 6)
    elif args.net == 'homo':
        net = ConvNet.HomographyNet(1, 20, 6)
    elif args.net == 'homo_bn':
        net = ConvNet.HomographyNet_bn(1, 20, 6)
    elif args.net == 'pointnet2':
        net = ConvNet.PointReg(1 * args.qt, False)
    else:
        net = ConvNet.UNet(1, 20, 6)

    net = net.cuda()
    net = nn.DataParallel(net)

    optimizer = optim.Adam(net.parameters(), lr=args.lr, weight_decay=1e-4)
Example #33
    param_group['lr'] = lr


device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
N_EPOCH = 40
SPECIES = ['human', 'whole']
BATCH_SIZE = 64
NUM_CLASSES = 2
LEARNING_RATE = 0.01

for _species in SPECIES:
  WriteFile = open("./results/test/%s_test.rst" % _species, "w")
  rst = []
  loss_list = []
  accuracy_list = []
  model = ConvNet().to(device)
  model = model.double()
  weights = [4.0, 1.0]
  class_weights = torch.DoubleTensor(weights).to(device)
  criterion = nn.CrossEntropyLoss(weight=class_weights)
  optimizer = torch.optim.Adagrad(model.parameters(), lr=LEARNING_RATE, weight_decay=0.00001)
  train_dataset = DriveData("./dataset/cv/%s/%s_pos_all.fa" % (_species, _species),
                            "./dataset/cv/%s/%s_neg_all.fa" % (_species, _species))
  test_dataset = DriveData("./dataset/test/%s/%s_pos_test.fa" % (_species, _species),
                           "./dataset/test/%s/%s_neg_test.fa" % (_species, _species))
  train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, num_workers=8, shuffle=True)
  test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, num_workers=8, shuffle=False)
  curr_lr = LEARNING_RATE
  for epoch in range(N_EPOCH):
    print(epoch)
    correct = 0
Example #34
def generate_noise_samples(code, top_config, train_config, net_config, gen_data_for, bp_iter_num, num_of_cnn, model_id,
                           noise_io, intf_io):
    global batch_size_each_SNR, total_batches
    G_matrix = code.G_matrix
    H_matrix = code.H_matrix

    top_config.SNR_set_gen_training = generate_snr_set(top_config)
    print('SNR set for generating training data: %s' % np.array2string(top_config.SNR_set_gen_training))

    if gen_data_for == 'Training':
        batch_size_each_SNR = int(train_config.training_minibatch_size // top_config.SNR_set_size)
        total_batches = int(train_config.training_sample_num // train_config.training_minibatch_size)
    elif gen_data_for == 'Test':
        batch_size_each_SNR = int(train_config.test_minibatch_size // top_config.SNR_set_size)
        total_batches = int(train_config.test_sample_num // train_config.test_minibatch_size)
    else:
        print('>>> Invalid objective of data generation! (ibc.py)')
        exit(0)

    # BP iteration
    if np.size(bp_iter_num) != num_of_cnn + 1:
        print('>>> Error: the length of bp_iter_num is not correct! (ibc.py)')
        exit(0)
    bp_decoder = BP_Decoder.BP_NetDecoder(H_matrix, batch_size_each_SNR)

    conv_net = {}
    denoise_net_in = {}
    denoise_net_out = {}
    intf_net_out = {}

    for net_id in range(num_of_cnn):  # TODO: Doesn't work if num_of_cnn=0
        conv_net[net_id] = ConvNet.ConvNet(net_config, None, net_id)
        denoise_net_in[net_id], denoise_net_out[net_id], intf_net_out[net_id] = conv_net[net_id].build_network()

    # Init graph
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    # Restore cnn networks before the target CNN        # TODO: Doesn't work if num_of_cnn=0
    for net_id in range(num_of_cnn):  # TODO: Why restore here?
        conv_net[net_id].restore_network_with_model_id(sess, net_config.total_layers, model_id[0:(net_id + 1)])

    start = datetime.datetime.now()

    if gen_data_for == 'Training':
        if not os.path.isdir(train_config.training_folder):
            os.mkdir(train_config.training_folder)
        fout_est_noise = open(train_config.training_feature_file, 'wb')
        fout_real_noise = open(train_config.training_noise_label_file, 'wb')
        fout_real_intf = open(train_config.training_intf_label_file, 'wb')
    elif gen_data_for == 'Test':
        if not os.path.isdir(train_config.test_folder):
            os.mkdir(train_config.test_folder)
        fout_est_noise = open(train_config.test_feature_file, 'wb')
        fout_real_noise = open(train_config.test_noise_label_file, 'wb')
        fout_real_intf = open(train_config.test_intf_label_file, 'wb')
    else:
        print('>>> Invalid objective of data generation! (ibc.py)')
        exit(0)

    # Generating data
    for ik in range(total_batches):
        for SNR in top_config.SNR_set_gen_training:
            x_bits, _, _, ch_noise, intf_labels, y_receive, LLR = lbc.encode_and_transmit(G_matrix, SNR,
                                                                                          batch_size_each_SNR, noise_io,
                                                                                          intf_io, top_config)

            for iter in range(0, num_of_cnn + 1):
                # BP decoder
                u_BP_decoded = bp_decoder.decode(LLR.astype(np.float32), bp_iter_num[iter])

                # CNN
                if iter != num_of_cnn:
                    res_noise_power = conv_net[iter].get_res_noise_power(model_id).get(np.float32(SNR))
                    LLR, predicted_intf_ind = denoising_and_calc_LLR_awgn(res_noise_power, y_receive, u_BP_decoded,
                                                                          denoise_net_in[iter], denoise_net_out[iter],
                                                                          intf_net_out[iter], sess)

            # reconstruct noise
            noise_before_cnn = y_receive - (u_BP_decoded * (-2) + 1)
            noise_before_cnn = noise_before_cnn.astype(np.float32)
            ch_noise = ch_noise.astype(np.float32)
            intf_labels = intf_labels.astype(np.float32)
            noise_before_cnn.tofile(fout_est_noise)  # write features to file
            ch_noise.tofile(fout_real_noise)  # write noise labels to file
            intf_labels.tofile(fout_real_intf)  # write interference labels to file

        if ik % 100 == 0:
            print("%d batches finished!" % ik)
            section = datetime.datetime.now()
            print("Time: %ds" % (section - start).seconds)

    fout_real_noise.close()
    fout_est_noise.close()
    fout_real_intf.close()

    sess.close()
    end = datetime.datetime.now()

    print("Time: %ds" % (end - start).seconds)
    print("Finish generating %s data" % gen_data_for)
Example #35
        param_group['lr'] = lr


device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
N_EPOCH = 80
BATCH_SIZE = 64
NUM_CLASSES = 2
LEARNING_RATE = 0.01
TRAIN_SPECIES = 'whole'
TEST_SPECIES = 'new'

WriteFile = open("./results/test_new/%s_test.rst" % TEST_SPECIES, "w")
rst = []
loss_list = []
accuracy_list = []
model = ConvNet().to(device)
model = model.double()
weights = [4.0, 1.0]
class_weights = torch.DoubleTensor(weights).to(device)
criterion = nn.CrossEntropyLoss(weight=class_weights)
optimizer = torch.optim.Adagrad(model.parameters(), lr=LEARNING_RATE)
train_dataset = DriveData("./dataset/sequences/%s_pos.fa" % TRAIN_SPECIES,
                          "./dataset/sequences/%s_neg.fa" % TRAIN_SPECIES)
test_dataset = DriveData("./dataset/sequences/%s_pos.fa" % TEST_SPECIES,
                         "./dataset/sequences/%s_neg.fa" % TEST_SPECIES)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=BATCH_SIZE,
                                           num_workers=8,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=BATCH_SIZE,
Example #36
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf
import ConvNet
from tensorflow.contrib.rnn import BasicLSTMCell



ind1 = np.ndarray([1, 8, 8, 2], np.float32)
for i in xrange(0, 8):
    for j in xrange(0, 8):
        ind1[0,i, j, 0] = (i+1) * 8 + (j+1);
        ind1[0,i, j, 1] = (i+1) * 8 + (j+1) + 0.5;
        
inputLayer = tf.placeholder(tf.float32, shape=(1,8, 8, 2))

testfile = ConvNet.openEmptyFileR('conv.txt')
conv1, conv1save = ConvNet.ConvLayer(inputLayer, filterSize=5, outDepth=4, convStride=1,padding=True, poolSize=2, loadFromFile=testfile)
conv2, conv2save = ConvNet.ConvLayer(conv1, filterSize=5, outDepth=8, convStride=1,padding=True, poolSize=2, loadFromFile=testfile)
reshape = ConvNet.Conv2FC_Reshape(conv2)
fc1,fc1save = ConvNet.FCLayer(reshape, 8, loadFromFile=testfile)
fc2,fc2save = ConvNet.FCLayer(fc1, 2*2*8, loadFromFile=testfile)
deshape = ConvNet.FC2Conv_Reshape(fc2,2,2,8)
uconv1,uconv1save = ConvNet.DeConvLayer(deshape,filterSize=5,output_shape=[1,4,4,4],convStride = 2,loadFromFile = testfile)
uconv2,uconv2save = ConvNet.DeConvLayer(uconv1,filterSize=5,output_shape=[1,8,8,2],convStride = 2,loadFromFile = testfile)

if testfile:testfile.close()


with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    
Example #37
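# Measures the residual noise that remains after BP decoding plus CNN denoising at each evaluation SNR,
# and writes either its empirical pdf or its mean power to a residual-noise property file.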
def analyze_residual_noise(linear_code, top_config, net_config, simutimes, batch_size):

    ## load some configurations from top_config
    net_id_tested = top_config.currently_trained_net_id
    model_id = top_config.model_id
    bp_iter_num = top_config.BP_iter_nums_gen_data[0:(net_id_tested + 1)]
    noise_io = DataIO.NoiseIO(top_config.N_code, False, None, top_config.cov_1_2_file)
    SNRset = top_config.eval_SNRs

    G_matrix = linear_code.G_matrix
    H_matrix = linear_code.H_matrix
    _, N = np.shape(G_matrix)

    max_batches, residual_times = np.array(divmod(simutimes, batch_size), np.int32)
    print('Real simutimes: %d' % simutimes)
    if residual_times != 0:
        max_batches += 1

    # build BP decoding network
    if np.size(bp_iter_num) != net_id_tested + 1:
        print('Error: the length of bp_iter_num is not correct! 3!\nnp.size(bp_iter_num)='+str(np.size(bp_iter_num))+'\nnet_id_tested + 1='+str(net_id_tested + 1))
        exit(0)
    bp_decoder = BP_Decoder.BP_NetDecoder(H_matrix, batch_size)

    # build denoising network
    conv_net = {}
    denoise_net_in = {}
    denoise_net_out = {}

    # build network for each CNN denoiser,
    for net_id in range(net_id_tested+1):
        conv_net[net_id] = ConvNet.ConvNet(net_config, None, net_id)
        denoise_net_in[net_id], denoise_net_out[net_id] = conv_net[net_id].build_network()

    # init graph
    init = tf.compat.v1.global_variables_initializer()
    sess = tf.compat.v1.Session()
    sess.run(init)

    # restore denoising network
    for net_id in range(net_id_tested + 1):
        conv_net[net_id].restore_network_with_model_id(sess, net_config.total_layers, model_id[0:(net_id+1)])

    model_id_str = np.array2string(model_id, separator='_', formatter={'int': lambda d: "%d" % d})
    model_id_str = model_id_str[1:(len(model_id_str) - 1)]
    loss_file_name = format("%sresidual_noise_property_netid%d_model%s.txt" % (net_config.residual_noise_property_folder, net_id_tested, model_id_str))
    fout_loss = open(loss_file_name, 'wt')

    start = datetime.datetime.now()
    for SNR in SNRset:
        noise_io.reset_noise_generator()
        real_batch_size = batch_size
        # simulation part
        loss = 0.0
        prob = np.ones(0)
        for ik in range(0, max_batches):
            print("Batch id: %d" % ik)
            if ik == max_batches - 1 and residual_times != 0:
                real_batch_size = residual_times
            x_bits, _, s_mod, channel_noise, y_receive, LLR = lbc.encode_and_transmission(G_matrix, SNR, real_batch_size, noise_io)

            for iter in range(0, net_id_tested+1):
                # BP decoding
                u_BP_decoded = bp_decoder.decode(LLR.astype(np.float32), bp_iter_num[iter])
                noise_before_cnn = y_receive - (u_BP_decoded * (-2) + 1)
                noise_after_cnn = sess.run(denoise_net_out[iter], feed_dict={denoise_net_in[iter]: noise_before_cnn})
                s_mod_plus_res_noise = y_receive - noise_after_cnn
                if iter < net_id_tested:  # calculate the LLR for next BP decoding
                    if top_config.update_llr_with_epdf:
                        prob_tmp = conv_net[iter].get_res_noise_pdf(model_id).get(np.float32(SNR))
                        LLR = calc_LLR_epdf(prob_tmp, s_mod_plus_res_noise)
                    else:
                        res_noise_power = conv_net[iter].get_res_noise_power(model_id).get(np.float32(SNR))
                        LLR = s_mod_plus_res_noise * 2.0 / res_noise_power
            if top_config.update_llr_with_epdf:
                prob = stat_prob(s_mod_plus_res_noise - s_mod, prob)
            else:
                loss += np.sum(np.mean(np.square(s_mod_plus_res_noise-s_mod), 1))

        # each SNR
        if top_config.update_llr_with_epdf:
            fout_loss.write(str(SNR) + '\t')
            for i in range(np.size(prob)):
                fout_loss.write(str(prob[i]) + '\t')
            fout_loss.write('\n')
        else:
            loss /= np.double(simutimes)
            fout_loss.write(str(SNR) + '\t' + str(loss) + '\n')

    fout_loss.close()
    end = datetime.datetime.now()
    print('Time: %ds' % (end-start).seconds)
    print("end\n")
    sess.close()
Example #38
    loss_win = None
    train_drr_win = None
    test_drr_win = None
    train_xray_win = None
    test_xray_win = None

    transfroms_ = transforms.Compose([
        transforms.ToTensor(),
        # transforms.Resize((64, 64))
    ])
    train_dataset = SegData_csv(train_file, proj_pix, transform=transfroms_)
    test_dataset = SegData_csv(test_file, proj_pix, transform=transfroms_)
    trainloader = DataLoader(train_dataset, batch_size=train_batch_num, shuffle=True, num_workers=0)
    testloader = DataLoader(test_dataset, batch_size=train_batch_num, shuffle=False, num_workers=0)
    if args.net == '6layer':
        net = ConvNet.layer6Net(1, 20, 6)
    elif args.net == '8layer':
        net = ConvNet.layer8Net(1, 20, 6)
    else:
        net = ConvNet.UNet(1, 20, 6)

    net = net.cuda()
    net = nn.DataParallel(net)

    criterion = torch.nn.MSELoss()
    optimizer = optim.Adam(net.parameters(), lr=args.lr, weight_decay=1e-4)
    # train_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=300)

    best_loss = np.inf

Example #39
 def imgSave(idx,sample):
     ConvNet.saveImagesMono(sample, saveSize, 'out\\sample_%d.png' % (idx))
Example #40
BATCH_SIZE = 500
testBATCH_SIZE = 40
saveSize = [4, 10]

GF = 32             # Dimension of G filters in first conv layer. default [64]
DF = 32             # Dimension of D filters in first conv layer. default [64]
Z_DIM = 10

LR = 0.0001         # Learning rate

t2, t4 = MNISTData.IMAGE_H//2, MNISTData.IMAGE_H//4
s2, s4 = MNISTData.IMAGE_W//2, MNISTData.IMAGE_W//4

glist = []
loadFromFile = ConvNet.openEmptyFileR('gan0g.txt')
gfc0 = ConvNet.addlist(glist,ConvNet.FC(inDepth = Z_DIM,outDepth = Z_DIM,loadFromFile = loadFromFile))
gfc1 = ConvNet.addlist(glist,ConvNet.FC(inDepth = Z_DIM,outDepth = GF*2*t4*s4,loadFromFile = loadFromFile))
gdc2 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*2,outDepth = GF*1,filterSize = 5,loadFromFile = loadFromFile))
gdc3 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*1,outDepth = MNISTData.IMAGE_CHANNEL,filterSize = 5,loadFromFile = loadFromFile))
if loadFromFile:loadFromFile.close()

dlist = []
loadFromFile = ConvNet.openEmptyFileR('gan0d.txt')
dcva = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = MNISTData.IMAGE_CHANNEL,outDepth = DF*1,filterSize = 1,loadFromFile = loadFromFile))
dcv0 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*1,outDepth = DF*1,filterSize = 5,loadFromFile = loadFromFile))
dcv1 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*1,outDepth = DF*2,filterSize = 5,loadFromFile = loadFromFile))
dfc0 = ConvNet.addlist(dlist,ConvNet.FC(inDepth = DF*2*t4*s4,outDepth = 1,loadFromFile = loadFromFile))
if loadFromFile:loadFromFile.close()
Example #41
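# End-to-end BER simulation over colored noise: for each SNR it runs BP decoding (optionally followed
# by the CNN denoiser and further BP passes) until enough bit errors are collected, logging BER and
# simulation time to files whose names encode the configuration.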
def simulation_colored_noise(linear_code,
                             top_config,
                             net_config,
                             simutimes_range,
                             target_err_bits_num,
                             batch_size,
                             BP_layers,
                             train_epoch=25,
                             use_weight_loss=False):
    # target_err_bits_num: the simulation stops if the number of bit errors reaches the target.
    # simutimes_range: [min_simutimes, max_simutimes]

    ## load configurations from top_config
    SNRset = top_config.eval_SNRs
    bp_iter_num = top_config.BP_iter_nums_simu
    noise_io = DataIO.NoiseIO(
        top_config.N_code,
        False,
        None,
        top_config.cov_1_2_file_simu,
        rng_seed=0)  # cov_1_2_file_simu is the corresponding noise file under the Noise folder
    denoising_net_num = top_config.cnn_net_number
    model_id = top_config.model_id

    G_matrix = linear_code.G_matrix
    H_matrix = linear_code.H_matrix
    K, N = np.shape(G_matrix)

    ## build BP decoding network
    if np.size(bp_iter_num) != denoising_net_num + 1:
        print('Error: the length of bp_iter_num is not correct!')
        exit(0)
    bp_decoder = BP_Decoder.BP_NetDecoder(H_matrix, batch_size, top_config,
                                          BP_layers, 0, use_weight_loss)
    # bp_decoder_after_cnn = BP_Decoder.BP_NetDecoder(H_matrix, batch_size, top_config, BP_layers, 1)
    # bp_decoder = bp_decoder_before_cnn  # default

    res_N = top_config.N_code
    res_K = top_config.K_code
    res_BP_layers = bp_decoder.BP_layers

    ## build denoising network
    conv_net = {}
    denoise_net_in = {}
    denoise_net_out = {}
    # build network for each CNN denoiser,
    if net_config.use_conv_net:  # only built if the conv net is used
        for net_id in range(denoising_net_num):
            if top_config.same_model_all_nets and net_id > 0:
                conv_net[net_id] = conv_net[0]
                denoise_net_in[net_id] = denoise_net_in[0]
                denoise_net_out[net_id] = denoise_net_out[0]
            else:  # default branch
                # conv_net[net_id] = ConvNet.ConvNet(net_config, None, net_id)
                conv_net[net_id] = ConvNet.ConvNet(net_config, top_config,
                                                   net_id)  # build a residual-noise denoising CNN object
                denoise_net_in[net_id], denoise_net_out[net_id] = conv_net[
                    net_id].build_network()  # build the network and return its input and output tensors
        # init gragh
        init = tf.global_variables_initializer()
        sess = tf.Session()
        print('Open a tf session!')
        sess.run(init)
        # restore denoising network
        for net_id in range(denoising_net_num):
            if top_config.same_model_all_nets and net_id > 0:
                break
            conv_net[net_id].restore_network_with_model_id(
                sess, net_config.total_layers,
                model_id[0:(net_id + 1)])  # restore the previously trained network

    ## initialize simulation times
    max_simutimes = simutimes_range[1]
    min_simutimes = simutimes_range[0]
    max_batches, residual_times = np.array(divmod(max_simutimes, batch_size),
                                           np.int32)
    if residual_times != 0:
        max_batches += 1

    ## generate out ber file
    bp_str = np.array2string(bp_iter_num,
                             separator='_',
                             formatter={'int': lambda d: "%d" % d})
    bp_str = bp_str[1:(len(bp_str) - 1)]

    if net_config.use_conv_net and bp_decoder.use_cnn_res_noise:
        ber_file = format(
            '%s/bp_model/%s_%s/BP%s/BER(%d_%d)_BP(%s)_BPDNN%s-CNN-BPDNN%s' %
            (net_config.model_folder, N, K, bp_decoder.BP_layers, N, K, bp_str,
             bp_decoder.BP_layers, bp_decoder.BP_layers))
        f_simulation_time = format(
            '%s/bp_model/%s_%s/BP%s/simulation_time(%d_%d)_BP(%s)_BPDNN%s-CNN-BPDNN%s'
            % (net_config.model_folder, N, K, bp_decoder.BP_layers, N, K,
               bp_str, bp_decoder.BP_layers, bp_decoder.BP_layers))
    elif bp_decoder.use_train_bp_net or bp_decoder.train_bp_network:
        if use_weight_loss:
            ber_file = format(
                '%s/bp_model/%s_%s/BP%s/BER(%d_%d)_BP(%s)_BP%s_epoch%s_weight_loss'
                % (net_config.model_folder, N, K, bp_decoder.BP_layers, N, K,
                   bp_str, bp_decoder.BP_layers, train_epoch))
            f_simulation_time = format(
                '%s/bp_model/%s_%s/BP%s/simulation_time(%d_%d)_BP(%s)_BP%s_epch%s_weight_loss'
                % (net_config.model_folder, N, K, bp_decoder.BP_layers, N, K,
                   bp_str, bp_decoder.BP_layers, train_epoch))
        else:
            ber_file = format(
                '%s/bp_model/%s_%s/BP%s/BER(%d_%d)_BP(%s)_BP%s_epoch%s' %
                (net_config.model_folder, N, K, bp_decoder.BP_layers, N, K,
                 bp_str, bp_decoder.BP_layers, train_epoch))
            f_simulation_time = format(
                '%s/bp_model/%s_%s/BP%s/simulation_time(%d_%d)_BP(%s)_BP%s_epch%s'
                % (net_config.model_folder, N, K, bp_decoder.BP_layers, N, K,
                   bp_str, bp_decoder.BP_layers, train_epoch))

    else:
        ber_file = format('%s/bp_model/%s_%s/BP%s/BER(%d_%d)_BP(%s)_LLRBP%s' %
                          (net_config.model_folder, N, K, bp_decoder.BP_layers,
                           N, K, bp_str, bp_decoder.BP_layers))
        f_simulation_time = format(
            '%s/bp_model/%s_%s/BP%s/simulation_time(%d_%d)_BP(%s)_LLRBP%s' %
            (net_config.model_folder, N, K, bp_decoder.BP_layers, N, K, bp_str,
             bp_decoder.BP_layers))

    if top_config.corr_para != top_config.corr_para_simu:  # this means we are testing the model robustness to correlation level.
        ber_file = format('%s_SimuCorrPara%.2f' %
                          (ber_file, top_config.corr_para_simu))
    if top_config.same_model_all_nets:
        ber_file = format('%s_SameModelAllNets' % ber_file)
    if top_config.update_llr_with_epdf:
        ber_file = format('%s_llrepdf' % ber_file)
    if denoising_net_num > 0:
        model_id_str = np.array2string(model_id,
                                       separator='_',
                                       formatter={'int': lambda d: "%d" % d})
        model_id_str = model_id_str[1:(len(model_id_str) - 1)]
        ber_file = format('%s_model%s' % (ber_file, model_id_str))
    if np.size(SNRset) == 1:
        ber_file = format('%s_%.1fdB' % (ber_file, SNRset[0]))

    ber_file = format('%s.txt' % ber_file)
    fout_ber = open(ber_file, 'wt')
    simulation_time_file = format('%s.txt' % f_simulation_time)
    fout_simulation_time = open(simulation_time_file, 'wt')

    ## simulation starts
    start = datetime.datetime.now()
    total_simulation_times = 0
    residual_simulation_times = 0
    for SNR in SNRset:
        real_batch_size = batch_size
        # simulation part
        bit_errs_iter = np.zeros(denoising_net_num + 1, dtype=np.int32)
        actual_simutimes = 0
        rng = np.random.RandomState(1)  # fixed pseudo-random seed
        noise_io.reset_noise_generator()  # reset the noise generator seed
        for ik in range(0, max_batches):  # iterate over max_batches (e.g. 6667)

            if ik == max_batches - 1 and residual_times != 0:  # last batch and residual_times != 0 (by default it is 0)
                real_batch_size = residual_times
                residual_simulation_times = residual_simulation_times + 1
                fout_simulation_time.write('Less than one full batch_size; actual batch_size is: ' +
                                           str(real_batch_size) + '\n')
                print('Less than one full batch_size; actual batch_size is: ' +
                      str(real_batch_size) + '\n')
            x_bits, u_coded_bits, s_mod, ch_noise, y_receive, LLR, ch_noise_sigma = lbc.encode_and_transmission(
                G_matrix, SNR, real_batch_size, noise_io, rng)  #
            # ------------------------------------------------------------
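            # Sanity-check of the operating point: noise_power estimates the per-dimension
            # noise variance sigma^2; with unit-energy BPSK symbols and N0 = 2*sigma^2,
            # 10*log10(1/(2*sigma^2)) is the Eb/N0 per coded bit as defined here.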
            noise_power = np.mean(np.square(ch_noise))
            practical_snr = 10 * np.log10(1 / (noise_power * 2.0))
            if ik % 1000 == 0:
                print('Batch %d in total %d batches.' % (ik, int(max_batches)),
                      end=' ')
                print('Practical EbN0: %.2f' % practical_snr)

            for iter in range(0, denoising_net_num):  # denoising_net_num == 1
                # if 0 == iter:
                #     bp_decoder = bp_decoder_before_cnn
                # else:
                #     bp_decoder = bp_decoder_after_cnn
                # BP decoding: the second argument bp_iter_num is effectively unused, since the number of iterations is determined by the BP_layers variable set earlier
                u_BP_decoded = bp_decoder.decode(
                    LLR.astype(np.float32),
                    bp_iter_num[iter])  # BP decoding takes the LLRs and returns the decoded codeword
                # NOTE: when iter == 0 the recorded BER is that of plain BP; when iter == 1 it is that of BP-CNN-BP.
                # First check whether the conv net is used.
                if net_config.use_conv_net and iter < denoising_net_num:  # denoising_net_num == 1; when iter == 0 the CNN estimates the noise, otherwise plain BP decoding is used
                    if top_config.update_llr_with_epdf:
                        prob = conv_net[iter].get_res_noise_pdf(
                            model_id, res_N, res_K,
                            res_BP_layers).get(np.float32(SNR))
                        LLR = denoising_and_calc_LLR_epdf(
                            prob, y_receive, u_BP_decoded,
                            denoise_net_in[iter], denoise_net_out[iter], sess)
                    elif bp_decoder.use_cnn_res_noise:  # by default the else branch below is taken
                        res_noise_power = conv_net[iter].get_res_noise_power(
                            model_id, SNRset, res_N, res_K, res_BP_layers).get(
                                np.float32(SNR))  # get the residual noise power (apparently read from a stored file)
                        LLR = denoising_and_calc_LLR_awgn(
                            res_noise_power, y_receive, u_BP_decoded,
                            denoise_net_in[iter], denoise_net_out[iter],
                            sess)  # use the CNN to estimate the noise and obtain the LLR input for the next BP round
                    else:
                        res_noise_power = conv_net[iter].get_res_noise_power(
                            model_id, SNRset, res_N, res_K, res_BP_layers).get(
                                np.float32(SNR))  # get the residual noise power (apparently read from a stored file)
                        LLR = denoising_and_calc_LLR_awgn(
                            res_noise_power, y_receive, u_BP_decoded,
                            denoise_net_in[iter], denoise_net_out[iter],
                            sess)  # use the CNN to estimate the noise and obtain the LLR input for the next BP round
                        noise_after_cnn = y_receive - (u_BP_decoded * (-2) + 1)
                        # noise_after_cnn = sess.run(net_out, feed_dict={net_in: noise_before_cnn})
                        # calculate the LLR for next BP decoding
                        s_mod_plus_res_noise = y_receive - noise_after_cnn
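                        # For BPSK symbols in {-1, +1} observed in Gaussian noise of
                        # variance sigma^2, the bit LLR is 2*y/sigma^2; the residual
                        # noise power after denoising plays the role of sigma^2 below.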
                        LLR = s_mod_plus_res_noise * 2.0 / res_noise_power
                output_x = linear_code.dec_src_bits(
                    u_BP_decoded)  # the first K bits are the information bits before encoding
                shape_x, shape_y = output_x.shape
                # for i in range(shape_x):
                #     if (np.any(output_x[i] - x_bits[i])):
                #         bit_errs_iter[iter] += 1
                bit_errs_iter[iter] += np.sum(
                    output_x !=
                    x_bits)  # count the differing bits (each mismatched position adds 1, giving the number of bit errors)
                # The same codeword is counted twice: once for plain BP and once for BP+CNN+BP.
                # Typically BP+CNN+BP yields a lower BER than BP alone.

            actual_simutimes += real_batch_size
            if bit_errs_iter[
                    denoising_net_num] >= target_err_bits_num and actual_simutimes >= min_simutimes:  # stop once the target bit-error count is reached and at least min_simutimes codewords have been simulated
                break
        print('%d bits are simulated!, batch_size=%d' %
              (actual_simutimes * K, real_batch_size))

        total_simulation_times += actual_simutimes

        ber_iter = np.zeros(denoising_net_num + 1, dtype=np.float64)
        fout_ber.write(str(SNR) + '\t')
        for iter in range(0, denoising_net_num + 1):  # 1+1 = 2
            ber_iter[iter] = bit_errs_iter[iter] / float(K * actual_simutimes)
            fout_ber.write(
                str(ber_iter[iter]) + '\t' + str(bit_errs_iter[iter]) + '\t')
            print(ber_iter[iter])
        fout_ber.write('\n')
        # break

    fout_ber.close()
    end = datetime.datetime.now()
    print('Time: %ds' % (end - start).seconds)
    print("end\n")

    fout_simulation_time.write(
        str(total_simulation_times) + '\t' + str((end - start).seconds))
    fout_simulation_time.close()

    if net_config.use_conv_net:
        sess.close()
    print('Close the tf session!')
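
# A minimal sketch of the AWGN-style LLR update used in the simulation above
# (a hypothetical standalone helper, not part of the original module): for BPSK
# symbols in {-1, +1} plus Gaussian noise of power res_noise_power, the bit LLR
# is 2 * y / res_noise_power.
def calc_llr_awgn_sketch(s_mod_plus_res_noise, res_noise_power):
    return s_mod_plus_res_noise * 2.0 / res_noise_power
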
Ejemplo n.º 42
0
def analyze_residual_noise(linear_code, top_config, net_config, simutimes,
                           batch_size, BP_layers):

    ## load some configurations from top_config
    net_id_tested = top_config.currently_trained_net_id
    model_id = top_config.model_id
    bp_iter_num = top_config.BP_iter_nums_gen_data[0:(net_id_tested + 1)]
    noise_io = DataIO.NoiseIO(top_config.N_code, False, None,
                              top_config.cov_1_2_file)
    SNRset = top_config.eval_SNRs

    G_matrix = linear_code.G_matrix
    H_matrix = linear_code.H_matrix
    K, N = np.shape(G_matrix)

    max_batches, residual_times = np.array(divmod(simutimes, batch_size),
                                           np.int32)
    print('Real simutimes: %d' % simutimes)
    if residual_times != 0:
        max_batches += 1

    # build BP decoding network
    if np.size(bp_iter_num) != net_id_tested + 1:
        print('Error: the length of bp_iter_num is not correct!')
        exit(0)
    bp_decoder = BP_Decoder.BP_NetDecoder(H_matrix, batch_size, top_config,
                                          BP_layers)

    # build denoising network
    conv_net = {}
    denoise_net_in = {}
    denoise_net_out = {}

    # build network for each CNN denoiser,
    for net_id in range(net_id_tested + 1):
        # conv_net[net_id] = ConvNet.ConvNet(net_config, None, net_id)
        conv_net[net_id] = ConvNet.ConvNet(net_config, top_config, net_id)
        denoise_net_in[net_id], denoise_net_out[net_id] = conv_net[
            net_id].build_network()

    # init graph
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    # restore denoising network
    for net_id in range(net_id_tested + 1):
        conv_net[net_id].restore_network_with_model_id(
            sess, net_config.total_layers, model_id[0:(net_id + 1)])

    model_id_str = np.array2string(model_id,
                                   separator='_',
                                   formatter={'int': lambda d: "%d" % d})
    model_id_str = model_id_str[1:(len(model_id_str) - 1)]
    loss_file_name = format(
        "%s/bp_model/%s_%s/BP%s/%s_%s_residual_noise_property_netid%d_model%s.txt"
        % (net_config.residual_noise_property_folder, N, K,
           bp_decoder.BP_layers, N, K, net_id_tested, model_id_str))
    fout_loss = open(loss_file_name, 'wt')

    start = datetime.datetime.now()
    for SNR in SNRset:  # 0 0.5 1 1.5 2 2.5 3
        noise_io.reset_noise_generator()
        real_batch_size = batch_size
        # simulation part
        loss = 0.0
        prob = np.ones(0)
        for ik in range(0, max_batches):  # max_batches 3
            print("Batch id: %d" % ik)
            if ik == max_batches - 1 and residual_times != 0:
                real_batch_size = residual_times
            x_bits, _, s_mod, channel_noise, y_receive, LLR, _ = lbc.encode_and_transmission(
                G_matrix, SNR, real_batch_size, noise_io)
            # x_bits: randomly generated source bits; u_coded_bits: x_bits after channel encoding; s_mod: u_coded_bits after BPSK modulation; ch_noise: channel noise; y_receive: received signal; LLR: log-likelihood ratios
            for iter in range(0, net_id_tested + 1):
                # BP decoding; astype converts the LLRs to float32
                u_BP_decoded = bp_decoder.decode(
                    LLR.astype(np.float32), bp_iter_num[iter]
                )  # u_BP_decoded is the decoding result; bp_iter_num[iter] is the number of BP iterations
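                # BPSK re-modulation: a bit u in {0, 1} maps to the symbol 1 - 2u, so
                # the CNN input below is the received signal minus the re-modulated
                # hard decision, i.e. an estimate of the channel noise.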
                noise_before_cnn = y_receive - (u_BP_decoded *
                                                (-2) + 1)  # map bits {0, 1} to symbols {+1, -1}
                noise_after_cnn = sess.run(
                    denoise_net_out[iter],
                    feed_dict={denoise_net_in[iter]:
                               noise_before_cnn})  # the CNN estimates the residual noise n~
                s_mod_plus_res_noise = y_receive - noise_after_cnn  # received signal minus the CNN noise estimate, y~
                if iter < net_id_tested:  # calculate the LLR for next BP decoding
                    if top_config.update_llr_with_epdf:  # decides in what form the CNN residual noise is passed to the next iteration (empirical pdf or recomputed power); the default is the empirical pdf
                        prob_tmp = conv_net[iter].get_res_noise_pdf(
                            model_id).get(np.float32(SNR))
                        LLR = calc_LLR_epdf(prob_tmp, s_mod_plus_res_noise)
                    else:
                        res_noise_power = conv_net[iter].get_res_noise_power(
                            model_id).get(np.float32(SNR))
                        LLR = s_mod_plus_res_noise * 2.0 / res_noise_power  # compute the LLR input for the next BP round
            if top_config.update_llr_with_epdf:
                prob = stat_prob(s_mod_plus_res_noise - s_mod,
                                 prob)  # s_mod is the transmitted signal
            else:  # accumulate the mean squared error between the true signal and the network output
                loss += np.sum(
                    np.mean(np.square(s_mod_plus_res_noise - s_mod),
                            1))  # i.e. the average power of the residual noise fed into the next iteration
                #   axis=1 averages within each codeword: [[a,b],[c,d]] -> [(a+b)/2, (c+d)/2]; the loss is the MSE

        # the CNN loss for each SNR
        if top_config.update_llr_with_epdf:
            fout_loss.write(str(SNR) + '\t')
            for i in range(np.size(prob)):
                fout_loss.write(str(prob[i]) + '\t')
            fout_loss.write('\n')
        else:
            loss /= np.double(
                simutimes * 16
            )  # presumably simutimes = 5000*3, i.e. 5000 codewords per batch and max_batches = 3
            fout_loss.write(
                str(SNR) + '\t' + str(loss) + '\n'
            )  # residual_noise_property_netid0_model0.txt stores the loss values; they clearly decrease, showing the training is effective

    fout_loss.close()
    end = datetime.datetime.now()
    print('Time: %ds' % (end - start).seconds)
    print("end\n")
    sess.close()
Ejemplo n.º 43
0
def main(argv):
    # folder for saving
    subfold = argv[1]
    if not os.path.exists(subfold):
        os.mkdir(subfold)
        os.mkdir(os.path.join(subfold, "features"))

    # load data
    X_cnn_raw = []
    labels_cnn_raw = []
    num_grp = 3
    for i in range(num_grp):
        fname = '../LRG-fits/data/lrg_171020/sample-lrg-train-120-120-c3-gr{0}.pkl'.format(
            i)
        with open(fname, 'rb') as fp:
            datadict = pickle.load(fp)
            X_cnn_raw.append(datadict['data'])
            labels_cnn_raw.append(datadict['label'])
        time.sleep(3)

    X_test = []
    labels_test = []
    fname = '../LRG-fits/data/lrg_171020/sample-lrg-test-120-120-c3.pkl'
    with open(fname, 'rb') as fp:
        datadict = pickle.load(fp)
        X_test.append(datadict['data'])
        labels_test.append(datadict['label'])

    # Combine and normalization
    sample_mat = np.vstack(X_cnn_raw)
    del (X_cnn_raw)
    labels_cnn = np.hstack(labels_cnn_raw)
    del (labels_cnn_raw)

    # sample_mat = np.nan_to_num(sample_mat)

    rs = 120
    '''
    with open("../nets/norm_params.pkl", 'rb') as fp:
        normparam = pickle.load(fp)
    X_max = normparam["X_max"]
    X_min = normparam["X_min"]
    X_mean = normparam["X_mean"]
    X_train_cnn = (sample_mat - X_min) / (X_max - X_min)
    # X_norm = sample_mat
    X_w_cnn = X_train_cnn - X_mean
    X_tr_cnn = X_w_cnn.reshape(-1, rs, rs, 1).astype('float32')
    '''
    X_tr_cnn = sample_mat.reshape(-1, rs, rs, 1).astype('float32')

    idx = np.random.permutation(len(labels_cnn))
    numsamples = 100000
    X_in = X_tr_cnn[idx[0:numsamples], :, :, :]
    # get labels
    X_out = labels_cnn[idx[0:numsamples]].astype('int32')

    mask_layer1 = [100, 100, 100, 100, 0, 1, 100, 100]
    data_layer1, label_layer1 = sub2triple(data=X_in,
                                           label=X_out,
                                           mask=mask_layer1)
    label_layer1_hotpot = vec2onehot(label=label_layer1, numclass=2)

    numclass = 2
    encode_nodes = 64
    cnn = ConvNet.ConvNet(input_shape=data_layer1.shape,
                          kernel_size=[3, 3, 3, 3, 3],
                          kernel_num=[8, 8, 16, 32, 32],
                          fc_nodes=[],
                          encode_nodes=encode_nodes,
                          padding=('SAME', 'SAME'),
                          stride=(2, 2),
                          numclass=numclass,
                          sess=None,
                          name=None)
    cnn.cae_build()
    cnn.cnn_build(learning_rate=0.001)  # In order to init the weights

    foldname = "./nets/pretrain-171020-2cls/"
    name = "pretrain-120-171020-2cls.pkl"
    cnn.sess, cnn.name = utils.load_net(os.path.join(foldname, name))

    # train
    num_epochs = 100
    learning_rate = 0.001
    batch_size = 100
    droprate = 0.5
    cnn.cnn_train(data=data_layer1,
                  label=label_layer1_hotpot,
                  num_epochs=num_epochs,
                  learning_rate=learning_rate,
                  batch_size=batch_size,
                  droprate=droprate)

    # save features
    fname = "code_l5.pkl"
    folder = "{0}/features/".format(subfold)
    if not os.path.exists(folder):
        os.mkdir(folder)
    numsample = data_layer1.shape[0]
    numone = numsample // 10
    code = np.zeros((numsample, encode_nodes))
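    # Encode in 10 equal chunks to bound memory use; note that if numsample is not
    # divisible by 10, the last numsample % 10 rows of `code` are left as zeros.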
    for i in range(10):
        code[i * numone:(i + 1) * numone] = cnn.cae_encode(
            data_layer1[i * numone:(i + 1) * numone, :, :, :])
    # code = cnn.cae_encode(data_layer1)
    label = label_layer1
    with open(os.path.join(folder, fname), 'wb') as fp:
        code_dict = {"code": code, "label": label}
        pickle.dump(code_dict, fp)

    # save net
    foldname = "{0}/net_l5_140".format(subfold)
    name = "net_l5.pkl"
    netname = "model-l5.ckpt"
    if os.path.exists(foldname):
        os.system("rm -r %s" % (foldname))
    os.mkdir(foldname)
    cnn.cnn_save(namepath=os.path.join(foldname, name),
                 netpath=os.path.join(foldname, netname))
Ejemplo n.º 44
0
validation_labels = train_labels[:VALIDATION_SIZE]
train_data = train_data[VALIDATION_SIZE:, ...]
train_labels = train_labels[VALIDATION_SIZE:]
num_epochs = NUM_EPOCHS

train_size = train_labels.shape[0]

# This is where training samples and labels are fed to the graph.
# These placeholder nodes will be fed a batch of training data at each
# training step using the {feed_dict} argument to the Run() call below.

# We will replicate the model structure for the training subgraph, as well
# as the evaluation subgraphs, while sharing the trainable parameters.
#def model(data):

testfile = ConvNet.openEmptyFileR('conv.txt')
inputLayer = tf.placeholder(tf.float32,shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
conv1,conv1save = ConvNet.ConvLayer(inputLayer,filterSize = 5,outDepth = 32,convStride = 1,poolSize = 2,loadFromFile=testfile)
conv2,conv2save = ConvNet.ConvLayer(conv1,filterSize = 5,outDepth = 64,convStride = 1,poolSize = 2,loadFromFile=testfile)
reshape = ConvNet.Conv2FC_Reshape(conv2)
fc1,fc1save = ConvNet.FCLayer(reshape, 512, isRelu = True,loadFromFile=testfile)
fc2,fc2save = ConvNet.FCLayer(fc1, NUM_LABELS,loadFromFile=testfile)
train_prediction = tf.nn.softmax(fc2)
labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE,))
if testfile:testfile.close()   


def eval_in_batches(data, sess):
    predictions = sess.run(train_prediction, feed_dict={inputLayer: data})
    return predictions
Ejemplo n.º 45
0
GF = 32             # Dimension of G filters in first conv layer. default [64]
DF = 32             # Dimension of D filters in first conv layer. default [64]
Z_DIM = 100
IMAGE_CHANNEL = 3
LR = 0.0001         # Learning rate
#left-right flip

def read_image(path):
    image = scipy.misc.imread(path)
    image = scipy.misc.imresize(image,(IMAGE_H,IMAGE_W))
    image = image[np.newaxis,:,:,:] 
    image = image.astype('float32')/255.0 - 0.5
    return image

dlist = []
loadFromFile = ConvNet.openEmptyFileR('gan10d.txt')
dcv0 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = IMAGE_CHANNEL,outDepth = DF*2,filterSize = 7,loadFromFile = loadFromFile))#64out
dcv1 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*2,outDepth = DF*4,filterSize = 5,loadFromFile = loadFromFile))#32out
dcv2 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*4,outDepth = DF*8,filterSize = 5,loadFromFile = loadFromFile))#16out
dcv3 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*8,outDepth = DF*16,filterSize = 3,loadFromFile = loadFromFile))#8out
dcv4 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*16,outDepth = DF*16,filterSize = 3,loadFromFile = loadFromFile))#4out
dfc0 = ConvNet.addlist(dlist,ConvNet.FC(inDepth = DF*16*3*4,outDepth = 64,loadFromFile = loadFromFile))
dfc1 = ConvNet.addlist(dlist,ConvNet.FC(inDepth = 64,outDepth = 1,loadFromFile = loadFromFile))
if loadFromFile:loadFromFile.close()

def discriminator(inputT):
    _ret = dcv0.getLayer(inputT, convStride = 2, poolSize = 2,isRelu=True, fixed = False)
    _ret = dcv1.getLayer(_ret, convStride = 2, poolSize = 1,isRelu=True, fixed = False)
    _ret = dcv2.getLayer(_ret, convStride = 2, poolSize = 1,isRelu=True, fixed = False)
    _ret = dcv3.getLayer(_ret, convStride = 2, poolSize = 1,isRelu=True, fixed = False)
    _ret = dcv4.getLayer(_ret, convStride = 2, poolSize = 1,isRelu=True, fixed = False)
Ejemplo n.º 46
0
loss_win = None
train_drr_win = None
test_drr_win = None
train_xray_win = None
test_xray_win = None

transforms_ = transforms.Compose([
    transforms.ToTensor(),
    # transforms.Resize((64, 64))
])
train_dataset = SegData(train_root, transform=transforms_)
test_dataset = SegData(test_root, transform=transforms_)
trainloader = DataLoader(train_dataset, batch_size=train_batch_num, shuffle=True, num_workers=0)
testloader = DataLoader(test_dataset, batch_size=train_batch_num, shuffle=False, num_workers=0)

net = ConvNet.layer6Net(1, 20, 6)
net = net.cuda()
net = nn.DataParallel(net)

criterion = torch.nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=1e-3, weight_decay=1e-4)
# train_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=300)

best_loss = np.inf



def test(net, loader, optimizer):
    test_tre = 0.0
    num = 0
    net.eval()
Ejemplo n.º 47
0
import MNISTData

BATCH_SIZE = 100

#data used for validation; here it is the same as the training data, but different data could be used

#input layer
inputlayer = tf.placeholder(tf.float32, [BATCH_SIZE, MNISTData.IMAGE_H, MNISTData.IMAGE_W, MNISTData.IMAGE_CHANNEL])
testOne = tf.placeholder(tf.float32, [1, MNISTData.IMAGE_H, MNISTData.IMAGE_W, MNISTData.IMAGE_CHANNEL])

#expected labels
labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE,))

#network definition
plist = []
loadFromFile = ConvNet.openEmptyFileR('MNIST.txt')#load trained parameters from file; if the file does not exist the network is initialized randomly
cv0 = ConvNet.addlist(plist,ConvNet.Conv(inDepth = MNISTData.IMAGE_CHANNEL,outDepth = 16,filterSize = 5,loadFromFile = loadFromFile))
cv1 = ConvNet.addlist(plist,ConvNet.Conv(inDepth = 16,outDepth = 32,filterSize = 5,loadFromFile = loadFromFile))
fc0 = ConvNet.addlist(plist,ConvNet.FC(inDepth = 7*7*32,outDepth = 64,loadFromFile = loadFromFile))
fc1 = ConvNet.addlist(plist,ConvNet.FC(inDepth = 64,outDepth = 10,loadFromFile = loadFromFile))
if loadFromFile:loadFromFile.close()   

def net(inputT):
    _ret = cv0.getLayer(inputT, convStride = 1, poolSize = 2,isRelu=True, fixed = False)
    _ret = cv1.getLayer(_ret, convStride = 1, poolSize = 2,isRelu=True, fixed = False)
    _ret = ConvNet.Conv2FC_Reshape(_ret)
    _ret = fc0.getLayer(_ret, isRelu=True, fixed = False)
    _ret = fc1.getLayer(_ret, isRelu=False, fixed = False)
    return _ret

fc2 = net(inputlayer)
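
# The snippet ends before a training objective is defined; a minimal sketch of how
# fc2 and labels_node would typically be combined (an assumption, not taken from the
# original file):
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_node, logits=fc2))
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(loss)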
Ejemplo n.º 48
0
    noise_io = DataIO.NoiseIO(top_config.N_code, False, None,
                              top_config.cov_1_2_file)
    # generate training data
    ibd.generate_noise_samples(code, top_config, net_config, train_config,
                               top_config.BP_iter_nums_gen_data,
                               top_config.currently_trained_net_id, 'Training',
                               noise_io, top_config.model_id)
    # generate test data
    ibd.generate_noise_samples(code, top_config, net_config, train_config,
                               top_config.BP_iter_nums_gen_data,
                               top_config.currently_trained_net_id, 'Test',
                               noise_io, top_config.model_id)
elif top_config.function == 'Train':
    # get the id of the network to be trained
    net_id = top_config.currently_trained_net_id
    conv_net = ConvNet.ConvNet(net_config, train_config, net_id)
    conv_net.train_network(top_config.model_id)
elif top_config.function == 'Simulation':
    batch_size = 5000
    if top_config.analyze_res_noise:
        simutimes_for_anal_res_power = int(
            np.ceil(5e6 / float(top_config.K_code * batch_size)) * batch_size)
        ibd.analyze_residual_noise(code, top_config, net_config,
                                   simutimes_for_anal_res_power, batch_size)

    simutimes_range = np.array([
        np.ceil(1e7 / float(top_config.K_code * batch_size)) * batch_size,
        np.ceil(1e8 / float(top_config.K_code * batch_size)) * batch_size
    ], np.int32)
    ibd.simulation_colored_noise(code, top_config, net_config, simutimes_range,
                                 1000, batch_size)
Ejemplo n.º 49
0
    correct_num = 0
    if test_num == -1:
        test_num = len(test_label)

    for i in range(test_num):
        im = enlarge_pic(test_pic[i])
        train_convnet.forward_p(im, test_label[i])
        if np.argmax(train_convnet.output7.maps[0][0]) == test_label[i]:
            correct_num += 1
    correct_rate = correct_num / test_num
    print('testdata correct rate:', correct_rate)


start_time = time.time()
print(start_time)
train_convnet = ConvNet()
epoch = 5
learning_rate = 0.0001
pic_num = 5000
test_pic_num = 1000

all_train_pic = get_images("train-images.idx3-ubyte")
all_train_label = get_labels("train-labels.idx1-ubyte")
all_test_pic = get_images("t10k-images.idx3-ubyte")
all_test_label = get_labels("t10k-labels.idx1-ubyte")

print('start training')
train_net(train_convnet, epoch, all_train_pic, all_train_label, learning_rate,
          pic_num)
print('end training and start test')
test_net(train_convnet, all_test_pic, all_test_label, test_pic_num)
    optimizer="nadam",
    lr=.01,
    lr_decay=0.0)
fully_connected = fcn.FullyConnected(sizes=[32, 4, 10],
                                     activations=["linear", "relu", "linear"],
                                     scale_method="normalize",
                                     optimizer="nadam",
                                     lr=.01,
                                     lr_decay=0.0)
convnet = cnn.ConvNet(conv_method="convolution",
                      layer_names=["conv", "pool", "conv", "pool"],
                      num_filters=[3, None, 2, None],
                      kernel_sizes=[[2, 2], None, [2, 2], None],
                      stride_sizes=[[2, 2], [1, 1], [2, 2], [1, 1]],
                      pool_sizes=[None, [2, 2], None, [2, 2]],
                      pool_fns=[None, "max", None, "max"],
                      pad_fns=["same", "valid", "same", "valid"],
                      activations=["relu", None, "relu", None],
                      input_channels=2,
                      scale_method="normalize",
                      optimizer="nadam",
                      lr=0.01,
                      lr_decay=0)

nn = nen.NeuralNetwork([convnet, fully_connected], "cross_entropy")

# inputs = np.random.rand(2, 6, 6, 2)
# #print(nn.feedforward(inputs, scale=True))

# epsilon=1e-5
# inputs = np.random.rand(2, 6, 6, 2)
# outputs = nn.feedforward(inputs)
Ejemplo n.º 51
0
            file_index = 0
        bytestream.close()
        bytestream = open(filePath + str(file_index)+".bin","br")
        content_index = 0

    buf = bytestream.read(BATCH_SIZE * IMAGE_H * IMAGE_W * IMAGE_CHANNEL)
    data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
    data = (data) / 256.0 - 0.5
    data = data.reshape(BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL)
    return data



print("startload")
glist = []
loadFromFile = ConvNet.openEmptyFileR('gan4g.txt')
gfc0 = ConvNet.addlist(glist,ConvNet.FC(inDepth = Z_DIM,outDepth = Z_DIM*4,loadFromFile = loadFromFile))
gfc1 = ConvNet.addlist(glist,ConvNet.FC(inDepth = Z_DIM*4,outDepth = GF*4*3*4,loadFromFile = loadFromFile))
gdc0 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*4,outDepth = GF*4,filterSize = 3,loadFromFile = loadFromFile))#4in
gdc1 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*4,outDepth = GF*4,filterSize = 3,loadFromFile = loadFromFile))#8in
gdc2 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*4,outDepth = GF*2,filterSize = 5,loadFromFile = loadFromFile))#16in
gdc3 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*2,outDepth = GF*2,filterSize = 5,loadFromFile = loadFromFile))#32in
gdc4 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*2,outDepth = GF*1,filterSize = 5,loadFromFile = loadFromFile))#32in
gdc5 = ConvNet.addlist(glist,ConvNet.DeConv(inDepth = GF*1,outDepth = IMAGE_CHANNEL,filterSize = 5,loadFromFile = loadFromFile))#64in
if loadFromFile:loadFromFile.close()

dlist = []
loadFromFile = ConvNet.openEmptyFileR('gan4d.txt')
dcv0 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = IMAGE_CHANNEL,outDepth = DF*1,filterSize = 7,loadFromFile = loadFromFile))#64out
dcv1 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*1,outDepth = DF*2,filterSize = 5,loadFromFile = loadFromFile))#32out
dcv2 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*2,outDepth = DF*2,filterSize = 5,loadFromFile = loadFromFile))#16out
Ejemplo n.º 52
0
def simulation_colored_noise(linear_code, top_config, net_config, simutimes_range, target_err_bits_num, batch_size):
# target_err_bits_num: the simulation stops if the number of bit errors reaches the target.
# simutimes_range: [min_simutimes, max_simutimes]

    ## load configurations from top_config
    SNRset = top_config.eval_SNRs
    bp_iter_num = top_config.BP_iter_nums_simu
    noise_io = DataIO.NoiseIO(top_config.N_code, False, None, top_config.cov_1_2_file_simu, rng_seed=0)
    denoising_net_num = top_config.cnn_net_number
    model_id = top_config.model_id

    G_matrix = linear_code.G_matrix
    H_matrix = linear_code.H_matrix
    K, N = np.shape(G_matrix)

    ## build BP decoding network
    if np.size(bp_iter_num) != denoising_net_num + 1:
        print('Error: the length of bp_iter_num is not correct! 1!\nnp.size(bp_iter_num)='+str(np.size(bp_iter_num))+'\nnet_id_tested + 1='+str(denoising_net_num + 1))
        exit(0)
    bp_decoder = BP_Decoder.BP_NetDecoder(H_matrix, batch_size)

    ## build denoising network
    conv_net = {}
    denoise_net_in = {}
    denoise_net_out = {}
    # build network for each CNN denoiser,
    for net_id in range(denoising_net_num):
        if top_config.same_model_all_nets and net_id > 0:
            conv_net[net_id] = conv_net[0]
            denoise_net_in[net_id] = denoise_net_in[0]
            denoise_net_out[net_id] = denoise_net_out[0]
        else:
            conv_net[net_id] = ConvNet.ConvNet(net_config, None, net_id)
            denoise_net_in[net_id], denoise_net_out[net_id] = conv_net[net_id].build_network()
    # init graph
    init = tf.compat.v1.global_variables_initializer()
    sess = tf.compat.v1.Session()
    print('Open a tf session!')
    sess.run(init)
    # restore denoising network
    for net_id in range(denoising_net_num):
        if top_config.same_model_all_nets and net_id > 0:
            break
        conv_net[net_id].restore_network_with_model_id(sess, net_config.total_layers, model_id[0:(net_id+1)])

    ## initialize simulation times
    max_simutimes = simutimes_range[1]
    min_simutimes = simutimes_range[0]
    max_batches, residual_times = np.array(divmod(max_simutimes, batch_size), np.int32)
    if residual_times!=0:
        max_batches += 1

    ## generate out ber file
    bp_str = np.array2string(bp_iter_num, separator='_', formatter={'int': lambda d: "%d" % d})
    bp_str = bp_str[1:(len(bp_str) - 1)]
    ber_file = format('%sBER(%d_%d)_BP(%s)' % (net_config.model_folder, N, K, bp_str))

    if top_config.corr_para != top_config.corr_para_simu:  # this means we are testing the model robustness to correlation level.
        ber_file = format('%s_SimuCorrPara%.2f' % (ber_file, top_config.corr_para_simu))
    if top_config.same_model_all_nets:
        ber_file = format('%s_SameModelAllNets' % ber_file)
    if top_config.update_llr_with_epdf:
        ber_file = format('%s_llrepdf' % ber_file)
    if denoising_net_num > 0:
        model_id_str = np.array2string(model_id, separator='_', formatter={'int': lambda d: "%d" % d})
        model_id_str = model_id_str[1:(len(model_id_str)-1)]
        ber_file = format('%s_model%s' % (ber_file, model_id_str))
    if np.size(SNRset) == 1:
        ber_file = format('%s_%.1fdB' % (ber_file, SNRset[0]))

    ber_file = format('%s.txt' % ber_file)
    fout_ber = open(ber_file, 'wt')

    ## simulation starts
    start = datetime.datetime.now()
    for SNR in SNRset:
        real_batch_size = batch_size
        # simulation part
        bit_errs_iter = np.zeros(denoising_net_num + 1, dtype=np.int32)
        actual_simutimes = 0
        rng = np.random.RandomState(0)
        noise_io.reset_noise_generator()
        for ik in range(0, max_batches):
            print('Batch %d in total %d batches.' % (ik, int(max_batches)), end=' ')
            if ik == max_batches - 1 and residual_times != 0:
                real_batch_size = residual_times
            x_bits, _, s_mod, ch_noise, y_receive, LLR = lbc.encode_and_transmission(G_matrix, SNR, real_batch_size, noise_io, rng)
            noise_power = np.mean(np.square(ch_noise))
            practical_snr = 10*np.log10(1 / (noise_power * 2.0))
            print('Practical EbN0: %.2f' % practical_snr)

            for iter in range(0, denoising_net_num+1):
                # BP decoding
                u_BP_decoded = bp_decoder.decode(LLR.astype(np.float32), bp_iter_num[iter])

                if iter < denoising_net_num:
                    if top_config.update_llr_with_epdf:
                        prob = conv_net[iter].get_res_noise_pdf(model_id).get(np.float32(SNR))
                        LLR = denoising_and_calc_LLR_epdf(prob, y_receive, u_BP_decoded, denoise_net_in[iter], denoise_net_out[iter], sess)
                    else:
                        res_noise_power = conv_net[iter].get_res_noise_power(model_id, SNRset).get(np.float32(SNR))
                        LLR = denoising_and_calc_LLR_awgn(res_noise_power, y_receive, u_BP_decoded, denoise_net_in[iter], denoise_net_out[iter], sess)
                output_x = linear_code.dec_src_bits(u_BP_decoded)
                bit_errs_iter[iter] += np.sum(output_x != x_bits)

            actual_simutimes += real_batch_size
            if bit_errs_iter[denoising_net_num] >= target_err_bits_num and actual_simutimes >= min_simutimes:
                break
        print('%d bits are simulated!' % (actual_simutimes * K))

        ber_iter = np.zeros(denoising_net_num+1, dtype=np.float64)
        fout_ber.write(str(SNR) + '\t')
        for iter in range(0, denoising_net_num+1):
            ber_iter[iter] = bit_errs_iter[iter] / float(K * actual_simutimes)
            fout_ber.write(str(ber_iter[iter]) + '\t')
        fout_ber.write('\n')

    fout_ber.close()
    end = datetime.datetime.now()
    print('Time: %ds' % (end-start).seconds)
    print("end\n")
    sess.close()
    print('Close the tf session!')
Ejemplo n.º 53
0
verifydata1 = np.ndarray([batchSize, 4], np.float32)
verifydata1[0] = [1.0, 3.0, 1.0, 3.0]
verifydata2 = np.ndarray([batchSize, 4], np.float32)
verifydata2[0] = [2.0, 1.0, 2.0, 1.0]
verifydata3 = np.ndarray([batchSize, 4], np.float32)
verifydata3[0] = [0.0, 0.5, 1.0, 1.5]

#input layer
inputlayer = tf.placeholder(tf.float32, shape=(batchSize, 4))

#expected output data
finaldata = tf.placeholder(tf.float32, shape=(batchSize, 4))

#network definition
plist = []
loadFromFile = ConvNet.openEmptyFileR('test.txt')#load trained parameters from file; if the file does not exist the network is initialized randomly
#try adjusting the number 5 and observe the effect on the final result; if it is too small the network lacks the capacity to learn complex cases
fc0 = ConvNet.addlist(plist,ConvNet.FC(inDepth = 4,outDepth = 5,loadFromFile = loadFromFile))
fc1 = ConvNet.addlist(plist,ConvNet.FC(inDepth = 5,outDepth = 4,loadFromFile = loadFromFile))
if loadFromFile:loadFromFile.close()   

def net(inputT):
    _ret = fc0.getLayer(inputT, isRelu=True, fixed = False)
    _ret = fc1.getLayer(_ret, isRelu=False, fixed = False)
    return _ret

_net = net(inputlayer)

#loss function
# loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=train_labels_node, logits=logits))  #this loss is for classification; here we do regression, so it is not used
loss = tf.reduce_sum(tf.square(_net - finaldata))
Ejemplo n.º 54
0
stride = 3
layer_parameters = [[fh,fw,filters,stride],
                    [3,3,filters,2],
                    [fcl_filters+10],
                    [fcl_filters]]
layer_types = ['conv','max_pool','fc','fc']
actuators = [[0] , af.ReLU2,None,af.ReLU2,af.ReLU2,af.Softmax]

alpha = 0.01 # Step size
beta1 = 0.9 # Step weighted average parameter
beta2 = 0.98 # Step normalization parameter
gamma = 0.001 # Decay multiplier at the end of training (epochs*batch_size)
epsilon = 1e-8 # Addition to denominator to prevent div by 0
lam = 1e-4 # Regularization parameter
b_size = 256 # Batch size
clf = ConvNet.network(epochs,tolerance,actuators,layer_parameters,layer_types,alpha,beta1,beta2,epsilon,gamma,lam)

#%% Predict

df = pd.read_csv('digits_test.csv')
test_samples = 2000
print('Predicting using {} test samples'.format(test_samples))
x_test =  np.array(df.drop('label',1).loc[:].values).astype(int).T
x_test = x_test[:,-test_samples:]
x_test = x_test.reshape(28,28,1,-1)

# Output - one hot - y[ouputs,samples]
output_data_test = np.array(df[['label']].loc[:].values).astype(int).T
y_test = one_hot_output(output_data_test[:,-test_samples:])

Ejemplo n.º 55
0
def rename_nodes(graph_def, rename_func):
    res_def = tf.GraphDef()
    for n0 in graph_def.node:
        n = res_def.node.add() 
        n.MergeFrom(n0)
        n.name = rename_func(n.name)
        for i, s in enumerate(n.input):
            n.input[i] = rename_func(s) if s[0]!='^' else '^'+rename_func(s[1:])
    return res_def




DF = 32             # Dimension of D filters in first conv layer. default [64]
dlist = []
loadFromFile = ConvNet.openTextFileR('faceTrain.txt')
dcv0 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = IMAGE_CHANNEL,outDepth = DF,filterSize = 5,loadFromFile = loadFromFile))#64out
dcv1 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF,outDepth = DF*2,filterSize = 5,loadFromFile = loadFromFile))#64out
dcv2 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*2,outDepth = DF*4,filterSize = 5,loadFromFile = loadFromFile))#32out
dcv3 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*4,outDepth = DF*8,filterSize = 5,loadFromFile = loadFromFile))#16out
dcv4 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*8,outDepth = DF*16,filterSize = 3,loadFromFile = loadFromFile))#8out
dcv5 = ConvNet.addlist(dlist,ConvNet.Conv(inDepth = DF*16,outDepth = DF*16,filterSize = 3,loadFromFile = loadFromFile))#4out
dfc0 = ConvNet.addlist(dlist,ConvNet.FC(inDepth = DF*16*3*4,outDepth = 128,loadFromFile = loadFromFile))
dfc1 = ConvNet.addlist(dlist,ConvNet.FC(inDepth = 128,outDepth = 1,loadFromFile = loadFromFile))
if loadFromFile:loadFromFile.close()

imagesT = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_CHANNEL])
l0 = dcv0.getLayer(imagesT, convStride = 1, poolSize = 2,isRelu=True, fixed = False)
l1 = dcv1.getLayer(l0, convStride = 1, poolSize = 2,isRelu=True, fixed = False)
l2 = dcv2.getLayer(l1, convStride = 1, poolSize = 2,isRelu=True, fixed = False)
l3 = dcv3.getLayer(l2, convStride = 1, poolSize = 2,isRelu=True, fixed = False)
Ejemplo n.º 56
0
def run_train_models(datasets, parameters):
    device = parameters["device"]

    # Per-Class-Error
    per_class_error = {
        "MLP": 0.0,
        "FCN": 0.0,
        "ResNet": 0.0
    }

    for dataset_number, (dataset, dataloader) in enumerate(datasets.items()):
        dataset_number += 1

        # setting up
        if parameters["verbose"]:
            print_dataset_info(dataset, dataloader)
        sleep(1)

        time_steps = dataloader['test'].dataset.inputs.shape[-1]
        n_classes = len(np.unique(dataloader['test'].dataset.targets))

        # MLP
        if parameters["run_mlp"]:
            model_name = "MLP"
            if parameters["verbose"]:
                print(model_name)
            model = MultiLayerPerceptron(time_steps, n_classes)
            optimizer = optim.Adadelta(
                model.parameters(),
                lr=parameters["mlp_lr"],
                rho=parameters["mlp_rho"],
                eps=parameters["mlp_eps"]
            )
            if torch.cuda.device_count() > 0:
                model = nn.DataParallel(model)
            model.to(device)
            test_error_rate, model, _ = train(
                model_name=model_name,
                dataset_name=dataset,
                dataloader_train=dataloader['train'],
                dataloader_test=dataloader['test'],
                device=device,
                model=model,
                optimizer=optimizer,
                epochs=parameters["mlp_epochs"],
                save=False
            )
            per_class_error[model_name] += test_error_rate / n_classes
            mean_per_class_error = per_class_error[model_name] / dataset_number
            neptune.log_metric("{}_mpce".format(model_name), mean_per_class_error)

        # ConvNet
        if parameters["run_fcn"]:
            model_name = "FCN"
            if parameters["verbose"]:
                print(model_name)
            model = ConvNet(time_steps, n_classes)
            if torch.cuda.device_count() > 0:
                model = nn.DataParallel(model)
            model.to(device)

            optimizer = optim.Adam(
                model.parameters(),
                lr=parameters["fcn_lr"],
                betas=parameters["fcn_betas"],
                eps=parameters["fcn_eps"]
            )
            test_error_rate, model, _ = train(
                model_name=model_name,
                dataset_name=dataset,
                dataloader_train=dataloader['train'],
                dataloader_test=dataloader['test'],
                device=device,
                model=model,
                optimizer=optimizer,
                epochs=parameters["fcn_epochs"],
                save=False
            )
            per_class_error[model_name] += test_error_rate / n_classes
            mean_per_class_error = per_class_error[model_name] / dataset_number
            neptune.log_metric("{}_mpce".format(model_name), mean_per_class_error)

        # ResNet
        if parameters["run_resnet"]:
            model_name = "ResNet"
            if parameters["verbose"]:
                print(model_name)
            model = ResNet(time_steps, n_classes)
            if torch.cuda.device_count() > 0:
                model = nn.DataParallel(model)
            model.to(device)
            optimizer = optim.Adam(
                model.parameters(),
                lr=parameters["fcn_lr"],
                betas=parameters["fcn_betas"],
                eps=parameters["fcn_eps"]
            )
            test_error_rate, model, _ = train(
                model_name=model_name,
                dataset_name=dataset,
                dataloader_train=dataloader['train'],
                dataloader_test=dataloader['test'],
                device=device,
                model=model,
                optimizer=optimizer,
                epochs=parameters["fcn_epochs"],
                save=False
            )
            per_class_error[model_name] += test_error_rate / n_classes
            mean_per_class_error = per_class_error[model_name] / dataset_number
            neptune.log_metric("{}_mpce".format(model_name), mean_per_class_error)
    final_prediction = b[:, :, 0]
    labels = (final_prediction > 0.5).astype(np.int)
    return labels


if __name__ == '__main__':
    args = parser.parse_args()

    rasterfn = args.inpfile
    base = os.path.basename(rasterfn)
    fname = os.path.splitext(base)[0]
    newRasterfn = '/exports/csce/eddie/geos/groups/geos_cnn_imgclass/data/AerialImageDataset/predict_raster/predict_{}.tif'.format(
        fname)
    base_model = os.path.basename(args.model)
    match = re.search('arch(\d+)', base_model)
    net_size = match.group(1)
    net_size = int(net_size)
    net = ConvNet.Net(net_size)
    net = nn.DataParallel(net)
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    net.to(device)
    if torch.cuda.is_available():
        net.load_state_dict(torch.load(args.model))
    else:
        net.load_state_dict(
            torch.load(args.model, map_location=lambda storage, loc: storage))

    toTensor = transforms.ToTensor()
    array = image_loader(rasterfn)
    array2raster(rasterfn, newRasterfn, array)
Ejemplo n.º 58
0
def train():

    images = tf.placeholder(tf.float32, [BATCH_SIZE, MNISTData.IMAGE_H, MNISTData.IMAGE_W, MNISTData.IMAGE_CHANNEL])

    z = tf.placeholder(tf.float32, [BATCH_SIZE, Z_DIM])

    testz = tf.placeholder(tf.float32, [testBATCH_SIZE, Z_DIM])

    G = generator(z)
    D_logits  = discriminator(images)
    samples = generator(testz)
    D_logits_F = discriminator(G)

    gen_cost = -tf.reduce_mean(D_logits_F)
    disc_cost = tf.reduce_mean(D_logits_F) - tf.reduce_mean(D_logits)
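    # WGAN-GP gradient penalty (Gulrajani et al., 2017): sample points uniformly on the
    # line between real and generated images and penalize the discriminator's gradient
    # norm at those points, pushing it towards 1.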
    alpha = tf.random_uniform(shape=[BATCH_SIZE,1], minval=0.0,maxval=1.0)
    differences = G - images
    differences = tf.reshape(differences,[BATCH_SIZE,-1])
    imagereshape = tf.reshape(images,[BATCH_SIZE,-1])
    interpolates = imagereshape + (alpha*differences)
    interpolates = tf.reshape(interpolates,images.shape)
    gradients = tf.gradients(discriminator(interpolates), [interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
    gradient_penalty = tf.reduce_mean((slopes-1.)**2)
    
    LAMBDA = 10 # Gradient penalty lambda hyperparameter
    disc_cost += LAMBDA*gradient_penalty

    g_vars = ConvNet.getParam(glist)
    d_vars = ConvNet.getParam(dlist)

    d_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(disc_cost, var_list=d_vars)        
    g_optim = tf.train.AdamOptimizer(learning_rate=LR,beta1=0.5,beta2=0.9).minimize(gen_cost, var_list=g_vars)

    sample_z = np.random.uniform(-1, 1, size = (testBATCH_SIZE, Z_DIM))


    sess = tf.Session()
    
    init = tf.global_variables_initializer()  
    sess.run(init)

    start_time = time.time()
    idx = 0
    while True:
        idx = idx + 1
        elapsed_time = time.time() - start_time
        start_time = time.time()
        print(str(idx)+","+str(elapsed_time))
        
        for _ in xrange(2):
            batch_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
            loadedimage,_ = MNISTData.extract_data(BATCH_SIZE)

            sess.run(d_optim, feed_dict = {z:batch_z, images:loadedimage})

        batch_z = np.random.uniform(-1, 1, size = (BATCH_SIZE, Z_DIM))
        sess.run(g_optim, feed_dict = {z: batch_z})

        if idx % 50 == 0:

            sample = sess.run(samples, feed_dict = {testz: sample_z})

            def imgSave(idx,sample):
                ConvNet.saveImagesMono(sample, saveSize, 'out\\sample_%d.png' % (idx))
                
                
            t = threading.Thread(target=imgSave,args=(idx,sample))
            t.start()
            
        if idx % 500 == 0:
            
            def save(idx, gSaver, dSaver):
                print("start save")
                saveToFile = ConvNet.openEmptyFileW("gan0g"+str(idx)+".txt")
                for item in gSaver:
                    item(saveToFile)
                saveToFile.flush();saveToFile.close()
 
                saveToFile = ConvNet.openEmptyFileW("gan0d"+str(idx)+".txt")
                for item in dSaver:
                    item(saveToFile)
                saveToFile.flush();saveToFile.close()
                print("end save")
            
            gSaver = []
            dSaver = []
            for item in glist:
                gSaver.append(item.getSaver(sess))
            for item in dlist:
                dSaver.append(item.getSaver(sess))

            t = threading.Thread(target=save,args=(idx,gSaver, dSaver))
            t.start()
            
            
            

    sess.close()
Ejemplo n.º 59
0
def main(argv):

    # folder for saving
    subfold = argv[1]
    if not os.path.exists(subfold):
        os.mkdir(subfold)
    # load data
    fname = '../LRG-filter/data/lrg_171016/sample-LRG-140-120-10-c3.pkl'
    with open(fname, 'rb') as fp:
        datadict = pickle.load(fp)
        X_raw = datadict['data']

    # Reshape and generate train and test dataset
    rs = 120
    # normalization and whitening
    with open("../nets/norm_params.pkl", 'rb') as fp:
        normparam = pickle.load(fp)
    X_max = normparam["X_max"]
    X_min = normparam["X_min"]
    X_mean = normparam["X_mean"]

    X_train_pre = X_raw
    X_train_pre = (X_train_pre - X_min) / (X_max - X_min)
    X_in = X_train_pre.reshape(-1, rs, rs, 1)
    X_mean = np.mean(X_train_pre)
    X_w = X_in - X_mean  # Whitening?

    numclass = 3
    encode_nodes = 32
    cnn = ConvNet.ConvNet(input_shape=X_w.shape,
                          kernel_size=[3, 3, 3, 3, 3],
                          kernel_num=[8, 8, 16, 32, 32],
                          fc_nodes=[],
                          encode_nodes=encode_nodes,
                          padding=('SAME', 'SAME'),
                          stride=(2, 2),
                          numclass=numclass,
                          sess=None,
                          name=None)
    cnn.cae_build()
    cnn.cnn_build(learning_rate=0.001)  # In order to init the weights

    foldname = "{0}/net_l3_140".format(subfold)
    name = "net_l3.pkl"
    cnn.sess, cnn.name = utils.load_net(os.path.join(foldname, name))

    # estimate
    label, label_pos = cnn.cnn_predict(img=X_w)
    label_new_pos = pos_to_line(label_pos)

    # save result
    savefold = "{0}/est_labeled".format(subfold)
    if not os.path.exists(savefold):
        os.mkdir(savefold)
    savepath = "est_l3.pkl"
    savedict = {
        "label_raw": np.array(datadict['type']),
        "z": np.array(datadict['redshift']),
        "snvss": np.array(datadict['snvss']),
        "name": np.array(datadict['name']),
        "label_est": label,
        "label_pos": label_new_pos
    }
    with open(os.path.join(savefold, savepath), 'wb') as fp:
        pickle.dump(savedict, fp)
Ejemplo n.º 60
0
#data used for validation; here it is the same as the training data, but different data could be used
verifydata1 = np.ndarray([batchSize, 4], np.float32)
verifydata1[0] = [1.0, 3.0, 1.0, 3.0]
verifydata2 = np.ndarray([batchSize, 4], np.float32)
verifydata2[0] = [2.0, 1.0, 2.0, 1.0]
verifydata3 = np.ndarray([batchSize, 4], np.float32)
verifydata3[0] = [0.0, 0.5, 1.0, 1.5]

#input layer
inputlayer = tf.placeholder(tf.float32, shape=(batchSize, 4))

#expected output data
finaldata = tf.placeholder(tf.float32, shape=(batchSize, 4))

#network definition
testfile = ConvNet.openEmptyFileR('test.txt')#load trained parameters from file; if the file does not exist the network is initialized randomly
#try adjusting the number 5 and observe the effect on the final result; if it is too small the network lacks the capacity to learn complex cases
fc1,fc1saver = ConvNet.FCLayer(inputlayer,5,isRelu = True,loadFromFile=testfile)
fc2,fc2saver = ConvNet.FCLayer(fc1,4,loadFromFile=testfile)
if testfile:testfile.close()   

#loss function
# loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=train_labels_node, logits=logits))  #this loss is for classification; here we do regression, so it is not used
loss = tf.reduce_sum(tf.square(fc2 - finaldata))

#optimizer
#optimizer = tf.train.AdadeltaOptimizer(learning_rate=1).minimize(loss)  # tf.train.AdadeltaOptimizer.init(learning_rate=0.001, rho=0.95, epsilon=1e-08, use_locking=False, name='Adadelta')  #this optimizer was used for the classification case; it is not used here for regression
optimizer = tf.train.GradientDescentOptimizer(0.001).minimize(loss)

def train():
    with tf.Session() as sess: