Example #1
import sys

def main():
    # Check the argument count; if it is wrong, print a message to stdout
    # and exit with an error code.
    if len(sys.argv) == 4:
        trainingListFileName = sys.argv[1]
        trainingFacitFileName = sys.argv[2]
        examineFileName = sys.argv[3]
    else:
        print("Wrong number of arguments")
        exit(-1)

    # Build Image objects from the user-supplied training list, and likewise
    # for the file the program will use to test its training at the end.
    defaultTestImgList = ImageReader.parse(trainingListFileName, trainingFacitFileName)
    examineImgList = ImageReader.parseTest(examineFileName)


    learningRate = 0.1

    # Build a list with the four node types; the numbers 1-4 each represent a mood/smile type.
    nodeList = [Node(learningRate, 1), Node(learningRate, 2),
                Node(learningRate, 3), Node(learningRate, 4)]

    # The network is trained on the training images.
    trainNetworkOnImgList(nodeList, defaultTestImgList)

    # The trained network is evaluated on the user-supplied test file.
    exmamineNetwork(nodeList, examineImgList, True)
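
trainNetworkOnImgList is called above but not shown. A minimal sketch of what such a routine might look like, assuming each Node exposes a train(img) method that adjusts its weights toward the image's known answer (the iteration count and method name are assumptions):

def trainNetworkOnImgList(nodeList, imgList, iterations=100):
    # Hypothetical sketch: present every training image to every node repeatedly.
    for _ in range(iterations):
        for img in imgList:
            for node in nodeList:
                node.train(img)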
Example #2
def train(modelname):
    # server = tf.train.Server.create_local_server()
    # sess = tf.Session(server.target)
    # K.set_session(sess)
    # os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    # K.set_floatx('float16')

    learning_rate = 0.0001
    epochs = 80
    batch_size = 20
    decay_r = learning_rate / epochs
    images_n = 15000

    # model = nb.build_model()
    model = keras.models.load_model(
        "64i_15k_300_retrained_HSV_120more",
        custom_objects={'binary_activation': nb.binary_activation})
    fp = os.path.expanduser('~') + "/Downloads/img_celeba/data_crop_256_jpg"
    images = ir.read_directory(fp, images_n)
    images = np.array(images) / 255  # normalize pixel values to [0, 1]
    images = np.array(images, dtype=np.float16)  # cast down to halve memory use
    loss_func = "mse"
    if False:  # TODO: implement multi-GPU setting
        multi_model = keras.utils.multi_gpu_model(model,
                                                  gpus=1,
                                                  cpu_merge=True,
                                                  cpu_relocation=False)
    else:
        multi_model = model
    multi_model.compile(optimizer=keras.optimizers.Adam(lr=learning_rate,
                                                        decay=decay_r),
                        loss=loss_func,
                        metrics=['accuracy'])
    # Autoencoder setup: the images serve as both input and target.
    history = multi_model.fit(images,
                              images,
                              validation_split=0.05,
                              callbacks=[],
                              batch_size=batch_size,
                              epochs=epochs)
    model.save(modelname)

    # Sanity-check the saved model on a batch of held-out images.
    test_im = ir.read_directory(fp, limit=200, start=images_n - 50)
    test_im_disp = np.array(test_im)  # keep an unscaled copy for display
    test_im = np.array(test_im) / 255
    test_pred = model.predict(test_im)
    test_pred = np.array(test_pred * 255, dtype=np.uint8)  # back to 8-bit pixels
    rd.main(modelname)
    for i, d in enumerate(test_pred):
        # NOTE: the unconditional `break` below was left in the original to
        # disable this preview loop; remove it to display the comparisons.
        break
        fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2)
        c_d = cv2.cvtColor(d, cv2.COLOR_BGR2RGB)
        ax0.imshow(c_d, interpolation='nearest', aspect='auto')
        c_test_im = cv2.cvtColor(test_im_disp[i], cv2.COLOR_BGR2RGB)
        ax1.imshow(c_test_im, interpolation='nearest', aspect='auto')
        plt.show()
        if i > 50:
            break
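
ir.read_directory appears in several of these snippets but is never defined. A plausible sketch given the call sites read_directory(fp, images_n) and read_directory(fp, limit=200, start=images_n - 50), assuming OpenCV-style loading (the sorting and the cv2 use are assumptions):

import os
import cv2

def read_directory(path, limit, start=0):
    # Read up to `limit` images from `path`, beginning at file index `start`.
    files = sorted(os.listdir(path))[start:start + limit]
    return [cv2.imread(os.path.join(path, f)) for f in files]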
Example #3
def histogramEqualization(imagePath):

    os.remove("./image.txt")
    finalSize = ImageConverter.getBytesFromFile(imagePath)
    print("Image Converter to bytes...")
    os.remove("../AssemblyCode/imageHE.txt")
    open("../AssemblyCode/imageHE.txt", 'w')
    subprocess.Popen(["make"], stdout=subprocess.PIPE, cwd="../AssemblyCode")
    print("Image processing Please wait...")
    time.sleep(15)
    print("Image processed...")
    print("Opening image...")
    deleteLastLine("../AssemblyCode/imageHE.txt")
    ImageReader.imageReader(imagePath, finalSize)
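
deleteLastLine is not defined in the snippet; presumably it strips the trailing line the assembly step leaves in imageHE.txt. A minimal sketch:

def deleteLastLine(path):
    # Rewrite the file in place without its final line.
    with open(path) as f:
        lines = f.readlines()
    with open(path, 'w') as f:
        f.writelines(lines[:-1])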
Example #4
File: Main.py Project: M1wyTka/AStar
def display_grid():
    start_time = time.time()

    print("Reading image")
    matrix, (start_x, start_y), (end_x, end_y) = ImageReader.read_image(image_to_process)

    print("Setting nodes")
    node_grid = ng.NodeGrid(matrix)
    node_grid.set_start(start_x, start_y)
    node_grid.set_end(end_x, end_y)

    print("Finding path")
    path = node_grid.find_path()

    print("Path found")
    print("Drawing path")
    ImageReader.draw_path(image_to_process, image_to_save, path)

    print("Finished")

    elapsed_time = time.time() - start_time
    print('{} seconds elapsed'.format(elapsed_time))
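
NodeGrid.find_path lives in the project's ng module and is not shown here. For illustration only, a compact A* core over a walkability matrix, roughly what such a grid search does (the 0-means-walkable convention and 4-connected moves are assumptions):

import heapq

def astar(matrix, start, end):
    # A* over a grid; matrix[y][x] == 0 marks a walkable cell.
    def h(p):  # Manhattan distance, admissible for 4-connected moves
        return abs(p[0] - end[0]) + abs(p[1] - end[1])
    open_heap = [(h(start), 0, start)]
    came_from, g = {}, {start: 0}
    while open_heap:
        _, cost, cur = heapq.heappop(open_heap)
        if cur == end:  # walk the parent links back to the start
            path = [cur]
            while cur in came_from:
                cur = came_from[cur]
                path.append(cur)
            return path[::-1]
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nxt = (cur[0] + dx, cur[1] + dy)
            if (0 <= nxt[1] < len(matrix) and 0 <= nxt[0] < len(matrix[0])
                    and matrix[nxt[1]][nxt[0]] == 0
                    and cost + 1 < g.get(nxt, float('inf'))):
                g[nxt] = cost + 1
                came_from[nxt] = cur
                heapq.heappush(open_heap, (cost + 1 + h(nxt), cost + 1, nxt))
    return []  # no path found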
Example #5
def main(modelname = "64i_15k_300_retrained_HSV_200more"):
    os.environ['CUDA_VISIBLE_DEVICES'] = ''
    fp = os.path.expanduser('~') + "/Downloads/imama/"
    # fp = os.path.expanduser('~') + "/Bakk/Bakalauras/personal_testing_images/logos"
    # fp = os.path.expanduser('~') + "/Bakk/Bakalauras/personal_testing_images/cropped"
    # fp = os.path.expanduser('~') + "/Bakk/Bakalauras/personal_testing_images/random"
    images_n = 10
    images = ir.read_directory(fp, images_n, start=0)
    images = np.array(images) / 255

    model = load_model(modelname, custom_objects={'binary_activation': nb.binary_activation}, compile=True)
    model.summary()  # summary() prints directly; wrapping it in print() also prints "None"
    visualise_model(images, model, "conv2d_14", 8, 8)
Example #6
def main():

    imageReader = ImageReader.ImageReader()
    traitRecognitor = TraitRecognitor.TraitRecognitor()
    # ScoreCalculator is modelled but not yet implemented; it was not a focus of the Bachelor Thesis.
    #    scoreCalculator = ScoreCalculator.ScoreCalculator()
    imageProcessor = ImageProcessor.ImageProcessor()
    contourFinder = ContourFinder.ContourFinder()
    contourDrawer = ContourDrawer.ContourDrawer()
    imageWriter = ImageWriter.ImageWriter()

    ImageAnalysisController(
        imageReader=imageReader,
        traitRecognitor=traitRecognitor,
        # Score calculator not yet implemented; it was not a focus of the Bachelor Thesis
        #scoreCalculator = scoreCalculator,
        imageWriter=imageWriter,
        imageProcessor=imageProcessor,
        contourFinder=contourFinder,
        contourDrawer=contourDrawer)
Example #7
    return c_matrix

def computeAccuracyofDclassifier(prelist, label):
    count = 0
    for p, l in zip(prelist, label):
        if tuple(p) == tuple(l):
            print(p, l)
            count += 1

    # Divide by the sample count (the original hard-coded 100 here).
    return count / len(label)



images, label, direction = ImageReader.readTestImage()
batch = ImageReader.getbatch(images, label, direction, test_list_len)

with tf.Session() as sess:
    new_saver = tf.train.import_meta_graph('owckpt/model.ckpt.meta')
    new_saver.restore(sess, 'owckpt/model.ckpt')
    yc = tf.get_collection('pred_networkc')[0]
    yd = tf.get_collection('pred_networkd')[0]
    graph = tf.get_default_graph()
    X = graph.get_operation_by_name('X').outputs[0]
    sess.run(tf.local_variables_initializer())

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    for i in range(1):
Example #8
import tkinter as tk
from tkinter import filedialog

import matplotlib.pyplot as plt
from skimage import feature
from skimage import filters
from skimage.transform import hough_line, hough_line_peaks, probabilistic_hough_line

import ImageReader
import RGBToGrayScale

# Dialog box to pick the image path.
root = tk.Tk()
root.withdraw()

path = filedialog.askopenfilename()

# Read the image || Exercise 1
image = ImageReader.readImage(path)
# End of Exercise 1

# Convert the image to grayscale || Exercise 2
graysScale = RGBToGrayScale.rgb2gray(image)

fig, axes = plt.subplots(1, 2, figsize=(8, 4))
fig.suptitle("Original para Escala de Cinza", ha='center', va='top')
fig.dpi = 125

ax = axes.ravel()

ax[0].imshow(image)
ax[0].set_title("Original")
ax[1].imshow(graysScale, cmap=plt.cm.gray)
ax[1].set_title("Grayscale")

plt.show()
Example #9
def train():
    '''
    Run the training loop.
    '''
    # Set up logging
    log = Metrics.Logger(config.csv_path, config.tb_path)
    # Build the model
    if config.start_epoch == 1:
        model = net.get_net(classes_num=config.classes_num,
                            channel_size=config.channel_size,
                            cnn_weights_path=config.cnn_weights_path,
                            drop_rate=config.drop_rate)
    else:
        model = net.get_net(classes_num=config.classes_num,
                            channel_size=config.channel_size,
                            drop_rate=config.drop_rate)

    print("model load succeed")

    if config.use_cuda:
        model = model.cuda()  # move the model to the GPU
    if config.use_parallel:
        model = nn.DataParallel(model, device_ids=config.device_ids)
        cudnn.benchmark = True
    if config.start_epoch != 1:
        all_weights_path = config.save_weights_path.format(config.start_epoch -
                                                           1)
        model.load_state_dict(torch.load(all_weights_path))
        print("{} load succeed".format(all_weights_path))

    # Load the datasets
    train_folder = config.train_img_path
    validate_folder = config.validate_img_path
    train_loader = ImageReader.get_loader("train", train_folder)
    validate_loader = ImageReader.get_loader("validate", validate_folder)

    # Define the optimizer and learning-rate scheduler
    optimizer = SGD(params=model.parameters(),
                    lr=config.start_lr,
                    momentum=0.9,
                    weight_decay=config.weight_decay)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               milestones=config.stones,
                                               gamma=0.1)

    # Define the evaluation metrics
    accuracy = Metrics.AccuracyList(num=1)
    l_train = Metrics.LossList(1)
    l_val = Metrics.LossList(1)

    # Track the best accuracy so far
    best_acc = 0
    for i in range(config.start_epoch, config.start_epoch + config.num_epoch):
        # Set this epoch's learning rate
        scheduler.step(epoch=i)
        lr = scheduler.get_lr()[0]

        print("{} epoch start , lr is {}".format(i, lr))

        # Train for one epoch
        model.train()
        accuracy.set_zeros()
        l_train.set_zeros()
        train_step = 0
        for (x1, x2), y in train_loader:
            x1 = Variable(x1)
            x2 = Variable(x2)
            y = Variable(y)
            if config.use_cuda:
                x1 = x1.cuda()
                x2 = x2.cuda()
                y = y.cuda(non_blocking=True)  # `async` is a reserved word on Python 3.7+
            optimizer.zero_grad()  # clear accumulated gradients
            y_ = model(x1, x2)  # forward pass
            # Compute this step's loss and accuracy
            step_loss, loss_list = model.get_loss(y_, y)
            step_acc = accuracy(y_, y)
            l_train.log(loss_list)

            # Backpropagate and update the weights
            step_loss.backward()
            optimizer.step()

            train_step += 1  # one more training step

            # Print this step's record
            print("{} epoch,{} step,step loss is {:.6f},step acc is {:.4f}".
                  format(i, train_step, loss_list[0], max(step_acc)))
            del (step_loss, x1, x2, y, y_)
        # Summarize this epoch's training
        train_acc_list = accuracy.get_acc_list()
        train_loss_list = l_train.get_loss_list()

        # Save this epoch's weights and delete the previous checkpoint
        weights_name = config.save_weights_path.format(i)
        torch.save(model.state_dict(), weights_name)
        del_weights_name = config.save_weights_path.format(i - 1)
        if os.path.exists(del_weights_name):
            os.remove(del_weights_name)
        print("{} save,{} delete".format(weights_name, del_weights_name))

        # Validation phase
        model.eval()
        accuracy.set_zeros()  # reset total_sample and total_correct in accuracy
        l_val.set_zeros()
        for (x1, x2), y in validate_loader:
            x1 = Variable(x1, requires_grad=False)
            x2 = Variable(x2, requires_grad=False)
            y = Variable(y, requires_grad=False)
            if config.use_cuda:
                x1 = x1.cuda()
                x2 = x2.cuda()
                y = y.cuda(non_blocking=True)
            y_ = model(x1, x2)
            _, loss_list = model.get_valloss(y_, y)
            accuracy(y_, y)
            l_val.log(loss_list)
            del (x1, x2, y, y_, _)
        val_acc_list = accuracy.get_acc_list()
        val_loss_list = l_val.get_loss_list()
        print("validate end,log start")

        # Save the best model so far
        if best_acc < max(val_acc_list):
            weights_name = config.save_weights_path.format("best_acc")
            torch.save(model.state_dict(), weights_name)
            best_acc = max(val_acc_list)

        # Compute the model's L2 regularization term
        l2_reg = 0.0
        for param in model.parameters():
            l2_reg += torch.norm(param).data[0]
        # Write the epoch log
        log.log(i, train_acc_list, train_loss_list, val_acc_list,
                val_loss_list, l2_reg, lr)
        print("log end ...")

        print(
            "{} epoch end, train loss is {},train acc is {},val loss is {},val acc is {},weight l2 norm is {}"
            .format(i, train_loss_list[0], max(train_acc_list),
                    val_loss_list[0], max(val_acc_list), l2_reg))
    del (model)
    print("{} train end,best_acc is {}...".format(config.dataset, best_acc))
Example #10
def train():
    '''
    Training entry point.
    '''
    # Load the model
    model = Network.MyDenseNet(name_num_dict=config.name_num_dict,
                               cnn_weights_path=config.cnn_weights_path)

    # Load the full set of model weights, if provided
    if config.all_weights_path:
        weights_dict = torch.load(config.all_weights_path)
        model.load_state_dict(state_dict=weights_dict)
        print("load {} weights succecced...".format(config.all_weights_path))

    if torch.cuda.is_available() and use_gpu:
        model = model.cuda()

    # Define the training data pipeline
    # (with older torchvision, Resize must run on the PIL image before ToTensor)
    tran = transforms.Compose(
        [transforms.Resize(config.target_size),
         transforms.ToTensor()])
    reader = ImageReader.MultiTaskImageReader(
        root_path=config.train_img_root_path,
        csv_path=config.train_img_csv,
        transform=tran,
        name_pos_dict=config.name_pos_dict,
        name_num_dict=config.name_num_dict,
        num_list=config.num_list)
    trainloader = torch.utils.data.DataLoader(reader,
                                              batch_size=config.batch_size,
                                              shuffle=True,
                                              num_workers=config.num_thread)

    # Define the validation data pipeline
    val_reader = ImageReader.MultiTaskImageReader(
        root_path=config.val_img_root_path,
        csv_path=config.val_img_csv,
        transform=tran,
        name_pos_dict=config.name_pos_dict,
        name_num_dict=config.name_num_dict,
        num_list=config.num_list)
    validateloader = torch.utils.data.DataLoader(val_reader,
                                                 batch_size=config.batch_size,
                                                 shuffle=True,
                                                 num_workers=config.num_thread)

    # Define the optimizer
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

    for epoch in range(config.start_epoch, config.start_epoch + config.epochs):
        print("{} epoch start...".format(epoch))
        # Switch to training mode
        model.train()
        # Per-epoch running metrics
        step = 0  # training steps taken
        total_loss = 0  # running total loss
        total_loss_list = [0, 0, 0, 0, 0, 0, 0, 0]  # running loss per major class
        total_acc = 0  # running accuracy

        val_step = 0
        val_total_acc = 0
        val_total_loss = 0
        val_total_loss_list = [0, 0, 0, 0, 0, 0, 0, 0]
        for data in trainloader:
            inputs, labels = data
            inputs = Variable(inputs)
            for i in range(len(labels)):
                labels[i] = Variable(labels[i])

            if torch.cuda.is_available() and use_gpu:
                inputs = inputs.cuda()
                for i in range(len(labels)):
                    labels[i] = labels[i].cuda()
            optimizer.zero_grad()  # clear the gradient buffers
            outputs = model(inputs)
            step_loss, step_loss_list = metrics.loss(outputs, labels)

            total_loss += step_loss.data[0]  # update the total loss
            for i in range(len(total_loss_list)):  # update each class's loss
                total_loss_list[i] += step_loss_list[i]
            step_loss.backward()
            optimizer.step()
            if step % 10 == 0 and step != 0:
                step_acc = metrics.accuracy(outputs, labels)
                total_acc += step_acc
                print("{} epoch, {} step, step loss is {}, step acc is {}".
                      format(epoch, step, step_loss.data[0], step_acc))
            else:
                print("{} epoch, {} step, step loss is {}".format(
                    epoch, step, step_loss.data[0]))
            # free GPU memory
            del ([inputs, labels, outputs, step_loss, step_loss_list])

            step += 1
        # Save the model weights
        print("start save model weights...")
        torch.save(model.state_dict(), config.save_weights_path.format(epoch))

        model.eval()  # switch to evaluation mode
        print("start validate ...")
        # Next, run validation
        for data in validateloader:
            inputs, labels = data
            inputs = Variable(inputs)
            for i in range(len(labels)):
                labels[i] = Variable(labels[i])
            if torch.cuda.is_available() and use_gpu:
                inputs = inputs.cuda()
                for i in range(len(labels)):
                    labels[i] = labels[i].cuda()
            # Get the outputs
            outputs = model(inputs)
            step_loss, step_loss_list = metrics.loss(outputs, labels)
            step_acc = metrics.accuracy(outputs, labels)

            val_total_loss += step_loss.data[0]
            for i in range(len(val_total_loss_list)):
                val_total_loss_list[i] += step_loss_list[i]
            val_total_acc += step_acc
            val_step += 1

            # free GPU memory
            del ([inputs, labels, outputs, step_loss, step_loss_list])

        print("{} epoch , validation loss is {}, validation accuracy is {}".
              format(epoch, val_total_loss / val_step,
                     val_total_acc / val_step))

        # After each epoch, log the training results to file
        print("start log train info...")
        utils.log_train(epoch, total_loss, total_loss_list, step, total_acc,
                        val_total_loss, val_total_loss_list, val_total_acc,
                        val_step)
    del (model)
    print("{} epoch train end ...".format(epoch))
Example #11
def main(argv=None):
    keep_probability = tf.placeholder(
        tf.float32, name="keep_probabilty")  # Dropout keep probability
    Sparse_Sampled_Image = tf.placeholder(
        tf.float32, shape=[None, None, None, 3],
        name="input_Sparse_image")  # Input: sparsely sampled image

    ReconstructImage = BuildNet.inference(
        Sparse_Sampled_Image, keep_probability, 3,
        Vgg_Model_Dir)  # Here the graph (net) is built

    print("Reading images list")
    #---------------------Read list of image for recostruction------------------------------------------------------------
    Images = []  #Train Image List

    Images += [
        each for each in os.listdir(Image_Dir)
        if each.endswith('.PNG') or each.endswith('.JPG') or each.endswith(
            '.TIF') or each.endswith('.GIF') or each.endswith('.png') or
        each.endswith('.jpg') or each.endswith('.tif') or each.endswith('.gif')
    ]  # Get list of training images

    print('Number of images=' + str(len(Images)))

    #-------------------------Load trained model----------------------------------------------------------------------------------------------------------------------------

    sess = tf.Session()  # Start TensorFlow session

    print("Setting up Saver...")
    saver = tf.train.Saver()
    # summary_writer = tf.summary.FileWriter(logs_dir, sess.graph)
    sess.run(tf.global_variables_initializer())
    #sess.run(tf.initialize_all_variables())
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:  # if a trained model exists, restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    else:
        print("Error: no trained model found in log dir " + logs_dir +
              ". For creating a trained model see: Train.py")
        return


    #..............Start image reconstruction....................................................................
    for itr in range(len(Images)):
        #.....................Load images for prediction-------------------------------------
        print(str(itr) + ") Reconstructing: " + Image_Dir + Images[itr])
        FullImage, SparseSampledImage = ImageReader.LoadImages(
            Image_Dir + Images[itr], 0, 0, SamplingRate)

        #.......................Run one prediction...............................................................................
        feed_dict = {
            Sparse_Sampled_Image: SparseSampledImage,
            keep_probability: 1
        }  # Feed one image for prediction
        ReconImage = sess.run(
            ReconstructImage,
            feed_dict=feed_dict)  # run image reconstruction using network
        #......................Save image..........................................................................
        #ReconImage[ReconImage>255]=255
        #ReconImage[ReconImage<0]=0
        misc.imsave(
            OUTPUT_Dir + "/" + Images[itr][0:-4] + "_Reconstructed" +
            Images[itr][-4:], ReconImage[0])
        misc.imsave(
            OUTPUT_Dir + "/" + Images[itr][0:-4] + "_Original" +
            Images[itr][-4:], FullImage[0])
        misc.imsave(
            OUTPUT_Dir + "/" + Images[itr][0:-4] + "_Sampled" +
            Images[itr][-4:], SparseSampledImage[0])
Example #12
        target_small_value, target_small_class = torch.max(
            target_labels[big_class_list[i]][i].view([1, -1]), dim=1)
        res = (pred_small_class == target_small_class).data[0]
        if res:
            true_num += 1
    return true_num / batch_size


if __name__ == "__main__":
    tran = transforms.Compose(
        [transforms.Resize((224, 224)),
         transforms.ToTensor()])  # Resize the PIL image first, then convert to a tensor
    reader = ImageReader.MultiTaskImageReader(
        root_path="../../ali_data/second_validate_data/Images/",
        csv_path="../../ali_data/second_validate_data/labels.csv",
        transform=tran,
        name_pos_dict=config.name_pos_dict,
        name_num_dict=config.name_num_dict,
        num_list=config.num_list)
    trainloader = torch.utils.data.DataLoader(reader,
                                              batch_size=1,
                                              shuffle=True,
                                              num_workers=4)
    for x, y in trainloader:
        break
    model = Network.MyDenseNet(config.name_num_dict)

    x = torch.autograd.Variable(x)
    output = model(x)
    for i in range(len(output)):
        y[i] = torch.autograd.Variable(y[i])
Example #13
File: eval.py Project: woshildh/SCR-CNN
import sfr, ImageReader, config, Metrics
import torch
from torch.autograd import Variable
import torch.nn as nn

model = sfr.get_sfr(classes_num=config.classes_num,
                    channel_size=config.channel_size,
                    drop_rate=config.drop_rate,
                    sr_rate=config.sr_rate,
                    fr_rate=config.fr_rate)
validate_loader = ImageReader.getLoader(config.dataset, "validate",
                                        config.validate_img_path)
all_weights_path = "./weights/cub/sfr_resnet50_cub_best_acc.pth"
if config.use_cuda:
    model = model.cuda()
model.load_state_dict(torch.load(all_weights_path))
model.eval()
val_loss = 0
val_step = 0
accuracy = Metrics.Accuracy()
criterion = nn.CrossEntropyLoss()

for x, y in validate_loader:
    x = Variable(x, requires_grad=False)
    y = Variable(y, requires_grad=False)
    if config.use_cuda:
        x = x.cuda()
        y = y.cuda(non_blocking=True)  # `async` is a reserved word on Python 3.7+
    y_ = model.forward_validate(x)
    step_loss = criterion(y_, y)
    step_acc = accuracy(y_, y)
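
Metrics.Accuracy is not shown; judging from how train.py (example #14) uses it (callable per batch, total_correct and total_sample fields, reset via __init__), a minimal version might look like this sketch, written against the modern PyTorch API:

class Accuracy:
    # Running accuracy tracker: call per batch, read the totals at epoch end.
    def __init__(self):
        self.total_correct = 0
        self.total_sample = 0

    def __call__(self, y_, y):
        pred = y_.max(dim=1)[1]  # index of the highest logit per sample
        correct = (pred == y).sum().item()
        self.total_correct += correct
        self.total_sample += y.size(0)
        return correct / y.size(0)  # this batch's accuracy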
Example #14
File: train.py Project: woshildh/SCR-CNN
def train():
    '''
    Run the training loop.
    '''
    # Set up logging
    csv_path = config.csv_path
    tb_path = config.tb_path
    writer = SummaryWriter(log_dir=tb_path)

    # Build the model

    if config.start_epoch == 1:
        model = net.get_net(classes_num=config.classes_num,
                            channel_size=config.channel_size,
                            cnn_weights_path=config.cnn_weights_path,
                            drop_rate=config.drop_rate)
    else:
        model = net.get_net(classes_num=config.classes_num,
                            channel_size=config.channel_size,
                            drop_rate=config.drop_rate)

    print("model load succeed")

    if config.use_cuda:
        model = model.cuda()  # move the model to the GPU
    if config.use_parallel:
        model = nn.DataParallel(model, device_ids=config.device_ids)
        cudnn.benchmark = True
    if config.start_epoch != 1:
        all_weights_path = config.save_weights_path.format(config.start_epoch -
                                                           1)
        model.load_state_dict(torch.load(all_weights_path))
        print("{} load succeed".format(all_weights_path))

    # Load the datasets
    train_folder = config.train_img_path
    validate_folder = config.validate_img_path
    train_loader = ImageReader.getLoader(config.dataset, "train", train_folder)
    validate_loader = ImageReader.getLoader(config.dataset, "validate",
                                            validate_folder)

    # Define the optimizer and learning-rate scheduler
    optimizer = SGD(params=model.parameters(),
                    lr=config.start_lr,
                    momentum=0.9,
                    weight_decay=config.weight_decay)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               milestones=config.stones,
                                               gamma=0.1)

    # Define the evaluation functions
    criterion = nn.CrossEntropyLoss()
    accuracy = Metrics.Accuracy()

    # Track the best accuracy so far
    best_acc = 0
    for i in range(config.start_epoch, config.start_epoch + config.num_epoch):
        # Set this epoch's learning rate
        scheduler.step(epoch=i)
        lr = scheduler.get_lr()[0]

        print("{} epoch start , lr is {}".format(i, lr))

        # Train for one epoch
        model.train()
        accuracy.__init__()
        train_loss = 0
        train_step = 0

        for x, y in train_loader:
            x = Variable(x)
            y = Variable(y)
            if config.use_cuda:
                x = x.cuda()
                y = y.cuda(non_blocking=True)  # `async` is a reserved word on Python 3.7+
            optimizer.zero_grad()  # clear accumulated gradients
            y_ = model(x)  # forward pass
            # Compute this step's loss and accuracy
            step_loss = criterion(y_, y)
            step_acc = accuracy(y_, y)
            train_loss += step_loss.data[0]

            # Backpropagate and update the weights
            step_loss.backward()
            optimizer.step()

            train_step += 1  # one more training step

            # Print this step's record
            print("{} epoch,{} step,step loss is {:.6f},step acc is {:.4f}".
                  format(i, train_step, step_loss.data[0], step_acc))
            del (step_loss, x, y, y_)
        # Summarize this epoch's training
        train_acc = accuracy.total_correct / (accuracy.total_sample + 1e-5)
        train_loss = train_loss / (train_step + 1e-5)

        # Save this epoch's weights and drop an older checkpoint
        weights_name = config.save_weights_path.format(i)
        torch.save(model.state_dict(), weights_name)
        del_weights_name = config.save_weights_path.format(i - 3)
        if os.path.exists(del_weights_name):
            os.remove(del_weights_name)
        print("{} save,{} delete".format(weights_name, del_weights_name))

        # Validation phase
        model.eval()
        accuracy.__init__()  # reset total_sample and total_correct in accuracy
        val_loss = 0
        val_step = 0
        for x, y in validate_loader:
            x = Variable(x, requires_grad=False)
            y = Variable(y, requires_grad=False)
            if config.use_cuda:
                x = x.cuda()
                y = y.cuda(non_blocking=True)
            y_ = model(x)
            step_loss = criterion(y_, y)
            step_acc = accuracy(y_, y)
            val_loss += step_loss.data[0]
            val_step += 1
            del (x, y, y_, step_loss)
        val_acc = accuracy.total_correct / (accuracy.total_sample + 1e-5)
        val_loss = val_loss / (val_step + 1e-5)
        print("validate end,log start")

        # Save the best model so far
        if best_acc < val_acc:
            weights_name = config.save_weights_path.format("best_acc")
            torch.save(model.state_dict(), weights_name)
            best_acc = val_acc

        # Compute the model's L2 regularization term
        l2_reg = 0.0
        for param in model.parameters():
            l2_reg += torch.norm(param).data[0]
        # Write the epoch log
        with open(csv_path, "a", encoding="utf-8") as file:
            t = get_ctime()
            content = "{},{:.6f},{:.4f},{:.6f},{:.4f},{:.6f},{}".format(
                i, train_loss, train_acc, val_loss, val_acc, l2_reg, t) + "\n"
            file.write(content)
        writer.add_scalar("Train/acc", train_acc, i)
        writer.add_scalar("Train/loss", train_loss, i)
        writer.add_scalar("Val/acc", val_acc, i)
        writer.add_scalar("Val/loss", val_loss, i)
        writer.add_scalar("lr", lr, i)
        writer.add_scalar("l2_reg", l2_reg, i)
        print("log end ...")

        print(
            "{} epoch end, train loss is {:.6f},train acc is {:.4f},val loss is \
{:.6f},val acc is {:.4f},weight l2 norm is {:.6f}".format(
                i, train_loss, train_acc, val_loss, val_acc, l2_reg))
    del (model)
    print("{} train end,best_acc is {}...".format(config.dataset, best_acc))
Example #15
import ImageReader as ImReader
import ClassificationDataReader as cdr
import numpy as np
import KNN
# ImReader.saveMnistDataToTxtFile("Images\NumberImages")
trainLabels = cdr.readFileToArray("trainLabelsMnist.csv")
testLabels = cdr.readFileToArray("testLabelsMnist.csv")
trainData = cdr.readFileToArray("trainDataMnist.csv")
testData = cdr.readFileToArray("testDataMnist.csv")

knn = KNN.KNN(trainData, trainLabels, 3)
counter = 0
predictedLabels = []
for sample in testData:
    sample = np.asarray(sample)
    predictedLabels.append(knn.predict(sample))
    if counter % 100 == 0:
        print(counter)  # progress marker every 100 samples
    counter += 1
ImReader.createTxtFileWithData("createTxtFileWithData.csv", predictedLabels)
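
The KNN class itself is not listed. A minimal k-nearest-neighbours sketch consistent with KNN(trainData, trainLabels, 3) and knn.predict(sample), assuming scalar labels and Euclidean distance (both assumptions):

import numpy as np
from collections import Counter

class KNN:
    def __init__(self, data, labels, k):
        self.data = np.asarray(data, dtype=float)
        self.labels = list(labels)
        self.k = k

    def predict(self, sample):
        # Distance to every training point, then majority vote among the k closest.
        dists = np.linalg.norm(self.data - sample, axis=1)
        nearest = np.argsort(dists)[:self.k]
        return Counter(self.labels[i] for i in nearest).most_common(1)[0][0]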
Example #16
def main(argv=None):
    keep_probability = tf.placeholder(
        tf.float32, name="keep_probabilty")  # Dropout keep probability
    Sparse_Sampled_Image = tf.placeholder(
        tf.float32, shape=[None, None, None, 3],
        name="input_Sparse_image")  # Input: sparsely sampled image
    Full_Image = tf.placeholder(
        tf.float32, shape=[None, None, None, 3],
        name="Full_image")  # Full image, all pixels filled

    ReconstructImage = BuildNet.inference(
        Sparse_Sampled_Image, keep_probability, 3,
        Vgg_Model_Dir)  # Here the graph (net) is built
    loss = tf.reduce_mean(
        tf.abs(ReconstructImage - Full_Image, name="L1_Loss")
    )  # Training loss: mean L1 difference between the reconstruction and the ground truth

    # tf.summary.scalar("L1_Loss", loss)

    trainable_var = tf.trainable_variables()
    train_op = train(loss, trainable_var)

    #print("Setting up summary op...")
    #summary_op = tf.summary.merge_all()

    print("Reading images list")
    TrainImages = []  # Train image list

    TrainImages += [
        each for each in os.listdir(Train_Image_Dir)
        if each.endswith(('.PNG', '.JPG', '.TIF', '.GIF',
                          '.png', '.jpg', '.tif', '.gif'))
    ]  # Get the list of training images

    print('Number of Train images=' + str(len(TrainImages)))

    #-------------------------Training Region-----------------------------------------------------------------------------------------------------------------------------

    sess = tf.Session()  # Start TensorFlow session

    print("Setting up Saver...")
    saver = tf.train.Saver()
    # summary_writer = tf.summary.FileWriter(logs_dir, sess.graph)
    sess.run(tf.global_variables_initializer())
    #sess.run(tf.initialize_all_variables())
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:  # if a trained model exists, restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
#---------------------------Start Training: Create loss files----------------------------------------------------------------------------------------------------------
    Nimg = 0
    f = open(
        TrainLossTxtFile,
        "w")  # Create text file for writing the loss trough out the training
    f.write("Iteration\tTrain_Loss\t Learning Rate=" + str(learning_rate))
    f.close()
    #-----------------------------------------------------------------------------------------------------------------
    Epoch = 0
    #..............Start Training loop: Main Training....................................................................
    for itr in range(1, MAX_ITERATION + 1):
        if Nimg >= len(TrainImages) - 1:  # End of an epoch
            Nimg = 0
            random.shuffle(TrainImages)  # Shuffle images every epoch
            Epoch += 1
            print("Epoch " + str(Epoch) + " Completed")
        #.....................Load images for training
        batch_size = np.min([Batch_Size, len(TrainImages) - Nimg])
        FullImages = np.zeros([batch_size, Im_Hight, Im_Width, 3],
                              dtype=int)  # np.int was removed in NumPy 1.24
        SparseSampledImages = np.zeros([batch_size, Im_Hight, Im_Width, 3],
                                       dtype=int)
        for fi in range(batch_size):
            FullImages[fi], SparseSampledImages[fi] = ImageReader.LoadImages(
                Train_Image_Dir + TrainImages[Nimg], Im_Hight, Im_Width,
                SamplingRate)
            Nimg += 1

        #.......................Run one batch of training...............................................................................
        feed_dict = {
            Sparse_Sampled_Image: SparseSampledImages,
            Full_Image: FullImages,
            keep_probability: 0.4 + np.random.rand() * 0.6
        }  # Run one cycle of training
        sess.run(train_op, feed_dict=feed_dict)
        #......................Write training set loss..........................................................................
        if itr % 10 == 0:
            feed_dict = {
                Sparse_Sampled_Image: SparseSampledImages,
                Full_Image: FullImages,
                keep_probability: 1
            }
            train_loss = sess.run(loss, feed_dict=feed_dict)
            print("Step: %d, Train_loss:%g " % (itr, train_loss))
            #  summary_writer.add_summary(summary_str, itr)
            with open(TrainLossTxtFile,
                      "a") as f:  # Append the training loss to the file
                f.write("\n" + str(itr) + "\t" + str(train_loss))


        #....................Save trained net (once every 200 training cycles)...............................................
        if itr % 200 == 0:
            print("Saving Model")
            saver.save(sess, logs_dir + "model.ckpt",
                       itr)  # save trained model
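
ImageReader.LoadImages is shared by examples #11 and #16 but never shown. From the call sites it loads an image, resizes it when a size is given, and returns the full image together with a sparsely sampled copy. A rough sketch; the random masking scheme is an assumption, and example #11 additionally indexes a leading batch dimension:

import numpy as np
from scipy import misc

def LoadImages(path, height, width, sampling_rate):
    img = misc.imread(path)
    if height and width:
        img = misc.imresize(img, (height, width))
    # Keep roughly a `sampling_rate` fraction of pixels; zero out the rest.
    mask = np.random.rand(img.shape[0], img.shape[1]) < sampling_rate
    return img, img * mask[:, :, None]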
Example #17
def makemodel(X):
    convolutionlayer1 = conv1(X)
    convolutionlayer2 = conv2(convolutionlayer1)
    convolutionlayer3 = conv3(convolutionlayer2)
    logits = fullyconnectedlayer(convolutionlayer3)
    return logits

def getloss(Y, logits):

    # Use MSE as the loss
    loss = tf.reduce_mean(tf.square(Y - logits))
    return loss


images, label, direction = ImageReader.readImage(epoch=None)
batch = ImageReader.getbatch(images, label, direction, 30)

X = tf.placeholder("float", [None, 256, 256, 3], name='X')
Y = tf.placeholder("float", [None, 2])

model = makemodel(X)
loss = getloss(Y, model)

train_op = tf.train.AdamOptimizer(learning_rate=1e-5).minimize(loss)

saver = tf.train.Saver()
tf.add_to_collection('pred_network', model)

with tf.Session() as sess:
    # tf.train.string_input_producer defines an epoch variable, which must be initialized
Example #18
                        default='pics/pic.png',
                        help='The file name of picture or .npy file.')
    parser.add_argument('-n',
                        type=int,
                        default=1800,
                        help='Steps of stepping motor.')
    args = parser.parse_args()

    mode = args.m
    filename = args.f
    work_size = args.n

    worker = wk.Worker(work_size)

    if mode == 'test':
        worker.test()

    elif mode == 'work':
        if filename[-3:] == 'npy':
            worker.actions.load(filename)
        else:
            reader = ir.ImageReader(filename)
            reader.set_mode(ir.MODE_CONTOURS)
            contours = reader.get_contours()
            worker.actions.add_contours(contours)

        worker.eval()

    else:
        print('Unknown mode')
Example #19
from numpy import *
import os
import sys

import ImageReader


def fprintMat(fname, x, row, col):
    # Write a row x col matrix to a text file, one whitespace-separated row per line.
    f = open(fname, 'w')
    for i in range(row):
        s = ''
        for j in range(col):
            s = s + str(x[i, j]) + " "
        s = s + '\n'
        f.write(s)
    f.close()  # the original had `f.close` without parentheses, which never closes the file

if __name__ == "__main__":
    label, amount = ImageReader.readLabels('../../data/mnist/train-labels.idx1-ubyte')
    images, amount, row, col = ImageReader.readImages('../../data/mnist/train-images.idx3-ubyte')
    q_label, q_amount = ImageReader.readLabels('../../data/mnist/t10k-labels.idx1-ubyte')
    q_images, q_amount, row, col = ImageReader.readImages('../../data/mnist/t10k-images.idx3-ubyte')
    it = int(sys.argv[1])
    teta = zeros((row * col + 1, 10), dtype=float64)
    best_it = -1
    min_error = 1.
    if os.path.exists("out"):
        for i in os.listdir("out"):
            fn = "out/" + i
            os.remove(fn)
        os.removedirs("out")
    os.makedirs("out")
    for h in range(it):
        for i in range(amount):
Example #20
def main(args):

    images = "textures"
    featureMethod = "cu"
    classMethod = "perc"
    ifDisplayImages = False
    ifClassify = True
    radiusSize = 7

    imagesToClassify = PP.ImagesToClassify(images)
    featureExtactionMethod = PP.OtherImagesFeaturesType(featureMethod)
    classificationMethod = PP.ClassificationMethod(classMethod)

    if not ifClassify:

        if(imagesToClassify == PP.ImagesToClassify.NumberImages):

            print("Loading number images..")
            trainImages, testImages = ImRead.LoadNumbersImages()

        elif(imagesToClassify == PP.ImagesToClassify.OtherImages):

            print("Loading other images..")
            trainImages, testImages = ImRead.LoadOtherImages()

            print("Processing images..")
            imSeg.ExtractImages(trainImages)
            imSeg.ExtractImages(testImages)

            print("Computing shape descriptors..")
            Features.ComputeFeatures(trainImages, featureExtactionMethod)
            Features.ComputeFeatures(testImages, featureExtactionMethod)

            trainData = "OtherImages\\trainData.txt"
            trainLabels = "OtherImages\\trainLabels.txt"
            testData = "OtherImages\\testData.txt"
            testLabels = "OtherImages\\testLabels.txt"

            Features.SaveFeaturesToFile(trainImages, trainLabels, trainData)

        elif(imagesToClassify == PP.ImagesToClassify.Textures):

            print("Loading textures..")
            trainImages, testImages = ImRead.LoadTextures()

            print("Processing images..")
            imSeg.ExtractImages(trainImages, onlyConvertToGreyScale=True)
            imSeg.ExtractImages(testImages, onlyConvertToGreyScale=True)

            print("Computing image features..")
            trainThread = threading.Thread(target=Features.ComputeFeatures, args=(trainImages, radiusSize, featureExtactionMethod))
            # Features.ComputeFeatures(trainImages, featureExtactionMethod)
            print("{0} Train feature extraction started!".format(datetime.datetime.now()))
            trainThread.start()
            Features.ComputeTexturePartFeatures(testImages, radiusSize, featureExtactionMethod)
            trainThread.join()
            print("{0} Train feature extraction finished!".format(datetime.datetime.now()))

            trainData = "Textures\\trainData.txt"
            trainLabels = "Textures\\trainLabels.txt"

            print("Saving features to file")
            Features.SaveFeaturesToFile(trainImages, trainLabels, trainData)
            Features.SaveImagePartsFeatures(testImages, radiusSize)

    elif ifClassify:

        print("Classification started!")
        featureFiles = ["test1.txt", "test2.txt", "test3.txt"]
        textureObjects = []

        for featureFile in featureFiles:

            textureObjects.append(Features.ReadFeaturesOfImage(featureFile))

        trainData = "Textures\\trainData.txt"
        trainLabels = "Textures\\trainLabels.txt"
        predictions = []

        if(classificationMethod == PP.ClassificationMethod.OtoczenieKuliste):

            OtKul = OK.OtoczKul(trainData, trainLabels)

            for textureObj in textureObjects:

                predictions.append([])

                for imageFeatures in textureObj.imageFeatures:

                    predictions[-1].append(OtKul.PredictLabel(imageFeatures))

                textureObj.CreateTexture(predictions[-1], radiusSize)
                Features.SaveImage(textureObj)

        elif(classificationMethod == PP.ClassificationMethod.KNN):

            knn = KNN.KNN(trainData, trainLabels, 3)

            for textureObj in textureObjects:

                predictions.append([])

                for imageFeatures in textureObj.imageFeatures:

                    predictions[-1].append(knn.predict(imageFeatures))

                textureObj.CreateTexture(predictions[-1], radiusSize)
                Features.SaveImage(textureObj)

        elif(classificationMethod == PP.ClassificationMethod.Percepton):

            trainDataD, trainLabelsD = readTrainData("Output\\" + trainData, "Output\\" + trainLabels)
            hidden_layers = [10]
            myDNN = Percepton(trainDataD.shape[1], 4, hidden_layers, ifBias=True, batch_size=0)

            print("[%s] Training model.." %(datetime.datetime.now()))
            myDNN.train(trainDataD, trainLabelsD, l_rate=0.1, n_epochs=10000, epsilon=10e-12)
            print("[%s] Model has been trained!" %(datetime.datetime.now()))

            for textureObj in textureObjects:

                predictions = []
                print("[%s] Predicting.." %(datetime.datetime.now()))
                predictions.append(myDNN.predict(np.array(textureObj.imageFeatures)))

                print("[%s] Saving.." %(datetime.datetime.now()))
                textureObj.CreateTexture(predictions[-1], radiusSize)
                Features.SaveImage(textureObj)


    if(ifDisplayImages):

        imDisp = ImDisp.ImageDisplayer(6)
        imDisp.DisplayOtherImagesAnimation(trainImages)
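
readTrainData is used in the Percepton branch but not defined in the snippet. A hypothetical loader consistent with trainDataD.shape[1] and the four-class one-hot targets the perceptron expects (the file layout is an assumption):

import numpy as np

def readTrainData(dataPath, labelsPath):
    # Whitespace-separated feature rows, one integer class label per line.
    data = np.loadtxt(dataPath)
    rawLabels = np.loadtxt(labelsPath, dtype=int)
    labels = np.zeros((rawLabels.size, rawLabels.max() + 1))  # one-hot encode
    labels[np.arange(rawLabels.size), rawLabels] = 1
    return data, labels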