Example #1
#         io.write_tif(im_path, lb*255, data['geotransform']
#                      [i], data['geoprojection'][i], data['size'][i])
#     #    cv2.imwrite(im_path,lb*255)

#     # merging = []
#     # output_vrt = os.path.join(path_data, 'merged.vrt')
#     # for root, dirs, files in os.walk(path_predict):
#     #     for file in files:
#     #         if ".tif" in file:
#     #             merging.append(file)

#     # gdal.BuildVRT(output_vrt, merging, options=gdal.BuildVRTOptions(
#     #     srcNodata=-9999, VRTNodata=-9999))

# # Merging all the tif datasets
# logger.info('Merging tiled dataset')
# io.merge_tile(file_output, predict_image)

# # Converting raster to Vector
# logging.info('Converting Raster to vector')
# output_format = 'shp'
# io.raster2vector(file_output, os.path.dirname(file_output), output_format)

# # Post Processing shp to axis aligned bounding box
# postprocess.aabbox(os.path.dirname(file_output), output_format)

# Saving to accuracy.json
io.tojson(accuracy, os.path.join(path_result, 'accuracy.json'))
logger.info('Completed')
sys.exit()
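The commented-out merging step above collects bare file names, but gdal.BuildVRT needs paths it can resolve. Below is a minimal sketch of that merging step, assuming path_data and path_predict are set as earlier in the script; build_merged_vrt is a hypothetical helper, not part of the original code.

# Hedged sketch of the commented-out VRT merging step above (hypothetical helper).
# Assumes `path_data` and `path_predict` are defined as in the original script.
import os
from osgeo import gdal

def build_merged_vrt(path_data, path_predict):
    # gdal.BuildVRT needs full paths, not bare file names.
    merging = []
    for root, dirs, files in os.walk(path_predict):
        for file in files:
            if file.endswith('.tif'):
                merging.append(os.path.join(root, file))

    output_vrt = os.path.join(path_data, 'merged.vrt')
    vrt = gdal.BuildVRT(output_vrt, merging,
                        options=gdal.BuildVRTOptions(srcNodata=-9999,
                                                     VRTNodata=-9999))
    vrt = None  # dereference so the VRT is flushed to disk
    return output_vrt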
path_save_callback = os.path.join(config.path_weight,
                                  'weights.{epoch:02d}-{val_loss:.2f}.hdf5')
saving_model = keras.callbacks.ModelCheckpoint(path_save_callback,
                                               monitor='val_loss',
                                               verbose=0,
                                               save_best_only=False,
                                               save_weights_only=True,
                                               mode='auto',
                                               period=5)

# Fit the U-Net using the training generator (images as input,
# labels as target) and validate on the validation generator
history = unet_model.fit_generator(generator=training_generator,
                                   epochs=config.epoch,
                                   workers=3,
                                   validation_data=validation_generator,
                                   callbacks=[csv_logger, saving_model])

# Saving the trained model
logging.info('Saving model')
unet_model.save(os.path.join(config.path_weight, 'final.hdf5'))

# Getting timings
end_time = time.time() - st_time
timing['Total Time'] = str(end_time)

# Saving to JSON
io.tojson(timing, os.path.join(config.path_model, 'Timing.json'))
logging.info('Completed')
sys.exit()
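The snippet above relies on the legacy Keras APIs fit_generator and the period argument of ModelCheckpoint, both removed from current tf.keras. Below is a minimal sketch of the equivalent wiring on tf.keras 2.x; train_unet is a hypothetical wrapper and the CSV path is illustrative, not part of the original code.

# Sketch of the same CSV-logging + checkpoint setup on tf.keras 2.x (assumed version).
import os
import tensorflow as tf

def train_unet(unet_model, training_generator, validation_generator,
               path_weight, epochs):
    # Log per-epoch metrics to CSV and checkpoint weights each epoch.
    csv_logger = tf.keras.callbacks.CSVLogger(
        os.path.join(path_weight, 'training.csv'))
    saving_model = tf.keras.callbacks.ModelCheckpoint(
        os.path.join(path_weight, 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'),
        monitor='val_loss',
        save_best_only=False,
        save_weights_only=True,
        save_freq='epoch')  # `period=5` was removed; save_freq counts batches or 'epoch'
    # fit_generator is deprecated; fit() accepts generators/Sequences directly.
    return unet_model.fit(training_generator,
                          epochs=epochs,
                          validation_data=validation_generator,
                          callbacks=[csv_logger, saving_model])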
Example #3
            os.path.join(save_model_lo, 'M_%s_%s.h5' % (str(k), str(j))))

        # Saving the model weights
        logger.info('Saving weights')
        path_weight = os.path.join(save_weight_lo,
                                   'W_%s_%s.h5' % (str(k), str(j)))

        umodel.save_weights(path_weight)

        # Counting number of loops
        count = count + 1
        end_loop = time.time()

        # Getting timings
        timing['loop_%s_%s' % (str(k), str(j))] = end_loop - st_loop
        io.tojson(timing, os.path.join(result_lo, 'Timing.json'))

    # Clearing memory
    train_image = []
    train_label = []

end_time = time.time() - st_time
timing['Total Time'] = str(end_time)

# Saving to JSON
io.tojson(timing, os.path.join(result_lo, 'Timing.json'))

# model.evaluate(x=vali_images, y=vali_label, batch_size=32, verbose=1)#, sample_weight=None, steps=None)
# model.predict( vali_images, batch_size=32, verbose=1)#, steps=None)
logger.info('Completed')
sys.exit()
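Each fold above repeats the same pattern: time a block of work, store the elapsed seconds in the timing dict, and rewrite Timing.json. Below is a small hypothetical helper that captures that pattern as a context manager; json.dump stands in for io.tojson, whose implementation is not shown in the snippet.

# Hypothetical timing helper, not part of the original code.
import json
import time
from contextlib import contextmanager

@contextmanager
def timed(timing, key, json_path):
    # Record the elapsed time of the `with` block under `key`
    # and rewrite the timing JSON on every exit.
    start = time.time()
    try:
        yield
    finally:
        timing[key] = time.time() - start
        with open(json_path, 'w') as f:
            json.dump(timing, f, indent=4)

# Usage sketch:
# timing = {}
# with timed(timing, 'loop_%s_%s' % (k, j), 'Timing.json'):
#     ...train one fold...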
Example #4
    file_skeleton = []
    for j in range(len(path_merged)):
        temp = join(path_skeleton, basename(path_merged[j]))
        file_skeleton.append(temp)
        _ = postprocess.skeletonize(path_merged[j], temp)

    # Skeletonization completed
    timing[current_process[-1]] = mtime.time() - time

    # Converting raster to Vector
    time = mtime.time()

    print('Converting Raster to vector')
    path_vector = join(path_merged_prediction, 'vector')
    checkdir(path_vector)
    file_vector = []

    for j in range(len(file_skeleton)):
        temp = file_skeleton[j]
        path_r2v = io.raster2vector(temp, path_vector, output_format)
        file_vector.append(path_r2v)

    # Vectorization completed
    timing[current_process[-1]] = mtime.time() - time

# Saving to JSON
io.tojson(timing, join(path_result, 'Timing.json'))

print('Process Completed')
sys.exit()
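postprocess.skeletonize and io.raster2vector are called above but their implementations are not shown. Below are hedged sketches of what such helpers could look like, assuming single-band binary prediction rasters and GDAL/scikit-image availability; the names and signatures are illustrative, not the project's actual API.

# Hypothetical post-processing helpers (illustrative only).
import os
import numpy as np
from osgeo import gdal, ogr, osr
from skimage.morphology import skeletonize as sk_skeletonize

def skeletonize_raster(src_path, dst_path):
    # Thin a binary raster mask to one-pixel-wide centre lines.
    src = gdal.Open(src_path)
    mask = src.GetRasterBand(1).ReadAsArray() > 0
    skeleton = sk_skeletonize(mask).astype(np.uint8) * 255
    driver = gdal.GetDriverByName('GTiff')
    dst = driver.Create(dst_path, src.RasterXSize, src.RasterYSize, 1, gdal.GDT_Byte)
    dst.SetGeoTransform(src.GetGeoTransform())
    dst.SetProjection(src.GetProjection())
    dst.GetRasterBand(1).WriteArray(skeleton)
    dst.FlushCache()
    return dst_path

def raster_to_vector(raster_path, out_dir, output_format='shp'):
    # Polygonize the non-zero pixels of a raster into a shapefile.
    src = gdal.Open(raster_path)
    band = src.GetRasterBand(1)
    name = os.path.splitext(os.path.basename(raster_path))[0]
    out_path = os.path.join(out_dir, name + '.' + output_format)
    driver = ogr.GetDriverByName('ESRI Shapefile')
    dst = driver.CreateDataSource(out_path)
    srs = osr.SpatialReference(wkt=src.GetProjection())
    layer = dst.CreateLayer(name, srs=srs)
    layer.CreateField(ogr.FieldDefn('value', ogr.OFTInteger))
    gdal.Polygonize(band, band, layer, 0, [], callback=None)
    dst = None  # flush the shapefile to disk
    return out_path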