def parse_detect_config(config=None):
    config = {} if not config else config
    c = AttrDict()
    c.JSON = config.get("JSON", JSON)
    c.IMAGE_SIZE = config.get("IMAGE_SIZE", IMAGE_SIZE)
    c.CHECKPOINT_FILE = config.get("CHECKPOINT_FILE", CHECKPOINT_FILE)
    return c
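# The config parsers in this section rely on an AttrDict helper (the config
# module further below imports it from attr_dict). Its real implementation is
# not shown here; a minimal sketch that is compatible with how it is used in
# these functions could look like this:
class AttrDict(dict):
    """Dictionary whose keys can also be read and written as attributes."""

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value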
def parse_ozi_map(map_file_name, cutline_type=None):
    """cutline_type can be 'projected', 'latlon' or None"""
    ozi_map = ozi_reader.read_map(open(map_file_name).readlines())
    ozi_dir = os.path.split(os.path.abspath(map_file_name))[0]
    image_name = find_file_ci(ozi_dir, ozi_map.file_name)
    if not image_name:
        raise Exception('File "%s" not found.' % ozi_map.file_name)
    map_srs = get_srs_as_proj4(ozi_map.datum, ozi_map.projection)
    map_ll_srs = get_ozi_ll_srs(ozi_map.datum, ozi_map.projection)
    gcps = convert_ozi_gcps(ozi_map.gcps, map_srs, map_ll_srs)
    geotransform = gcps_to_geotransform(gcps)
    units_per_pixel = get_resolution(geotransform)
    if cutline_type and len(ozi_map.cutline) > 2:
        cutline_projected = {'projected': True, 'latlon': False}[cutline_type]
        if cutline_projected:
            cutline_points = convert_cutline(ozi_map.cutline, map_srs, map_ll_srs)
            cutline_srs = map_srs
            cutline_units_per_pixel = units_per_pixel
        else:
            cutline_points = convert_cutline(ozi_map.cutline)
            cutline_srs = map_ll_srs
            cutline_units_per_pixel = get_resolution(geotransform, map_srs, map_ll_srs)
        cutline = AttrDict(points=cutline_points,
                           srs=cutline_srs,
                           units_per_pixel=cutline_units_per_pixel)
    else:
        cutline = None
    result = AttrDict()
    result.image_name = image_name
    result.map_srs = map_srs
    result.gcps = gcps
    result.units_per_pixel = units_per_pixel
    result.cutline = cutline
    result.geotransform = geotransform
    result.datum = datum_names_map[ozi_map.datum]
    result.projection = ozi_map.projection
    result.inv_geotransform = gcps_to_geotransform(
        [AttrDict(pixel=g.ref, ref=g.pixel) for g in gcps])
    return result
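# Hypothetical usage of parse_ozi_map (the .map path below is made up, not from
# the source): parse a map file with a lat/lon cutline and inspect the derived
# georeferencing.
info = parse_ozi_map('maps/example.map', cutline_type='latlon')
print(info.image_name, info.datum, info.units_per_pixel)
if info.cutline:
    print('%d cutline points' % len(info.cutline.points))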
def get_params():
    checkpoint_dir = '/Users/Nolsigan/PycharmProjects/rlntm-tensorflow/checkpoints'
    max_length = 6
    rnn_cell = rnn.BasicLSTMCell
    rnn_hidden = 128
    learning_rate = 0.003
    optimizer = tf.train.AdamOptimizer()
    gradient_clipping = 5
    batch_size = 100
    epochs = 30
    epoch_size = 100
    num_symbols = 10
    dup_factor = 2
    mem_dim = 128
    mem_move_table = [-1, 0, 1]
    in_move_table = [-1, 0, 1]
    out_move_table = [0, 1]
    # Bundle every hyperparameter defined above into a single AttrDict.
    return AttrDict(**locals())
def parse_test_config(config=None):
    config = {} if not config else config
    c = AttrDict()
    c.DATASET_ROOT = config.get("DATASET_ROOT", DATASET_ROOT)
    c.JSON_PATH = config.get("JSON_PATH", "test.json")
    c.BATCH_SIZE = config.get("BATCH_SIZE", BATCH_SIZE)
    c.IMAGE_SIZE = config.get("IMAGE_SIZE", IMAGE_SIZE)
    c.WORKERS = config.get("WORKERS", WORKERS)
    c.PIN_MEMORY = config.get("PIN_MEMORY", PIN_MEMORY)
    c.SHUFFLE = config.get("SHUFFLE", False)
    c.OUT_PATH = config.get("OUT_PATH", OUT_PATH)
    c.LOAD_MODEL = config.get("LOAD_MODEL", True)
    c.CHECKPOINT_FILE = config.get("CHECKPOINT_FILE", CHECKPOINT_FILE)
    return c
def parse_train_config(config=None):
    config = {} if not config else config
    c = AttrDict()
    c.DATASET_ROOT = config.get("DATASET_ROOT", DATASET_ROOT)
    c.JSON_PATH = config.get("JSON_PATH", "train.json")
    c.BATCH_SIZE = config.get("BATCH_SIZE", BATCH_SIZE)
    c.IMAGE_SIZE = config.get("IMAGE_SIZE", IMAGE_SIZE)
    c.WORKERS = config.get("WORKERS", WORKERS)
    c.PIN_MEMORY = config.get("PIN_MEMORY", PIN_MEMORY)
    c.SHUFFLE = config.get("SHUFFLE", True)
    c.LEARNING_RATE = config.get("LEARNING_RATE", LEARNING_RATE)
    c.MOMENTUM = config.get("MOMENTUM", MOMENTUM)
    c.DAMPENING = config.get("DAMPENING", DAMPENING)
    c.BETAS = config.get("BETAS", BETAS)
    c.EPS = config.get("EPS", EPS)
    c.WEIGHT_DECAY = config.get("WEIGHT_DECAY", WEIGHT_DECAY)
    c.MILESTONES = config.get("MILESTONES", MILESTONES)
    c.GAMMA = config.get("GAMMA", GAMMA)
    c.NUM_EPOCHS = config.get("NUM_EPOCHS", NUM_EPOCHS)
    c.TEST = config.get("TEST", TEST)
    c.OUT_PATH = config.get("OUT_PATH", OUT_PATH)
    c.LOAD_MODEL = config.get("LOAD_MODEL", LOAD_MODEL)
    c.SAVE_MODEL = config.get("SAVE_MODEL", SAVE_MODEL)
    c.CHECKPOINT_FILE = config.get("CHECKPOINT_FILE", CHECKPOINT_FILE)
    return c
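# Hypothetical usage of the config parsers: keys passed in the dict override
# the module-level defaults, everything else falls back to those defaults.
train_cfg = parse_train_config({"BATCH_SIZE": 32, "NUM_EPOCHS": 50})
test_cfg = parse_test_config()  # all defaults
print(train_cfg.BATCH_SIZE, train_cfg.SHUFFLE, test_cfg.SHUFFLE)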
# Config ##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import os
import re
import torch

from attr_dict import AttrDict

__C = AttrDict()
cfg = __C

__C.GLOBAL_RANK = 0
__C.EPOCH = 0

# Absolute path to a location to keep some large files, not in this dir.
__C.ASSETS_PATH = '/home/dcg-adlr-atao-data.cosmos277/assets'

# Use class-weighted loss per batch to increase the loss for classes with a low
# pixel count in that batch.
__C.BATCH_WEIGHTING = False

# Border relaxation count.
__C.BORDER_WINDOW = 1

# Number of epochs after which the border restriction is turned off.
__C.REDUCE_BORDER_EPOCH = -1

# Comma-separated list of class ids to relax.
__C.STRICTBORDERCLASS = None
def read_ozi_map(data):
    if hasattr(data, 'read'):
        data = data.read()
    lines = data.splitlines()
    lines = [l.strip(' \n\r\x09') for l in lines]
    if not lines:
        raise OziFormatError('Document empty.')
    ozi_map = AttrDict()
    try:
        # Header: format version, title, image file name and datum.
        with OziFormatError('line 1'):
            validate_string_start(lines[0], 'OziExplorer Map Data File Version 2.')
        ozi_map.title = lines[1].decode('cp1251')
        ozi_map.file_name = lines[2].decode('cp1251').split('\\')[-1]
        with OziFormatError('line 5, datum name'):
            ozi_map.datum = validate_notempty(fields(lines[4], 1)[0])

        # Projection name and setup parameters.
        proj_params = fields(lines[8], 2)
        with OziFormatError('line 9'):
            validate_value(proj_params[0], 'Map Projection')
        ozi_map.projection = AttrDict()
        with OziFormatError('line 9, projection name'):
            ozi_map.projection.name = validate_notempty(proj_params[1])
        proj_params = fields(lines[39], 10)
        with OziFormatError('line 40'):
            validate_value(proj_params[0], 'Projection Setup')
        if proj_params[1]:
            ozi_map.projection.lat_origin = validate_float(proj_params[1])
        if proj_params[2]:
            ozi_map.projection.lon_origin = validate_float(proj_params[2])
        if proj_params[3]:
            ozi_map.projection.k_factor = validate_float(proj_params[3])
        if proj_params[4]:
            ozi_map.projection.false_easting = validate_float(proj_params[4])
        if proj_params[5]:
            ozi_map.projection.false_northing = validate_float(proj_params[5])
        if proj_params[6]:
            ozi_map.projection.lat1 = validate_float(proj_params[6])
        if proj_params[7]:
            ozi_map.projection.lat2 = validate_float(proj_params[7])
        if proj_params[8]:
            ozi_map.projection.height = validate_float(proj_params[8])

        # Ground control points: up to 30 "PointNN" records with either lat/lon
        # or projected (grid) references; only points marked 'in' that have
        # pixel coordinates are kept.
        ozi_map.gcps = []
        for i in xrange(1, 31):
            ozi_gcp = fields(lines[i + 8], 17)
            with OziFormatError('line %d' % (i + 9)):
                validate_value(ozi_gcp[0], 'Point%02d' % i)
                validate_value(ozi_gcp[1], 'xy')
                validate_value(ozi_gcp[5], 'deg')
                validate_value(ozi_gcp[12], 'grid')
                validate_values(ozi_gcp[4], ['in', 'ex'])
                if ozi_gcp[4] == 'in' and ozi_gcp[2] and ozi_gcp[3]:
                    gcp = AttrDict(pixel=AttrDict(), ref=AttrDict())
                    gcp.pixel.x = validate_number(ozi_gcp[2])
                    gcp.pixel.y = validate_number(ozi_gcp[3])
                    if ozi_gcp[6] and ozi_gcp[7] and ozi_gcp[9] and ozi_gcp[10]:
                        validate_values(ozi_gcp[8], ['S', 'N'])
                        validate_values(ozi_gcp[11], ['W', 'E'])
                        gcp.type = 'latlon'
                        gcp.ref.x = validate_number(ozi_gcp[9]) + validate_float(ozi_gcp[10]) / 60
                        gcp.ref.y = validate_number(ozi_gcp[6]) + validate_float(ozi_gcp[7]) / 60
                        if ozi_gcp[11] == 'W':
                            gcp.ref.x *= -1
                        if ozi_gcp[8] == 'S':
                            gcp.ref.y *= -1
                    elif ozi_gcp[14] and ozi_gcp[15]:
                        validate_values(ozi_gcp[16], ['N', 'S'])
                        gcp.type = 'proj'
                        gcp.ref.x = validate_float(ozi_gcp[14])
                        gcp.ref.y = validate_float(ozi_gcp[15])
                        if ozi_gcp[16] == 'S':
                            gcp.ref.y *= -1
                        if ozi_gcp[13]:
                            gcp.zone = validate_number(ozi_gcp[13])
                    else:
                        raise OziFormatError('incomplete gcp definition')
                    ozi_map.gcps.append(gcp)

        # Cutline: MMPLL records hold lon/lat corners, MMPXY records hold the
        # corresponding pixel coordinates.
        ozi_map.cutline = []
        ozi_map.cutline_pixels = []
        for line in lines[40:]:
            point = fields(line, 4)
            if point[0] == 'MMPLL':
                lat = validate_float(point[3])
                lon = validate_float(point[2])
                ozi_map.cutline.append((lon, lat))
            elif point[0] == 'MMPXY':
                x = validate_number(point[2])
                y = validate_number(point[3])
                ozi_map.cutline_pixels.append((x, y))
    except IndexError:
        raise OziFormatError('Document too short.')
    return ozi_map